crash-utility-crash-9cd43f5/0000775000372000037200000000000015107550337015374 5ustar juerghjuerghcrash-utility-crash-9cd43f5/lzorle_decompress.c0000664000372000037200000001467215107550337021305 0ustar juerghjuergh/* lzorle_decompress.h * * from kernel lib/lzo/lzo1x_decompress_safe.c * * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer * Copyright (C) 2024 NIO * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include "lzorle_decompress.h" /* This MAX_255_COUNT is the maximum number of times we can add 255 to a base * count without overflowing an integer. The multiply will overflow when * multiplying 255 by more than MAXINT/255. The sum will overflow earlier * depending on the base count. Since the base count is taken from a u8 * and a few bits, it is safe to assume that it will always be lower than * or equal to 2*255, thus we can always prevent any overflow by accepting * two less 255 steps. See Documentation/lzo.txt for more information. 
*/ #define MAX_255_COUNT ((((ulong)~0) / 255) - 2) static inline uint16_t get_unaligned_le16 (const uint8_t *p) { return p[0] | p[1] << 8; } int lzorle_decompress_safe(const unsigned char *in, ulong in_len, unsigned char *out, ulong *out_len, void *other/* NOT USED */) { unsigned char *op; const unsigned char *ip; ulong t, next; ulong state = 0; const unsigned char *m_pos; const unsigned char * const ip_end = in + in_len; unsigned char * const op_end = out + *out_len; unsigned char bitstream_version; static int efficient_unaligned_access = -1; if (efficient_unaligned_access == -1) { #if defined(ARM) || defined(ARM64) || defined(X86) || defined(X86_64) || defined(PPC) || defined(PPC64) || defined(S390)|| defined(S390X) efficient_unaligned_access = TRUE; #else efficient_unaligned_access = FALSE; #endif if ((kt->ikconfig_flags & IKCONFIG_AVAIL) && (get_kernel_config("CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS", NULL) == IKCONFIG_Y)) efficient_unaligned_access = TRUE; } op = out; ip = in; if (in_len < 3) goto input_overrun; if (in_len >= 5 && *ip == 17) { bitstream_version = ip[1]; ip += 2; } else { bitstream_version = 0; } if (*ip > 17) { t = *ip++ - 17; if (t < 4) { next = t; goto match_next; } goto copy_literal_run; } for (;;) { t = *ip++; if (t < 16) { if (state == 0) { if (t == 0) { ulong offset; const unsigned char *ip_last = ip; while (*ip == 0) { ip++; NEED_IP(1); } offset = ip - ip_last; if (offset > MAX_255_COUNT) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 15 + *ip++; } t += 3; copy_literal_run: if (efficient_unaligned_access && (HAVE_IP(t + 15) && HAVE_OP(t + 15))) { const unsigned char *ie = ip + t; unsigned char *oe = op + t; do { COPY8(op, ip); op += 8; ip += 8; COPY8(op, ip); op += 8; ip += 8; } while (ip < ie); ip = ie; op = oe; } else { NEED_OP(t); NEED_IP(t + 3); do { *op++ = *ip++; } while (--t > 0); } state = 4; continue; } else if (state != 4) { next = t & 3; m_pos = op - 1; m_pos -= t >> 2; m_pos -= *ip++ << 2; TEST_LB(m_pos); 
NEED_OP(2); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; goto match_next; } else { next = t & 3; m_pos = op - (1 + M2_MAX_OFFSET); m_pos -= t >> 2; m_pos -= *ip++ << 2; t = 3; } } else if (t >= 64) { next = t & 3; m_pos = op - 1; m_pos -= (t >> 2) & 7; m_pos -= *ip++ << 3; t = (t >> 5) - 1 + (3 - 1); } else if (t >= 32) { t = (t & 31) + (3 - 1); if (t == 2) { ulong offset; const unsigned char *ip_last = ip; while (*ip == 0) { ip++; NEED_IP(1); } offset = ip - ip_last; if (offset > MAX_255_COUNT) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 31 + *ip++; NEED_IP(2); } m_pos = op - 1; next = get_unaligned_le16(ip); ip += 2; m_pos -= next >> 2; next &= 3; } else { NEED_IP(2); next = get_unaligned_le16(ip); if (((next & 0xfffc) == 0xfffc) && ((t & 0xf8) == 0x18) && bitstream_version) { NEED_IP(3); t &= 7; t |= ip[2] << 3; t += MIN_ZERO_RUN_LENGTH; NEED_OP(t); memset(op, 0, t); op += t; next &= 3; ip += 3; goto match_next; } else { m_pos = op; m_pos -= (t & 8) << 11; t = (t & 7) + (3 - 1); if (t == 2) { ulong offset; const unsigned char *ip_last = ip; while (*ip == 0) { ip++; NEED_IP(1); } offset = ip - ip_last; if (offset > MAX_255_COUNT) return LZO_E_ERROR; offset = (offset << 8) - offset; t += offset + 7 + *ip++; NEED_IP(2); next = get_unaligned_le16(ip); } ip += 2; m_pos -= next >> 2; next &= 3; if (m_pos == op) goto eof_found; m_pos -= 0x4000; } } TEST_LB(m_pos); if (efficient_unaligned_access && (op - m_pos >= 8)) { unsigned char *oe = op + t; if (HAVE_OP(t + 15)) { do { COPY8(op, m_pos); op += 8; m_pos += 8; COPY8(op, m_pos); op += 8; m_pos += 8; } while (op < oe); op = oe; if (HAVE_IP(6)) { state = next; COPY4(op, ip); op += next; ip += next; continue; } } else { NEED_OP(t); do { *op++ = *m_pos++; } while (op < oe); } } else { unsigned char *oe = op + t; NEED_OP(t); op[0] = m_pos[0]; op[1] = m_pos[1]; op += 2; m_pos += 2; do { *op++ = *m_pos++; } while (op < oe); } match_next: state = next; t = next; if (efficient_unaligned_access && 
(HAVE_IP(6) && HAVE_OP(4))) { COPY4(op, ip); op += t; ip += t; } else { NEED_IP(t + 3); NEED_OP(t); while (t > 0) { *op++ = *ip++; t--; } } } eof_found: *out_len = op - out; return (t != 3 ? LZO_E_ERROR : ip == ip_end ? LZO_E_OK : ip < ip_end ? LZO_E_INPUT_NOT_CONSUMED : LZO_E_INPUT_OVERRUN); input_overrun: *out_len = op - out; return LZO_E_INPUT_OVERRUN; output_overrun: *out_len = op - out; return LZO_E_OUTPUT_OVERRUN; lookbehind_overrun: *out_len = op - out; return LZO_E_LOOKBEHIND_OVERRUN; } crash-utility-crash-9cd43f5/ipcs.c0000664000372000037200000007755715107550337016523 0ustar juerghjuergh/* ipcs.c - provide information on ipc facilities * * Copyright (C) 2012 FUJITSU LIMITED * Auther: Qiao Nuohan * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" /* From the crash source top-level directory */ #define SPECIFIED_NOTHING 0x0 #define SPECIFIED_ID 0x1 #define SPECIFIED_ADDR 0x2 #define IPCS_INIT 0x1 #define IDR_ORIG 0x2 #define IDR_RADIX 0x4 #define IDR_XARRAY 0x8 #define MAX_ID_SHIFT (sizeof(int)*8 - 1) #define MAX_ID_BIT (1U << MAX_ID_SHIFT) #define MAX_ID_MASK (MAX_ID_BIT - 1) #define SHM_DEST 01000 #define SHM_LOCKED 02000 struct shm_info { ulong shmid_kernel; int key; int shmid; ulong rss; ulong swap; unsigned int uid; unsigned int perms; ulong bytes; ulong nattch; ulong shm_inode; int deleted; }; struct sem_info { ulong sem_array; int key; int semid; unsigned int uid; unsigned int perms; ulong nsems; int deleted; }; struct msg_info { ulong msg_queue; int key; int msgid; unsigned int uid; unsigned int perms; ulong bytes; ulong messages; int deleted; }; struct ipcs_table { int idr_bits; ulong init_flags; ulong hugetlbfs_f_op_addr; ulong shm_f_op_addr; ulong shm_f_op_huge_addr; int use_shm_f_op; int seq_multiplier; int cnt; struct list_pair *lp; }; /* * function declaration */ static int dump_shared_memory(int, ulong, int, ulong); static int dump_semaphore_arrays(int, ulong, int, ulong); static int dump_message_queues(int, ulong, int, ulong); static int ipc_search_idr(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); static int ipc_search_array(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); static int dump_shm_info(ulong, int, ulong, int, int); static int dump_sem_info(ulong, int, ulong, int, int); static int dump_msg_info(ulong, int, ulong, int, int); static void get_shm_info(struct shm_info *, ulong, int); static void get_sem_info(struct sem_info *, ulong, int); static void get_msg_info(struct msg_info *, ulong, int); static void add_rss_swap(ulong, int, ulong *, ulong *); static int is_file_hugepages(ulong); static void gather_radix_tree_entries(ulong); static void gather_xarray_entries(ulong); /* * global data */ static struct ipcs_table ipcs_table = { 0 
}; void ipcs_init(void) { if (ipcs_table.init_flags & IPCS_INIT) { return; } ipcs_table.init_flags |= IPCS_INIT; MEMBER_OFFSET_INIT(file_f_op, "file", "f_op"); MEMBER_OFFSET_INIT(file_private_data, "file", "private_data"); MEMBER_OFFSET_INIT(hstate_order, "hstate", "order"); MEMBER_OFFSET_INIT(hugetlbfs_sb_info_hstate, "hugetlbfs_sb_info", "hstate"); MEMBER_OFFSET_INIT(idr_layers, "idr", "layers"); MEMBER_OFFSET_INIT(idr_layer_layer, "idr_layer", "layer"); MEMBER_OFFSET_INIT(idr_layer_ary, "idr_layer", "ary"); MEMBER_OFFSET_INIT(idr_top, "idr", "top"); MEMBER_OFFSET_INIT(idr_cur, "idr", "cur"); MEMBER_OFFSET_INIT(ipc_id_ary_p, "ipc_id_ary", "p"); MEMBER_OFFSET_INIT(ipc_ids_entries, "ipc_ids", "entries"); MEMBER_OFFSET_INIT(ipc_ids_max_id, "ipc_ids", "max_id"); MEMBER_OFFSET_INIT(ipc_ids_in_use, "ipc_ids", "in_use"); MEMBER_OFFSET_INIT(ipc_ids_ipcs_idr, "ipc_ids", "ipcs_idr"); MEMBER_OFFSET_INIT(ipc_namespace_ids, "ipc_namespace", "ids"); MEMBER_OFFSET_INIT(kern_ipc_perm_key, "kern_ipc_perm", "key"); MEMBER_OFFSET_INIT(kern_ipc_perm_id, "kern_ipc_perm", "id"); MEMBER_OFFSET_INIT(kern_ipc_perm_uid, "kern_ipc_perm", "uid"); MEMBER_OFFSET_INIT(kern_ipc_perm_mode, "kern_ipc_perm", "mode"); MEMBER_OFFSET_INIT(kern_ipc_perm_deleted, "kern_ipc_perm", "deleted"); MEMBER_OFFSET_INIT(kern_ipc_perm_seq, "kern_ipc_perm", "seq"); MEMBER_OFFSET_INIT(nsproxy_ipc_ns, "nsproxy", "ipc_ns"); MEMBER_OFFSET_INIT(shmem_inode_info_vfs_inode, "shmem_inode_info", "vfs_inode"); MEMBER_OFFSET_INIT(shmem_inode_info_swapped, "shmem_inode_info", "swapped"); if (INVALID_MEMBER(shmem_inode_info_swapped)) ANON_MEMBER_OFFSET_INIT(shmem_inode_info_swapped, "shmem_inode_info", "swapped"); MEMBER_OFFSET_INIT(shm_file_data_file, "shm_file_data", "file"); MEMBER_OFFSET_INIT(shmid_kernel_shm_perm, "shmid_kernel", "shm_perm"); MEMBER_OFFSET_INIT(shmid_kernel_shm_segsz, "shmid_kernel", "shm_segsz"); MEMBER_OFFSET_INIT(shmid_kernel_shm_nattch, "shmid_kernel", "shm_nattch"); 
MEMBER_OFFSET_INIT(shmid_kernel_shm_file, "shmid_kernel", "shm_file"); MEMBER_OFFSET_INIT(shmid_kernel_id, "shmid_kernel", "id"); MEMBER_OFFSET_INIT(sem_array_sem_perm, "sem_array", "sem_perm"); MEMBER_OFFSET_INIT(sem_array_sem_id, "sem_array", "sem_id"); MEMBER_OFFSET_INIT(sem_array_sem_nsems, "sem_array", "sem_nsems"); MEMBER_OFFSET_INIT(msg_queue_q_perm, "msg_queue", "q_perm"); MEMBER_OFFSET_INIT(msg_queue_q_id, "msg_queue", "q_id"); MEMBER_OFFSET_INIT(msg_queue_q_cbytes, "msg_queue", "q_cbytes"); MEMBER_OFFSET_INIT(msg_queue_q_qnum, "msg_queue", "q_qnum"); MEMBER_OFFSET_INIT(super_block_s_fs_info, "super_block", "s_fs_info"); /* * struct size */ STRUCT_SIZE_INIT(ipc_ids, "ipc_ids"); STRUCT_SIZE_INIT(shmid_kernel, "shmid_kernel"); STRUCT_SIZE_INIT(sem_array, "sem_array"); STRUCT_SIZE_INIT(msg_queue, "msg_queue"); STRUCT_SIZE_INIT(hstate, "hstate"); if (symbol_exists("hugetlbfs_file_operations")) ipcs_table.hugetlbfs_f_op_addr = symbol_value("hugetlbfs_file_operations"); if (symbol_exists("is_file_shm_hugepages")) { ipcs_table.use_shm_f_op = TRUE; ipcs_table.shm_f_op_addr = symbol_value("shm_file_operations"); if (symbol_exists("shm_file_operations_huge")) { ipcs_table.shm_f_op_huge_addr = symbol_value("shm_file_operations_huge"); } else { ipcs_table.shm_f_op_huge_addr = -1; } } else { ipcs_table.use_shm_f_op = FALSE; ipcs_table.shm_f_op_addr = -1; ipcs_table.shm_f_op_huge_addr = -1; } if (VALID_MEMBER(idr_layer_ary) && get_array_length("idr_layer.ary", NULL, 0) > 64) ipcs_table.idr_bits = 8; else if (BITS32()) ipcs_table.idr_bits = 5; else if (BITS64()) ipcs_table.idr_bits = 6; else error(FATAL, "machdep->bits is not 32 or 64"); if (VALID_MEMBER(idr_idr_rt)) { if (STREQ(MEMBER_TYPE_NAME("idr", "idr_rt"), "xarray")) ipcs_table.init_flags |= IDR_XARRAY; else { if (MEMBER_EXISTS("radix_tree_root", "rnode")) ipcs_table.init_flags |= IDR_RADIX; else if (MEMBER_EXISTS("radix_tree_root", "xa_head")) ipcs_table.init_flags |= IDR_XARRAY; } } else ipcs_table.init_flags |= 
IDR_ORIG; ipcs_table.seq_multiplier = 32768; } /* * Arguments are passed to the command functions in the global args[argcnt] * array. See getopt(3) for info on dash arguments. Check out defs.h and * other crash commands for usage of the myriad of utility routines available * to accomplish what your task. */ void cmd_ipcs(void) { int specified; char *specified_value[MAXARGS]; int value_index; int c; int shm, sem, msg, verbose; int i; ulong value, task; int found; struct task_context *tc; char buf[BUFSIZE]; value_index = 0; specified = SPECIFIED_NOTHING; shm = 0; sem = 0; msg = 0; verbose = 0; tc = NULL; while ((c = getopt(argcnt, args, "smMqn:")) != EOF) { switch(c) { case 's': sem = 1; break; case 'm': shm = 1; break; case 'M': shm = 1; verbose = 1; break; case 'q': msg = 1; break; case 'n': switch (str_to_context(optarg, &value, &tc)) { case STR_PID: case STR_TASK: break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); break; } break; default: cmd_usage(pc->curcmd, SYNOPSIS);; return; } } while (args[optind]) { if (value_index >= MAXARGS) error(FATAL, "too many id/member specified\n"); specified |= SPECIFIED_ID | SPECIFIED_ADDR; specified_value[value_index] = args[optind]; stol(args[optind], FAULT_ON_ERROR, NULL); optind++; value_index++; } if (THIS_KERNEL_VERSION < LINUX(2,6,0)) command_not_supported(); ipcs_init(); if (!shm && !sem && !msg) shm = sem = msg = 1; task = tc ? 
tc->task : pid_to_task(0); if (!value_index) { if (shm) dump_shared_memory(specified, 0, verbose, task); if (sem) dump_semaphore_arrays(specified, 0, 0, task); if (msg) dump_message_queues(specified, 0, 0, task); } else { open_tmpfile(); i = 0; while (i < value_index) { found = 0; value = stol(specified_value[i], FAULT_ON_ERROR, NULL); if (shm) found += dump_shared_memory(specified, value, verbose, task); if (sem) found += dump_semaphore_arrays(specified, value, 0, task); if (msg) found += dump_message_queues(specified, value, 0, task); if (!found) fprintf(pc->saved_fp, "invalid id or address: %s\n\n", specified_value[i]); i++; } fflush(fp); rewind(fp); while (fgets(buf, BUFSIZE, fp)) fprintf(pc->saved_fp, "%s", buf); close_tmpfile(); } } static int dump_shared_memory(int specified, ulong specified_value, int verbose, ulong task) { ulong nsproxy_p, ipc_ns_p; ulong ipc_ids_p; int (*ipc_search)(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); int (*dump_shm)(ulong, int, ulong, int, int); char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; char buf7[BUFSIZE]; if (!verbose && specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=12?12:VADDR_PRLEN, LJUST, "SHMID_KERNEL"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SHMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "BYTES"), mkstring(buf6, 6, LJUST, "NATTCH"), mkstring(buf7, 6, LJUST, "STATUS")); } dump_shm = dump_shm_info; if (VALID_MEMBER(kern_ipc_perm_id)) { ipc_search = ipc_search_idr; } else { ipc_search = ipc_search_array; } if (symbol_exists("shm_ids")) { ipc_ids_p = symbol_value("shm_ids"); } else { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + OFFSET(nsproxy_ipc_ns), KVADDR, &ipc_ns_p, sizeof(ulong), 
"nsproxy.ipc_ns", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine ipc_namespace location!\n"); if (MEMBER_SIZE("ipc_namespace","ids") == sizeof(ulong) * 3) readmem(ipc_ns_p + OFFSET(ipc_namespace_ids) + sizeof(ulong) * 2, KVADDR, &ipc_ids_p, sizeof(ulong), "ipc_namespace.ids[2]", FAULT_ON_ERROR); else ipc_ids_p = ipc_ns_p + OFFSET(ipc_namespace_ids) + 2 * SIZE(ipc_ids); } if (ipc_search(ipc_ids_p, specified, specified_value, dump_shm, verbose)) { return 1; } else { if (verbose && specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=12?12:VADDR_PRLEN, LJUST, "SHMID_KERNEL"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SHMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "BYTES"), mkstring(buf6, 6, LJUST, "NATTCH"), mkstring(buf7, 6, LJUST, "STATUS")); fprintf(fp, "(none allocated)\n\n"); } return 0; } } static int dump_semaphore_arrays(int specified, ulong specified_value, int verbose, ulong task) { ulong nsproxy_p, ipc_ns_p; ulong ipc_ids_p; int (*ipc_search)(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); int (*dump_sem)(ulong, int, ulong, int, int); char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; if (specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN, LJUST, "SEM_ARRAY"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SEMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "NSEMS")); } dump_sem = dump_sem_info; if (VALID_MEMBER(kern_ipc_perm_id)) { ipc_search = ipc_search_idr; } else { ipc_search = ipc_search_array; } if (symbol_exists("sem_ids")) { ipc_ids_p = symbol_value("sem_ids"); } else { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + 
OFFSET(nsproxy_ipc_ns), KVADDR, &ipc_ns_p, sizeof(ulong), "nsproxy.ipc_ns", FAULT_ON_ERROR|QUIET)) error(FATAL, "cannot determine ipc_namespace location!\n"); if (MEMBER_SIZE("ipc_namespace","ids") == sizeof(ulong) * 3) readmem(ipc_ns_p + OFFSET(ipc_namespace_ids), KVADDR, &ipc_ids_p, sizeof(ulong), "ipc_namespace.ids[2]", FAULT_ON_ERROR); else ipc_ids_p = ipc_ns_p + OFFSET(ipc_namespace_ids); } return ipc_search(ipc_ids_p, specified, specified_value, dump_sem, verbose); } static int dump_message_queues(int specified, ulong specified_value, int verbose, ulong task) { ulong nsproxy_p, ipc_ns_p; ulong ipc_ids_p; int (*ipc_search)(ulong, int, ulong, int (*)(ulong, int, ulong, int, int), int); int (*dump_msg)(ulong, int, ulong, int, int); char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; if (specified == SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN, LJUST, "MSG_QUEUE"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "MSQID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 12, LJUST, "USED-BYTES"), mkstring(buf6, 12, LJUST, "MESSAGES")); } dump_msg = dump_msg_info; if (VALID_MEMBER(kern_ipc_perm_id)) { ipc_search = ipc_search_idr; } else { ipc_search = ipc_search_array; } if (symbol_exists("msg_ids")) { ipc_ids_p = symbol_value("msg_ids"); } else { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + OFFSET(nsproxy_ipc_ns), KVADDR, &ipc_ns_p, sizeof(ulong), "nsproxy.ipc_ns", FAULT_ON_ERROR|QUIET)) error(FATAL, "cannot determine ipc_namespace location!\n"); if (MEMBER_SIZE("ipc_namespace","ids") == sizeof(ulong) * 3) readmem(ipc_ns_p + OFFSET(ipc_namespace_ids) + sizeof(ulong), KVADDR, &ipc_ids_p, sizeof(ulong), "ipc_namespace.ids[2]", FAULT_ON_ERROR); else ipc_ids_p = ipc_ns_p + 
OFFSET(ipc_namespace_ids) + SIZE(ipc_ids); } return ipc_search(ipc_ids_p, specified, specified_value, dump_msg, verbose); } /* * if shared memory information is stored in an array, use this function. */ static int ipc_search_array(ulong ipc_ids_p, int specified, ulong specified_value, int (*fn)(ulong, int, ulong, int, int), int verbose) { ulong entries_p; int max_id, i; ulong *array; int found = 0; int allocated = 0; readmem(ipc_ids_p + OFFSET(ipc_ids_entries), KVADDR, &entries_p, sizeof(ulong), "ipc_ids.entries", FAULT_ON_ERROR); readmem(ipc_ids_p + OFFSET(ipc_ids_max_id), KVADDR, &max_id, sizeof(int), "ipc_ids.max_id", FAULT_ON_ERROR); if (max_id < 0) { if (specified == SPECIFIED_NOTHING && !verbose) fprintf(fp, "(none allocated)\n\n"); return 0; } array = (ulong *)GETBUF(sizeof(ulong *) * (max_id + 1)); if (VALID_MEMBER(ipc_id_ary_p)) readmem(entries_p + OFFSET(ipc_id_ary_p), KVADDR, array, sizeof(ulong *) * (max_id + 1), "ipc_id_ary.p", FAULT_ON_ERROR); else readmem(entries_p, KVADDR, array, sizeof(ulong *)*(max_id+1), "ipc_id array", FAULT_ON_ERROR); for (i=0; i<=max_id; i++) { if (array[i] == 0) continue; if (fn(array[i], specified, specified_value, i, verbose)) { allocated++; found = 1; if (specified != SPECIFIED_NOTHING) break; } } if (specified == SPECIFIED_NOTHING && !verbose) { if (!allocated) fprintf(fp, "(none allocated)\n"); fprintf(fp, "\n"); } FREEBUF(array); if (found) return 1; else return 0; } /* * if shared memory information is stored by using idr, use this function to * get data. 
*/ static int ipc_search_idr(ulong ipc_ids_p, int specified, ulong specified_value, int (*fn)(ulong, int, ulong, int, int), int verbose) { int i, in_use; ulong ipcs_idr_p; ulong ipc; int next_id, total; int found = 0; readmem(ipc_ids_p + OFFSET(ipc_ids_in_use), KVADDR, &in_use, sizeof(int), "ipc_ids.in_use", FAULT_ON_ERROR); ipcs_idr_p = ipc_ids_p + OFFSET(ipc_ids_ipcs_idr); if (!in_use) { if (specified == SPECIFIED_NOTHING && !verbose) fprintf(fp, "(none allocated)\n\n"); return 0; } if (VALID_MEMBER(idr_idr_rt)) { switch (ipcs_table.init_flags & (IDR_RADIX|IDR_XARRAY)) { case IDR_RADIX: gather_radix_tree_entries(ipcs_idr_p); break; case IDR_XARRAY: gather_xarray_entries(ipcs_idr_p); break; } for (i = 0; i < ipcs_table.cnt; i++) { ipc = (ulong)ipcs_table.lp[i].value; if (fn(ipc, specified, specified_value, UNUSED, verbose)) { found = 1; if (specified != SPECIFIED_NOTHING) break; } } if (ipcs_table.lp) FREEBUF(ipcs_table.lp); } else { for (total = 0, next_id = 0; total < in_use; next_id++) { ipc = idr_find(ipcs_idr_p, next_id); if (ipc == 0) continue; total++; if (fn(ipc, specified, specified_value, next_id, verbose)) { found = 1; if (specified != SPECIFIED_NOTHING) break; } } } if (!verbose && specified == SPECIFIED_NOTHING) fprintf(fp, "\n"); if (found || specified == SPECIFIED_NOTHING) return 1; else return 0; } /* * search every idr_layer */ ulong idr_find(ulong idp, int id) { ulong idr_layer_p; int layer; int idr_layers; int n; int index; readmem(idp + OFFSET(idr_top), KVADDR, &idr_layer_p, sizeof(ulong), "idr.top", FAULT_ON_ERROR); if (!idr_layer_p) return 0; if (VALID_MEMBER(idr_layer_layer)) { readmem(idr_layer_p + OFFSET(idr_layer_layer), KVADDR, &layer, sizeof(int), "idr_layer.layer", FAULT_ON_ERROR); n = (layer + 1) * ipcs_table.idr_bits; } else { readmem(idp + OFFSET(idr_layers), KVADDR, &idr_layers, sizeof(int), "idr.layers", FAULT_ON_ERROR); n = idr_layers * ipcs_table.idr_bits; } id &= MAX_ID_MASK; if (id >= (1 << n)) return 0; while (n > 0 && 
idr_layer_p) { n -= ipcs_table.idr_bits; index = (id >> n) & ((1 << ipcs_table.idr_bits) - 1); readmem(idr_layer_p + OFFSET(idr_layer_ary) + sizeof(ulong) * index, KVADDR, &idr_layer_p, sizeof(ulong), "idr_layer.ary", FAULT_ON_ERROR); } return idr_layer_p; } /* * only specified is not SPECIFIED_NOTHIND, and the specified_value is found, * then return 1 */ static int dump_shm_info(ulong shp, int specified, ulong specified_value, int id, int verbose) { struct shm_info shm_info; char buf[BUFSIZE]; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; char buf7[BUFSIZE]; get_shm_info(&shm_info, shp, id); if (shm_info.deleted) return 0; if (((specified & SPECIFIED_ID) && shm_info.shmid == specified_value) || ((specified & SPECIFIED_ADDR) && shm_info.shmid_kernel == specified_value) || specified == SPECIFIED_NOTHING) { if (verbose || specified != SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=12?12:VADDR_PRLEN, LJUST, "SHMID_KERNEL"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SHMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "BYTES"), mkstring(buf6, 6, LJUST, "NATTCH"), mkstring(buf7, 6, LJUST, "STATUS")); } fprintf(fp, "%s %08x %-10d %-5d %-5o %-10ld %-6ld %-s %-s\n", mkstring(buf, VADDR_PRLEN <= 12 ? 12 : VADDR_PRLEN, LJUST|LONG_HEX, (char *)shm_info.shmid_kernel), shm_info.key, shm_info.shmid, shm_info.uid, shm_info.perms & 0777, shm_info.bytes, shm_info.nattch, shm_info.perms & SHM_DEST ? "dest" : "", shm_info.perms & SHM_LOCKED ? 
"locked" : ""); if (verbose) { fprintf(fp, "PAGES ALLOCATED/RESIDENT/SWAPPED: %ld/%ld/%ld\n", (shm_info.bytes+PAGESIZE()-1) >> PAGESHIFT(), shm_info.rss, shm_info.swap); fprintf(fp, "INODE: %lx\n", shm_info.shm_inode); } if (verbose || specified != SPECIFIED_NOTHING) fprintf(fp, "\n"); return 1; } else return 0; } /* * only specified is not SPECIFIED_NOTHIND, and the specified_value is found, * then return 1 */ static int dump_sem_info(ulong shp, int specified, ulong specified_value, int id, int verbose) { struct sem_info sem_info; char buf[BUFSIZE]; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; get_sem_info(&sem_info, shp, id); if (sem_info.deleted) return 0; if (((specified & SPECIFIED_ID) && sem_info.semid == specified_value) || ((specified & SPECIFIED_ADDR) && sem_info.sem_array == specified_value) || specified == SPECIFIED_NOTHING) { if (specified != SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN, LJUST, "SEM_ARRAY"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "SEMID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 10, LJUST, "NSEMS")); } fprintf(fp, "%s %08x %-10d %-5d %-5o %-10ld\n", mkstring(buf, VADDR_PRLEN <= 10 ? 
10 : VADDR_PRLEN, LJUST|LONG_HEX, (char *)sem_info.sem_array), sem_info.key, sem_info.semid, sem_info.uid, sem_info.perms & 0777, sem_info.nsems); if (specified != SPECIFIED_NOTHING) fprintf(fp, "\n"); return 1; } else return 0; } /* * only specified is not SPECIFIED_NOTHIND, and the specified_value is found, * then return 1 */ static int dump_msg_info(ulong shp, int specified, ulong specified_value, int id, int verbose) { struct msg_info msg_info; char buf[BUFSIZE]; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; get_msg_info(&msg_info, shp, id); if (msg_info.deleted) return 0; if (((specified & SPECIFIED_ID) && msg_info.msgid == specified_value) || ((specified & SPECIFIED_ADDR) && msg_info.msg_queue == specified_value) || specified == SPECIFIED_NOTHING) { if (specified != SPECIFIED_NOTHING) { fprintf(fp, "%s %s %s %s %s %s %s\n", mkstring(buf0, VADDR_PRLEN<=10?10:VADDR_PRLEN, LJUST, "MSG_QUEUE"), mkstring(buf1, 8, LJUST, "KEY"), mkstring(buf2, 10, LJUST, "MSQID"), mkstring(buf3, 5, LJUST, "UID"), mkstring(buf4, 5, LJUST, "PERMS"), mkstring(buf5, 12, LJUST, "USED-BYTES"), mkstring(buf6, 12, LJUST, "MESSAGES")); } fprintf(fp, "%s %08x %-10d %-5d %-5o %-12ld %-12ld\n", mkstring(buf, VADDR_PRLEN <= 10 ? 
10 : VADDR_PRLEN, LJUST|LONG_HEX, (char *)msg_info.msg_queue), msg_info.key, msg_info.msgid, msg_info.uid, msg_info.perms & 0777, msg_info.bytes, msg_info.messages); if (specified != SPECIFIED_NOTHING) fprintf(fp, "\n"); return 1; } else return 0; } static void get_shm_info(struct shm_info *shm_info, ulong shp, int id) { char buf[BUFSIZE]; ulong filep, dentryp, inodep; shm_info->shmid_kernel = shp - OFFSET(shmid_kernel_shm_perm); /* * cache shmid_kernel */ readmem(shm_info->shmid_kernel, KVADDR, buf, SIZE(shmid_kernel), "shmid_kernel", FAULT_ON_ERROR); shm_info->key = INT(buf + OFFSET(shmid_kernel_shm_perm) + OFFSET(kern_ipc_perm_key)); if (VALID_MEMBER(shmid_kernel_id)) shm_info->shmid = INT(buf + OFFSET(shmid_kernel_id)); else shm_info->shmid = INT(buf + OFFSET(shmid_kernel_shm_perm) + OFFSET(kern_ipc_perm_id)); shm_info->uid = UINT(buf + OFFSET(shmid_kernel_shm_perm) + OFFSET(kern_ipc_perm_uid)); if (BITS32()) shm_info->perms = USHORT(buf + OFFSET(shmid_kernel_shm_perm) + OFFSET(kern_ipc_perm_mode)); else shm_info->perms = UINT(buf + OFFSET(shmid_kernel_shm_perm) + OFFSET(kern_ipc_perm_mode)); shm_info->bytes = ULONG(buf + OFFSET(shmid_kernel_shm_segsz)); shm_info->nattch = ULONG(buf + OFFSET(shmid_kernel_shm_nattch)); filep = ULONG(buf + OFFSET(shmid_kernel_shm_file)); readmem(filep + OFFSET(file_f_dentry), KVADDR, &dentryp, sizeof(ulong), "file.f_dentry", FAULT_ON_ERROR); readmem(dentryp + OFFSET(dentry_d_inode), KVADDR, &inodep, sizeof(ulong), "dentry.d_inode", FAULT_ON_ERROR); /* * shm_inode here is the vfs_inode of struct shmem_inode_info */ shm_info->shm_inode = inodep; shm_info->rss = 0; shm_info->swap = 0; add_rss_swap(inodep, is_file_hugepages(filep), &shm_info->rss, &shm_info->swap); shm_info->deleted = UCHAR(buf + OFFSET(shmid_kernel_shm_perm) + OFFSET(kern_ipc_perm_deleted)); } static void get_sem_info(struct sem_info *sem_info, ulong shp, int id) { char buf[BUFSIZE]; sem_info->sem_array = shp - OFFSET(sem_array_sem_perm); /* * cache sem_array */ 
readmem(sem_info->sem_array, KVADDR, buf, SIZE(sem_array), "sem_array", FAULT_ON_ERROR); sem_info->key = INT(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_key)); if (VALID_MEMBER(sem_array_sem_id)) sem_info->semid = INT(buf + OFFSET(sem_array_sem_id)); else if (VALID_MEMBER(kern_ipc_perm_id)) sem_info->semid = INT(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_id)); else { ulong seq; seq = ULONG(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_seq)); sem_info->semid = ipcs_table.seq_multiplier * seq + id; } sem_info->uid = UINT(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_uid)); if (BITS32()) sem_info->perms = USHORT(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_mode)); else sem_info->perms = UINT(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_mode)); sem_info->nsems = ULONG(buf + OFFSET(sem_array_sem_nsems)); sem_info->deleted = UCHAR(buf + OFFSET(sem_array_sem_perm) + OFFSET(kern_ipc_perm_deleted)); } static void get_msg_info(struct msg_info *msg_info, ulong shp, int id) { char buf[BUFSIZE]; msg_info->msg_queue = shp - OFFSET(msg_queue_q_perm); /* * cache msg_queue */ readmem(msg_info->msg_queue, KVADDR, buf, SIZE(msg_queue), "msg_queue", FAULT_ON_ERROR); msg_info->key = INT(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_key)); if (VALID_MEMBER(msg_queue_q_id)) msg_info->msgid = INT(buf + OFFSET(msg_queue_q_id)); else if (VALID_MEMBER(kern_ipc_perm_id)) msg_info->msgid = INT(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_id)); else { ulong seq; seq = ULONG(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_seq)); msg_info->msgid = ipcs_table.seq_multiplier * seq + id; } msg_info->uid = UINT(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_uid)); if (BITS32()) msg_info->perms = USHORT(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_mode)); else msg_info->perms = UINT(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_mode)); msg_info->bytes = ULONG(buf + 
OFFSET(msg_queue_q_cbytes)); msg_info->messages = ULONG(buf + OFFSET(msg_queue_q_qnum)); msg_info->deleted = UCHAR(buf + OFFSET(msg_queue_q_perm) + OFFSET(kern_ipc_perm_deleted)); } /* * get rss & swap related to every shared memory, and get the total number of rss * & swap */ static void add_rss_swap(ulong inode_p, int hugepage, ulong *rss, ulong *swap) { unsigned long mapping_p, nr_pages; readmem(inode_p + OFFSET(inode_i_mapping), KVADDR, &mapping_p, sizeof(ulong), "inode.i_mapping", FAULT_ON_ERROR); readmem(mapping_p + OFFSET(address_space_nrpages), KVADDR, &nr_pages, sizeof(ulong), "address_space.nrpages", FAULT_ON_ERROR); if (hugepage) { unsigned long pages_per_hugepage; if (VALID_SIZE(hstate)) { unsigned long i_sb_p, hsb_p, hstate_p; unsigned int order; readmem(inode_p + OFFSET(inode_i_sb), KVADDR, &i_sb_p, sizeof(ulong), "inode.i_sb", FAULT_ON_ERROR); readmem(i_sb_p + OFFSET(super_block_s_fs_info), KVADDR, &hsb_p, sizeof(ulong), "super_block.s_fs_info", FAULT_ON_ERROR); readmem(hsb_p + OFFSET(hugetlbfs_sb_info_hstate), KVADDR, &hstate_p, sizeof(ulong), "hugetlbfs_sb_info.hstate", FAULT_ON_ERROR); readmem(hstate_p + OFFSET(hstate_order), KVADDR, &order, sizeof(uint), "hstate.order", FAULT_ON_ERROR); pages_per_hugepage = 1 << order; } else { unsigned long hpage_shift; /* * HPAGE_SHIFT is 21 after commit 83a5101b * (kernel > 2.6.24) */ if (THIS_KERNEL_VERSION > LINUX(2, 6, 24)) { hpage_shift = 21; } else { /* * HPAGE_SHIFT: * x86(PAE): 21 * x86(no PAE): 22 * x86_64: 21 */ if ((machine_type("X86") && !(machdep->flags & PAE))) hpage_shift = 22; else hpage_shift = 21; } pages_per_hugepage = (1 << hpage_shift) / PAGESIZE(); } *rss += pages_per_hugepage * nr_pages; } else { unsigned long swapped; *rss += nr_pages; readmem(inode_p - OFFSET(shmem_inode_info_vfs_inode) + OFFSET(shmem_inode_info_swapped), KVADDR, &swapped, sizeof(ulong), "shmem_inode_info.swapped", FAULT_ON_ERROR); *swap += swapped; } } static int is_file_hugepages(ulong file_p) { unsigned long f_op, 
sfd_p; again: readmem(file_p + OFFSET(file_f_op), KVADDR, &f_op, sizeof(ulong), "file.f_op", FAULT_ON_ERROR); if (f_op == ipcs_table.hugetlbfs_f_op_addr) return 1; if (ipcs_table.use_shm_f_op) { if (ipcs_table.shm_f_op_huge_addr != -1) { if (f_op == ipcs_table.shm_f_op_huge_addr) return 1; } else { if (f_op == ipcs_table.shm_f_op_addr) { readmem(file_p + OFFSET(file_private_data), KVADDR, &sfd_p, sizeof(ulong), "file.private_data", FAULT_ON_ERROR); readmem(sfd_p + OFFSET(shm_file_data_file), KVADDR, &file_p, sizeof(ulong), "shm_file_data.file", FAULT_ON_ERROR); goto again; } } } return 0; } static void gather_radix_tree_entries(ulong ipcs_idr_p) { long len; ipcs_table.cnt = do_radix_tree(ipcs_idr_p, RADIX_TREE_COUNT, NULL); if (ipcs_table.cnt) { len = sizeof(struct list_pair) * (ipcs_table.cnt+1); ipcs_table.lp = (struct list_pair *)GETBUF(len); ipcs_table.lp[0].index = ipcs_table.cnt; ipcs_table.cnt = do_radix_tree(ipcs_idr_p, RADIX_TREE_GATHER, ipcs_table.lp); } else ipcs_table.lp = NULL; } static void gather_xarray_entries(ulong ipcs_idr_p) { long len; ipcs_table.cnt = do_xarray(ipcs_idr_p, XARRAY_COUNT, NULL); if (ipcs_table.cnt) { len = sizeof(struct list_pair) * (ipcs_table.cnt+1); ipcs_table.lp = (struct list_pair *)GETBUF(len); ipcs_table.lp[0].index = ipcs_table.cnt; ipcs_table.cnt = do_xarray(ipcs_idr_p, XARRAY_GATHER, ipcs_table.lp); } else ipcs_table.lp = NULL; } crash-utility-crash-9cd43f5/diskdump.c0000664000372000037200000026426015107550337017372 0ustar juerghjuergh/* * diskdump.c * * The diskdump module optionally creates either ELF vmcore * dumpfiles, or compressed dumpfiles derived from the LKCD format. * In the case of ELF vmcore files, since they are identical to * netdump dumpfiles, the facilities in netdump.c are used. For * compressed dumpfiles, the facilities in this file are used. * * Copyright (C) 2004-2015 David Anderson * Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved. 
* Copyright (C) 2005 FUJITSU LIMITED * Copyright (C) 2005 NEC Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include "diskdump.h" #include "xen_dom0.h" #include "vmcore.h" #include "maple_tree.h" #include "lzorle_decompress.h" #define BITMAP_SECT_LEN 4096 struct diskdump_data { char *filename; ulong flags; /* DISKDUMP_LOCAL, plus anything else... */ int dfd; /* dumpfile file descriptor */ FILE *ofp; /* fprintf(dd->ofp, "xxx"); */ int machine_type; /* machine type identifier */ /* header */ struct disk_dump_header *header; struct disk_dump_sub_header *sub_header; struct kdump_sub_header *sub_header_kdump; unsigned long long max_mapnr; /* 64bit max_mapnr */ size_t data_offset; int block_size; int block_shift; char *bitmap; off_t bitmap_len; char *dumpable_bitmap; int byte, bit; char *compressed_page; /* copy of compressed page data */ char *curbufptr; /* ptr to uncompressed page buffer */ unsigned char *notes_buf; /* copy of elf notes */ void **nt_prstatus_percpu; uint num_prstatus_notes; void **nt_qemu_percpu; void **nt_qemucs_percpu; uint num_qemu_notes; void **nt_vmcoredd_array; uint num_vmcoredd_notes; /* page cache */ struct page_cache_hdr { /* header for each cached page */ uint32_t pg_flags; uint64_t pg_addr; char *pg_bufptr; ulong pg_hit_count; } page_cache_hdr[DISKDUMP_CACHED_PAGES]; char *page_cache_buf; /* base of cached buffer pages */ int evict_index; /* next page to evict */ ulong evictions; /* total evictions done */ ulong cached_reads; ulong *valid_pages; int max_sect_len; /* 
highest bucket of valid_pages */ ulong accesses; ulong snapshot_task; }; static struct diskdump_data diskdump_data = { 0 }; static struct diskdump_data *dd = &diskdump_data; ulong *diskdump_flags = &diskdump_data.flags; static int __diskdump_memory_dump(FILE *); static void dump_vmcoreinfo(FILE *); static void dump_note_offsets(FILE *); static char *vmcoreinfo_read_string(const char *); static void diskdump_get_osrelease(void); static int valid_note_address(unsigned char *); /* For split dumpfile */ static struct diskdump_data **dd_list = NULL; static int num_dd = 0; static int num_dumpfiles = 0; int dumpfile_is_split(void) { return KDUMP_SPLIT(); } int have_crash_notes(int cpu) { ulong crash_notes, notes_ptr; char *buf, *p; Elf64_Nhdr *note = NULL; if (!readmem(symbol_value("crash_notes"), KVADDR, &crash_notes, sizeof(crash_notes), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read \"crash_notes\"\n"); return FALSE; } if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) notes_ptr = crash_notes + kt->__per_cpu_offset[cpu]; else notes_ptr = crash_notes; buf = GETBUF(SIZE(note_buf)); if (!readmem(notes_ptr, KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", cpu); return FALSE; } note = (Elf64_Nhdr *)buf; p = buf + sizeof(Elf64_Nhdr); if (note->n_type != NT_PRSTATUS) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", cpu); return FALSE; } if (!STRNEQ(p, "CORE")) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", cpu); return FALSE; } return TRUE; } int diskdump_is_cpu_prstatus_valid(int cpu) { static int crash_notes_exists = -1; if (crash_notes_exists == -1) crash_notes_exists = kernel_symbol_exists("crash_notes"); return (!crash_notes_exists || have_crash_notes(cpu)); } void map_cpus_to_prstatus_kdump_cmprs(void) { void **nt_ptr; int online, i, j, nrcpus; size_t size; if (pc->flags2 & QEMU_MEM_DUMP_COMPRESSED) /* notes exist for all cpus 
*/ goto resize_note_pointers; if (!(online = get_cpus_online()) || (online == kt->cpus)) goto resize_note_pointers; if (CRASHDEBUG(1)) error(INFO, "cpus: %d online: %d NT_PRSTATUS notes: %d (remapping)\n", kt->cpus, online, dd->num_prstatus_notes); size = NR_CPUS * sizeof(void *); nt_ptr = (void **)GETBUF(size); BCOPY(dd->nt_prstatus_percpu, nt_ptr, size); BZERO(dd->nt_prstatus_percpu, size); /* * Re-populate the array with the notes mapping to online cpus */ nrcpus = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS); for (i = 0, j = 0; i < nrcpus; i++) { if (in_cpu_map(ONLINE_MAP, i) && machdep->is_cpu_prstatus_valid(i)) { dd->nt_prstatus_percpu[i] = nt_ptr[j++]; dd->num_prstatus_notes = MAX(dd->num_prstatus_notes, i+1); } } FREEBUF(nt_ptr); resize_note_pointers: /* * For architectures that only utilize the note pointers * within this file, resize the arrays accordingly. */ if (machine_type("X86_64") || machine_type("X86") || machine_type("ARM64")) { if ((dd->nt_prstatus_percpu = realloc(dd->nt_prstatus_percpu, dd->num_prstatus_notes * sizeof(void *))) == NULL) error(FATAL, "compressed kdump: cannot realloc NT_PRSTATUS note pointers\n"); if (dd->num_qemu_notes) { if ((dd->nt_qemu_percpu = realloc(dd->nt_qemu_percpu, dd->num_qemu_notes * sizeof(void *))) == NULL) error(FATAL, "compressed kdump: cannot realloc QEMU note pointers\n"); if ((dd->nt_qemucs_percpu = realloc(dd->nt_qemucs_percpu, dd->num_qemu_notes * sizeof(void *))) == NULL) error(FATAL, "compressed kdump: cannot realloc QEMU note pointers\n"); } else { free(dd->nt_qemu_percpu); free(dd->nt_qemucs_percpu); } } } static void add_diskdump_data(char* name) { #define DDL_SIZE 16 int i; int sz = sizeof(void *); struct diskdump_data *ddp; if (dd_list == NULL) { dd_list = calloc(DDL_SIZE, sz); num_dd = DDL_SIZE; } else { for (i = 0; i < num_dumpfiles; i++) { ddp = dd_list[i]; if (same_file(ddp->filename, name)) error(FATAL, "split dumpfiles are identical:\n" " %s\n %s\n", ddp->filename, name); if 
(memcmp(ddp->header, dd->header, sizeof(struct disk_dump_header))) error(FATAL, "split dumpfiles derived from different vmcores:\n" " %s\n %s\n", ddp->filename, name); } } if (num_dumpfiles == num_dd) { /* expand list */ struct diskdump_data **tmp; tmp = calloc(num_dd*2, sz); memcpy(tmp, dd_list, sz*num_dd); free(dd_list); dd_list = tmp; num_dd *= 2; } dd_list[num_dumpfiles] = dd; dd->flags |= DUMPFILE_SPLIT; dd->filename = name; if (CRASHDEBUG(1)) fprintf(fp, "%s: start_pfn=%llu, end_pfn=%llu\n", name, dd->sub_header_kdump->start_pfn_64, dd->sub_header_kdump->end_pfn_64); } static void clean_diskdump_data(void) { int i; if (dd_list == NULL) return; for (i=1; ibitmap, nr >> 3, nr & 7); } static inline int page_is_dumpable(unsigned long nr) { return dd->dumpable_bitmap[nr>>3] & (1 << (nr & 7)); } static inline int dump_is_partial(const struct disk_dump_header *header) { return header->bitmap_blocks >= divideup(divideup(dd->max_mapnr, 8), dd->block_size) * 2; } static int open_dump_file(char *file) { int fd; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "diskdump / compressed kdump: unable to open dump file %s\n", file); return FALSE; } if (KDUMP_SPLIT()) dd = calloc(1, sizeof(*dd)); dd->dfd = fd; return TRUE; } void process_elf32_notes(void *note_buf, unsigned long size_note) { Elf32_Nhdr *nt; size_t index, len = 0; int num = 0; int vmcoredd_num = 0; int qemu_num = 0; for (index = 0; index < size_note; index += len) { nt = note_buf + index; if (nt->n_type == NT_PRSTATUS) { dd->nt_prstatus_percpu[num] = nt; num++; } len = sizeof(Elf32_Nhdr); if (STRNEQ((char *)nt + len, "QEMU")) { ulong *ptr = (ulong *)((char *)nt + sizeof(Elf32_Nhdr) + nt->n_namesz); dd->nt_qemucs_percpu[qemu_num] = (ulong *)roundup((ulong) ptr, 4); dd->nt_qemu_percpu[qemu_num] = nt; qemu_num++; } if (nt->n_type == NT_XEN_KDUMP_CR3 || nt->n_type == XEN_ELFNOTE_CRASH_INFO) { void *data = (char*)(nt + 1) + roundup(nt->n_namesz, 4); process_xen_note(nt->n_type, data, nt->n_descsz); } if 
(nt->n_type == NT_VMCOREDD && vmcoredd_num < NR_DEVICE_DUMPS) { dd->nt_vmcoredd_array[vmcoredd_num] = nt; vmcoredd_num++; } len = roundup(len + nt->n_namesz, 4); len = roundup(len + nt->n_descsz, 4); } if (num > 0) { pc->flags2 |= ELF_NOTES; dd->num_prstatus_notes = num; } if (qemu_num > 0) { pc->flags2 |= QEMU_MEM_DUMP_COMPRESSED; dd->num_qemu_notes = qemu_num; } if (vmcoredd_num > 0) dd->num_vmcoredd_notes = vmcoredd_num; return; } void process_elf64_notes(void *note_buf, unsigned long size_note) { Elf64_Nhdr *nt; size_t index, len = 0; int num = 0; int vmcoredd_num = 0; int qemu_num = 0; for (index = 0; index < size_note; index += len) { nt = note_buf + index; if (nt->n_type == NT_PRSTATUS) { dd->nt_prstatus_percpu[num] = nt; num++; } if ((nt->n_type == NT_TASKSTRUCT) && (STRNEQ((char *)nt + sizeof(Elf64_Nhdr), "SNAP"))) { pc->flags2 |= (LIVE_DUMP|SNAP); dd->snapshot_task = *((ulong *)((char *)nt + sizeof(Elf64_Nhdr) + nt->n_namesz)); } len = sizeof(Elf64_Nhdr); if (STRNEQ((char *)nt + len, "QEMU")) { ulong *ptr = (ulong *)((char *)nt + sizeof(Elf64_Nhdr) + nt->n_namesz); dd->nt_qemucs_percpu[qemu_num] = (ulong *)roundup((ulong) ptr, 4); dd->nt_qemu_percpu[qemu_num] = nt; qemu_num++; } if (nt->n_type == NT_XEN_KDUMP_CR3 || nt->n_type == XEN_ELFNOTE_CRASH_INFO) { void *data = (char*)(nt + 1) + roundup(nt->n_namesz, 4); process_xen_note(nt->n_type, data, nt->n_descsz); } if (nt->n_type == NT_VMCOREDD && vmcoredd_num < NR_DEVICE_DUMPS) { dd->nt_vmcoredd_array[vmcoredd_num] = nt; vmcoredd_num++; } len = roundup(len + nt->n_namesz, 4); len = roundup(len + nt->n_descsz, 4); } if (num > 0) { pc->flags2 |= ELF_NOTES; dd->num_prstatus_notes = num; } if (qemu_num > 0) { pc->flags2 |= QEMU_MEM_DUMP_COMPRESSED; dd->num_qemu_notes = qemu_num; } if (vmcoredd_num > 0) dd->num_vmcoredd_notes = vmcoredd_num; return; } void x86_process_elf_notes(void *note_ptr, unsigned long size_note) { if (machine_type("X86_64")) process_elf64_notes(note_ptr, size_note); else if 
(machine_type("X86")) process_elf32_notes(note_ptr, size_note); } #if defined(__i386__) && (defined(ARM) || defined(MIPS)) /* * The kdump_sub_header member offsets are different when the crash * binary is built natively on an ARM host vs. when built with * "make target=ARM" on an x86/x86_64 host. This is because the * off_t structure members will be aligned on an 8-byte boundary when * compiled as an ARM binary -- which will be reflected in the * kdump_sub_header in a compressed ARM kdump. * * When crash is compiled as an x86 binary, these are the * structure's offsets: * * struct kdump_sub_header { * [0] unsigned long phys_base; * [4] int dump_level; / header_version 1 and later / * [8] int split; / header_version 2 and later / * [12] unsigned long start_pfn; / header_version 2 and later / * [16] unsigned long end_pfn; / header_version 2 and later / * [20] off_t offset_vmcoreinfo; / header_version 3 and later / * [28] unsigned long size_vmcoreinfo; / header_version 3 and later / * [32] off_t offset_note; / header_version 4 and later / * [40] unsigned long size_note; / header_version 4 and later / * [44] off_t offset_eraseinfo; / header_version 5 and later / * [52] unsigned long size_eraseinfo; / header_version 5 and later / * [56] unsigned long long start_pfn_64; / header_version 6 and later / * [64] unsigned long long end_pfn_64; / header_version 6 and later / * [72] unsigned long long max_mapnr_64; / header_version 6 and later / * }; * * But when compiled on an ARM processor, each 64-bit "off_t" would be pushed * up to an 8-byte boundary: * * struct kdump_sub_header { * [0] unsigned long phys_base; * [4] int dump_level; / header_version 1 and later / * [8] int split; / header_version 2 and later / * [12] unsigned long start_pfn; / header_version 2 and later / * [16] unsigned long end_pfn; / header_version 2 and later / * [24] off_t offset_vmcoreinfo; / header_version 3 and later / * [32] unsigned long size_vmcoreinfo; / header_version 3 and later / * [40] off_t 
offset_note; / header_version 4 and later / * [48] unsigned long size_note; / header_version 4 and later / * [56] off_t offset_eraseinfo; / header_version 5 and later / * [64] unsigned long size_eraseinfo; / header_version 5 and later / * [72] unsigned long long start_pfn_64; / header_version 6 and later / * [80] unsigned long long end_pfn_64; / header_version 6 and later / * [88] unsigned long long max_mapnr_64; / header_version 6 and later / * }; * */ struct kdump_sub_header_ARM_target { unsigned long phys_base; int dump_level; /* header_version 1 and later */ int split; /* header_version 2 and later */ unsigned long start_pfn; /* header_version 2 and later */ unsigned long end_pfn; /* header_version 2 and later */ int pad1; off_t offset_vmcoreinfo; /* header_version 3 and later */ unsigned long size_vmcoreinfo; /* header_version 3 and later */ int pad2; off_t offset_note; /* header_version 4 and later */ unsigned long size_note; /* header_version 4 and later */ int pad3; off_t offset_eraseinfo; /* header_version 5 and later */ unsigned long size_eraseinfo; /* header_version 5 and later */ int pad4; unsigned long long start_pfn_64; /* header_version 6 and later */ unsigned long long end_pfn_64; /* header_version 6 and later */ unsigned long long max_mapnr_64; /* header_version 6 and later */ }; static void arm_kdump_header_adjust(int header_version) { struct kdump_sub_header *kdsh; struct kdump_sub_header_ARM_target *kdsh_ARM_target; kdsh = dd->sub_header_kdump; kdsh_ARM_target = (struct kdump_sub_header_ARM_target *)kdsh; if (header_version >= 3) { kdsh->offset_vmcoreinfo = kdsh_ARM_target->offset_vmcoreinfo; kdsh->size_vmcoreinfo = kdsh_ARM_target->size_vmcoreinfo; } if (header_version >= 4) { kdsh->offset_note = kdsh_ARM_target->offset_note; kdsh->size_note = kdsh_ARM_target->size_note; } if (header_version >= 5) { kdsh->offset_eraseinfo = kdsh_ARM_target->offset_eraseinfo; kdsh->size_eraseinfo = kdsh_ARM_target->size_eraseinfo; } if (header_version >= 6) { 
kdsh->start_pfn_64 = kdsh_ARM_target->start_pfn_64; kdsh->end_pfn_64 = kdsh_ARM_target->end_pfn_64; kdsh->max_mapnr_64 = kdsh_ARM_target->max_mapnr_64; } else { kdsh->start_pfn_64 = kdsh_ARM_target->start_pfn; kdsh->end_pfn_64 = kdsh_ARM_target->end_pfn; kdsh->max_mapnr_64 = dd->max_mapnr; } } #endif /* __i386__ && (ARM || MIPS) */ /* * Read page descriptor. */ static int read_pd(int fd, off_t offset, page_desc_t *pd) { int ret; if (FLAT_FORMAT()) { if (!read_flattened_format(fd, offset, pd, sizeof(*pd))) return READ_ERROR; } else { if (offset < 0) { if (CRASHDEBUG(8)) fprintf(fp, "read_pd: invalid offset: %lx\n", offset); return SEEK_ERROR; } if ((ret = pread(fd, pd, sizeof(*pd), offset)) != sizeof(*pd)) { if (ret == -1 && CRASHDEBUG(8)) fprintf(fp, "read_pd: pread error: %s\n", strerror(errno)); return READ_ERROR; } } return 0; } static int read_dump_header(char *file) { struct disk_dump_header *header = NULL; struct disk_dump_sub_header *sub_header = NULL; struct kdump_sub_header *sub_header_kdump = NULL; size_t size; off_t bitmap_len; int block_size = (int)sysconf(_SC_PAGESIZE); off_t offset; const off_t failed = (off_t)-1; ulong pfn; int i, j, max_sect_len; int is_split = 0; ulonglong tmp, *bitmap; if (block_size < 0) return FALSE; restart: if ((header = realloc(header, block_size)) == NULL) error(FATAL, "diskdump / compressed kdump: cannot malloc block_size buffer\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, 0, header, block_size)) { error(FATAL, "diskdump / compressed kdump: cannot read header\n"); goto err; } } else { if (lseek(dd->dfd, 0, SEEK_SET) == failed) { if (CRASHDEBUG(1)) error(INFO, "diskdump / compressed kdump: cannot lseek dump header\n"); goto err; } if (read(dd->dfd, header, block_size) < block_size) { if (CRASHDEBUG(1)) error(INFO, "diskdump / compressed kdump: cannot read dump header\n"); goto err; } } /* validate dump header */ if (!memcmp(header->signature, DISK_DUMP_SIGNATURE, sizeof(header->signature))) { dd->flags |= 
DISKDUMP_LOCAL; } else if (!memcmp(header->signature, KDUMP_SIGNATURE, sizeof(header->signature))) { dd->flags |= KDUMP_CMPRS_LOCAL; if (header->header_version >= 1) dd->flags |= ERROR_EXCLUDED; } else { if (CRASHDEBUG(1)) error(INFO, "diskdump / compressed kdump: dump does not have panic dump header\n"); goto err; } if (CRASHDEBUG(1)) fprintf(fp, "%s: header->utsname.machine: %s\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", header->utsname.machine); if (STRNEQ(header->utsname.machine, "i686") && machine_type_mismatch(file, "X86", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "x86_64") && machine_type_mismatch(file, "X86_64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "ia64") && machine_type_mismatch(file, "IA64", NULL, 0)) goto err; else if (STREQ(header->utsname.machine, "ppc") && machine_type_mismatch(file, "PPC", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "ppc64") && machine_type_mismatch(file, "PPC64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "arm") && machine_type_mismatch(file, "ARM", NULL, 0)) goto err; else if (STREQ(header->utsname.machine, "mips") && machine_type_mismatch(file, "MIPS", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "mips64") && machine_type_mismatch(file, "MIPS64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "s390x") && machine_type_mismatch(file, "S390X", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "aarch64") && machine_type_mismatch(file, "ARM64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "riscv64") && machine_type_mismatch(file, "RISCV64", NULL, 0)) goto err; else if (STRNEQ(header->utsname.machine, "loongarch64") && machine_type_mismatch(file, "LOONGARCH64", NULL, 0)) goto err; if (header->block_size != block_size) { block_size = header->block_size; if (CRASHDEBUG(1)) fprintf(fp, "retrying with different block/page size: %d\n", header->block_size); goto restart; } dd->block_size = 
header->block_size; dd->block_shift = ffs(header->block_size) - 1; if ((DISKDUMP_VALID() && (sizeof(*header) + sizeof(void *) * header->nr_cpus > block_size)) || header->nr_cpus <= 0) { error(WARNING, "%s: invalid nr_cpus value: %d\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", header->nr_cpus); if (!machine_type("S390") && !machine_type("S390X") && !machine_type("X86") && !machine_type("X86_64")) { if (DISKDUMP_VALID()) goto err; } } /* read sub header */ offset = (off_t)block_size; if (DISKDUMP_VALID()) { if ((sub_header = malloc(block_size)) == NULL) error(FATAL, "diskdump: cannot malloc sub_header buffer\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, sub_header, block_size)) { error(INFO, "diskdump: cannot read dump sub header\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "diskdump: cannot lseek dump sub header\n"); goto err; } if (read(dd->dfd, sub_header, block_size) < block_size) { error(INFO, "diskdump: cannot read dump sub header\n"); goto err; } } dd->sub_header = sub_header; /* the 64bit max_mapnr only exists in sub-header of compressed * kdump file, if it's not a compressed kdump file, we have to * use the old 32bit max_mapnr in dumpfile header. * max_mapnr may be truncated here. 
*/ dd->max_mapnr = header->max_mapnr; } else if (KDUMP_CMPRS_VALID()) { if ((sub_header_kdump = malloc(block_size)) == NULL) error(FATAL, "compressed kdump: cannot malloc sub_header_kdump buffer\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, sub_header_kdump, block_size)) { error(INFO, "compressed kdump: cannot read dump sub header\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump sub header\n"); goto err; } if (read(dd->dfd, sub_header_kdump, block_size) < block_size) { error(INFO, "compressed kdump: cannot read dump sub header\n"); goto err; } } dd->sub_header_kdump = sub_header_kdump; #if defined(__i386__) && (defined(ARM) || defined(MIPS)) arm_kdump_header_adjust(header->header_version); #endif /* use 64bit max_mapnr in compressed kdump file sub-header */ if (header->header_version >= 6) dd->max_mapnr = dd->sub_header_kdump->max_mapnr_64; else { dd->sub_header_kdump->start_pfn_64 = dd->sub_header_kdump->start_pfn; dd->sub_header_kdump->end_pfn_64 = dd->sub_header_kdump->end_pfn; } } if (header->header_version < 6) dd->max_mapnr = header->max_mapnr; /* read memory bitmap */ bitmap_len = (off_t)block_size * header->bitmap_blocks; dd->bitmap_len = bitmap_len; offset = (off_t)block_size * (1 + header->sub_hdr_size); dd->dumpable_bitmap = calloc(bitmap_len, 1); if (CRASHDEBUG(8)) fprintf(fp, "%s: memory bitmap offset: %llx\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", (ulonglong)offset); if (FLAT_FORMAT()) { if ((dd->bitmap = malloc(bitmap_len)) == NULL) error(FATAL, "%s: cannot malloc bitmap buffer\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); if (!read_flattened_format(dd->dfd, offset, dd->bitmap, bitmap_len)) { error(INFO, "%s: cannot read memory bitmap\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump"); goto err; } } else { struct stat sbuf; if (fstat(dd->dfd, &sbuf) != 0) { error(INFO, "Cannot fstat the dump file\n"); goto err; } /* * For memory regions mapped with the mmap(), attempts access to * a page of the buffer that lies beyond the end of the mapped file, * which may cause SIGBUS(see the mmap() man page). */ if (bitmap_len + offset > sbuf.st_size) { error(INFO, "Mmap: Beyond the end of mapped file, corrupted?\n"); goto err; } dd->bitmap = mmap(NULL, bitmap_len, PROT_READ, MAP_SHARED, dd->dfd, offset); if (dd->bitmap == MAP_FAILED) error(FATAL, "%s: cannot mmap bitmap buffer\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); madvise(dd->bitmap, bitmap_len, MADV_WILLNEED); } if (dump_is_partial(header)) memcpy(dd->dumpable_bitmap, dd->bitmap + bitmap_len/2, bitmap_len/2); else memcpy(dd->dumpable_bitmap, dd->bitmap, bitmap_len); dd->data_offset = (1UL + header->sub_hdr_size + header->bitmap_blocks) * header->block_size; dd->header = header; if (machine_type("ARM")) dd->machine_type = EM_ARM; else if (machine_type("MIPS") || machine_type("MIPS64")) dd->machine_type = EM_MIPS; else if (machine_type("X86")) dd->machine_type = EM_386; else if (machine_type("X86_64")) dd->machine_type = EM_X86_64; else if (machine_type("IA64")) dd->machine_type = EM_IA_64; else if (machine_type("PPC")) dd->machine_type = EM_PPC; else if (machine_type("PPC64")) dd->machine_type = EM_PPC64; else if (machine_type("S390X")) dd->machine_type = EM_S390; else if (machine_type("ARM64")) dd->machine_type = EM_AARCH64; else if (machine_type("SPARC64")) dd->machine_type = EM_SPARCV9; else if (machine_type("RISCV64")) dd->machine_type = EM_RISCV; else if (machine_type("LOONGARCH64")) dd->machine_type = EM_LOONGARCH; else { error(INFO, "%s: unsupported machine type: %s\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump", MACHINE_TYPE); goto err; } /* process elf notes data */ if (KDUMP_CMPRS_VALID() && !(dd->flags & NO_ELF_NOTES) && (dd->header->header_version >= 4) && (sub_header_kdump->offset_note) && (sub_header_kdump->size_note) && (machdep->process_elf_notes)) { size = sub_header_kdump->size_note; offset = sub_header_kdump->offset_note; if ((dd->notes_buf = malloc(size)) == NULL) error(FATAL, "compressed kdump: cannot malloc notes" " buffer\n"); if ((dd->nt_prstatus_percpu = malloc(NR_CPUS * sizeof(void *))) == NULL) error(FATAL, "compressed kdump: cannot malloc pointer" " to NT_PRSTATUS notes\n"); if ((dd->nt_qemu_percpu = malloc(NR_CPUS * sizeof(void *))) == NULL) error(FATAL, "qemu mem dump compressed: cannot malloc pointer" " to QEMU notes\n"); if ((dd->nt_qemucs_percpu = malloc(NR_CPUS * sizeof(void *))) == NULL) error(FATAL, "qemu mem dump compressed: cannot malloc pointer" " to QEMUCS notes\n"); if ((dd->nt_vmcoredd_array = malloc(NR_DEVICE_DUMPS * sizeof(void *))) == NULL) error(FATAL, "compressed kdump: cannot malloc array for " "vmcore device dump notes\n"); if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, dd->notes_buf, size)) { error(INFO, "compressed kdump: cannot read notes data" "\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek notes data\n"); goto err; } if (read(dd->dfd, dd->notes_buf, size) < size) { error(INFO, "compressed kdump: cannot read notes data" "\n"); goto err; } } machdep->process_elf_notes(dd->notes_buf, size); } /* Check if dump file contains erasesinfo data */ if (KDUMP_CMPRS_VALID() && (dd->header->header_version >= 5) && (sub_header_kdump->offset_eraseinfo) && (sub_header_kdump->size_eraseinfo)) pc->flags2 |= ERASEINFO_DATA; if (KDUMP_CMPRS_VALID() && (dd->header->header_version >= 3) && dd->sub_header_kdump->offset_vmcoreinfo && dd->sub_header_kdump->size_vmcoreinfo) pc->flags2 |= VMCOREINFO; if (KDUMP_CMPRS_VALID() && 
(dd->header->status & DUMP_DH_COMPRESSED_INCOMPLETE)) pc->flags2 |= INCOMPLETE_DUMP; if (KDUMP_CMPRS_VALID() && (dd->header->status & DUMP_DH_EXCLUDED_VMEMMAP)) pc->flags2 |= EXCLUDED_VMEMMAP; /* For split dumpfile */ if (KDUMP_CMPRS_VALID()) { is_split = ((dd->header->header_version >= 2) && (sub_header_kdump->split)); if ((is_split && (num_dumpfiles != 0) && (dd_list == NULL))|| (!is_split && (num_dumpfiles != 0))) { clean_diskdump_data(); goto err; } if (is_split) add_diskdump_data(file); num_dumpfiles++; } if (!is_split) { max_sect_len = divideup(dd->max_mapnr, BITMAP_SECT_LEN); pfn = 0; dd->filename = file; } else { unsigned long long start = sub_header_kdump->start_pfn_64; unsigned long long end = sub_header_kdump->end_pfn_64; max_sect_len = divideup(end - start + 1, BITMAP_SECT_LEN); pfn = start; } dd->valid_pages = calloc(sizeof(ulong), max_sect_len + 1); dd->max_sect_len = max_sect_len; /* It is safe to convert it to (ulonglong *). */ bitmap = (ulonglong *)dd->dumpable_bitmap; for (i = 1; i < max_sect_len + 1; i++) { dd->valid_pages[i] = dd->valid_pages[i - 1]; for (j = 0; j < BITMAP_SECT_LEN; j += 64, pfn += 64) { tmp = bitmap[pfn >> 6]; if (tmp) dd->valid_pages[i] += hweight64(tmp); } } return TRUE; err: free(header); if (sub_header) free(sub_header); if (sub_header_kdump) free(sub_header_kdump); if (dd->bitmap) { if (FLAT_FORMAT()) free(dd->bitmap); else munmap(dd->bitmap, dd->bitmap_len); } if (dd->dumpable_bitmap) free(dd->dumpable_bitmap); if (dd->notes_buf) free(dd->notes_buf); if (dd->nt_prstatus_percpu) free(dd->nt_prstatus_percpu); if (dd->nt_qemu_percpu) free(dd->nt_qemu_percpu); if (dd->nt_qemucs_percpu) free(dd->nt_qemucs_percpu); if (dd->nt_vmcoredd_array) free(dd->nt_vmcoredd_array); dd->flags &= ~(DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL); pc->flags2 &= ~ELF_NOTES; return FALSE; } static ulong pfn_to_pos(ulong pfn) { ulong desc_pos, j, valid; ulong p1, p2; if (KDUMP_SPLIT()) { p1 = pfn - dd->sub_header_kdump->start_pfn_64; p2 = round(p1, 
BITMAP_SECT_LEN) + dd->sub_header_kdump->start_pfn_64; } else { p1 = pfn; p2 = round(pfn, BITMAP_SECT_LEN); } valid = dd->valid_pages[p1 / BITMAP_SECT_LEN]; for (j = p2, desc_pos = valid; j <= pfn; j++) if (page_is_dumpable(j)) desc_pos++; return desc_pos; } /* * Determine whether a file is a diskdump creation, and if TRUE, * initialize the diskdump_data structure based upon the contents * of the diskdump header data. */ int is_diskdump(char *file) { int sz, i; if (!open_dump_file(file) || !read_dump_header(file)) return FALSE; sz = dd->block_size * (DISKDUMP_CACHED_PAGES); if ((dd->page_cache_buf = malloc(sz)) == NULL) error(FATAL, "%s: cannot malloc compressed page_cache_buf\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) dd->page_cache_hdr[i].pg_bufptr = &dd->page_cache_buf[i * dd->block_size]; if ((dd->compressed_page = (char *)malloc(dd->block_size)) == NULL) error(FATAL, "%s: cannot malloc compressed page space\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); if (CRASHDEBUG(1)) __diskdump_memory_dump(fp); if (pc->flags2 & GET_OSRELEASE) diskdump_get_osrelease(); #ifdef LZO if (lzo_init() == LZO_E_OK) dd->flags |= LZO_SUPPORTED; #endif #ifdef SNAPPY dd->flags |= SNAPPY_SUPPORTED; #endif #ifdef ZSTD dd->flags |= ZSTD_SUPPORTED; #endif pc->read_vmcoreinfo = vmcoreinfo_read_string; if ((pc->flags2 & GET_LOG) && KDUMP_CMPRS_VALID()) { pc->dfd = dd->dfd; pc->readmem = read_diskdump; pc->flags |= DISKDUMP; get_log_from_vmcoreinfo(file); } return TRUE; } /* * Perform any post-dumpfile determination stuff here. * At a minimum */ int diskdump_init(char *unused, FILE *fptr) { if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) return FALSE; machdep->is_cpu_prstatus_valid = diskdump_is_cpu_prstatus_valid; dd->ofp = fptr; return TRUE; } /* * Get the relocational offset from the sub header of kdump. 
*/ int diskdump_phys_base(unsigned long *phys_base) { if (KDUMP_CMPRS_VALID()) { *phys_base = dd->sub_header_kdump->phys_base; return TRUE; } return FALSE; } int diskdump_set_phys_base(unsigned long phys_base) { if (diskdump_kaslr_check()) { dd->sub_header_kdump->phys_base = phys_base; return TRUE; } return FALSE; } /* * Check whether paddr is already cached. */ static int page_is_cached(physaddr_t paddr) { int i; struct page_cache_hdr *pgc; dd->accesses++; for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { pgc = &dd->page_cache_hdr[i]; if (!DISKDUMP_VALID_PAGE(pgc->pg_flags)) continue; if (pgc->pg_addr == paddr) { pgc->pg_hit_count++; dd->curbufptr = pgc->pg_bufptr; dd->cached_reads++; return TRUE; } } return FALSE; } /* * Translate physical address in paddr to PFN number. This means normally that * we just shift paddr by some constant. Some architectures need special * handling for this, however. */ static ulong paddr_to_pfn(physaddr_t paddr) { #ifdef ARM /* * In ARM, PFN 0 means first page in kernel direct-mapped view. * This is also first page in mem_map as well. */ return (paddr - machdep->machspec->phys_base) >> dd->block_shift; #else return paddr >> dd->block_shift; #endif } /* * Cache the page's data. * * If an empty page cache location is available, take it. Otherwise, evict * the entry indexed by evict_index, and then bump evict index. The hit_count * is only gathered for dump_diskdump_environment(). * * If the page is compressed, uncompress it into the selected page cache entry. * If the page is raw, just copy it into the selected page cache entry. * If all works OK, update diskdump->curbufptr to point to the page's * uncompressed data. 
*/ static int cache_page(physaddr_t paddr) { int i, ret; int found; ulong pfn; ulong desc_pos; off_t seek_offset; page_desc_t pd; const int block_size = dd->block_size; ulong retlen; #ifdef ZSTD static ZSTD_DCtx *dctx = NULL; #endif for (i = found = 0; i < DISKDUMP_CACHED_PAGES; i++) { if (DISKDUMP_VALID_PAGE(dd->page_cache_hdr[i].pg_flags)) continue; found = TRUE; break; } if (!found) { i = dd->evict_index; dd->page_cache_hdr[i].pg_hit_count = 0; dd->evict_index = (dd->evict_index+1) % DISKDUMP_CACHED_PAGES; dd->evictions++; } dd->page_cache_hdr[i].pg_flags = 0; dd->page_cache_hdr[i].pg_addr = paddr; dd->page_cache_hdr[i].pg_hit_count++; /* find page descriptor */ pfn = paddr_to_pfn(paddr); desc_pos = pfn_to_pos(pfn); seek_offset = dd->data_offset + (off_t)(desc_pos - 1)*sizeof(page_desc_t); /* read page descriptor */ ret = read_pd(dd->dfd, seek_offset, &pd); if (ret) return ret; /* sanity check */ if (pd.size > block_size) return READ_ERROR; /* read page data */ if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, pd.offset, dd->compressed_page, pd.size)) return READ_ERROR; } else if (0 == pd.offset) { /* * First check whether zero_excluded has been set. 
*/ if (*diskdump_flags & ZERO_EXCLUDED) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump/cache_page: zero-fill: " "paddr/pfn: %llx/%lx\n", (ulonglong)paddr, pfn); memset(dd->compressed_page, 0, dd->block_size); } else { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump/cache_page: " "descriptor with zero offset found at " "paddr/pfn/pos: %llx/%lx/%lx\n", (ulonglong)paddr, pfn, desc_pos); return PAGE_INCOMPLETE; } } else { if (pd.offset < 0) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump/cache_page: invalid offset: %lx\n", pd.offset); return SEEK_ERROR; } if ((ret = pread(dd->dfd, dd->compressed_page, pd.size, pd.offset)) != pd.size) { if (ret == -1 && CRASHDEBUG(8)) fprintf(fp, "read_diskdump/cache_page: pread error: %s\n", strerror(errno)); return READ_ERROR; } } if (pd.flags & DUMP_DH_COMPRESSED_ZLIB) { retlen = block_size; ret = uncompress((unsigned char *)dd->page_cache_hdr[i].pg_bufptr, &retlen, (unsigned char *)dd->compressed_page, pd.size); if ((ret != Z_OK) || (retlen != block_size)) { error(INFO, "%s: uncompress failed: %d\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ret); return READ_ERROR; } } else if (pd.flags & DUMP_DH_COMPRESSED_LZO) { if (!(dd->flags & LZO_SUPPORTED)) { error(INFO, "%s: uncompress failed: no lzo compression support\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); return READ_ERROR; } #ifdef LZO retlen = block_size; ret = lzo1x_decompress_safe((unsigned char *)dd->compressed_page, pd.size, (unsigned char *)dd->page_cache_hdr[i].pg_bufptr, &retlen, LZO1X_MEM_DECOMPRESS); if ((ret != LZO_E_OK) || (retlen != block_size)) { error(INFO, "%s: uncompress failed: %d\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ret); return READ_ERROR; } #endif } else if (pd.flags & DUMP_DH_COMPRESSED_SNAPPY) { if (!(dd->flags & SNAPPY_SUPPORTED)) { error(INFO, "%s: uncompress failed: no snappy compression support\n", DISKDUMP_VALID() ? 
"diskdump" : "compressed kdump"); return READ_ERROR; } #ifdef SNAPPY ret = snappy_uncompressed_length((char *)dd->compressed_page, pd.size, (size_t *)&retlen); if (ret != SNAPPY_OK) { error(INFO, "%s: uncompress failed: %d\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ret); return READ_ERROR; } ret = snappy_uncompress((char *)dd->compressed_page, pd.size, (char *)dd->page_cache_hdr[i].pg_bufptr, (size_t *)&retlen); if ((ret != SNAPPY_OK) || (retlen != block_size)) { error(INFO, "%s: uncompress failed: %d\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", ret); return READ_ERROR; } #endif } else if (pd.flags & DUMP_DH_COMPRESSED_ZSTD) { if (!(dd->flags & ZSTD_SUPPORTED)) { error(INFO, "%s: uncompess failed: no zstd compression support\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); return READ_ERROR; } #ifdef ZSTD if (!dctx) { dctx = ZSTD_createDCtx(); if (!dctx) { error(INFO, "%s: uncompess failed: cannot create ZSTD_DCtx\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump"); return READ_ERROR; } } retlen = ZSTD_decompressDCtx(dctx, dd->page_cache_hdr[i].pg_bufptr, block_size, dd->compressed_page, pd.size); if (ZSTD_isError(retlen) || (retlen != block_size)) { error(INFO, "%s: uncompress failed: %d (%s)\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", retlen, ZSTD_getErrorName(retlen)); return READ_ERROR; } #endif } else memcpy(dd->page_cache_hdr[i].pg_bufptr, dd->compressed_page, block_size); dd->page_cache_hdr[i].pg_flags |= PAGE_VALID; dd->curbufptr = dd->page_cache_hdr[i].pg_bufptr; return TRUE; } /* * Read from a diskdump-created dumpfile. 
*/ int read_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int ret; physaddr_t curpaddr; ulong pfn, page_offset; physaddr_t paddr_in = paddr; if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: xen_kdump_p2m(%llx): " "P2M_FAILURE\n", (ulonglong)paddr_in); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: xen_kdump_p2m(%llx): %llx\n", (ulonglong)paddr_in, (ulonglong)paddr); } pfn = paddr_to_pfn(paddr); if (KDUMP_SPLIT()) { /* Find proper dd */ int i; unsigned long long start_pfn; unsigned long long end_pfn; for (i=0; isub_header_kdump->start_pfn_64; end_pfn = dd_list[i]->sub_header_kdump->end_pfn_64; if ((pfn >= start_pfn) && (pfn < end_pfn)) { dd = dd_list[i]; break; } } if (i == num_dumpfiles) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: SEEK_ERROR: " "paddr/pfn %llx/%lx beyond last dumpfile\n", (ulonglong)paddr, pfn); return SEEK_ERROR; } } curpaddr = paddr & ~((physaddr_t)(dd->block_size-1)); page_offset = paddr & ((physaddr_t)(dd->block_size-1)); if ((pfn >= dd->max_mapnr) || !page_is_ram(pfn)) { if (CRASHDEBUG(8)) { fprintf(fp, "read_diskdump: SEEK_ERROR: " "paddr/pfn: %llx/%lx ", (ulonglong)paddr, pfn); if (pfn >= dd->max_mapnr) fprintf(fp, "max_mapnr: %llx\n", dd->max_mapnr); else fprintf(fp, "!page_is_ram\n"); } return SEEK_ERROR; } if (!page_is_dumpable(pfn)) { if ((dd->flags & (ZERO_EXCLUDED|ERROR_EXCLUDED)) == ERROR_EXCLUDED) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: PAGE_EXCLUDED: " "paddr/pfn: %llx/%lx\n", (ulonglong)paddr, pfn); return PAGE_EXCLUDED; } if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: zero-fill: " "paddr/pfn: %llx/%lx\n", (ulonglong)paddr, pfn); memset(bufptr, 0, cnt); return cnt; } if (!page_is_cached(curpaddr)) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: paddr/pfn: %llx/%lx" " -> cache physical page: %llx\n", (ulonglong)paddr, pfn, (ulonglong)curpaddr); if ((ret = 
cache_page(curpaddr)) < 0) { if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: " "%s: cannot cache page: %llx\n", ret == SEEK_ERROR ? "SEEK_ERROR" : "READ_ERROR", (ulonglong)curpaddr); return ret; } } else if (CRASHDEBUG(8)) fprintf(fp, "read_diskdump: paddr/pfn: %llx/%lx" " -> physical page is cached: %llx\n", (ulonglong)paddr, pfn, (ulonglong)curpaddr); memcpy(bufptr, dd->curbufptr + page_offset, cnt); return cnt; } /* * Write to a diskdump-created dumpfile. */ int write_diskdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return 0; } ulong get_diskdump_panic_task(void) { int i; if ((!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) || !get_active_set()) return NO_TASK; if (pc->flags2 & SNAP) return (task_exists(dd->snapshot_task) ? dd->snapshot_task : NO_TASK); if (DISKDUMP_VALID()) return (ulong)dd->header->tasks[dd->header->current_cpu]; if (KDUMP_CMPRS_VALID()) { if (kernel_symbol_exists("crashing_cpu") && cpu_map_addr("online")) { get_symbol_data("crashing_cpu", sizeof(int), &i); if ((i >= 0) && in_cpu_map(ONLINE_MAP, i)) { if (CRASHDEBUG(1)) error(INFO, "get_diskdump_panic_task: " "active_set[%d]: %lx\n", i, tt->active_set[i]); return (tt->active_set[i]); } } } return NO_TASK; } extern void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); extern void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); static void get_diskdump_regs_32(struct bt_info *bt, ulong *eip, ulong *esp) { Elf32_Nhdr *note; int len; if (KDUMP_CMPRS_VALID() && (bt->task == tt->panic_task || (is_task_active(bt->task) && dd->num_prstatus_notes > 1))) { note = (Elf32_Nhdr*) dd->nt_prstatus_percpu[bt->tc->processor]; if (!note) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? 
"panic" : "active", bt->task); len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_ppc(struct bt_info *bt, ulong *eip, ulong *esp) { if (KDUMP_CMPRS_VALID()) ppc_relocate_nt_prstatus_percpu(dd->nt_prstatus_percpu, &dd->num_prstatus_notes); get_diskdump_regs_32(bt, eip, esp); } static void get_diskdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) { int cpu; Elf64_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) && DISKDUMP_VALID()) bt->machdep = &dd->sub_header->elf_regs; else if (KDUMP_CMPRS_VALID() && (bt->task == tt->panic_task || (is_task_active(bt->task) && dd->num_prstatus_notes > 1))) { cpu = bt->tc->processor; if (dd->nt_prstatus_percpu[cpu] == NULL) { if(CRASHDEBUG(1)) error(INFO, "registers not collected for cpu %d\n", cpu); } else { note = (Elf64_Nhdr *) dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } } machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_arm(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_arm64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_mips(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_riscv64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_loongarch64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_diskdump_regs_sparc64(struct bt_info *bt, ulong *eip, ulong *esp) { Elf64_Nhdr *note; int len; if (KDUMP_CMPRS_VALID() && (bt->task == tt->panic_task || 
(is_task_active(bt->task) && dd->num_prstatus_notes > 1))) { note = (Elf64_Nhdr *)dd->nt_prstatus_percpu[bt->tc->processor]; if (!note) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? "panic" : "active", bt->task); len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } machdep->get_stack_frame(bt, eip, esp); } /* * Send the request to the proper architecture hander. */ void get_diskdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { switch (dd->machine_type) { case EM_ARM: get_diskdump_regs_arm(bt, eip, esp); break; case EM_MIPS: return get_diskdump_regs_mips(bt, eip, esp); break; case EM_386: return get_netdump_regs_x86(bt, eip, esp); break; case EM_IA_64: /* For normal backtraces, this information will be obtained * frome the switch_stack structure, which is pointed to by * the thread.ksp field of the task_struct. But it's still * needed by the "bt -t" option. */ machdep->get_stack_frame(bt, eip, esp); break; case EM_PPC: return get_diskdump_regs_ppc(bt, eip, esp); break; case EM_PPC64: return get_diskdump_regs_ppc64(bt, eip, esp); break; case EM_X86_64: return get_netdump_regs_x86_64(bt, eip, esp); break; case EM_S390: return machdep->get_stack_frame(bt, eip, esp); break; case EM_AARCH64: get_diskdump_regs_arm64(bt, eip, esp); break; case EM_SPARCV9: get_diskdump_regs_sparc64(bt, eip, esp); break; case EM_RISCV: get_diskdump_regs_riscv64(bt, eip, esp); break; case EM_LOONGARCH: get_diskdump_regs_loongarch64(bt, eip, esp); break; default: error(FATAL, "%s: unsupported machine type: %s\n", DISKDUMP_VALID() ? "diskdump" : "compressed kdump", MACHINE_TYPE); } } /* * Return the processor page size. 
*/ uint diskdump_page_size(void) { if (!DISKDUMP_VALID() && !KDUMP_CMPRS_VALID()) return 0; return dd->header->block_size; } /* * diskdump_free_memory(), and diskdump_memory_used() * are debug only, and probably unnecessary to implement. */ int diskdump_free_memory(void) { return 0; } int diskdump_memory_used(void) { return 0; } static void dump_vmcoreinfo(FILE *fp) { char *buf = NULL; unsigned long i = 0; unsigned long size_vmcoreinfo = dd->sub_header_kdump->size_vmcoreinfo; off_t offset = dd->sub_header_kdump->offset_vmcoreinfo; const off_t failed = (off_t)-1; if ((buf = malloc(size_vmcoreinfo)) == NULL) { error(FATAL, "compressed kdump: cannot malloc vmcoreinfo" " buffer\n"); } if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, buf, size_vmcoreinfo)) { error(INFO, "compressed kdump: cannot read vmcoreinfo data\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump vmcoreinfo\n"); goto err; } if (read(dd->dfd, buf, size_vmcoreinfo) < size_vmcoreinfo) { error(INFO, "compressed kdump: cannot read vmcoreinfo data\n"); goto err; } } fprintf(fp, " "); for (i = 0; i < size_vmcoreinfo; i++) { fprintf(fp, "%c", buf[i]); if ((buf[i] == '\n') && ((i+1) != size_vmcoreinfo)) fprintf(fp, " "); } if (buf[i-1] != '\n') fprintf(fp, "\n"); err: if (buf) free(buf); return; } static void dump_eraseinfo(FILE *fp) { char *buf = NULL; unsigned long i = 0; unsigned long size_eraseinfo = dd->sub_header_kdump->size_eraseinfo; off_t offset = dd->sub_header_kdump->offset_eraseinfo; const off_t failed = (off_t)-1; if ((buf = malloc(size_eraseinfo)) == NULL) { error(FATAL, "compressed kdump: cannot malloc eraseinfo" " buffer\n"); } if (FLAT_FORMAT()) { if (!read_flattened_format(dd->dfd, offset, buf, size_eraseinfo)) { error(INFO, "compressed kdump: cannot read eraseinfo data\n"); goto err; } } else { if (lseek(dd->dfd, offset, SEEK_SET) == failed) { error(INFO, "compressed kdump: cannot lseek dump 
eraseinfo\n"); goto err; } if (read(dd->dfd, buf, size_eraseinfo) < size_eraseinfo) { error(INFO, "compressed kdump: cannot read eraseinfo data\n"); goto err; } } fprintf(fp, " "); for (i = 0; i < size_eraseinfo; i++) { fprintf(fp, "%c", buf[i]); if (buf[i] == '\n') fprintf(fp, " "); } if (buf[i - 1] != '\n') fprintf(fp, "\n"); err: if (buf) free(buf); return; } static void dump_note_offsets(FILE *fp) { struct kdump_sub_header *sub_header_kdump = dd->sub_header_kdump; size_t size; off_t offset; Elf32_Nhdr *note32 = NULL; Elf64_Nhdr *note64 = NULL; size_t tot, len = 0; int qemu, cnt; if (KDUMP_CMPRS_VALID() && !(dd->flags & NO_ELF_NOTES) && (dd->header->header_version >= 4) && (sub_header_kdump->offset_note) && (sub_header_kdump->size_note) && (machdep->process_elf_notes)) { size = sub_header_kdump->size_note; offset = sub_header_kdump->offset_note; fprintf(fp, " NOTE offsets: "); for (tot = cnt = 0; tot < size; tot += len) { qemu = FALSE; if (machine_type("X86_64") || machine_type("S390X") || machine_type("ARM64") || machine_type("PPC64") || machine_type("SPARC64") || machine_type("MIPS64") || machine_type("RISCV64") || machine_type("LOONGARCH64")) { note64 = (void *)dd->notes_buf + tot; len = sizeof(Elf64_Nhdr); if (STRNEQ((char *)note64 + len, "QEMU")) qemu = TRUE; len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (note64->n_type == NT_PRSTATUS) { fprintf(fp, "%s%lx (NT_PRSTATUS)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } if (qemu) { fprintf(fp, "%s%lx (QEMU)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } } else if (machine_type("X86") || machine_type("PPC")) { note32 = (void *)dd->notes_buf + tot; len = sizeof(Elf32_Nhdr); if (STRNEQ((char *)note32 + len, "QEMU")) qemu = TRUE; len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); if (note32->n_type == NT_PRSTATUS) { fprintf(fp, "%s%lx (NT_PRSTATUS)\n", tot ? 
space(22) : "", (ulong)(offset + tot)); cnt++; } if (qemu) { fprintf(fp, "%s%lx (QEMU)\n", tot ? space(22) : "", (ulong)(offset + tot)); cnt++; } } } if (!cnt) fprintf(fp, "\n"); } } /* * This function is dump-type independent, and could be used * to dump the diskdump_data structure contents and perhaps * the diskdump header data. */ int __diskdump_memory_dump(FILE *fp) { int i, others, dump_level; struct disk_dump_header *dh; struct disk_dump_sub_header *dsh; struct kdump_sub_header *kdsh; ulong *tasks; if (FLAT_FORMAT()) dump_flat_header(fp); fprintf(fp, "diskdump_data: \n"); fprintf(fp, " filename: %s\n", dd->filename); fprintf(fp, " flags: %lx (", dd->flags); others = 0; if (dd->flags & DISKDUMP_LOCAL) fprintf(fp, "%sDISKDUMP_LOCAL", others++ ? "|" : ""); if (dd->flags & KDUMP_CMPRS_LOCAL) fprintf(fp, "%sKDUMP_CMPRS_LOCAL", others++ ? "|" : ""); if (dd->flags & ERROR_EXCLUDED) fprintf(fp, "%sERROR_EXCLUDED", others++ ? "|" : ""); if (dd->flags & ZERO_EXCLUDED) fprintf(fp, "%sZERO_EXCLUDED", others++ ? "|" : ""); if (dd->flags & NO_ELF_NOTES) fprintf(fp, "%sNO_ELF_NOTES", others++ ? "|" : ""); if (dd->flags & LZO_SUPPORTED) fprintf(fp, "%sLZO_SUPPORTED", others++ ? "|" : ""); if (dd->flags & SNAPPY_SUPPORTED) fprintf(fp, "%sSNAPPY_SUPPORTED", others++ ? "|" : ""); if (dd->flags & ZSTD_SUPPORTED) fprintf(fp, "%sZSTD_SUPPORTED", others++ ? "|" : ""); fprintf(fp, ") %s\n", FLAT_FORMAT() ? 
"[FLAT]" : ""); fprintf(fp, " dfd: %d\n", dd->dfd); fprintf(fp, " ofp: %lx\n", (ulong)dd->ofp); fprintf(fp, " machine_type: %d ", dd->machine_type); switch (dd->machine_type) { case EM_ARM: fprintf(fp, "(EM_ARM)\n"); break; case EM_MIPS: fprintf(fp, "(EM_MIPS)\n"); break; case EM_386: fprintf(fp, "(EM_386)\n"); break; case EM_X86_64: fprintf(fp, "(EM_X86_64)\n"); break; case EM_IA_64: fprintf(fp, "(EM_IA_64)\n"); break; case EM_PPC: fprintf(fp, "(EM_PPC)\n"); break; case EM_PPC64: fprintf(fp, "(EM_PPC64)\n"); break; case EM_S390: fprintf(fp, "(EM_S390)\n"); break; case EM_AARCH64: fprintf(fp, "(EM_AARCH64)\n"); break; case EM_SPARCV9: fprintf(fp, "(EM_SPARCV9)\n"); break; case EM_LOONGARCH: fprintf(fp, "(EM_LOONGARCH)\n"); break; default: fprintf(fp, "(unknown)\n"); break; } fprintf(fp, "\n header: %lx\n", (ulong)dd->header); dh = dd->header; fprintf(fp, " signature: \""); for (i = 0; i < SIG_LEN; i++) if (dh->signature[i]) fprintf(fp, "%c", dh->signature[i]); fprintf(fp, "\"\n"); fprintf(fp, " header_version: %d\n", dh->header_version); fprintf(fp, " utsname:\n"); fprintf(fp, " sysname: %s\n", dh->utsname.sysname); fprintf(fp, " nodename: %s\n", dh->utsname.nodename); fprintf(fp, " release: %s\n", dh->utsname.release); fprintf(fp, " version: %s\n", dh->utsname.version); fprintf(fp, " machine: %s\n", dh->utsname.machine); fprintf(fp, " domainname: %s\n", dh->utsname.domainname); fprintf(fp, " timestamp:\n"); fprintf(fp, " tv_sec: %lx\n", dh->timestamp.tv_sec); fprintf(fp, " tv_usec: %lx\n", dh->timestamp.tv_usec); fprintf(fp, " status: %x (", dh->status); switch (dd->flags & (DISKDUMP_LOCAL|KDUMP_CMPRS_LOCAL)) { case DISKDUMP_LOCAL: if (dh->status == DUMP_HEADER_COMPLETED) fprintf(fp, "DUMP_HEADER_COMPLETED"); else if (dh->status == DUMP_HEADER_INCOMPLETED) fprintf(fp, "DUMP_HEADER_INCOMPLETED"); else if (dh->status == DUMP_HEADER_COMPRESSED) fprintf(fp, "DUMP_HEADER_COMPRESSED"); break; case KDUMP_CMPRS_LOCAL: if (dh->status & DUMP_DH_COMPRESSED_ZLIB) fprintf(fp, 
"DUMP_DH_COMPRESSED_ZLIB"); if (dh->status & DUMP_DH_COMPRESSED_LZO) fprintf(fp, "DUMP_DH_COMPRESSED_LZO"); if (dh->status & DUMP_DH_COMPRESSED_SNAPPY) fprintf(fp, "DUMP_DH_COMPRESSED_SNAPPY"); if (dh->status & DUMP_DH_COMPRESSED_ZSTD) fprintf(fp, "DUMP_DH_COMPRESSED_ZSTD"); if (dh->status & DUMP_DH_COMPRESSED_INCOMPLETE) fprintf(fp, "DUMP_DH_COMPRESSED_INCOMPLETE"); if (dh->status & DUMP_DH_EXCLUDED_VMEMMAP) fprintf(fp, "DUMP_DH_EXCLUDED_VMEMMAP"); break; } fprintf(fp, ")\n"); fprintf(fp, " block_size: %d\n", dh->block_size); fprintf(fp, " sub_hdr_size: %d\n", dh->sub_hdr_size); fprintf(fp, " bitmap_blocks: %u\n", dh->bitmap_blocks); fprintf(fp, " max_mapnr: %u\n", dh->max_mapnr); fprintf(fp, " total_ram_blocks: %u\n", dh->total_ram_blocks); fprintf(fp, " device_blocks: %u\n", dh->device_blocks); fprintf(fp, " written_blocks: %u\n", dh->written_blocks); fprintf(fp, " current_cpu: %u\n", dh->current_cpu); fprintf(fp, " nr_cpus: %d\n", dh->nr_cpus); tasks = (ulong *)&dh->tasks[0]; fprintf(fp, " tasks[nr_cpus]: %lx\n", *tasks); for (tasks++, i = 1; i < dh->nr_cpus; i++) { fprintf(fp, " %lx\n", *tasks); tasks++; } fprintf(fp, "\n"); fprintf(fp, " sub_header: %lx ", (ulong)dd->sub_header); if ((dsh = dd->sub_header)) { fprintf(fp, "\n elf_regs: %lx\n", (ulong)&dsh->elf_regs); fprintf(fp, " dump_level: "); if ((pc->flags & RUNTIME) && ((dump_level = get_dump_level()) >= 0)) { fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, dump_level ? "(" : ""); #define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ #define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ #define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ #define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ #define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ others = 0; if (dump_level & DUMP_EXCLUDE_CACHE) fprintf(fp, "%sDUMP_EXCLUDE_CACHE", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_CLEAN) fprintf(fp, "%sDUMP_EXCLUDE_CLEAN", others++ ? 
"|" : ""); if (dump_level & DUMP_EXCLUDE_FREE) fprintf(fp, "%sDUMP_EXCLUDE_FREE", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_ANON) fprintf(fp, "%sDUMP_EXCLUDE_ANON", others++ ? "|" : ""); if (dump_level & DUMP_SAVE_PRIVATE) fprintf(fp, "%sDUMP_SAVE_PRIVATE", others++ ? "|" : ""); fprintf(fp, "%s\n\n", dump_level ? ")" : ""); } else fprintf(fp, "%s\n\n", pc->flags & RUNTIME ? "(unknown)" : "(undetermined)"); } else fprintf(fp, "(n/a)\n\n"); fprintf(fp, " sub_header_kdump: %lx ", (ulong)dd->sub_header_kdump); if ((kdsh = dd->sub_header_kdump)) { fprintf(fp, "\n phys_base: %lx\n", (ulong)kdsh->phys_base); fprintf(fp, " dump_level: "); if ((dump_level = get_dump_level()) >= 0) { fprintf(fp, "%d (0x%x) %s", dump_level, dump_level, dump_level ? "(" : ""); #define DL_EXCLUDE_ZERO (0x001) /* Exclude Pages filled with Zeros */ #define DL_EXCLUDE_CACHE (0x002) /* Exclude Cache Pages without Private Pages */ #define DL_EXCLUDE_CACHE_PRI (0x004) /* Exclude Cache Pages with Private Pages */ #define DL_EXCLUDE_USER_DATA (0x008) /* Exclude UserProcessData Pages */ #define DL_EXCLUDE_FREE (0x010) /* Exclude Free Pages */ others = 0; if (dump_level & DL_EXCLUDE_ZERO) fprintf(fp, "%sDUMP_EXCLUDE_ZERO", others++ ? "|" : ""); if (dump_level & DL_EXCLUDE_CACHE) fprintf(fp, "%sDUMP_EXCLUDE_CACHE", others++ ? "|" : ""); if (dump_level & DL_EXCLUDE_CACHE_PRI) fprintf(fp, "%sDUMP_EXCLUDE_CACHE_PRI", others++ ? "|" : ""); if (dump_level & DL_EXCLUDE_USER_DATA) fprintf(fp, "%sDUMP_EXCLUDE_USER_DATA", others++ ? "|" : ""); if (dump_level & DL_EXCLUDE_FREE) fprintf(fp, "%sDUMP_EXCLUDE_FREE", others++ ? "|" : ""); others = 0; fprintf(fp, "%s\n", dump_level ? 
")" : ""); } else fprintf(fp, "(unknown)\n"); if (dh->header_version >= 2) { fprintf(fp, " split: %d\n", kdsh->split); fprintf(fp, " start_pfn: "); if (KDUMP_SPLIT()) fprintf(fp, "%ld (0x%lx)\n", kdsh->start_pfn, kdsh->start_pfn); else fprintf(fp, "(unused)\n"); fprintf(fp, " end_pfn: "); if (KDUMP_SPLIT()) fprintf(fp, "%ld (0x%lx)\n", kdsh->end_pfn, kdsh->end_pfn); else fprintf(fp, "(unused)\n"); } if (dh->header_version >= 3) { fprintf(fp, " offset_vmcoreinfo: %llu (0x%llx)\n", (ulonglong)dd->sub_header_kdump->offset_vmcoreinfo, (ulonglong)dd->sub_header_kdump->offset_vmcoreinfo); fprintf(fp, " size_vmcoreinfo: %lu (0x%lx)\n", dd->sub_header_kdump->size_vmcoreinfo, dd->sub_header_kdump->size_vmcoreinfo); if (dd->sub_header_kdump->offset_vmcoreinfo && dd->sub_header_kdump->size_vmcoreinfo) { dump_vmcoreinfo(fp); } } if (dh->header_version >= 4) { fprintf(fp, " offset_note: %llu (0x%llx)\n", (ulonglong)dd->sub_header_kdump->offset_note, (ulonglong)dd->sub_header_kdump->offset_note); fprintf(fp, " size_note: %lu (0x%lx)\n", dd->sub_header_kdump->size_note, dd->sub_header_kdump->size_note); fprintf(fp, " notes_buf: %lx\n", (ulong)dd->notes_buf); fprintf(fp, " num_vmcoredd_notes: %d\n", dd->num_vmcoredd_notes); for (i = 0; i < dd->num_vmcoredd_notes; i++) { fprintf(fp, " notes[%d]: %lx %s\n", i, (ulong)dd->nt_vmcoredd_array[i], dd->nt_vmcoredd_array[i] ? "(NT_VMCOREDD)" : ""); display_vmcoredd_note(dd->nt_vmcoredd_array[i], fp); } fprintf(fp, " num_prstatus_notes: %d\n", dd->num_prstatus_notes); for (i = 0; i < dd->num_prstatus_notes; i++) { fprintf(fp, " notes[%d]: %lx %s\n", i, (ulong)dd->nt_prstatus_percpu[i], dd->nt_prstatus_percpu[i] ? "(NT_PRSTATUS)" : ""); display_ELF_note(dd->machine_type, PRSTATUS_NOTE, dd->nt_prstatus_percpu[i], fp); } fprintf(fp, " snapshot_task: %lx %s\n", dd->snapshot_task, dd->snapshot_task ? 
"(NT_TASKSTRUCT)" : ""); fprintf(fp, " num_qemu_notes: %d\n", dd->num_qemu_notes); for (i = 0; i < dd->num_qemu_notes; i++) { fprintf(fp, " notes[%d]: %lx (QEMUCPUState)\n", i, (ulong)dd->nt_qemu_percpu[i]); display_ELF_note(dd->machine_type, QEMU_NOTE, dd->nt_qemu_percpu[i], fp); } dump_note_offsets(fp); } if (dh->header_version >= 5) { fprintf(fp, " offset_eraseinfo: %llu (0x%llx)\n", (ulonglong)dd->sub_header_kdump->offset_eraseinfo, (ulonglong)dd->sub_header_kdump->offset_eraseinfo); fprintf(fp, " size_eraseinfo: %lu (0x%lx)\n", dd->sub_header_kdump->size_eraseinfo, dd->sub_header_kdump->size_eraseinfo); if (dd->sub_header_kdump->offset_eraseinfo && dd->sub_header_kdump->size_eraseinfo) { dump_eraseinfo(fp); } } if (dh->header_version >= 6) { fprintf(fp, " start_pfn_64: "); if (KDUMP_SPLIT()) fprintf(fp, "%lld (0x%llx)\n", kdsh->start_pfn_64, kdsh->start_pfn_64); else fprintf(fp, "(unused)\n"); fprintf(fp, " end_pfn_64: "); if (KDUMP_SPLIT()) fprintf(fp, "%lld (0x%llx)\n", kdsh->end_pfn_64, kdsh->end_pfn_64); else fprintf(fp, "(unused)\n"); fprintf(fp, " max_mapnr_64: %llu (0x%llx)\n", kdsh->max_mapnr_64, kdsh->max_mapnr_64); } fprintf(fp, "\n"); } else fprintf(fp, "(n/a)\n\n"); fprintf(fp, " data_offset: %lx\n", (ulong)dd->data_offset); fprintf(fp, " block_size: %d\n", dd->block_size); fprintf(fp, " block_shift: %d\n", dd->block_shift); fprintf(fp, " bitmap: %lx\n", (ulong)dd->bitmap); fprintf(fp, " bitmap_len: %lld\n", (ulonglong)dd->bitmap_len); fprintf(fp, " max_mapnr: %lld (0x%llx)\n", dd->max_mapnr, dd->max_mapnr); fprintf(fp, " dumpable_bitmap: %lx\n", (ulong)dd->dumpable_bitmap); fprintf(fp, " byte: %d\n", dd->byte); fprintf(fp, " bit: %d\n", dd->bit); fprintf(fp, " compressed_page: %lx\n", (ulong)dd->compressed_page); fprintf(fp, " curbufptr: %lx\n\n", (ulong)dd->curbufptr); for (i = 0; i < DISKDUMP_CACHED_PAGES; i++) { fprintf(fp, "%spage_cache_hdr[%d]:\n", i < 10 ? 
" " : "", i); fprintf(fp, " pg_flags: %x (", dd->page_cache_hdr[i].pg_flags); others = 0; if (dd->page_cache_hdr[i].pg_flags & PAGE_VALID) fprintf(fp, "%sPAGE_VALID", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " pg_addr: %llx\n", (ulonglong)dd->page_cache_hdr[i].pg_addr); fprintf(fp, " pg_bufptr: %lx\n", (ulong)dd->page_cache_hdr[i].pg_bufptr); fprintf(fp, " pg_hit_count: %ld\n", dd->page_cache_hdr[i].pg_hit_count); } fprintf(fp, "\n page_cache_buf: %lx\n", (ulong)dd->page_cache_buf); fprintf(fp, " evict_index: %d\n", dd->evict_index); fprintf(fp, " evictions: %ld\n", dd->evictions); fprintf(fp, " accesses: %ld\n", dd->accesses); fprintf(fp, " cached_reads: %ld ", dd->cached_reads); if (dd->accesses) fprintf(fp, "(%ld%%)\n", dd->cached_reads * 100 / dd->accesses); else fprintf(fp, "\n"); fprintf(fp, " valid_pages: %lx\n", (ulong)dd->valid_pages); fprintf(fp, " total_valid_pages: %ld\n", dd->valid_pages[dd->max_sect_len]); return 0; } /* * Wrapper of __diskdump_memory_dump() */ int diskdump_memory_dump(FILE *fp) { int i; if (KDUMP_SPLIT() && (dd_list != NULL)) for (i = 0; i < num_dumpfiles; i++) { dd = dd_list[i]; __diskdump_memory_dump(fp); fprintf(fp, "\n"); } else __diskdump_memory_dump(fp); return 0; } /* * Get the switch_stack address of the passed-in task. */ ulong get_diskdump_switch_stack(ulong task) { return 0; } /* * Versions of disk_dump that support it contain the "dump_level" symbol. * Version 1 and later compressed kdump dumpfiles contain the dump level * in an additional field of the sub_header_kdump structure. 
*/
int get_dump_level(void)
{
	int dump_level;

	if (DISKDUMP_VALID()) {
		/* diskdump: the level lives in the kernel's "dump_level" symbol. */
		if (symbol_exists("dump_level") &&
		    readmem(symbol_value("dump_level"), KVADDR, &dump_level,
		    sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR))
			return dump_level;
	} else if (KDUMP_CMPRS_VALID()) {
		/* compressed kdump: carried in the sub header since version 1. */
		if (dd->header->header_version >= 1)
			return dd->sub_header_kdump->dump_level;
	}

	/* Undeterminable. */
	return -1;
}

/*
 *  Used by the "sys" command to display [PARTIAL DUMP]
 *  after the dumpfile name.
 */
int is_partial_diskdump(void)
{
	return (get_dump_level() > 0 ? TRUE : FALSE);
}

/*
 *  Used by "sys" command to dump multiple split dumpfiles.
 *
 *  NOTE(review): is_partial_diskdump() reads the global dd, not ddp,
 *  so every split file displays the same partial-dump status -- confirm
 *  this is intended (the dump level is shared across split files).
 */
void show_split_dumpfiles(void)
{
	int i;
	struct diskdump_data *ddp;
	struct disk_dump_header *dh;

	for (i = 0; i < num_dumpfiles; i++) {
		ddp = dd_list[i];
		dh = ddp->header;
		fprintf(fp, "%s%s%s%s%s",
			i ? " " : "",
			ddp->filename,
			is_partial_diskdump() ? " [PARTIAL DUMP]" : "",
			dh->status & DUMP_DH_COMPRESSED_INCOMPLETE ?
			" [INCOMPLETE]" : "",
			dh->status & DUMP_DH_EXCLUDED_VMEMMAP ?
			" [EXCLUDED VMEMMAP]" : "");
		if ((i+1) < num_dumpfiles)
			fprintf(fp, "\n");
	}
}

/*
 *  Return the cpu's NT_PRSTATUS note, or NULL when the index is out of
 *  range or the note count does not line up with the online cpu count.
 */
void *
diskdump_get_prstatus_percpu(int cpu)
{
	int online;

	if ((cpu < 0) || (cpu >= dd->num_prstatus_notes))
		return NULL;

	/*
	 * If no cpu mapping was done, then there must be
	 * a one-to-one relationship between the number
	 * of online cpus and the number of notes.
	 */
	if ((online = get_cpus_online()) &&
	    (online == kt->cpus) &&
	    (online != dd->num_prstatus_notes))
		return NULL;

	return dd->nt_prstatus_percpu[cpu];
}

/*
 *  Reads a string value from VMCOREINFO.
 *
 *  Returns a string (that has to be freed by the caller) that contains the
 *  value for key or NULL if the key has not been found.
*/
static char *
vmcoreinfo_read_string(const char *key)
{
	char *buf, *value_string, *p1, *p2;
	size_t value_length;
	ulong size_vmcoreinfo;
	off_t offset;
	char keybuf[BUFSIZE];
	const off_t failed = (off_t)-1;

	/* vmcoreinfo only exists from header version 3 onward. */
	if (dd->header->header_version < 3)
		return NULL;

	buf = value_string = NULL;
	size_vmcoreinfo = dd->sub_header_kdump->size_vmcoreinfo;
	offset = dd->sub_header_kdump->offset_vmcoreinfo;
	/* Search for "KEY=" so a value never matches as a key. */
	sprintf(keybuf, "%s=", key);

	if ((buf = malloc(size_vmcoreinfo+1)) == NULL) {
		error(INFO, "compressed kdump: cannot malloc vmcoreinfo"
			" buffer\n");
		goto err;
	}

	if (FLAT_FORMAT()) {
		if (!read_flattened_format(dd->dfd, offset, buf, size_vmcoreinfo)) {
			error(INFO, "compressed kdump: cannot read vmcoreinfo data\n");
			goto err;
		}
	} else {
		if (lseek(dd->dfd, offset, SEEK_SET) == failed) {
			error(INFO, "compressed kdump: cannot lseek dump vmcoreinfo\n");
			goto err;
		}
		if (read(dd->dfd, buf, size_vmcoreinfo) < size_vmcoreinfo) {
			error(INFO, "compressed kdump: cannot read vmcoreinfo data\n");
			goto err;
		}
	}

	/*
	 *  NOTE(review): the buffer is terminated with '\n' but not '\0';
	 *  the strstr() calls below rely on the key/newline being found
	 *  before the end of the buffer -- confirm whether a NUL terminator
	 *  should be appended as well.
	 */
	buf[size_vmcoreinfo] = '\n';

	if ((p1 = strstr(buf, keybuf))) {
		p2 = p1 + strlen(keybuf);
		p1 = strstr(p2, "\n");
		value_length = p1-p2;
		value_string = calloc(value_length+1, sizeof(char));
		strncpy(value_string, p2, value_length);
		value_string[value_length] = NULLCHAR;
	}
err:
	if (buf)
		free(buf);
	return value_string;
}

/*
 *  Print the OSRELEASE vmcoreinfo value, or clear the request flag
 *  when it cannot be read.
 */
static void diskdump_get_osrelease(void)
{
	char *string;

	if ((string = vmcoreinfo_read_string("OSRELEASE"))) {
		fprintf(fp, "%s\n", string);
		free(string);
	} else
		pc->flags2 &= ~GET_OSRELEASE;
}

/*
 *  Sanity-check that a computed note offset still lies inside the
 *  notes buffer.
 */
static int valid_note_address(unsigned char *offset)
{
	if (offset > (dd->notes_buf + dd->sub_header_kdump->size_note))
		return FALSE;

	return TRUE;
}

/*
 *  Display the saved NT_PRSTATUS register set for the given cpu.
 */
void diskdump_display_regs(int cpu, FILE *ofp)
{
	Elf32_Nhdr *note32;
	Elf64_Nhdr *note64;
	char *user_regs;
	size_t len;

	if ((cpu < 0) || (cpu >= dd->num_prstatus_notes) ||
	    (dd->nt_prstatus_percpu[cpu] == NULL)) {
		error(INFO, "registers not collected for cpu %d\n", cpu);
		return;
	}

	if (machine_type("X86_64")) {
		note64 = dd->nt_prstatus_percpu[cpu];
len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (!valid_note_address((unsigned char *)note64 + len)) { error(INFO, "invalid NT_PRSTATUS note for cpu %d\n", cpu); return; } user_regs = (char *)note64 + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r15)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)) ); } if (machine_type("PPC64")) { struct ppc64_elf_prstatus *prs; struct ppc64_pt_regs *pr; note64 = dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (!valid_note_address((unsigned char *)note64 + len)) { error(INFO, "invalid NT_PRSTATUS note 
for cpu %d\n", cpu); return; } prs = (struct ppc64_elf_prstatus *) ((char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz); prs = (struct ppc64_elf_prstatus *)roundup((ulong)prs, 4); pr = &prs->pr_reg; fprintf(ofp, " R0: %016lx R1: %016lx R2: %016lx\n" " R3: %016lx R4: %016lx R5: %016lx\n" " R6: %016lx R7: %016lx R8: %016lx\n" " R9: %016lx R10: %016lx R11: %016lx\n" " R12: %016lx R13: %016lx R14: %016lx\n" " R15: %016lx R16: %016lx R16: %016lx\n" " R18: %016lx R19: %016lx R20: %016lx\n" " R21: %016lx R22: %016lx R23: %016lx\n" " R24: %016lx R25: %016lx R26: %016lx\n" " R27: %016lx R28: %016lx R29: %016lx\n" " R30: %016lx R31: %016lx\n" " NIP: %016lx MSR: %016lx\n" " OGPR3: %016lx CTR: %016lx\n" " LINK: %016lx XER: %016lx\n" " CCR: %016lx MQ: %016lx\n" " TRAP: %016lx DAR: %016lx\n" " DSISR: %016lx RESULT: %016lx\n", pr->gpr[0], pr->gpr[1], pr->gpr[2], pr->gpr[3], pr->gpr[4], pr->gpr[5], pr->gpr[6], pr->gpr[7], pr->gpr[8], pr->gpr[9], pr->gpr[10], pr->gpr[11], pr->gpr[12], pr->gpr[13], pr->gpr[14], pr->gpr[15], pr->gpr[16], pr->gpr[17], pr->gpr[18], pr->gpr[19], pr->gpr[20], pr->gpr[21], pr->gpr[22], pr->gpr[23], pr->gpr[24], pr->gpr[25], pr->gpr[26], pr->gpr[27], pr->gpr[28], pr->gpr[29], pr->gpr[30], pr->gpr[31], pr->nip, pr->msr, pr->orig_gpr3, pr->ctr, pr->link, pr->xer, pr->ccr, pr->mq, pr->trap, pr->dar, pr->dsisr, pr->result); } if (machine_type("ARM64")) { note64 = dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); if (!valid_note_address((unsigned char *)note64 + len)) { error(INFO, "invalid NT_PRSTATUS note for cpu %d\n", cpu); return; } user_regs = (char *)note64 + len - SIZE(elf_prstatus) + OFFSET(elf_prstatus_pr_reg); fprintf(ofp, " X0: %016lx X1: %016lx X2: %016lx\n" " X3: %016lx X4: %016lx X5: %016lx\n" " X6: %016lx X7: %016lx X8: %016lx\n" " X9: %016lx X10: %016lx X11: %016lx\n" " X12: %016lx X13: %016lx X14: %016lx\n" " X15: %016lx X16: %016lx X17: %016lx\n" " 
X18: %016lx X19: %016lx X20: %016lx\n" " X21: %016lx X22: %016lx X23: %016lx\n" " X24: %016lx X25: %016lx X26: %016lx\n" " X27: %016lx X28: %016lx X29: %016lx\n" " LR: %016lx SP: %016lx PC: %016lx\n" " PSTATE: %08lx FPVALID: %08x\n", ULONG(user_regs + sizeof(ulong) * 0), ULONG(user_regs + sizeof(ulong) * 1), ULONG(user_regs + sizeof(ulong) * 2), ULONG(user_regs + sizeof(ulong) * 3), ULONG(user_regs + sizeof(ulong) * 4), ULONG(user_regs + sizeof(ulong) * 5), ULONG(user_regs + sizeof(ulong) * 6), ULONG(user_regs + sizeof(ulong) * 7), ULONG(user_regs + sizeof(ulong) * 8), ULONG(user_regs + sizeof(ulong) * 9), ULONG(user_regs + sizeof(ulong) * 10), ULONG(user_regs + sizeof(ulong) * 11), ULONG(user_regs + sizeof(ulong) * 12), ULONG(user_regs + sizeof(ulong) * 13), ULONG(user_regs + sizeof(ulong) * 14), ULONG(user_regs + sizeof(ulong) * 15), ULONG(user_regs + sizeof(ulong) * 16), ULONG(user_regs + sizeof(ulong) * 17), ULONG(user_regs + sizeof(ulong) * 18), ULONG(user_regs + sizeof(ulong) * 19), ULONG(user_regs + sizeof(ulong) * 20), ULONG(user_regs + sizeof(ulong) * 21), ULONG(user_regs + sizeof(ulong) * 22), ULONG(user_regs + sizeof(ulong) * 23), ULONG(user_regs + sizeof(ulong) * 24), ULONG(user_regs + sizeof(ulong) * 25), ULONG(user_regs + sizeof(ulong) * 26), ULONG(user_regs + sizeof(ulong) * 27), ULONG(user_regs + sizeof(ulong) * 28), ULONG(user_regs + sizeof(ulong) * 29), ULONG(user_regs + sizeof(ulong) * 30), ULONG(user_regs + sizeof(ulong) * 31), ULONG(user_regs + sizeof(ulong) * 32), ULONG(user_regs + sizeof(ulong) * 33), UINT(user_regs + sizeof(ulong) * 34)); } if (machine_type("X86")) { note32 = dd->nt_prstatus_percpu[cpu]; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = (char *)note32 + len - SIZE(user_regs_struct) - sizeof(int); if (!valid_note_address((unsigned char *)note32 + len)) { error(INFO, "invalid NT_PRSTATUS note for cpu %d\n", cpu); return; } fprintf(ofp, " EAX: %08x EBX: 
%08x ECX: %08x EDX: %08x\n" " ESP: %08x EIP: %08x ESI: %08x EDI: %08x\n" " CS: %04x DS: %04x ES: %04x FS: %04x\n" " GS: %04x SS: %04x\n" " EBP: %08x EFLAGS: %08x\n", UINT(user_regs + OFFSET(user_regs_struct_eax)), UINT(user_regs + OFFSET(user_regs_struct_ebx)), UINT(user_regs + OFFSET(user_regs_struct_ecx)), UINT(user_regs + OFFSET(user_regs_struct_edx)), UINT(user_regs + OFFSET(user_regs_struct_esp)), UINT(user_regs + OFFSET(user_regs_struct_eip)), UINT(user_regs + OFFSET(user_regs_struct_esi)), UINT(user_regs + OFFSET(user_regs_struct_edi)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ds)), USHORT(user_regs + OFFSET(user_regs_struct_es)), USHORT(user_regs + OFFSET(user_regs_struct_fs)), USHORT(user_regs + OFFSET(user_regs_struct_gs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)), UINT(user_regs + OFFSET(user_regs_struct_ebp)), UINT(user_regs + OFFSET(user_regs_struct_eflags)) ); } if (machine_type("MIPS")) mips_display_regs_from_elf_notes(cpu, ofp); if (machine_type("MIPS64")) mips64_display_regs_from_elf_notes(cpu, ofp); if (machine_type("LOONGARCH64")) loongarch64_display_regs_from_elf_notes(cpu, ofp); } void dump_registers_for_compressed_kdump(void) { int c; if (!KDUMP_CMPRS_VALID() || (dd->header->header_version < 4) || !(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64") || machine_type("PPC64") || machine_type("MIPS") || machine_type("MIPS64") || machine_type("RISCV64") || machine_type("LOONGARCH64"))) error(FATAL, "-r option not supported for this dumpfile\n"); if (machine_type("ARM64") && (kt->cpus != dd->num_prstatus_notes)) fprintf(fp, "NOTE: cpus: %d NT_PRSTATUS notes: %d " "(note-to-cpu mapping is questionable)\n\n", kt->cpus, dd->num_prstatus_notes); for (c = 0; c < kt->cpus; c++) { if (hide_offline_cpu(c)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", c ? "\n" : "", c); continue; } else fprintf(fp, "%sCPU %d:\n", c ? 
"\n" : "", c); diskdump_display_regs(c, fp); } } int diskdump_kaslr_check() { if (!QEMU_MEM_DUMP_NO_VMCOREINFO()) return FALSE; if (dd->num_qemu_notes) return TRUE; return FALSE; } int diskdump_get_nr_cpus(void) { if (dd->num_prstatus_notes) return dd->num_prstatus_notes; else if (dd->num_qemu_notes) return dd->num_qemu_notes; else if (dd->num_vmcoredd_notes) return dd->num_vmcoredd_notes; else if (dd->header->nr_cpus) return dd->header->nr_cpus; return 1; } #ifdef X86_64 QEMUCPUState * diskdump_get_qemucpustate(int cpu) { if (cpu >= dd->num_qemu_notes) { if (CRASHDEBUG(1)) error(INFO, "Invalid index for QEMU Note: %d (>= %d)\n", cpu, dd->num_qemu_notes); return NULL; } if (dd->machine_type != EM_X86_64) { if (CRASHDEBUG(1)) error(INFO, "Only x86_64 64bit is supported.\n"); return NULL; } return (QEMUCPUState *)dd->nt_qemucs_percpu[cpu]; } #endif /* * extract hardware specific device dumps from coredump. */ void diskdump_device_dump_extract(int index, char *outfile, FILE *ofp) { ulonglong offset; if (!dd->num_vmcoredd_notes) error(FATAL, "no device dumps found in this dumpfile\n"); else if (index >= dd->num_vmcoredd_notes) error(FATAL, "no device dump found at index: %d", index); offset = dd->sub_header_kdump->offset_note + ((unsigned char *)dd->nt_vmcoredd_array[index] - dd->notes_buf); devdump_extract(dd->nt_vmcoredd_array[index], offset, outfile, ofp); } /* * list all hardware specific device dumps present in coredump. 
*/
void
diskdump_device_dump_info(FILE *ofp)
{
	ulonglong offset;
	char buf[BUFSIZE];
	ulong i;

	if (!dd->num_vmcoredd_notes)
		error(FATAL, "no device dumps found in this dumpfile\n");

	/*
	 * NOTE(review): the table header and index column are written to the
	 * global fp while devdump_info() is handed ofp — presumably callers
	 * always pass fp here, but verify.
	 */
	fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), LJUST, "INDEX"));
	fprintf(fp, " %s ", mkstring(buf, LONG_LONG_PRLEN, LJUST, "OFFSET"));
	fprintf(fp, " %s ", mkstring(buf, LONG_PRLEN, LJUST, "SIZE"));
	fprintf(fp, "NAME\n");

	for (i = 0; i < dd->num_vmcoredd_notes; i++) {
		fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), CENTER | INT_DEC, MKSTR(i)));
		/* file offset of this note within the dump */
		offset = dd->sub_header_kdump->offset_note +
			((unsigned char *)dd->nt_vmcoredd_array[i] - dd->notes_buf);
		devdump_info(dd->nt_vmcoredd_array[i], offset, ofp);
	}
}

/* Runtime-computed values mirroring the kernel's zram flags layout. */
static ulong ZRAM_FLAG_SHIFT;		/* 1 << flag shift: mask base for size bits */
static ulong ZRAM_FLAG_SAME_BIT;	/* ZRAM_SAME flag bit */
static ulong ZRAM_COMP_PRIORITY_BIT1;	/* first compression-priority bit */
static ulong ZRAM_COMP_PRIORITY_MASK;	/* two priority bits */

/*
 * One-time initialization of zram/zsmalloc offsets, sizes and flag
 * values needed to read swapped-out pages back from a zram device.
 */
static void
zram_init(void)
{
	long zram_flag_shift;

	MEMBER_OFFSET_INIT(zram_mem_pool, "zram", "mem_pool");
	MEMBER_OFFSET_INIT(zram_compressor, "zram", "compressor");
	/* "compressor" was replaced by the "comp_algs" array in newer kernels */
	if (INVALID_MEMBER(zram_compressor))
		MEMBER_OFFSET_INIT(zram_comp_algs, "zram", "comp_algs");
	MEMBER_OFFSET_INIT(zram_table_entry_flags, "zram_table_entry", "flags");
	if (INVALID_MEMBER(zram_table_entry_flags))
		MEMBER_OFFSET_INIT(zram_table_entry_flags, "zram_table_entry", "value");
	STRUCT_SIZE_INIT(zram_table_entry, "zram_table_entry");
	MEMBER_OFFSET_INIT(zs_pool_size_class, "zs_pool", "size_class");
	MEMBER_OFFSET_INIT(size_class_size, "size_class", "size");
	MEMBER_OFFSET_INIT(zspage_huge, "zspage", "huge");

	/* prefer the kernel's own ZRAM_LOCK enumerator; fall back by version */
	if (enumerator_value("ZRAM_LOCK", &zram_flag_shift))
		;
	else if (THIS_KERNEL_VERSION >= LINUX(6,1,0))
		zram_flag_shift = PAGESHIFT() + 1;
	else
		zram_flag_shift = 24;

	ZRAM_FLAG_SHIFT = 1 << zram_flag_shift;
	ZRAM_FLAG_SAME_BIT = 1 << (zram_flag_shift+1);
	/*
	 * NOTE(review): this adds 7 to the (1 << shift) value, not to the
	 * shift count itself, yet ZRAM_COMP_PRIORITY_BIT1 is later used as a
	 * right-shift amount on the flags word.  Looks like it was meant to
	 * be "zram_flag_shift + 7" — verify against the kernel's
	 * zram_pageflags layout before changing.
	 */
	ZRAM_COMP_PRIORITY_BIT1 = ZRAM_FLAG_SHIFT + 7;
	ZRAM_COMP_PRIORITY_MASK = 0x3;

	if (CRASHDEBUG(1))
		fprintf(fp, "zram_flag_shift: %ld\n", zram_flag_shift);
}

/*
 * Read the zsmalloc object referenced by a zram handle into zram_buf,
 * handling objects that span two pages.
 */
static unsigned char *
zram_object_addr(ulong pool, ulong handle, unsigned char
*zram_buf) { ulong obj, off, class, page, zspage; struct zspage zspage_s; physaddr_t paddr; unsigned int obj_idx, class_idx, size; ulong pages[2], sizes[2]; ulong zs_magic; readmem(handle, KVADDR, &obj, sizeof(void *), "zram entry", FAULT_ON_ERROR); obj >>= OBJ_TAG_BITS; phys_to_page(PTOB(obj >> OBJ_INDEX_BITS), &page); obj_idx = (obj & OBJ_INDEX_MASK); readmem(page + OFFSET(page_private), KVADDR, &zspage, sizeof(void *), "page_private", FAULT_ON_ERROR); readmem(zspage, KVADDR, &zspage_s, sizeof(struct zspage), "zspage", FAULT_ON_ERROR); if (VALID_MEMBER(zspage_huge)) { class_idx = zspage_s.v5_17.class; zs_magic = zspage_s.v5_17.magic; } else { class_idx = zspage_s.v0.class; zs_magic = zspage_s.v0.magic; } if (zs_magic != ZSPAGE_MAGIC) error(FATAL, "zspage magic incorrect: %x\n", zs_magic); class = pool + OFFSET(zs_pool_size_class); class += (class_idx * sizeof(void *)); readmem(class, KVADDR, &class, sizeof(void *), "size_class", FAULT_ON_ERROR); readmem(class + OFFSET(size_class_size), KVADDR, &size, sizeof(unsigned int), "size of class_size", FAULT_ON_ERROR); off = (size * obj_idx) & (~machdep->pagemask); if (off + size <= PAGESIZE()) { if (!is_page_ptr(page, &paddr)) { error(WARNING, "zspage: %lx: not a page pointer\n", page); return NULL; } readmem(paddr + off, PHYSADDR, zram_buf, size, "zram buffer", FAULT_ON_ERROR); goto out; } pages[0] = page; if (VALID_MEMBER(page_freelist)) readmem(page + OFFSET(page_freelist), KVADDR, &pages[1], sizeof(void *), "page_freelist", FAULT_ON_ERROR); else readmem(page + OFFSET(page_index), KVADDR, &pages[1], sizeof(void *), "page_index", FAULT_ON_ERROR); sizes[0] = PAGESIZE() - off; sizes[1] = size - sizes[0]; if (!is_page_ptr(pages[0], &paddr)) { error(WARNING, "pages[0]: %lx: not a page pointer\n", pages[0]); return NULL; } readmem(paddr + off, PHYSADDR, zram_buf, sizes[0], "zram buffer[0]", FAULT_ON_ERROR); if (!is_page_ptr(pages[1], &paddr)) { error(WARNING, "pages[1]: %lx: not a page pointer\n", pages[1]); return NULL; } 
readmem(paddr, PHYSADDR, zram_buf + sizes[0], sizes[1], "zram buffer[1]", FAULT_ON_ERROR); out: if (VALID_MEMBER(zspage_huge)) { if (!zspage_s.v5_17.huge) return (zram_buf + ZS_HANDLE_SIZE); } else { readmem(page, KVADDR, &obj, sizeof(void *), "page flags", FAULT_ON_ERROR); if (!(obj & (1<<10))) // PG_OwnerPriv1 flag return (zram_buf + ZS_HANDLE_SIZE); } return zram_buf; } static inline bool radix_tree_exceptional_entry(ulong entry) { return entry & RADIX_TREE_EXCEPTIONAL_ENTRY; } static unsigned char * lookup_swap_cache(ulonglong pte_val, unsigned char *zram_buf) { ulonglong swp_offset; ulong swp_type, swp_space; struct list_pair lp; physaddr_t paddr; static int is_xarray = -1; if (is_xarray < 0) { is_xarray = STREQ(MEMBER_TYPE_NAME("address_space", "i_pages"), "xarray"); } swp_type = __swp_type(pte_val); if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { swp_offset = (ulonglong)__swp_offset(pte_val); } else { swp_offset = (ulonglong)SWP_OFFSET(pte_val); } if (!symbol_exists("swapper_spaces")) return NULL; swp_space = symbol_value("swapper_spaces"); swp_space += swp_type * sizeof(void *); readmem(swp_space, KVADDR, &swp_space, sizeof(void *), "swp_spaces", FAULT_ON_ERROR); swp_space += (swp_offset >> SWAP_ADDRESS_SPACE_SHIFT) * SIZE(address_space); lp.index = swp_offset; if ((is_xarray ? do_xarray : do_radix_tree) (swp_space+OFFSET(address_space_page_tree), RADIX_TREE_SEARCH, &lp)) { if ((is_xarray ? 
xa_is_value : radix_tree_exceptional_entry)((ulong)lp.value)) { /* ignore shadow values */ return NULL; } if (!is_page_ptr((ulong)lp.value, &paddr)) { error(WARNING, "radix page: %lx: not a page pointer\n", lp.value); return NULL; } readmem(paddr, PHYSADDR, zram_buf, PAGESIZE(), "zram buffer", FAULT_ON_ERROR); return zram_buf; } return NULL; } static int get_disk_name_private_data(ulonglong pte_val, ulonglong vaddr, char *name, ulong *private_data) { ulong swap_info, bdev, bd_disk; if (!symbol_exists("swap_info")) return FALSE; swap_info = symbol_value("swap_info"); swap_info_init(); if (vt->flags & SWAPINFO_V2) { swap_info += (__swp_type(pte_val) * sizeof(void *)); readmem(swap_info, KVADDR, &swap_info, sizeof(void *), "swap_info", FAULT_ON_ERROR); } else { swap_info += (SIZE(swap_info_struct) * __swp_type(pte_val)); } readmem(swap_info + OFFSET(swap_info_struct_bdev), KVADDR, &bdev, sizeof(void *), "swap_info_struct_bdev", FAULT_ON_ERROR); readmem(bdev + OFFSET(block_device_bd_disk), KVADDR, &bd_disk, sizeof(void *), "block_device_bd_disk", FAULT_ON_ERROR); if (name) readmem(bd_disk + OFFSET(gendisk_disk_name), KVADDR, name, strlen("zram"), "gendisk_disk_name", FAULT_ON_ERROR); if (private_data) readmem(bd_disk + OFFSET(gendisk_private_data), KVADDR, private_data, sizeof(void *), "gendisk_private_data", FAULT_ON_ERROR); return TRUE; } ulong readswap(ulonglong pte_val, char *buf, ulong len, ulonglong vaddr) { char name[32] = {0}; if (!get_disk_name_private_data(pte_val, vaddr, name, NULL)) return 0; if (!strncmp(name, "zram", 4)) { return try_zram_decompress(pte_val, (unsigned char *)buf, len, vaddr); } else { if (CRASHDEBUG(2)) error(WARNING, "this page has been swapped to %s\n", name); return 0; } } ulong (*decompressor)(unsigned char *in_addr, ulong in_size, unsigned char *out_addr, ulong *out_size, void *other/* NOT USED */); /* * If userspace address was swapped out to zram, this function is called to decompress the object. 
* try_zram_decompress returns decompressed page data and data length */ ulong try_zram_decompress(ulonglong pte_val, unsigned char *buf, ulong len, ulonglong vaddr) { char name[32] = {0}; ulonglong swp_offset; unsigned char *obj_addr = NULL; unsigned char *zram_buf = NULL; unsigned char *outbuf = NULL; ulong zram, zram_table_entry, sector, index, entry, flags, size, outsize, off; if (INVALID_MEMBER(zram_mem_pool)) { zram_init(); if (INVALID_MEMBER(zram_mem_pool)) { error(WARNING, "Some pages are swapped out to zram. " "Please run mod -s zram.\n"); return 0; } } if (CRASHDEBUG(2)) error(WARNING, "this page has swapped to zram\n"); if (!get_disk_name_private_data(pte_val, vaddr, NULL, &zram)) return 0; if (THIS_KERNEL_VERSION >= LINUX(2, 6, 0)) swp_offset = (ulonglong)__swp_offset(pte_val); else swp_offset = (ulonglong)SWP_OFFSET(pte_val); sector = swp_offset << (PAGESHIFT() - 9); index = sector >> SECTORS_PER_PAGE_SHIFT; readmem(zram, KVADDR, &zram_table_entry, sizeof(void *), "zram_table_entry", FAULT_ON_ERROR); zram_table_entry += (index * SIZE(zram_table_entry)); readmem(zram_table_entry + OFFSET(zram_table_entry_flags), KVADDR, &flags, sizeof(void *), "zram_table_entry.flags", FAULT_ON_ERROR); if (VALID_MEMBER(zram_compressor)) readmem(zram + OFFSET(zram_compressor), KVADDR, name, sizeof(name), "zram compressor", FAULT_ON_ERROR); else { ulong comp_alg_addr; uint32_t prio = (flags >> ZRAM_COMP_PRIORITY_BIT1) & ZRAM_COMP_PRIORITY_MASK; readmem(zram + OFFSET(zram_comp_algs) + sizeof(const char *) * prio, KVADDR, &comp_alg_addr, sizeof(comp_alg_addr), "zram comp_algs", FAULT_ON_ERROR); read_string(comp_alg_addr, name, sizeof(name)); } if (STREQ(name, "lzo")) { #ifdef LZO if (!(dd->flags & LZO_SUPPORTED)) { if (lzo_init() == LZO_E_OK) dd->flags |= LZO_SUPPORTED; else return 0; } decompressor = (void *)lzo1x_decompress_safe; #else error(WARNING, "zram decompress error: this executable needs to be built" " with lzo library\n"); return 0; #endif } else if (STREQ(name, 
"lzo-rle")) { decompressor = (void *)&lzorle_decompress_safe; } else { /* todo: support more compressor */ error(WARNING, "only the lzo compressor is supported\n"); return 0; } zram_buf = (unsigned char *)GETBUF(PAGESIZE()); /* lookup page from swap cache */ off = PAGEOFFSET(vaddr); obj_addr = lookup_swap_cache(pte_val, zram_buf); if (obj_addr != NULL) { memcpy(buf, obj_addr + off, len); goto out; } readmem(zram_table_entry, KVADDR, &entry, sizeof(void *), "entry of table", FAULT_ON_ERROR); if (!entry || (flags & ZRAM_FLAG_SAME_BIT)) { int count; ulong *same_buf = (ulong *)GETBUF(PAGESIZE()); for (count = 0; count < PAGESIZE() / sizeof(ulong); count++) { same_buf[count] = entry; } memcpy(buf, same_buf + off, len); FREEBUF(same_buf); goto out; } size = flags & (ZRAM_FLAG_SHIFT -1); if (size == 0) { len = 0; goto out; } readmem(zram + OFFSET(zram_mem_pool), KVADDR, &zram, sizeof(void *), "zram.mem_pool", FAULT_ON_ERROR); obj_addr = zram_object_addr(zram, entry, zram_buf); if (obj_addr == NULL) { len = 0; goto out; } if (size == PAGESIZE()) { memcpy(buf, obj_addr + off, len); } else { outbuf = (unsigned char *)GETBUF(PAGESIZE()); outsize = PAGESIZE(); if (!decompressor(obj_addr, size, outbuf, &outsize, NULL)) memcpy(buf, outbuf + off, len); else { error(WARNING, "zram decompress error\n"); len = 0; } FREEBUF(outbuf); } out: if (len && CRASHDEBUG(2)) error(INFO, "%lx: zram decompress success\n", vaddr); FREEBUF(zram_buf); return len; } crash-utility-crash-9cd43f5/lkcd_dump_v7.h0000664000372000037200000003353715107550337020136 0ustar juerghjuergh/* lkcd_dump_v5.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net) * Copyright 2001 Matt D. Robinson. All rights reserved. * * Most of this is the same old stuff from vmdump.h, except now we're * actually a stand-alone driver plugged into the block layer interface, * with the exception that we now allow for compression modes externally * loaded (e.g., someone can come up with their own). */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _DUMP_H #define _DUMP_H //#include /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #ifndef MCLX /* * MCLX NOTE: the architecture-specific headers are being ignored until * deemed necessary; crash has never used them functionally, and only * referencing them in the dump_sgi_environment() helper routines. 
*/ /* necessary header files */ #include /* for architecture-specific header */ #endif #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) 
*/ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ void *dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. 
*/ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; #ifdef IA64 #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ struct pt_regs *dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; void * dha_smp_current_task[NR_CPUS]; void * dha_stack[NR_CPUS]; void * dha_switch_stack[NR_CPUS]; } dump_header_asm_t; #define NR_CPUS 32 typedef struct _dump_header_asm_smp_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ struct pt_regs *dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; void * dha_smp_current_task[NR_CPUS]; void * dha_stack[NR_CPUS]; void * dha_switch_stack[NR_CPUS]; } dump_header_asm_smp_t; #endif #ifdef __KERNEL__ /* * Structure: dump_compress_t * Function: This is what an individual compression mechanism can use * to plug in their own compression techniques. It's always * best to build these as individual modules so that people * can put in whatever they want. 
*/
typedef struct dump_compress_s {
	/* the list_head structure for list storage */
	struct list_head list;
	/* the type of compression to use (DUMP_COMPRESS_XXX) */
	int compress_type;
	/* the compression function to call */
	/* signature: (input buf, input len, output buf, output capacity) --
	 * presumably returns the compressed size; confirm against callers */
	int (*compress_func)(char *, int, char *, int);
} dump_compress_t;

/* kernel-side dump entry points (only visible under __KERNEL__) */
extern int dump_init(void);
extern void dump_execute(char *, struct pt_regs *);
extern int page_is_ram(unsigned long);

#endif /* __KERNEL__ */

#endif /* _DUMP_H */
crash-utility-crash-9cd43f5/xen_dom0.h0000664000372000037200000000415515107550337017263 0ustar  juerghjuergh/* xen_dom0.h
 *
 * Copyright (C) 2015 David Anderson
 * Copyright (C) 2015 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: David Anderson
 */

/*
 * ELF note types for Xen dom0/hypervisor kdumps.
 * The comments below are from xen/include/public/elfnote.h.
 */

/*
 * System information exported through crash notes.
 *
 * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_INFO
 * note in case of a system crash. This note will contain various
 * information about the system, see xen/include/xen/elfcore.h.
 */
#define XEN_ELFNOTE_CRASH_INFO 0x1000001

/*
 * System registers exported through crash notes.
 *
 * The kexec / kdump code will create one XEN_ELFNOTE_CRASH_REGS
 * note per cpu in case of a system crash. This note is architecture
 * specific and will contain registers not saved in the "CORE" note.
 * See xen/include/xen/elfcore.h for more information.
 */
#define XEN_ELFNOTE_CRASH_REGS 0x1000002

/*
 * For (temporary) backwards compatibility.
*/
#define NT_XEN_KDUMP_CR3 0x10000001

/*
 * Per-session state used by crash when reading a Xen dom0 kdump:
 * the hypervisor CR3 / p2m machine frame plus a small read cache.
 * Field semantics are inferred from names only -- confirm in xen_dom0.c.
 */
struct xen_kdump_data {
	ulong flags;			/* KDUMP_* bits below */
	ulong cr3;
	ulong p2m_mfn;
	char *page;
	ulong last_mfn_read;		/* single-entry read cache */
	ulong last_pmd_read;
	ulong cache_hits;		/* cache statistics */
	ulong accesses;
	int p2m_frames;
	ulong *p2m_mfn_frame_list;
	ulong xen_phys_start;
	int xen_major_version;
	int xen_minor_version;
};

/* flag bits for xen_kdump_data.flags */
#define KDUMP_P2M_INIT  (0x1)
#define KDUMP_CR3       (0x2)
#define KDUMP_MFN_LIST  (0x4)

/* sentinel returned by xen_kdump_p2m() on translation failure */
#define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL))

extern struct xen_kdump_data *xkd;

void dump_xen_kdump_data(FILE *);
struct xen_kdump_data *get_xen_kdump_data(void);
void process_xen_note(ulong, void *, size_t);
physaddr_t xen_kdump_p2m(physaddr_t);
crash-utility-crash-9cd43f5/Makefile0000664000372000037200000006617515107550337017043 0ustar  juerghjuergh# Makefile for core analysis suite
#
# Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
# www.missioncriticallinux.com, info@missioncriticallinux.com
#
# Copyright (C) 2002-2016 David Anderson
# Copyright (C) 2002-2016 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# MAKEFLAGS += --no-print-directory PROGRAM=crash # # Supported targets: X86 ALPHA PPC IA64 PPC64 SPARC64 # TARGET and GDB_CONF_FLAGS will be configured automatically by configure # TARGET= GDB_CONF_FLAGS= # Supported arches for cross compilation: x86_64, x86, aarch64, s390x, # powerpc64, alpha, sparc64, mips, riscv64 # E.g: cross compile crash-utility for aarch64 on X86_64 # make CROSS_COMPILE=aarch64-linux-gnu- -j`nproc` # or # make CROSS_COMPILE=aarch64-linux-gnu- -j`nproc` warn # ifneq ($(CROSS_COMPILE),) ARCH := $(shell echo $(CROSS_COMPILE) | sed 's:^.*/::g' | cut -d- -f1) else ARCH := $(shell uname -m) endif ARCH := $(shell echo $(ARCH) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/) CC = $(CROSS_COMPILE)gcc CXX = $(CROSS_COMPILE)g++ HOSTCC = gcc ifeq (${ARCH}, ppc64) CONF_FLAGS = -m64 endif ifneq ($(CROSS_COMPILE),) ifeq (${ARCH}, x86_64) CONF_TARGET_ARCH := X86_64 else ifeq (${ARCH}, aarch64) CONF_TARGET_ARCH := ARM64 else ifeq (${ARCH}, s390x) CONF_TARGET_ARCH := S390X else ifeq (${ARCH}, powerpc64) CONF_TARGET_ARCH := PPC64 else ifeq (${ARCH}, ppc64le) CONF_TARGET_ARCH := PPC64 else ifeq (${ARCH}, alpha) CONF_TARGET_ARCH := ALPHA else ifeq (${ARCH}, i386) CONF_TARGET_ARCH := X86 else ifeq (${ARCH}, mips) CONF_TARGET_ARCH := MIPS else ifeq (${ARCH}, sparc64) CONF_TARGET_ARCH := SPARC64 else ifeq (${ARCH}, riscv64) CONF_TARGET_ARCH := RISCV64 else $(error The current Arch(${ARCH}) does not support cross compilation) endif CONF_FLAGS += -DCONF_TARGET_ARCH=${CONF_TARGET_ARCH} CONF_FLAGS += -DGDB_TARGET_DEFAULT="\"GDB_CONF_FLAGS=--host=$(shell echo $(CROSS_COMPILE) | sed -e 's:^.*/::g' -e 's/-$$//')\"" endif # # GDB, GDB_FILES, GDB_OFILES and GDB_PATCH_FILES will be configured automatically by configure # GDB= GDB_FILES= GDB_OFILES= GDB_PATCH_FILES= # # Default installation directory # INSTALLDIR=${DESTDIR}/usr/bin # LDFLAGS will be configured automatically by configure LDFLAGS= GENERIC_HFILES=defs.h xen_hyper_defs.h xen_dom0.h 
MCORE_HFILES=va_server.h vas_crash.h REDHAT_HFILES=netdump.h diskdump.h makedumpfile.h xendump.h kvmdump.h qemu-load.h vmcore.h LKCD_DUMP_HFILES=lkcd_vmdump_v1.h lkcd_vmdump_v2_v3.h lkcd_dump_v5.h \ lkcd_dump_v7.h lkcd_dump_v8.h LKCD_OBSOLETE_HFILES=lkcd_fix_mem.h LKCD_TRACE_HFILES=lkcd_x86_trace.h IBM_HFILES=ibm_common.h SADUMP_HFILES=sadump.h UNWIND_HFILES=unwind.h unwind_i.h rse.h unwind_x86.h unwind_x86_64.h VMWARE_HFILES=vmware_vmss.h MAPLE_TREE_HFILES=maple_tree.h LZORLE_HFILES=lzorle_decompress.h CFILES=main.c tools.c global_data.c memory.c filesys.c help.c task.c \ kernel.c test.c gdb_interface.c configure.c net.c dev.c bpf.c \ printk.c \ alpha.c x86.c ppc.c ia64.c s390.c s390x.c s390dbf.c ppc64.c x86_64.c \ arm.c arm64.c mips.c mips64.c riscv64.c loongarch64.c sparc64.c \ extensions.c remote.c va_server.c va_server_v1.c symbols.c cmdline.c \ lkcd_common.c lkcd_v1.c lkcd_v2_v3.c lkcd_v5.c lkcd_v7.c lkcd_v8.c\ lkcd_fix_mem.c s390_dump.c lkcd_x86_trace.c \ netdump.c diskdump.c makedumpfile.c xendump.c unwind.c unwind_decoder.c \ unwind_x86_32_64.c unwind_arm.c \ xen_hyper.c xen_hyper_command.c xen_hyper_global_data.c \ xen_hyper_dump_tables.c kvmdump.c qemu.c qemu-load.c sadump.c ipcs.c \ ramdump.c vmware_vmss.c vmware_guestdump.c \ xen_dom0.c kaslr_helper.c sbitmap.c maple_tree.c \ lzorle_decompress.c SOURCE_FILES=${CFILES} ${GENERIC_HFILES} ${MCORE_HFILES} \ ${REDHAT_CFILES} ${REDHAT_HFILES} ${UNWIND_HFILES} \ ${LKCD_DUMP_HFILES} ${LKCD_TRACE_HFILES} ${LKCD_OBSOLETE_HFILES}\ ${IBM_HFILES} ${SADUMP_HFILES} ${VMWARE_HFILES} ${MAPLE_TREE_HFILES} \ ${LZORLE_HFILES} OBJECT_FILES=main.o tools.o global_data.o memory.o filesys.o help.o task.o \ build_data.o kernel.o test.o gdb_interface.o net.o dev.o bpf.o \ printk.o \ alpha.o x86.o ppc.o ia64.o s390.o s390x.o s390dbf.o ppc64.o x86_64.o \ arm.o arm64.o mips.o mips64.o riscv64.o loongarch64.o sparc64.o \ extensions.o remote.o va_server.o va_server_v1.o symbols.o cmdline.o \ lkcd_common.o lkcd_v1.o lkcd_v2_v3.o 
lkcd_v5.o lkcd_v7.o lkcd_v8.o \ lkcd_fix_mem.o s390_dump.o netdump.o diskdump.o makedumpfile.o xendump.o \ lkcd_x86_trace.o unwind_v1.o unwind_v2.o unwind_v3.o \ unwind_x86_32_64.o unwind_arm.o \ xen_hyper.o xen_hyper_command.o xen_hyper_global_data.o \ xen_hyper_dump_tables.o kvmdump.o qemu.o qemu-load.o sadump.o ipcs.o \ ramdump.o vmware_vmss.o vmware_guestdump.o \ xen_dom0.o kaslr_helper.o sbitmap.o maple_tree.o \ lzorle_decompress.o MEMORY_DRIVER_FILES=memory_driver/Makefile memory_driver/crash.c memory_driver/README # These are the current set of crash extensions sources. They are not built # by default unless the third command line of the "all:" stanza is uncommented. # Alternatively, they can be built by entering "make extensions" from this # directory. EXTENSIONS=extensions EXTENSION_SOURCE_FILES=${EXTENSIONS}/Makefile ${EXTENSIONS}/echo.c ${EXTENSIONS}/dminfo.c \ ${EXTENSIONS}/snap.c ${EXTENSIONS}/snap.mk ${EXTENSIONS}/trace.c \ ${EXTENSIONS}/eppic.c ${EXTENSIONS}/eppic.mk DAEMON_OBJECT_FILES=remote_daemon.o va_server.o va_server_v1.o \ lkcd_common.o lkcd_v1.o lkcd_v2_v3.o lkcd_v5.o lkcd_v7.o lkcd_v8.o \ s390_dump.o netdump_daemon.o GDB_5.0_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/blockframe.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/gnu-regex.c \ ${GDB}/gdb/ppc-linux-nat.c GDB_5.0_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o ${GDB}/gdb/target.o \ ${GDB}/gdb/blockframe.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/gnu-regex.o \ ${GDB}/gdb/ppc-linux-nat.o GDB_5.1_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/blockframe.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/gnu-regex.c GDB_5.1_OFILES=${GDB}/gdb/main.o 
${GDB}/gdb/symtab.o ${GDB}/gdb/target.o \ ${GDB}/gdb/blockframe.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/gnu-regex.o GDB_5.2.1_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/blockframe.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c GDB_5.2.1_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o ${GDB}/gdb/target.o \ ${GDB}/gdb/blockframe.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o GDB_5.3post-0.20021129.36rh_FILES=${GDB}/gdb/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/frame.c ${GDB}/gdb/alpha-tdep.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c GDB_5.3post-0.20021129.36rh_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ ${GDB}/gdb/target.o ${GDB}/gdb/frame.o ${GDB}/gdb/alpha-tdep.o \ ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o ${GDB}/gdb/ui-file.o \ ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o GDB_6.0_FILES=${GDB}/gdb/Makefile.in ${GDB}/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c \ ${GDB}/gdb/ppc-linux-tdep.c ${GDB}/sim/ppc/ppc-instructions \ ${GDB}/bfd/simple.c ${GDB}/include/obstack.h GDB_6.0_OFILES=${GDB}/gdb/main.o ${GDB}/gdb/symtab.o \ ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o \ ${GDB}/gdb/ppc-linux-tdep.o ${GDB}/bfd/simple.o GDB_6.1_FILES=${GDB}/gdb/Makefile.in ${GDB}/Makefile.in \ ${GDB}/gdb/main.c ${GDB}/gdb/symtab.c ${GDB}/gdb/target.c \ ${GDB}/gdb/symfile.c ${GDB}/gdb/elfread.c \ ${GDB}/gdb/ui-file.c ${GDB}/gdb/utils.c ${GDB}/gdb/dwarf2read.c \ ${GDB}/include/obstack.h ${GDB}/gdb/ppc-linux-tdep.c GDB_6.1_OFILES=${GDB}/gdb/main.o 
${GDB}/gdb/symtab.o \ ${GDB}/gdb/target.o ${GDB}/gdb/symfile.o ${GDB}/gdb/elfread.o \ ${GDB}/gdb/ui-file.o ${GDB}/gdb/utils.o ${GDB}/gdb/dwarf2read.o \ ${GDB}/gdb/ppc-linux-tdep.o GDB_7.0_FILES= GDB_7.0_OFILES=${GDB}/gdb/symtab.o GDB_7.3.1_FILES= GDB_7.3.1_OFILES=${GDB}/gdb/symtab.o GDB_7.6_FILES= GDB_7.6_OFILES=${GDB}/gdb/symtab.o GDB_10.2_FILES= GDB_10.2_OFILES=${GDB}/gdb/symtab.o crash_target.o GDB_16.2_FILES= GDB_16.2_OFILES=${GDB}/gdb/symtab.o crash_target.o # # GDB_FLAGS is passed up from the gdb Makefile. # GDB_FLAGS= # # WARNING_OPTIONS and WARNING_ERROR are both applied on a per-file basis. # WARNING_ERROR is NOT used on files including "dirty" gdb headers so that # successful compilations can be achieved with acceptable warnings; its # usefulness is also dependent upon the processor's compiler -- your mileage # may vary. # #WARNING_OPTIONS=-Wall -O2 -Wstrict-prototypes -Wmissing-prototypes -fstack-protector -Wformat-security #WARNING_ERROR=-Werror # TARGET_CFLAGS will be configured automatically by configure TARGET_CFLAGS= CRASH_CFLAGS=-g -D${TARGET} ${TARGET_CFLAGS} ${GDB_FLAGS} ${CFLAGS} GPL_FILES= TAR_FILES=${SOURCE_FILES} Makefile ${GPL_FILES} README .rh_rpm_package crash.8 \ ${EXTENSION_SOURCE_FILES} ${MEMORY_DRIVER_FILES} CSCOPE_FILES=${SOURCE_FILES} READLINE_DIRECTORY=./${GDB}/readline/readline BFD_DIRECTORY=./${GDB}/bfd GDB_INCLUDE_DIRECTORY=./${GDB}/include REDHATFLAGS=-DREDHAT # target could be set on command line when invoking make. 
Like: make target=ARM # otherwise target will be the same as the host ifneq ($(target),) CONF_TARGET_FLAG="-t$(target)" endif ifeq ($(findstring warn,$(MAKECMDGOALS)),warn) CONF_TARGET_FLAG += -w endif ifeq ($(findstring Warn,$(MAKECMDGOALS)),Warn) CONF_TARGET_FLAG += -W endif ifeq ($(findstring nowarn,$(MAKECMDGOALS)),nowarn) CONF_TARGET_FLAG += -n endif ifeq ($(findstring lzo,$(MAKECMDGOALS)),lzo) CONF_TARGET_FLAG += -x lzo endif ifeq ($(findstring snappy,$(MAKECMDGOALS)),snappy) CONF_TARGET_FLAG += -x snappy endif ifeq ($(findstring zstd,$(MAKECMDGOALS)),zstd) CONF_TARGET_FLAG += -x zstd endif ifeq ($(findstring valgrind,$(MAKECMDGOALS)),valgrind) CONF_TARGET_FLAG += -x valgrind endif # To build the extensions library by default, uncomment the third command # line below. Otherwise they can be built by entering "make extensions". all: make_configure @./configure ${CONF_TARGET_FLAG} -p "RPMPKG=${RPMPKG}" -b @$(MAKE) gdb_merge # @$(MAKE) extensions gdb_merge: force @if [ ! -f ${GDB}/README ]; then \ $(MAKE) gdb_unzip; fi @echo "${LDFLAGS} -lz -ldl -rdynamic" > ${GDB}/gdb/mergelibs @echo "../../${PROGRAM} ../../${PROGRAM}lib.a" > ${GDB}/gdb/mergeobj @rm -f ${PROGRAM} @if [ ! -f ${GDB}/config.status ]; then \ (cd ${GDB}; ./configure ${GDB_CONF_FLAGS} --with-separate-debug-dir=/usr/lib/debug \ --with-bugurl="" --with-expat=no --with-python=no --disable-sim; \ $(MAKE) CRASH_TARGET=${TARGET}; echo ${TARGET} > crash.target) \ else $(MAKE) rebuild; fi @if [ ! -f ${PROGRAM} ]; then \ echo; echo "${PROGRAM} build failed"; \ echo; exit 1; fi rebuild: @if [ ! 
-f ${GDB}/${GDB}.patch ]; then \ touch ${GDB}/${GDB}.patch; fi @if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ] && \ [ "`md5sum < ${GDB}.patch`" != "`md5sum < ${GDB}/${GDB}.patch`" ]; then \ (sh -x ${GDB}.patch ${TARGET}; patch -N -p0 -r- --fuzz=0 < ${GDB}.patch; cp ${GDB}.patch ${GDB}; cd ${GDB}; \ $(MAKE) CRASH_TARGET=${TARGET}) \ else (cd ${GDB}/gdb; $(MAKE) CRASH_TARGET=${TARGET}); fi gdb_unzip: @rm -f gdb.files @for FILE in ${GDB_FILES} dummy; do\ echo $$FILE >> gdb.files; done @if [ ! -f ${GDB}.tar.gz ] && [ ! -f /usr/bin/wget ]; then \ echo /usr/bin/wget is required to download ${GDB}.tar.gz; echo; exit 1; fi @if [ ! -f ${GDB}.tar.gz ] && [ -f /usr/bin/wget ]; then \ [ ! -t 2 ] && WGET_OPTS="--progress=dot:mega"; \ wget $$WGET_OPTS http://ftp.gnu.org/gnu/gdb/${GDB}.tar.gz; fi @tar --exclude-from gdb.files -xzmf ${GDB}.tar.gz @$(MAKE) gdb_patch gdb_patch: if [ -f ${GDB}.patch ] && [ -s ${GDB}.patch ]; then \ patch -p0 < ${GDB}.patch; cp ${GDB}.patch ${GDB}; fi library: ${OBJECT_FILES} ar -rs ${PROGRAM}lib.a ${OBJECT_FILES} gdb: force rm -f ${GDB_OFILES} @$(MAKE) all force: make_configure: force @rm -f configure @${HOSTCC} ${CONF_FLAGS} -o configure configure.c ${WARNING_ERROR} ${WARNING_OPTIONS} clean: make_configure @./configure ${CONF_TARGET_FLAG} -q -b @$(MAKE) do_clean do_clean: rm -f ${OBJECT_FILES} ${DAEMON_OBJECT_FILES} ${PROGRAM} ${PROGRAM}lib.a ${GDB_OFILES} @$(MAKE) -C extensions -i clean @$(MAKE) -C memory_driver -i clean build_data.o: force ${CC} -c ${CRASH_CFLAGS} build_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} install: /usr/bin/install -d ${INSTALLDIR} /usr/bin/install ${PROGRAM} ${INSTALLDIR} # /usr/bin/install ${PROGRAM}d ${INSTALLDIR} unconfig: make_configure @./configure -u warn Warn nowarn lzo snappy zstd valgrind: all @true #dummy main.o: ${GENERIC_HFILES} main.c ${CC} -c ${CRASH_CFLAGS} main.c ${WARNING_OPTIONS} ${WARNING_ERROR} cmdline.o: ${GENERIC_HFILES} cmdline.c ${CC} -c ${CRASH_CFLAGS} cmdline.c -I${READLINE_DIRECTORY} 
${WARNING_OPTIONS} ${WARNING_ERROR} tools.o: ${GENERIC_HFILES} tools.c ${CC} -c ${CRASH_CFLAGS} tools.c ${WARNING_OPTIONS} ${WARNING_ERROR} sbitmap.o: ${GENERIC_HFILES} sbitmap.c ${CC} -c ${CRASH_CFLAGS} sbitmap.c ${WARNING_OPTIONS} ${WARNING_ERROR} global_data.o: ${GENERIC_HFILES} global_data.c ${CC} -c ${CRASH_CFLAGS} global_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} symbols.o: ${GENERIC_HFILES} symbols.c ${CC} -c ${CRASH_CFLAGS} symbols.c -I${BFD_DIRECTORY} -I${GDB_INCLUDE_DIRECTORY} ${WARNING_OPTIONS} ${WARNING_ERROR} filesys.o: ${GENERIC_HFILES} filesys.c ${CC} -c ${CRASH_CFLAGS} filesys.c ${WARNING_OPTIONS} ${WARNING_ERROR} help.o: ${GENERIC_HFILES} help.c ${CC} -c ${CRASH_CFLAGS} help.c ${WARNING_OPTIONS} ${WARNING_ERROR} memory.o: ${GENERIC_HFILES} ${MAPLE_TREE_HFILES} memory.c ${CC} -c ${CRASH_CFLAGS} memory.c ${WARNING_OPTIONS} ${WARNING_ERROR} test.o: ${GENERIC_HFILES} test.c ${CC} -c ${CRASH_CFLAGS} test.c ${WARNING_OPTIONS} ${WARNING_ERROR} task.o: ${GENERIC_HFILES} task.c ${CC} -c ${CRASH_CFLAGS} task.c ${WARNING_OPTIONS} ${WARNING_ERROR} kernel.o: ${GENERIC_HFILES} kernel.c ${CC} -c ${CRASH_CFLAGS} kernel.c -I${BFD_DIRECTORY} -I${GDB_INCLUDE_DIRECTORY} ${WARNING_OPTIONS} ${WARNING_ERROR} printk.o: ${GENERIC_HFILES} printk.c ${CC} -c ${CRASH_CFLAGS} printk.c -I${GDB_INCLUDE_DIRECTORY} ${WARNING_OPTIONS} ${WARNING_ERROR} gdb_interface.o: ${GENERIC_HFILES} gdb_interface.c ${CC} -c ${CRASH_CFLAGS} gdb_interface.c ${WARNING_OPTIONS} ${WARNING_ERROR} va_server.o: ${MCORE_HFILES} va_server.c ${CC} -c ${CRASH_CFLAGS} va_server.c ${WARNING_OPTIONS} ${WARNING_ERROR} va_server_v1.o: ${MCORE_HFILES} va_server_v1.c ${CC} -c ${CRASH_CFLAGS} va_server_v1.c ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_common.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_common.c ${CC} -c ${CRASH_CFLAGS} lkcd_common.c ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v1.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v1.c ${CC} -c ${CRASH_CFLAGS} lkcd_v1.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} 
lkcd_v2_v3.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v2_v3.c ${CC} -c ${CRASH_CFLAGS} lkcd_v2_v3.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v5.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v5.c ${CC} -c ${CRASH_CFLAGS} lkcd_v5.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v7.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v7.c ${CC} -c ${CRASH_CFLAGS} lkcd_v7.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_v8.o: ${GENERIC_HFILES} ${LKCD_DUMP_HFILES} lkcd_v8.c ${CC} -c ${CRASH_CFLAGS} lkcd_v8.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} net.o: ${GENERIC_HFILES} net.c ${CC} -c ${CRASH_CFLAGS} net.c ${WARNING_OPTIONS} ${WARNING_ERROR} dev.o: ${GENERIC_HFILES} ${REDHAT_HFILES} dev.c ${CC} -c ${CRASH_CFLAGS} dev.c ${WARNING_OPTIONS} ${WARNING_ERROR} # remote.c functionality has been deprecated remote.o: ${GENERIC_HFILES} remote.c @${CC} -c ${CRASH_CFLAGS} remote.c ${WARNING_OPTIONS} ${WARNING_ERROR} remote_daemon.o: ${GENERIC_HFILES} remote.c ${CC} -c ${CRASH_CFLAGS} -DDAEMON remote.c -o remote_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR} x86.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86.c ${CC} -c ${CRASH_CFLAGS} x86.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} alpha.o: ${GENERIC_HFILES} alpha.c ${CC} -c ${CRASH_CFLAGS} alpha.c ${WARNING_OPTIONS} ${WARNING_ERROR} ppc.o: ${GENERIC_HFILES} ppc.c ${CC} -c ${CRASH_CFLAGS} ppc.c ${WARNING_OPTIONS} ${WARNING_ERROR} ia64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ia64.c ${CC} -c ${CRASH_CFLAGS} ia64.c ${WARNING_OPTIONS} ${WARNING_ERROR} ppc64.o: ${GENERIC_HFILES} ppc64.c ${CC} -c ${CRASH_CFLAGS} ppc64.c ${WARNING_OPTIONS} ${WARNING_ERROR} x86_64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} x86_64.c ${CC} -c ${CRASH_CFLAGS} x86_64.c ${WARNING_OPTIONS} ${WARNING_ERROR} arm.o: ${GENERIC_HFILES} ${REDHAT_HFILES} arm.c ${CC} -c ${CRASH_CFLAGS} arm.c ${WARNING_OPTIONS} ${WARNING_ERROR} arm64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} arm64.c ${CC} -c ${CRASH_CFLAGS} arm64.c ${WARNING_OPTIONS} ${WARNING_ERROR} mips.o: ${GENERIC_HFILES} 
${REDHAT_HFILES} mips.c ${CC} -c ${CRASH_CFLAGS} mips.c ${WARNING_OPTIONS} ${WARNING_ERROR} mips64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} mips64.c ${CC} -c ${CRASH_CFLAGS} mips64.c ${WARNING_OPTIONS} ${WARNING_ERROR} riscv64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} riscv64.c ${CC} -c ${CRASH_CFLAGS} riscv64.c ${WARNING_OPTIONS} ${WARNING_ERROR} sparc64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} sparc64.c ${CC} -c ${CRASH_CFLAGS} sparc64.c ${WARNING_OPTIONS} ${WARNING_ERROR} loongarch64.o: ${GENERIC_HFILES} ${REDHAT_HFILES} loongarch64.c ${CC} -c ${CRASH_CFLAGS} loongarch64.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390.o: ${GENERIC_HFILES} ${IBM_HFILES} s390.c ${CC} -c ${CRASH_CFLAGS} s390.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390x.o: ${GENERIC_HFILES} ${IBM_HFILES} s390x.c ${CC} -c ${CRASH_CFLAGS} s390x.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390dbf.o: ${GENERIC_HFILES} ${IBM_HFILES} s390dbf.c ${CC} -c ${CRASH_CFLAGS} s390dbf.c ${WARNING_OPTIONS} ${WARNING_ERROR} s390_dump.o: ${GENERIC_HFILES} ${IBM_HFILES} s390_dump.c ${CC} -c ${CRASH_CFLAGS} s390_dump.c ${WARNING_OPTIONS} ${WARNING_ERROR} netdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ${SADUMP_HFILES} netdump.c ${CC} -c ${CRASH_CFLAGS} netdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} netdump_daemon.o: ${GENERIC_HFILES} ${REDHAT_HFILES} netdump.c ${CC} -c ${CRASH_CFLAGS} -DDAEMON netdump.c -o netdump_daemon.o ${WARNING_OPTIONS} ${WARNING_ERROR} diskdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} diskdump.c ${CC} -c ${CRASH_CFLAGS} diskdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} makedumpfile.o: ${GENERIC_HFILES} ${REDHAT_HFILES} makedumpfile.c ${CC} -c ${CRASH_CFLAGS} makedumpfile.c ${WARNING_OPTIONS} ${WARNING_ERROR} xendump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} xendump.c ${CC} -c ${CRASH_CFLAGS} xendump.c ${WARNING_OPTIONS} ${WARNING_ERROR} kvmdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} kvmdump.c ${CC} -c ${CRASH_CFLAGS} kvmdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} qemu.o: ${GENERIC_HFILES} ${REDHAT_HFILES} qemu.c ${CC} -c 
${CRASH_CFLAGS} qemu.c ${WARNING_OPTIONS} ${WARNING_ERROR} qemu-load.o: ${GENERIC_HFILES} ${REDHAT_HFILES} qemu-load.c ${CC} -c ${CRASH_CFLAGS} qemu-load.c ${WARNING_OPTIONS} ${WARNING_ERROR} sadump.o: ${GENERIC_HFILES} ${SADUMP_HFILES} sadump.c ${CC} -c ${CRASH_CFLAGS} sadump.c ${WARNING_OPTIONS} ${WARNING_ERROR} ipcs.o: ${GENERIC_HFILES} ipcs.c ${CC} -c ${CRASH_CFLAGS} ipcs.c ${WARNING_OPTIONS} ${WARNING_ERROR} extensions.o: ${GENERIC_HFILES} extensions.c ${CC} -c ${CRASH_CFLAGS} extensions.c ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_x86_trace.o: ${GENERIC_HFILES} ${LKCD_TRACE_HFILES} lkcd_x86_trace.c ${CC} -c ${CRASH_CFLAGS} lkcd_x86_trace.c -DREDHAT ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_x86_32_64.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_x86_32_64.c ${CC} -c ${CRASH_CFLAGS} unwind_x86_32_64.c -o unwind_x86_32_64.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_arm.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind_arm.c ${CC} -c ${CRASH_CFLAGS} unwind_arm.c -o unwind_arm.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_v1.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c ${CC} -c ${CRASH_CFLAGS} unwind.c -DREDHAT -DUNWIND_V1 -o unwind_v1.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_v2.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c ${CC} -c ${CRASH_CFLAGS} unwind.c -DREDHAT -DUNWIND_V2 -o unwind_v2.o ${WARNING_OPTIONS} ${WARNING_ERROR} unwind_v3.o: ${GENERIC_HFILES} ${UNWIND_HFILES} unwind.c unwind_decoder.c ${CC} -c ${CRASH_CFLAGS} unwind.c -DREDHAT -DUNWIND_V3 -o unwind_v3.o ${WARNING_OPTIONS} ${WARNING_ERROR} lkcd_fix_mem.o: ${GENERIC_HFILES} ${LKCD_HFILES} lkcd_fix_mem.c ${CC} -c ${CRASH_CFLAGS} lkcd_fix_mem.c -DMCLX ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper.o: ${GENERIC_HFILES} xen_hyper.c ${CC} -c ${CRASH_CFLAGS} xen_hyper.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper_command.o: ${GENERIC_HFILES} xen_hyper_command.c ${CC} -c ${CRASH_CFLAGS} xen_hyper_command.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper_global_data.o: 
${GENERIC_HFILES} xen_hyper_global_data.c ${CC} -c ${CRASH_CFLAGS} xen_hyper_global_data.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_hyper_dump_tables.o: ${GENERIC_HFILES} xen_hyper_dump_tables.c ${CC} -c ${CRASH_CFLAGS} xen_hyper_dump_tables.c ${WARNING_OPTIONS} ${WARNING_ERROR} xen_dom0.o: ${GENERIC_HFILES} xen_dom0.c ${CC} -c ${CRASH_CFLAGS} xen_dom0.c ${WARNING_OPTIONS} ${WARNING_ERROR} ramdump.o: ${GENERIC_HFILES} ${REDHAT_HFILES} ramdump.c ${CC} -c ${CRASH_CFLAGS} ramdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} vmware_vmss.o: ${GENERIC_HFILES} ${VMWARE_HFILES} vmware_vmss.c ${CC} -c ${CRASH_CFLAGS} vmware_vmss.c ${WARNING_OPTIONS} ${WARNING_ERROR} vmware_guestdump.o: ${GENERIC_HFILES} ${VMWARE_HFILES} vmware_guestdump.c ${CC} -c ${CRASH_CFLAGS} vmware_guestdump.c ${WARNING_OPTIONS} ${WARNING_ERROR} kaslr_helper.o: ${GENERIC_HFILES} kaslr_helper.c ${CC} -c ${CRASH_CFLAGS} kaslr_helper.c ${WARNING_OPTIONS} ${WARNING_ERROR} bpf.o: ${GENERIC_HFILES} bpf.c ${CC} -c ${CRASH_CFLAGS} bpf.c ${WARNING_OPTIONS} ${WARNING_ERROR} maple_tree.o: ${GENERIC_HFILES} ${MAPLE_TREE_HFILES} maple_tree.c ${CC} -c ${CRASH_CFLAGS} maple_tree.c ${WARNING_OPTIONS} ${WARNING_ERROR} lzorle_decompress.o: lzorle_decompress.c ${CC} -c ${CRASH_CFLAGS} lzorle_decompress.c ${WARNING_OPTIONS} ${WARNING_ERROR} ${PROGRAM}: force @$(MAKE) all # Remote daemon functionality has been deprecated. 
daemon_deprecated: force @echo "WARNING: remote daemon functionality has been deprecated" @echo ${PROGRAM}d: daemon_deprecated make_configure @./configure -d @$(MAKE) build_data.o @$(MAKE) daemon daemon: ${DAEMON_OBJECT_FILES} ${CC} ${LDFLAGS} -o ${PROGRAM}d ${DAEMON_OBJECT_FILES} build_data.o -lz files: make_configure @./configure -q -b @$(MAKE) show_files gdb_files: make_configure @./configure -q -b @echo ${GDB_FILES} ${GDB_PATCH_FILES} show_files: @if [ -f ${PROGRAM} ]; then \ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi @echo ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} ${GPL_FILES} README \ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} ${MEMORY_DRIVER_FILES} ctags: ctags ${SOURCE_FILES} tar: make_configure @./configure -q -b @$(MAKE) do_tar do_tar: @if [ -f ${PROGRAM} ]; then \ ./${PROGRAM} --no_scroll --no_crashrc -h README > README; fi tar cvzf ${PROGRAM}.tar.gz ${TAR_FILES} ${GDB_FILES} ${GDB_PATCH_FILES} @echo; ls -l ${PROGRAM}.tar.gz VERSION= RELEASE=0 release: make_configure @if [ "`id --user`" != "0" ]; then \ echo "make release: must be super-user"; exit 1; fi @./configure -P "RPMPKG=${RPMPKG}" -u -g @$(MAKE) release_configure @echo @echo "cvs tag this release if necessary" release_configure: make_configure @if [ "${GDB}" = "" ] ; then \ echo "make release: GDB not defined: append GDB=gdb-x.x to make command line"; echo; exit 1; fi @./configure -r ${GDB} @$(MAKE) do_release do_release: @echo "CRASH VERSION: ${VERSION} GDB VERSION: ${GDB}" @if [ ! 
-f .rh_rpm_package ]; then \ echo "no .rh_rpm_package exists!"; exit 1; fi @chmod 666 .rh_rpm_package @rm -rf ./RELDIR; mkdir ./RELDIR; mkdir ./RELDIR/${PROGRAM}-${VERSION} @rm -f ${PROGRAM}-${VERSION}.tar.gz @rm -f ${PROGRAM}-${VERSION}-${RELEASE}.src.rpm @chown root ./RELDIR/${PROGRAM}-${VERSION} @tar cf - ${SOURCE_FILES} Makefile ${GDB_FILES} ${GDB_PATCH_FILES} ${GPL_FILES} \ .rh_rpm_package crash.8 ${EXTENSION_SOURCE_FILES} ${MEMORY_DRIVER_FILES} | \ (cd ./RELDIR/${PROGRAM}-${VERSION}; tar xf -) @cp ${GDB}.tar.gz ./RELDIR/${PROGRAM}-${VERSION} @./${PROGRAM} --no_scroll --no_crashrc -h README > README @cp README ./RELDIR/${PROGRAM}-${VERSION}/README @(cd ./RELDIR; find . -exec chown root {} ";") @(cd ./RELDIR; find . -exec chgrp root {} ";") @(cd ./RELDIR; find . -exec touch {} ";") @(cd ./RELDIR; \ tar czvf ../${PROGRAM}-${VERSION}.tar.gz ${PROGRAM}-${VERSION}) @chgrp root ${PROGRAM}-${VERSION}.tar.gz @rm -rf ./RELDIR @echo @ls -l ${PROGRAM}-${VERSION}.tar.gz @./configure -s -u > ${PROGRAM}.spec @if [ -s ${PROGRAM}.spec ]; then \ rm -rf ./RPMBUILD; \ mkdir -p ./RPMBUILD/SOURCES ./RPMBUILD/SPECS ./RPMBUILD/SRPMS; \ cp ${PROGRAM}-${VERSION}.tar.gz ./RPMBUILD/SOURCES; \ cp ${PROGRAM}.spec ./RPMBUILD/SPECS; \ rpmbuild --define "_sourcedir ./RPMBUILD/SOURCES" \ --define "_srcrpmdir ./RPMBUILD/SRPMS" \ --define "_specdir ./RPMBUILD/SPECS" \ --nodeps -bs ./RPMBUILD/SPECS/${PROGRAM}.spec > /dev/null; \ mv ./RPMBUILD/SRPMS/${PROGRAM}-${VERSION}-${RELEASE}.src.rpm . 
; \ rm -rf ./RPMBUILD; \ ls -l ${PROGRAM}-${VERSION}-${RELEASE}.src.rpm; \ fi ref: $(MAKE) ctags cscope cscope: rm -f cscope.files cscope.out for FILE in ${SOURCE_FILES}; do \ echo $$FILE >> cscope.files; done cscope -b -f cscope.out glink: make_configure @./configure -q -b rm -f gdb ln -s ${GDB}/gdb gdb (cd ${GDB}/gdb; rm -f ${PROGRAM}; ln -s ../../${PROGRAM} ${PROGRAM}) name: @echo ${PROGRAM} dis: objdump --disassemble --line-numbers ${PROGRAM} > ${PROGRAM}.dis extensions: make_configure @./configure ${CONF_TARGET_FLAG} -q -b @$(MAKE) do_extensions do_extensions: @$(MAKE) -C extensions -i CC=$(CC) TARGET=$(TARGET) TARGET_CFLAGS="$(CFLAGS) $(TARGET_CFLAGS)" GDB=$(GDB) GDB_FLAGS=$(GDB_FLAGS) memory_driver: make_configure @$(MAKE) -C memory_driver -i crash-utility-crash-9cd43f5/diskdump.h0000664000372000037200000000771415107550337017376 0ustar juerghjuergh/* * diskdump.h * * Copyright (C) 2004, 2005, 2006 David Anderson * Copyright (C) 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2005 FUJITSU LIMITED * Copyright (C) 2005 NEC Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/
/* NOTE(review): the include target was stripped in this copy of the file
 * (angle-bracket content lost); restore it from upstream diskdump.h
 * before compiling. */
#include

/* ceiling division / round-down-to-multiple helpers */
#define divideup(x, y)	(((x) + ((y) - 1)) / (y))
#define round(x, y)	(((x) / (y)) * (y))

#define DUMP_PARTITION_SIGNATURE	"diskdump"
#define SIG_LEN (sizeof(DUMP_PARTITION_SIGNATURE) - 1)
#define DISK_DUMP_SIGNATURE		"DISKDUMP"
/* NOTE(review): upstream pads this signature with trailing spaces; the
 * whitespace inside the literal may have been collapsed in this copy --
 * verify against upstream diskdump.h before relying on it. */
#define KDUMP_SIGNATURE			"KDUMP "

/* values for disk_dump_header.status */
#define DUMP_HEADER_COMPLETED	0
#define DUMP_HEADER_INCOMPLETED 1
#define DUMP_HEADER_COMPRESSED	8

/*
 * On-disk header of a diskdump/compressed-kdump vmcore.  This is a
 * fixed external format: field order, types and sizes must not change.
 */
struct disk_dump_header {
	char			signature[SIG_LEN];	/* = "DISKDUMP" */
	int			header_version; /* Dump header version */
	struct new_utsname	utsname;	/* copy of system_utsname */
	struct timeval		timestamp;	/* Time stamp */
	unsigned int		status; 	/* Above flags */
	int			block_size;	/* Size of a block in byte */
	int			sub_hdr_size;	/* Size of arch dependent
						   header in blocks */
	unsigned int		bitmap_blocks;	/* Size of Memory bitmap in
						   block */
	unsigned int		max_mapnr;	/* = max_mapnr, OBSOLETE!
						   32bit only, full 64bit
						   in sub header. */
	unsigned int		total_ram_blocks;/* Number of blocks should be
						   written */
	unsigned int		device_blocks;	/* Number of total blocks in
						 * the dump device */
	unsigned int		written_blocks; /* Number of written blocks */
	unsigned int		current_cpu;	/* CPU# which handles dump */
	int			nr_cpus;	/* Number of CPUs */
	/* pre-C99 zero-length array: trailing variable-length data */
	struct task_struct	*tasks[0];
};

struct disk_dump_sub_header {
	long		elf_regs;
};

/*
 * Extra sub-header used by makedumpfile-style compressed kdumps; fields
 * were appended over time, gated by header_version as noted per field.
 */
struct kdump_sub_header {
	unsigned long	phys_base;
	int		dump_level;	   /* header_version 1 and later */
	int		split;		   /* header_version 2 and later */
	unsigned long	start_pfn;	   /* header_version 2 and later,
					      OBSOLETE! 32bit only, full
					      64bit in start_pfn_64. */
	unsigned long	end_pfn;	   /* header_version 2 and later,
					      OBSOLETE! 32bit only, full
					      64bit in end_pfn_64. */
	off_t		offset_vmcoreinfo; /* header_version 3 and later */
	unsigned long	size_vmcoreinfo;   /* header_version 3 and later */
	off_t		offset_note;	   /* header_version 4 and later */
	unsigned long	size_note;	   /* header_version 4 and later */
	off_t		offset_eraseinfo;  /* header_version 5 and later */
	unsigned long	size_eraseinfo;    /* header_version 5 and later */
	unsigned long long start_pfn_64;   /* header_version 6 and later */
	unsigned long long end_pfn_64;	   /* header_version 6 and later */
	unsigned long long max_mapnr_64;   /* header_version 6 and later */
};

/* page flags */
#define DUMP_DH_COMPRESSED_ZLIB 	0x1	/* page is compressed with zlib */
#define DUMP_DH_COMPRESSED_LZO		0x2	/* page is compressed with lzo */
#define DUMP_DH_COMPRESSED_SNAPPY	0x4	/* page is compressed with snappy */
#define DUMP_DH_COMPRESSED_INCOMPLETE	0x8	/* dumpfile is incomplete */
#define DUMP_DH_EXCLUDED_VMEMMAP	0x10	/* unused vmemmap pages are excluded */
#define DUMP_DH_COMPRESSED_ZSTD 	0x20	/* page is compressed with zstd */

/* descriptor of each page for vmcore */
typedef struct page_desc {
	off_t			offset; 	/* the offset of the page data*/
	unsigned int		size;		/* the size of this dump page */
	unsigned int		flags;		/* flags */
	unsigned long long	page_flags;	/* page flags */
} page_desc_t;

#define DISKDUMP_CACHED_PAGES	(16)
#define PAGE_VALID		(0x1)	/* flags */
#define DISKDUMP_VALID_PAGE(flags)	((flags) & PAGE_VALID)
crash-utility-crash-9cd43f5/s390.c0000664000372000037200000010055415107550337016243 0ustar  juerghjuergh/* s390.c - core analysis suite
 *
 * Copyright (C) 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2006, 2009-2010, 2012-2014 David Anderson
 * Copyright (C) 2002-2006, 2009-2010, 2012-2014 Red Hat, Inc. All rights reserved.
* Copyright (C) 2005, 2006, 2010 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef S390 #include "defs.h" #define S390_WORD_SIZE 4 #define S390_ADDR_MASK 0x7fffffff #define S390_PMD_BASE_MASK (~((1UL<<6)-1)) #define S390_PT_BASE_MASK S390_PMD_BASE_MASK #define S390_PAGE_BASE_MASK (~((1UL<<12)-1)) /* Flags used in entries of page dirs and page tables. */ #define S390_PAGE_PRESENT 0x001 /* set: loaded in physical memory * clear: not loaded in physical mem */ #define S390_RO_S390 0x200 /* HW read-only */ #define S390_PAGE_INVALID 0x400 /* HW invalid */ #define S390_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */ #define S390_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */ #define S390_PTE_INVALID_MASK 0x80000900 #define S390_PTE_INVALID(x) ((x) & S390_PTE_INVALID_MASK) #define INT_STACK_SIZE STACKSIZE() // can be 4096 or 8192 #define KERNEL_STACK_SIZE STACKSIZE() // can be 4096 or 8192 #define LOWCORE_SIZE 4096 /* * declarations of static functions */ static void s390_print_lowcore(char*, struct bt_info*,int); static int s390_kvtop(struct task_context *, ulong, physaddr_t *, int); static int s390_uvtop(struct task_context *, ulong, physaddr_t *, int); static int s390_vtop(unsigned long, ulong, physaddr_t*, int); static ulong s390_vmalloc_start(void); static int s390_is_task_addr(ulong); static int s390_verify_symbol(const char *, ulong, char type); static ulong s390_get_task_pgd(ulong); static int s390_translate_pte(ulong, void *, ulonglong); static ulong s390_processor_speed(void); static int 
s390_eframe_search(struct bt_info *); static void s390_back_trace_cmd(struct bt_info *); static void s390_get_stack_frame(struct bt_info *, ulong *, ulong *); static int s390_dis_filter(ulong, char *, unsigned int); static void s390_cmd_mach(void); static int s390_get_smp_cpus(void); static void s390_display_machine_stats(void); static void s390_dump_line_number(ulong); static struct line_number_hook s390_line_number_hooks[]; static int s390_is_uvaddr(ulong, struct task_context *); /* * struct lowcore name (old: "_lowcore", new: "lowcore") */ static char *lc_struct; /* * Initialize member offsets */ static void s390_offsets_init(void) { if (STRUCT_EXISTS("lowcore")) lc_struct = "lowcore"; else lc_struct = "_lowcore"; if (MEMBER_EXISTS(lc_struct, "st_status_fixed_logout")) MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "st_status_fixed_logout"); else MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "psw_save_area"); } /* * Do all necessary machine-specific setup here. This is called several * times during initialization. 
*/ void s390_init(int when) { switch (when) { case PRE_SYMTAB: machdep->verify_symbol = s390_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); // machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = (char *)malloc(SEGMENT_TABLE_SIZE)) == NULL) error(FATAL, "cannot malloc pgd space."); machdep->pmd = machdep->pgd; if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: machdep->kvbase = 0; machdep->identity_map_base = 0; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = s390_is_uvaddr; machdep->eframe_search = s390_eframe_search; machdep->back_trace = s390_back_trace_cmd; machdep->processor_speed = s390_processor_speed; machdep->uvtop = s390_uvtop; machdep->kvtop = s390_kvtop; machdep->get_task_pgd = s390_get_task_pgd; machdep->get_stack_frame = s390_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = s390_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = s390_is_task_addr; machdep->dis_filter = s390_dis_filter; machdep->cmd_mach = s390_cmd_mach; machdep->get_smp_cpus = s390_get_smp_cpus; machdep->line_number_hooks = s390_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; vt->flags |= COMMON_VADDR; break; case POST_GDB: if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else 
machdep->nr_irqs = 0; machdep->vmalloc_start = s390_vmalloc_start; machdep->dump_irq = generic_dump_irq; if (!machdep->hz) machdep->hz = HZ; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; s390_offsets_init(); break; case POST_INIT: break; } } /* * Dump machine dependent information */ void s390_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", (ulonglong)machdep->memsize, (ulonglong)machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: s390_eframe_search()\n"); fprintf(fp, " back_trace: s390_back_trace_cmd()\n"); fprintf(fp, " processor_speed: s390_processor_speed()\n"); fprintf(fp, " uvtop: s390_uvtop()\n"); fprintf(fp, " kvtop: s390_kvtop()\n"); fprintf(fp, " get_task_pgd: s390_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: s390_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: s390_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: s390_vmalloc_start()\n"); fprintf(fp, " is_task_addr: s390_is_task_addr()\n"); fprintf(fp, " verify_symbol: 
s390_verify_symbol()\n"); fprintf(fp, " dis_filter: s390_dis_filter()\n"); fprintf(fp, " cmd_mach: s390_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: s390_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: s390_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: s390_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } /* * Check if address is in context's address space */ static int s390_is_uvaddr(ulong vaddr, struct task_context *tc) { return IN_TASK_VMA(tc->task, vaddr); } /* * Translates a user virtual address to its physical address */ static int s390_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long pgd_base; readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd_base,sizeof(long), "pgd_base",FAULT_ON_ERROR); return s390_vtop(pgd_base, vaddr, paddr, verbose); } /* * Translates a kernel virtual address to its physical address */ static int s390_kvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long pgd_base; if (!IS_KVADDR(vaddr)){ *paddr = 0; return FALSE; } if (!vt->vmalloc_start) { *paddr = VTOP(vaddr); return TRUE; } if (!IS_VMALLOC_ADDR(vaddr)) { *paddr = VTOP(vaddr); return TRUE; } pgd_base = (unsigned 
long)vt->kernel_pgd[0]; return s390_vtop(pgd_base, vaddr, paddr, verbose); } /* * Check if page is mapped */ static inline int s390_pte_present(unsigned long x) { if(THIS_KERNEL_VERSION >= LINUX(2,6,0)) { return !((x) & S390_PAGE_INVALID) || ((x) & S390_PAGE_INVALID_MASK)==S390_PAGE_INVALID_NONE; } else { return((x) & S390_PAGE_PRESENT); } } /* * page table traversal functions */ /* Segment table traversal function */ static ulong _kl_sg_table_deref_s390(ulong vaddr, ulong table, int len) { ulong offset, entry; offset = ((vaddr >> 20) & 0x7ffUL) * 4; if (offset >= (len + 1)*64) /* Offset is over the table limit. */ return 0; readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); /* * Check if the segment table entry could be read and doesn't have * any of the reserved bits set. */ if (entry & 0x80000000UL) return 0; /* Check if the segment table entry has the invalid bit set. */ if (entry & 0x40UL) return 0; /* Segment table entry is valid and well formed. */ return entry; } /* Page table traversal function */ static ulong _kl_pg_table_deref_s390(ulong vaddr, ulong table, int len) { ulong offset, entry; offset = ((vaddr >> 12) & 0xffUL) * 4; if (offset >= (len + 1)*64) /* Offset is over the table limit. */ return 0; readmem(table + offset, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); /* * Check if the page table entry could be read and doesn't have * any of the reserved bits set. */ if (entry & 0x80000900UL) return 0; /* Check if the page table entry has the invalid bit set. */ if (entry & 0x400UL) return 0; /* Page table entry is valid and well formed. */ return entry; } /* lookup virtual address in page tables */ static int s390_vtop(unsigned long table, ulong vaddr, physaddr_t *phys_addr, int verbose) { ulong entry, paddr; int len; /* * Get the segment table entry. 
* We assume that the segment table length field in the asce * is set to the maximum value of 127 (which translates to * a segment table with 2048 entries) and that the addressing * mode is 31 bit. */ entry = _kl_sg_table_deref_s390(vaddr, table, 127); if (!entry) return FALSE; table = entry & 0x7ffffc00UL; len = entry & 0xfUL; /* Get the page table entry */ entry = _kl_pg_table_deref_s390(vaddr, table, len); if (!entry) return FALSE; /* Isolate the page origin from the page table entry. */ paddr = entry & 0x7ffff000UL; /* Add the page offset and return the final value. */ *phys_addr = paddr + (vaddr & 0xfffUL); return TRUE; } /* * Determine where vmalloc'd memory starts. */ static ulong s390_vmalloc_start(void) { unsigned long highmem_addr,high_memory; highmem_addr=symbol_value("high_memory"); readmem(highmem_addr, PHYSADDR, &high_memory,sizeof(long), "highmem",FAULT_ON_ERROR); return high_memory; } /* * Check if address can be a valid task_struct */ static int s390_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * return MHz - unfortunately it is not possible to get this on linux * for zSeries */ static ulong s390_processor_speed(void) { return 0; } /* * Accept or reject a symbol from the kernel namelist. */ static int s390_verify_symbol(const char *name, ulong value, char type) { int i; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "startup") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled.")) return FALSE; /* reject L2^B symbols */ if (strstr(name, "L2\002") == name) return FALSE; if (STREQ(name, ".rodata")) return TRUE; /* throw away all symbols containing a '.' 
*/ for(i = 0; i < strlen(name);i++){ if(name[i] == '.') return FALSE; } return TRUE; } /* * Get the relevant page directory pointer from a task structure. */ static ulong s390_get_task_pgd(ulong task) { return (error(FATAL, "s390_get_task_pgd: TBD\n")); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. */ static int s390_translate_pte(ulong pte, void *physaddr, ulonglong unused) { char *arglist[MAXARGS]; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; int c,len1,len2,len3; if(S390_PTE_INVALID(pte)){ fprintf(fp,"PTE is invalid\n"); return FALSE; } if(physaddr) *((ulong *)physaddr) = pte & S390_PAGE_BASE_MASK; if(!s390_pte_present(pte)){ swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|LJUST, "PTE"), mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); sprintf(ptebuf, "%lx", pte); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return FALSE; } fprintf(fp,"PTE PHYSICAL FLAGS\n"); fprintf(fp,"%08lx %08lx",pte, pte & S390_PAGE_BASE_MASK); fprintf(fp," ("); if(pte & S390_PAGE_INVALID) fprintf(fp,"INVALID "); if(pte & S390_RO_S390) fprintf(fp,"PROTECTION"); fprintf(fp,")"); return TRUE; } /* * Look for likely exception frames in a stack. 
*/ static int s390_eframe_search(struct bt_info *bt) { if(bt->flags & BT_EFRAME_SEARCH2) return (error(FATAL, "Option '-E' is not implemented for this architecture\n")); else return (error(FATAL, "Option '-e' is not implemented for this architecture\n")); } /* * returns cpu number of task */ static int s390_cpu_of_task(unsigned long task) { int cpu; if(VALID_MEMBER(task_struct_processor)){ /* linux 2.4 */ readmem(task + OFFSET(task_struct_processor),KVADDR, &cpu, sizeof(cpu), "task_struct_processor", FAULT_ON_ERROR); } else { char thread_info[8192]; unsigned long thread_info_addr; readmem(task + OFFSET(task_struct_thread_info),KVADDR, &thread_info_addr, sizeof(thread_info_addr), "thread info addr", FAULT_ON_ERROR); readmem(thread_info_addr,KVADDR,thread_info,sizeof(thread_info), "thread info", FAULT_ON_ERROR); cpu = *((int*) &thread_info[OFFSET(thread_info_cpu)]); } return cpu; } /* * returns true, if task of bt currently is executed by a cpu */ static int s390_has_cpu(struct bt_info *bt) { int cpu = bt->tc->processor; if (is_task_active(bt->task) && (kt->cpu_flags[cpu] & ONLINE_MAP)) return TRUE; else return FALSE; } /* * read lowcore for cpu */ static void s390_get_lowcore(int cpu, char* lowcore) { unsigned long lowcore_array,lowcore_ptr; lowcore_array = symbol_value("lowcore_ptr"); readmem(lowcore_array + cpu * S390_WORD_SIZE,KVADDR, &lowcore_ptr, sizeof(long), "lowcore_ptr", FAULT_ON_ERROR); readmem(lowcore_ptr, KVADDR, lowcore, LOWCORE_SIZE, "lowcore", FAULT_ON_ERROR); } /* * Read interrupt stack (either "async_stack" or "panic_stack"); */ static void s390_get_int_stack(char *stack_name, char* lc, char* int_stack, unsigned long* start, unsigned long* end) { unsigned long stack_addr; if (!MEMBER_EXISTS(lc_struct, stack_name)) return; stack_addr = ULONG(lc + MEMBER_OFFSET(lc_struct, stack_name)); if (stack_addr == 0) return; readmem(stack_addr - INT_STACK_SIZE, KVADDR, int_stack, INT_STACK_SIZE, stack_name, FAULT_ON_ERROR); *start = stack_addr - INT_STACK_SIZE; 
*end = stack_addr; } /* * Unroll a kernel stack. */ static void s390_back_trace_cmd(struct bt_info *bt) { char* stack; char async_stack[INT_STACK_SIZE]; char panic_stack[INT_STACK_SIZE]; long ksp,backchain,old_backchain; int i=0, r14_offset,bc_offset,r14, skip_first_frame=0; unsigned long async_start = 0, async_end = 0; unsigned long panic_start = 0, panic_end = 0; unsigned long stack_end, stack_start, stack_base; char buf[BUFSIZE]; int cpu = bt->tc->processor; if (bt->hp && bt->hp->eip) { error(WARNING, "instruction pointer argument ignored on this architecture!\n"); } if (is_task_active(bt->task) && !(kt->cpu_flags[cpu] & ONLINE_MAP)) { fprintf(fp, " CPU offline\n"); return; } ksp = bt->stkptr; /* print lowcore and get async stack when task has cpu */ if(s390_has_cpu(bt)){ char lowcore[LOWCORE_SIZE]; unsigned long psw_flags; int cpu = s390_cpu_of_task(bt->task); if (ACTIVE()) { fprintf(fp,"(active)\n"); return; } s390_get_lowcore(cpu,lowcore); psw_flags = ULONG(lowcore + OFFSET(s390_lowcore_psw_save_area)); if(psw_flags & 0x10000UL){ fprintf(fp,"Task runs in userspace\n"); s390_print_lowcore(lowcore,bt,0); return; } s390_get_int_stack("async_stack", lowcore, async_stack, &async_start, &async_end); s390_get_int_stack("panic_stack", lowcore, panic_stack, &panic_start, &panic_end); s390_print_lowcore(lowcore,bt,1); fprintf(fp,"\n"); skip_first_frame=1; } /* get task stack start and end */ if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){ readmem(bt->task + OFFSET(task_struct_thread_info),KVADDR, &stack_start, sizeof(long), "thread info", FAULT_ON_ERROR); } else { stack_start = bt->task; } stack_end = stack_start + KERNEL_STACK_SIZE; if(!STRUCT_EXISTS("stack_frame")){ r14_offset = 56; bc_offset=0; } else { r14_offset = MEMBER_OFFSET("stack_frame","gprs") + 8 * S390_WORD_SIZE; bc_offset = MEMBER_OFFSET("stack_frame","back_chain"); } backchain = ksp; do { unsigned long r14_stack_off; struct load_module *lm; int j; ulong offset; char *name_plus_offset; struct syment *sp; /* Find 
stack: Either async, panic stack or task stack */ if((backchain > stack_start) && (backchain < stack_end)){ stack = bt->stackbuf; stack_base = stack_start; } else if((backchain > async_start) && (backchain < async_end) && s390_has_cpu(bt)){ stack = async_stack; stack_base = async_start; } else if((backchain > panic_start) && (backchain < panic_end) && s390_has_cpu(bt)){ stack = panic_stack; stack_base = panic_start; } else { /* invalid stackframe */ break; } r14_stack_off=backchain - stack_base + r14_offset; r14 = ULONG(&stack[r14_stack_off]) & S390_ADDR_MASK; /* print function name */ if(BT_REFERENCE_CHECK(bt)){ if(bt->ref->cmdflags & BT_REF_HEXVAL){ if(r14 == bt->ref->hexval) bt->ref->cmdflags |= BT_REF_FOUND; } else { if(STREQ(closest_symbol(r14),bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } } else if(skip_first_frame){ skip_first_frame=0; } else { fprintf(fp," #%i [%08lx] ",i,backchain); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(r14, &offset); if (sp && offset) name_plus_offset = value_to_symstr(r14, buf, bt->radix); } fprintf(fp,"%s at %x", name_plus_offset ? 
name_plus_offset : closest_symbol(r14), r14); if (module_symbol(r14, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_LINE_NUMBERS) s390_dump_line_number(r14); i++; } old_backchain=backchain; backchain = ULONG(&stack[backchain - stack_base + bc_offset]); /* print stack content if -f is specified */ if((bt->flags & BT_FULL) && !BT_REFERENCE_CHECK(bt)){ int frame_size; if(backchain == 0){ frame_size = stack_base - old_backchain + KERNEL_STACK_SIZE; } else { frame_size = MIN((backchain - old_backchain), (stack_base - old_backchain + KERNEL_STACK_SIZE)); } for(j=0; j< frame_size; j+=4){ if(j % 16 == 0){ fprintf(fp,"\n%08lx: ",old_backchain+j); } fprintf(fp," %s", format_stack_entry(bt, buf, ULONG(&stack[old_backchain - stack_base + j]), 0)); } fprintf(fp,"\n\n"); } /* Check for interrupt stackframe */ if((backchain == 0) && (stack == async_stack)){ unsigned long psw_flags,r15; psw_flags = ULONG(&stack[old_backchain - stack_base +96 +MEMBER_OFFSET("pt_regs","psw")]); if(psw_flags & 0x10000UL){ /* User psw: should not happen */ break; } r15 = ULONG(&stack[old_backchain - stack_base + 96 + MEMBER_OFFSET("pt_regs", "gprs") + 15 * S390_WORD_SIZE]); backchain=r15; fprintf(fp," - Interrupt -\n"); } } while(backchain != 0); } /* * print lowcore info (psw and all registers) */ static void s390_print_lowcore(char* lc, struct bt_info *bt, int show_symbols) { char* ptr; unsigned long tmp[4]; ptr = lc + OFFSET(s390_lowcore_psw_save_area); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); if(BT_REFERENCE_CHECK(bt)){ if(bt->ref->cmdflags & BT_REF_HEXVAL){ if(tmp[1] == bt->ref->hexval) bt->ref->cmdflags |= BT_REF_FOUND; } else { if(STREQ(closest_symbol(tmp[1]),bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } return; } fprintf(fp," LOWCORE INFO:\n"); fprintf(fp," -psw : %#010lx %#010lx\n", tmp[0], tmp[1]); if(show_symbols){ fprintf(fp," -function : %s at %lx\n", closest_symbol(tmp[1] & S390_ADDR_MASK), tmp[1] & S390_ADDR_MASK); if 
(bt->flags & BT_LINE_NUMBERS) s390_dump_line_number(tmp[1] & S390_ADDR_MASK); } ptr = lc + MEMBER_OFFSET(lc_struct, "cpu_timer_save_area"); tmp[0]=UINT(ptr); tmp[1]=UINT(ptr + S390_WORD_SIZE); fprintf(fp," -cpu timer: %#010lx %#010lx\n", tmp[0],tmp[1]); ptr = lc + MEMBER_OFFSET(lc_struct, "clock_comp_save_area"); tmp[0]=UINT(ptr); tmp[1]=UINT(ptr + S390_WORD_SIZE); fprintf(fp," -clock cmp: %#010lx %#010lx\n", tmp[0], tmp[1]); fprintf(fp," -general registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "gpregs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0],tmp[1],tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0],tmp[1],tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0],tmp[1],tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); fprintf(fp," -access registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "access_regs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], 
tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 10* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 11* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 12* S390_WORD_SIZE); tmp[1]=ULONG(ptr + 13* S390_WORD_SIZE); tmp[2]=ULONG(ptr + 14* S390_WORD_SIZE); tmp[3]=ULONG(ptr + 15* S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); fprintf(fp," -control registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "cregs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 8 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 10 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 11 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=ULONG(ptr + 12 * S390_WORD_SIZE); tmp[1]=ULONG(ptr + 13 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 14 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 15 * S390_WORD_SIZE); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); ptr = lc + MEMBER_OFFSET(lc_struct, "floating_pt_save_area"); fprintf(fp," -floating point registers 0,2,4,6:\n"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + 2 * S390_WORD_SIZE); tmp[2]=ULONG(ptr + 4 * S390_WORD_SIZE); tmp[3]=ULONG(ptr + 6 * S390_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0], tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2], tmp[3]); } /* * Get a stack frame combination of pc and ra from the most relevent spot. 
*/ static void s390_get_stack_frame(struct bt_info *bt, ulong *eip, ulong *esp) { unsigned long ksp, r14; int r14_offset; char lowcore[LOWCORE_SIZE]; if(s390_has_cpu(bt)) s390_get_lowcore(s390_cpu_of_task(bt->task),lowcore); /* get the stack pointer */ if(esp){ if(s390_has_cpu(bt)){ ksp = ULONG(lowcore + MEMBER_OFFSET(lc_struct, "gpregs_save_area") + (15 * S390_WORD_SIZE)); } else { readmem(bt->task + OFFSET(task_struct_thread_ksp), KVADDR, &ksp, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } *esp = ksp; } else { /* for 'bt -S' */ ksp=bt->hp->esp; } /* get the instruction address */ if(!eip) return; if(s390_has_cpu(bt) && esp){ *eip = ULONG(lowcore + OFFSET(s390_lowcore_psw_save_area) + S390_WORD_SIZE) & S390_ADDR_MASK; } else { if(!STRUCT_EXISTS("stack_frame")){ r14_offset = 56; } else { r14_offset = MEMBER_OFFSET("stack_frame","gprs") + 8 * S390_WORD_SIZE; } readmem(ksp + r14_offset,KVADDR,&r14,sizeof(void*),"eip", FAULT_ON_ERROR); *eip=r14 & S390_ADDR_MASK; } } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int s390_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * so this routine both fixes the references as well as imposing the current * output radix on the translations. 
*/ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int s390_get_smp_cpus(void) { return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Machine dependent command. */ void s390_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != EOF) { switch(c) { case 'c': fprintf(fp,"'-c' option is not implemented on this architecture\n"); return; case 'm': fprintf(fp,"'-m' option is not implemented on this architecture\n"); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); s390_display_machine_stats(); } /* * "mach" command output. 
*/ static void s390_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } static const char *hook_files[] = { "arch/s390/kernel/entry.S", "arch/s390/kernel/head.S" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook s390_line_number_hooks[] = { {"startup",HEAD_S}, {"_stext",HEAD_S}, {"_pstart",HEAD_S}, {"system_call",ENTRY_S}, {"sysc_do_svc",ENTRY_S}, {"sysc_do_restart",ENTRY_S}, {"sysc_return",ENTRY_S}, {"sysc_sigpending",ENTRY_S}, {"sysc_restart",ENTRY_S}, {"sysc_singlestep",ENTRY_S}, {"sysc_tracesys",ENTRY_S}, {"ret_from_fork",ENTRY_S}, {"pgm_check_handler",ENTRY_S}, {"io_int_handler",ENTRY_S}, {"io_return",ENTRY_S}, {"ext_int_handler",ENTRY_S}, {"mcck_int_handler",ENTRY_S}, {"mcck_return",ENTRY_S}, {"restart_int_handler",ENTRY_S}, {NULL, NULL} /* list must be NULL-terminated */ }; static void s390_dump_line_number(ulong callpc) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) { fprintf(fp, GDB_PATCHED() ? 
"" : " (cannot determine file and line number)\n"); } else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } #endif crash-utility-crash-9cd43f5/lkcd_v5.c0000664000372000037200000003100115107550337017062 0ustar juerghjuergh/* lkcd_v5.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define LKCD_COMMON #include "defs.h" #include "lkcd_dump_v5.h" static dump_header_t dump_header_v5 = { 0 }; static dump_page_t dump_page = { 0 }; static void mclx_cache_page_headers_v5(void); /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. */ int lkcd_dump_init_v5(FILE *fp, int fd) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_page_t *dp; lkcd->fd = fd; lkcd->fp = fp; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v5; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; lkcd->dump_page = dp; lkcd->dump_header = dh; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); /* * Allocate and clear the benchmark offsets, one per megabyte. 
*/ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->total_pages = dh->dh_num_pages; lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->zoned_offsets = 0; lkcd->get_dp_flags = get_dp_flags_v5; lkcd->get_dp_address = get_dp_address_v5; lkcd->compression = dh->dh_dump_compress; lkcd->page_header_size = sizeof(dump_page_t); lkcd->get_dp_size = get_dp_size_v5; lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: lkcd_print("reached EOF\n"); eof = TRUE; continue; } if (dp->dp_flags & ~(DUMP_DH_COMPRESSED|DUMP_DH_RAW|DUMP_DH_END|LKCD_DUMP_MCLX_V0)) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1)) lkcd->flags |= LKCD_MCLX; if (dp->dp_size > 4096) { lkcd_print("dp_size > 4096: %d\n", dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_DH_END) { lkcd_print("found DUMP_DH_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(2)) break; } /* * Allocate space for LKCD_CACHED_PAGES data pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. 
*/ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (dh->dh_version & LKCD_DUMP_MCLX_V1) mclx_cache_page_headers_v5(); if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v5(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. */ uint32_t get_dp_flags_v5(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_flags); } /* * Return the current page's dp_address. */ uint64_t get_dp_address_v5(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_address); } /* * help -S output, or as specified by arg. */ void dump_lkcd_environment_v5(ulong arg) { int others; dump_header_t *dh; dump_page_t *dp; dh = (dump_header_t *)lkcd->dump_header; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else if (dh->dh_magic_number == DUMP_MAGIC_LIVE) lkcd_print("(DUMP_MAGIC_LIVE)\n"); else lkcd_print("(?)\n"); others = 0; lkcd_print(" dh_version: "); lkcd_print(BITS32() ? 
"%lx (" : "%x (", dh->dh_version); switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) { case LKCD_DUMP_V1: lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : ""); break; case LKCD_DUMP_V2: lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : ""); break; case LKCD_DUMP_V3: lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : ""); break; case LKCD_DUMP_V5: lkcd_print("%sLKCD_DUMP_V5", others++ ? "|" : ""); break; } if (dh->dh_version & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); if (dh->dh_version & LKCD_DUMP_MCLX_V1) lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_header_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level); others = 0; if (dh->dh_dump_level & DUMP_LEVEL_HEADER) lkcd_print("%sDUMP_LEVEL_HEADER", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_KERN) lkcd_print("%sDUMP_LEVEL_KERN", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_USED) lkcd_print("%sDUMP_LEVEL_USED", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_ALL) lkcd_print("%sDUMP_LEVEL_ALL", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_page_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: "); lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end); lkcd_print(" dh_num_pages: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && strstr(dh->dh_panic_string, "\n") ? 
"" : "\n"); lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(dh->dh_time.tv_sec)))); lkcd_print("dh_utsname_sysname: %s\n", dh->dh_utsname_sysname); lkcd_print("dh_utsname_nodename: %s\n", dh->dh_utsname_nodename); lkcd_print("dh_utsname_release: %s\n", dh->dh_utsname_release); lkcd_print("dh_utsname_version: %s\n", dh->dh_utsname_version); lkcd_print("dh_utsname_machine: %s\n", dh->dh_utsname_machine); lkcd_print("dh_utsname_domainname: %s\n", dh->dh_utsname_domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print(" dh_dump_compress: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_compress); others = 0; if (dh->dh_dump_compress == DUMP_COMPRESS_NONE) lkcd_print("%sDUMP_COMPRESS_NONE", others++ ? "|" : ""); if (dh->dh_dump_compress & DUMP_COMPRESS_RLE) lkcd_print("%sDUMP_COMPRESS_RLE", others++ ? "|" : ""); if (dh->dh_dump_compress & DUMP_COMPRESS_GZIP) lkcd_print("%sDUMP_COMPRESS_GZIP", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_dump_flags: "); others = 0; lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_flags); if (dh->dh_dump_flags & DUMP_FLAGS_NONDISRUPT) lkcd_print("%sDUMP_FLAGS_NONDISRUPT", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_dump_device: "); lkcd_print(BITS32() ? "%lx\n" : "%x\n", dh->dh_dump_device); if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address); lkcd_print(" dp_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size); lkcd_print(" dp_flags: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_DH_COMPRESSED) lkcd_print("DUMP_DH_COMPRESSED", others++); if (dp->dp_flags & DUMP_DH_RAW) lkcd_print("%sDUMP_DH_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_DH_END) lkcd_print("%sDUMP_DH_END", others++ ? "|" : ""); if (dp->dp_flags & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? 
"|" : ""); lkcd_print(")\n"); } void dump_dump_page_v5(char *s, void *dpp) { dump_page_t *dp; uint32_t flags; int others; console(s); dp = (dump_page_t *)dpp; others = 0; console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ", dp->dp_address); console("dp_size: %ld ", dp->dp_size); console("dp_flags: %lx (", flags = dp->dp_flags); if (flags & DUMP_DH_COMPRESSED) console("DUMP_DH_COMPRESSED", others++); if (flags & DUMP_DH_RAW) console("%sDUMP_DH_RAW", others++ ? "|" : ""); if (flags & DUMP_DH_END) console("%sDUMP_DH_END", others++ ? "|" : ""); if (flags & LKCD_DUMP_MCLX_V0) console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); console(")\n"); } /* * Read the MCLX-enhanced page header cache. Verify the first one, which * is a pointer to the page header for address 1MB, and take the rest at * blind faith. Note that the page headers do not include the 64K dump * header offset, which must be added to the values found. */ static void mclx_cache_page_headers_v5(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; if (lseek(lkcd->fd, sizeof(dump_header_t), SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. 
*/ if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-utility-crash-9cd43f5/extensions.c0000664000372000037200000003500315107550337017740 0ustar juerghjuergh/* extensions.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013, 2018 David Anderson * Copyright (C) 2002-2013, 2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include static int in_extensions_library(char *, char *); static char *get_extensions_directory(char *, bool *); static void show_all_extensions(void); static void show_extensions(char *); #define DUMP_EXTENSIONS (0) #define LOAD_EXTENSION (1) #define UNLOAD_EXTENSION (2) #define SHOW_ALL_EXTENSIONS (4) /* * Load, unload, or list the extension libaries. */ void cmd_extend(void) { int c; int flag; flag = DUMP_EXTENSIONS; while ((c = getopt(argcnt, args, "lus")) != EOF) { switch(c) { case 's': if (flag & UNLOAD_EXTENSION) { error(INFO, "-s and -u are mutually exclusive\n"); argerrs++; }else if (flag & LOAD_EXTENSION) { error(INFO, "-s and -l are mutually exclusive\n"); argerrs++; } else flag |= SHOW_ALL_EXTENSIONS; break; case 'l': if (flag & UNLOAD_EXTENSION) { error(INFO, "-l and -u are mutually exclusive\n"); argerrs++; } else if (flag & SHOW_ALL_EXTENSIONS) { error(INFO, "-l and -s are mutually exclusive\n"); argerrs++; } else flag |= LOAD_EXTENSION; break; case 'u': if (flag & LOAD_EXTENSION) { error(INFO, "-u and -l are mutually exclusive\n"); argerrs++; } else if (flag & SHOW_ALL_EXTENSIONS) { error(INFO, "-u and -s are mutually exclusive\n"); argerrs++; } else flag |= UNLOAD_EXTENSION; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); switch (flag) { case DUMP_EXTENSIONS: if (!args[optind]) { dump_extension_table(!VERBOSE); return; } /* FALLTHROUGH */ case LOAD_EXTENSION: if (!args[optind]) { error(INFO, "-l requires one or more extension library arguments\n"); cmd_usage(pc->curcmd, SYNOPSIS); break; } while (args[optind]) { load_extension(args[optind]); optind++; } break; case UNLOAD_EXTENSION: if (!args[optind]) { unload_extension(NULL); break; } while (args[optind]) { unload_extension(args[optind]); optind++; } break; case SHOW_ALL_EXTENSIONS: show_all_extensions(); break; } } /* * List all extension libaries and their commands in either the extend * command format or for "help -e" (verbose). 
*/ void dump_extension_table(int verbose) { int i; struct extension_table *ext; struct command_table_entry *cp; char buf[BUFSIZE]; int longest, others; if (!extension_table) return; if (verbose) { for (ext = extension_table; ext; ext = ext->next) { fprintf(fp, " filename: %s\n", ext->filename); fprintf(fp, " handle: %lx\n", (ulong)ext->handle); fprintf(fp, " flags: %lx (", ext->flags); others = 0; if (ext->flags & REGISTERED) fprintf(fp, "%sREGISTERED", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " next: %lx\n", (ulong)ext->next); fprintf(fp, " prev: %lx\n", (ulong)ext->prev); for (i = 0, cp = ext->command_table; cp->name; cp++, i++) { fprintf(fp, "command_table[%d]: %lx\n", i, (ulong)cp); fprintf(fp, " name: %s\n", cp->name); fprintf(fp, " func: %lx\n", (ulong)cp->func); fprintf(fp, " help_data: %lx\n", (ulong)cp->help_data); fprintf(fp, " flags: %lx (", cp->flags); others = 0; if (cp->flags & CLEANUP) fprintf(fp, "%sCLEANUP", others++ ? "|" : ""); if (cp->flags & REFRESH_TASK_TABLE) fprintf(fp, "%sREFRESH_TASK_TABLE", others++ ? "|" : ""); if (cp->flags & HIDDEN_COMMAND) fprintf(fp, "%sHIDDEN_COMMAND", others++ ? "|" : ""); fprintf(fp, ")\n"); } if (ext->next) fprintf(fp, "\n"); } return; } /* * Print them out in the order they were loaded. 
*/ for (longest = 0, ext = extension_table; ext; ext = ext->next) { if (strlen(ext->filename) > longest) longest = strlen(ext->filename); } fprintf(fp, "%s COMMANDS\n", mkstring(buf, longest, LJUST, "SHARED OBJECT")); longest = MAX(longest, strlen("SHARED OBJECT")); for (ext = extension_table; ext; ext = ext->next) if (ext->next == NULL) break; do { fprintf(fp, "%s ", mkstring(buf, longest, LJUST, ext->filename)); for (cp = ext->command_table; cp->name; cp++) fprintf(fp, "%s ", cp->name); fprintf(fp, "\n"); } while ((ext = ext->prev)); } static void show_extensions(char *dir) { DIR *dirp; struct dirent *dp; char filename[BUFSIZE*2]; dirp = opendir(dir); if (!dirp) return; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { sprintf(filename, "%s%s%s", dir, LASTCHAR(dir) == '/' ? "" : "/", dp->d_name); if (!is_shared_object(filename)) continue; fprintf(fp, "%s\n", filename); } closedir(dirp); } static void show_all_extensions(void) { char *dir; show_extensions("./"); if ((dir = getenv("CRASH_EXTENSIONS"))) show_extensions(dir); if (BITS64()) show_extensions("/usr/lib64/crash/extensions/"); show_extensions("/usr/lib/crash/extensions/"); show_extensions("./extensions/"); } /* * Load an extension library. */ void load_extension(char *lib) { struct extension_table *ext, *curext; char buf[BUFSIZE]; size_t size; char *env; int env_len; if ((env = getenv("CRASH_EXTENSIONS"))) env_len = strlen(env)+1; else env_len = 0; size = sizeof(struct extension_table) + strlen(lib) + MAX(env_len, strlen("/usr/lib64/crash/extensions/")) + 1; if ((ext = (struct extension_table *)malloc(size)) == NULL) error(FATAL, "cannot malloc extension_table space."); BZERO(ext, size); ext->filename = (char *)((ulong)ext + sizeof(struct extension_table)); /* * If the library is not specified by an absolute pathname, dlopen() * does not look in the current directory, so modify the filename. * If it's not in the current directory, check the extensions library * directory. 
*/ if ((*lib != '.') && (*lib != '/')) { if (file_exists(lib, NULL)) sprintf(ext->filename, "./%s", lib); else if (in_extensions_library(lib, buf)) strcpy(ext->filename, buf); else { error(INFO, "%s: %s\n", lib, strerror(ENXIO)); free(ext); return; } } else strcpy(ext->filename, lib); if (!is_shared_object(ext->filename)) { error(INFO, "%s: not an ELF format object file\n", ext->filename); free(ext); return; } for (curext = extension_table; curext; curext = curext->next) { if (same_file(curext->filename, ext->filename)) { fprintf(fp, "%s: shared object already loaded\n", ext->filename); free(ext); return; } } /* * register_extension() will be called by the shared object's * _init() function before dlopen() returns below. */ pc->curext = ext; ext->handle = dlopen(ext->filename, RTLD_NOW|RTLD_GLOBAL); if (!ext->handle) { strcpy(buf, dlerror()); error(INFO, "%s\n", buf); if (strstr(buf, "undefined symbol: register_extension")) { error(INFO, "%s may be statically linked: ", pc->program_name); fprintf(fp, "recompile without the -static flag\n"); } free(ext); return; } if (!(ext->flags & REGISTERED)) { dlclose(ext->handle); if (ext->flags & (DUPLICATE_COMMAND_NAME | NO_MINIMAL_COMMANDS)) error(INFO, "%s: shared object unloaded\n", ext->filename); else error(INFO, "%s: no commands registered: shared object unloaded\n", ext->filename); free(ext); return; } fprintf(fp, "%s: shared object loaded\n", ext->filename); /* * Put new libraries at the head of the list. */ if (extension_table) { extension_table->prev = ext; ext->next = extension_table; } extension_table = ext; help_init(); } /* * Check the extensions library directories. */ static int in_extensions_library(char *lib, char *buf) { char *env; if ((env = getenv("CRASH_EXTENSIONS"))) { sprintf(buf, "%s%s%s", env, LASTCHAR(env) == '/' ? 
"" : "/", lib); if (file_exists(buf, NULL)) return TRUE; } if (BITS64()) { sprintf(buf, "/usr/lib64/crash/extensions/%s", lib); if (file_exists(buf, NULL)) return TRUE; } sprintf(buf, "/usr/lib/crash/extensions/%s", lib); if (file_exists(buf, NULL)) return TRUE; sprintf(buf, "./extensions/%s", lib); if (file_exists(buf, NULL)) return TRUE; return FALSE; } /* * Look for an extensions directory using the proper order. */ static char * get_extensions_directory(char *dirbuf, bool *end) { static int index = 0; char *dirs[] = { getenv("CRASH_EXTENSIONS"), BITS64() ? "/usr/lib64/crash/extensions" : NULL, "/usr/lib/crash/extensions", "./extensions", }; char *dir; if (index >= sizeof(dirs) / sizeof(char *)) { *end = true; return NULL; } *end = false; dir = dirs[index++]; if (is_directory(dir)) { snprintf(dirbuf, BUFSIZE, "%s", dir); return dir; } else { return NULL; } } void preload_extensions(void) { DIR *dirp; struct dirent *dp; char dirbuf[BUFSIZE]; char filename[BUFSIZE*2]; int found; bool end; next_dir: if (!get_extensions_directory(dirbuf, &end)) { if (end) return; else goto next_dir; } dirp = opendir(dirbuf); if (!dirp) { error(INFO, "%s: %s\n", dirbuf, strerror(errno)); goto next_dir; } pc->curcmd = pc->program_name; for (found = 0, dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { sprintf(filename, "%s%s%s", dirbuf, LASTCHAR(dirbuf) == '/' ? "" : "/", dp->d_name); if (!is_shared_object(filename)) continue; found++; load_extension(dp->d_name); } closedir(dirp); if (found) fprintf(fp, "\n"); else { error(NOTE, "%s: no extension modules found in directory\n\n", dirbuf); goto next_dir; } } /* * Unload all, or as specified, extension libraries. 
*/ void unload_extension(char *lib) { struct extension_table *ext; int found; char buf[BUFSIZE]; if (!lib) { while (extension_table) { ext = extension_table; if (dlclose(ext->handle)) error(FATAL, "dlclose: %s: shared object not open\n", ext->filename); fprintf(fp, "%s: shared object unloaded\n", ext->filename); extension_table = ext->next; free(ext); } help_init(); return; } if ((*lib != '.') && (*lib != '/')) { if (!file_exists(lib, NULL) && in_extensions_library(lib, buf)) lib = buf; } if (!file_exists(lib, NULL)) { error(INFO, "%s: %s\n", lib, strerror(ENXIO)); return; } for (ext = extension_table, found = FALSE; ext; ext = ext->next) { if (same_file(lib, ext->filename)) { found = TRUE; if (dlclose(ext->handle)) error(INFO, "dlclose: %s: shared object not open\n", ext->filename); else { fprintf(fp, "%s: shared object unloaded\n", ext->filename); if (extension_table == ext) { /* first */ extension_table = ext->next; if (ext->next) ext->next->prev = NULL; } else if (ext->next == NULL) /* last */ ext->prev->next = NULL; else { /* middle */ ext->prev->next = ext->next; ext->next->prev = ext->prev; } free(ext); help_init(); break; } } else if (STREQ(basename(lib), basename(ext->filename))) { error(INFO, "%s and %s are different object files\n", lib, ext->filename); found = TRUE; } } if (!found) error(INFO, "%s: not loaded\n", lib); } /* * Register the command_table as long as there are no command namespace * clashes with the currently-existing command set. Also delete any aliases * that clash, giving the registered command name priority. * * This function is called from the shared object's _init() function * before the dlopen() call returns back to load_extension() above. * The mark of approval for load_extension() is the setting of the * REGISTERED bit in the "current" extension_table structure flags. 
*/ void register_extension(struct command_table_entry *command_table) { struct command_table_entry *cp; pc->curext->flags |= NO_MINIMAL_COMMANDS; for (cp = command_table; cp->name; cp++) { if (get_command_table_entry(cp->name)) { error(INFO, "%s: \"%s\" is a duplicate of a currently-existing command\n", pc->curext->filename, cp->name); pc->curext->flags |= DUPLICATE_COMMAND_NAME; return; } if (cp->flags & MINIMAL) pc->curext->flags &= ~NO_MINIMAL_COMMANDS; } if ((pc->flags & MINIMAL_MODE) && (pc->curext->flags & NO_MINIMAL_COMMANDS)) { error(INFO, "%s: does not contain any commands which support minimal mode\n", pc->curext->filename); return; } if (pc->flags & MINIMAL_MODE) { for (cp = command_table; cp->name; cp++) { if (!(cp->flags & MINIMAL)) { error(WARNING, "%s: command \"%s\" does not support minimal mode\n", pc->curext->filename, cp->name); } } } for (cp = command_table; cp->name; cp++) { if (is_alias(cp->name)) { error(INFO, "alias \"%s\" deleted: name clash with extension command\n", cp->name); deallocate_alias(cp->name); } } pc->curext->command_table = command_table; pc->curext->flags |= REGISTERED; /* Mark of approval */ } /* * Hooks for sial. */ unsigned long get_curtask(void) { return CURRENT_TASK(); } char * crash_global_cmd(void) { return pc->curcmd; } struct command_table_entry * crash_cmd_table(void) { return pc->cmd_table; } crash-utility-crash-9cd43f5/README0000664000372000037200000004127415107550337016264 0ustar juerghjuergh CORE ANALYSIS SUITE The core analysis suite is a self-contained tool that can be used to investigate either live systems, kernel core dumps created from dump creation facilities such as kdump, kvmdump, xendump, the netdump and diskdump packages offered by Red Hat, the LKCD kernel patch, the mcore kernel patch created by Mission Critical Linux, as well as other formats created by manufacturer-specific firmware. 
o The tool is loosely based on the SVR4 crash command, but has been completely integrated with gdb in order to be able to display formatted kernel data structures, disassemble source code, etc. o The current set of available commands consist of common kernel core analysis tools such as a context-specific stack traces, source code disassembly, kernel variable displays, memory display, dumps of linked-lists, etc. In addition, any gdb command may be entered, which in turn will be passed onto the gdb module for execution. o There are several commands that delve deeper into specific kernel subsystems, which also serve as templates for kernel developers to create new commands for analysis of a specific area of interest. Adding a new command is a simple affair, and a quick recompile adds it to the command menu. o The intent is to make the tool independent of Linux version dependencies, building in recognition of major kernel code changes so as to adapt to new kernel versions, while maintaining backwards compatibility. A whitepaper with complete documentation concerning the use of this utility can be found here: https://crash-utility.github.io/crash_whitepaper.html These are the current prerequisites: o At this point, x86, ia64, x86_64, ppc64, ppc, arm, arm64, alpha, mips, mips64, loongarch64, riscv64, s390 and s390x-based kernels are supported. Other architectures may be addressed in the future. o One size fits all -- the utility can be run on any Linux kernel version version dating back to 2.2.5-15. A primary design goal is to always maintain backwards-compatibility. o In order to contain debugging data, the top-level kernel Makefile's CFLAGS definition must contain the -g flag. Typically distributions will contain a package containing a vmlinux file with full debuginfo data. 
If not, the kernel must be rebuilt: For 2.2 kernels that are not built with -g, change the following line: CFLAGS = -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer to: CFLAGS = -g -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer For 2.4 kernels that are not built with -g, change the following line: CFLAGS := $(CPPFLAGS) -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing to: CFLAGS := -g $(CPPFLAGS) -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing For 2.6 and later kernels that are not built with -g, the kernel should be configured with CONFIG_DEBUG_INFO enabled, which in turn will add the -g flag to the CFLAGS setting in the kernel Makefile. After the kernel is re-compiled, the uncompressed "vmlinux" kernel that is created in the top-level kernel build directory must be saved. o Requirements for building: Fedora: make gcc gcc-c++ ncurses-devel zlib-devel lzo-devel snappy-devel bison wget patch texinfo libzstd-devel gmp-devel mpfr-devel Ubuntu/Debian: make gcc g++ libncurses-dev zlib1g-dev liblzo2-dev libsnappy-dev bison wget patch texinfo libzstd-dev Arch Linux: make gcc ncurses zlib lzo snappy bison wget patch texinfo zstd openSUSE: make gcc gcc-c++ ncurses-devel zlib-devel lzo-devel snappy-devel bison wget patch texinfo libzstd-devel To build the crash utility: $ tar -xf crash-9.0.1.tar.gz $ cd crash-9.0.1 $ make To cross compile the crash utility for aarch64 on x86_64: $ make CROSS_COMPILE=aarch64-linux-gnu- -j`nproc` Supported arches for cross compilation: x86_64, x86, aarch64, s390x, powerpc64, alpha, sparc64, mips, riscv64 The initial build will take several minutes because the embedded gdb module must be configured and built. Alternatively, the crash source RPM file may be installed and built, and the resultant crash binary RPM file installed. The crash binary can only be used on systems of the same architecture as the host build system. 
There are a few optional manners of building the crash binary: o On an x86_64 host, a 32-bit x86 binary that can be used to analyze 32-bit x86 dumpfiles may be built by typing "make target=X86". o On an x86 or x86_64 host, a 32-bit x86 binary that can be used to analyze 32-bit arm dumpfiles may be built by typing "make target=ARM". o On an x86 or x86_64 host, a 32-bit x86 binary that can be used to analyze 32-bit mips dumpfiles may be built by typing "make target=MIPS". o On an ppc64 host, a 32-bit ppc binary that can be used to analyze 32-bit ppc dumpfiles may be built by typing "make target=PPC". o On an x86_64 host, an x86_64 binary that can be used to analyze arm64 dumpfiles may be built by typing "make target=ARM64". o On an x86_64 host, an x86_64 binary that can be used to analyze ppc64le dumpfiles may be built by typing "make target=PPC64". o On an x86_64 host, an x86_64 binary that can be used to analyze riscv64 dumpfiles may be built by typing "make target=RISCV64". o On an x86_64 host, an x86_64 binary that can be used to analyze loongarch64 dumpfiles may be built by typing "make target=LOONGARCH64". Traditionally when vmcores are compressed via the makedumpfile(8) facility the libz compression library is used, and by default the crash utility only supports libz. Recently makedumpfile has been enhanced to optionally use the LZO, snappy or zstd compression libraries. To build crash with any or all of those libraries, type "make lzo", "make snappy" or "make zstd". crash supports valgrind Memcheck tool on the crash's custom memory allocator. To build crash with this feature enabled, type "make valgrind" and then run crash with valgrind as "valgrind crash vmlinux vmcore". All of the alternate build commands above are "sticky" in that the special "make" targets only have to be entered one time; all subsequent builds will follow suit. 
If the tool is run against a kernel dumpfile, two arguments are required, the uncompressed kernel name and the kernel dumpfile name. If run on a live system, only the kernel name is required, because /dev/mem will be used as the "dumpfile". On Red Hat or Fedora kernels where the /dev/mem device is restricted, the /dev/crash memory driver will be used. If neither /dev/mem or /dev/crash are available, then /proc/kcore will be be used as the live memory source. If /proc/kcore is also restricted, then the Red Hat /dev/crash driver may be compiled and installed; its source is included in the crash-9.0.1/memory_driver subdirectory. If the kernel file is stored in /boot, /, /boot/efi, or in any /usr/src or /usr/lib/debug/lib/modules subdirectory, then no command line arguments are required -- the first kernel found that matches /proc/version will be used as the namelist. For example, invoking crash on a live system would look like this: $ crash crash 9.0.1 Copyright (C) 2002-2025 Red Hat, Inc. Copyright (C) 2004, 2005, 2006, 2010 IBM Corporation Copyright (C) 1999-2006 Hewlett-Packard Co Copyright (C) 2005, 2006, 2011, 2012 Fujitsu Limited Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. Copyright (C) 2005, 2011, 2020-2022 NEC Corporation Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. Copyright (C) 2015, 2021 VMware, Inc. This program is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Enter "help copying" to see the conditions. This program has absolutely no warranty. Enter "help warranty" for details. GNU gdb 16.2 Copyright 2013 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Type "show copying" and "show warranty" for details. 
This GDB was configured as "i686-pc-linux-gnu"... KERNEL: /boot/vmlinux DUMPFILE: /dev/mem CPUS: 1 DATE: Tue Jun 10 18:03:09 CST 2025 UPTIME: 10 days, 22:55:18 LOAD AVERAGE: 0.08, 0.03, 0.01 TASKS: 42 NODENAME: ha2.mclinux.com RELEASE: 2.4.0-test10 VERSION: #11 SMP Thu Nov 4 15:09:25 EST 2000 MACHINE: i686 (447 MHz) MEMORY: 128 MB PID: 3621 COMMAND: "crash" TASK: c463c000 CPU: 0 STATE: TASK_RUNNING (ACTIVE) crash> help * files mod sbitmapq union alias foreach mount search vm ascii fuser net set vtop bpf gdb p sig waitq bt help ps struct whatis btop ipcs pte swap wr dev irq ptob sym q dis kmem ptov sys eval list rd task exit log repeat timer extend mach runq tree crash version: 9.0.0 gdb version: 16.2 For help on any command above, enter "help ". For help on input options, enter "help input". For help on output options, enter "help output". crash> When run on a dumpfile, both the kernel namelist and dumpfile must be entered on the command line. For example, when run on a core dump created by the Red Hat netdump or diskdump facilities: $ crash vmlinux vmcore crash 9.0.0 Copyright (C) 2002-2025 Red Hat, Inc. Copyright (C) 2004, 2005, 2006, 2010 IBM Corporation Copyright (C) 1999-2006 Hewlett-Packard Co Copyright (C) 2005, 2006, 2011, 2012 Fujitsu Limited Copyright (C) 2006, 2007 VA Linux Systems Japan K.K. Copyright (C) 2005, 2011, 2020-2022 NEC Corporation Copyright (C) 1999, 2002, 2007 Silicon Graphics, Inc. Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. Copyright (C) 2015, 2021 VMware, Inc. This program is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Enter "help copying" to see the conditions. This program has absolutely no warranty. Enter "help warranty" for details. GNU gdb 16.2 Copyright 2013 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later This is free software: you are free to change and redistribute it. 
There is NO WARRANTY, to the extent permitted by law. Type "show copying" and "show warranty" for details. This GDB was configured as "i686-pc-linux-gnu"... KERNEL: vmlinux DUMPFILE: vmcore CPUS: 4 DATE: Tue Jun 10 18:03:09 CST 2025 UPTIME: 00:02:40 LOAD AVERAGE: 2.24, 0.96, 0.37 TASKS: 70 NODENAME: pro1.lab.boston.redhat.com RELEASE: 2.6.3-2.1.214.11smp VERSION: #1 SMP Tue Mar 2 10:58:27 EST 2004 MACHINE: i686 (2785 Mhz) MEMORY: 512 MB PANIC: "Oops: 0002 [#1]" (check log for details) PID: 0 COMMAND: "swapper" TASK: 22fa200 (1 of 4) [THREAD_INFO: 2356000] CPU: 0 STATE: TASK_RUNNING (PANIC) crash> The tool's environment is context-specific. On a live system, the default context is the command itself; on a dump the default context will be the task that panicked. The most commonly-used commands are: set - set a new task context by pid, task address, or cpu. bt - backtrace of the current context, or as specified with arguments. p - print the contents of a kernel variable. rd - read memory, which may be either kernel virtual, user virtual, or physical. ps - simple process listing. log - dump the kernel log_buf. struct - print the contents of a structure at a specified address. foreach - execute a command on all tasks, or those specified, in the system. Detailed help concerning the use of each of the commands in the menu above may be displayed by entering "help command", where "command" is one of those listed above. Rather than getting bogged down in details here, simply run the help command on each of the commands above. Note that many commands have multiple options so as to avoid the proliferation of command names. Command output may be piped to external commands or redirected to files. Enter "help output" for details. The command line history mechanism allows for command-line recall and command-line editing. Input files containing a set of crash commands may be substituted for command-line input. Enter "help input" for details. 
Note that a .crashrc file (or .rc if the name has been changed), may contain any number of "set" or "alias" commands -- see the help pages on those two commands for details. Lastly, if a command is entered that is not recognized, it is checked against the kernel's list of variables, structure, union or typedef names, and if found, the command is passed to "p", "struct", "union" or "whatis". That being the case, as long as a kernel variable/structure/union name is different from any of the current commands, it may be entered directly on the command line as a shorthand. (1) A kernel variable can be dumped by simply entering its name: crash> init_mm init_mm = $2 = { mmap = 0xc022d540, mmap_avl = 0x0, mmap_cache = 0x0, pgd = 0xc0101000, count = { counter = 0x6 }, map_count = 0x1, mmap_sem = { count = { counter = 0x1 }, waking = 0x0, wait = 0x0 }, context = 0x0, start_code = 0xc0000000, end_code = 0xc022b4c8, end_data = c0250388, ... (2) A structure or union can be dumped simply by entering its name and address: crash> vm_area_struct c5ba3910 struct vm_area_struct { vm_mm = 0xc3ae3210, vm_start = 0x821b000, vm_end = 0x8692000, vm_next = 0xc5ba3890, vm_page_prot = { pgprot = 0x25 }, vm_flags = 0x77, vm_avl_height = 0x4, vm_avl_left = 0xc0499540, vm_avl_right = 0xc0499f40, vm_next_share = 0xc04993c0, vm_pprev_share = 0xc0499060, vm_ops = 0x0, vm_offset = 0x0, vm_file = 0x0, vm_pte = 0x0 } The crash utility has been designed to facilitate the task of adding new commands. New commands may be permanently compiled into the crash executable, or dynamically added during runtime using shared object files. To permanently add a new command to the crash executable's menu: 1. For a command named "xxx", put a reference to cmd_xxx() in defs.h. 2. Add cmd_xxx into the base_command_table[] array in global_data.c. 3. Write cmd_xxx(), putting it in one of the appropriate files. Look at the other commands for guidance on getting symbolic data, reading memory, displaying data, etc... 4. Recompile and run. 
Note that while the initial compile of crash, which configures and compiles the gdb module, takes several minutes, subsequent re-compiles to do such things as add new commands or fix bugs just takes a few seconds. Alternatively, you can create shared object library files consisting of crash command extensions, that can be dynamically linked into the crash executable during runtime or during initialization. This will allow the same shared object to be used with subsequent crash releases without having to re-merge the command's code into each new set of crash sources. The dynamically linked-in commands will automatically show up in the crash help menu. For details, enter "help extend" during runtime, or enter "crash -h extend" from the shell command line. crash-utility-crash-9cd43f5/tools.c0000664000372000037200000053257515107550337016721 0ustar juerghjuergh/* tools.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include #ifdef VALGRIND #include #include #endif static void print_number(struct number_option *, int, int); static long alloc_hq_entry(void); struct hq_entry; static void dealloc_hq_entry(struct hq_entry *); static void show_options(void); static void dump_struct_members(struct list_data *, int, ulong); static void rbtree_iteration(ulong, struct tree_data *, char *); void dump_struct_members_for_tree(struct tree_data *, int, ulong); struct req_entry { char *arg, *name, **member; int *is_str, *is_ptr; ulong *width, *offset; int count; }; static void print_value(struct req_entry *, unsigned int, ulong, unsigned int); struct req_entry *fill_member_offsets(char *); void dump_struct_members_fast(struct req_entry *, int, ulong); FILE * set_error(char *target) { FILE *tmp_fp = NULL; char *tmp_str = NULL; if (STREQ(target, pc->error_path)) return pc->error_fp; tmp_str = malloc(strlen(target) + 1); if (tmp_str == NULL) return NULL; strcpy(tmp_str, target); if (STREQ(target, "default")) tmp_fp = stdout; else if (STREQ(target, "redirect")) tmp_fp = fp; else { tmp_fp = fopen(target, "a"); if (tmp_fp == NULL) { error(INFO, "invalid path: %s\n", target); return NULL; } } if (pc->error_fp != NULL && pc->error_fp != stdout && pc->error_fp != fp) fclose(pc->error_fp); if (pc->error_path) free(pc->error_path); pc->error_fp = tmp_fp; pc->error_path = tmp_str; return pc->error_fp; } /* * General purpose error reporting routine. Type INFO prints the message * and returns. Type FATAL aborts the command in progress, and longjmps * back to the appropriate recovery location. If a FATAL occurs during * program initialization, exit() is called. * * The idea is to get the message out so that it is seen by the user * regardless of how the command output may be piped or redirected. * Besides stderr, check whether the output is going to a file or pipe, and * if so, intermingle the error message there as well. */ int __error(int type, char *fmt, ...) 
{ int end_of_line, new_line; char buf[BUFSIZE]; char *spacebuf; void *retaddr[NUMBER_STACKFRAMES] = { 0 }; va_list ap; if (STREQ(pc->error_path, "redirect")) pc->error_fp = fp; if (CRASHDEBUG(1) || (pc->flags & DROP_CORE)) { SAVE_RETURN_ADDRESS(retaddr); console("error() trace: %lx => %lx => %lx => %lx\n", retaddr[3], retaddr[2], retaddr[1], retaddr[0]); } va_start(ap, fmt); (void)vsnprintf(buf, BUFSIZE, fmt, ap); va_end(ap); if (!fmt && FATAL_ERROR(type)) { fprintf(pc->error_fp, "\n"); clean_exit(1); } end_of_line = FATAL_ERROR(type) && !(pc->flags & RUNTIME); if ((new_line = (buf[0] == '\n'))) shift_string_left(buf, 1); else if (pc->flags & PLEASE_WAIT) new_line = TRUE; if (type == CONT) spacebuf = space(strlen(pc->curcmd)); else spacebuf = NULL; if (pc->stdpipe && (STREQ(pc->error_path, "default") || STREQ(pc->error_path, "redirect"))) { fprintf(pc->stdpipe, "%s%s%s %s%s", new_line ? "\n" : "", type == CONT ? spacebuf : pc->curcmd, type == CONT ? " " : ":", type == WARNING ? "WARNING: " : type == NOTE ? "NOTE: " : "", buf); fflush(pc->stdpipe); } else { fprintf(pc->error_fp, "%s%s%s %s%s", new_line || end_of_line ? "\n" : "", type == WARNING ? "WARNING" : type == NOTE ? "NOTE" : type == CONT ? spacebuf : pc->curcmd, type == CONT ? " " : ":", buf, end_of_line ? "\n" : ""); fflush(pc->error_fp); } if ((STREQ(pc->error_path, "default")) && (fp != stdout) && (fp != pc->stdpipe) && (fp != pc->tmpfile)) { fprintf(fp, "%s%s%s %s", new_line ? "\n" : "", type == WARNING ? "WARNING" : type == NOTE ? "NOTE" : type == CONT ? spacebuf : pc->curcmd, type == CONT ? 
" " : ":", buf); fflush(fp); } if ((pc->flags & DROP_CORE) && (type != NOTE)) { dump_trace(retaddr); SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL); drop_core("DROP_CORE flag set: forcing a segmentation fault\n"); } switch (type) { case FATAL: if (pc->flags & IN_FOREACH) RESUME_FOREACH(); /* FALLTHROUGH */ case FATAL_RESTART: if (pc->flags & RUNTIME) RESTART(); else { if (REMOTE()) remote_exit(); clean_exit(1); } default: case INFO: case NOTE: case WARNING: return FALSE; } } /* * Parse a line into tokens, populate the passed-in argv[] array, and return * the count of arguments found. This function modifies the passed-string * by inserting a NULL character at the end of each token. Expressions * encompassed by parentheses, and strings encompassed by apostrophes, are * collected into single tokens. */ int parse_line(char *str, char *argv[]) { int i, j, k; int string; int expression; for (i = 0; i < MAXARGS; i++) argv[i] = NULL; clean_line(str); if (str == NULL || strlen(str) == 0) return(0); i = j = k = 0; string = FALSE; expression = 0; /* * Special handling for when the first character is a '"'. */ if (str[0] == '"') { next: do { i++; } while ((str[i] != NULLCHAR) && (str[i] != '"')); switch (str[i]) { case NULLCHAR: argv[j] = &str[k]; return j+1; case '"': argv[j++] = &str[k+1]; str[i++] = NULLCHAR; if (str[i] == '"') { k = i; goto next; } break; } } else argv[j++] = str; while (TRUE) { if (j == MAXARGS) error(FATAL, "too many arguments in string!\n"); while (str[i] != ' ' && str[i] != '\t' && str[i] != NULLCHAR) { i++; } switch (str[i]) { case ' ': case '\t': str[i++] = NULLCHAR; while (str[i] == ' ' || str[i] == '\t') { i++; } if (str[i] == '"') { str[i] = ' '; string = TRUE; i++; } /* * Make an expression encompassed by a set of parentheses * a single argument. Also account for embedded sets. 
*/ if (!string && str[i] == '(') { argv[j++] = &str[i]; expression = 1; while (expression > 0) { i++; switch (str[i]) { case '(': expression++; break; case ')': expression--; break; case NULLCHAR: case '\n': expression = -1; break; default: break; } } if (expression == 0) { i++; continue; } } if (str[i] != NULLCHAR && str[i] != '\n') { argv[j++] = &str[i]; if (string) { string = FALSE; while (str[i] != '"' && str[i] != NULLCHAR) i++; if (str[i] == '"') str[i] = ' '; } break; } /* else fall through */ case '\n': str[i] = NULLCHAR; /* keep falling... */ case NULLCHAR: argv[j] = NULLCHAR; return(j); } } } /* * Defuse controversy re: extensions to ctype.h */ int whitespace(int c) { return ((c == ' ') ||(c == '\t')); } int ascii(int c) { return ((c >= 0) && ( c <= 0x7f)); } /* * Strip line-ending whitespace and linefeeds. */ char * strip_line_end(char *line) { strip_linefeeds(line); strip_ending_whitespace(line); return(line); } /* * Strip line-beginning and line-ending whitespace and linefeeds. */ char * clean_line(char *line) { strip_beginning_whitespace(line); strip_linefeeds(line); strip_ending_whitespace(line); return(line); } /* * Strip line-ending linefeeds in a string. */ char * strip_linefeeds(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == '\n') { *p = NULLCHAR; if (--p < line) break; } return(line); } /* * Strip a specified line-ending character in a string. */ char * strip_ending_char(char *line, char c) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); if (*p == c) *p = NULLCHAR; return(line); } /* * Strip a specified line-beginning character in a string. */ char * strip_beginning_char(char *line, char c) { if (line == NULL || strlen(line) == 0) return(line); if (FIRSTCHAR(line) == c) shift_string_left(line, 1); return(line); } /* * Strip line-ending whitespace. 
*/ char * strip_ending_whitespace(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == ' ' || *p == '\t') { *p = NULLCHAR; if (p == line) break; p--; } return(line); } /* * Strip line-beginning whitespace. */ char * strip_beginning_whitespace(char *line) { char buf[BUFSIZE]; char *p; if (line == NULL || strlen(line) == 0) return(line); strcpy(buf, line); p = &buf[0]; while (*p == ' ' || *p == '\t') p++; strcpy(line, p); return(line); } /* * End line at first comma found. */ char * strip_comma(char *line) { char *p; if ((p = strstr(line, ","))) *p = NULLCHAR; return(line); } /* * Strip the 0x from the beginning of a hexadecimal value string. */ char * strip_hex(char *line) { if (STRNEQ(line, "0x")) shift_string_left(line, 2); return(line); } /* * Turn a string into upper-case. */ char * upper_case(const char *s, char *buf) { const char *p1; char *p2; p1 = s; p2 = buf; while (*p1) { *p2 = toupper(*p1); p1++, p2++; } *p2 = NULLCHAR; return(buf); } /* * Return pointer to first non-space/tab in a string. */ char * first_nonspace(char *s) { return(s + strspn(s, " \t")); } /* * Return pointer to first space/tab in a string. If none are found, * return a pointer to the string terminating NULL. */ char * first_space(char *s) { return(s + strcspn(s, " \t")); } /* * Replace the first space/tab found in a string with a NULL character. */ char * null_first_space(char *s) { char *p1; p1 = first_space(s); if (*p1) *p1 = NULLCHAR; return s; } /* * Replace any instances of the characters in string c that are found in * string s with the character passed in r. 
*/ char * replace_string(char *s, char *c, char r) { int i, j; for (i = 0; s[i]; i++) { for (j = 0; c[j]; j++) { if (s[i] == c[j]) s[i] = r; } } return s; } void string_insert(char *insert, char *where) { char *p; p = GETBUF(strlen(insert) + strlen(where) + 1); sprintf(p, "%s%s", insert, where); strcpy(where, p); FREEBUF(p); } /* * Find the rightmost instance of a substring in a string. */ char * strstr_rightmost(char *s, char *lookfor) { char *next, *last, *p; for (p = s, last = NULL; *p; p++) { if (!(next = strstr(p, lookfor))) break; last = p = next; } return last; } /* * Prints a string verbatim, allowing strings with % signs to be displayed * without printf conversions. */ void print_verbatim(FILE *filep, char *line) { int i; for (i = 0; i < strlen(line); i++) { fputc(line[i], filep); fflush(filep); } } char * fixup_percent(char *s) { char *p1; if ((p1 = strstr(s, "%")) == NULL) return s; s[strlen(s)+1] = NULLCHAR; memmove(p1+1, p1, strlen(p1)); *p1 = '%'; return s; } /* * Convert an indeterminate number string to either a hexadecimal or decimal * long value. Translate with a bias towards decimal unless HEX_BIAS is set. 
*/ ulong stol(char *s, int flags, int *errptr) { if ((flags & HEX_BIAS) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); else { if (decimal(s, 0)) return(dtol(s, flags, errptr)); else if (hexadecimal(s, 0)) return(htol(s, flags, errptr)); } if (!(flags & QUIET)) error(INFO, "not a valid number: %s\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } ulonglong stoll(char *s, int flags, int *errptr) { if ((flags & HEX_BIAS) && hexadecimal(s, 0)) return(htoll(s, flags, errptr)); else { if (decimal(s, 0)) return(dtoll(s, flags, errptr)); else if (hexadecimal(s, 0)) return(htoll(s, flags, errptr)); } if (!(flags & QUIET)) error(INFO, "not a valid number: %s\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Append a two-character string to a number to make 1, 2, 3 and 4 into * 1st, 2nd, 3rd, 4th, and so on... */ char * ordinal(ulong val, char *buf) { char *p1; sprintf(buf, "%ld", val); p1 = &buf[strlen(buf)-1]; switch (*p1) { case '1': strcat(buf, "st"); break; case '2': strcat(buf, "nd"); break; case '3': strcat(buf, "rd"); break; default: strcat(buf, "th"); break; } return buf; } /* * Convert a string into: * * 1. an evaluated expression if it's enclosed within parentheses. * 2. to a decimal value if the string is all decimal characters. * 3. to a hexadecimal value if the string is all hexadecimal characters. * 4. to a symbol value if the string is a known symbol. * * If HEX_BIAS is set, pass the value on to htol(). 
*/ ulong convert(char *s, int flags, int *errptr, ulong numflag) { struct syment *sp; if ((numflag & NUM_EXPR) && can_eval(s)) return(eval(s, flags, errptr)); if ((flags & HEX_BIAS) && (numflag & NUM_HEX) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); else { if ((numflag & NUM_DEC) && decimal(s, 0)) return(dtol(s, flags, errptr)); if ((numflag & NUM_HEX) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); } if ((sp = symbol_search(s))) return(sp->value); error(INFO, "cannot convert \"%s\"\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Convert a string to a hexadecimal long value. */ ulong htol(char *s, int flags, int *errptr) { long i, j; ulong n; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto htol_error; } if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > MAX_HEXADDR_STRLEN) { if (!(flags & QUIET)) error(INFO, "input string too large: \"%s\" (%d vs %d)\n", s, strlen(s), MAX_HEXADDR_STRLEN); goto htol_error; } for (n = i = 0; s[i] != 0; i++) { switch (s[i]) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': j = (s[i] - 'a') + 10; break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': j = (s[i] - 'A') + 10; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': j = s[i] - '0'; break; case 'x': case 'X': case 'h': continue; default: if (!(flags & QUIET)) error(INFO, "invalid input: \"%s\"\n", s); goto htol_error; } n = (16 * n) + j; } return(n); htol_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return BADADDR; } /* * Convert a string to a hexadecimal unsigned long long value. 
*/ ulonglong htoll(char *s, int flags, int *errptr) { long i, j; ulonglong n; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto htoll_error; } if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > LONG_LONG_PRLEN) { if (!(flags & QUIET)) error(INFO, "input string too large: \"%s\" (%d vs %d)\n", s, strlen(s), LONG_LONG_PRLEN); goto htoll_error; } for (n = i = 0; s[i] != 0; i++) { switch (s[i]) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': j = (s[i] - 'a') + 10; break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': j = (s[i] - 'A') + 10; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': j = s[i] - '0'; break; case 'x': case 'X': case 'h': continue; default: if (!(flags & QUIET)) error(INFO, "invalid input: \"%s\"\n", s); goto htoll_error; } n = (16 * n) + j; } return(n); htoll_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Convert a string to a decimal long value. */ ulong dtol(char *s, int flags, int *errptr) { ulong retval; char *p, *orig; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtol_error; } if (strlen(s) == 0) goto dtol_error; p = orig = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0') { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", orig, s[j]); goto dtol_error; } else if (sscanf(s, "%lu", &retval) != 1) { if (!(flags & QUIET)) error(INFO, "invalid expression\n"); goto dtol_error; } return(retval); dtol_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Convert a string to a decimal long value. 
*/ ulonglong dtoll(char *s, int flags, int *errptr) { ulonglong retval; char *p, *orig; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtoll_error; } if (strlen(s) == 0) goto dtoll_error; p = orig = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0') { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", orig, s[j]); goto dtoll_error; } else if (sscanf(s, "%llu", &retval) != 1) { if (!(flags & QUIET)) error(INFO, "invalid expression\n"); goto dtoll_error; } return (retval); dtoll_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return ((ulonglong)(-1)); } /* * Convert a string to a decimal integer value. */ unsigned int dtoi(char *s, int flags, int *errptr) { unsigned int retval; char *p; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtoi_error; } p = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0' || (sscanf(s, "%d", (int *)&retval) != 1)) { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", s, s[j]); goto dtoi_error; } return(retval); dtoi_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return((unsigned int)(-1)); } /* * Determine whether a string contains only decimal characters. * If count is non-zero, limit the search to count characters. 
*/ int decimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; p && *p; p++) { switch(*p) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': digits++; case ' ': break; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Extract a hexadecimal number from a string. If first_instance is FALSE, * and two possibilities are found, a fatal error results. */ int extract_hex(char *s, ulong *result, char stripchar, ulong first_instance) { int i, found; char *arglist[MAXARGS]; int argc; ulong value; char *buf; buf = GETBUF(strlen(s) + 1); strcpy(buf, s); argc = parse_line(buf, arglist); for (i = found = value = 0; i < argc; i++) { if (stripchar) strip_ending_char(arglist[i], stripchar); if (hexadecimal(arglist[i], 0)) { if (found) { FREEBUF(buf); error(FATAL, "two hexadecimal args in: \"%s\"\n", strip_linefeeds(s)); } value = htol(arglist[i], FAULT_ON_ERROR, NULL); found = TRUE; if (first_instance) break; } } FREEBUF(buf); if (found) { *result = value; return TRUE; } return FALSE; } /* * Determine whether a string contains only ASCII characters. */ int ascii_string(char *s) { char *p; for (p = &s[0]; *p; p++) { if (!ascii(*p)) return FALSE; } return TRUE; } /* * Check whether a string contains only printable ASCII characters. */ int printable_string(char *s) { char *p; for (p = &s[0]; *p; p++) { if (!isprint(*p)) return FALSE; } return TRUE; } /* * Determine whether a string contains only hexadecimal characters. * If count is non-zero, limit the search to count characters. 
*/ int hexadecimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': digits++; case 'x': case 'X': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Determine whether a string contains only hexadecimal characters. * and cannot be construed as a decimal number. * If count is non-zero, limit the search to count characters. */ int hexadecimal_only(char *s, int count) { char *p; int cnt, only; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; only = 0; for (p = &s[0]; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'x': case 'X': only++; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return only; } /* * Clean a command argument that has an obvious but ignorable error. * The first one is an attached comma to a number, that usually is the * result of a cut-and-paste of an address from a structure display. * The second on is an attached colon to a number, usually from a * cut-and-paste of a memory dump. * Add more when they become annoynance. * * It presumes args[optind] is the argument being tinkered with, and * always returns TRUE for convenience of use. 
*/ int clean_arg(void) { char buf[BUFSIZE]; if (LASTCHAR(args[optind]) == ',' || LASTCHAR(args[optind]) == ':') { strcpy(buf, args[optind]); LASTCHAR(buf) = NULLCHAR; if (IS_A_NUMBER(buf)) LASTCHAR(args[optind]) = NULLCHAR; } return TRUE; } /* * Translate a hexadecimal string into its ASCII components. */ void cmd_ascii(void) { int i; ulonglong value; char *s; int c, prlen, bytes; optind = 1; if (!args[optind]) { fprintf(fp, "\n"); fprintf(fp, " 0 1 2 3 4 5 6 7\n"); fprintf(fp, " +-------------------------------\n"); fprintf(fp, " 0 | NUL DLE SP 0 @ P ' p\n"); fprintf(fp, " 1 | SOH DC1 ! 1 A Q a q\n"); fprintf(fp, " 2 | STX DC2 %c 2 B R b r\n", 0x22); fprintf(fp, " 3 | ETX DC3 # 3 C S c s\n"); fprintf(fp, " 4 | EOT DC4 $ 4 D T d t\n"); fprintf(fp, " 5 | ENQ NAK %c 5 E U e u\n", 0x25); fprintf(fp, " 6 | ACK SYN & 6 F V f v\n"); fprintf(fp, " 7 | BEL ETB ` 7 G W g w\n"); fprintf(fp, " 8 | BS CAN ( 8 H X h x\n"); fprintf(fp, " 9 | HT EM ) 9 I Y i y\n"); fprintf(fp, " A | LF SUB * : J Z j z\n"); fprintf(fp, " B | VT ESC + ; K [ k {\n"); fprintf(fp, " C | FF FS , < L %c l |\n", 0x5c); fprintf(fp, " D | CR GS _ = M ] m }\n"); fprintf(fp, " E | SO RS . > N ^ n ~\n"); fprintf(fp, " F | SI US / ? 
O - o DEL\n"); fprintf(fp, "\n"); return; } while (args[optind]) { s = args[optind]; if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > LONG_PRLEN) { prlen = LONG_LONG_PRLEN; bytes = sizeof(long long); } else { prlen = LONG_PRLEN; bytes = sizeof(long); } value = htoll(s, FAULT_ON_ERROR, NULL); fprintf(fp, "%.*llx: ", prlen, value); for (i = 0; i < bytes; i++) { c = (value >> (8*i)) & 0xff; if ((c >= 0x20) && (c < 0x7f)) { fprintf(fp, "%c", (char)c); continue; } if (c > 0x7f) { fprintf(fp, "<%02x>", c); continue; } switch (c) { case 0x0: fprintf(fp, ""); break; case 0x1: fprintf(fp, ""); break; case 0x2: fprintf(fp, ""); break; case 0x3: fprintf(fp, ""); break; case 0x4: fprintf(fp, ""); break; case 0x5: fprintf(fp, ""); break; case 0x6: fprintf(fp, ""); break; case 0x7: fprintf(fp, ""); break; case 0x8: fprintf(fp, ""); break; case 0x9: fprintf(fp, ""); break; case 0xa: fprintf(fp, ""); break; case 0xb: fprintf(fp, ""); break; case 0xc: fprintf(fp, ""); break; case 0xd: fprintf(fp, ""); break; case 0xe: fprintf(fp, ""); break; case 0xf: fprintf(fp, ""); break; case 0x10: fprintf(fp, ""); break; case 0x11: fprintf(fp, ""); break; case 0x12: fprintf(fp, ""); break; case 0x13: fprintf(fp, ""); break; case 0x14: fprintf(fp, ""); break; case 0x15: fprintf(fp, ""); break; case 0x16: fprintf(fp, ""); break; case 0x17: fprintf(fp, ""); break; case 0x18: fprintf(fp, ""); break; case 0x19: fprintf(fp, ""); break; case 0x1a: fprintf(fp, ""); break; case 0x1b: fprintf(fp, ""); break; case 0x1c: fprintf(fp, ""); break; case 0x1d: fprintf(fp, ""); break; case 0x1e: fprintf(fp, ""); break; case 0x1f: fprintf(fp, ""); break; case 0x7f: fprintf(fp, ""); break; } } fprintf(fp, "\n"); optind++; } } /* * Counts number of leading whitespace characters in a string. */ int count_leading_spaces(char *s) { return (strspn(s, " \t")); } /* * Prints the requested number of spaces. 
*/ void pad_line(FILE *filep, int cnt, char c) { int i; for (i = 0; i < cnt; i++) fputc(c, filep); } /* * Returns appropriate number of inter-field spaces in a usable string. * MINSPACE is defined as -100, but implies the minimum space between two * fields. Currently this can be either one or two spaces, depending upon * the architecture. Since the mininum space must be at least 1, MINSPACE, * MINSPACE-1 and MINSPACE+1 are all valid, special numbers. Otherwise * the space count must be greater than or equal to 0. * * If the cnt request is greater than SPACES, a dynamic buffer is * allocated, and normal buffer garbage collection will return it * back to the pool. */ char * space(int cnt) { #define SPACES 40 static char spacebuf[SPACES+1] = { 0 }; int i; char *bigspace; if (cnt > SPACES) { bigspace = GETBUF(cnt); for (i = 0; i < cnt; i++) bigspace[i] = ' '; bigspace[i] = NULLCHAR; return bigspace; } if (!strlen(spacebuf)) { for (i = 0; i < SPACES; i++) spacebuf[i] = ' '; spacebuf[i] = NULLCHAR; } if (cnt < (MINSPACE-1)) error(FATAL, "illegal spacing request: %d\n", cnt); if ((cnt > MINSPACE+1) && (cnt < 0)) error(FATAL, "illegal spacing request\n"); switch (cnt) { case (MINSPACE-1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES]); /* NULL */ else return (&spacebuf[SPACES-1]); /* 1 space */ case MINSPACE: if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-1]); /* 1 space */ else return (&spacebuf[SPACES-2]); /* 2 spaces */ case (MINSPACE+1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-2]); /* 2 spaces */ else return (&spacebuf[SPACES-3]); /* 3 spaces */ default: return (&spacebuf[SPACES-cnt]); /* as requested */ } } /* * Determine whether substring s1, with length len, and contained within * string s, is surrounded by characters. If len is 0, calculate * it. 
*/ int bracketed(char *s, char *s1, int len) { char *s2; if (!len) { if (!(s2 = strstr(s1, ">"))) return FALSE; len = s2-s1; } if (((s1-s) < 1) || (*(s1-1) != '<') || ((s1+len) >= &s[strlen(s)]) || (*(s1+len) != '>')) return FALSE; return TRUE; } /* * Counts the number of a specified character in a string. */ int count_chars(char *s, char c) { char *p; int count; if (!s) return 0; count = 0; for (p = s; *p; p++) { if (*p == c) count++; } return count; } /* * Counts the number of a specified characters in a buffer. */ long count_buffer_chars(char *bufptr, char c, long len) { long i, cnt; for (i = cnt = 0; i < len; i++, bufptr++) { if (*bufptr == c) cnt++; } return cnt; } /* * Concatenates the tokens in the global args[] array into one string, * separating each token with one space. If the no_options flag is set, * don't include any args beginning with a dash character. */ char * concat_args(char *buf, int arg, int no_options) { int i; BZERO(buf, BUFSIZE); for (i = arg; i < argcnt; i++) { if (no_options && STRNEQ(args[i], "-")) continue; strcat(buf, args[i]); strcat(buf, " "); } return(strip_ending_whitespace(buf)); } /* * Shifts the contents of a string to the left by cnt characters, * disposing the leftmost characters. */ char * shift_string_left(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s, s+cnt, (origlen-cnt)); *(s+(origlen-cnt)) = NULLCHAR; return(s); } /* * Shifts the contents of a string to the right by cnt characters, * inserting space characters. (caller confirms space is available) */ char * shift_string_right(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s+cnt, s, origlen); s[origlen+cnt] = NULLCHAR; return(memset(s, ' ', cnt)); } /* * Create a string in a buffer of a given size, centering, or justifying * left or right as requested. If the opt argument is used, then the string * is created with its string/integer value. 
If opt is NULL, then the
 * string is already in contained in string s (not justified).  Note that
 * flag LONGLONG_HEX implies that opt is a ulonglong pointer to the
 * actual value.
 */
char *
mkstring(char *s, int size, ulong flags, const char *opt)
{
	int len;
	int extra;
	int left;
	int right;

	/*
	 *  The value-bearing flags are mutually exclusive: "opt" carries
	 *  either a string pointer, an integer value cast into the pointer
	 *  itself, or (LONGLONG_HEX only) a pointer to the ulonglong value.
	 */
	switch (flags & (LONG_DEC|SLONG_DEC|LONG_HEX|INT_HEX|INT_DEC|LONGLONG_HEX|ZERO_FILL))
	{
	case LONG_DEC:
		sprintf(s, "%lu", (ulong)opt);
		break;
	case SLONG_DEC:
		sprintf(s, "%ld", (ulong)opt);
		break;
	case LONG_HEX:
		sprintf(s, "%lx", (ulong)opt);
		break;
	case (LONG_HEX|ZERO_FILL):
		/* zero-fill width tracks the platform's address print length */
		if (VADDR_PRLEN == 8)
			sprintf(s, "%08lx", (ulong)opt);
		else if (VADDR_PRLEN == 16)
			sprintf(s, "%016lx", (ulong)opt);
		break;
	case INT_DEC:
		sprintf(s, "%u", (uint)((ulong)opt));
		break;
	case INT_HEX:
		sprintf(s, "%x", (uint)((ulong)opt));
		break;
	case LONGLONG_HEX:
		sprintf(s, "%llx", *((ulonglong *)opt));
		break;
	default:
		if (opt)
			strcpy(s, opt);
		break;
	}

	/*
	 * At this point, string s has the string to be justified,
	 * and has room to work with.  The relevant flags from this
	 * point on are of CENTER, LJUST and RJUST.  If the length
	 * of string s is already larger than the requested size,
	 * just return it as is.
	 */
	len = strlen(s);
	if (size <= len)
		return (s);

	extra = size - len;

	if (flags & CENTER) {
		/*
		 * If absolute centering is not possible, justify the
		 * string as requested -- or to the left if no justify
		 * argument was passed in.
		 */
		if (extra % 2) {
			switch (flags & (LJUST|RJUST))
			{
			default:
			case LJUST:
				right = (extra/2) + 1;
				left = extra/2;
				break;
			case RJUST:
				right = extra/2;
				left = (extra/2) + 1;
				break;
			}
		} else
			left = right = extra/2;

		shift_string_right(s, left);
		len = strlen(s);
		memset(s + len, ' ', right);
		s[len + right] = NULLCHAR;

		return (s);
	}

	if (flags & LJUST) {
		len = strlen(s);
		memset(s + len, ' ', extra);
		s[len + extra] = NULLCHAR;
	} else if (flags & RJUST)
		shift_string_right(s, extra);

	return (s);
}

/*
 * Prints the requested number of BACKSPACE characters.
*/ void backspace(int cnt) { int i; for (i = 0; i < cnt; i++) fprintf(fp, "\b"); } /* * Set/display process context or internal variables. Processes are set * by their task or PID number, or to the panic context with the -p flag. * Internal variables may be viewed or changed, depending whether an argument * follows the variable name. If no arguments are entered, the current * process context is dumped. The current set of variables and their * acceptable settings are: * * debug "on", "off", or any number. "on" sets it to a value of 1. * hash "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * scroll "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * silent "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * refresh "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * sym regular filename * console device filename * radix 10 or 16 * core (no arg) drop core when error() is called. * vi (no arg) set editing mode to vi (from .rc file only). * emacs (no arg) set editing mode to emacs (from .rc file only). * namelist kernel name (from .rc file only). * dumpfile dumpfile name (from .rc file only). * * gdb variable settings not changeable by gdb's "set" command: * * print_max value (default is 200). */ void cmd_set(void) { int i, c; ulong value; int cpu, runtime, from_rc_file; char buf[BUFSIZE]; char *extra_message; struct task_context *tc; struct syment *sp; #define defer() do { } while (0) #define already_done() do { } while (0) #define ignore() do { } while (0) extra_message = NULL; runtime = pc->flags & RUNTIME ? TRUE : FALSE; from_rc_file = pc->curcmd_flags & FROM_RCFILE ? 
TRUE : FALSE; while ((c = getopt(argcnt, args, "pvc:a:")) != EOF) { switch(c) { case 'c': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) { error(INFO, "not allowed on a live system\n"); argerrs++; break; } cpu = dtoi(optarg, FAULT_ON_ERROR, NULL); set_cpu(cpu); return; case 'p': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) { set_context(tt->this_task, NO_PID, TRUE); show_context(CURRENT_CONTEXT()); return; } if (!tt->panic_task) { error(INFO, "no panic task found!\n"); return; } set_context(tt->panic_task, NO_PID, TRUE); show_context(CURRENT_CONTEXT()); return; case 'v': if (!runtime) return; show_options(); return; case 'a': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) error(FATAL, "-a option not allowed on live systems\n"); switch (str_to_context(optarg, &value, &tc)) { case STR_PID: if ((i = TASKS_PER_PID(value)) > 1) error(FATAL, "pid %d has %d tasks: " "use a task address\n", value, i); break; case STR_TASK: break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); } cpu = tc->processor; tt->active_set[cpu] = tc->task; if (tt->panic_threads[cpu]) tt->panic_threads[cpu] = tc->task; fprintf(fp, "\"%s\" task %lx has been marked as the active task on cpu %d\n", tc->comm, tc->task, cpu); return; default: argerrs++; break; } } if (argerrs) { if (runtime) cmd_usage(pc->curcmd, SYNOPSIS); return; } if (!args[optind]) { if (XEN_HYPER_MODE()) error(INFO, "requires an option with the Xen hypervisor\n"); else if (pc->flags & MINIMAL_MODE) show_options(); else if (runtime) show_context(CURRENT_CONTEXT()); return; } while (args[optind]) { if (STREQ(args[optind], "debug")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) pc->debug = 1; else if (STREQ(args[optind], "off")) pc->debug = 0; else if 
(IS_A_NUMBER(args[optind])) pc->debug = stol(args[optind], FAULT_ON_ERROR, NULL); else goto invalid_set_command; } if (runtime) fprintf(fp, "debug: %ld\n", pc->debug); set_lkcd_debug(pc->debug); set_vas_debug(pc->debug); return; } else if (STREQ(args[optind], "hash")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) pc->flags |= HASH; else if (STREQ(args[optind], "off")) pc->flags &= ~HASH; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= HASH; else pc->flags &= ~HASH; } else goto invalid_set_command; } if (runtime) fprintf(fp, "hash: %s\n", pc->flags & HASH ? "on" : "off"); return; } else if (STREQ(args[optind], "unwind")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) { if ((kt->flags & DWARF_UNWIND_CAPABLE) || !runtime) { kt->flags |= DWARF_UNWIND; kt->flags &= ~NO_DWARF_UNWIND; } } else if (STREQ(args[optind], "off")) { kt->flags &= ~DWARF_UNWIND; if (!runtime) kt->flags |= NO_DWARF_UNWIND; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { if ((kt->flags & DWARF_UNWIND_CAPABLE) || !runtime) { kt->flags |= DWARF_UNWIND; kt->flags &= ~NO_DWARF_UNWIND; } } else { kt->flags &= ~DWARF_UNWIND; if (!runtime) kt->flags |= NO_DWARF_UNWIND; } } else goto invalid_set_command; } if (runtime) fprintf(fp, "unwind: %s\n", kt->flags & DWARF_UNWIND ? 
"on" : "off"); return; } else if (STREQ(args[optind], "refresh")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) tt->flags |= TASK_REFRESH; else if (STREQ(args[optind], "off")) { tt->flags &= ~TASK_REFRESH; if (!runtime) tt->flags |= TASK_REFRESH_OFF; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) tt->flags |= TASK_REFRESH; else { tt->flags &= ~TASK_REFRESH; if (!runtime) tt->flags |= TASK_REFRESH_OFF; } } else goto invalid_set_command; } if (runtime) fprintf(fp, "refresh: %s\n", tt->flags & TASK_REFRESH ? "on" : "off"); return; } else if (STREQ(args[optind], "gdb")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) { if (pc->flags & MINIMAL_MODE) goto invalid_set_command; else pc->flags2 |= GDB_CMD_MODE; } else if (STREQ(args[optind], "off")) pc->flags2 &= ~GDB_CMD_MODE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { if (pc->flags & MINIMAL_MODE) goto invalid_set_command; else pc->flags2 |= GDB_CMD_MODE; } else pc->flags2 &= ~GDB_CMD_MODE; } else goto invalid_set_command; set_command_prompt(pc->flags2 & GDB_CMD_MODE ? "gdb> " : NULL); } if (runtime) fprintf(fp, "gdb: %s\n", pc->flags2 & GDB_CMD_MODE ? 
"on" : "off"); return; } else if (STREQ(args[optind], "scroll")) { if (args[optind+1] && pc->scroll_command) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "on")) pc->flags |= SCROLL; else if (STREQ(args[optind], "off")) pc->flags &= ~SCROLL; else if (STREQ(args[optind], "more")) pc->scroll_command = SCROLL_MORE; else if (STREQ(args[optind], "less")) pc->scroll_command = SCROLL_LESS; else if (STREQ(args[optind], "CRASHPAGER")) { if (CRASHPAGER_valid()) pc->scroll_command = SCROLL_CRASHPAGER; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= SCROLL; else pc->flags &= ~SCROLL; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "scroll: %s ", pc->flags & SCROLL ? "on" : "off"); switch (pc->scroll_command) { case SCROLL_LESS: fprintf(fp, "(/usr/bin/less)\n"); break; case SCROLL_MORE: fprintf(fp, "(/bin/more)\n"); break; case SCROLL_NONE: fprintf(fp, "(none)\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); break; } } return; } else if (STREQ(args[optind], "silent")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) { pc->flags |= SILENT; pc->flags &= ~SCROLL; } else if (STREQ(args[optind], "off")) pc->flags &= ~SILENT; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { pc->flags |= SILENT; pc->flags &= ~SCROLL; } else pc->flags &= ~SILENT; } else goto invalid_set_command; if (!(pc->flags & SILENT)) fprintf(fp, "silent: off\n"); } else if (runtime && !(pc->flags & SILENT)) fprintf(fp, "silent: off\n"); return; } else if (STREQ(args[optind], "console")) { int assignment; if (args[optind+1]) { create_console_device(args[optind+1]); optind++; assignment = optind; } else assignment = 0; if (runtime) { fprintf(fp, "console: "); if (pc->console) fprintf(fp, "%s\n", pc->console); else { if (assignment) fprintf(fp, "assignment to %s failed\n", args[assignment]); else 
fprintf(fp, "not set\n"); } } return; } else if (STREQ(args[optind], "core")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) pc->flags |= DROP_CORE; else if (STREQ(args[optind], "off")) pc->flags &= ~DROP_CORE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= DROP_CORE; else pc->flags &= ~DROP_CORE; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "core: %s on error message)\n", pc->flags & DROP_CORE ? "on (drop core" : "off (do NOT drop core"); } return; } else if (STREQ(args[optind], "radix")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (STREQ(args[optind], "10") || STRNEQ(args[optind], "dec") || STRNEQ(args[optind], "ten")) pc->output_radix = 10; else if (STREQ(args[optind], "16") || STRNEQ(args[optind], "hex") || STRNEQ(args[optind], "six")) pc->output_radix = 16; else goto invalid_set_command; } if (runtime) { sprintf(buf, "set output-radix %d", pc->output_radix); gdb_pass_through(buf, NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: %d (%s)\n", pc->output_radix, pc->output_radix == 10 ? 
"decimal" : "hex"); } return; } else if (STREQ(args[optind], "hex")) { if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (runtime) { pc->output_radix = 16; gdb_pass_through("set output-radix 16", NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: 16 (hex)\n"); } return; } else if (STREQ(args[optind], "dec")) { if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (runtime) { pc->output_radix = 10; gdb_pass_through("set output-radix 10", NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: 10 (decimal)\n"); } return; } else if (STREQ(args[optind], "edit")) { if (args[optind+1]) { if (runtime && !from_rc_file) error(FATAL, "cannot change editing mode during runtime\n"); optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "vi")) pc->editing_mode = "vi"; else if (STREQ(args[optind], "emacs")) pc->editing_mode = "emacs"; else goto invalid_set_command; } if (runtime) fprintf(fp, "edit: %s\n", pc->editing_mode); return; } else if (STREQ(args[optind], "vi")) { if (runtime) { if (!from_rc_file) error(FATAL, "cannot change editing mode during runtime\n"); fprintf(fp, "edit: %s\n", pc->editing_mode); } else pc->editing_mode = "vi"; return; } else if (STREQ(args[optind], "emacs")) { if (runtime) { if (!from_rc_file) error(FATAL, "cannot change %s editing mode during runtime\n", pc->editing_mode); fprintf(fp, "edit: %s\n", pc->editing_mode); } else pc->editing_mode = "emacs"; return; } else if (STREQ(args[optind], "print_max")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (decimal(args[optind], 0)) *gdb_print_max = atoi(args[optind]); else if (hexadecimal(args[optind], 0)) *gdb_print_max = (unsigned int) htol(args[optind], FAULT_ON_ERROR, NULL); else goto invalid_set_command; } if (runtime) fprintf(fp, "print_max: %d\n", *gdb_print_max); return; } else if (STREQ(args[optind], "scope")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (can_eval(args[optind])) value = eval(args[optind], 
FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) value = htol(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) value = sp->value; else goto invalid_set_command; if (runtime) { if (gdb_set_crash_scope(value, args[optind])) pc->scope = value; else return; } } if (runtime) { fprintf(fp, "scope: %lx ", pc->scope); if (pc->scope) fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0)); else fprintf(fp, "(not set)\n"); } return; } else if (STREQ(args[optind], "null-stop")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (STREQ(args[optind], "on")) *gdb_stop_print_at_null = 1; else if (STREQ(args[optind], "off")) *gdb_stop_print_at_null = 0; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) *gdb_stop_print_at_null = 1; else *gdb_stop_print_at_null = 0; } else goto invalid_set_command; } if (runtime) fprintf(fp, "null-stop: %s\n", *gdb_stop_print_at_null ? "on" : "off"); return; } else if (STREQ(args[optind], "print_array")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (STREQ(args[optind], "on")) *gdb_prettyprint_arrays = 1; else if (STREQ(args[optind], "off")) *gdb_prettyprint_arrays = 0; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) *gdb_prettyprint_arrays = 1; else *gdb_prettyprint_arrays = 0; } else goto invalid_set_command; } if (runtime) fprintf(fp, "print_array: %s\n", *gdb_prettyprint_arrays ? 
"on" : "off"); return; } else if (STREQ(args[optind], "namelist")) { optind++; if (!runtime && args[optind]) { if (!is_elf_file(args[optind])) error(FATAL, "%s: not a kernel namelist (from .%src file)\n", args[optind], pc->program_name); if ((pc->namelist = (char *) malloc(strlen(args[optind])+1)) == NULL) { error(INFO, "cannot malloc memory for namelist: %s: %s\n", args[optind], strerror(errno)); } else strcpy(pc->namelist, args[optind]); } if (runtime) fprintf(fp, "namelist: %s\n", pc->namelist); return; } else if (STREQ(args[optind], "free")) { if (!runtime) defer(); else fprintf(fp, "%d pages freed\n", dumpfile_memory(DUMPFILE_FREE_MEM)); return; } else if (STREQ(args[optind], "data_debug")) { pc->flags |= DATADEBUG; return; } else if (STREQ(args[optind], "zero_excluded")) { if (args[optind+1]) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "on")) { *diskdump_flags |= ZERO_EXCLUDED; sadump_set_zero_excluded(); } else if (STREQ(args[optind], "off")) { *diskdump_flags &= ~ZERO_EXCLUDED; sadump_unset_zero_excluded(); } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { *diskdump_flags |= ZERO_EXCLUDED; sadump_set_zero_excluded(); } else { *diskdump_flags &= ~ZERO_EXCLUDED; sadump_unset_zero_excluded(); } } else goto invalid_set_command; } if (runtime) fprintf(fp, "zero_excluded: %s\n", (*diskdump_flags & ZERO_EXCLUDED) || sadump_is_zero_excluded() ? "on" : "off"); return; } else if (STREQ(args[optind], "offline")) { if (args[optind+1]) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "show")) pc->flags2 &= ~OFFLINE_HIDE; else if(STREQ(args[optind], "hide")) pc->flags2 |= OFFLINE_HIDE; else goto invalid_set_command; } if (runtime) fprintf(fp, " offline: %s\n", pc->flags2 & OFFLINE_HIDE ? 
"hide" : "show"); return; } else if (STREQ(args[optind], "redzone")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) pc->flags2 |= REDZONE; else if (STREQ(args[optind], "off")) pc->flags2 &= ~REDZONE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags2 |= REDZONE; else pc->flags2 &= ~REDZONE; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "redzone: %s\n", pc->flags2 & REDZONE ? "on" : "off"); } return; } else if (STREQ(args[optind], "error")) { if (args[optind+1]) { optind++; if (!set_error(args[optind])) return; } if (runtime) { fprintf(fp, "error: %s\n", pc->error_path); } return; } else if (XEN_HYPER_MODE()) { error(FATAL, "invalid argument for the Xen hypervisor\n"); } else if (pc->flags & MINIMAL_MODE) { error(FATAL, "invalid argument in minimal mode\n"); } else if (runtime) { ulong pid, task; switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: pid = value; task = NO_TASK; if (set_context(task, pid, TRUE)) show_context(CURRENT_CONTEXT()); break; case STR_TASK: task = value; pid = NO_PID; if (set_context(task, pid, TRUE)) show_context(CURRENT_CONTEXT()); break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n", args[optind]); break; } } else console("set: ignoring \"%s\"\n", args[optind]); optind++; } return; invalid_set_command: sprintf(buf, "invalid command"); if (!runtime) sprintf(&buf[strlen(buf)], " in .%src file", pc->program_name); strcat(buf, ": "); for (i = 0; i < argcnt; i++) sprintf(&buf[strlen(buf)], "%s ", args[i]); strcat(buf, "\n"); if (extra_message) strcat(buf, extra_message); error(runtime ? FATAL : INFO, buf); #undef defer #undef already_done #undef ignore } /* * Display the set of settable internal variables. */ static void show_options(void) { char buf[BUFSIZE]; fprintf(fp, " scroll: %s ", pc->flags & SCROLL ? 
"on" : "off"); switch (pc->scroll_command) { case SCROLL_LESS: fprintf(fp, "(/usr/bin/less)\n"); break; case SCROLL_MORE: fprintf(fp, "(/bin/more)\n"); break; case SCROLL_NONE: fprintf(fp, "(none)\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); break; } fprintf(fp, " radix: %d (%s)\n", pc->output_radix, pc->output_radix == 10 ? "decimal" : pc->output_radix == 16 ? "hexadecimal" : "unknown"); fprintf(fp, " refresh: %s\n", tt->flags & TASK_REFRESH ? "on" : "off"); fprintf(fp, " print_max: %d\n", *gdb_print_max); fprintf(fp, " print_array: %s\n", *gdb_prettyprint_arrays ? "on" : "off"); fprintf(fp, " console: %s\n", pc->console ? pc->console : "(not assigned)"); fprintf(fp, " debug: %ld\n", pc->debug); fprintf(fp, " core: %s\n", pc->flags & DROP_CORE ? "on" : "off"); fprintf(fp, " hash: %s\n", pc->flags & HASH ? "on" : "off"); fprintf(fp, " silent: %s\n", pc->flags & SILENT ? "on" : "off"); fprintf(fp, " edit: %s\n", pc->editing_mode); fprintf(fp, " namelist: %s\n", pc->namelist); fprintf(fp, " dumpfile: %s\n", pc->dumpfile); fprintf(fp, " unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off"); fprintf(fp, " zero_excluded: %s\n", (*diskdump_flags & ZERO_EXCLUDED) || sadump_is_zero_excluded() ? "on" : "off"); fprintf(fp, " null-stop: %s\n", *gdb_stop_print_at_null ? "on" : "off"); fprintf(fp, " gdb: %s\n", pc->flags2 & GDB_CMD_MODE ? "on" : "off"); fprintf(fp, " scope: %lx ", pc->scope); if (pc->scope) fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0)); else fprintf(fp, "(not set)\n"); fprintf(fp, " offline: %s\n", pc->flags2 & OFFLINE_HIDE ? "hide" : "show"); fprintf(fp, " redzone: %s\n", pc->flags2 & REDZONE ? "on" : "off"); fprintf(fp, " error: %s\n", pc->error_path); } /* * Evaluate an expression, which can consist of a single symbol, single value, * or an expression consisting of two values and an operator. 
If the
 * expression contains redirection characters, the whole expression must
 * be enclosed with parentheses.  The result is printed in decimal, hex,
 * octal and binary.  Input number values can only be hex or decimal, with
 * a bias towards decimal (use 0x when necessary).
 */
void
cmd_eval(void)
{
	int flags;
	int bitflag, longlongflag, longlongflagforce;
	struct number_option nopt;
	char buf1[BUFSIZE];

	/*
	 *  getopt() is not used to avoid confusion with minus sign.
	 *  Recognized leading flags: -l (force long long display) and
	 *  -b (display the set bits), in any combination/order.
	 */
	optind = 1;
	bitflag = 0;
	longlongflag = longlongflagforce = 0;
	BZERO(&nopt, sizeof(struct number_option));

	if (STREQ(args[optind], "-lb") || STREQ(args[optind], "-bl")) {
		longlongflagforce++;
		bitflag++;
		optind++;
	} else if (STREQ(args[optind], "-l")) {
		longlongflagforce++;
		optind++;
		if (STREQ(args[optind], "-b") && args[optind+1]) {
			optind++;
			bitflag++;
		}
	} else if (STREQ(args[optind], "-b")) {
		if (STREQ(args[optind+1], "-l")) {
			if (args[optind+2]) {
				bitflag++;
				longlongflagforce++;
				optind += 2;
			} else
				cmd_usage(pc->curcmd, SYNOPSIS);
		} else if (args[optind+1]) {
			bitflag++;
			optind++;
		}
	}

	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* on 32-bit systems, evaluate in long long to avoid truncation */
	longlongflag = BITS32() ? TRUE : FALSE;
	flags = longlongflag ? (LONG_LONG|RETURN_ON_ERROR) : FAULT_ON_ERROR;
	if (!BITS32())
		longlongflagforce = 0;

	BZERO(buf1, BUFSIZE);
	buf1[0] = '(';

	/*
	 *  A parenthesized argument is evaluated as-is; otherwise all
	 *  remaining arguments are concatenated into one "(...)" string.
	 */
	while (args[optind]) {
		if (*args[optind] == '(') {
			if (eval_common(args[optind], flags, NULL, &nopt))
				print_number(&nopt, bitflag, longlongflagforce);
			else
				error(FATAL, "invalid expression: %s\n",
					args[optind]);
			return;
		} else {
			strcat(buf1, args[optind]);
			strcat(buf1, " ");
		}
		optind++;
	}

	clean_line(buf1);
	strcat(buf1, ")");

	if (eval_common(buf1, flags, NULL, &nopt))
		print_number(&nopt, bitflag, longlongflagforce);
	else
		error(FATAL, "invalid expression: %s\n", buf1);
}

/*
 * Pre-check a string for eval-worthiness.  This allows callers to avoid
 * having to encompass a non-whitespace expression with parentheses.
 * Note that the data being evaluated is not error-checked here, but
 * rather that it exists in the proper format.
 */
int
can_eval(char *s)
{
	char *op;
	char *element1, *element2;
	char work[BUFSIZE];

	/*
	 *  If we've got a () pair containing any sort of stuff in between,
	 *  then presume it's eval-able.  It might contain crap, but it
	 *  should be sent to eval() regardless.
	 */
	if ((FIRSTCHAR(s) == '(') &&
	    (count_chars(s, '(') == 1) &&
	    (count_chars(s, ')') == 1) &&
	    (strlen(s) > 2) &&
	    (LASTCHAR(s) == ')'))
		return TRUE;

	/*
	 *  If the string contains any of the operators except the shifters,
	 *  and has any kind of data on either side, it's also eval-able.
	 */
	strcpy(work, s);
	if (!(op = strpbrk(work, "><+-&|*/%^")))
		return FALSE;

	element1 = &work[0];
	*op = NULLCHAR;
	element2 = op+1;

	if (!strlen(element1) || !strlen(element2))
		return FALSE;

	return TRUE;
}

/*
 *  Evaluate an expression involving two values and an operator.
 */
#define OP_ADD   (1)
#define OP_SUB   (2)
#define OP_AND   (3)
#define OP_OR    (4)
#define OP_MUL   (5)
#define OP_DIV   (6)
#define OP_MOD   (7)
#define OP_SL    (8)
#define OP_SR    (9)
#define OP_EXOR  (10)
#define OP_POWER (11)

/*
 *  ulong front-end to eval_common(); on error, either aborts (FAULT_ON_ERROR)
 *  or sets *errptr and returns UNUSED (RETURN_ON_ERROR).
 */
ulong
eval(char *s, int flags, int *errptr)
{
	struct number_option nopt;

	if (eval_common(s, flags, errptr, &nopt)) {
		return (nopt.num);
	} else {
		switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
		{
		case FAULT_ON_ERROR:
			error(FATAL, "invalid expression: %s\n", s);

		case RETURN_ON_ERROR:
			error(INFO, "invalid expression: %s\n", s);
			if (errptr)
				*errptr = TRUE;
			break;
		}
		return UNUSED;
	}
}

/*
 *  ulonglong front-end to eval_common(); forces LONG_LONG evaluation on
 *  32-bit systems.  Error handling matches eval() above.
 */
ulonglong
evall(char *s, int flags, int *errptr)
{
	struct number_option nopt;

	if (BITS32())
		flags |= LONG_LONG;

	if (eval_common(s, flags, errptr, &nopt)) {
		return (nopt.ll_num);
	} else {
		switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
		{
		case FAULT_ON_ERROR:
			error(FATAL, "invalid expression: %s\n", s);

		case RETURN_ON_ERROR:
			error(INFO, "invalid expression: %s\n", s);
			if (errptr)
				*errptr = TRUE;
			break;
		}
		return UNUSED;
	}
}

/*
 *  Common expression evaluator: parses "(value op value)" or a single
 *  value/symbol into np->num (or np->ll_num when LONG_LONG is set).
 *  Returns TRUE on success, FALSE on a malformed expression.
 */
int
eval_common(char *s, int flags, int *errptr, struct number_option *np)
{
	char *p1, *p2;
	char *op, opcode;
	ulong value1;
	ulong value2;
	ulonglong ll_value1;
	ulonglong ll_value2;
	char work[BUFSIZE];
	char *element1;
	char *element2;
	struct syment *sp;

	opcode = 0;
	value1 = value2 = 0;
	ll_value1 = ll_value2 = 0;

	/* if parentheses appear, require exactly one balanced outer pair */
	if (strstr(s, "(") || strstr(s, ")")) {
		p1 = s;
		if (*p1 != '(')
			goto malformed;
		if (LASTCHAR(s) != ')')
			goto malformed;
		p2 = &LASTCHAR(s);
		if (strstr(s, ")") != p2)
			goto malformed;
		strcpy(work, p1+1);
		LASTCHAR(work) = NULLCHAR;
		if (strstr(work, "(") || strstr(work, ")"))
			goto malformed;
	} else
		strcpy(work, s);

	/*
	 *  A leading '-' would be taken as the subtraction operator;
	 *  rewrite "-x" as "0-x".
	 */
	if (work[0] == '-') {
		shift_string_right(work, 1);
		work[0] = '0';
	}

	/* no operator: a single value or symbol */
	if (!(op = strpbrk(work, "#><+-&|*/%^"))) {
		if (calculate(work, &value1, &ll_value1,
		    flags & (HEX_BIAS|LONG_LONG))) {
			if (flags & LONG_LONG) {
				np->ll_num = ll_value1;
				if (BITS32() && (ll_value1 > 0xffffffff))
					np->retflags |= LONG_LONG;
				return TRUE;
			} else {
				np->num = value1;
				return TRUE;
			}
		}
		goto malformed;
	}

	switch (*op)
	{
	case '+':
		opcode = OP_ADD;
		break;
	case '-':
		opcode = OP_SUB;
		break;
	case '&':
		opcode = OP_AND;
		break;
	case '|':
		opcode = OP_OR;
		break;
	case '*':
		opcode = OP_MUL;
		break;
	case '%':
		opcode = OP_MOD;
		break;
	case '/':
		opcode = OP_DIV;
		break;
	case '<':
		/* shift-left requires the two-character "<<" */
		if (*(op+1) != '<')
			goto malformed;
		opcode = OP_SL;
		break;
	case '>':
		/* shift-right requires the two-character ">>" */
		if (*(op+1) != '>')
			goto malformed;
		opcode = OP_SR;
		break;
	case '^':
		opcode = OP_EXOR;
		break;
	case '#':
		opcode = OP_POWER;
		break;
	}

	/* split the work buffer in place at the operator */
	element1 = &work[0];
	*op = NULLCHAR;

	if ((opcode == OP_SL) || (opcode == OP_SR)) {
		*(op+1) = NULLCHAR;
		element2 = op+2;
	} else
		element2 = op+1;

	if (strlen(clean_line(element1)) == 0)
		goto malformed;
	if (strlen(clean_line(element2)) == 0)
		goto malformed;

	/* each operand may be a kernel symbol or a numeric value */
	if ((sp = symbol_search(element1)))
		value1 = ll_value1 = sp->value;
	else {
		if (!calculate(element1, &value1, &ll_value1,
		    flags & (HEX_BIAS|LONG_LONG)))
			goto malformed;
		if (BITS32() && (ll_value1 > 0xffffffff))
			np->retflags |= LONG_LONG;
	}

	if ((sp = symbol_search(element2)))
		value2 = ll_value2 = sp->value;
	else if (!calculate(element2, &value2, &ll_value2,
	    flags & (HEX_BIAS|LONG_LONG)))
		goto malformed;

	/*
	 *  NOTE(review): a zero second operand reaches the '/' and '%'
	 *  operations below unchecked -- confirm callers cannot pass one.
	 */
	if (flags & LONG_LONG) {
		if (BITS32() && (ll_value2 > 0xffffffff))
			np->retflags |= LONG_LONG;
		switch (opcode)
		{
		case OP_ADD:
			np->ll_num = (ll_value1 + ll_value2);
			break;
		case OP_SUB:
			np->ll_num = (ll_value1 - ll_value2);
			break;
		case OP_AND:
			np->ll_num = (ll_value1 & ll_value2);
			break;
		case OP_OR:
			np->ll_num = (ll_value1 | ll_value2);
			break;
		case OP_MUL:
			np->ll_num = (ll_value1 * ll_value2);
			break;
		case OP_DIV:
			np->ll_num = (ll_value1 / ll_value2);
			break;
		case OP_MOD:
			np->ll_num = (ll_value1 % ll_value2);
			break;
		case OP_SL:
			np->ll_num = (ll_value1 << ll_value2);
			break;
		case OP_SR:
			np->ll_num = (ll_value1 >> ll_value2);
			break;
		case OP_EXOR:
			np->ll_num = (ll_value1 ^ ll_value2);
			break;
		case OP_POWER:
			np->ll_num = ll_power(ll_value1, ll_value2);
			break;
		}
	} else {
		switch (opcode)
		{
		case OP_ADD:
			np->num = (value1 + value2);
			break;
		case OP_SUB:
			np->num = (value1 - value2);
			break;
		case OP_AND:
			np->num = (value1 & value2);
			break;
		case OP_OR:
			np->num = (value1 | value2);
			break;
		case OP_MUL:
			np->num = (value1 * value2);
			break;
		case OP_DIV:
			np->num = (value1 / value2);
			break;
		case OP_MOD:
			np->num = (value1 % value2);
			break;
		case OP_SL:
			np->num = (value1 << value2);
			break;
		case OP_SR:
			np->num = (value1 >> value2);
			break;
		case OP_EXOR:
			np->num = (value1 ^ value2);
			break;
		case OP_POWER:
			np->num = power(value1, value2);
			break;
		}
	}

	return TRUE;

malformed:
	return FALSE;
}

/*
 * Take string containing a number, and possibly a multiplier, and calculate
 * its real value.  The allowable multipliers are k, K, m, M, g and G, for
 * kilobytes, megabytes and gigabytes.
 */
int
calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags)
{
	ulong factor, bias;
	int errflag;
	int ones_complement;
	ulong localval;
	ulonglong ll_localval;
	struct syment *sp;

	bias = flags & HEX_BIAS;

	/* a leading '~' requests the one's complement of the result */
	if (*s == '~') {
		ones_complement = TRUE;
		s++;
	} else
		ones_complement = FALSE;

	/* a kernel symbol name resolves to its address */
	if ((sp = symbol_search(s))) {
		if (flags & LONG_LONG) {
			*llvalue = (ulonglong)sp->value;
			if (ones_complement)
				*llvalue = ~(*llvalue);
		} else
			*value = ones_complement ? ~(sp->value) : sp->value;
		return TRUE;
	}

	factor = 1;
	errflag = 0;

	/* strip any trailing K/M/G multiplier suffix (modifies s in place) */
	switch (LASTCHAR(s))
	{
	case 'k':
	case 'K':
		LASTCHAR(s) = NULLCHAR;
		if (IS_A_NUMBER(s))
			factor = 1024;
		else
			return FALSE;
		break;

	case 'm':
	case 'M':
		LASTCHAR(s) = NULLCHAR;
		if (IS_A_NUMBER(s))
			factor = (1024*1024);
		else
			return FALSE;
		break;

	case 'g':
	case 'G':
		LASTCHAR(s) = NULLCHAR;
		if (IS_A_NUMBER(s))
			factor = (1024*1024*1024);
		else
			return FALSE;
		break;

	default:
		if (!IS_A_NUMBER(s))
			return FALSE;
		break;
	}

	if (flags & LONG_LONG) {
		ll_localval = stoll(s, RETURN_ON_ERROR|bias, &errflag);
		if (errflag)
			return FALSE;
		if (ones_complement)
			*llvalue = ~(ll_localval * factor);
		else
			*llvalue = ll_localval * factor;
	} else {
		localval = stol(s, RETURN_ON_ERROR|bias, &errflag);
		if (errflag)
			return FALSE;
		if (ones_complement)
			*value = ~(localval * factor);
		else
			*value = localval * factor;
	}

	return TRUE;
}

/*
 * Print a 32-bit or 64-bit number in hexadecimal, decimal, octal and binary,
 * also showing the bits set if appropriate.
 *
 */
static void
print_number(struct number_option *np, int bitflag, int longlongflagforce)
{
	int i;
	ulong hibit;
	ulonglong ll_hibit;
	int ccnt;
	ulong mask;
	ulonglong ll_mask;
	char *hdr = " bits set: ";
	char buf[BUFSIZE];
	int hdrlen;
	int longlongformat;

	/*
	 *  Decide on the display width: forced by the caller, or -- on
	 *  32-bit systems -- required because the value doesn't fit in
	 *  32 bits; otherwise fold ll_num down into num.
	 */
	longlongformat = longlongflagforce;

	if (!longlongflagforce) {
		if (BITS32()) {
			if (np->retflags & LONG_LONG)
				longlongformat = TRUE;
			if (np->ll_num > 0xffffffff)
				longlongformat = TRUE;
			else
				np->num = (ulong)np->ll_num;
		}
	}

	if (longlongformat) {
		ll_hibit = (ulonglong)(1) << ((sizeof(long long)*8)-1);

		fprintf(fp, "hexadecimal: %llx ", np->ll_num);
		/* annotate exact GB/MB/KB multiples */
		if (np->ll_num >= KILOBYTES(1)) {
			if ((np->ll_num % GIGABYTES(1)) == 0)
				fprintf(fp, "(%lldGB)",
					np->ll_num / GIGABYTES(1));
			else if ((np->ll_num % MEGABYTES(1)) == 0)
				fprintf(fp, "(%lldMB)",
					np->ll_num / MEGABYTES(1));
			else if ((np->ll_num % KILOBYTES(1)) == 0)
				fprintf(fp, "(%lldKB)",
					np->ll_num / KILOBYTES(1));
		}
		fprintf(fp, "\n");
		fprintf(fp, " decimal: %llu ", np->ll_num);
		/* show the signed interpretation of negative values */
		if ((long long)np->ll_num < 0)
			fprintf(fp, "(%lld)\n", (long long)np->ll_num);
		else
			fprintf(fp, "\n");
		fprintf(fp, " octal: %llo\n", np->ll_num);
		fprintf(fp, " binary: ");
		/* emit bits MSB-first by shifting the value left past hibit */
		for (i = 0, ll_mask = np->ll_num;
		     i < (sizeof(long long)*8); i++, ll_mask <<= 1)
			if (ll_mask & ll_hibit)
				fprintf(fp, "1");
			else
				fprintf(fp, "0");
		fprintf(fp, "\n");
	} else {
		hibit = (ulong)(1) << ((sizeof(long)*8)-1);

		fprintf(fp, "hexadecimal: %lx ", np->num);
		if (np->num >= KILOBYTES(1)) {
			if ((np->num % GIGABYTES(1)) == 0)
				fprintf(fp, "(%ldGB)",
					np->num / GIGABYTES(1));
			else if ((np->num % MEGABYTES(1)) == 0)
				fprintf(fp, "(%ldMB)",
					np->num / MEGABYTES(1));
			else if ((np->num % KILOBYTES(1)) == 0)
				fprintf(fp, "(%ldKB)",
					np->num / KILOBYTES(1));
		}
		fprintf(fp, "\n");
		fprintf(fp, " decimal: %lu ", np->num);
		if ((long)np->num < 0)
			fprintf(fp, "(%ld)\n", (long)np->num);
		else
			fprintf(fp, "\n");
		fprintf(fp, " octal: %lo\n", np->num);
		fprintf(fp, " binary: ");
		for (i = 0, mask = np->num;
		     i < (sizeof(long)*8); i++, mask <<= 1)
			if (mask & hibit)
				fprintf(fp, "1");
			else
				fprintf(fp, "0");
		fprintf(fp, "\n");
	}

	if (!bitflag)
		return;

	/* list the set bit positions, wrapping at ~77 columns */
	hdrlen = strlen(hdr);
	ccnt = hdrlen;
	fprintf(fp, "%s", hdr);

	if (longlongformat) {
		for (i = 63; i >= 0; i--) {
			ll_mask = (ulonglong)(1) << i;
			if (np->ll_num & ll_mask) {
				sprintf(buf, "%d ", i);
				fprintf(fp, "%s", buf);
				ccnt += strlen(buf);
				if (ccnt >= 77) {
					fprintf(fp, "\n");
					INDENT(strlen(hdr));
					ccnt = hdrlen;
				}
			}
		}
	} else {
		for (i = BITS()-1; i >= 0; i--) {
			mask = (ulong)(1) << i;
			if (np->num & mask) {
				sprintf(buf, "%d ", i);
				fprintf(fp, "%s", buf);
				ccnt += strlen(buf);
				if (ccnt >= 77) {
					fprintf(fp, "\n");
					INDENT(strlen(hdr));
					ccnt = hdrlen;
				}
			}
		}
	}

	fprintf(fp, "\n");
}

/*
 * Display the contents of a linked list.  Minimum requirements are a starting
 * address, typically of a structure which contains the "next" list entry at
 * some offset into the structure.  The default offset is zero bytes, and need
 * not be entered if that's the case.  Otherwise a number argument that's not
 * a kernel
 * virtual address will be understood to be the offset.
 * Alternatively the offset may be entered in "struct.member" format.  Each
 * item in the list is dumped, and the list will be considered terminated upon
 * encountering a "next" value that is:
 *
 *     a NULL pointer.
 *     a pointer to the starting address.
 *     a pointer to the entry pointed to by the starting address.
 *     a pointer to the structure itself.
 *     a pointer to the value specified with the "-e ending_addr" option.
 *
 * If the structures are linked using list_head structures, the -h or -H
 * options must be used.  In that case, the "start" address is:
 *     a pointer to the structure that contains the list_head structure (-h),
 *     or a pointer to a LIST_HEAD() structure (-H).
 *
 * Given that the contents of the structures containing the next pointers
 * often contain useful data, the "-s structname" also prints each structure
 * in the list.
 *
 * By default, the list members are hashed to guard against duplicate entries
 * causing the list to wrap back upon itself.
* * WARNING: There's an inordinate amount of work parsing arguments below * in order to maintain backwards compatibility re: not having to use -o, * which gets sticky with zero-based kernel virtual address space. */ void cmd_list(void) { int c; long head_member_offset = 0; /* offset for head like denty.d_subdirs */ struct list_data list_data, *ld; struct datatype_member struct_member, *sm; struct syment *sp; ulong value, struct_list_offset; sm = &struct_member; ld = &list_data; BZERO(ld, sizeof(struct list_data)); struct_list_offset = 0; while ((c = getopt(argcnt, args, "BHhrs:S:e:o:O:xdl:")) != EOF) { switch(c) { case 'B': ld->flags |= LIST_BRENT_ALGO; break; case 'H': ld->flags |= LIST_HEAD_FORMAT; ld->flags |= LIST_HEAD_POINTER; break; case 'h': ld->flags |= LIST_HEAD_FORMAT; break; case 'r': ld->flags |= LIST_HEAD_FORMAT; ld->flags |= LIST_HEAD_REVERSE; break; case 's': case 'S': if (ld->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); ld->flags |= (c == 's') ? LIST_PARSE_MEMBER : LIST_READ_MEMBER; if (count_bits_long(ld->flags & (LIST_PARSE_MEMBER|LIST_READ_MEMBER)) > 1) error(FATAL, "-S and -s options are mutually exclusive\n"); break; case 'l': if (IS_A_NUMBER(optarg)) struct_list_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) struct_list_offset = sm->member_offset; else error(FATAL, "invalid -l option: %s\n", optarg); break; case 'O': if (ld->flags & LIST_HEAD_OFFSET_ENTERED) error(FATAL, "offset value %d (0x%lx) already entered\n", head_member_offset, head_member_offset); else if (IS_A_NUMBER(optarg)) head_member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) head_member_offset = sm->member_offset; else error(FATAL, "invalid -O argument: %s\n", optarg); ld->flags |= LIST_HEAD_OFFSET_ENTERED; break; case 'o': if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %d (0x%lx) already entered\n", ld->member_offset, 
ld->member_offset); else if (IS_A_NUMBER(optarg)) ld->member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) ld->member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); ld->flags |= LIST_OFFSET_ENTERED; break; case 'e': ld->end = htol(optarg, FAULT_ON_ERROR, NULL); break; case 'x': if (ld->flags & LIST_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_16; break; case 'd': if (ld->flags & LIST_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind] && args[optind+1] && args[optind+2]) { error(INFO, "too many arguments\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->structname_args) { ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args); retrieve_list((ulong *)ld->structname, ld->structname_args); hq_close(); ld->struct_list_offset = struct_list_offset; } else if (struct_list_offset) { error(INFO, "-l option can only be used with -s or -S option\n"); cmd_usage(pc->curcmd, SYNOPSIS); } while (args[optind]) { if (strstr(args[optind], ".") && arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) { if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %ld (0x%lx) already entered\n", ld->member_offset, ld->member_offset); ld->member_offset = sm->member_offset; ld->flags |= LIST_OFFSET_ENTERED; } else { /* * Do an inordinate amount of work to avoid -o... * * OK, if it's a symbol, then it has to be a start. */ if ((sp = symbol_search(args[optind]))) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = sp->value; ld->flags |= LIST_START_ENTERED; goto next_arg; } /* * If it's not a symbol nor a number, bail out if it * cannot be evaluated as a start address. 
*/ if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } error(FATAL, "invalid argument: %s\n", args[optind]); } /* * If the start is known, it's got to be an offset. */ if (ld->flags & LIST_START_ENTERED) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; break; } /* * If the offset is known, or there's no subsequent * argument, then it's got to be a start. */ if ((ld->flags & LIST_OFFSET_ENTERED) || !args[optind+1]) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (!IS_KVADDR(value)) error(FATAL, "invalid kernel virtual address: %s\n", args[optind]); ld->start = value; ld->flags |= LIST_START_ENTERED; break; } /* * Neither start nor offset has been entered, and * it's a number. Look ahead to the next argument. * If it's a symbol, then this must be an offset. */ if ((sp = symbol_search(args[optind+1]))) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; goto next_arg; } else if ((!IS_A_NUMBER(args[optind+1]) && !can_eval(args[optind+1])) && !strstr(args[optind+1], ".")) error(FATAL, "symbol not found: %s\n", args[optind+1]); /* * Crunch time. We've got two numbers. If they're * both ambigous we must have zero-based kernel * virtual address space. 
*/ if (COMMON_VADDR_SPACE() && AMBIGUOUS_NUMBER(args[optind]) && AMBIGUOUS_NUMBER(args[optind+1])) { error(INFO, "ambiguous arguments: \"%s\" and \"%s\": -o is required\n", args[optind], args[optind+1]); cmd_usage(pc->curcmd, SYNOPSIS); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; } next_arg: optind++; } if (!(ld->flags & LIST_START_ENTERED)) { error(INFO, "starting address required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((ld->flags & LIST_OFFSET_ENTERED) && ld->struct_list_offset) { error(INFO, "-l and -o are mutually exclusive\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->flags & LIST_HEAD_FORMAT) { ld->list_head_offset = ld->member_offset; if (ld->flags & LIST_HEAD_REVERSE) ld->member_offset = sizeof(void *); else ld->member_offset = 0; if (ld->flags & LIST_HEAD_POINTER) { if (!ld->end) ld->end = ld->start; readmem(ld->start + ld->member_offset, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (ld->start == ld->end) { fprintf(fp, "(empty)\n"); return; } } else { if (ld->flags & LIST_HEAD_OFFSET_ENTERED) { if (!ld->end) ld->end = ld->start + head_member_offset; readmem(ld->start + head_member_offset, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (ld->start == ld->end) { fprintf(fp, "(empty)\n"); return; } } else ld->start += ld->list_head_offset; } } ld->flags &= ~(LIST_OFFSET_ENTERED|LIST_START_ENTERED); ld->flags |= VERBOSE; if (ld->flags & LIST_BRENT_ALGO) c = do_list_no_hash(ld); else { hq_open(); c = do_list(ld); hq_close(); } if (ld->structname_args) FREEBUF(ld->structname); } void dump_struct_members_fast(struct req_entry *e, int radix, ulong p) { unsigned int i; char b[BUFSIZE]; if (!(e && IS_KVADDR(p))) return; if (!radix) radix = 
*gdb_output_radix; for (i = 0; i < e->count; i++) { if (0 < e->width[i] && (e->width[i] <= 8 || e->is_str[i])) { print_value(e, i, p, e->is_ptr[i] ? 16 : radix); } else if (e->width[i] == 0 || e->width[i] > 8) { snprintf(b, BUFSIZE, "%s.%s", e->name, e->member[i]); dump_struct_member(b, p, radix); } } } struct req_entry * fill_member_offsets(char *arg) { int j; char *p, m; struct req_entry *e; char buf[BUFSIZE]; if (!(arg && *arg)) return NULL; j = count_chars(arg, ',') + 1; e = (struct req_entry *)GETBUF(sizeof(*e)); e->arg = GETBUF(strlen(arg + 1)); strcpy(e->arg, arg); m = ((p = strchr(e->arg, '.')) != NULL); if (!p++) p = e->arg + strlen(e->arg) + 1; e->name = GETBUF(p - e->arg); strncpy(e->name, e->arg, p - e->arg - 1); if (!m) return e; e->count = count_chars(p, ',') + 1; e->width = (ulong *)GETBUF(e->count * sizeof(ulong)); e->is_ptr = (int *)GETBUF(e->count * sizeof(int)); e->is_str = (int *)GETBUF(e->count * sizeof(int)); e->member = (char **)GETBUF(e->count * sizeof(char *)); e->offset = (ulong *)GETBUF(e->count * sizeof(ulong)); replace_string(p, ",", ' '); parse_line(p, e->member); for (j = 0; j < e->count; j++) { e->offset[j] = MEMBER_OFFSET(e->name, e->member[j]); if (e->offset[j] == INVALID_OFFSET) e->offset[j] = ANON_MEMBER_OFFSET(e->name, e->member[j]); if (e->offset[j] == INVALID_OFFSET) error(FATAL, "Can't get offset of '%s.%s'\n", e->name, e->member[j]); e->is_ptr[j] = MEMBER_TYPE(e->name, e->member[j]) == TYPE_CODE_PTR; e->is_str[j] = is_string(e->name, e->member[j]); /* Dirty hack for obtaining size of particular field */ snprintf(buf, BUFSIZE, "%s + 1", e->member[j]); e->width[j] = ANON_MEMBER_OFFSET(e->name, buf) - e->offset[j]; } return e; } static void print_value(struct req_entry *e, unsigned int i, ulong addr, unsigned int radix) { union { uint64_t v64; uint32_t v32; uint16_t v16; uint8_t v8; } v; char buf[BUFSIZE]; struct syment *sym; addr += e->offset[i]; /* Read up to 8 bytes, counters, pointers, etc. 
*/ if (e->width[i] <= 8 && !readmem(addr, KVADDR, &v, e->width[i], "structure value", RETURN_ON_ERROR | QUIET)) { error(INFO, "cannot access member: %s at %lx\n", e->member[i], addr); return; } snprintf(buf, BUFSIZE, " %%s = %s%%%s%s", (radix == 16 ? "0x" : ""), (e->width[i] == 8 ? "l" : ""), (radix == 16 ? "x" : "u" ) ); switch (e->width[i]) { case 1: fprintf(fp, buf, e->member[i], v.v8); break; case 2: fprintf(fp, buf, e->member[i], v.v16); break; case 4: fprintf(fp, buf, e->member[i], v.v32); break; case 8: fprintf(fp, buf, e->member[i], v.v64); break; } if (e->is_str[i]) { if (e->is_ptr[i]) { read_string(v.v64, buf, BUFSIZE); fprintf(fp, " \"%s\"", buf); } else { read_string(addr, buf, e->width[i]); fprintf(fp, " %s = \"%s\"", e->member[i], buf); } } else if ((sym = value_search(v.v64, 0)) && is_symbol_text(sym)) fprintf(fp, " <%s>", sym->name); fprintf(fp, "\n"); } /* * Does the work for cmd_list() and any other function that requires the * contents of a linked list. See cmd_list description above for details. */ int do_list(struct list_data *ld) { ulong next, last, first, offset; ulong searchfor, readflag; int i, count, others, close_hq_on_return; unsigned int radix; struct req_entry **e = NULL; if (CRASHDEBUG(1)) { others = 0; console(" flags: %lx (", ld->flags); if (ld->flags & VERBOSE) console("%sVERBOSE", others++ ? "|" : ""); if (ld->flags & LIST_OFFSET_ENTERED) console("%sLIST_OFFSET_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_START_ENTERED) console("%sLIST_START_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_FORMAT) console("%sLIST_HEAD_FORMAT", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_POINTER) console("%sLIST_HEAD_POINTER", others++ ? "|" : ""); if (ld->flags & RETURN_ON_DUPLICATE) console("%sRETURN_ON_DUPLICATE", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? 
"|" : ""); if (ld->flags & LIST_STRUCT_RADIX_10) console("%sLIST_STRUCT_RADIX_10", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_16) console("%sLIST_STRUCT_RADIX_16", others++ ? "|" : ""); if (ld->flags & LIST_ALLOCATE) console("%sLIST_ALLOCATE", others++ ? "|" : ""); if (ld->flags & LIST_CALLBACK) console("%sLIST_CALLBACK", others++ ? "|" : ""); if (ld->flags & CALLBACK_RETURN) console("%sCALLBACK_RETURN", others++ ? "|" : ""); console(")\n"); console(" start: %lx\n", ld->start); console(" member_offset: %ld\n", ld->member_offset); console(" list_head_offset: %ld\n", ld->list_head_offset); console(" end: %lx\n", ld->end); console(" searchfor: %lx\n", ld->searchfor); console(" structname_args: %lx\n", ld->structname_args); if (!ld->structname_args) console(" structname: (unused)\n"); for (i = 0; i < ld->structname_args; i++) console(" structname[%d]: %s\n", i, ld->structname[i]); console(" header: %s\n", ld->header); console(" list_ptr: %lx\n", (ulong)ld->list_ptr); console(" callback_func: %lx\n", (ulong)ld->callback_func); console(" callback_data: %lx\n", (ulong)ld->callback_data); console("struct_list_offset: %lx\n", ld->struct_list_offset); } count = 0; searchfor = ld->searchfor; ld->searchfor = 0; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; next = ld->start; close_hq_on_return = FALSE; if (ld->flags & LIST_ALLOCATE) { if (!hq_is_open()) { hq_open(); close_hq_on_return = TRUE; } else if (hq_is_inuse()) { error(ld->flags & RETURN_ON_LIST_ERROR ? INFO : FATAL, "\ndo_list: hash queue is in use?\n"); return -1; } } readflag = ld->flags & RETURN_ON_LIST_ERROR ? 
(RETURN_ON_ERROR|QUIET) : FAULT_ON_ERROR; if (!readmem(next + ld->member_offset, KVADDR, &first, sizeof(void *), "first list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (ld->header) fprintf(fp, "%s", ld->header); offset = ld->list_head_offset + ld->struct_list_offset; if (ld->structname && (ld->flags & LIST_READ_MEMBER)) { e = (struct req_entry **)GETBUF(sizeof(*e) * ld->structname_args); for (i = 0; i < ld->structname_args; i++) e[i] = fill_member_offsets(ld->structname[i]); } while (1) { if (ld->flags & VERBOSE) { fprintf(fp, "%lx\n", next - ld->list_head_offset); if (ld->structname) { for (i = 0; i < ld->structname_args; i++) { switch (count_chars(ld->structname[i], '.')) { case 0: dump_struct(ld->structname[i], next - offset, radix); break; default: if (ld->flags & LIST_PARSE_MEMBER) dump_struct_members(ld, i, next); else if (ld->flags & LIST_READ_MEMBER) dump_struct_members_fast(e[i], radix, next - offset); break; } } } } if (next && !hq_enter(next - ld->list_head_offset)) { if (ld->flags & (RETURN_ON_DUPLICATE|RETURN_ON_LIST_ERROR)) { error(INFO, "\nduplicate list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } error(FATAL, "\nduplicate list entry: %lx\n", next); } if ((searchfor == next) || (searchfor == (next - ld->list_head_offset))) ld->searchfor = searchfor; count++; last = next; if ((ld->flags & LIST_CALLBACK) && ld->callback_func((void *)(next - ld->list_head_offset), ld->callback_data) && (ld->flags & CALLBACK_RETURN)) break; if (!readmem(next + ld->member_offset, KVADDR, &next, sizeof(void *), "list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (next == 0) { if (ld->flags & LIST_HEAD_FORMAT) { error(INFO, "\ninvalid list entry: 0\n"); if (close_hq_on_return) hq_close(); return -1; } if (CRASHDEBUG(1)) console("do_list end: next:%lx\n", next); break; } if (next == ld->end) { 
if (CRASHDEBUG(1)) console("do_list end: next:%lx == end:%lx\n", next, ld->end); break; } if (next == ld->start) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == start:%lx\n", next, ld->start); break; } if (next == last) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == last:%lx\n", next, last); break; } if ((next == first) && (count != 1)) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == first:%lx (count %d)\n", next, last, count); break; } } if (CRASHDEBUG(1)) console("do_list count: %d\n", count); if (ld->flags & LIST_ALLOCATE) { ld->list_ptr = (ulong *)GETBUF(count * sizeof(void *)); count = retrieve_list(ld->list_ptr, count); if (close_hq_on_return) hq_close(); } return count; } static void do_list_debug_entry(struct list_data *ld) { int i, others; if (CRASHDEBUG(1)) { others = 0; console(" flags: %lx (", ld->flags); if (ld->flags & VERBOSE) console("%sVERBOSE", others++ ? "|" : ""); if (ld->flags & LIST_OFFSET_ENTERED) console("%sLIST_OFFSET_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_START_ENTERED) console("%sLIST_START_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_FORMAT) console("%sLIST_HEAD_FORMAT", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_POINTER) console("%sLIST_HEAD_POINTER", others++ ? "|" : ""); if (ld->flags & RETURN_ON_DUPLICATE) console("%sRETURN_ON_DUPLICATE", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_10) console("%sLIST_STRUCT_RADIX_10", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_16) console("%sLIST_STRUCT_RADIX_16", others++ ? "|" : ""); if (ld->flags & LIST_ALLOCATE) console("%sLIST_ALLOCATE", others++ ? "|" : ""); if (ld->flags & LIST_CALLBACK) console("%sLIST_CALLBACK", others++ ? "|" : ""); if (ld->flags & CALLBACK_RETURN) console("%sCALLBACK_RETURN", others++ ? 
"|" : ""); console(")\n"); console(" start: %lx\n", ld->start); console(" member_offset: %ld\n", ld->member_offset); console(" list_head_offset: %ld\n", ld->list_head_offset); console(" end: %lx\n", ld->end); console(" searchfor: %lx\n", ld->searchfor); console(" structname_args: %lx\n", ld->structname_args); if (!ld->structname_args) console(" structname: (unused)\n"); for (i = 0; i < ld->structname_args; i++) console(" structname[%d]: %s\n", i, ld->structname[i]); console(" header: %s\n", ld->header); console(" list_ptr: %lx\n", (ulong)ld->list_ptr); console(" callback_func: %lx\n", (ulong)ld->callback_func); console(" callback_data: %lx\n", (ulong)ld->callback_data); console("struct_list_offset: %lx\n", ld->struct_list_offset); } } static void do_list_output_struct(struct list_data *ld, ulong next, ulong offset, unsigned int radix, struct req_entry **e) { int i; for (i = 0; i < ld->structname_args; i++) { switch (count_chars(ld->structname[i], '.')) { case 0: dump_struct(ld->structname[i], next - offset, radix); break; default: if (ld->flags & LIST_PARSE_MEMBER) dump_struct_members(ld, i, next); else if (ld->flags & LIST_READ_MEMBER) dump_struct_members_fast(e[i], radix, next - offset); break; } } } static int do_list_no_hash_readmem(struct list_data *ld, ulong *next_ptr, ulong readflag) { if (!readmem(*next_ptr + ld->member_offset, KVADDR, next_ptr, sizeof(void *), "list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", *next_ptr); return -1; } return 0; } static ulong brent_x; /* tortoise */ static ulong brent_y; /* hare */ static ulong brent_r; /* power */ static ulong brent_lambda; /* loop length */ static ulong brent_mu; /* distance to start of loop */ static ulong brent_loop_detect; static ulong brent_loop_exit; /* * 'ptr': representative of x or y; modified on return */ static int brent_f(ulong *ptr, struct list_data *ld, ulong readflag) { return do_list_no_hash_readmem(ld, ptr, readflag); } /* * Similar to do_list() but without the 
hash_table or LIST_ALLOCATE. * Useful for the 'list' command and other callers needing faster list * enumeration. */ int do_list_no_hash(struct list_data *ld) { ulong next, last, first, offset; ulong searchfor, readflag; int i, count, ret; unsigned int radix; struct req_entry **e = NULL; do_list_debug_entry(ld); count = 0; searchfor = ld->searchfor; ld->searchfor = 0; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; next = ld->start; readflag = ld->flags & RETURN_ON_LIST_ERROR ? (RETURN_ON_ERROR|QUIET) : FAULT_ON_ERROR; if (!readmem(next + ld->member_offset, KVADDR, &first, sizeof(void *), "first list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); return -1; } if (ld->header) fprintf(fp, "%s", ld->header); offset = ld->list_head_offset + ld->struct_list_offset; if (ld->structname && (ld->flags & LIST_READ_MEMBER)) { e = (struct req_entry **)GETBUF(sizeof(*e) * ld->structname_args); for (i = 0; i < ld->structname_args; i++) e[i] = fill_member_offsets(ld->structname[i]); } brent_loop_detect = brent_loop_exit = 0; brent_lambda = 0; brent_r = 2; brent_x = brent_y = next; ret = brent_f(&brent_y, ld, readflag); if (ret == -1) return -1; while (1) { if (!brent_loop_detect && ld->flags & VERBOSE) { fprintf(fp, "%lx\n", next - ld->list_head_offset); if (ld->structname) { do_list_output_struct(ld, next, offset, radix, e); } } if (next && brent_loop_exit) { if (ld->flags & (RETURN_ON_DUPLICATE|RETURN_ON_LIST_ERROR)) { error(INFO, "\nduplicate list entry: %lx\n", brent_x); return -1; } error(FATAL, "\nduplicate list entry: %lx\n", brent_x); } if ((searchfor == next) || (searchfor == (next - ld->list_head_offset))) ld->searchfor = searchfor; count++; last = next; if ((ld->flags & LIST_CALLBACK) && ld->callback_func((void *)(next - ld->list_head_offset), ld->callback_data) && (ld->flags & CALLBACK_RETURN)) break; ret = do_list_no_hash_readmem(ld, &next, readflag); if (ret == -1) 
return -1; if (!brent_loop_detect) { if (count > 1 && brent_x == brent_y) { brent_loop_detect = 1; error(INFO, "loop detected, loop length: %ld\n", brent_lambda); /* reset x and y to start; advance y loop length */ brent_mu = 0; brent_x = brent_y = ld->start; while (brent_lambda--) { ret = brent_f(&brent_y, ld, readflag); if (ret == -1) return -1; } } else { if (brent_r == brent_lambda) { brent_x = brent_y; brent_r *= 2; brent_lambda = 0; } brent_y = next; brent_lambda++; } } else { if (!brent_loop_exit && brent_x == brent_y) { brent_loop_exit = 1; error(INFO, "length from start to loop: %lx", brent_mu); } else { ret = brent_f(&brent_x, ld, readflag); if (ret == -1) return -1; ret = brent_f(&brent_y, ld, readflag); if (ret == -1) return -1; brent_mu++; } } if (next == 0) { if (ld->flags & LIST_HEAD_FORMAT) { error(INFO, "\ninvalid list entry: 0\n"); return -1; } if (CRASHDEBUG(1)) console("do_list end: next:%lx\n", next); break; } if (next == ld->end) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == end:%lx\n", next, ld->end); break; } if (next == ld->start) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == start:%lx\n", next, ld->start); break; } if (next == last) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == last:%lx\n", next, last); break; } if ((next == first) && (count != 1)) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == first:%lx (count %d)\n", next, last, count); break; } } if (CRASHDEBUG(1)) console("do_list count: %d\n", count); return count; } /* * Issue a dump_struct_member() call for one or more structure * members. 
Multiple members are passed in a comma-separated * list using the the format: * * struct.member1,member2,member3 */ void dump_struct_members(struct list_data *ld, int idx, ulong next) { int i, argc; char *p1, *p2; char *structname, *members; char *arglist[MAXARGS]; unsigned int radix; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; structname = GETBUF(strlen(ld->structname[idx])+1); members = GETBUF(strlen(ld->structname[idx])+1); strcpy(structname, ld->structname[idx]); p1 = strstr(structname, ".") + 1; p2 = strstr(ld->structname[idx], ".") + 1; strcpy(members, p2); replace_string(members, ",", ' '); argc = parse_line(members, arglist); for (i = 0; i < argc; i++) { *p1 = NULLCHAR; strcat(structname, arglist[i]); dump_struct_member(structname, next - ld->list_head_offset - ld->struct_list_offset, radix); } FREEBUF(structname); FREEBUF(members); } #define RADIXTREE_REQUEST (0x1) #define RBTREE_REQUEST (0x2) #define XARRAY_REQUEST (0x4) #define MAPLE_REQUEST (0x8) void cmd_tree() { int c, type_flag, others; long root_offset; struct tree_data tree_data, *td; struct datatype_member struct_member, *sm; struct syment *sp; ulong value; char *type_name = NULL; type_flag = 0; root_offset = 0; sm = &struct_member; td = &tree_data; BZERO(td, sizeof(struct tree_data)); while ((c = getopt(argcnt, args, "xdt:r:o:s:S:plNv")) != EOF) { switch (c) { case 't': if (type_flag & (RADIXTREE_REQUEST|RBTREE_REQUEST|XARRAY_REQUEST|MAPLE_REQUEST)) { error(INFO, "multiple tree types may not be entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (STRNEQ(optarg, "ra")) if (MEMBER_EXISTS("radix_tree_root", "xa_head")) { type_flag = XARRAY_REQUEST; type_name = "Xarrays"; } else { type_flag = RADIXTREE_REQUEST; type_name = "radix trees"; } else if (STRNEQ(optarg, "rb")) { type_flag = RBTREE_REQUEST; type_name = "rbtrees"; } else if (STRNEQ(optarg, "x")) { type_flag = XARRAY_REQUEST; type_name = "Xarrays"; } else if 
(STRNEQ(optarg, "m")) { type_flag = MAPLE_REQUEST; type_name = "maple trees"; } else { error(INFO, "invalid tree type: %s\n", optarg); cmd_usage(pc->curcmd, SYNOPSIS); } break; case 'l': td->flags |= TREE_LINEAR_ORDER; break; case 'r': if (td->flags & TREE_ROOT_OFFSET_ENTERED) error(FATAL, "root offset value %d (0x%lx) already entered\n", root_offset, root_offset); else if (IS_A_NUMBER(optarg)) root_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) root_offset = sm->member_offset; else error(FATAL, "invalid -r argument: %s\n", optarg); td->flags |= TREE_ROOT_OFFSET_ENTERED; break; case 'o': if (td->flags & TREE_NODE_OFFSET_ENTERED) error(FATAL, "node offset value %d (0x%lx) already entered\n", td->node_member_offset, td->node_member_offset); else if (IS_A_NUMBER(optarg)) td->node_member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) td->node_member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); td->flags |= TREE_NODE_OFFSET_ENTERED; break; case 's': case 'S': if (td->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); td->flags |= (c == 's') ? 
TREE_PARSE_MEMBER : TREE_READ_MEMBER; if (count_bits_long(td->flags & (TREE_PARSE_MEMBER|TREE_READ_MEMBER)) > 1) error(FATAL, "-S and -s options are mutually exclusive\n"); break; case 'p': td->flags |= TREE_POSITION_DISPLAY; break; case 'N': td->flags |= TREE_NODE_POINTER; break; case 'x': if (td->flags & TREE_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_16; break; case 'd': if (td->flags & TREE_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_10; break; case 'v': td->flags |= TREE_STRUCT_VERBOSE; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((type_flag & (XARRAY_REQUEST|RADIXTREE_REQUEST|MAPLE_REQUEST)) && (td->flags & TREE_LINEAR_ORDER)) error(FATAL, "-l option is not applicable to %s\n", type_name); if ((type_flag & (XARRAY_REQUEST|RADIXTREE_REQUEST|MAPLE_REQUEST)) && (td->flags & TREE_NODE_OFFSET_ENTERED)) error(FATAL, "-o option is not applicable to %s\n", type_name); if ((type_flag & (RBTREE_REQUEST|XARRAY_REQUEST|RADIXTREE_REQUEST)) && (td->flags & TREE_STRUCT_VERBOSE)) error(FATAL, "-v option is not applicable to %s\n", type_name); if ((td->flags & TREE_ROOT_OFFSET_ENTERED) && (td->flags & TREE_NODE_POINTER)) error(FATAL, "-r and -N options are mutually exclusive\n"); if (!args[optind]) { error(INFO, "a starting address is required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((sp = symbol_search(args[optind]))) { td->start = sp->value; goto next_arg; } if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); next_arg: if 
(args[optind+1]) { error(INFO, "too many arguments entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (td->structname_args) { td->structname = (char **)GETBUF(sizeof(char *) * td->structname_args); retrieve_list((ulong *)td->structname, td->structname_args); hq_close(); } if (!(td->flags & TREE_NODE_POINTER)) td->start = td->start + root_offset; if (CRASHDEBUG(1)) { others = 0; fprintf(fp, " flags: %lx (", td->flags); if (td->flags & TREE_ROOT_OFFSET_ENTERED) fprintf(fp, "%sTREE_ROOT_OFFSET_ENTERED", others++ ? "|" : ""); if (td->flags & TREE_NODE_OFFSET_ENTERED) fprintf(fp, "%sTREE_NODE_OFFSET_ENTERED", others++ ? "|" : ""); if (td->flags & TREE_NODE_POINTER) fprintf(fp, "%sTREE_NODE_POINTER", others++ ? "|" : ""); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, "%sTREE_POSITION_DISPLAY", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_RADIX_10) fprintf(fp, "%sTREE_STRUCT_RADIX_10", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_RADIX_16) fprintf(fp, "%sTREE_STRUCT_RADIX_16", others++ ? "|" : ""); if (td->flags & TREE_PARSE_MEMBER) fprintf(fp, "%sTREE_PARSE_MEMBER", others++ ? "|" : ""); if (td->flags & TREE_READ_MEMBER) fprintf(fp, "%sTREE_READ_MEMBER", others++ ? "|" : ""); if (td->flags & TREE_LINEAR_ORDER) fprintf(fp, "%sTREE_LINEAR_ORDER", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_VERBOSE) fprintf(fp, "%sTREE_STRUCT_VERBOSE", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " type: "); if (type_flag & RADIXTREE_REQUEST) fprintf(fp, "radix\n"); else if (type_flag & XARRAY_REQUEST) fprintf(fp, "xarray\n"); else if (type_flag & MAPLE_REQUEST) fprintf(fp, "maple\n"); else fprintf(fp, "red-black%s", type_flag & RBTREE_REQUEST ? "\n" : " (default)\n"); fprintf(fp, " node pointer: %s\n", td->flags & TREE_NODE_POINTER ? 
"yes" : "no"); fprintf(fp, " start: %lx\n", td->start); fprintf(fp, "node_member_offset: %ld\n", td->node_member_offset); fprintf(fp, " structname_args: %d\n", td->structname_args); fprintf(fp, " count: %d\n", td->count); } td->flags &= ~TREE_NODE_OFFSET_ENTERED; td->flags |= VERBOSE; hq_open(); if (type_flag & RADIXTREE_REQUEST) do_rdtree(td); else if (type_flag & XARRAY_REQUEST) do_xatree(td); else if (type_flag & MAPLE_REQUEST) do_mptree(td); else do_rbtree(td); hq_close(); if (td->structname_args) FREEBUF(td->structname); } static ulong RADIX_TREE_MAP_SHIFT = UNINITIALIZED; static ulong RADIX_TREE_MAP_SIZE = UNINITIALIZED; static ulong RADIX_TREE_MAP_MASK = UNINITIALIZED; #define RADIX_TREE_ENTRY_MASK 3UL #define RADIX_TREE_INTERNAL_NODE 1UL static void do_radix_tree_iter(ulong node, uint height, char *path, ulong index, struct radix_tree_ops *ops) { uint off; if (!hq_enter(node)) error(FATAL, "\nduplicate tree node: %lx\n", node); for (off = 0; off < RADIX_TREE_MAP_SIZE; off++) { ulong slot; ulong shift = (height - 1) * RADIX_TREE_MAP_SHIFT; readmem(node + OFFSET(radix_tree_node_slots) + sizeof(void *) * off, KVADDR, &slot, sizeof(void *), "radix_tree_node.slot[off]", FAULT_ON_ERROR); if (!slot) continue; if (slot & RADIX_TREE_INTERNAL_NODE) slot &= ~RADIX_TREE_INTERNAL_NODE; if (height == 1) ops->entry(node, slot, path, index | off, ops->private); else { ulong child_index = index | (off << shift); char child_path[BUFSIZE]; sprintf(child_path, "%s/%d", path, off); do_radix_tree_iter(slot, height - 1, child_path, child_index, ops); } } } int do_radix_tree_traverse(ulong ptr, int is_root, struct radix_tree_ops *ops) { static ulong max_height = UNINITIALIZED; ulong node_p; long nlen; uint height, is_internal; unsigned char shift; char path[BUFSIZE]; if (!VALID_STRUCT(radix_tree_root) || !VALID_STRUCT(radix_tree_node) || ((!VALID_MEMBER(radix_tree_root_height) || !VALID_MEMBER(radix_tree_root_rnode) || !VALID_MEMBER(radix_tree_node_slots) || 
          !ARRAY_LENGTH(height_to_maxindex)) &&
         (!VALID_MEMBER(radix_tree_root_rnode) ||
          !VALID_MEMBER(radix_tree_node_shift) ||
          !VALID_MEMBER(radix_tree_node_slots) ||
          !ARRAY_LENGTH(height_to_maxnodes))))
        error(FATAL, "radix trees do not exist or have changed "
            "their format\n");

    if (RADIX_TREE_MAP_SHIFT == UNINITIALIZED) {
        /* Derive the per-node geometry from the slots[] array size. */
        if (!(nlen = MEMBER_SIZE("radix_tree_node", "slots")))
            error(FATAL, "cannot determine length of "
                "radix_tree_node.slots[] array\n");
        nlen /= sizeof(void *);
        RADIX_TREE_MAP_SHIFT = ffsl(nlen) - 1;
        RADIX_TREE_MAP_SIZE = (1UL << RADIX_TREE_MAP_SHIFT);
        RADIX_TREE_MAP_MASK = (RADIX_TREE_MAP_SIZE-1);

        if (ARRAY_LENGTH(height_to_maxindex))
            max_height = ARRAY_LENGTH(height_to_maxindex);
        else
            max_height = ARRAY_LENGTH(height_to_maxnodes);
    }

    height = 0;
    if (!is_root) {
        /* ptr is a (possibly internal-tagged) node pointer. */
        node_p = ptr;
        if (node_p & RADIX_TREE_INTERNAL_NODE)
            node_p &= ~RADIX_TREE_INTERNAL_NODE;

        if (VALID_MEMBER(radix_tree_node_height)) {
            readmem(node_p + OFFSET(radix_tree_node_height),
                KVADDR, &height, sizeof(uint),
                "radix_tree_node height", FAULT_ON_ERROR);
        } else if (VALID_MEMBER(radix_tree_node_shift)) {
            readmem(node_p + OFFSET(radix_tree_node_shift),
                KVADDR, &shift, sizeof(shift),
                "radix_tree_node shift", FAULT_ON_ERROR);
            height = (shift / RADIX_TREE_MAP_SHIFT) + 1;
        } else
            error(FATAL, "-N option is not supported or applicable"
                " for radix trees on this architecture or kernel\n");
        if (height > max_height)
            goto error_height;
    } else {
        if (VALID_MEMBER(radix_tree_root_height)) {
            readmem(ptr + OFFSET(radix_tree_root_height), KVADDR,
                &height, sizeof(uint), "radix_tree_root height",
                FAULT_ON_ERROR);
        }
        readmem(ptr + OFFSET(radix_tree_root_rnode), KVADDR, &node_p,
            sizeof(void *), "radix_tree_root rnode",
            FAULT_ON_ERROR);
        is_internal = (node_p & RADIX_TREE_INTERNAL_NODE);
        if (node_p & RADIX_TREE_INTERNAL_NODE)
            node_p &= ~RADIX_TREE_INTERNAL_NODE;

        if (is_internal && VALID_MEMBER(radix_tree_node_shift)) {
            readmem(node_p + OFFSET(radix_tree_node_shift), KVADDR,
                &shift, sizeof(shift), "radix_tree_node shift",
                FAULT_ON_ERROR);
            height = (shift / RADIX_TREE_MAP_SHIFT) + 1;
        }

        if (height > max_height) {
            node_p = ptr;
            goto error_height;
        }
    }

    if (CRASHDEBUG(1)) {
        fprintf(fp, "radix_tree_node.slots[%ld]\n",
            RADIX_TREE_MAP_SIZE);
        fprintf(fp, "max_height %ld: ", max_height);
        fprintf(fp, "\n");
        fprintf(fp, "pointer at %lx (is_root? %s):\n",
            node_p, is_root ? "yes" : "no");
        if (is_root)
            dump_struct("radix_tree_root", ptr, RADIX(ops->radix));
        else
            dump_struct("radix_tree_node", node_p, RADIX(ops->radix));
    }

    if (height == 0) {
        /* Single direct entry hanging off the root. */
        strcpy(path, "direct");
        ops->entry(node_p, node_p, path, 0, ops->private);
    } else {
        strcpy(path, "root");
        do_radix_tree_iter(node_p, height, path, 0, ops);
    }

    return 0;

error_height:
    fprintf(fp, "radix_tree_node at %lx\n", node_p);
    dump_struct("radix_tree_node", node_p, RADIX(ops->radix));
    error(FATAL, "height %d is greater than "
        "maximum radix tree height index %ld\n",
        height, max_height);
    return -1;
}

/* xarray geometry, derived lazily from xa_node.slots[]. */
static ulong XA_CHUNK_SHIFT = UNINITIALIZED;
static ulong XA_CHUNK_SIZE = UNINITIALIZED;
static ulong XA_CHUNK_MASK = UNINITIALIZED;

/*
 * Recursively walk one xa_node, invoking ops->entry() on each populated
 * leaf slot.
 */
static void
do_xarray_iter(ulong node, uint height, char *path,
        ulong index, struct xarray_ops *ops)
{
    uint off;

    if (!hq_enter(node))
        error(FATAL, "\nduplicate tree node: %lx\n", node);

    for (off = 0; off < XA_CHUNK_SIZE; off++) {
        ulong slot;
        ulong shift = (height - 1) * XA_CHUNK_SHIFT;

        readmem(node + OFFSET(xa_node_slots) +
            sizeof(void *) * off, KVADDR, &slot, sizeof(void *),
            "xa_node.slots[off]", FAULT_ON_ERROR);
        if (!slot)
            continue;

        if ((slot & XARRAY_TAG_MASK) == XARRAY_TAG_INTERNAL)
            slot &= ~XARRAY_TAG_INTERNAL;

        if (height == 1)
            ops->entry(node, slot, path, index | off, ops->private);
        else {
            ulong child_index = index | (off << shift);
            char child_path[BUFSIZE];

            sprintf(child_path, "%s/%d", path, off);
            do_xarray_iter(slot, height - 1,
                child_path, child_index, ops);
        }
    }
}

int
do_xarray_traverse(ulong ptr, int is_root, struct xarray_ops *ops)
{
    ulong node_p;
    long nlen;
    uint height, is_internal;
    unsigned char shift;
    char
path[BUFSIZE]; if (!VALID_STRUCT(xarray) || !VALID_STRUCT(xa_node) || !VALID_MEMBER(xarray_xa_head) || !VALID_MEMBER(xa_node_slots) || !VALID_MEMBER(xa_node_shift)) error(FATAL, "xarray facility does not exist or has changed its format\n"); if (XA_CHUNK_SHIFT == UNINITIALIZED) { if ((nlen = MEMBER_SIZE("xa_node", "slots")) <= 0) error(FATAL, "cannot determine length of xa_node.slots[] array\n"); nlen /= sizeof(void *); XA_CHUNK_SHIFT = ffsl(nlen) - 1; XA_CHUNK_SIZE = (1UL << XA_CHUNK_SHIFT); XA_CHUNK_MASK = (XA_CHUNK_SIZE-1); } height = 0; if (!is_root) { node_p = ptr; if ((node_p & XARRAY_TAG_MASK) == XARRAY_TAG_INTERNAL) node_p &= ~XARRAY_TAG_MASK; if (VALID_MEMBER(xa_node_shift)) { readmem(node_p + OFFSET(xa_node_shift), KVADDR, &shift, sizeof(shift), "xa_node shift", FAULT_ON_ERROR); height = (shift / XA_CHUNK_SHIFT) + 1; } else error(FATAL, "-N option is not supported or applicable" " for xarrays on this architecture or kernel\n"); } else { readmem(ptr + OFFSET(xarray_xa_head), KVADDR, &node_p, sizeof(void *), "xarray xa_head", FAULT_ON_ERROR); is_internal = ((node_p & XARRAY_TAG_MASK) == XARRAY_TAG_INTERNAL); if (node_p & XARRAY_TAG_MASK) node_p &= ~XARRAY_TAG_MASK; if (is_internal && VALID_MEMBER(xa_node_shift)) { readmem(node_p + OFFSET(xa_node_shift), KVADDR, &shift, sizeof(shift), "xa_node shift", FAULT_ON_ERROR); height = (shift / XA_CHUNK_SHIFT) + 1; } } if (CRASHDEBUG(1)) { fprintf(fp, "xa_node.slots[%ld]\n", XA_CHUNK_SIZE); fprintf(fp, "pointer at %lx (is_root? %s):\n", node_p, is_root ? 
"yes" : "no"); if (is_root) dump_struct("xarray", ptr, RADIX(ops->radix)); else dump_struct("xa_node", node_p, RADIX(ops->radix)); } if (height == 0) { strcpy(path, "direct"); ops->entry(node_p, node_p, path, 0, ops->private); } else { strcpy(path, "root"); do_xarray_iter(node_p, height, path, 0, ops); } return 0; } static void do_rdtree_entry(ulong node, ulong slot, const char *path, ulong index, void *private) { struct tree_data *td = private; static struct req_entry **e = NULL; uint print_radix; int i; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } td->count++; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", slot); if (td->flags & TREE_POSITION_DISPLAY) { fprintf(fp, " index: %ld position: %s/%ld\n", index, path, index & RADIX_TREE_MAP_MASK); } if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch (count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], slot, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, slot); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, slot); break; } } } } int do_rdtree(struct tree_data *td) { struct radix_tree_ops ops = { .entry = do_rdtree_entry, .private = td, }; int is_root = !(td->flags & TREE_NODE_POINTER); if (td->flags & TREE_STRUCT_RADIX_10) ops.radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) ops.radix = 16; else ops.radix = 0; do_radix_tree_traverse(td->start, is_root, &ops); return 0; } static void do_xarray_entry(ulong node, ulong slot, const char *path, ulong index, void *private) { struct tree_data 
*td = private; static struct req_entry **e = NULL; uint print_radix; int i; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } td->count++; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", slot); if (td->flags & TREE_POSITION_DISPLAY) { fprintf(fp, " index: %ld position: %s/%ld\n", index, path, index & XA_CHUNK_MASK); } if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch (count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], slot, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, slot); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, slot); break; } } } } int do_xatree(struct tree_data *td) { struct xarray_ops ops = { .entry = do_xarray_entry, .private = td, }; int is_root = !(td->flags & TREE_NODE_POINTER); if (td->flags & TREE_STRUCT_RADIX_10) ops.radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) ops.radix = 16; else ops.radix = 0; do_xarray_traverse(td->start, is_root, &ops); return 0; } int do_rbtree(struct tree_data *td) { ulong start; char pos[BUFSIZE]; if (!VALID_MEMBER(rb_root_rb_node) || !VALID_MEMBER(rb_node_rb_left) || !VALID_MEMBER(rb_node_rb_right)) error(FATAL, "red-black trees do not exist or have changed " "their format\n"); sprintf(pos, "root"); if (td->flags & TREE_NODE_POINTER) start = td->start; else readmem(td->start + OFFSET(rb_root_rb_node), KVADDR, &start, sizeof(void *), "rb_root rb_node", FAULT_ON_ERROR); rbtree_iteration(start, td, pos); return td->count; } void rbtree_iteration(ulong node_p, struct tree_data *td, char 
*pos) { int i; uint print_radix; ulong struct_p, new_p, test_p; char new_pos[BUFSIZE]; static struct req_entry **e; if (!node_p) return; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } if (hq_enter(node_p)) td->count++; else error(FATAL, "\nduplicate tree entry: %lx\n", node_p); if ((td->flags & TREE_LINEAR_ORDER) && readmem(node_p+OFFSET(rb_node_rb_left), KVADDR, &new_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR) && new_p) { if (readmem(new_p+OFFSET(rb_node_rb_left), KVADDR, &test_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR|QUIET)) { sprintf(new_pos, "%s/l", pos); rbtree_iteration(new_p, td, new_pos); } else error(INFO, "rb_node: %lx: corrupted rb_left pointer: %lx\n", node_p, new_p); } struct_p = node_p - td->node_member_offset; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", struct_p); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, " position: %s\n", pos); if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch(count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], struct_p, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, struct_p); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, struct_p); break; } } } if (!(td->flags & TREE_LINEAR_ORDER) && readmem(node_p+OFFSET(rb_node_rb_left), KVADDR, &new_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR) && new_p) { if (readmem(new_p+OFFSET(rb_node_rb_left), KVADDR, &test_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR|QUIET)) { sprintf(new_pos, "%s/l", pos); 
rbtree_iteration(new_p, td, new_pos); } else error(INFO, "rb_node: %lx: corrupted rb_left pointer: %lx\n", node_p, new_p); } if (readmem(node_p+OFFSET(rb_node_rb_right), KVADDR, &new_p, sizeof(void *), "rb_node rb_right", RETURN_ON_ERROR) && new_p) { if (readmem(new_p+OFFSET(rb_node_rb_left), KVADDR, &test_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR|QUIET)) { sprintf(new_pos, "%s/r", pos); rbtree_iteration(new_p, td, new_pos); } else error(INFO, "rb_node: %lx: corrupted rb_right pointer: %lx\n", node_p, new_p); } } void dump_struct_members_for_tree(struct tree_data *td, int idx, ulong struct_p) { int i, argc; uint print_radix; char *p1; char *structname, *members; char *arglist[MAXARGS]; if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; structname = GETBUF(strlen(td->structname[idx])+1); members = GETBUF(strlen(td->structname[idx])+1); strcpy(structname, td->structname[idx]); p1 = strstr(structname, ".") + 1; strcpy(members, p1); replace_string(members, ",", ' '); argc = parse_line(members, arglist); for (i = 0; i pageshift) #define HQ_INDEX(X) (((X) >> HQ_SHIFT) % pc->nr_hash_queues) struct hq_entry { int next; int order; ulong value; }; struct hq_head { int next; int qcnt; }; struct hash_table { ulong flags; struct hq_head *queue_heads; struct hq_entry *memptr; long count; long index; int reallocs; } hash_table = { 0 }; /* * For starters, allocate a hash table containing HQ_ENTRY_CHUNK entries. * If necessary during runtime, it will be increased in size. 
*/
/*
 * Initialize the global hash table used for duplicate-entry detection.
 * Allocates the queue-head array (one head per hash bucket) and the first
 * HQ_ENTRY_CHUNK-sized chunk of entry storage.  On any malloc() failure
 * the hash facility is disabled for the whole session (HASH_QUEUE_NONE
 * set, HASH cleared from pc->flags).
 */
void
hq_init(void)
{
	struct hash_table *ht;

	ht = &hash_table;

	/* Fall back to the built-in default bucket count if unset. */
	if (pc->nr_hash_queues == 0)
		pc->nr_hash_queues = NR_HASH_QUEUES_DEFAULT;

	if ((ht->queue_heads = (struct hq_head *)malloc(pc->nr_hash_queues *
	    sizeof(struct hq_head))) == NULL) {
		error(INFO,
		    "cannot malloc memory for hash queue heads: %s\n",
			strerror(errno));
		ht->flags = HASH_QUEUE_NONE;
		/* Turn hashing off entirely; callers check pc->flags & HASH. */
		pc->flags &= ~HASH;
		return;
	}

	if ((ht->memptr = (struct hq_entry *)malloc(HQ_ENTRY_CHUNK *
	    sizeof(struct hq_entry))) == NULL) {
		error(INFO, "cannot malloc memory for hash queues: %s\n",
			strerror(errno));
		ht->flags = HASH_QUEUE_NONE;
		pc->flags &= ~HASH;
		return;
	}

	/* Entries must start zeroed: a zero entry means "free/unused". */
	BZERO(ht->memptr, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));
	ht->count = HQ_ENTRY_CHUNK;
	ht->index = 0;
}

/*
 * Get a free hash queue entry. If there's no more available, realloc()
 * a new chunk of memory with another HQ_ENTRY_CHUNK entries stuck on the end.
 */
static long
alloc_hq_entry(void)
{
	struct hash_table *ht;
	struct hq_entry *new, *end_of_old;

	ht = &hash_table;

	/*
	 * Pre-increment: index 0 is never handed out, so a "next" field of
	 * zero can serve as the end-of-chain marker in the bucket lists.
	 */
	if (++ht->index == ht->count) {
		if (!(new = (void *)realloc((void *)ht->memptr,
		    (ht->count+HQ_ENTRY_CHUNK) * sizeof(struct hq_entry)))) {
			error(INFO,
			    "cannot realloc memory for hash queues: %s\n",
				strerror(errno));
			/* Keep the old block; just refuse further entries. */
			ht->flags |= HASH_QUEUE_FULL;
			return(-1);
		}
		ht->reallocs++;

		ht->memptr = new;
		/* Zero only the newly-appended chunk of entries. */
		end_of_old = ht->memptr + ht->count;
		BZERO(end_of_old, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));
		ht->count += HQ_ENTRY_CHUNK;
	}

	return(ht->index);
}

/*
 * Restore the hash queue to its state before the duplicate entry
 * was attempted.
 */
static void
dealloc_hq_entry(struct hq_entry *entry)
{
	struct hash_table *ht;
	long hqi;

	ht = &hash_table;
	hqi = HQ_INDEX(entry->value);

	/* Hand the just-allocated slot back and scrub it to "free" state. */
	ht->index--;

	BZERO(entry, sizeof(struct hq_entry));

	/* Undo the bucket-count bump done by hq_enter(). */
	ht->queue_heads[hqi].qcnt--;
}

/*
 * Initialize the hash table for a hashing session.
*/ int hq_open(void) { struct hash_table *ht; if (!(pc->flags & HASH)) return FALSE; ht = &hash_table; if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_OPEN)) return FALSE; ht->flags &= ~(HASH_QUEUE_FULL|HASH_QUEUE_CLOSED); BZERO(ht->queue_heads, sizeof(struct hq_head) * pc->nr_hash_queues); BZERO(ht->memptr, ht->count * sizeof(struct hq_entry)); ht->index = 0; ht->flags |= HASH_QUEUE_OPEN; return TRUE; } int hq_is_open(void) { struct hash_table *ht; ht = &hash_table; return (ht->flags & HASH_QUEUE_OPEN ? TRUE : FALSE); } int hq_is_inuse(void) { struct hash_table *ht; if (!hq_is_open()) return FALSE; ht = &hash_table; return (ht->index ? TRUE : FALSE); } /* * Close the hash table, returning the number of items hashed in this session. */ int hq_close(void) { struct hash_table *ht; ht = &hash_table; ht->flags &= ~(HASH_QUEUE_OPEN); ht->flags |= HASH_QUEUE_CLOSED; if (!(pc->flags & HASH)) return(0); if (ht->flags & HASH_QUEUE_NONE) return(0); ht->flags &= ~HASH_QUEUE_FULL; return(ht->index); } char *corrupt_hq = "corrupt hash queue entry: value: %lx next: %d order: %d\n"; /* * For a given value, allocate a hash queue entry and hash it into the * open hash table. If a duplicate entry is found, return FALSE; for all * other possibilities return TRUE. Note that it's up to the user to deal * with failure. 
*/ int hq_enter(ulong value) { struct hash_table *ht; struct hq_entry *entry; struct hq_entry *list_entry; long hqi; long index; if (!(pc->flags & HASH)) return TRUE; ht = &hash_table; if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_FULL)) return TRUE; if (!(ht->flags & HASH_QUEUE_OPEN)) return TRUE; if ((index = alloc_hq_entry()) < 0) return TRUE; entry = ht->memptr + index; if (entry->next || entry->value || entry->order) { error(INFO, corrupt_hq, entry->value, entry->next, entry->order); ht->flags |= HASH_QUEUE_NONE; return TRUE; } entry->next = 0; entry->value = value; entry->order = index; hqi = HQ_INDEX(value); if (ht->queue_heads[hqi].next == 0) { ht->queue_heads[hqi].next = index; ht->queue_heads[hqi].qcnt = 1; return TRUE; } else ht->queue_heads[hqi].qcnt++; list_entry = ht->memptr + ht->queue_heads[hqi].next; while (TRUE) { if (list_entry->value == entry->value) { dealloc_hq_entry(entry); return FALSE; } if (list_entry->next >= ht->count) { error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return TRUE; } if (list_entry->next == 0) break; list_entry = ht->memptr + list_entry->next; } list_entry->next = index; return TRUE; } /* * "hash -d" output */ void dump_hash_table(int verbose) { int i; struct hash_table *ht; struct hq_entry *list_entry; long elements; long queues_in_use; int others; uint minq, maxq; ht = &hash_table; others = 0; fprintf(fp, " flags: %lx (", ht->flags); if (ht->flags & HASH_QUEUE_NONE) fprintf(fp, "%sHASH_QUEUE_NONE", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_OPEN) fprintf(fp, "%sHASH_QUEUE_OPEN", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_CLOSED) fprintf(fp, "%sHASH_QUEUE_CLOSED", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_FULL) fprintf(fp, "%sHASH_QUEUE_FULL", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " queue_heads[%ld]: %lx\n", pc->nr_hash_queues, (ulong)ht->queue_heads); fprintf(fp, " memptr: %lx\n", (ulong)ht->memptr); fprintf(fp, " count: %ld ", ht->count); if (ht->reallocs) fprintf(fp, " (%d reallocs)", ht->reallocs); fprintf(fp, "\n"); fprintf(fp, " index: %ld\n", ht->index); queues_in_use = 0; minq = ~(0); maxq = 0; for (i = 0; i < pc->nr_hash_queues; i++) { if (ht->queue_heads[i].next == 0) { minq = 0; continue; } if (ht->queue_heads[i].qcnt < minq) minq = ht->queue_heads[i].qcnt; if (ht->queue_heads[i].qcnt > maxq) maxq = ht->queue_heads[i].qcnt; queues_in_use++; } elements = 0; list_entry = ht->memptr; for (i = 0; i < ht->count; i++, list_entry++) { if (!list_entry->order) { if (list_entry->value || list_entry->next) goto corrupt_list_entry; continue; } if (list_entry->next >= ht->count) goto corrupt_list_entry; ++elements; } if (elements != ht->index) fprintf(fp, " elements found: %ld (expected %ld)\n", elements, ht->index); fprintf(fp, " queues in use: %ld of %ld\n", queues_in_use, pc->nr_hash_queues); fprintf(fp, " queue length range: %d to %d\n", minq, maxq); if (verbose) { if (!elements) { fprintf(fp, " entries: (none)\n"); return; } fprintf(fp, " entries: "); list_entry = ht->memptr; for (i = 0; i < ht->count; i++, list_entry++) { if (list_entry->order) fprintf(fp, "%s%lx (%d)\n", list_entry->order == 1 ? "" : " ", list_entry->value, list_entry->order); } } return; corrupt_list_entry: error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; } /* * Retrieve the count of, and optionally stuff a pre-allocated array with, * the current hash table entries. The entries will be sorted according * to the order in which they were entered, so from this point on, no * further hq_enter() operations on this list will be allowed. 
However, * multiple calls to retrieve_list are allowed because the second and * subsequent ones will go directly to where the non-zero (valid) entries * start in the potentially very large list_entry memory chunk. */ int retrieve_list(ulong array[], int count) { int i; struct hash_table *ht; struct hq_entry *list_entry; int elements; if (!(pc->flags & HASH)) error(FATAL, "cannot perform this command with hash turned off\n"); ht = &hash_table; list_entry = ht->memptr; for (i = elements = 0; i < ht->count; i++, list_entry++) { if (!list_entry->order) { if (list_entry->value || list_entry->next) goto corrupt_list_entry; continue; } if (list_entry->next >= ht->count) goto corrupt_list_entry; if (array) array[elements] = list_entry->value; if (++elements == count) break; } return elements; corrupt_list_entry: error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return(-1); } /* * For a given value, check to see if a hash queue entry exists. If an * entry is found, return TRUE; for all other possibilities return FALSE. 
*/
/*
 * For a given value, check to see if a hash queue entry exists.  If an
 * entry is found, return TRUE; for all other possibilities (hashing off,
 * table unusable or not open, value not present) return FALSE.
 */
int
hq_entry_exists(ulong value)
{
	struct hash_table *ht;
	struct hq_entry *list_entry;
	long hqi;

	if (!(pc->flags & HASH))
		return FALSE;

	ht = &hash_table;

	if (ht->flags & (HASH_QUEUE_NONE))
		return FALSE;

	if (!(ht->flags & HASH_QUEUE_OPEN))
		return FALSE;

	hqi = HQ_INDEX(value);

	/*
	 * Recognize an empty bucket before walking it: entry 0 is never
	 * allocated (alloc_hq_entry() starts at index 1), so without this
	 * check list_entry would point at the all-zero entry 0 and a
	 * search for the value 0 would succeed spuriously.
	 */
	if (ht->queue_heads[hqi].next == 0)
		return FALSE;

	list_entry = ht->memptr + ht->queue_heads[hqi].next;

	while (TRUE) {
		if (list_entry->value == value)
			return TRUE;
		/* A "next" beyond the pool means the chain is corrupt. */
		if (list_entry->next >= ht->count) {
			error(INFO, corrupt_hq,
				list_entry->value,
				list_entry->next,
				list_entry->order);
			ht->flags |= HASH_QUEUE_NONE;
			return FALSE;
		}
		if (list_entry->next == 0)
			break;
		list_entry = ht->memptr + list_entry->next;
	}

	return FALSE;
}

/*
 * K&R power function for integers: returns base raised to the exp'th
 * power; exp <= 0 yields 1.
 */
long
power(long base, int exp)
{
	int i;
	long p;

	p = 1;
	for (i = 1; i <= exp; i++)
		p = p * base;

	return p;
}

long long
ll_power(long long base, long long exp)
{
	long long i;
	long long p;

	p = 1;
	for (i = 1; i <= exp; i++)
		p = p * base;

	return p;
}

/*
 * Internal buffer allocation scheme to avoid inline malloc() calls and
 * resultant memory leaks due to aborted commands.  These buffers are
 * for TEMPORARY use on a per-command basis.  They are allocated by calls
 * to GETBUF(size).
They can explicitly freed by FREEBUF(address), but * they are all freed by free_all_bufs() which is called in a number of * places, most not */ #define NUMBER_1K_BUFS (10) #define NUMBER_2K_BUFS (10) #define NUMBER_4K_BUFS (5) #define NUMBER_8K_BUFS (5) #define NUMBER_32K_BUFS (1) #define SHARED_1K_BUF_FULL (0x003ff) #define SHARED_2K_BUF_FULL (0x003ff) #define SHARED_4K_BUF_FULL (0x0001f) #define SHARED_8K_BUF_FULL (0x0001f) #define SHARED_32K_BUF_FULL (0x00001) #define SHARED_1K_BUF_AVAIL(X) \ (NUMBER_1K_BUFS && !(((X) & SHARED_1K_BUF_FULL) == SHARED_1K_BUF_FULL)) #define SHARED_2K_BUF_AVAIL(X) \ (NUMBER_2K_BUFS && !(((X) & SHARED_2K_BUF_FULL) == SHARED_2K_BUF_FULL)) #define SHARED_4K_BUF_AVAIL(X) \ (NUMBER_4K_BUFS && !(((X) & SHARED_4K_BUF_FULL) == SHARED_4K_BUF_FULL)) #define SHARED_8K_BUF_AVAIL(X) \ (NUMBER_8K_BUFS && !(((X) & SHARED_8K_BUF_FULL) == SHARED_8K_BUF_FULL)) #define SHARED_32K_BUF_AVAIL(X) \ (NUMBER_32K_BUFS && !(((X) & SHARED_32K_BUF_FULL) == SHARED_32K_BUF_FULL)) #define B1K (0) #define B2K (1) #define B4K (2) #define B8K (3) #define B32K (4) #define SHARED_BUF_SIZES (B32K+1) #define MAX_MALLOC_BUFS (2000) #define MAX_CACHE_SIZE (KILOBYTES(32)) struct shared_bufs { char buf_1K[NUMBER_1K_BUFS][1024]; char buf_2K[NUMBER_2K_BUFS][2048]; char buf_4K[NUMBER_4K_BUFS][4096]; char buf_8K[NUMBER_8K_BUFS][8192]; char buf_32K[NUMBER_32K_BUFS][32768]; long buf_1K_used; long buf_2K_used; long buf_4K_used; long buf_8K_used; long buf_32K_used; long buf_1K_maxuse; long buf_2K_maxuse; long buf_4K_maxuse; long buf_8K_maxuse; long buf_32K_maxuse; long buf_1K_ovf; long buf_2K_ovf; long buf_4K_ovf; long buf_8K_ovf; long buf_32K_ovf; int buf_inuse[SHARED_BUF_SIZES]; char *malloc_bp[MAX_MALLOC_BUFS]; long smallest; long largest; long embedded; long max_embedded; long mallocs; long frees; double total; ulong reqs; } shared_bufs; void buf_init(void) { struct shared_bufs *bp; bp = &shared_bufs; BZERO(bp, sizeof(struct shared_bufs)); bp->smallest = 0x7fffffff; bp->total = 
0.0; #ifdef VALGRIND VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_1K, sizeof(bp->buf_1K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_2K, sizeof(bp->buf_2K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_4K, sizeof(bp->buf_4K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_8K, sizeof(bp->buf_8K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_32K, sizeof(bp->buf_32K)); VALGRIND_CREATE_MEMPOOL(&bp->buf_1K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_2K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_4K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_8K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_32K, 0, 1); #endif } /* * Free up all buffers used by the last command. */ void free_all_bufs(void) { int i; struct shared_bufs *bp; bp = &shared_bufs; bp->embedded = 0; for (i = 0; i < SHARED_BUF_SIZES; i++) bp->buf_inuse[i] = 0; for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i]) { free(bp->malloc_bp[i]); bp->malloc_bp[i] = NULL; bp->frees++; } } if (bp->mallocs != bp->frees) error(WARNING, "malloc/free mismatch (%ld/%ld)\n", bp->mallocs, bp->frees); #ifdef VALGRIND VALGRIND_DESTROY_MEMPOOL(&bp->buf_1K); VALGRIND_DESTROY_MEMPOOL(&bp->buf_2K); VALGRIND_DESTROY_MEMPOOL(&bp->buf_4K); VALGRIND_DESTROY_MEMPOOL(&bp->buf_8K); VALGRIND_DESTROY_MEMPOOL(&bp->buf_32K); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_1K, sizeof(bp->buf_1K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_2K, sizeof(bp->buf_2K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_4K, sizeof(bp->buf_4K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_8K, sizeof(bp->buf_8K)); VALGRIND_MAKE_MEM_NOACCESS(&bp->buf_32K, sizeof(bp->buf_32K)); VALGRIND_CREATE_MEMPOOL(&bp->buf_1K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_2K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_4K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_8K, 0, 1); VALGRIND_CREATE_MEMPOOL(&bp->buf_32K, 0, 1); #endif } /* * Free a specific buffer that may have been returned by malloc(). * If the address is one of the static buffers, look for it and * clear its inuse bit. 
*/ void freebuf(char *addr) { int i; struct shared_bufs *bp; bp = &shared_bufs; bp->embedded--; if (CRASHDEBUG(8)) { INDENT(bp->embedded*2); fprintf(fp, "FREEBUF(%ld)\n", bp->embedded); } for (i = 0; i < NUMBER_1K_BUFS; i++) { if (addr == (char *)&bp->buf_1K[i]) { bp->buf_inuse[B1K] &= ~(1 << i); #ifdef VALGRIND VALGRIND_MEMPOOL_FREE(&bp->buf_1K, addr); #endif return; } } for (i = 0; i < NUMBER_2K_BUFS; i++) { if (addr == (char *)&bp->buf_2K[i]) { bp->buf_inuse[B2K] &= ~(1 << i); #ifdef VALGRIND VALGRIND_MEMPOOL_FREE(&bp->buf_2K, addr); #endif return; } } for (i = 0; i < NUMBER_4K_BUFS; i++) { if (addr == (char *)&bp->buf_4K[i]) { bp->buf_inuse[B4K] &= ~(1 << i); #ifdef VALGRIND VALGRIND_MEMPOOL_FREE(&bp->buf_4K, addr); #endif return; } } for (i = 0; i < NUMBER_8K_BUFS; i++) { if (addr == (char *)&bp->buf_8K[i]) { bp->buf_inuse[B8K] &= ~(1 << i); #ifdef VALGRIND VALGRIND_MEMPOOL_FREE(&bp->buf_8K, addr); #endif return; } } for (i = 0; i < NUMBER_32K_BUFS; i++) { if (addr == (char *)&bp->buf_32K[i]) { bp->buf_inuse[B32K] &= ~(1 << i); #ifdef VALGRIND VALGRIND_MEMPOOL_FREE(&bp->buf_32K, addr); #endif return; } } for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i] == addr) { free(bp->malloc_bp[i]); bp->malloc_bp[i] = NULL; bp->frees++; return; } } error(FATAL, "freeing an unknown buffer -- shared buffer inconsistency!\n"); } /* DEBUG */ void dump_embedded(char *s) { struct shared_bufs *bp; char *p1; p1 = s ? 
s : ""; bp = &shared_bufs; console("%s: embedded: %ld mallocs: %ld frees: %ld\n", p1, bp->embedded, bp->mallocs, bp->frees); } /* DEBUG */ long get_embedded(void) { struct shared_bufs *bp; bp = &shared_bufs; return(bp->embedded); } /* * "help -b" output */ void dump_shared_bufs(void) { int i; struct shared_bufs *bp; bp = &shared_bufs; fprintf(fp, " buf_1K_used: %ld\n", bp->buf_1K_used); fprintf(fp, " buf_2K_used: %ld\n", bp->buf_2K_used); fprintf(fp, " buf_4K_used: %ld\n", bp->buf_4K_used); fprintf(fp, " buf_8K_used: %ld\n", bp->buf_8K_used); fprintf(fp, " buf_32K_used: %ld\n", bp->buf_32K_used); fprintf(fp, " buf_1K_ovf: %ld\n", bp->buf_1K_ovf); fprintf(fp, " buf_2K_ovf: %ld\n", bp->buf_2K_ovf); fprintf(fp, " buf_4K_ovf: %ld\n", bp->buf_4K_ovf); fprintf(fp, " buf_8K_ovf: %ld\n", bp->buf_8K_ovf); fprintf(fp, " buf_32K_ovf: %ld\n", bp->buf_32K_ovf); fprintf(fp, " buf_1K_maxuse: %2ld of %d\n", bp->buf_1K_maxuse, NUMBER_1K_BUFS); fprintf(fp, " buf_2K_maxuse: %2ld of %d\n", bp->buf_2K_maxuse, NUMBER_2K_BUFS); fprintf(fp, " buf_4K_maxuse: %2ld of %d\n", bp->buf_4K_maxuse, NUMBER_4K_BUFS); fprintf(fp, " buf_8K_maxuse: %2ld of %d\n", bp->buf_8K_maxuse, NUMBER_8K_BUFS); fprintf(fp, "buf_32K_maxuse: %2ld of %d\n", bp->buf_32K_maxuse, NUMBER_32K_BUFS); fprintf(fp, " buf_inuse[%d]: ", SHARED_BUF_SIZES); for (i = 0; i < SHARED_BUF_SIZES; i++) fprintf(fp, "[%lx]", (ulong)bp->buf_inuse[i]); fprintf(fp, "\n"); for (i = 0; i < MAX_MALLOC_BUFS; i++) if (bp->malloc_bp[i]) fprintf(fp, " malloc_bp[%d]: %lx\n", i, (ulong)bp->malloc_bp[i]); if (bp->smallest == 0x7fffffff) fprintf(fp, " smallest: 0\n"); else fprintf(fp, " smallest: %ld\n", bp->smallest); fprintf(fp, " largest: %ld\n", bp->largest); fprintf(fp, " embedded: %ld\n", bp->embedded); fprintf(fp, " max_embedded: %ld\n", bp->max_embedded); fprintf(fp, " mallocs: %ld\n", bp->mallocs); fprintf(fp, " frees: %ld\n", bp->frees); fprintf(fp, " reqs/total: %ld/%.0f\n", bp->reqs, bp->total); fprintf(fp, " average size: %.0f\n", 
bp->total/bp->reqs); } /* * Try to get one of the static buffers first. If not available, fall * through and get it from malloc(), keeping trace of the returned address. */ #define SHARED_BUFSIZE(size) \ ((size <= 1024) ? 1024 >> 7 : \ ((size <= 2048) ? 2048 >> 7 : \ ((size <= 4096) ? 4096 >> 7 : \ ((size <= 8192) ? 8192 >> 7 : \ ((size <= 32768) ? 32768 >> 7 : -1))))) char * getbuf(long reqsize) { int i; int index; int bdx; int mask; struct shared_bufs *bp; char *bufp; if (!reqsize) { ulong retaddr = (ulong)__builtin_return_address(0); error(FATAL, "zero-size memory allocation! (called from %lx)\n", retaddr); } bp = &shared_bufs; index = SHARED_BUFSIZE(reqsize); if (CRASHDEBUG(7) && (reqsize > MAX_CACHE_SIZE)) error(NOTE, "GETBUF request > MAX_CACHE_SIZE: %ld\n", reqsize); if (CRASHDEBUG(8)) { INDENT(bp->embedded*2); fprintf(fp, "GETBUF(%ld -> %ld)\n", reqsize, bp->embedded); } bp->embedded++; if (bp->embedded > bp->max_embedded) bp->max_embedded = bp->embedded; if (reqsize < bp->smallest) bp->smallest = reqsize; if (reqsize > bp->largest) bp->largest = reqsize; bp->total += reqsize; bp->reqs++; switch (index) { case -1: break; case 8: if (SHARED_1K_BUF_AVAIL(bp->buf_inuse[B1K])) { mask = ~(bp->buf_inuse[B1K]); bdx = ffs(mask) - 1; bufp = bp->buf_1K[bdx]; bp->buf_1K_used++; bp->buf_inuse[B1K] |= (1 << bdx); bp->buf_1K_maxuse = MAX(bp->buf_1K_maxuse, count_bits_int(bp->buf_inuse[B1K])); #ifdef VALGRIND VALGRIND_MEMPOOL_ALLOC(&bp->buf_1K, bufp, 1024); #endif BZERO(bufp, 1024); return(bufp); } bp->buf_1K_ovf++; /* FALLTHROUGH */ case 16: if (SHARED_2K_BUF_AVAIL(bp->buf_inuse[B2K])) { mask = ~(bp->buf_inuse[B2K]); bdx = ffs(mask) - 1; bufp = bp->buf_2K[bdx]; bp->buf_2K_used++; bp->buf_inuse[B2K] |= (1 << bdx); bp->buf_2K_maxuse = MAX(bp->buf_2K_maxuse, count_bits_int(bp->buf_inuse[B2K])); #ifdef VALGRIND VALGRIND_MEMPOOL_ALLOC(&bp->buf_2K, bufp, 2048); #endif BZERO(bufp, 2048); return(bufp); } bp->buf_2K_ovf++; /* FALLTHROUGH */ case 32: if 
(SHARED_4K_BUF_AVAIL(bp->buf_inuse[B4K])) { mask = ~(bp->buf_inuse[B4K]); bdx = ffs(mask) - 1; bufp = bp->buf_4K[bdx]; bp->buf_4K_used++; bp->buf_inuse[B4K] |= (1 << bdx); bp->buf_4K_maxuse = MAX(bp->buf_4K_maxuse, count_bits_int(bp->buf_inuse[B4K])); #ifdef VALGRIND VALGRIND_MEMPOOL_ALLOC(&bp->buf_4K, bufp, 4096); #endif BZERO(bufp, 4096); return(bufp); } bp->buf_4K_ovf++; /* FALLTHROUGH */ case 64: if (SHARED_8K_BUF_AVAIL(bp->buf_inuse[B8K])) { mask = ~(bp->buf_inuse[B8K]); bdx = ffs(mask) - 1; bufp = bp->buf_8K[bdx]; bp->buf_8K_used++; bp->buf_inuse[B8K] |= (1 << bdx); bp->buf_8K_maxuse = MAX(bp->buf_8K_maxuse, count_bits_int(bp->buf_inuse[B8K])); #ifdef VALGRIND VALGRIND_MEMPOOL_ALLOC(&bp->buf_8K, bufp, 8192); #endif BZERO(bufp, 8192); return(bufp); } bp->buf_8K_ovf++; /* FALLTHROUGH */ case 256: if (SHARED_32K_BUF_AVAIL(bp->buf_inuse[B32K])) { mask = ~(bp->buf_inuse[B32K]); bdx = ffs(mask) - 1; bufp = bp->buf_32K[bdx]; bp->buf_32K_used++; bp->buf_inuse[B32K] |= (1 << bdx); bp->buf_32K_maxuse = MAX(bp->buf_32K_maxuse, count_bits_int(bp->buf_inuse[B32K])); #ifdef VALGRIND VALGRIND_MEMPOOL_ALLOC(&bp->buf_32K, bufp, 32768); #endif BZERO(bufp, 32768); return(bufp); } bp->buf_32K_ovf++; break; } for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i]) continue; if ((bp->malloc_bp[i] = (char *)calloc(reqsize, 1))) { bp->mallocs++; return(bp->malloc_bp[i]); } break; } dump_shared_bufs(); return ((char *)(long) error(FATAL, "cannot allocate any more memory!\n")); } /* * Change the size of the previously-allocated memory block * pointed to by oldbuf to newsize bytes. Copy the minimum * of oldsize and newsize bytes from the oldbuf to the newbuf, * and return the address of the new buffer, which will have * a different address than oldbuf. 
*/ char * resizebuf(char *oldbuf, long oldsize, long newsize) { char *newbuf; newbuf = GETBUF(newsize); BCOPY(oldbuf, newbuf, MIN(oldsize, newsize)); FREEBUF(oldbuf); return newbuf; } /* * Duplicate a string into a buffer allocated with GETBUF(). */ char * strdupbuf(char *oldstring) { char *newstring; newstring = GETBUF(strlen(oldstring)+1); strcpy(newstring, oldstring); return newstring; } /* * Return the number of bits set in an int or long. */ int count_bits_int(int val) { int i, cnt; int total; cnt = sizeof(int) * 8; for (i = total = 0; i < cnt; i++) { if (val & 1) total++; val >>= 1; } return total; } int count_bits_long(ulong val) { int i, cnt; int total; cnt = sizeof(long) * 8; for (i = total = 0; i < cnt; i++) { if (val & 1) total++; val >>= 1; } return total; } int highest_bit_long(ulong val) { int i, cnt; int total; int highest; highest = -1; cnt = sizeof(long) * 8; for (i = total = 0; i < cnt; i++) { if (val & 1) highest = i; val >>= 1; } return highest; } int lowest_bit_long(ulong val) { int i, cnt; int lowest; lowest = -1; cnt = sizeof(long) * 8; for (i = 0; i < cnt; i++) { if (val & 1) { lowest = i; break; } val >>= 1; } return lowest; } /* * Debug routine to stop whatever's going on in its tracks. */ void drop_core(char *s) { volatile int *ptr; int i ATTRIBUTE_UNUSED; if (s && ascii_string(s)) fprintf(stderr, "%s", s); kill((pid_t)pc->program_pid, 3); ptr = NULL; while (TRUE) i = *ptr; } /* * For debug output to a device other than the current terminal. * pc->console must have been preset by: * * 1. by an .rc file setting: "set console /dev/whatever" * 2. by a runtime command: "set console /dev/whatever" * 3. during program invocation: "-c /dev/whatever" * * The first time it's called, the device will be opened. */ int console(const char *fmt, ...) 
{
	char output[BUFSIZE*2];
	va_list ap;

	/* No-op unless a console device is configured, enabled, and usable. */
	if (!pc->console || !strlen(pc->console) ||
	    (pc->flags & NO_CONSOLE) || (pc->confd == -1))
		return 0;

	if (!fmt || !strlen(fmt))
		return 0;

	va_start(ap, fmt);
	(void)vsnprintf(output, BUFSIZE*2, fmt, ap);
	va_end(ap);

	/* A confd of -2 means "configured but not yet opened": open it now. */
	if (pc->confd == -2) {
		if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) {
			error(INFO, "console device %s: %s\n",
				pc->console, strerror(errno), 0, 0);
			return 0;
		}
	}

	/* Returns the byte count written, or -1 on write failure. */
	return(write(pc->confd, output, strlen(output)));
}

/*
 *  Allocate space to store the designated console device name.
 *  If a console device pre-exists, free its name space and close the device.
 */
void
create_console_device(char *dev)
{
	if (pc->console) {
		if (pc->confd != -1)
			close(pc->confd);
		free(pc->console);
	}

	/* Mark as "not yet opened"; console() opens the device lazily. */
	pc->confd = -2;

	if ((pc->console = (char *)malloc(strlen(dev)+1)) == NULL)
		fprintf(stderr, "console name malloc: %s\n", strerror(errno));
	else {
		strcpy(pc->console, dev);
		/* Verify the device is writable; back everything out if not. */
		if (console("debug console [%ld]: %s\n",
		    pc->program_pid, (ulong)pc->console) < 0) {
			close(pc->confd);
			free(pc->console);
			pc->console = NULL;
			pc->confd = -1;
			if (!(pc->flags & RUNTIME))
				error(INFO, "cannot set console to %s\n", dev);
		}
	}
}

/*
 *  Disable console output without closing the device.
 *  Typically used with CONSOLE_OFF() macro.
 *  Returns the previous NO_CONSOLE flag state so it can be restored.
 */
int
console_off(void)
{
	int orig_no_console;

	orig_no_console = pc->flags & NO_CONSOLE;
	pc->flags |= NO_CONSOLE;

	return orig_no_console;
}

/*
 *  Re-enable console output.  Typically used with CONSOLE_ON() macro.
 *  Only clears NO_CONSOLE if it was clear before the paired console_off().
 */
int
console_on(int orig_no_console)
{
	if (!orig_no_console)
		pc->flags &= ~NO_CONSOLE;

	return(pc->flags & NO_CONSOLE);
}

/*
 *  Print a string to the console device with no formatting, useful for
 *  sending strings containing % signs.
*/ int console_verbatim(char *s) { char *p; int cnt; if (!pc->console || !strlen(pc->console) || (pc->flags & NO_CONSOLE) || (pc->confd == -1)) return 0; if (!s || !strlen(s)) return 0; if (pc->confd == -2) { if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) { fprintf(stderr, "%s: %s\n", pc->console, strerror(errno)); return 0; } } for (cnt = 0, p = s; *p; p++) { if (write(pc->confd, p, 1) != 1) break; cnt++; } return cnt; } /* * Set up a signal handler. */ void sigsetup(int sig, void *handler, struct sigaction *act,struct sigaction *oldact) { BZERO(act, sizeof(struct sigaction)); act->sa_handler = handler; act->sa_flags = SA_NOMASK; sigaction(sig, act, oldact); } /* * Convert a jiffies-based time value into a string showing the * the number of days, hours:minutes:seconds. */ #define SEC_MINUTES (60) #define SEC_HOURS (60 * SEC_MINUTES) #define SEC_DAYS (24 * SEC_HOURS) char * convert_time(ulonglong count, char *buf) { ulonglong total, days, hours, minutes, seconds; if (CRASHDEBUG(2)) error(INFO, "convert_time: %lld (%llx)\n", count, count); if (!machdep->hz) { sprintf(buf, "(cannot calculate: unknown HZ value)"); return buf; } total = (count)/(ulonglong)machdep->hz; days = total / SEC_DAYS; total %= SEC_DAYS; hours = total / SEC_HOURS; total %= SEC_HOURS; minutes = total / SEC_MINUTES; seconds = total % SEC_MINUTES; buf[0] = NULLCHAR; if (days) sprintf(buf, "%llu days, ", days); sprintf(&buf[strlen(buf)], "%02llu:%02llu:%02llu", hours, minutes, seconds); return buf; } /* * Convert a calendar time into a null-terminated string like ctime(), but * the result string contains the time zone string and does not ends with a * linefeed ('\n'). If localtime() or strftime() fails, fails back to return * POSIX time (seconds since the Epoch) or ctime() string respectively. * * NOTE: The return value points to a statically allocated string which is * overwritten by subsequent calls. 
*/
char *
ctime_tz(time_t *timep)
{
	static char buf[64];
	struct tm *tm;
	size_t size;

	if (!timep)
		return NULL;

	tm = localtime(timep);
	if (!tm) {
		/* localtime() failed: fall back to raw POSIX time. */
		snprintf(buf, sizeof(buf), "%ld", *timep);
		return buf;
	}

	size = strftime(buf, sizeof(buf), "%a %b %e %T %Z %Y", tm);
	if (!size)	/* strftime() failed: fall back to ctime() */
		return strip_linefeeds(ctime(timep));

	return buf;
}

/*
 *  Stall for a number of microseconds by sleeping in select().
 */
void
stall(ulong microseconds)
{
	struct timeval delay;

	delay.tv_sec = 0;
	delay.tv_usec = (__time_t)microseconds;

	(void) select(0, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, &delay);
}

/*
 *  Fill a buffer with a page count translated to a GB/MB/KB value.
 */
char *
pages_to_size(ulong pages, char *buf)
{
	double total;
	char *p;

	if (pages == 0) {
		sprintf(buf, "0");
		return buf;
	}

	total = (double)pages * (double)PAGESIZE();

	if (total >= GIGABYTES(1))
		sprintf(buf, "%.1f GB", total/(double)GIGABYTES(1));
	else if (total >= MEGABYTES(1))
		sprintf(buf, "%.1f MB", total/(double)MEGABYTES(1));
	else
		sprintf(buf, "%ld KB", (ulong)(total/(double)KILOBYTES(1)));

	/* Collapse "x.0 GB" to "x GB" (and likewise for MB). */
	if ((p = strstr(buf, ".0 ")))
		memmove(p, p + 2, sizeof(" GB"));

	return buf;
}

/*
 *  If the list_head.next value points to itself, it's an empty list.
 */
int
empty_list(ulong list_head_addr)
{
	ulong next;

	/* An unreadable list_head is reported as empty. */
	if (!readmem(list_head_addr, KVADDR, &next, sizeof(void *),
	    "list_head next contents", RETURN_ON_ERROR))
		return TRUE;

	return (next == list_head_addr);
}

/*
 *  Return TRUE if the given ELF machine type string matches the type
 *  this crash binary was built for.
 */
int
machine_type(char *type)
{
	return STREQ(MACHINE_TYPE, type);
}

/*
 *  Warn about a dumpfile/crash-binary machine type mismatch.
 *  Returns TRUE when the types do not match, FALSE otherwise.
 */
int
machine_type_mismatch(char *file, char *e_machine, char *alt, ulong query)
{
	if (machine_type(e_machine) || machine_type(alt))
		return FALSE;

	if (query == KDUMP_LOCAL)  /* already printed by NETDUMP_LOCAL */
		return TRUE;

	error(WARNING, "machine type mismatch:\n");

	fprintf(fp, " crash utility: %s\n", MACHINE_TYPE);
	fprintf(fp, " %s: %s%s%s\n\n", file, e_machine,
		alt ? " or " : "", alt ?
alt : ""); return TRUE; } void command_not_supported() { error(FATAL, "command not supported or applicable on this architecture or kernel\n"); } void option_not_supported(int c) { error(FATAL, "-%c option not supported or applicable on this architecture or kernel\n", (char)c); } static int please_wait_len = 0; void please_wait(char *s) { int fd; char buf[BUFSIZE]; if ((pc->flags & SILENT) || !DUMPFILE() || (pc->flags & RUNTIME)) return; if (!(pc->flags & TTY) && KVMDUMP_DUMPFILE()) { if (!isatty(fileno(stdin)) || ((fd = open("/dev/tty", O_RDONLY)) < 0)) return; close(fd); } pc->flags |= PLEASE_WAIT; please_wait_len = sprintf(buf, "\rplease wait... (%s)", s); fprintf(fp, "%s", buf); fflush(fp); } void please_wait_done(void) { if (!(pc->flags & PLEASE_WAIT)) return; pc->flags &= ~PLEASE_WAIT; fprintf(fp, "\r"); pad_line(fp, please_wait_len, ' '); fprintf(fp, "\r"); fflush(fp); } /* * Compare two pathnames. */ int pathcmp(char *p1, char *p2) { char c1, c2; do { if ((c1 = *p1++) == '/') while (*p1 == '/') { p1++; } if ((c2 = *p2++) == '/') while (*p2 == '/') { p2++; } if (c1 == '\0') return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2; } while (c1 == c2); return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2; } #include /* * Check the byte-order of an ELF file vs. the host byte order. */ int endian_mismatch(char *file, char dumpfile_endian, ulong query) { char *endian; switch (dumpfile_endian) { case ELFDATA2LSB: if (__BYTE_ORDER == __LITTLE_ENDIAN) return FALSE; endian = "little-endian"; break; case ELFDATA2MSB: if (__BYTE_ORDER == __BIG_ENDIAN) return FALSE; endian = "big-endian"; break; default: endian = "unknown"; break; } if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */ return TRUE; error(WARNING, "endian mismatch:\n"); fprintf(fp, " crash utility: %s\n", (__BYTE_ORDER == __LITTLE_ENDIAN) ? 
"little-endian" : "big-endian"); fprintf(fp, " %s: %s\n\n", file, endian); return TRUE; } uint16_t swap16(uint16_t val, int swap) { if (swap) return (((val & 0x00ff) << 8) | ((val & 0xff00) >> 8)); else return val; } uint32_t swap32(uint32_t val, int swap) { if (swap) return (((val & 0x000000ffU) << 24) | ((val & 0x0000ff00U) << 8) | ((val & 0x00ff0000U) >> 8) | ((val & 0xff000000U) >> 24)); else return val; } uint64_t swap64(uint64_t val, int swap) { if (swap) return (((val & 0x00000000000000ffULL) << 56) | ((val & 0x000000000000ff00ULL) << 40) | ((val & 0x0000000000ff0000ULL) << 24) | ((val & 0x00000000ff000000ULL) << 8) | ((val & 0x000000ff00000000ULL) >> 8) | ((val & 0x0000ff0000000000ULL) >> 24) | ((val & 0x00ff000000000000ULL) >> 40) | ((val & 0xff00000000000000ULL) >> 56)); else return val; } /* * Get a sufficiently large buffer for cpumask. * You should call FREEBUF() on the result when you no longer need it. */ ulong * get_cpumask_buf(void) { int cpulen, len_cpumask; cpulen = DIV_ROUND_UP(kt->cpus, BITS_PER_LONG) * sizeof(ulong); len_cpumask = VALID_SIZE(cpumask_t) ? SIZE(cpumask_t) : 0; if (len_cpumask > 0) cpulen = len_cpumask > cpulen ? 
cpulen : len_cpumask; return (ulong *)GETBUF(cpulen); } int make_cpumask(char *s, ulong *mask, int flags, int *errptr) { char *p, *q, *orig; int start, end; int i; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "make_cpumask: received NULL string\n"); orig = NULL; goto make_cpumask_error; } orig = strdup(s); p = strtok(s, ","); while (p) { s = strtok(NULL, ""); if (STREQ(p, "a") || STREQ(p, "all")) { start = 0; end = kt->cpus - 1; } else { start = end = -1; q = strtok(p, "-"); start = dtoi(q, flags, errptr); if ((q = strtok(NULL, "-"))) end = dtoi(q, flags, errptr); if (end == -1) end = start; } if ((start < 0) || (start >= kt->cpus) || (end < 0) || (end >= kt->cpus)) { error(INFO, "invalid cpu specification: %s\n", orig); goto make_cpumask_error; } for (i = start; i <= end; i++) SET_BIT(mask, i); p = strtok(s, ","); } free(orig); return TRUE; make_cpumask_error: free(orig); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Copy a string into a sized buffer. If necessary, truncate * the resultant string in the sized buffer so that it will * always be NULL-terminated. */ size_t strlcpy(char *dest, const char *src, size_t size) { size_t ret = strlen(src); if (size) { size_t len = (ret >= size) ? 
size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } return ret; } struct rb_node * rb_first(struct rb_root *root) { struct rb_root rloc; struct rb_node *n; struct rb_node nloc; readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), "rb_root", FAULT_ON_ERROR); n = rloc.rb_node; if (!n) return NULL; while (rb_left(n, &nloc)) n = nloc.rb_left; return n; } struct rb_node * rb_parent(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return (struct rb_node *)(nloc->rb_parent_color & ~3); } struct rb_node * rb_right(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_right; } struct rb_node * rb_left(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_left; } struct rb_node * rb_next(struct rb_node *node) { struct rb_node nloc; struct rb_node *parent; /* node is destroyed */ if (!accessible((ulong)node)) return NULL; parent = rb_parent(node, &nloc); if (parent == node) return NULL; if (nloc.rb_right) { /* rb_right is destroyed */ if (!accessible((ulong)nloc.rb_right)) return NULL; node = nloc.rb_right; while (rb_left(node, &nloc)) { /* rb_left is destroyed */ if (!accessible((ulong)nloc.rb_left)) return NULL; node = nloc.rb_left; } return node; } while ((parent = rb_parent(node, &nloc))) { /* parent is destroyed */ if (!accessible((ulong)parent)) return NULL; if (node != rb_right(parent, &nloc)) break; node = parent; } return parent; } struct rb_node * rb_last(struct rb_root *root) { struct rb_node *node; struct rb_node nloc; /* meet destroyed data */ if (!accessible((ulong)(root + OFFSET(rb_root_rb_node)))) return NULL; readmem((ulong)(root + OFFSET(rb_root_rb_node)), KVADDR, &node, sizeof(node), "rb_root node", FAULT_ON_ERROR); while (1) { if (!node) break; /* meet destroyed data */ if 
(!accessible((ulong)node)) return NULL; readmem((ulong)node, KVADDR, &nloc, sizeof(struct rb_node), "rb_node last", FAULT_ON_ERROR); /* meet the last one */ if (!nloc.rb_right) break; /* meet destroyed data */ if (!!accessible((ulong)nloc.rb_right)) break; node = nloc.rb_right; } return node; } long percpu_counter_sum_positive(ulong fbc) { int i, count; ulong addr; long ret; if (INVALID_MEMBER(percpu_counter_count)) return 0; readmem(fbc + OFFSET(percpu_counter_count), KVADDR, &ret, sizeof(long long), "percpu_counter.count", FAULT_ON_ERROR); if (INVALID_MEMBER(percpu_counter_counters)) /* !CONFIG_SMP */ return (ret < 0) ? 0 : ret; readmem(fbc + OFFSET(percpu_counter_counters), KVADDR, &addr, sizeof(void *), "percpu_counter.counters", FAULT_ON_ERROR); for (i = 0; i < kt->cpus; i++) { readmem(addr + kt->__per_cpu_offset[i], KVADDR, &count, sizeof(int), "percpu_counter.counters count", FAULT_ON_ERROR); ret += count; } return (ret < 0) ? 0 : ret; } ulong get_subsys_private(char *kset_name, char *target_name) { ulong kset_addr, kset_list, name_addr, private = 0; struct list_data list_data, *ld; char buf[32]; int i, cnt; if (!symbol_exists(kset_name)) return 0; ld = &list_data; BZERO(ld, sizeof(struct list_data)); get_symbol_data(kset_name, sizeof(ulong), &kset_addr); readmem(kset_addr + OFFSET(kset_list), KVADDR, &kset_list, sizeof(ulong), "kset.list", FAULT_ON_ERROR); ld->flags |= LIST_ALLOCATE; ld->start = kset_list; ld->end = kset_addr + OFFSET(kset_list); ld->list_head_offset = OFFSET(kobject_entry); cnt = do_list(ld); for (i = 0; i < cnt; i++) { readmem(ld->list_ptr[i] + OFFSET(kobject_name), KVADDR, &name_addr, sizeof(ulong), "kobject.name", FAULT_ON_ERROR); read_string(name_addr, buf, sizeof(buf)-1); if (CRASHDEBUG(1)) fprintf(fp, "kobject: %lx name: %s\n", ld->list_ptr[i], buf); if (STREQ(buf, target_name)) { /* entry is subsys_private.subsys.kobj. See bus_to_subsys(). 
*/ private = ld->list_ptr[i] - OFFSET(kset_kobj) - OFFSET(subsys_private_subsys); break; } } FREEBUF(ld->list_ptr); return private; } crash-utility-crash-9cd43f5/lkcd_v2_v3.c0000664000372000037200000003634215107550337017504 0ustar juerghjuergh/* lkcd_v2_v3.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define LKCD_COMMON #include "defs.h" #define CONFIG_VMDUMP #include "lkcd_vmdump_v2_v3.h" static dump_header_t dump_header_v2_v3 = { 0 }; static dump_page_t dump_page = { 0 }; static dump_header_asm_t dump_header_asm = { 0 }; static void mclx_cache_page_headers_v3(void); /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. 
*/ int lkcd_dump_init_v2_v3(FILE *fp, int fd) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_header_asm_t *dha; dump_page_t *dp; lkcd->fd = fd; lkcd->fp = fp; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v2_v3; dha = &dump_header_asm; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; if (dh->dh_version & LKCD_DUMP_MCLX_V1) lseek(lkcd->fd, MCLX_V1_PAGE_HEADER_CACHE, SEEK_CUR); if (read(lkcd->fd, dha, sizeof(dump_header_asm_t)) != sizeof(dump_header_asm_t)) return FALSE; lkcd->dump_page = dp; lkcd->dump_header = dh; lkcd->dump_header_asm = dha; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); /* * Allocate and clear the benchmark offsets, one per megabyte. */ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->total_pages = dh->dh_num_pages; lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->zoned_offsets = 0; lkcd->get_dp_flags = get_dp_flags_v2_v3; lkcd->get_dp_address = get_dp_address_v2_v3; lkcd->get_dp_size = get_dp_size_v2_v3; lkcd->compression = LKCD_DUMP_COMPRESS_RLE; lkcd->page_header_size = sizeof(dump_page_t); lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: lkcd_print("reached EOF\n"); eof = TRUE; continue; } if (dp->dp_flags & ~(DUMP_COMPRESSED|DUMP_RAW|DUMP_END|LKCD_DUMP_MCLX_V0)) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1)) lkcd->flags |= LKCD_MCLX; if (dp->dp_size > 4096) { lkcd_print("dp_size > 4096: %d\n", dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_END) { 
lkcd_print("found DUMP_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(2)) break; } /* * Allocate space for LKCD_CACHED_PAGES data pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. */ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (dh->dh_version & LKCD_DUMP_MCLX_V1) mclx_cache_page_headers_v3(); if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v2_v3(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. */ uint32_t get_dp_flags_v2_v3(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_flags); } /* * Return the current page's dp_address. */ uint64_t get_dp_address_v2_v3(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_address); } void dump_dump_page_v2_v3(char *s, void *dpp) { dump_page_t *dp; uint32_t flags; int others; console(s); dp = (dump_page_t *)dpp; others = 0; console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ", dp->dp_address); console("dp_size: %ld ", dp->dp_size); console("dp_flags: %lx (", flags = dp->dp_flags); if (flags & DUMP_COMPRESSED) console("DUMP_COMPRESSED", others++); if (flags & DUMP_RAW) console("%sDUMP_RAW", others++ ? 
"|" : ""); if (flags & DUMP_END) console("%sDUMP_END", others++ ? "|" : ""); if (flags & LKCD_DUMP_MCLX_V0) console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); console(")\n"); } /* * help -S output, or as specified by arg. */ void dump_lkcd_environment_v2_v3(ulong arg) { int others; dump_header_t *dh; dump_header_asm_t *dha; dump_page_t *dp; dh = (dump_header_t *)lkcd->dump_header; dha = (dump_header_asm_t *)lkcd->dump_header_asm; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else lkcd_print("(?)\n"); others = 0; lkcd_print(" dh_version: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version); switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) { case LKCD_DUMP_V1: lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : ""); break; case LKCD_DUMP_V2: lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : ""); break; case LKCD_DUMP_V3: lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : ""); break; } if (dh->dh_version & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); if (dh->dh_version & LKCD_DUMP_MCLX_V1) lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_header_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level); others = 0; if (dh->dh_dump_level & DUMP_HEADER) lkcd_print("%sDUMP_HEADER", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_KERN) lkcd_print("%sDUMP_KERN", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_USED) lkcd_print("%sDUMP_USED", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_ALL) lkcd_print("%sDUMP_ALL", others++ ? 
"|" : ""); lkcd_print(")\n"); lkcd_print(" dh_page_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: "); lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end); lkcd_print(" dh_num_pages: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && strstr(dh->dh_panic_string, "\n") ? "" : "\n"); lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(dh->dh_time.tv_sec)))); lkcd_print(" dh_utsname:\n"); lkcd_print(" sysname: %s\n", dh->dh_utsname.sysname); lkcd_print(" nodename: %s\n", dh->dh_utsname.nodename); lkcd_print(" release: %s\n", dh->dh_utsname.release); lkcd_print(" version: %s\n", dh->dh_utsname.version); lkcd_print(" machine: %s\n", dh->dh_utsname.machine); lkcd_print(" domainname: %s\n", dh->dh_utsname.domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print("dha_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dha->dha_magic_number); if (dha->dha_magic_number == DUMP_ASM_MAGIC_NUMBER) lkcd_print("(DUMP_ASM_MAGIC_NUMBER)\n"); else lkcd_print("(?)\n"); lkcd_print(" dha_version: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dha->dha_version); lkcd_print(" dha_header_size: "); lkcd_print(BITS32() ? 
"%ld\n" : "%d\n", dha->dha_header_size); #ifdef X86 lkcd_print(" dha_esp: %lx\n", dha->dha_esp); lkcd_print(" dha_eip: %lx\n", dha->dha_eip); #endif #if defined PPC || ALPHA || IA64 /* TBD */ #endif lkcd_print(" dha_regs:\n"); #ifdef PPC lkcd_print(" (PowerPC register display TBD)\n"); #endif #ifdef IA64 lkcd_print(" (IA64 register display TBD)\n"); #endif #ifdef X86 lkcd_print(" ebx: %lx\n", dha->dha_regs.ebx); lkcd_print(" ecx: %lx\n", dha->dha_regs.ecx); lkcd_print(" edx: %lx\n", dha->dha_regs.edx); lkcd_print(" esi: %lx\n", dha->dha_regs.esi); lkcd_print(" edi: %lx\n", dha->dha_regs.edi); lkcd_print(" eax: %lx\n", dha->dha_regs.eax); lkcd_print(" xds: %x\n", dha->dha_regs.xds); lkcd_print(" xes: %x\n", dha->dha_regs.xes); lkcd_print(" orig_eax: %lx\n", dha->dha_regs.orig_eax); lkcd_print(" eip: %lx\n", dha->dha_regs.eip); lkcd_print(" xcs: %x\n", dha->dha_regs.xcs); lkcd_print(" eflags: %lx\n", dha->dha_regs.eflags); lkcd_print(" esp: %lx\n", dha->dha_regs.esp); lkcd_print(" xss: %x\n", dha->dha_regs.xss); #endif #ifdef ALPHA lkcd_print(" r0: %lx\n", dha->dha_regs.r0); lkcd_print(" r1: %lx\n", dha->dha_regs.r1); lkcd_print(" r2: %lx\n", dha->dha_regs.r2); lkcd_print(" r3: %lx\n", dha->dha_regs.r3); lkcd_print(" r4: %lx\n", dha->dha_regs.r4); lkcd_print(" r5: %lx\n", dha->dha_regs.r5); lkcd_print(" r6: %lx\n", dha->dha_regs.r6); lkcd_print(" r7: %lx\n", dha->dha_regs.r7); lkcd_print(" r8: %lx\n", dha->dha_regs.r8); lkcd_print(" r19: %lx\n", dha->dha_regs.r19); lkcd_print(" r20: %lx\n", dha->dha_regs.r20); lkcd_print(" r21: %lx\n", dha->dha_regs.r21); lkcd_print(" r22: %lx\n", dha->dha_regs.r22); lkcd_print(" r23: %lx\n", dha->dha_regs.r23); lkcd_print(" r24: %lx\n", dha->dha_regs.r24); lkcd_print(" r25: %lx\n", dha->dha_regs.r25); lkcd_print(" r26: %lx\n", dha->dha_regs.r26); lkcd_print(" r27: %lx\n", dha->dha_regs.r27); lkcd_print(" r28: %lx\n", dha->dha_regs.r28); lkcd_print(" hae: %lx\n", dha->dha_regs.hae); lkcd_print(" trap_a0: %lx\n", 
dha->dha_regs.trap_a0); lkcd_print(" trap_a1: %lx\n", dha->dha_regs.trap_a1); lkcd_print(" trap_a2: %lx\n", dha->dha_regs.trap_a2); lkcd_print(" ps: %lx\n", dha->dha_regs.ps); lkcd_print(" pc: %lx\n", dha->dha_regs.pc); lkcd_print(" gp: %lx\n", dha->dha_regs.gp); lkcd_print(" r16: %lx\n", dha->dha_regs.r16); lkcd_print(" r17: %lx\n", dha->dha_regs.r17); lkcd_print(" r18: %lx\n", dha->dha_regs.r18); #endif if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address); lkcd_print(" dp_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size); lkcd_print(" dp_flags: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_COMPRESSED) lkcd_print("DUMP_COMPRESSED", others++); if (dp->dp_flags & DUMP_RAW) lkcd_print("%sDUMP_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_END) lkcd_print("%sDUMP_END", others++ ? "|" : ""); if (dp->dp_flags & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); lkcd_print(")\n"); } /* * Read the MCLX-enhanced page header cache. Verify the first one, which * is a pointer to the page header for address 1MB, and take the rest at * blind faith. Note that the page headers do not include the 64K dump * header offset, which must be added to the values found. */ static void mclx_cache_page_headers_v3(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; if (lseek(lkcd->fd, sizeof(dump_header_t), SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. 
*/ if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-utility-crash-9cd43f5/unwind.c0000664000372000037200000024413715107550337017057 0ustar juerghjuergh/* * Copyright (C) 1999-2002 Hewlett-Packard Co * David Mosberger-Tang */ /* * unwind.c * * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010, 2012 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010, 2012 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Adapted from: * * arch/ia64/kernel/unwind.c (kernel-2.4.18-6.23) */ #ifdef IA64 /* * WARNING: unw_frame_info, pt_regs and switch_stack have been * copied to unwind.h, under the UNWIND_V[123] sections; this is * done to rectify the need for this user-land code to use the same * data structures that the target kernel is using. * * Basically it's a juggling match to keep the unw_frame_info, * switch_stack and pt_regs structures in a "known" state -- as defined by * the UNWIND_V[123] definitions used in the unwind.h header file -- and * then passed to the 3 compile lines of unwind.c to create the three * unwind_v[123].o object files. */ /* * 2004-09-14 J. Nomura Added OS_INIT handling */ /* #include can't include this -- it's changing over time! */ #include "defs.h" #include "xen_hyper_defs.h" typedef unsigned char u8; typedef unsigned long long u64; #undef PAGE_SIZE #define PAGE_SIZE PAGESIZE() #define GATE_ADDR (0xa000000000000000 + PAGE_SIZE) #define CLEAR_SCRIPT_CACHE (TRUE) #define _ASM_IA64_FPU_H #include "unwind.h" #include "unwind_i.h" #include "rse.h" static struct unw_reg_state *alloc_reg_state(void); static void free_reg_state(struct unw_reg_state *); static void rse_function_params(struct bt_info *bt, struct unw_frame_info *, char *); static int load_unw_table(int); static void verify_unw_member(char *, long); static void verify_common_struct(char *, long); static void dump_unwind_table(struct unw_table *); static int unw_init_from_blocked_task(struct unw_frame_info *, struct bt_info *); static void unw_init_from_interruption(struct unw_frame_info *, struct bt_info *, ulong, ulong); static int unw_switch_from_osinit_v1(struct unw_frame_info *, struct bt_info *); static int unw_switch_from_osinit_v2(struct unw_frame_info *, struct bt_info *); static int unw_switch_from_osinit_v3(struct unw_frame_info *, struct bt_info *, char *); static unsigned long get_init_stack_ulong(unsigned long addr); static void unw_init_frame_info(struct unw_frame_info *, struct 
bt_info *, ulong); static int find_save_locs(struct unw_frame_info *); static int unw_unwind(struct unw_frame_info *); static void run_script(struct unw_script *, struct unw_frame_info *); static struct unw_script *script_lookup(struct unw_frame_info *); static struct unw_script *script_new(unsigned long); static void script_finalize(struct unw_script *, struct unw_state_record *); static void script_emit(struct unw_script *, struct unw_insn); static void emit_nat_info(struct unw_state_record *, int, struct unw_script *); static struct unw_script *build_script(struct unw_frame_info *); static struct unw_table_entry *lookup(struct unw_table *, unsigned long); static void compile_reg(struct unw_state_record *, int, struct unw_script *); static void compile_reg_v2(struct unw_state_record *, int, struct unw_script *); #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */ #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE) #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1) #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE) #define UNW_DEBUG 0 #define UNW_STATS 0 #define p5 5 #define pNonSys p5 /* complement of pSys */ # define STAT(x...) #define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0) #undef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) /* * Local snapshot of kernel's "unw" table, minus the spinlock_t and anything * after the kernel_table. This allows the unmodified porting of the kernel * code pieces that reference "unw.xxx" directly. * * The 2.6 kernel introduced a new pt_regs_offsets[32] array positioned in * between the preg_index array and the kernel_table members. 
*/ #ifdef REDHAT static struct unw { #else static struct { spinlock_t lock; /* spinlock for unwind data */ #endif /* !REDHAT */ /* list of unwind tables (one per load-module) */ struct unw_table *tables; /* table of registers that prologues can save (and order in which they're saved): */ unsigned char save_order[8]; /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */ unsigned short sw_off[sizeof(struct unw_frame_info) / 8]; unsigned short lru_head; /* index of lead-recently used script */ unsigned short lru_tail; /* index of most-recently used script */ /* index into unw_frame_info for preserved register i */ unsigned short preg_index[UNW_NUM_REGS]; /* unwind table for the kernel: */ struct unw_table kernel_table; #ifndef REDHAT /* unwind table describing the gate page (kernel code that is mapped into user space): */ size_t gate_table_size; unsigned long *gate_table; /* hash table that maps instruction pointer to script index: */ unsigned short hash[UNW_HASH_SIZE]; /* script cache: */ struct unw_script cache[UNW_CACHE_SIZE]; # if UNW_DEBUG const char *preg_name[UNW_NUM_REGS]; # endif # if UNW_STATS struct { struct { int lookups; int hinted_hits; int normal_hits; int collision_chain_traversals; } cache; struct { unsigned long build_time; unsigned long run_time; unsigned long parse_time; int builds; int news; int collisions; int runs; } script; struct { unsigned long init_time; unsigned long unwind_time; int inits; int unwinds; } api; } stat; # endif #endif /* !REDHAT */ } unw = { 0 }; static short pt_regs_offsets[32] = { 0 }; static struct unw_reg_state * alloc_reg_state(void) { return((struct unw_reg_state *) GETBUF(sizeof(struct unw_reg_state))); } static void free_reg_state(struct unw_reg_state *rs) { FREEBUF(rs); } static struct unw_labeled_state * alloc_labeled_state(void) { return((struct unw_labeled_state *) GETBUF(sizeof(struct unw_labeled_state))); } static void free_labeled_state(struct unw_labeled_state *ls) { 
FREEBUF(ls); } typedef unsigned long unw_word; /* Unwind accessors. */ static inline unsigned long pt_regs_off_v2 (unsigned long reg) { short off = -1; if (reg < 32) off = pt_regs_offsets[reg]; if (off < 0) { if (reg > 0) error(INFO, "unwind: bad scratch reg r%lu\n", reg); off = 0; } return (unsigned long) off; } /* * Returns offset of rREG in struct pt_regs. */ static inline unsigned long pt_regs_off (unsigned long reg) { unsigned long off =0; if (machdep->flags & UNW_PTREGS) return pt_regs_off_v2(reg); if (reg >= 1 && reg <= 3) off = struct_offset(struct pt_regs, r1) + 8*(reg - 1); else if (reg <= 11) off = struct_offset(struct pt_regs, r8) + 8*(reg - 8); else if (reg <= 15) off = struct_offset(struct pt_regs, r12) + 8*(reg - 12); else if (reg <= 31) off = struct_offset(struct pt_regs, r16) + 8*(reg - 16); else if (reg > 0) error(INFO, "unwind: bad scratch reg r%lu\n", reg); return off; } #ifdef UNWIND_V1 static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { struct pt_regs *pt_unused = NULL; error(INFO, "get_scratch_regs: should not be here!\n"); return pt_unused; } #endif #ifdef UNWIND_V2 static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { if (!info->pt) { /* This should not happen with valid unwind info. */ error(INFO, "get_scratch_regs: bad unwind info: resetting info->pt\n"); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) info->pt = (unsigned long)((struct pt_regs *) info->psp - 1); else info->pt = info->sp - 16; } return (struct pt_regs *) info->pt; } #endif #ifdef UNWIND_V3 static inline struct pt_regs * get_scratch_regs (struct unw_frame_info *info) { if (!info->pt) { /* This should not happen with valid unwind info. 
*/ error(INFO, "get_scratch_regs: bad unwind info: resetting info->pt\n"); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) info->pt = (unsigned long)((struct pt_regs *) info->psp - 1); else info->pt = info->sp - 16; } return (struct pt_regs *) info->pt; } #endif int #ifdef UNWIND_V1 unw_access_gr_v1 (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) #endif #ifdef UNWIND_V2 unw_access_gr_v2 (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) #endif #ifdef UNWIND_V3 unw_access_gr_v3 (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write) #endif { unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat; struct unw_ireg *ireg; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) regnum - 1 >= 127) { error(INFO, "unwind: trying to access non-existent r%u\n", regnum); return -1; } if (regnum < 32) { if (regnum >= 4 && regnum <= 7) { /* access a preserved register */ ireg = &info->r4 + (regnum - 4); addr = ireg->loc; if (addr) { nat_addr = addr + ireg->nat.off; switch (ireg->nat.type) { case UNW_NAT_VAL: /* simulate getf.sig/setf.sig */ if (write) { if (*nat) { /* write NaTVal and be done with it */ addr[0] = 0; addr[1] = 0x1fffe; return 0; } addr[1] = 0x1003e; } else { if (addr[0] == 0 && addr[1] == 0x1ffe) { /* return NaT and be done with it */ *val = 0; *nat = 1; return 0; } } /* fall through */ case UNW_NAT_NONE: dummy_nat = 0; nat_addr = &dummy_nat; break; case UNW_NAT_MEMSTK: nat_mask = (1UL << ((long) addr & 0x1f8)/8); break; case UNW_NAT_REGSTK: nat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) addr < info->regstk.limit || (unsigned long) addr >= info->regstk.top) { error(INFO, "unwind: %p outside of regstk " "[0x%lx-0x%lx)\n", (void *) addr, info->regstk.limit, info->regstk.top); return -1; } if ((unsigned long) nat_addr >= info->regstk.top) nat_addr = &info->sw->ar_rnat; nat_mask = (1UL << ia64_rse_slot_num(addr)); break; } } else { 
addr = &info->sw->r4 + (regnum - 4); nat_addr = &info->sw->ar_unat; nat_mask = (1UL << ((long) addr & 0x1f8)/8); } } else { /* access a scratch register */ if (machdep->flags & UNW_PTREGS) { pt = get_scratch_regs(info); addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum)); } else { if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; addr = (unsigned long *) ((long) pt + pt_regs_off(regnum)); } if (info->pri_unat_loc) nat_addr = info->pri_unat_loc; else nat_addr = &info->sw->ar_unat; nat_mask = (1UL << ((long) addr & 0x1f8)/8); } } else { /* access a stacked register */ addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32); nat_addr = ia64_rse_rnat_addr(addr); if ((unsigned long) addr < info->regstk.limit || (unsigned long) addr >= info->regstk.top) { error(INFO, "unwind: ignoring attempt to access register outside of rbs\n"); return -1; } if ((unsigned long) nat_addr >= info->regstk.top) nat_addr = &info->sw->ar_rnat; nat_mask = (1UL << ia64_rse_slot_num(addr)); } if (write) { *addr = *val; if (*nat) *nat_addr |= nat_mask; else *nat_addr &= ~nat_mask; } else { if ((IA64_GET_STACK_ULONG(nat_addr) & nat_mask) == 0) { *val = IA64_GET_STACK_ULONG(addr); *nat = 0; } else { *val = 0; /* if register is a NaT, *addr may contain kernel data! 
*/ *nat = 1; } } return 0; } int #ifdef UNWIND_V1 unw_access_br_v1 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V2 unw_access_br_v2 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V3 unw_access_br_v3 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif { unsigned long *addr; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; switch (regnum) { /* scratch: */ case 0: addr = &pt->b0; break; case 6: addr = &pt->b6; break; case 7: addr = &pt->b7; break; /* preserved: */ case 1: case 2: case 3: case 4: case 5: addr = *(&info->b1_loc + (regnum - 1)); if (!addr) addr = &info->sw->b1 + (regnum - 1); break; default: error(INFO, "unwind: trying to access non-existent b%u\n", regnum); return -1; } if (write) *addr = *val; else *val = IA64_GET_STACK_ULONG(addr); return 0; } #ifdef UNWIND_V1 int unw_access_fr_v1 (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = 0; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) (regnum - 2) >= 126) { error(INFO, "unwind: trying to access non-existent f%u\n", regnum); return -1; } if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 9) addr = &pt->f6 + (regnum - 6); else addr = &info->sw->f10 + (regnum - 10); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { #ifdef REDHAT struct bt_info *bt = (struct bt_info *)info->task; addr = (struct ia64_fpreg *) (bt->task + OFFSET(task_struct_thread) + 
OFFSET(thread_struct_fph) + ((regnum - 32) * sizeof(struct ia64_fpreg))); #else struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); #endif } if (write) *addr = *val; else GET_STACK_DATA(addr, val, sizeof(struct ia64_fpreg)); return 0; } #endif #ifdef UNWIND_V2 int unw_access_fr_v2 (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = 0; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) (regnum - 2) >= 126) { error(INFO, "unwind: trying to access non-existent f%u\n", regnum); return -1; } if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 11) { pt = get_scratch_regs(info); addr = &pt->f6 + (regnum - 6); } else addr = &info->sw->f12 + (regnum - 12); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { #ifdef REDHAT struct bt_info *bt = (struct bt_info *)info->task; addr = (struct ia64_fpreg *) (bt->task + OFFSET(task_struct_thread) + OFFSET(thread_struct_fph) + ((regnum - 32) * sizeof(struct ia64_fpreg))); #else struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); #endif } if (write) *addr = *val; else GET_STACK_DATA(addr, val, sizeof(struct ia64_fpreg)); return 0; } #endif #ifdef UNWIND_V3 int unw_access_fr_v3 (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write) { struct ia64_fpreg *addr = 0; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if ((unsigned) (regnum - 2) >= 126) { error(INFO, "unwind: trying to access non-existent f%u\n", regnum); return -1; } if (regnum <= 5) { addr = *(&info->f2_loc + (regnum - 2)); if (!addr) addr = &info->sw->f2 + (regnum - 2); } else if (regnum <= 15) { if (regnum <= 11) { pt = 
get_scratch_regs(info); addr = &pt->f6 + (regnum - 6); } else addr = &info->sw->f12 + (regnum - 12); } else if (regnum <= 31) { addr = info->fr_loc[regnum - 16]; if (!addr) addr = &info->sw->f16 + (regnum - 16); } else { #ifdef REDHAT struct bt_info *bt = (struct bt_info *)info->task; addr = (struct ia64_fpreg *) (bt->task + OFFSET(task_struct_thread) + OFFSET(thread_struct_fph) + ((regnum - 32) * sizeof(struct ia64_fpreg))); #else struct task_struct *t = info->task; if (write) ia64_sync_fph(t); else ia64_flush_fph(t); addr = t->thread.fph + (regnum - 32); #endif } if (write) *addr = *val; else GET_STACK_DATA(addr, val, sizeof(struct ia64_fpreg)); return 0; } #endif int #ifdef UNWIND_V1 unw_access_ar_v1 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V2 unw_access_ar_v2 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif #ifdef UNWIND_V3 unw_access_ar_v3 (struct unw_frame_info *info, int regnum, unsigned long *val, int write) #endif { unsigned long *addr; struct pt_regs *pt; struct bt_info *bt = (struct bt_info *)info->task; if (info->flags & UNW_FLAG_INTERRUPT_FRAME) pt = (struct pt_regs *) info->psp - 1; else pt = (struct pt_regs *) info->sp - 1; switch (regnum) { case UNW_AR_BSP: addr = info->bsp_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_BSPSTORE: addr = info->bspstore_loc; if (!addr) addr = &info->sw->ar_bspstore; break; case UNW_AR_PFS: addr = info->pfs_loc; if (!addr) addr = &info->sw->ar_pfs; break; case UNW_AR_RNAT: addr = info->rnat_loc; if (!addr) addr = &info->sw->ar_rnat; break; case UNW_AR_UNAT: addr = info->unat_loc; if (!addr) addr = &info->sw->ar_unat; break; case UNW_AR_LC: addr = info->lc_loc; if (!addr) addr = &info->sw->ar_lc; break; case UNW_AR_EC: if (!info->cfm_loc) return -1; if (write) *info->cfm_loc = (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52); else *val = (IA64_GET_STACK_ULONG(info->cfm_loc) >> 52) & 0x3f; return 0; case 
UNW_AR_FPSR: addr = info->fpsr_loc; if (!addr) addr = &info->sw->ar_fpsr; break; case UNW_AR_RSC: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_rsc; break; case UNW_AR_CCV: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_ccv; break; #if defined(UNWIND_V3) case UNW_AR_CSD: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_csd; break; case UNW_AR_SSD: if (machdep->flags & UNW_PTREGS) pt = get_scratch_regs(info); addr = &pt->ar_ssd; break; #endif default: error(INFO, "unwind: trying to access non-existent ar%u\n", regnum); return -1; } if (write) *addr = *val; else *val = IA64_GET_STACK_ULONG(addr); return 0; } int #ifdef UNWIND_V1 unw_access_pr_v1 (struct unw_frame_info *info, unsigned long *val, int write) #endif #ifdef UNWIND_V2 unw_access_pr_v2 (struct unw_frame_info *info, unsigned long *val, int write) #endif #ifdef UNWIND_V3 unw_access_pr_v3 (struct unw_frame_info *info, unsigned long *val, int write) #endif { unsigned long *addr; struct bt_info *bt = (struct bt_info *)info->task; addr = info->pr_loc; if (!addr) addr = &info->sw->pr; if (write) *addr = *val; else *val = IA64_GET_STACK_ULONG(addr); return 0; } /* Routines to manipulate the state stack. */ static inline void push (struct unw_state_record *sr) { struct unw_reg_state *rs; rs = alloc_reg_state(); if (!rs) { error(INFO, "unwind: cannot stack reg state!\n"); return; } memcpy(rs, &sr->curr, sizeof(*rs)); sr->curr.next = rs; } static void pop (struct unw_state_record *sr) { struct unw_reg_state *rs = sr->curr.next; if (!rs) { error(INFO, "unwind: stack underflow!\n"); return; } memcpy(&sr->curr, rs, sizeof(*rs)); free_reg_state(rs); } /* Make a copy of the state stack. Non-recursive to avoid stack overflows. 
 */
/*
 * Duplicate the linked list of saved register states headed by "rs".
 * Returns the head of the copy, or NULL if a GETBUF-backed allocation
 * fails (alloc_reg_state()).  Deliberately iterative: a recursive copy
 * could overflow the stack on a deep state stack.
 */
static struct unw_reg_state *
dup_state_stack (struct unw_reg_state *rs)
{
	struct unw_reg_state *copy, *prev = NULL, *first = NULL;

	while (rs) {
		copy = alloc_reg_state();
		if (!copy) {
			error(INFO, "unwind.dup_state_stack: out of memory\n");
			return NULL;
		}
		memcpy(copy, rs, sizeof(*copy));
		if (first)
			prev->next = copy;	/* append to the copied chain */
		else
			first = copy;		/* remember the new head */
		rs = rs->next;
		prev = copy;
	}
	return first;
}

/* Free all stacked register states (but not RS itself).  */
static void
free_state_stack (struct unw_reg_state *rs)
{
	struct unw_reg_state *p, *next;

	/* free the chain hanging off rs->next; rs itself stays allocated */
	for (p = rs->next; p != NULL; p = next) {
		next = p->next;
		free_reg_state(p);
	}
	rs->next = NULL;
}

/* Routines to manipulate the state stack.  */

/*
 * Map an unwind descriptor "abreg" register encoding onto the local
 * unw_register_index enum.  "memory" selects the memory flavor of the
 * primary-unat pseudo register (abreg 0x62).
 */
static enum unw_register_index __attribute__((const))
decode_abreg (unsigned char abreg, int memory)
{
	switch (abreg) {
	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
	      case 0x60: return UNW_REG_PR;
	      case 0x61: return UNW_REG_PSP;
	      case 0x62:
		return memory ?
UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR; case 0x63: return UNW_REG_RP; case 0x64: return UNW_REG_BSP; case 0x65: return UNW_REG_BSPSTORE; case 0x66: return UNW_REG_RNAT; case 0x67: return UNW_REG_UNAT; case 0x68: return UNW_REG_FPSR; case 0x69: return UNW_REG_PFS; case 0x6a: return UNW_REG_LC; default: break; } error(INFO, "unwind: bad abreg=0x%x\n", abreg); return UNW_REG_LC; } static void set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val) { reg->val = val; reg->where = where; if (reg->when == UNW_WHEN_NEVER) reg->when = when; } static void alloc_spill_area (unsigned long *offp, unsigned long regsize, struct unw_reg_info *lo, struct unw_reg_info *hi) { struct unw_reg_info *reg; for (reg = hi; reg >= lo; --reg) { if (reg->where == UNW_WHERE_SPILL_HOME) { reg->where = UNW_WHERE_PSPREL; *offp -= regsize; reg->val = *offp; #ifndef KERNEL_FIX reg->val = 0x10 - *offp; *offp += regsize; #endif } } } static inline void spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t) { struct unw_reg_info *reg; for (reg = *regp; reg <= lim; ++reg) { if (reg->where == UNW_WHERE_SPILL_HOME) { reg->when = t; *regp = reg + 1; return; } } error(INFO, "unwind: excess spill!\n"); } static inline void finish_prologue (struct unw_state_record *sr) { struct unw_reg_info *reg; unsigned long off; int i; /* * First, resolve implicit register save locations (see Section "11.4.2.3 Rules * for Using Unwind Descriptors", rule 3): */ for (i = 0; i < (int) sizeof(unw.save_order)/sizeof(unw.save_order[0]); ++i) { reg = sr->curr.reg + unw.save_order[i]; if (reg->where == UNW_WHERE_GR_SAVE) { reg->where = UNW_WHERE_GR; reg->val = sr->gr_save_loc++; } } /* * Next, compute when the fp, general, and branch registers get * saved. This must come before alloc_spill_area() because * we need to know which registers are spilled to their home * locations. 
*/ if (sr->imask) { unsigned char kind, mask = 0, *cp = sr->imask; unsigned long t; static const unsigned char limit[3] = { UNW_REG_F31, UNW_REG_R7, UNW_REG_B5 }; struct unw_reg_info *(regs[3]); regs[0] = sr->curr.reg + UNW_REG_F2; regs[1] = sr->curr.reg + UNW_REG_R4; regs[2] = sr->curr.reg + UNW_REG_B1; for (t = 0; t < sr->region_len; ++t) { if ((t & 3) == 0) mask = *cp++; kind = (mask >> 2*(3-(t & 3))) & 3; if (kind > 0) spill_next_when(®s[kind - 1], sr->curr.reg + limit[kind - 1], sr->region_start + t); } } /* * Next, lay out the memory stack spill area: */ if (sr->any_spills) { off = sr->spill_offset; alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31); alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5); alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7); } } /* * Region header descriptors. */ static void desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave, struct unw_state_record *sr) { int i; if (!(sr->in_body || sr->first_region)) finish_prologue(sr); sr->first_region = 0; /* check if we're done: */ if (sr->when_target < sr->region_start + sr->region_len) { sr->done = 1; return; } for (i = 0; i < sr->epilogue_count; ++i) pop(sr); sr->epilogue_count = 0; sr->epilogue_start = UNW_WHEN_NEVER; if (!body) push(sr); sr->region_start += sr->region_len; sr->region_len = rlen; sr->in_body = body; if (!body) { for (i = 0; i < 4; ++i) { if (mask & 0x8) set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR, sr->region_start + sr->region_len - 1, grsave++); mask <<= 1; } sr->gr_save_loc = grsave; sr->any_spills = 0; sr->imask = 0; sr->spill_offset = 0x10; /* default to psp+16 */ } } /* * Prologue descriptors. 
*/ static inline void desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr) { console("desc_abi: abi: 0x%x context: %c\n", abi, context); if (((abi == 0) || (abi == 3)) && context == 'i') sr->flags |= UNW_FLAG_INTERRUPT_FRAME; else error(INFO, "unwind: ignoring unwabi(abi=0x%x,context=0x%x)\n", abi, context); } static inline void desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); brmask >>= 1; } } static inline void desc_br_mem (unsigned char brmask, struct unw_state_record *sr) { int i; for (i = 0; i < 5; ++i) { if (brmask & 1) { set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } brmask >>= 1; } } static inline void desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { int base = (i < 4) ? 
UNW_REG_F2 : UNW_REG_F16 - 4; set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } #ifndef KERNEL_FIX for (i = 0; i < 20; ++i) { if ((frmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } #endif } static inline void desc_fr_mem (unsigned char frmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((frmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } frmask >>= 1; } } static inline void desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, gr++); grmask >>= 1; } } static inline void desc_gr_mem (unsigned char grmask, struct unw_state_record *sr) { int i; for (i = 0; i < 4; ++i) { if ((grmask & 1) != 0) { set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, sr->region_start + sr->region_len - 1, 0); sr->any_spills = 1; } grmask >>= 1; } } static inline void desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr) { set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE, sr->region_start + MIN((int)t, sr->region_len - 1), 16*size); } static inline void desc_mem_stack_v (unw_word t, struct unw_state_record *sr) { sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1); } static inline void desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst); } static inline void desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1, 0x10 - 
4*pspoff); } static inline void desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr) { set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1, 4*spoff); } static inline void desc_rp_br (unsigned char dst, struct unw_state_record *sr) { sr->return_link_reg = dst; } static inline void desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr) { struct unw_reg_info *reg = sr->curr.reg + regnum; if (reg->where == UNW_WHERE_NONE) reg->where = UNW_WHERE_GR_SAVE; reg->when = sr->region_start + MIN((int)t, sr->region_len - 1); } static inline void desc_spill_base (unw_word pspoff, struct unw_state_record *sr) { sr->spill_offset = 0x10 - 4*pspoff; } static inline unsigned char * desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr) { sr->imask = imaskp; return imaskp + (2*sr->region_len + 7)/8; } /* * Body descriptors. */ static inline void desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr) { sr->epilogue_start = sr->region_start + sr->region_len - 1 - t; sr->epilogue_count = ecount + 1; } static inline void desc_copy_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; for (ls = sr->labeled_states; ls; ls = ls->next) { if (ls->label == label) { free_state_stack(&sr->curr); memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr)); sr->curr.next = dup_state_stack(ls->saved_state.next); return; } } error(INFO, "unwind: failed to find state labeled 0x%lx\n", label); } static inline void desc_label_state (unw_word label, struct unw_state_record *sr) { struct unw_labeled_state *ls; ls = alloc_labeled_state(); if (!ls) { error(INFO, "unwind.desc_label_state(): out of memory\n"); return; } ls->label = label; memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state)); ls->saved_state.next = dup_state_stack(sr->curr.next); /* insert into list of labeled states: */ ls->next = sr->labeled_states; sr->labeled_states = ls; } /* * General 
descriptors. */ static inline int desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr) { if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1)) return 0; if (qp > 0) { if ((sr->pr_val & (1UL << qp)) == 0) return 0; sr->pr_mask |= (1UL << qp); } return 1; } static inline void desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 0); r->where = UNW_WHERE_NONE; r->when = UNW_WHEN_NEVER; r->val = 0; } static inline void desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x, unsigned char ytreg, struct unw_state_record *sr) { enum unw_where where = UNW_WHERE_GR; struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; if (x) where = UNW_WHERE_BR; else if (ytreg & 0x80) where = UNW_WHERE_FR; r = sr->curr.reg + decode_abreg(abreg, 0); r->where = where; r->when = sr->region_start + MIN((int)t, sr->region_len - 1); r->val = (ytreg & 0x7f); } static inline void desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 1); r->where = UNW_WHERE_PSPREL; r->when = sr->region_start + MIN((int)t, sr->region_len - 1); r->val = 0x10 - 4*pspoff; } static inline void desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff, struct unw_state_record *sr) { struct unw_reg_info *r; if (!desc_is_active(qp, t, sr)) return; r = sr->curr.reg + decode_abreg(abreg, 1); r->where = UNW_WHERE_SPREL; r->when = sr->region_start + MIN((int)t, sr->region_len - 1); r->val = 4*spoff; } #define UNW_DEC_BAD_CODE(code) error(INFO, "unwind: unknown code 0x%02x\n", code); /* * region headers: */ #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg) #define 
UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg) /* * prologue descriptors: */ #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg) #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg) #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg) #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg) #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg) #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg) #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg) #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg) #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg) #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg) #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg) #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg) #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg) #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg) #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg) #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg) #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg) #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg) #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg) #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg)) /* * body descriptors: */ #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg) #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg) #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg) /* * general unwind descriptors: */ #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg) #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg) #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg) #define 
UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg) #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg) #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg) #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg) #include "unwind_decoder.c" /* * Run a sanity check on the common structure usage, and do an initial * read of the unw table. If anything fails, the UNW_OUT_OF_SYNC flag * will be set and backtraces not allowed. */ void #ifdef UNWIND_V1 unwind_init_v1(void) #endif #ifdef UNWIND_V2 unwind_init_v2(void) #endif #ifdef UNWIND_V3 unwind_init_v3(void) #endif { int len; struct gnu_request request, *req; req = &request; if (LKCD_KERNTYPES()) { if ((len = STRUCT_SIZE("unw")) == 0) { error(WARNING, "cannot determine unw.tables offset; no struct unw\n"); machdep->flags |= UNW_OUT_OF_SYNC; return; } machdep->machspec->unw_tables_offset = MEMBER_OFFSET("unw", "tables"); if (MEMBER_EXISTS("unw", "r0")) machdep->flags |= UNW_R0; /* * no verification of save_order, sw_off, preg_index as * we're purely depending on the structure definition. */ if (MEMBER_EXISTS("unw", "pt_regs_offsets")) { machdep->machspec->unw_pt_regs_offsets = MEMBER_OFFSET("unw", "pt_regs_offsets") - machdep->machspec->unw_tables_offset; machdep->machspec->unw_kernel_table_offset = MEMBER_OFFSET("unw", "kernel_table") - machdep->machspec->unw_tables_offset; machdep->flags |= UNW_PTREGS; } if (!load_unw_table(CLEAR_SCRIPT_CACHE)) { error(WARNING, "unwind_init: cannot read kernel unw table\n"); machdep->flags |= UNW_OUT_OF_SYNC; } machdep->machspec->unw = (void *)&unw; /* fall to common structure size verifications */ goto verify; } if (get_symbol_type("unw", "tables", req) == TYPE_CODE_UNDEF) { /* * KLUDGE ALERT: * If unw.tables cannot be ascertained by gdb, try unw.save_order, * given that it is the field just after unw.tables. 
*/ if (get_symbol_type("unw", "save_order", req) == TYPE_CODE_UNDEF) { error(WARNING, "cannot determine unw.tables offset\n"); machdep->flags |= UNW_OUT_OF_SYNC; } else req->member_offset -= BITS_PER_BYTE * sizeof(void *); if (CRASHDEBUG(1)) error(WARNING, "using unw.save_order to determine unw.tables\n"); } if (!(machdep->flags & UNW_OUT_OF_SYNC)) { machdep->machspec->unw_tables_offset = req->member_offset/BITS_PER_BYTE; if (get_symbol_type("unw", "r0", req) != TYPE_CODE_UNDEF) machdep->flags |= UNW_R0; verify_unw_member("save_order", struct_offset(struct unw, save_order)); verify_unw_member("sw_off", struct_offset(struct unw, sw_off)); verify_unw_member("preg_index", struct_offset(struct unw, preg_index)); if (get_symbol_type("unw", "pt_regs_offsets", req) == TYPE_CODE_ARRAY) { machdep->machspec->unw_pt_regs_offsets = req->member_offset/BITS_PER_BYTE - machdep->machspec->unw_tables_offset; get_symbol_type("unw", "kernel_table", req); machdep->machspec->unw_kernel_table_offset = req->member_offset/BITS_PER_BYTE - machdep->machspec->unw_tables_offset; machdep->flags |= UNW_PTREGS; } else verify_unw_member("kernel_table", struct_offset(struct unw, kernel_table)); if (!load_unw_table(CLEAR_SCRIPT_CACHE)) { error(WARNING, "unwind_init: cannot read kernel unw table\n"); machdep->flags |= UNW_OUT_OF_SYNC; } machdep->machspec->unw = (void *)&unw; } verify: verify_common_struct("unw_frame_info", sizeof(struct unw_frame_info)); verify_common_struct("unw_table", sizeof(struct unw_table)); verify_common_struct("unw_table_entry", sizeof(struct unw_table_entry)); verify_common_struct("unw_state_record", sizeof(struct unw_state_record)); verify_common_struct("unw_labeled_state", sizeof(struct unw_labeled_state)); verify_common_struct("unw_reg_info", sizeof(struct unw_reg_info)); verify_common_struct("unw_insn", sizeof(struct unw_insn)); } /* * Check whether the unw fields used in this port exist at the same * offset as the local version of the structure. 
*/ static void verify_unw_member(char *member, long loffs) { struct gnu_request request, *req; long koffs; req = &request; if (get_symbol_type("unw", member, req) == TYPE_CODE_UNDEF) { error(WARNING, "cannot determine unw.%s offset\n", member); machdep->flags |= UNW_OUT_OF_SYNC; } else { koffs = (req->member_offset/BITS_PER_BYTE) - machdep->machspec->unw_tables_offset; if (machdep->flags & UNW_R0) koffs -= sizeof(unsigned long); if (koffs != loffs) { error(WARNING, "unw.%s offset differs: %ld (local: %d)\n", member, koffs, loffs); machdep->flags |= UNW_OUT_OF_SYNC; } else if (CRASHDEBUG(3)) error(INFO, "unw.%s offset OK: %ld (local: %d)\n", member, koffs, loffs); } } /* * Check whether the sizes of common local/kernel structures match. */ static void verify_common_struct(char *structname, long loclen) { long len; len = STRUCT_SIZE(structname); if (len < 0) { error(WARNING, "cannot determine size of %s\n", structname); machdep->flags |= UNW_OUT_OF_SYNC; } else if (len != loclen) { error(WARNING, "%s size differs: %ld (local: %d)\n", structname, len, loclen); machdep->flags |= UNW_OUT_OF_SYNC; } } /* * Do a one-time read of the useful part of the kernel's unw table into the * truncated local version, followed by a one-time read of the kernel's * unw_table_entry array into a permanently allocated location. The * script cache is cleared only if requested. 
*/
/*
 * Read the kernel's unw state into the local "unw" copy and pull the whole
 * kernel unw_table_entry array into a malloc'd buffer (done once, latched
 * by UNW_READ).  Optionally (re)initializes the local script cache first.
 * Returns TRUE on success, FALSE on any failure or if already out of sync.
 */
static int
load_unw_table(int clear_cache)
{
	int i;
	size_t len;
	struct machine_specific *ms;
	struct unw_table_entry *kernel_unw_table_entry_array;

	if (machdep->flags & UNW_OUT_OF_SYNC)
		return FALSE;

	ms = machdep->machspec;

	if (clear_cache) {
		/* allocate the script cache lazily, then zero every slot */
		if (!ms->script_cache) {
			len = sizeof(struct unw_script) * UNW_CACHE_SIZE;
			if ((ms->script_cache =
			    (struct unw_script *)malloc(len)) == NULL) {
				error(WARNING,
				    "cannot malloc unw_script cache\n");
				return FALSE;
			}
		}
		for (i = 0; i < UNW_CACHE_SIZE; i++)
			BZERO((void *)&ms->script_cache[i],
				sizeof(struct unw_script));
		ms->script_index = 0;
	}

	if (machdep->flags & UNW_READ)
		return TRUE;

	if (machdep->flags & UNW_R0) {
		struct unw *unw_temp, *up;

		unw_temp = (struct unw *)GETBUF(sizeof(struct unw) * 2);
		up = unw_temp;

		if (!readmem(symbol_value("unw")+ms->unw_tables_offset,
		    KVADDR, up,
		    sizeof(struct unw) + sizeof(struct unw_table *),
		    "unw", RETURN_ON_ERROR|QUIET)) {
			FREEBUF(unw_temp);	/* was leaked on this path */
			return FALSE;
		}

		unw.tables = up->tables;

		/*
		 * Bump the "up" pointer by 8 to account for the
		 * "r0" member that comes after the "tables" member.
		 */
		up = (struct unw *)(((unsigned long)unw_temp) +
			sizeof(struct unw_table *));

		for (i = 0; i < 8; i++)
			unw.save_order[i] = up->save_order[i];
		for (i = 0; i < (sizeof(struct unw_frame_info) / 8); i++)
			unw.sw_off[i] = up->sw_off[i];
		unw.lru_head = up->lru_head;
		unw.lru_tail = up->lru_tail;
		for (i = 0; i < UNW_NUM_REGS; i++)
			unw.preg_index[i] = up->preg_index[i];
		BCOPY(&up->kernel_table, &unw.kernel_table,
			sizeof(struct unw_table));

		FREEBUF(unw_temp);
	} else {
		if (!readmem(symbol_value("unw")+ms->unw_tables_offset,
		    KVADDR, &unw, sizeof(struct unw), "unw",
		    RETURN_ON_ERROR|QUIET))
			return FALSE;
	}

	if (machdep->flags & UNW_PTREGS) {
		/* newer layout: kernel_table and pt_regs_offsets are read
		   from their individually-determined offsets */
		if (!readmem(symbol_value("unw")+ms->unw_kernel_table_offset+
		    machdep->machspec->unw_tables_offset, KVADDR,
		    &unw.kernel_table, sizeof(struct unw_table),
		    "unw.kernel_table", RETURN_ON_ERROR|QUIET))
			return FALSE;
		if (!readmem(symbol_value("unw")+ms->unw_pt_regs_offsets+
		    machdep->machspec->unw_tables_offset, KVADDR,
		    &pt_regs_offsets, sizeof(pt_regs_offsets),
		    "unw.pt_regs_offsets", RETURN_ON_ERROR|QUIET))
			return FALSE;
	}

	len = unw.kernel_table.length * sizeof(struct unw_table_entry);
	if ((kernel_unw_table_entry_array =
	    (struct unw_table_entry *)malloc(len)) == NULL) {
		/* len is a size_t: print as %ld via ulong (was %d) */
		error(WARNING,
		    "cannot malloc kernel unw.kernel_table array (len: %ld)\n",
			(ulong)len);
		return FALSE;
	}

	if (!readmem((ulong)unw.kernel_table.array, KVADDR,
	    kernel_unw_table_entry_array, len,
	    "kernel unw_table_entry array", RETURN_ON_ERROR|QUIET)) {
		error(WARNING,
		    "cannot read kernel unw.kernel_table array\n");
		free(kernel_unw_table_entry_array); /* was leaked here */
		return FALSE;
	}

	/*
	 * Bait and switch for the kernel array only.
	 */
	unw.kernel_table.array = kernel_unw_table_entry_array;

	machdep->flags |= UNW_READ;

	return TRUE;
}

/*
 * The main back trace loop.  If we get interrupted in the midst of an
 * operation, unw_in_progress will be left TRUE, and the next time we come
 * here, the script_cache will be cleared.
*/ void #ifdef UNWIND_V1 unwind_v1(struct bt_info *bt) #endif #ifdef UNWIND_V2 unwind_v2(struct bt_info *bt) #endif #ifdef UNWIND_V3 unwind_v3(struct bt_info *bt) #endif { struct unw_frame_info unw_frame_info, *info; unsigned long ip, sp, bsp; struct syment *sm; struct pt_regs *pt; int frame; char *name, *name_plus_offset; ulong offset; struct load_module *lm; static int unw_in_progress = FALSE; char buf[BUFSIZE]; if (bt->debug) CRASHDEBUG_SUSPEND(bt->debug); if (!load_unw_table(unw_in_progress ? CLEAR_SCRIPT_CACHE : 0)) error(FATAL, "unwind: cannot read kernel unw table\n"); unw_in_progress = TRUE; info = &unw_frame_info; if (!unw_init_from_blocked_task(info, bt)) goto unwind_return; frame = 0; do { restart: unw_get_ip(info, &ip); unw_get_sp(info, &sp); unw_get_bsp(info, &bsp); if (XEN_HYPER_MODE()) { if (!IS_KVADDR(ip)) break; } else { if (ip < GATE_ADDR + PAGE_SIZE) break; } name_plus_offset = NULL; if ((sm = value_search(ip, &offset))) { name = sm->name; if ((bt->flags & BT_SYMBOL_OFFSET) && offset) name_plus_offset = value_to_symstr(ip, buf, bt->radix); } else name = "(unknown)"; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str)) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == ip) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; } } else { fprintf(fp, "%s#%d [BSP:%lx] %s at %lx", frame >= 10 ? "" : " ", frame, bsp, name_plus_offset ? 
name_plus_offset : name, ip); if (module_symbol(ip, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_FULL) rse_function_params(bt, info, name); if (bt->flags & BT_LINE_NUMBERS) ia64_dump_line_number(ip); if (info->flags & UNW_FLAG_INTERRUPT_FRAME) { pt = (struct pt_regs *)info->psp - 1; ia64_exception_frame((ulong)pt, bt); } } if (STREQ(name, "start_kernel") || STREQ(name, "start_secondary") || STREQ(name, "start_kernel_thread")) break; /* * "init_handler_platform" indicates that this task was * interrupted by INIT and its stack was switched. */ if (STREQ(name, "init_handler_platform")) { unw_switch_from_osinit_v1(info, bt); frame++; goto restart; } /* * In some cases, init_handler_platform is inlined into * ia64_init_handler. */ if (STREQ(name, "ia64_init_handler")) { if (symbol_exists("ia64_mca_modify_original_stack")) { /* * 2.6.14 or later kernels no longer keep * minstate info in pt_regs/switch_stack. * unw_switch_from_osinit_v3() will try * to find the interrupted task and restart * backtrace itself. */ if (unw_switch_from_osinit_v3(info, bt, "INIT") == FALSE) break; } else { if (unw_switch_from_osinit_v2(info, bt) == FALSE) break; frame++; goto restart; } } if (STREQ(name, "ia64_mca_handler") && symbol_exists("ia64_mca_modify_original_stack")) if (unw_switch_from_osinit_v3(info, bt, "MCA") == FALSE) break; frame++; } while (unw_unwind(info) >= 0); unwind_return: if (bt->flags & BT_UNWIND_ERROR) load_unw_table(CLEAR_SCRIPT_CACHE); if (bt->debug) CRASHDEBUG_RESTORE(); unw_in_progress = FALSE; } void #ifdef UNWIND_V1 dump_unwind_stats_v1(void) #endif #ifdef UNWIND_V2 dump_unwind_stats_v2(void) #endif #ifdef UNWIND_V3 dump_unwind_stats_v3(void) #endif { int i; struct machine_specific *ms; char buf[BUFSIZE]; if (machdep->flags & UNW_OUT_OF_SYNC) { fprintf(fp, "\n"); return; } ms = machdep->machspec; fprintf(fp, " %2ld%% (%ld of %ld)\n", ms->script_cache_fills ? 
(ms->script_cache_hits * 100)/ms->script_cache_fills : 0, ms->script_cache_hits, ms->script_cache_fills); for (i = 0; i < UNW_CACHE_SIZE; i++) { if (ms->script_cache[i].ip) fprintf(fp, " [%3d]: %lx %s\n", i, ms->script_cache[i].ip, value_to_symstr(ms->script_cache[i].ip, buf, 0)); } } int #ifdef UNWIND_V1 unwind_debug_v1(ulong arg) #endif #ifdef UNWIND_V2 unwind_debug_v2(ulong arg) #endif #ifdef UNWIND_V3 unwind_debug_v3(ulong arg) #endif { struct unw_table *table, *target; struct unw_table unw_table_buf; target = (struct unw_table *)arg; table = unw.tables; do { if (!readmem((ulong)table, KVADDR, &unw_table_buf, sizeof(struct unw_table), "module unw_table", RETURN_ON_ERROR)) break; switch (arg) { case 3: dump_unwind_table(table); break; default: if (table == target) dump_unwind_table(table); break; } table = &unw_table_buf; table = table->next; } while (table); return TRUE; } static void dump_unwind_table(struct unw_table *table) { struct unw_table unw_table_buf, *tbl; readmem((ulong)table, KVADDR, &unw_table_buf, sizeof(struct unw_table), "module unw_table", RETURN_ON_ERROR); tbl = &unw_table_buf; dump_struct("unw_table", (ulong)table, RADIX(16)); } static unsigned long get_init_stack_ulong(unsigned long addr) { unsigned long tmp; readmem(addr, KVADDR, &tmp, sizeof(unsigned long), "get_init_stack_ulong", FAULT_ON_ERROR); return tmp; } static int unw_init_from_blocked_task(struct unw_frame_info *info, struct bt_info *bt) { ulong sw; sw = SWITCH_STACK_ADDR(bt->task); if (XEN_HYPER_MODE()) { if (!INSTACK(sw, bt) && !ia64_in_mca_stack_hyper(sw, bt)) return FALSE; } else { if (!INSTACK(sw, bt) && !ia64_in_init_stack(sw)) return FALSE; } unw_init_frame_info(info, bt, sw); return TRUE; } /* * unw_init_from_interruption * Initialize frame info from specified pt_regs/switch_stack. 
* * Similar to unw_init_frame_info() except that: * - do not use readmem to access stack * (because stack may be modified by unw_init_from_saved_regs) * - use ar.ifs and ar.iip instead of ar.pfs and b0, respectively * - use sof(size-of-frame) of ar.ifs to caluculate bsp, * instead of sol(size-of-local) of ar.pfs * (because of cover instruction in kernel minstate save macro) */ static void unw_init_from_interruption(struct unw_frame_info *info, struct bt_info *bt, ulong pt, ulong sw) { // unsigned long rbslimit, rbstop, stklimit, stktop, sof, ar_pfs; unsigned long rbslimit, rbstop, stklimit, stktop, sof; ulong t; t = bt->task; memset(info, 0, sizeof(*info)); rbslimit = (unsigned long) t + IA64_RBS_OFFSET; rbstop = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_ar_bspstore)); if (rbstop - (unsigned long) t >= IA64_STK_OFFSET) rbstop = rbslimit; stklimit = (unsigned long) t + IA64_STK_OFFSET; stktop = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, r12)); if (stktop <= rbstop) stktop = rbstop; info->regstk.limit = rbslimit; info->regstk.top = rbstop; info->memstk.limit = stklimit; info->memstk.top = stktop; info->task = (struct task_struct *)bt; info->sw = (struct switch_stack *)sw; info->sp = info->psp = stktop; info->pr = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_pr)); info->cfm_loc = (unsigned long *) (pt + offsetof(struct pt_regs, cr_ifs)); info->unat_loc = (unsigned long *) (pt + offsetof(struct pt_regs, ar_unat)); info->pfs_loc = (unsigned long *) (pt + offsetof(struct pt_regs, ar_pfs)); /* register stack is covered */ sof = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof); /* interrupted ip is saved in iip */ info->ip = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_iip)); #if defined(UNWIND_V2) || defined(UNWIND_V3) info->pt = pt; #endif find_save_locs(info); } /* * unw_switch_from_osinit * switch back to interrupted context * * assumption: init_handler_platform() 
has 3 arguments, * 2nd arg is pt_regs and 3rd arg is switch_stack. */ static int unw_switch_from_osinit_v1(struct unw_frame_info *info, struct bt_info *bt) { unsigned long pt, sw; char is_nat; /* pt_regs is the 2nd argument of init_handler_platform */ if (unw_get_gr(info, 33, &pt, &is_nat)) { fprintf(fp, "gr 33 get error\n"); return FALSE; } /* switch_stack is the 3rd argument of init_handler_platform */ if (unw_get_gr(info, 34, &sw, &is_nat)) { fprintf(fp, "gr 33 get error\n"); return FALSE; } unw_init_from_interruption(info, bt, pt, sw); ia64_exception_frame(pt, bt); return TRUE; } static int unw_switch_from_osinit_v2(struct unw_frame_info *info, struct bt_info *bt) { unsigned long pt, sw; char is_nat; /* pt_regs is the 1st argument of ia64_init_handler */ if (unw_get_gr(info, 32, &pt, &is_nat)) { fprintf(fp, "gr 32 get error\n"); return FALSE; } /* switch_stack is the 2nd argument of ia64_init_handler */ if (unw_get_gr(info, 33, &sw, &is_nat)) { fprintf(fp, "gr 33 get error\n"); return FALSE; } /* Fix me! */ sw = info->psp + 16; pt = sw + STRUCT_SIZE("switch_stack"); unw_init_from_interruption(info, bt, pt, sw); ia64_exception_frame(pt, bt); return TRUE; } /* CPL (current privilege level) is 2-bit field */ #define IA64_PSR_CPL0_BIT 32 #define IA64_PSR_CPL_MASK (3UL << IA64_PSR_CPL0_BIT) static int user_mode(struct bt_info *bt, unsigned long pt) { unsigned long cr_ipsr; cr_ipsr = IA64_GET_STACK_ULONG(pt + offsetof(struct pt_regs, cr_ipsr)); if (cr_ipsr & IA64_PSR_CPL_MASK) return 1; return 0; } /* * Cope with INIT/MCA stack for the kernel 2.6.14 or later * * Returns FALSE if no more unwinding is needed. 
*/ #define ALIGN16(x) ((x)&~15) static int unw_switch_from_osinit_v3(struct unw_frame_info *info, struct bt_info *bt, char *type) { unsigned long pt, sw, sos, pid; char *p, *q; struct task_context *tc = NULL; struct bt_info clone_bt; unsigned long kr_current, offset_kr; /* * The structure of INIT/MCA stack * * +---------------------------+ <-------- IA64_STK_OFFSET * | pt_regs | * +---------------------------+ * | switch_stack | * +---------------------------+ * | SAL/OS state | * +---------------------------+ * | 16 byte scratch area | * +---------------------------+ <-------- SP at start of C handler * | ..... | * +---------------------------+ * | RBS for MCA/INIT handler | * +---------------------------+ * | struct task for MCA/INIT | * +---------------------------+ <-------- bt->task */ pt = ALIGN16(bt->task + IA64_STK_OFFSET - STRUCT_SIZE("pt_regs")); sw = ALIGN16(pt - STRUCT_SIZE("switch_stack")); sos = ALIGN16(sw - STRUCT_SIZE("ia64_sal_os_state")); /* * 1. Try to find interrupted task from comm * * comm format of INIT/MCA task: * - " " * - " " * where "" is either "INIT" or "MCA". * The latter form is chosen if PID is 0. * * See ia64_mca_modify_comm() in arch/ia64/kernel/mca.c */ if (!bt->tc || !bt->tc->comm) goto find_exframe; /* * If comm is "INIT" or "MCA", it means original stack is not modified. 
*/ if (STREQ(bt->tc->comm, type)) { /* Get pid using ia64_sal_os_state */ pid = 0; offset_kr = MEMBER_OFFSET("ia64_sal_os_state", "prev_IA64_KR_CURRENT"); readmem(sos + offset_kr, KVADDR, &kr_current, sizeof(ulong), "ia64_sal_os_state prev_IA64_KR_CURRENT", FAULT_ON_ERROR); readmem(kr_current + OFFSET(task_struct_pid), KVADDR, &pid, sizeof(pid_t), "task_struct pid", FAULT_ON_ERROR); if (pid) tc = pid_to_context(pid); else { tc = pid_to_context(0); while (tc) { if (tc != bt->tc && tc->processor == bt->tc->processor) break; tc = tc->tc_next; } } if (tc) { /* Clone bt_info and do backtrace */ clone_bt_info(bt, &clone_bt, tc); if (!BT_REFERENCE_CHECK(&clone_bt)) { fprintf(fp, "(%s) INTERRUPTED TASK\n", type); print_task_header(fp, tc, 0); } if (!user_mode(bt, pt)) goto find_exframe; else if (!BT_REFERENCE_CHECK(bt)) { fprintf(fp, " #0 [interrupted in user space]\n"); /* at least show the incomplete exception frame */ bt->flags |= BT_INCOMPLETE_USER_EFRAME; ia64_exception_frame(pt, bt); } } return FALSE; } if ((p = strstr(bt->tc->comm, type))) { p += strlen(type); if (*p != ' ') goto find_exframe; if ((q = strchr(++p, ' '))) { /* * " " * * We came from one of the PID 0 swapper tasks, * so just find the one with the same cpu as * the passed-in INIT/MCA task. 
*/ tc = pid_to_context(0); while (tc) { if (tc != bt->tc && tc->processor == bt->tc->processor) break; tc = tc->tc_next; } } else if (sscanf(p, "%lu", &pid) > 0) /* " " */ tc = pid_to_context(pid); } if (tc) { /* Clone bt_info and do backtrace */ clone_bt_info(bt, &clone_bt, tc); if (!BT_REFERENCE_CHECK(&clone_bt)) { fprintf(fp, "(%s) INTERRUPTED TASK\n", type); print_task_header(fp, tc, 0); } if (!user_mode(bt, pt)) back_trace(&clone_bt); else if (!BT_REFERENCE_CHECK(bt)) { fprintf(fp, " #0 [interrupted in user space]\n"); /* at least show the incomplete exception frame */ bt->flags |= BT_INCOMPLETE_USER_EFRAME; ia64_exception_frame(pt, bt); } return FALSE; } /* task matching with INIT/MCA task's comm is not found */ find_exframe: /* * 2. If step 1 doesn't work, try best to find exception frame */ unw_init_from_interruption(info, bt, pt, sw); if (!BT_REFERENCE_CHECK(bt)) ia64_exception_frame(pt, bt); return TRUE; } static void unw_init_frame_info (struct unw_frame_info *info, struct bt_info *bt, ulong sw) { unsigned long rbslimit, rbstop, stklimit, stktop, sol, ar_pfs; ulong t; t = bt->task; /* * Subtle stuff here: we _could_ unwind through the * switch_stack frame but we don't want to do that because it * would be slow as each preserved register would have to be * processed. Instead, what we do here is zero out the frame * info and start the unwind process at the function that * created the switch_stack frame. When a preserved value in * switch_stack needs to be accessed, run_script() will * initialize the appropriate pointer on demand. 
*/ memset(info, 0, sizeof(*info)); rbslimit = (unsigned long) t + IA64_RBS_OFFSET; readmem(sw + OFFSET(switch_stack_ar_bspstore), KVADDR, &rbstop, sizeof(ulong), "switch_stack ar_bspstore", FAULT_ON_ERROR); if (rbstop - (unsigned long) t >= IA64_STK_OFFSET) rbstop = rbslimit; stklimit = (unsigned long) t + IA64_STK_OFFSET; stktop = (unsigned long) sw - 16; if (stktop <= rbstop) stktop = rbstop; info->regstk.limit = rbslimit; info->regstk.top = rbstop; info->memstk.limit = stklimit; info->memstk.top = stktop; info->task = (struct task_struct *)bt; info->sw = (struct switch_stack *)sw; info->sp = info->psp = (unsigned long) (sw + SIZE(switch_stack)) - 16; info->cfm_loc = (ulong *)(sw + OFFSET(switch_stack_ar_pfs)); ar_pfs = IA64_GET_STACK_ULONG(info->cfm_loc); sol = (ar_pfs >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol); info->ip = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_b0)); info->pr = IA64_GET_STACK_ULONG(sw + OFFSET(switch_stack_pr)); find_save_locs(info); } /* * Display the arguments to a function, presuming that they are found at * the beginning of the sol section. 
*/ #define MAX_REGISTER_PARAMS (8) static void rse_function_params(struct bt_info *bt, struct unw_frame_info *info, char *name) { int i; int numargs; char is_nat[MAX_REGISTER_PARAMS]; int retval[MAX_REGISTER_PARAMS]; char buf1[BUFSIZE], buf2[BUFSIZE], buf3[BUFSIZE], *p1; ulong arglist[MAX_REGISTER_PARAMS]; ulong ip; if (GDB_PATCHED()) return; unw_get_ip(info, &ip); numargs = MIN(get_function_numargs(ip), MAX_REGISTER_PARAMS); if (CRASHDEBUG(1)) fprintf(fp, "rse_function_params: %s: %d args\n", name, numargs); switch (numargs) { case 0: fprintf(fp, " (void)\n"); return; case -1: return; default: break; } for (i = 0; i < numargs; i++) { arglist[i] = is_nat[i] = retval[i] = 0; retval[i] = unw_get_gr(info, 32+i, &arglist[i], &is_nat[i]); } sprintf(buf1, " ("); for (i = 0; i < numargs; i++) { p1 = &buf1[strlen(buf1)]; if (retval[i] != 0) sprintf(buf2, "unknown"); if (is_nat[i]) sprintf(buf2, "[NAT]"); else { if (bt->flags & BT_FULL_SYM_SLAB) sprintf(buf2, "%s", format_stack_entry(bt, buf3, arglist[i], kt->end)); else sprintf(buf2, "%lx", arglist[i]); } sprintf(p1, "%s%s", i ? 
", " : "", buf2); if (strlen(buf1) >= 80) sprintf(p1, ",\n %s", buf2); } strcat(buf1, ")\n"); fprintf(fp, "%s", buf1); } static int find_save_locs (struct unw_frame_info *info) { struct unw_script *scr; if ((info->ip & (machdep->machspec->unimpl_va_mask | 0xf)) || IS_UVADDR(info->ip, NULL)) { info->rp_loc = 0; return -1; } scr = script_lookup(info); if (!scr) { scr = build_script(info); if (!scr) { error(INFO, "failed to build unwind script for ip %lx\n", info->ip); return -1; } } run_script(scr, info); return 0; } static int unw_unwind (struct unw_frame_info *info) { unsigned long prev_ip, prev_sp, prev_bsp; unsigned long ip, pr, num_regs; int retval; struct bt_info *bt = (struct bt_info *)info->task; prev_ip = info->ip; prev_sp = info->sp; prev_bsp = info->bsp; /* restore the ip */ if (!info->rp_loc) { error(INFO, "unwind: failed to locate return link (ip=0x%lx)!\n", info->ip); return -1; } ip = info->ip = IA64_GET_STACK_ULONG(info->rp_loc); if (ip < GATE_ADDR + PAGE_SIZE) { /* * We don't have unwind info for the gate page, * so we consider that part * of user-space for the purpose of unwinding. 
*/ console("unwind: reached user-space (ip=0x%lx)\n", ip); return -1; } /* restore the cfm: */ if (!info->pfs_loc) { error(INFO, "unwind: failed to locate ar.pfs!\n"); return -1; } info->cfm_loc = info->pfs_loc; /* restore the bsp: */ pr = info->pr; num_regs = 0; if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) { #ifdef UNWIND_V1 if ((pr & (1UL << pNonSys)) != 0) num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->sp + 16 + struct_offset(struct pt_regs, ar_pfs)); #endif #ifdef UNWIND_V2 info->pt = info->sp + 16; if ((pr & (1UL << pNonSys)) != 0) num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); #endif #ifdef UNWIND_V3 info->pt = info->sp + 16; if ((pr & (1UL << pNonSys)) != 0) num_regs = IA64_GET_STACK_ULONG(info->cfm_loc) & 0x7f; /* size of frame */ info->pfs_loc = (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); #endif } else num_regs = (IA64_GET_STACK_ULONG(info->cfm_loc) >> 7) & 0x7f; /* size of locals */ info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { error(INFO, "unwind: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", info->bsp, info->regstk.limit, info->regstk.top); return -1; } /* restore the sp: */ info->sp = info->psp; if ((info->sp < info->memstk.top || info->sp > info->memstk.limit) && !ia64_in_init_stack(info->sp)) { error(INFO, "unwind: sp (0x%lx) out of range [0x%lx-0x%lx]\n", info->sp, info->memstk.top, info->memstk.limit); return -1; } if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { error(INFO, "unwind: ip, sp, bsp remain unchanged; stopping here (ip=0x%lx)\n", ip); return -1; } /* as we unwind, the saved ar.unat becomes the primary unat: */ info->pri_unat_loc = info->unat_loc; /* finally, restore the predicates: */ unw_get_pr(info, 
&info->pr); retval = find_save_locs(info); return retval; } /* * Apply the unwinding actions represented by OPS and update SR to * reflect the state that existed upon entry to the function that this * unwinder represents. */ static void run_script (struct unw_script *script, struct unw_frame_info *state) { struct unw_insn *ip, *limit, next_insn; unsigned long opc, dst, val, off; unsigned long *s = (unsigned long *) state; struct bt_info *bt = (struct bt_info *)state->task; state->flags = script->flags; ip = script->insn; limit = script->insn + script->count; next_insn = *ip; while (ip++ < limit) { opc = next_insn.opc; dst = next_insn.dst; val = next_insn.val; next_insn = *ip; redo: switch (opc) { case UNW_INSN_ADD: s[dst] += val; break; case UNW_INSN_MOVE2: if (!s[val]) goto lazy_init; s[dst+1] = s[val+1]; s[dst] = s[val]; break; case UNW_INSN_MOVE: if (!s[val]) goto lazy_init; s[dst] = s[val]; break; #if defined(UNWIND_V2) || defined(UNWIND_V3) case UNW_INSN_MOVE_SCRATCH: if (state->pt) { s[dst] = (unsigned long) get_scratch_regs(state) + val; } else { s[dst] = 0; } break; #endif case UNW_INSN_MOVE_STACKED: s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp, val); break; case UNW_INSN_ADD_PSP: s[dst] = state->psp + val; break; case UNW_INSN_ADD_SP: s[dst] = state->sp + val; break; case UNW_INSN_SETNAT_MEMSTK: if (!state->pri_unat_loc) state->pri_unat_loc = &state->sw->ar_unat; /* register off. 
is a multiple of 8, so the least 3 bits (type) are 0 */ s[dst+1] = ((unsigned long)(state->pri_unat_loc) - s[dst]) | UNW_NAT_MEMSTK; break; case UNW_INSN_SETNAT_TYPE: s[dst+1] = val; break; case UNW_INSN_LOAD: #if UNW_DEBUG if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0 || s[val] < TASK_SIZE) { debug(1, "unwind: rejecting bad psp=0x%lx\n", s[val]); break; } #endif s[dst] = IA64_GET_STACK_ULONG(s[val]); break; } } return; lazy_init: off = unw.sw_off[val]; s[val] = (unsigned long) state->sw + off; if (off >= struct_offset(struct switch_stack, r4) && off <= struct_offset(struct switch_stack, r7)) /* * We're initializing a general register: init NaT info, too. Note that * the offset is a multiple of 8 which gives us the 3 bits needed for * the type field. */ s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK; goto redo; } /* * Don't bother with the kernel's script hashing scheme -- we're not worried * about lookup speed. */ static struct unw_script * script_lookup(struct unw_frame_info *info) { int i; struct unw_script *script; unsigned long ip, pr; struct machine_specific *ms; ms = machdep->machspec; ms->script_cache_fills++; ip = info->ip; pr = info->pr; for (i = 0; i < UNW_CACHE_SIZE; i++) { script = &ms->script_cache[i]; if (!script->ip) break; if ((ip == script->ip) && (((pr ^ script->pr_val) & script->pr_mask) == 0)) { ms->script_cache_hits++; return script; } } return NULL; } static struct unw_script * script_new(unsigned long ip) { struct unw_script *script; struct machine_specific *ms; ms = machdep->machspec; script = &ms->script_cache[ms->script_index]; BZERO(script, sizeof(struct unw_script)); ms->script_index++; ms->script_index %= UNW_CACHE_SIZE; script->ip = ip; return script; } static void script_finalize (struct unw_script *script, struct unw_state_record *sr) { script->pr_mask = sr->pr_mask; script->pr_val = sr->pr_val; } static void script_emit(struct unw_script *script, struct unw_insn insn) { if 
(script->count >= UNW_MAX_SCRIPT_LEN) { error(INFO, "unwind: script exceeds maximum size of %u instructions!\n", UNW_MAX_SCRIPT_LEN); return; } script->insn[script->count++] = insn; } static void emit_nat_info(struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; struct unw_insn insn; unsigned long val = 0; switch (r->where) { case UNW_WHERE_GR: if (r->val >= 32) { /* register got spilled to a stacked register */ opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_REGSTK; } else /* register got spilled to a scratch register */ opc = UNW_INSN_SETNAT_MEMSTK; break; case UNW_WHERE_FR: opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_VAL; break; case UNW_WHERE_BR: opc = UNW_INSN_SETNAT_TYPE; val = UNW_NAT_NONE; break; case UNW_WHERE_PSPREL: case UNW_WHERE_SPREL: opc = UNW_INSN_SETNAT_MEMSTK; break; default: error(INFO, "unwind: don't know how to emit nat info for where = %u\n", r->where); return; } insn.opc = opc; insn.dst = unw.preg_index[i]; insn.val = val; script_emit(script, insn); } /* * Build an unwind script that unwinds from state OLD_STATE to the * entrypoint of the function that called OLD_STATE. 
*/ #define UNWIND_INFO_BUFSIZE (3000) /* absurdly large static buffer that */ /* should avoid need for GETBUF() */ static struct unw_script * build_script (struct unw_frame_info *info) { const struct unw_table_entry *e = 0; struct unw_script *script = 0; struct unw_labeled_state *ls, *next; unsigned long ip = info->ip; struct unw_state_record sr; struct unw_table *table; struct unw_reg_info *r; struct unw_insn insn; u8 *dp, *desc_end; u64 hdr; int i; struct unw_table unw_table_buf; char unwind_info_buf[UNWIND_INFO_BUFSIZE]; struct bt_info *bt = (struct bt_info *)info->task; /* build state record */ memset(&sr, 0, sizeof(sr)); for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) r->when = UNW_WHEN_NEVER; sr.pr_val = info->pr; script = script_new(ip); if (!script) { error(INFO, "failed to create a new unwind script\n"); return 0; } /* * The kernel table is embedded and guaranteed to be the first * one on the list. */ table = &unw.kernel_table; if (ip >= table->start && ip < table->end) e = lookup(table, ip - table->segment_base); /* * If not found, walk through the module list. 
*/ while (!e && table->next) { if (!readmem((ulong)table->next, KVADDR, &unw_table_buf, sizeof(struct unw_table), "module unw_table", RETURN_ON_ERROR)) break; table = &unw_table_buf; if (ip >= table->start && ip < table->end) e = lookup(table, ip - table->segment_base); } if (!e) { /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ if (CRASHDEBUG(2)) error(INFO, "unwind: no unwind info for ip %lx\n", ip); bt->flags |= BT_UNWIND_ERROR; sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; sr.curr.reg[UNW_REG_RP].when = -1; sr.curr.reg[UNW_REG_RP].val = 0; compile_reg(&sr, UNW_REG_RP, script); script_finalize(script, &sr); return script; } sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16 + (ip & 0xfUL)); #ifdef REDHAT readmem((ulong)(table->segment_base + e->info_offset), KVADDR, unwind_info_buf, UNWIND_INFO_BUFSIZE, "unwind info", FAULT_ON_ERROR); hdr = *(u64 *)unwind_info_buf; if (((UNW_LENGTH(hdr)*8)+8) > UNWIND_INFO_BUFSIZE) error(FATAL, "absurdly large unwind_info: %d (redefine UNWIND_INFO_BUFSIZE)\n", (UNW_LENGTH(hdr)*8)+8); dp = (u8 *)(unwind_info_buf + 8); desc_end = dp + 8*UNW_LENGTH(hdr); #else hdr = *(u64 *) (table->segment_base + e->info_offset); dp = (u8 *) (table->segment_base + e->info_offset + 8); desc_end = dp + 8*UNW_LENGTH(hdr); #endif while (!sr.done && dp < desc_end) dp = unw_decode(dp, sr.in_body, &sr); if (sr.when_target > sr.epilogue_start) { /* * sp has been restored and all values on the memory stack below * psp also have been restored. */ sr.curr.reg[UNW_REG_PSP].val = 0; sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE; sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER; for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10) || r->where == UNW_WHERE_SPREL) { r->val = 0; r->where = UNW_WHERE_NONE; r->when = UNW_WHEN_NEVER; } } script->flags = sr.flags; /* * If RP did't get saved, generate entry for the return link * register. 
*/ if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) { sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; sr.curr.reg[UNW_REG_RP].when = -1; sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; } /* translate state record into unwinder instructions: */ /* * First, set psp if we're dealing with a fixed-size frame; * subsequent instructions may depend on this value. */ if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE) && sr.curr.reg[UNW_REG_PSP].val != 0) { /* new psp is sp plus frame size */ insn.opc = UNW_INSN_ADD; insn.dst = struct_offset(struct unw_frame_info, psp)/8; insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */ script_emit(script, insn); } /* determine where the primary UNaT is: */ if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) i = UNW_REG_PRI_UNAT_MEM; else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when) i = UNW_REG_PRI_UNAT_GR; else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) i = UNW_REG_PRI_UNAT_MEM; else i = UNW_REG_PRI_UNAT_GR; compile_reg(&sr, i, script); for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i) compile_reg(&sr, i, script); /* free labeled register states & stack: */ for (ls = sr.labeled_states; ls; ls = next) { next = ls->next; free_state_stack(&ls->saved_state); free_labeled_state(ls); } free_state_stack(&sr.curr); script_finalize(script, &sr); return script; } static struct unw_table_entry * lookup(struct unw_table *table, unsigned long rel_ip) { struct unw_table_entry *e = 0; unsigned long lo, hi, mid; struct unw_table_entry *array, *loc_array; static struct unw_table_entry e_returned; if (table == &unw.kernel_table) { array = (struct unw_table_entry *)table->array; loc_array = NULL; } else { loc_array = (struct unw_table_entry *) GETBUF(table->length * sizeof(struct unw_table_entry)); if (!readmem((ulong)table->array, KVADDR, loc_array, table->length * sizeof(struct unw_table_entry), "module unw_table_entry array", 
RETURN_ON_ERROR|QUIET)) { if (IS_MODULE_VADDR(table->segment_base + rel_ip)) error(WARNING, "cannot read module unw_table_entry array\n"); return 0; } array = loc_array; } /* do a binary search for right entry: */ for (lo = 0, hi = table->length; lo < hi; ) { mid = (lo + hi) / 2; e = &array[mid]; if (rel_ip < e->start_offset) hi = mid; else if (rel_ip >= e->end_offset) lo = mid + 1; else break; } /* * Return a pointer to a static copy of "e" if found, and * give back the module buffer if used. */ if (e) { BCOPY(e, &e_returned, sizeof(struct unw_table_entry)); e = &e_returned; } if (loc_array) FREEBUF(loc_array); if (rel_ip < e->start_offset || rel_ip >= e->end_offset) return NULL; return e; } static void compile_reg (struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; unsigned long val, rval; struct unw_insn insn; long need_nat_info; if (machdep->flags & UNW_PTREGS) { compile_reg_v2(sr, i, script); return; } if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target) return; opc = UNW_INSN_MOVE; val = rval = r->val; need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7); switch (r->where) { case UNW_WHERE_GR: if (rval >= 32) { opc = UNW_INSN_MOVE_STACKED; val = rval - 32; } else if (rval >= 4 && rval <= 7) { if (need_nat_info) { opc = UNW_INSN_MOVE2; need_nat_info = 0; } val = unw.preg_index[UNW_REG_R4 + (rval - 4)]; } else { opc = UNW_INSN_ADD_SP; val = -SIZE(pt_regs) + pt_regs_off(rval); } break; case UNW_WHERE_FR: if (rval <= 5) val = unw.preg_index[UNW_REG_F2 + (rval - 2)]; else if (rval >= 16 && rval <= 31) val = unw.preg_index[UNW_REG_F16 + (rval - 16)]; else { opc = UNW_INSN_ADD_SP; val = -SIZE(pt_regs); if (rval <= 9) val += struct_offset(struct pt_regs, f6) + 16*(rval - 6); else error(INFO, "unwind: kernel may not touch f%lu\n", rval); } break; case UNW_WHERE_BR: if (rval >= 1 && rval <= 5) val = unw.preg_index[UNW_REG_B1 + (rval - 1)]; else { opc = UNW_INSN_ADD_SP; val = 
-SIZE(pt_regs); if (rval == 0) val += struct_offset(struct pt_regs, b0); else if (rval == 6) val += struct_offset(struct pt_regs, b6); else val += struct_offset(struct pt_regs, b7); } break; case UNW_WHERE_SPREL: opc = UNW_INSN_ADD_SP; break; case UNW_WHERE_PSPREL: opc = UNW_INSN_ADD_PSP; break; default: error(INFO, "unwind: register %u has unexpected `where' value of %u\n", i, r->where); break; } insn.opc = opc; insn.dst = unw.preg_index[i]; insn.val = val; script_emit(script, insn); if (need_nat_info) emit_nat_info(sr, i, script); if (i == UNW_REG_PSP) { /* * info->psp must contain the _value_ of the previous * sp, not it's save location. We get this by * dereferencing the value we just stored in * info->psp: */ insn.opc = UNW_INSN_LOAD; insn.dst = insn.val = unw.preg_index[UNW_REG_PSP]; script_emit(script, insn); } } static void compile_reg_v2 (struct unw_state_record *sr, int i, struct unw_script *script) { struct unw_reg_info *r = sr->curr.reg + i; enum unw_insn_opcode opc; unsigned long val, rval; struct unw_insn insn; long need_nat_info; if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target) return; opc = UNW_INSN_MOVE; val = rval = r->val; need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7); switch (r->where) { case UNW_WHERE_GR: if (rval >= 32) { opc = UNW_INSN_MOVE_STACKED; val = rval - 32; } else if (rval >= 4 && rval <= 7) { if (need_nat_info) { opc = UNW_INSN_MOVE2; need_nat_info = 0; } val = unw.preg_index[UNW_REG_R4 + (rval - 4)]; } else { /* register got spilled to a scratch register */ opc = UNW_INSN_MOVE_SCRATCH; val = pt_regs_off(rval); } break; case UNW_WHERE_FR: if (rval <= 5) val = unw.preg_index[UNW_REG_F2 + (rval - 2)]; else if (rval >= 16 && rval <= 31) val = unw.preg_index[UNW_REG_F16 + (rval - 16)]; else { opc = UNW_INSN_MOVE_SCRATCH; if (rval <= 11) val = offsetof(struct pt_regs, f6) + 16*(rval - 6); else error(INFO, "compile_reg: kernel may not touch f%lu\n", rval); } break; case UNW_WHERE_BR: if (rval >= 1 && rval <= 5) val = 
unw.preg_index[UNW_REG_B1 + (rval - 1)]; else { opc = UNW_INSN_MOVE_SCRATCH; if (rval == 0) val = offsetof(struct pt_regs, b0); else if (rval == 6) val = offsetof(struct pt_regs, b6); else val = offsetof(struct pt_regs, b7); } break; case UNW_WHERE_SPREL: opc = UNW_INSN_ADD_SP; break; case UNW_WHERE_PSPREL: opc = UNW_INSN_ADD_PSP; break; default: error(INFO, "compile_reg: register %u has unexpected `where' value of %u\n", i, r->where); break; } insn.opc = opc; insn.dst = unw.preg_index[i]; insn.val = val; script_emit(script, insn); if (need_nat_info) emit_nat_info(sr, i, script); if (i == UNW_REG_PSP) { /* * info->psp must contain the _value_ of the previous * sp, not it's save location. We get this by * dereferencing the value we just stored in * info->psp: */ insn.opc = UNW_INSN_LOAD; insn.dst = insn.val = unw.preg_index[UNW_REG_PSP]; script_emit(script, insn); } } #endif /* IA64 */ crash-utility-crash-9cd43f5/mips64.c0000664000372000037200000011210015107550337016655 0ustar juerghjuergh/* mips64.c - core analysis suite * * Copyright (C) 2021 Loongson Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifdef MIPS64 #include #include "defs.h" static void mips64_init_page_flags(void); static int mips64_translate_pte(ulong pte, void *physaddr, ulonglong pte64); static int mips64_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose); static int mips64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose); static int mips64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose); static void mips64_cmd_mach(void); static void mips64_display_machine_stats(void); static void mips64_back_trace_cmd(struct bt_info *bt); static void mips64_analyze_function(ulong start, ulong offset, struct mips64_unwind_frame *current, struct mips64_unwind_frame *previous); static void mips64_dump_backtrace_entry(struct bt_info *bt, struct syment *sym, struct mips64_unwind_frame *current, struct mips64_unwind_frame *previous, int level); static void mips64_dump_exception_stack(struct bt_info *bt, char *pt_regs); static int mips64_is_exception_entry(struct syment *sym); static void mips64_display_full_frame(struct bt_info *bt, struct mips64_unwind_frame *current, struct mips64_unwind_frame *previous); static void mips64_stackframe_init(void); static void mips64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp); static int mips64_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp); static int mips64_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp); static int mips64_init_active_task_regs(void); static int mips64_get_crash_notes(void); static int mips64_get_elf_notes(void); /* * 3 Levels paging PAGE_SIZE=16KB * PGD | PMD | PTE | OFFSET | * 11 | 11 | 11 | 14 | */ /* From arch/mips/include/asm/pgtable{,-64}.h */ typedef struct { ulong pgd; } pgd_t; typedef struct { ulong pmd; } pmd_t; typedef struct { ulong pte; } pte_t; #define PMD_ORDER 0 #define PTE_ORDER 0 #define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT() + PTE_ORDER - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE - 1)) #define 
PGDIR_SHIFT (PMD_SHIFT + (PAGESHIFT() + PMD_ORDER - 3)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PTRS_PER_PTE (1UL << (PAGESHIFT() - 3)) #define PTRS_PER_PMD PTRS_PER_PTE #define PTRS_PER_PGD PTRS_PER_PTE #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) #define pte_index(addr) (((addr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define MIPS64_CPU_RIXI (1UL << 23) /* CPU has TLB Read/eXec Inhibit */ /* From arch/mips/include/uapi/asm/reg.h */ #define MIPS64_EF_R0 0 #define MIPS64_EF_R29 29 #define MIPS64_EF_R31 31 #define MIPS64_EF_LO 32 #define MIPS64_EF_HI 33 #define MIPS64_EF_CP0_EPC 34 #define MIPS64_EF_CP0_BADVADDR 35 #define MIPS64_EF_CP0_STATUS 36 #define MIPS64_EF_CP0_CAUSE 37 static struct machine_specific mips64_machine_specific = { 0 }; /* * Holds registers during the crash. */ static struct mips64_register *panic_task_regs; /* * 31 15 14 12 11 10 9 8 7 6 5 4 3 2 1 0 * +-------------------+--------+--+--+--+--+--+--+--+--+--+--+--+--+ * | VPN | C | D| V| G|RI|XI|SP|PN| H| M| A| W| P| * +-------------------+--------+--+--+--+--+--+--+--+--+--+--+--+--+ */ static void mips64_init_page_flags(void) { ulong shift = 0; _PAGE_PRESENT = 1UL << shift++; _PAGE_WRITE = 1UL << shift++; _PAGE_ACCESSED = 1UL << shift++; _PAGE_MODIFIED = 1UL << shift++; _PAGE_HUGE = 1UL << shift++; _PAGE_PROTNONE = 1UL << shift++; if (THIS_KERNEL_VERSION >= LINUX(4,5,0)) _PAGE_SPECIAL = 1UL << shift++; _PAGE_NO_EXEC = 1UL << shift++; _PAGE_NO_READ = _PAGE_READ = 1UL << shift++; _PAGE_GLOBAL = 1UL << shift++; _PAGE_VALID = 1UL << shift++; _PAGE_DIRTY = 1UL << shift++; _PFN_SHIFT = PAGESHIFT() - 12 + shift + 3; } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. 
*/ static int mips64_translate_pte(ulong pte, void *physaddr, ulonglong pte64) { char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char buf[BUFSIZE]; int page_present; int len1, len2, others; ulong paddr; paddr = PTOB(pte >> _PFN_SHIFT); page_present = !!(pte & _PAGE_PRESENT); if (physaddr) { *(ulong *)physaddr = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE")); if (!page_present) return page_present; sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER | RJUST, NULL), mkstring(physbuf, len2, CENTER | RJUST, NULL)); fprintf(fp, "("); others = 0; #define CHECK_PAGE_FLAG(flag) \ if ((_PAGE_##flag) && (pte & _PAGE_##flag)) \ fprintf(fp, "%s" #flag, others++ ? "|" : "") if (pte) { CHECK_PAGE_FLAG(PRESENT); CHECK_PAGE_FLAG(WRITE); CHECK_PAGE_FLAG(ACCESSED); CHECK_PAGE_FLAG(MODIFIED); CHECK_PAGE_FLAG(HUGE); CHECK_PAGE_FLAG(PROTNONE); CHECK_PAGE_FLAG(SPECIAL); CHECK_PAGE_FLAG(NO_EXEC); CHECK_PAGE_FLAG(NO_READ); CHECK_PAGE_FLAG(READ); CHECK_PAGE_FLAG(GLOBAL); CHECK_PAGE_FLAG(VALID); CHECK_PAGE_FLAG(DIRTY); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return page_present; } /* * Virtual to physical memory translation. This function will be called * by both mips64_kvtop and mips64_uvtop. 
*/
/*
 * Walk the 3-level page table rooted at pgd to translate vaddr into
 * *paddr.  Returns TRUE on a present mapping, FALSE otherwise.  With
 * verbose set, each level of the walk is printed to fp.
 */
static int
mips64_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_ptr, pgd_val;
	ulong *pmd_ptr, pmd_val;
	ulong *pte_ptr, pte_val;

	if (verbose) {
		/* Classify the address into its MIPS64 segment, by range. */
		const char *segment;

		if (vaddr < 0x4000000000000000lu)
			segment = "xuseg";
		else if (vaddr < 0x8000000000000000lu)
			segment = "xsseg";
		else if (vaddr < 0xc000000000000000lu)
			segment = "xkphys";
		else if (vaddr < 0xffffffff80000000lu)
			segment = "xkseg";
		else if (vaddr < 0xffffffffa0000000lu)
			segment = "kseg0";
		else if (vaddr < 0xffffffffc0000000lu)
			segment = "kseg1";
		else if (vaddr < 0xffffffffe0000000lu)
			segment = "sseg";
		else
			segment = "kseg3";

		fprintf(fp, "SEGMENT: %s\n", segment);
	}

	/* Unmapped (direct-map) segments translate without a table walk. */
	if (IS_CKPHYS(vaddr) || IS_XKPHYS(vaddr)) {
		*paddr = VTOP(vaddr);
		return TRUE;
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %016lx\n", (ulong)pgd);

	/* Level 1: pgd entry, read through machdep's cached pgd page. */
	pgd_ptr = pgd + pgd_index(vaddr);
	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %16lx => %16lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	/* Level 2: pmd entry; the pgd entry holds a kernel virtual address. */
	pmd_ptr = (ulong *)(VTOP(pgd_val) + sizeof(pmd_t) * pmd_index(vaddr));
	FILL_PMD(PAGEBASE(pmd_ptr), PHYSADDR, PAGESIZE());
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr));
	if (verbose)
		fprintf(fp, " PMD: %016lx => %016lx\n", (ulong)pmd_ptr, pmd_val);
	if (!pmd_val)
		goto no_page;

	/* Level 3: pte entry. */
	pte_ptr = (ulong *)(VTOP(pmd_val) + sizeof(pte_t) * pte_index(vaddr));
	FILL_PTBL(PAGEBASE(pte_ptr), PHYSADDR, PAGESIZE());
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %016lx => %016lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	/* Entry exists but page not present (e.g. swapped out). */
	if (!(pte_val & _PAGE_PRESENT)) {
		if (verbose) {
			fprintf(fp, "\n");
			mips64_translate_pte((ulong)pte_val, 0, pte_val);
		}
		return FALSE;
	}

	*paddr = PTOB(pte_val >> _PFN_SHIFT) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %016lx\n\n", PAGEBASE(*paddr));
		mips64_translate_pte(pte_val, 0, 0);
	}

	return TRUE;
no_page:
	/* NOTE(review): printed even when !verbose -- confirm intended */
	fprintf(fp, "invalid\n");
	return FALSE;
}

/* Translates a
user virtual address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. */ static int mips64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } return mips64_pgd_vtop(pgd, vaddr, paddr, verbose);; } /* Translates a user virtual address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. */ static int mips64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if (!verbose) { if (IS_CKPHYS(kvaddr) || IS_XKPHYS(kvaddr)) { *paddr = VTOP(kvaddr); return TRUE; } } return mips64_pgd_vtop((ulong *)vt->kernel_pgd[0], kvaddr, paddr, verbose); } /* * Machine dependent command. */ static void mips64_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cmo")) != EOF) { switch (c) { case 'c': case 'm': case 'o': option_not_supported(c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); mips64_display_machine_stats(); } /* * "mach" command output. 
*/ static void mips64_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } /* * Unroll a kernel stack. */ static void mips64_back_trace_cmd(struct bt_info *bt) { struct mips64_unwind_frame current, previous; struct mips64_register *regs; struct mips64_pt_regs_main *mains; struct mips64_pt_regs_cp0 *cp0; char pt_regs[SIZE(pt_regs)]; int level = 0; int invalid_ok = 1; if (bt->flags & BT_REGS_NOT_FOUND) return; previous.sp = previous.pc = previous.ra = 0; current.pc = bt->instptr; current.sp = bt->stkptr; current.ra = 0; if (!INSTACK(current.sp, bt)) return; if (bt->machdep) { regs = bt->machdep; previous.pc = current.ra = regs->regs[MIPS64_EF_R31]; } while (current.sp <= bt->stacktop - 32 - SIZE(pt_regs)) { struct syment *symbol = NULL; ulong offset; if (CRASHDEBUG(8)) fprintf(fp, "level %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); if (!IS_KVADDR(current.pc) && !invalid_ok) return; symbol = value_search(current.pc, &offset); if (!symbol && !invalid_ok) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } invalid_ok = 0; /* * If we get an address which points to the start of a * function, then it could one of the following: * * - we are dealing with a noreturn function. The last call * from a noreturn function has an ra which points to the * start of the function after it. This is common in the * oops callchain because of die() which is annotated as * noreturn. * * - we have taken an exception at the start of this function. 
* In this case we already have the RA in current.ra. * * - we are in one of these routines which appear with zero * offset in manually-constructed stack frames: * * * ret_from_exception * * ret_from_irq * * ret_from_fork * * ret_from_kernel_thread */ if (symbol && !STRNEQ(symbol->name, "ret_from") && !offset && !current.ra && current.sp < bt->stacktop - 32 - SIZE(pt_regs)) { if (CRASHDEBUG(8)) fprintf(fp, "zero offset at %s, try previous symbol\n", symbol->name); symbol = value_search(current.pc - 4, &offset); if (!symbol) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } } if (symbol && mips64_is_exception_entry(symbol)) { mains = (struct mips64_pt_regs_main *) \ (pt_regs + OFFSET(pt_regs_regs)); cp0 = (struct mips64_pt_regs_cp0 *) \ (pt_regs + OFFSET(pt_regs_cp0_badvaddr)); GET_STACK_DATA(current.sp, pt_regs, sizeof(pt_regs)); previous.ra = mains->regs[31]; previous.sp = mains->regs[29]; current.ra = cp0->cp0_epc; if (CRASHDEBUG(8)) fprintf(fp, "exception pc %#lx ra %#lx sp %lx\n", previous.pc, previous.ra, previous.sp); /* The PC causing the exception may have been invalid */ invalid_ok = 1; } else if (symbol) { mips64_analyze_function(symbol->value, offset, ¤t, &previous); } else { /* * The current PC is invalid. Assume that the code * jumped through a invalid pointer and that the SP has * not been adjusted. 
*/ previous.sp = current.sp; } mips64_dump_backtrace_entry(bt, symbol, ¤t, &previous, level++); current.pc = current.ra; current.sp = previous.sp; current.ra = previous.ra; if (CRASHDEBUG(8)) fprintf(fp, "next %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); previous.sp = previous.pc = previous.ra = 0; } } static void mips64_analyze_function(ulong start, ulong offset, struct mips64_unwind_frame *current, struct mips64_unwind_frame *previous) { ulong i, reg; ulong rapos = 0; ulong spadjust = 0; uint32_t *funcbuf, *ip; if (CRASHDEBUG(8)) fprintf(fp, "%s: start %#lx offset %#lx\n", __func__, start, offset); if (!offset) { previous->sp = current->sp; return; } ip = funcbuf = (uint32_t *)GETBUF(offset); if (!readmem(start, KVADDR, funcbuf, offset, "mips64_analyze_function", RETURN_ON_ERROR)) { FREEBUF(funcbuf); error(WARNING, "Cannot read function at %16lx\n", start); return; } for (i = 0; i < offset; i += 4) { ulong insn = *ip & 0xffffffff; ulong high = (insn >> 16) & 0xffff; ulong low = insn & 0xffff; if (CRASHDEBUG(8)) fprintf(fp, "insn @ %#lx = %#lx\n", start + i, insn); if (high == 0x27bd || high == 0x67bd) { /* ADDIU/DADDIU sp, sp, imm */ if (!(low & 0x8000)) break; spadjust += 0x10000 - low; if (CRASHDEBUG(8)) fprintf(fp, "spadjust = %lu\n", spadjust); } else if (high == 0xafbf) { /* SW RA, imm(SP) */ rapos = current->sp + low; if (CRASHDEBUG(8)) fprintf(fp, "rapos %lx\n", rapos); break; } else if (high == 0xffbf) { /* SD RA, imm(SP) */ rapos = current->sp + low; if (CRASHDEBUG(8)) fprintf(fp, "rapos %lx\n", rapos); break; } else if ((insn & 0xffe08020) == 0xeba00020) { /* GSSQ reg, reg, offset(SP) */ reg = (insn >> 16) & 0x1f; if (reg == 31) { low = ((((insn >> 6) & 0x1ff) ^ 0x100) - 0x100) << 4; rapos = current->sp + low; if (CRASHDEBUG(8)) fprintf(fp, "rapos %lx\n", rapos); break; } reg = insn & 0x1f; if (reg == 31) { low = (((((insn >> 6) & 0x1ff) ^ 0x100) - 0x100) << 4) + 8; rapos = current->sp + low; if (CRASHDEBUG(8)) fprintf(fp, 
"rapos %lx\n", rapos); break; } } ip++; } FREEBUF(funcbuf); previous->sp = current->sp + spadjust; if (rapos && !readmem(rapos, KVADDR, ¤t->ra, sizeof(current->ra), "RA from stack", RETURN_ON_ERROR)) { error(FATAL, "Cannot read RA from stack %lx", rapos); return; } } static void mips64_dump_backtrace_entry(struct bt_info *bt, struct syment *sym, struct mips64_unwind_frame *current, struct mips64_unwind_frame *previous, int level) { const char *name = sym ? sym->name : "(invalid)"; struct load_module *lm; char *name_plus_offset = NULL; struct syment *symp; ulong symbol_offset; char buf[BUFSIZE]; char pt_regs[SIZE(pt_regs)]; if (bt->flags & BT_SYMBOL_OFFSET) { symp = value_search(current->pc, &symbol_offset); if (symp && symbol_offset) name_plus_offset = value_to_symstr(current->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%016lx] %s at %016lx", level < 10 ? " " : "", level, current->sp, name_plus_offset ? name_plus_offset : name, current->pc); if (module_symbol(current->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); /* * 'bt -l', get a line number associated with a current pc address. 
*/ if (bt->flags & BT_LINE_NUMBERS) { get_line_number(current->pc, buf, FALSE); if (strlen(buf)) fprintf(fp, " %s\n", buf); } if (sym && mips64_is_exception_entry(sym)) { GET_STACK_DATA(current->sp, &pt_regs, SIZE(pt_regs)); mips64_dump_exception_stack(bt, pt_regs); } /* bt -f */ if (bt->flags & BT_FULL) { fprintf(fp, " " "[PC: %016lx RA: %016lx SP: %016lx SIZE: %ld]\n", current->pc, current->ra, current->sp, previous->sp - current->sp); mips64_display_full_frame(bt, current, previous); } } static void mips64_dump_exception_stack(struct bt_info *bt, char *pt_regs) { struct mips64_pt_regs_main *mains; struct mips64_pt_regs_cp0 *cp0; int i; char buf[BUFSIZE]; mains = (struct mips64_pt_regs_main *) (pt_regs + OFFSET(pt_regs_regs)); cp0 = (struct mips64_pt_regs_cp0 *) \ (pt_regs + OFFSET(pt_regs_cp0_badvaddr)); for (i = 0; i < 32; i += 4) { fprintf(fp, " $%2d : %016lx %016lx %016lx %016lx\n", i, mains->regs[i], mains->regs[i+1], mains->regs[i+2], mains->regs[i+3]); } fprintf(fp, " Hi : %016lx\n", mains->hi); fprintf(fp, " Lo : %016lx\n", mains->lo); value_to_symstr(cp0->cp0_epc, buf, 16); fprintf(fp, " epc : %016lx %s\n", cp0->cp0_epc, buf); value_to_symstr(mains->regs[31], buf, 16); fprintf(fp, " ra : %016lx %s\n", mains->regs[31], buf); fprintf(fp, " Status: %016lx\n", mains->cp0_status); fprintf(fp, " Cause : %016lx\n", cp0->cp0_cause); fprintf(fp, " BadVA : %016lx\n", cp0->cp0_badvaddr); } static int mips64_is_exception_entry(struct syment *sym) { return STREQ(sym->name, "ret_from_exception") || STREQ(sym->name, "ret_from_irq") || STREQ(sym->name, "work_resched") || STREQ(sym->name, "handle_sys") || STREQ(sym->name, "handle_sysn32") || STREQ(sym->name, "handle_sys64"); } /* * 'bt -f' commend output * Display all stack data contained in a frame */ static void mips64_display_full_frame(struct bt_info *bt, struct mips64_unwind_frame *current, struct mips64_unwind_frame *previous) { int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; if (previous->sp < 
current->sp) return; if (!(INSTACK(previous->sp, bt) && INSTACK(current->sp, bt))) return; words = (previous->sp - current->sp) / sizeof(ulong) + 1; addr = current->sp; u_idx = (current->sp - bt->stackbase) / sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } static void mips64_stackframe_init(void) { long task_struct_thread = MEMBER_OFFSET("task_struct", "thread"); long thread_reg29 = MEMBER_OFFSET("thread_struct", "reg29"); long thread_reg31 = MEMBER_OFFSET("thread_struct", "reg31"); if ((task_struct_thread == INVALID_OFFSET) || (thread_reg29 == INVALID_OFFSET) || (thread_reg31 == INVALID_OFFSET)) { error(FATAL, "cannot determine thread_struct offsets\n"); return; } ASSIGN_OFFSET(task_struct_thread_reg29) = task_struct_thread + thread_reg29; ASSIGN_OFFSET(task_struct_thread_reg31) = task_struct_thread + thread_reg31; STRUCT_SIZE_INIT(pt_regs, "pt_regs"); MEMBER_OFFSET_INIT(pt_regs_regs, "pt_regs", "regs"); MEMBER_OFFSET_INIT(pt_regs_cp0_badvaddr, "pt_regs", "cp0_badvaddr"); } /* * Get a stack frame combination of pc and ra from the most relevant spot. */ static void mips64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { ulong ksp, nip; int ret = 0; nip = ksp = 0; bt->machdep = NULL; if (DUMPFILE() && is_task_active(bt->task)) ret = mips64_get_dumpfile_stack_frame(bt, &nip, &ksp); else ret = mips64_get_frame(bt, &nip, &ksp); if (!ret) error(WARNING, "cannot determine starting stack frame for task %lx\n", bt->task); if (pcp) *pcp = nip; if (spp) *spp = ksp; } /* * Get the starting point for the active cpu in a diskdump. 
*/ static int mips64_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { const struct machine_specific *ms = machdep->machspec; struct mips64_register *regs; ulong epc, r29; if (!ms->crash_task_regs) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } /* * We got registers for panic task from crash_notes. Just return them. */ regs = &ms->crash_task_regs[bt->tc->processor]; epc = regs->regs[MIPS64_EF_CP0_EPC]; r29 = regs->regs[MIPS64_EF_R29]; if (!epc && !r29) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } if (nip) *nip = epc; if (ksp) *ksp = r29; bt->machdep = regs; return TRUE; } /* * Do the work for mips64_get_stack_frame() for non-active tasks. * Get SP and PC values for idle tasks. */ static int mips64_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (!bt->tc || !(tt->flags & THREAD_INFO)) return FALSE; if (!readmem(bt->task + OFFSET(task_struct_thread_reg31), KVADDR, pcp, sizeof(*pcp), "thread_struct.regs31", RETURN_ON_ERROR)) { return FALSE; } if (!readmem(bt->task + OFFSET(task_struct_thread_reg29), KVADDR, spp, sizeof(*spp), "thread_struct.regs29", RETURN_ON_ERROR)) { return FALSE; } return TRUE; } static int mips64_init_active_task_regs(void) { int retval; retval = mips64_get_crash_notes(); if (retval == TRUE) return retval; return mips64_get_elf_notes(); } /* * Retrieve task registers for the time of the crash. */ static int mips64_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf64_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i; /* * crash_notes contains per cpu memory for storing cpu states * in case of system crash. */ if (!symbol_exists("crash_notes")) return FALSE; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. 
*/ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return FALSE; } if (symbol_exists("__per_cpu_offset")) { /* * Add __per_cpu_offset for each cpu to form the pointer to the notes */ for (i = 0; i < kt->cpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); goto fail; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf64_Nhdr *)buf; p = buf + sizeof(Elf64_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. */ long notesz = SIZE(note_buf) - sizeof(Elf64_Nhdr); if (sizeof(Elf64_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } /* * Check the sanity of NT_PRSTATUS note only for each online cpu. */ if (note->n_type != NT_PRSTATUS) { error(WARNING, "invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n"); goto fail; } if (!STRNEQ(p, "CORE")) { error(WARNING, "invalid NT_PRSTATUS note (name != \"CORE\"\n"); goto fail; } /* * Find correct location of note data. 
This contains elf_prstatus * structure which has registers etc. for the crashed task. */ offset = sizeof(Elf64_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); return TRUE; fail: FREEBUF(buf); FREEBUF(notes_ptrs); free(panic_task_regs); return FALSE; } static int mips64_get_elf_notes(void) { struct machine_specific *ms = machdep->machspec; int i; if (!DISKDUMP_DUMPFILE() && !KDUMP_DUMPFILE()) return FALSE; panic_task_regs = calloc(kt->cpus, sizeof(*panic_task_regs)); if (!panic_task_regs) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { Elf64_Nhdr *note = NULL; size_t len; if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (!note) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); BCOPY((char *)note + len + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } ms->crash_task_regs = panic_task_regs; return TRUE; } /* * Accept or reject a symbol from the kernel namelist. */ static int mips64_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_text") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STRNEQ(name, "__func__.") && !STRNEQ(name, "__crc_")); } /* * Override smp_num_cpus if possible and necessary. */ static int mips64_get_smp_cpus(void) { return (get_cpus_online() > 0) ? 
get_cpus_online() : kt->cpus; }

/*
 * Return the page size used by the kernel being analyzed.
 */
static ulong
mips64_get_page_size(void)
{
	return memory_page_size();
}

/*
 * Determine where vmalloc'd memory starts.
 */
static ulong
mips64_vmalloc_start(void)
{
	return first_vmalloc_address();
}

/*
 * Calculate and return the speed of the processor.
 *
 * The value is cached in machdep->mhz after the first successful lookup.
 * Two kernel symbols are probed in turn; note that only sizeof(int) bytes
 * are read into each (zero-initialized) unsigned long destination —
 * NOTE(review): this assumes a little-endian layout or a 4-byte kernel
 * variable; verify against the kernel side.
 */
static ulong
mips64_processor_speed(void)
{
	unsigned long cpu_hz1 = 0, cpu_hz2 = 0;

	if (machdep->mhz)
		return (machdep->mhz);

	if (symbol_exists("mips_cpu_frequency")) {
		get_symbol_data("mips_cpu_frequency", sizeof(int), &cpu_hz1);
		if (cpu_hz1)
			return(machdep->mhz = cpu_hz1/1000000);
	}

	if (symbol_exists("cpu_clock_freq")) {
		get_symbol_data("cpu_clock_freq", sizeof(int), &cpu_hz2);
		if (cpu_hz2)
			return(machdep->mhz = cpu_hz2/1000000);
	}

	return 0;
}

/*
 * Checks whether given task is valid task address.
 * With THREAD_INFO kernels any kernel virtual address is accepted;
 * otherwise the address must also be aligned to the stack size.
 */
static int
mips64_is_task_addr(ulong task)
{
	if (tt->flags & THREAD_INFO)
		return IS_KVADDR(task);

	return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0);
}

/*
 * "help -m/M" command output
 * Dump the per-architecture machdep table fields and handler names.
 */
void
mips64_dump_machdep_table(ulong arg)
{
	int others = 0;

	fprintf(fp, " flags: %lx (", machdep->flags);
	if (machdep->flags & KSYMS_START)
		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
	fprintf(fp, ")\n");
	fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
	fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base);
	fprintf(fp, " pagesize: %d\n", machdep->pagesize);
	fprintf(fp, " pageshift: %d\n", machdep->pageshift);
	fprintf(fp, " pagemask: %llx\n", machdep->pagemask);
	fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset);
	fprintf(fp, " pgdir_shift: %d\n", PGDIR_SHIFT);
	fprintf(fp, " ptrs_per_pgd: %lu\n", PTRS_PER_PGD);
	fprintf(fp, " ptrs_per_pte: %ld\n", PTRS_PER_PTE);
	fprintf(fp, " stacksize: %ld\n", machdep->stacksize);
	fprintf(fp, " hz: %d\n", machdep->hz);
	fprintf(fp, " memsize: %ld (0x%lx)\n",
		machdep->memsize, machdep->memsize);
	fprintf(fp, " bits: %d\n", machdep->bits);
	fprintf(fp, " back_trace: mips64_back_trace_cmd()\n");
	fprintf(fp, " processor_speed: mips64_processor_speed()\n");
	fprintf(fp, " uvtop: mips64_uvtop()\n");
	fprintf(fp, " kvtop: mips64_kvtop()\n");
	fprintf(fp, " get_stack_frame: mips64_get_stack_frame()\n");
	fprintf(fp, " get_stackbase: generic_get_stackbase()\n");
	fprintf(fp, " get_stacktop: generic_get_stacktop()\n");
	fprintf(fp, " translate_pte: mips64_translate_pte()\n");
	fprintf(fp, " memory_size: generic_memory_size()\n");
	fprintf(fp, " vmalloc_start: mips64_vmalloc_start()\n");
	fprintf(fp, " is_task_addr: mips64_is_task_addr()\n");
	fprintf(fp, " verify_symbol: mips64_verify_symbol()\n");
	fprintf(fp, " dis_filter: generic_dis_filter()\n");
	fprintf(fp, " dump_irq: generic_dump_irq()\n");
	fprintf(fp, " show_interrupts: generic_show_interrupts()\n");
	fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n");
	fprintf(fp, " cmd_mach: mips64_cmd_mach()\n");
	fprintf(fp, " get_smp_cpus: mips64_get_smp_cpus()\n");
	fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n");
	fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n");
	fprintf(fp, " verify_paddr: generic_verify_paddr()\n");
	fprintf(fp, " init_kernel_pgd: NULL\n");
	fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n");
	fprintf(fp, " line_number_hooks: NULL\n");
	fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read);
	fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read);
	fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read);
	fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd);
	fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd);
	fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl);
	fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits);
	fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits);
	fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root);
	fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec);
}

/*
 * Allocate one page-sized buffer for a page-table-level read cache.
 * On allocation failure, "name" is passed directly to error(FATAL, ...)
 * as the message (callers pass string literals with no '%' specifiers).
 */
static void
pt_level_alloc(char **lvl, char *name)
{
	size_t sz = PAGESIZE();
	void *pointer = malloc(sz);

	if (!pointer)
		error(FATAL, name);
	*lvl = pointer;
}

/*
 * Do all necessary machine-specific setup here. This is called several
 * times during initialization.
 */
void
mips64_init(int when)
{
	switch (when)
	{
	case SETUP_ENV:
		machdep->process_elf_notes = process_elf64_notes;
		break;

	case PRE_SYMTAB:
		machdep->verify_symbol = mips64_verify_symbol;
		machdep->machspec = &mips64_machine_specific;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = generic_verify_paddr;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		break;

	case PRE_GDB:
		/* Derive page geometry, then install the handler table. */
		machdep->pagesize = mips64_get_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		/* Kernel stacks are one page on >=16K page kernels, else two. */
		if (machdep->pagesize >= 16384)
			machdep->stacksize = machdep->pagesize;
		else
			machdep->stacksize = machdep->pagesize * 2;
		pt_level_alloc(&machdep->pgd, "cannot malloc pgd space.");
		pt_level_alloc(&machdep->pmd, "cannot malloc pmd space.");
		pt_level_alloc(&machdep->ptbl, "cannot malloc ptbl space.");
		/* XKPHYS/CKSEG base: kernel virtual space starts here. */
		machdep->kvbase = 0x8000000000000000lu;
		machdep->identity_map_base = machdep->kvbase;
		machdep->is_kvaddr = generic_is_kvaddr;
		machdep->is_uvaddr = generic_is_uvaddr;
		machdep->uvtop = mips64_uvtop;
		machdep->kvtop = mips64_kvtop;
		machdep->cmd_mach = mips64_cmd_mach;
		machdep->back_trace = mips64_back_trace_cmd;
		machdep->get_stack_frame = mips64_get_stack_frame;
		machdep->vmalloc_start = mips64_vmalloc_start;
		machdep->processor_speed = mips64_processor_speed;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = mips64_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->is_task_addr = mips64_is_task_addr;
		machdep->get_smp_cpus = mips64_get_smp_cpus;
		machdep->dis_filter = generic_dis_filter;
		machdep->dump_irq = generic_dump_irq;
		machdep->show_interrupts = generic_show_interrupts;
		machdep->get_irq_affinity = generic_get_irq_affinity;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->init_kernel_pgd = NULL;
		break;

	case POST_GDB:
		mips64_init_page_flags();
		machdep->section_size_bits = _SECTION_SIZE_BITS;
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
		mips64_stackframe_init();
		if (!machdep->hz)
			machdep->hz = 250;
		if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
				"irq_desc", NULL, 0);
		else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
				&machdep->nr_irqs);
		MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg");
		STRUCT_SIZE_INIT(note_buf, "note_buf_t");
		break;

	case POST_VM:
		/*
		 * crash_notes contains machine specific information about the
		 * crash. In particular, it contains CPU registers at the time
		 * of the crash. We need this information to extract correct
		 * backtraces from the panic task.
		 */
		if (!ACTIVE() && !mips64_init_active_task_regs())
			error(WARNING,
			    "cannot retrieve registers for active task%s\n\n",
				kt->cpus > 1 ? "s" : "");
		break;
	}
}

/*
 * 'help -r' command output
 * Print the saved register set for the given CPU, if collected.
 */
void
mips64_display_regs_from_elf_notes(int cpu, FILE *ofp)
{
	const struct machine_specific *ms = machdep->machspec;
	struct mips64_register *regs;

	if (!ms->crash_task_regs) {
		error(INFO, "registers not collected for cpu %d\n", cpu);
		return;
	}

	regs = &ms->crash_task_regs[cpu];
	/* A zero SP (R29) and EPC means this CPU's registers were never saved. */
	if (!regs->regs[MIPS64_EF_R29] && !regs->regs[MIPS64_EF_CP0_EPC]) {
		error(INFO, "registers not collected for cpu %d\n", cpu);
		return;
	}

	fprintf(ofp,
		" R0: %016lx R1: %016lx R2: %016lx\n"
		" R3: %016lx R4: %016lx R5: %016lx\n"
		" R6: %016lx R7: %016lx R8: %016lx\n"
		" R9: %016lx R10: %016lx R11: %016lx\n"
		" R12: %016lx R13: %016lx R14: %016lx\n"
		" R15: %016lx R16: %016lx R17: %016lx\n"
		" R18: %016lx R19: %016lx R20: %016lx\n"
		" R21: %016lx R22: %016lx R23: %016lx\n"
		" R24: %016lx R25: %016lx R26: %016lx\n"
		" R27: %016lx R28: %016lx R29: %016lx\n"
		" R30: %016lx R31: %016lx\n"
		" LO: %016lx HI: %016lx\n"
		" EPC: %016lx BADVADDR: %016lx\n"
		" STATUS: %016lx CAUSE: %016lx\n",
		regs->regs[MIPS64_EF_R0],
		regs->regs[MIPS64_EF_R0 + 1], regs->regs[MIPS64_EF_R0 + 2],
		regs->regs[MIPS64_EF_R0 + 3], regs->regs[MIPS64_EF_R0 + 4],
		regs->regs[MIPS64_EF_R0 + 5], regs->regs[MIPS64_EF_R0 + 6],
		regs->regs[MIPS64_EF_R0 + 7], regs->regs[MIPS64_EF_R0 + 8],
		regs->regs[MIPS64_EF_R0 + 9], regs->regs[MIPS64_EF_R0 + 10],
		regs->regs[MIPS64_EF_R0 + 11], regs->regs[MIPS64_EF_R0 + 12],
		regs->regs[MIPS64_EF_R0 + 13], regs->regs[MIPS64_EF_R0 + 14],
		regs->regs[MIPS64_EF_R0 + 15], regs->regs[MIPS64_EF_R0 + 16],
		regs->regs[MIPS64_EF_R0 + 17], regs->regs[MIPS64_EF_R0 + 18],
		regs->regs[MIPS64_EF_R0 + 19], regs->regs[MIPS64_EF_R0 + 20],
		regs->regs[MIPS64_EF_R0 + 21], regs->regs[MIPS64_EF_R0 + 22],
		regs->regs[MIPS64_EF_R0 + 23], regs->regs[MIPS64_EF_R0 + 24],
		regs->regs[MIPS64_EF_R0 + 25], regs->regs[MIPS64_EF_R0 + 26],
		regs->regs[MIPS64_EF_R0 + 27], regs->regs[MIPS64_EF_R0 + 28],
		regs->regs[MIPS64_EF_R0 + 29], regs->regs[MIPS64_EF_R0 + 30],
		regs->regs[MIPS64_EF_R0 + 31],
		regs->regs[MIPS64_EF_LO], regs->regs[MIPS64_EF_HI],
		regs->regs[MIPS64_EF_CP0_EPC],
		regs->regs[MIPS64_EF_CP0_BADVADDR],
		regs->regs[MIPS64_EF_CP0_STATUS],
		regs->regs[MIPS64_EF_CP0_CAUSE]);
}

#else /* !MIPS64 */

#include "defs.h"

/* Stub for non-MIPS64 builds: no registers to display. */
void
mips64_display_regs_from_elf_notes(int cpu, FILE *ofp)
{
	return;
}

#endif /* !MIPS64 */
crash-utility-crash-9cd43f5/x86_64.c0000664000372000037200000110434015107550337016501 0ustar juerghjuergh/* x86_64.c -- core analysis suite
 *
 * Copyright (C) 2004-2019 David Anderson
 * Copyright (C) 2004-2019 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "defs.h"
#include "xen_hyper_defs.h"

#ifdef X86_64

/* Forward declarations for the x86_64 machdep implementation. */
static int x86_64_kvtop(struct task_context *, ulong, physaddr_t *, int);
static int x86_64_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
static int x86_64_uvtop(struct task_context *, ulong, physaddr_t *, int);
static int x86_64_uvtop_level4(struct task_context *, ulong, physaddr_t *, int);
static int x86_64_uvtop_level4_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
static ulong x86_64_vmalloc_start(void);
static int x86_64_is_task_addr(ulong);
static int x86_64_verify_symbol(const char *, ulong, char);
static int x86_64_verify_line_number(ulong, ulong, ulong);
static ulong x86_64_get_task_pgd(ulong);
static int x86_64_translate_pte(ulong, void *, ulonglong);
static ulong x86_64_processor_speed(void);
static int is_vsyscall_addr(ulong);
struct syment *x86_64_value_to_symbol(ulong, ulong *);
static int x86_64_eframe_search(struct bt_info *);
static int x86_64_eframe_verify(struct bt_info *, long, long, long, long, long, long);
/* Exception-frame search/print option bits. */
#define EFRAME_PRINT (0x1)
#define EFRAME_VERIFY (0x2)
#define EFRAME_CS (0x4)
#define EFRAME_SEARCH (0x8)
static int x86_64_print_eframe_location(ulong, int, FILE *);
static void x86_64_back_trace_cmd(struct bt_info *);
static ulong x86_64_in_exception_stack(struct bt_info *, int *);
static ulong x86_64_in_irqstack(struct bt_info *);
static int x86_64_in_alternate_stack(int, ulong);
static ulong x86_64_in_kpti_entry_stack(int, ulong);
static ulong __schedule_frame_adjust(ulong, struct bt_info *);
static void x86_64_low_budget_back_trace_cmd(struct bt_info *);
static void x86_64_dwarf_back_trace_cmd(struct bt_info *);
static void x86_64_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *);
static struct syment *x86_64_function_called_by(ulong);
static int is_direct_call_target(struct bt_info *);
static void get_x86_64_frame(struct bt_info *, ulong *, ulong *);
static ulong text_lock_function(char *, struct bt_info *, ulong);
static int x86_64_print_stack_entry(struct bt_info *, FILE *, int, int, ulong);
static void x86_64_display_full_frame(struct bt_info *, ulong, FILE *);
static void x86_64_do_bt_reference_check(struct bt_info *, ulong,char *);
static void x86_64_dump_irq(int);
static void x86_64_get_irq_affinity(int);
static void x86_64_show_interrupts(int, ulong *);
static char *x86_64_extract_idt_function(ulong *, char *, ulong *);
static ulong x86_64_get_pc(struct bt_info *);
static ulong x86_64_get_sp(struct bt_info *);
static void x86_64_get_stack_frame(struct bt_info *, ulong *, ulong *);
static int x86_64_dis_filter(ulong, char *, unsigned int);
static void x86_64_cmd_mach(void);
static int x86_64_get_smp_cpus(void);
static void x86_64_display_machine_stats(void);
static void x86_64_display_cpu_data(unsigned int);
static void x86_64_display_memmap(void);
static void x86_64_dump_line_number(ulong);
static struct line_number_hook x86_64_line_number_hooks[];
static void x86_64_calc_phys_base(void);
static int x86_64_is_module_addr(ulong);
static int x86_64_is_kvaddr(ulong);
static int x86_64_is_uvaddr(ulong, struct task_context *);
static int x86_64_is_page_ptr(ulong, physaddr_t *);
/* Page-table walkers, one per paging level. */
static ulong *x86_64_kpgd_offset(ulong, int, int);
static ulong x86_64_upgd_offset(struct task_context *, ulong, int, int);
static ulong x86_64_upgd_offset_legacy(struct task_context *, ulong, int, int);
static ulong x86_64_p4d_offset(ulong, ulong, int, int);
static ulong x86_64_pud_offset(ulong, ulong, int, int);
static ulong x86_64_pmd_offset(ulong, ulong, int, int);
static ulong x86_64_pte_offset(ulong, ulong, int, int);
void x86_64_compiler_warning_stub(void);
static void x86_64_init_kernel_pgd(void);
static void x86_64_cpu_pda_init(void);
static void x86_64_per_cpu_init(void);
static void x86_64_ist_init(void);
static void x86_64_l1tf_init(void);
static void x86_64_irq_stack_gap_init(void);
static void x86_64_entry_trampoline_init(void);
static void x86_64_post_init(void);
static void parse_cmdline_args(void);
static void x86_64_clear_machdep_cache(void);
static void x86_64_irq_eframe_link_init(void);
static ulong x86_64_irq_eframe_link(ulong, struct bt_info *, FILE *);
static ulong search_for_switch_to(ulong, ulong);
static void x86_64_thread_return_init(void);
static void x86_64_framepointer_init(void);
static void x86_64_ORC_init(void);
static int x86_64_virt_phys_base(void);
/* Xen dumpfile p2m translation support. */
static int x86_64_xendump_p2m_create(struct xendump_data *);
static int x86_64_pvops_xendump_p2m_create(struct xendump_data *);
static int x86_64_pvops_xendump_p2m_l2_create(struct xendump_data *);
static int x86_64_pvops_xendump_p2m_l3_create(struct xendump_data *);
static char *x86_64_xendump_load_page(ulong, struct xendump_data *);
static int x86_64_xendump_page_index(ulong, struct xendump_data *);
static int x86_64_xen_kdump_p2m_create(struct xen_kdump_data *);
static char *x86_64_xen_kdump_load_page(ulong, char *);
static ulong x86_64_xen_kdump_page_mfn(ulong);
static void x86_64_debug_dump_page(FILE *, char *, char *);
static void x86_64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *);
static ulong x86_64_xendump_panic_task(struct xendump_data *);
static void x86_64_init_hyper(int);
static ulong x86_64_get_stackbase_hyper(ulong);
static ulong x86_64_get_stacktop_hyper(ulong);
/* Framesize cache used by the low-budget backtracer. */
static int x86_64_framesize_cache_resize(void);
static int x86_64_do_not_cache_framesize(struct syment *, ulong);
static int x86_64_framesize_cache_func(int, ulong, int *, int, struct syment *);
static ulong x86_64_get_framepointer(struct bt_info *, ulong);
int search_for_eframe_target_caller(struct bt_info *, ulong, int *);
static int x86_64_get_framesize(struct bt_info *, ulong, ulong, char *);
static void x86_64_framesize_debug(struct bt_info *);
static void x86_64_get_active_set(void);
static int x86_64_get_kvaddr_ranges(struct vaddr_range *);
static int x86_64_get_current_task_reg(int, const char *, int, void *, int);
static int x86_64_verify_paddr(uint64_t);
static void GART_init(void);
static void x86_64_exception_stacks_init(void);
static int in_START_KERNEL_map(ulong);
/* ORC unwinder table lookup helpers. */
static ulong orc_ip(ulong);
static orc_entry *__orc_find(ulong, ulong, uint, ulong);
static orc_entry *orc_find(ulong);
static orc_entry *orc_module_find(ulong);
static ulong ip_table_to_vaddr(ulong);
static void orc_dump(ulong);

struct machine_specific x86_64_machine_specific = { 0 };

static const char *exception_functions_orig[];
static const char *exception_functions_5_8[];

/* Use this hardwired version -- sometimes the
 * debuginfo doesn't pick this up even though
 * it exists in the kernel; it shouldn't change.
 */
struct x86_64_user_regs_struct {
	unsigned long r15, r14, r13, r12, bp, bx;
	unsigned long r11, r10, r9, r8, ax, cx, dx;
	unsigned long si, di, orig_ax, ip, cs;
	unsigned long flags, sp, ss, fs_base;
	unsigned long gs_base, ds, es, fs, gs;
};

/* Register set plus a validity bitmap for partially-known register sets. */
struct user_regs_bitmap_struct {
	struct x86_64_user_regs_struct ur;
	ulong bitmap[32];
};

ulong extra_stacks_idx = 0;
struct user_regs_bitmap_struct *extra_stacks_regs[MAX_EXCEPTION_STACKS] = {0};

/*
 * Do all necessary machine-specific setup here. This is called several
 * times during initialization.
 */
void
x86_64_init(int when)
{
	int len, dim;
	char *string;

	/* Xen hypervisor analysis uses its own init path entirely. */
	if (XEN_HYPER_MODE()) {
		x86_64_init_hyper(when);
		return;
	}

	switch (when)
	{
	case SETUP_ENV:
		machdep->process_elf_notes = x86_process_elf_notes;
		machdep->is_page_ptr = x86_64_is_page_ptr;
		break;

	case PRE_SYMTAB:
		/* Earliest phase: page geometry, caches, vmcoreinfo values. */
		machdep->verify_symbol = x86_64_verify_symbol;
		machdep->verify_line_number = x86_64_verify_line_number;
		machdep->machspec = &x86_64_machine_specific;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 2;
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pud space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pud_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = x86_64_verify_paddr;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		machdep->flags |= MACHDEP_BT_TEXT;
		machdep->flags |= FRAMESIZE_DEBUG;
		machdep->machspec->irq_eframe_link = UNINITIALIZED;
		machdep->machspec->irq_stack_gap = UNINITIALIZED;
		machdep->get_kvaddr_ranges = x86_64_get_kvaddr_ranges;
		machdep->get_current_task_reg = x86_64_get_current_task_reg;
		if (machdep->cmdline_args[0])
			parse_cmdline_args();
		/* KASLR relocation offset, if exported by the dumpfile. */
		if ((string = pc->read_vmcoreinfo("relocate"))) {
			kt->relocate = htol(string, QUIET, NULL);
			kt->flags |= RELOC_SET;
			kt->flags2 |= KASLR;
			free(string);
		}
		if ((string = pc->read_vmcoreinfo("NUMBER(KERNEL_IMAGE_SIZE)"))) {
			machdep->machspec->kernel_image_size = dtol(string, QUIET, NULL);
			free(string);
		}
		if ((string = pc->read_vmcoreinfo("NUMBER(sme_mask)"))) {
			machdep->machspec->sme_mask = dtol(string, QUIET, NULL);
			free(string);
		}
		if (SADUMP_DUMPFILE() || QEMU_MEM_DUMP_NO_VMCOREINFO() || VMSS_DUMPFILE())
			/* Need for calculation of kaslr_offset and phys_base */
			machdep->kvtop = x86_64_kvtop;
		break;

	case PRE_GDB:
		/* Select the virtual memory layout era from kernel symbols. */
		if (!(machdep->flags & VM_FLAGS)) {
			if (symbol_exists("xen_start_info")) {
				if (PVOPS())
					machdep->flags |= VM_2_6_11;
				else if (symbol_exists("low_pml4") &&
				    symbol_exists("swap_low_mappings"))
					machdep->flags |= VM_XEN_RHEL4;
				else
					machdep->flags |= VM_XEN;
			} else if (symbol_exists("boot_vmalloc_pgt"))
				machdep->flags |= VM_ORIG;
			else
				machdep->flags |= VM_2_6_11;
		}

		switch (machdep->flags & VM_FLAGS)
		{
		case VM_ORIG:
			/* pre-2.6.11 layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_ORIG;
			machdep->machspec->page_offset = PAGE_OFFSET_ORIG;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_ORIG;
			machdep->machspec->vmalloc_end = VMALLOC_END_ORIG;
			machdep->machspec->modules_vaddr = MODULES_VADDR_ORIG;
			machdep->machspec->modules_end = MODULES_END_ORIG;
			machdep->uvtop = x86_64_uvtop;
			machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6;
			machdep->machspec->pgdir_shift = PGDIR_SHIFT;
			machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD;
			break;

		case VM_2_6_11:
			/* 2.6.11 layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_2_6_11;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_11;
			machdep->machspec->vmalloc_end = VMALLOC_END_2_6_11;
			machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_11;
			machdep->machspec->modules_end = MODULES_END_2_6_11;
			/* 2.6.24 layout */
			machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_24;
			machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_24;
			if (symbol_exists("vmemmap_populate"))
				machdep->flags |= VMEMMAP;
			if (kernel_symbol_exists("end_pfn"))
				/* 2.6.11 layout */
				machdep->machspec->page_offset = PAGE_OFFSET_2_6_11;
			else
				/* 2.6.27 layout */
				machdep->machspec->page_offset = PAGE_OFFSET_2_6_27;
			machdep->uvtop = x86_64_uvtop_level4;
			machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6;
			machdep->machspec->pgdir_shift = PGDIR_SHIFT;
			machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD;
			break;

		case VM_XEN:
			/* Xen layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_XEN;
			machdep->machspec->page_offset = PAGE_OFFSET_XEN;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN;
			machdep->machspec->vmalloc_end = VMALLOC_END_XEN;
			machdep->machspec->modules_vaddr = MODULES_VADDR_XEN;
			machdep->machspec->modules_end = MODULES_END_XEN;
			machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_XEN;
			machdep->machspec->pgdir_shift = PGDIR_SHIFT;
			machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD;
			break;

		case VM_XEN_RHEL4:
			/* RHEL4 Xen layout */
			machdep->machspec->userspace_top = USERSPACE_TOP_XEN_RHEL4;
			machdep->machspec->page_offset = PAGE_OFFSET_XEN_RHEL4;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_XEN_RHEL4;
			machdep->machspec->vmalloc_end = VMALLOC_END_XEN_RHEL4;
			machdep->machspec->modules_vaddr = MODULES_VADDR_XEN_RHEL4;
			machdep->machspec->modules_end = MODULES_END_XEN_RHEL4;
			machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_XEN;
			machdep->machspec->pgdir_shift = PGDIR_SHIFT;
			machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD;
			break;
		}

		machdep->kvbase = (ulong)PAGE_OFFSET;
		machdep->identity_map_base = (ulong)PAGE_OFFSET;
		machdep->is_kvaddr = x86_64_is_kvaddr;
		machdep->is_uvaddr = x86_64_is_uvaddr;
		machdep->eframe_search = x86_64_eframe_search;
		machdep->back_trace = x86_64_low_budget_back_trace_cmd;
		machdep->processor_speed = x86_64_processor_speed;
		machdep->kvtop = x86_64_kvtop;
		machdep->get_task_pgd = x86_64_get_task_pgd;
		machdep->get_stack_frame = x86_64_get_stack_frame;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = x86_64_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->is_task_addr = x86_64_is_task_addr;
		machdep->dis_filter = x86_64_dis_filter;
		machdep->cmd_mach = x86_64_cmd_mach;
		machdep->get_smp_cpus = x86_64_get_smp_cpus;
		machdep->value_to_symbol = x86_64_value_to_symbol;
		machdep->init_kernel_pgd = x86_64_init_kernel_pgd;
		machdep->clear_machdep_cache = x86_64_clear_machdep_cache;
		machdep->xendump_p2m_create = x86_64_xendump_p2m_create;
		machdep->get_xendump_regs = x86_64_get_xendump_regs;
		machdep->xen_kdump_p2m_create = x86_64_xen_kdump_p2m_create;
		machdep->xendump_panic_task = x86_64_xendump_panic_task;
		if (symbol_exists("vgettimeofday"))
			machdep->machspec->vsyscall_page =
				PAGEBASE(symbol_value("vgettimeofday"));
		x86_64_calc_phys_base();
		break;

	case POST_RELOC:
		/* Check for 5-level paging */
		if (!(machdep->flags & VM_5LEVEL)) {
			int l5_enabled = 0;
			if ((string = pc->read_vmcoreinfo("NUMBER(pgtable_l5_enabled)"))) {
				l5_enabled = atoi(string);
				free(string);
			} else if (kernel_symbol_exists("__pgtable_l5_enabled"))
				readmem(symbol_value("__pgtable_l5_enabled"), KVADDR,
					&l5_enabled, sizeof(int), "__pgtable_l5_enabled",
					QUIET|FAULT_ON_ERROR);
			if (l5_enabled)
				machdep->flags |= VM_5LEVEL;
		}
		if (machdep->flags & VM_5LEVEL) {
			machdep->machspec->userspace_top = USERSPACE_TOP_5LEVEL;
			machdep->machspec->page_offset = PAGE_OFFSET_5LEVEL;
			machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_5LEVEL;
			machdep->machspec->vmalloc_end = VMALLOC_END_5LEVEL;
			machdep->machspec->modules_vaddr = MODULES_VADDR_5LEVEL;
			machdep->machspec->modules_end = MODULES_END_5LEVEL;
			machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_5LEVEL;
			machdep->machspec->vmemmap_end = VMEMMAP_END_5LEVEL;
			if (symbol_exists("vmemmap_populate"))
				machdep->flags |= VMEMMAP;
			machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_5LEVEL;
			machdep->machspec->pgdir_shift = PGDIR_SHIFT_5LEVEL;
			machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD_5LEVEL;
			if (!machdep->machspec->p4d) {
				if ((machdep->machspec->p4d = (char *)malloc(PAGESIZE())) == NULL)
					error(FATAL, "cannot malloc p4d space.");
				machdep->machspec->last_p4d_read = 0;
			}
			machdep->uvtop = x86_64_uvtop_level4; /* 5-level is optional per-task */
			machdep->kvbase = (ulong)PAGE_OFFSET;
			machdep->identity_map_base = (ulong)PAGE_OFFSET;
		}

		/*
		 * Check for CONFIG_RANDOMIZE_MEMORY, and set page_offset and
		 * the virtual address ranges.
		 */
		if (kernel_symbol_exists("page_offset_base") &&
		    kernel_symbol_exists("vmalloc_base")) {
			machdep->flags |= RANDOMIZED;
			readmem(symbol_value("page_offset_base"), KVADDR,
				&machdep->machspec->page_offset, sizeof(ulong),
				"page_offset_base", QUIET|FAULT_ON_ERROR);
			machdep->kvbase = machdep->machspec->page_offset;
			machdep->identity_map_base = machdep->machspec->page_offset;
			readmem(symbol_value("vmalloc_base"), KVADDR,
				&machdep->machspec->vmalloc_start_addr, sizeof(ulong),
				"vmalloc_base", FAULT_ON_ERROR);
			if (machdep->flags & VM_5LEVEL)
				machdep->machspec->vmalloc_end =
					machdep->machspec->vmalloc_start_addr + TERABYTES(1280) - 1;
			else
				machdep->machspec->vmalloc_end =
					machdep->machspec->vmalloc_start_addr + TERABYTES(32) - 1;
			if (kernel_symbol_exists("vmemmap_base")) {
				readmem(symbol_value("vmemmap_base"), KVADDR,
					&machdep->machspec->vmemmap_vaddr, sizeof(ulong),
					"vmemmap_base", FAULT_ON_ERROR);
				machdep->machspec->vmemmap_end =
					machdep->machspec->vmemmap_vaddr +
					TERABYTES(1) - 1;
			} else {
				machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_31;
				machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_31;
			}
			machdep->machspec->modules_vaddr = __START_KERNEL_map +
				(machdep->machspec->kernel_image_size ?
				machdep->machspec->kernel_image_size : GIGABYTES(1));
			machdep->machspec->modules_end = MODULES_END_2_6_31;
		}
		break;

	case POST_GDB:
		/* Non-randomized 4.20+ kernels moved the direct-map base. */
		if (!(machdep->flags & RANDOMIZED) &&
		    ((THIS_KERNEL_VERSION >= LINUX(4,19,5)) ||
		     ((THIS_KERNEL_VERSION >= LINUX(4,14,84)) &&
		      (THIS_KERNEL_VERSION < LINUX(4,15,0))))) {
			machdep->machspec->page_offset = machdep->flags & VM_5LEVEL ?
				PAGE_OFFSET_5LEVEL_4_20 : PAGE_OFFSET_4LEVEL_4_20;
			machdep->kvbase = machdep->machspec->page_offset;
			machdep->identity_map_base = machdep->machspec->page_offset;
		}
		/*
		 * --machdep page_offset forced override
		 */
		if (machdep->machspec->page_offset_force) {
			machdep->machspec->page_offset = machdep->machspec->page_offset_force;
			machdep->kvbase = machdep->machspec->page_offset;
			machdep->identity_map_base = machdep->machspec->page_offset;
		}
		if (THIS_KERNEL_VERSION >= LINUX(2,6,26) &&
		    THIS_KERNEL_VERSION < LINUX(2,6,31)) {
			machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_26;
		}
		if (THIS_KERNEL_VERSION >= LINUX(2,6,27) &&
		    THIS_KERNEL_VERSION < LINUX(2,6,31)) {
			machdep->machspec->modules_end = MODULES_END_2_6_27;
		}
		if (THIS_KERNEL_VERSION >= LINUX(2,6,31)) {
			if (!(machdep->flags & RANDOMIZED)) {
				machdep->machspec->vmalloc_start_addr = VMALLOC_START_ADDR_2_6_31;
				machdep->machspec->vmalloc_end = VMALLOC_END_2_6_31;
				machdep->machspec->vmemmap_vaddr = VMEMMAP_VADDR_2_6_31;
				machdep->machspec->vmemmap_end = VMEMMAP_END_2_6_31;
				if (kt->flags2 & KASLR)
					machdep->machspec->modules_vaddr =
						__START_KERNEL_map +
						(machdep->machspec->kernel_image_size ?
						machdep->machspec->kernel_image_size :
						GIGABYTES(1));
				else
					machdep->machspec->modules_vaddr = MODULES_VADDR_2_6_31;
				machdep->machspec->modules_end = MODULES_END_2_6_31;
			}
		}
		if (STRUCT_EXISTS("cpu_entry_area")) {
			machdep->machspec->cpu_entry_area_start = CPU_ENTRY_AREA_START;
			machdep->machspec->cpu_entry_area_end = CPU_ENTRY_AREA_END;
		}

		STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
		/*
		 * Before 2.6.25 the structure was called gate_struct
		 */
		if (STRUCT_EXISTS("gate_desc"))
			STRUCT_SIZE_INIT(gate_struct, "gate_desc");
		else
			STRUCT_SIZE_INIT(gate_struct, "gate_struct");
		/* e820 naming changed to e820_table/e820_entry in 4.12-era kernels. */
		if (STRUCT_EXISTS("e820map")) {
			STRUCT_SIZE_INIT(e820map, "e820map");
			MEMBER_OFFSET_INIT(e820map_nr_map, "e820map", "nr_map");
		} else {
			STRUCT_SIZE_INIT(e820map, "e820_table");
			MEMBER_OFFSET_INIT(e820map_nr_map, "e820_table", "nr_entries");
		}
		if (STRUCT_EXISTS("e820entry")) {
			STRUCT_SIZE_INIT(e820entry, "e820entry");
			MEMBER_OFFSET_INIT(e820entry_addr, "e820entry", "addr");
			MEMBER_OFFSET_INIT(e820entry_size, "e820entry", "size");
			MEMBER_OFFSET_INIT(e820entry_type, "e820entry", "type");
		} else {
			STRUCT_SIZE_INIT(e820entry, "e820_entry");
			MEMBER_OFFSET_INIT(e820entry_addr, "e820_entry", "addr");
			MEMBER_OFFSET_INIT(e820entry_size, "e820_entry", "size");
			MEMBER_OFFSET_INIT(e820entry_type, "e820_entry", "type");
		}
		if (KVMDUMP_DUMPFILE())
			set_kvm_iohole(NULL);
		/* thread_struct members were renamed rip/rsp -> ip/sp in 2.6.25. */
		MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "rip");
		MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "rsp");
		MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "rsp0");
		if (INVALID_MEMBER(thread_struct_rip))
			MEMBER_OFFSET_INIT(thread_struct_rip, "thread_struct", "ip");
		if (INVALID_MEMBER(thread_struct_rsp))
			MEMBER_OFFSET_INIT(thread_struct_rsp, "thread_struct", "sp");
		if (INVALID_MEMBER(thread_struct_rsp0))
			MEMBER_OFFSET_INIT(thread_struct_rsp0, "thread_struct", "sp0");
		MEMBER_OFFSET_INIT(thread_struct_es, "thread_struct", "es");
		MEMBER_OFFSET_INIT(thread_struct_ds, "thread_struct", "ds");
		MEMBER_OFFSET_INIT(thread_struct_fsbase, "thread_struct", "fsbase");
		MEMBER_OFFSET_INIT(thread_struct_gsbase, "thread_struct", "gsbase");
		MEMBER_OFFSET_INIT(thread_struct_fs, "thread_struct", "fs");
		MEMBER_OFFSET_INIT(thread_struct_gs, "thread_struct", "gs");
		STRUCT_SIZE_INIT(tss_struct, "tss_struct");
		MEMBER_OFFSET_INIT(tss_struct_ist, "tss_struct", "ist");
		if (INVALID_MEMBER(tss_struct_ist)) {
			/* Newer kernels nest the IST array inside x86_hw_tss. */
			long x86_tss_offset, ist_offset;
			x86_tss_offset = MEMBER_OFFSET("tss_struct", "x86_tss");
			ist_offset = MEMBER_OFFSET("x86_hw_tss", "ist");
			if ((x86_tss_offset != INVALID_OFFSET) &&
			    (ist_offset != INVALID_OFFSET))
				ASSIGN_OFFSET(tss_struct_ist) = x86_tss_offset +
					ist_offset;
		}
		/* user_regs_struct members likewise lost their "r" prefix. */
		MEMBER_OFFSET_INIT(user_regs_struct_rip, "user_regs_struct", "rip");
		if (INVALID_MEMBER(user_regs_struct_rip))
			MEMBER_OFFSET_INIT(user_regs_struct_rip, "user_regs_struct", "ip");
		MEMBER_OFFSET_INIT(user_regs_struct_rsp, "user_regs_struct", "rsp");
		if (INVALID_MEMBER(user_regs_struct_rsp))
			MEMBER_OFFSET_INIT(user_regs_struct_rsp, "user_regs_struct", "sp");
		MEMBER_OFFSET_INIT(user_regs_struct_eflags, "user_regs_struct", "eflags");
		if (INVALID_MEMBER(user_regs_struct_eflags))
			MEMBER_OFFSET_INIT(user_regs_struct_eflags, "user_regs_struct", "flags");
		MEMBER_OFFSET_INIT(user_regs_struct_cs, "user_regs_struct", "cs");
		MEMBER_OFFSET_INIT(user_regs_struct_ss, "user_regs_struct", "ss");
		MEMBER_OFFSET_INIT(user_regs_struct_rax, "user_regs_struct", "rax");
		if (INVALID_MEMBER(user_regs_struct_rax))
			MEMBER_OFFSET_INIT(user_regs_struct_rax, "user_regs_struct", "ax");
		MEMBER_OFFSET_INIT(user_regs_struct_rbx, "user_regs_struct", "rbx");
		if (INVALID_MEMBER(user_regs_struct_rbx))
			MEMBER_OFFSET_INIT(user_regs_struct_rbx, "user_regs_struct", "bx");
		MEMBER_OFFSET_INIT(user_regs_struct_rcx, "user_regs_struct", "rcx");
		if (INVALID_MEMBER(user_regs_struct_rcx))
			MEMBER_OFFSET_INIT(user_regs_struct_rcx, "user_regs_struct", "cx");
		MEMBER_OFFSET_INIT(user_regs_struct_rdx, "user_regs_struct", "rdx");
		if (INVALID_MEMBER(user_regs_struct_rdx))
			MEMBER_OFFSET_INIT(user_regs_struct_rdx, "user_regs_struct", "dx");
		MEMBER_OFFSET_INIT(user_regs_struct_rsi, "user_regs_struct", "rsi");
		if (INVALID_MEMBER(user_regs_struct_rsi))
			MEMBER_OFFSET_INIT(user_regs_struct_rsi, "user_regs_struct", "si");
		MEMBER_OFFSET_INIT(user_regs_struct_rdi, "user_regs_struct", "rdi");
		if (INVALID_MEMBER(user_regs_struct_rdi))
			MEMBER_OFFSET_INIT(user_regs_struct_rdi, "user_regs_struct", "di");
		MEMBER_OFFSET_INIT(user_regs_struct_rbp, "user_regs_struct", "rbp");
		if (INVALID_MEMBER(user_regs_struct_rbp))
			MEMBER_OFFSET_INIT(user_regs_struct_rbp, "user_regs_struct", "bp");
		MEMBER_OFFSET_INIT(user_regs_struct_r8, "user_regs_struct", "r8");
		MEMBER_OFFSET_INIT(user_regs_struct_r9, "user_regs_struct", "r9");
		MEMBER_OFFSET_INIT(user_regs_struct_r10, "user_regs_struct", "r10");
		MEMBER_OFFSET_INIT(user_regs_struct_r11, "user_regs_struct", "r11");
		MEMBER_OFFSET_INIT(user_regs_struct_r12, "user_regs_struct", "r12");
		MEMBER_OFFSET_INIT(user_regs_struct_r13, "user_regs_struct", "r13");
		MEMBER_OFFSET_INIT(user_regs_struct_r14, "user_regs_struct", "r14");
		MEMBER_OFFSET_INIT(user_regs_struct_r15, "user_regs_struct", "r15");
		STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct");
		if (!VALID_STRUCT(user_regs_struct)) {
			/* Fall back to the hardwired struct layout above. */
			ASSIGN_SIZE(user_regs_struct) =
				sizeof(struct x86_64_user_regs_struct);
			ASSIGN_OFFSET(user_regs_struct_rip) =
				offsetof(struct x86_64_user_regs_struct, ip);
			ASSIGN_OFFSET(user_regs_struct_rsp) =
				offsetof(struct x86_64_user_regs_struct, sp);
			ASSIGN_OFFSET(user_regs_struct_eflags) =
				offsetof(struct x86_64_user_regs_struct, flags);
			ASSIGN_OFFSET(user_regs_struct_cs) =
				offsetof(struct x86_64_user_regs_struct, cs);
			ASSIGN_OFFSET(user_regs_struct_ss) =
				offsetof(struct x86_64_user_regs_struct, ss);
			ASSIGN_OFFSET(user_regs_struct_rax) =
				offsetof(struct x86_64_user_regs_struct, ax);
			ASSIGN_OFFSET(user_regs_struct_rbx) =
				offsetof(struct x86_64_user_regs_struct, bx);
			ASSIGN_OFFSET(user_regs_struct_rcx) =
				offsetof(struct x86_64_user_regs_struct, cx);
			ASSIGN_OFFSET(user_regs_struct_rdx) =
				offsetof(struct x86_64_user_regs_struct, dx);
			ASSIGN_OFFSET(user_regs_struct_rsi) =
				offsetof(struct x86_64_user_regs_struct, si);
			ASSIGN_OFFSET(user_regs_struct_rdi) =
				offsetof(struct x86_64_user_regs_struct, di);
			ASSIGN_OFFSET(user_regs_struct_rbp) =
				offsetof(struct x86_64_user_regs_struct, bp);
			ASSIGN_OFFSET(user_regs_struct_r8) =
				offsetof(struct x86_64_user_regs_struct, r8);
			ASSIGN_OFFSET(user_regs_struct_r9) =
				offsetof(struct x86_64_user_regs_struct, r9);
			ASSIGN_OFFSET(user_regs_struct_r10) =
				offsetof(struct x86_64_user_regs_struct, r10);
			ASSIGN_OFFSET(user_regs_struct_r11) =
				offsetof(struct x86_64_user_regs_struct, r11);
			ASSIGN_OFFSET(user_regs_struct_r12) =
				offsetof(struct x86_64_user_regs_struct, r12);
			ASSIGN_OFFSET(user_regs_struct_r13) =
				offsetof(struct x86_64_user_regs_struct, r13);
			ASSIGN_OFFSET(user_regs_struct_r14) =
				offsetof(struct x86_64_user_regs_struct, r14);
			ASSIGN_OFFSET(user_regs_struct_r15) =
				offsetof(struct x86_64_user_regs_struct, r15);
		}
		machdep->vmalloc_start = x86_64_vmalloc_start;
		vt->vmalloc_start = machdep->vmalloc_start();
		machdep->init_kernel_pgd();
		if (STRUCT_EXISTS("x8664_pda"))
			x86_64_cpu_pda_init();
		else
			x86_64_per_cpu_init();
		x86_64_ist_init();
		if (symbol_exists("repeat_nmi"))
			machdep->flags |= NESTED_NMI;
		machdep->in_alternate_stack = x86_64_in_alternate_stack;
		if ((machdep->machspec->irqstack = (char *)
			malloc(machdep->machspec->stkinfo.isize)) == NULL)
			error(FATAL, "cannot malloc irqstack space.");
		if (symbol_exists("irq_desc")) {
			if (LKCD_KERNTYPES())
				ARRAY_LENGTH_INIT_ALT(machdep->nr_irqs,
					"irq_desc", "kernel_stat.irqs", NULL, 0);
			else
				ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
					"irq_desc", NULL, 0);
		} else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
				&machdep->nr_irqs);
		else
			machdep->nr_irqs = 224; /* NR_IRQS (at least) */
		machdep->dump_irq = x86_64_dump_irq;
		machdep->get_irq_affinity = x86_64_get_irq_affinity;
		machdep->show_interrupts = x86_64_show_interrupts;
		if (THIS_KERNEL_VERSION < LINUX(2,6,24))
			machdep->line_number_hooks = x86_64_line_number_hooks;
		if (!machdep->hz) {
			machdep->hz = HZ;
			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
				machdep->hz = 1000;
		}
		machdep->section_size_bits = _SECTION_SIZE_BITS;
		if (!machdep->max_physmem_bits) {
			if ((string = pc->read_vmcoreinfo("NUMBER(MAX_PHYSMEM_BITS)"))) {
				machdep->max_physmem_bits = atol(string);
				free(string);
			} else if (machdep->flags & VM_5LEVEL)
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_5LEVEL;
			else if (THIS_KERNEL_VERSION >= LINUX(2,6,31))
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_31;
			else if (THIS_KERNEL_VERSION >= LINUX(2,6,26))
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_2_6_26;
			else {
				machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
				len = get_array_length("mem_section", &dim, 0);
				/*
				 * Check for patched MAX_PHYSMEM_BITS.
				 */
				if (((len > 32) && !dim) ||
				    ((len > 8192) && (dim == 1)))
					machdep->max_physmem_bits =
						_MAX_PHYSMEM_BITS_2_6_26;
			}
		}
		if (XEN()) {
			if (kt->xen_flags & WRITABLE_PAGE_TABLES) {
				switch (machdep->flags & VM_FLAGS)
				{
				case VM_XEN:
				case VM_2_6_11:
					machdep->uvtop = x86_64_uvtop_level4_xen_wpt;
					break;
				case VM_XEN_RHEL4:
					machdep->uvtop = x86_64_uvtop_level4_rhel4_xen_wpt;
					break;
				}
				machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_XEN;
			} else {
				machdep->uvtop = x86_64_uvtop_level4;
			}
			MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs,
				"vcpu_guest_context", "user_regs");
			/* rsp/rip sit immediately before ss/cs in cpu_user_regs. */
			ASSIGN_OFFSET(cpu_user_regs_rsp) =
				MEMBER_OFFSET("cpu_user_regs", "ss") - sizeof(ulong);
			ASSIGN_OFFSET(cpu_user_regs_rip) =
				MEMBER_OFFSET("cpu_user_regs", "cs") - sizeof(ulong);
		}
		x86_64_irq_eframe_link_init();
		x86_64_irq_stack_gap_init();
		x86_64_entry_trampoline_init();
		x86_64_framepointer_init();
		x86_64_ORC_init();
		x86_64_thread_return_init();
		x86_64_l1tf_init();
		if (THIS_KERNEL_VERSION >= LINUX(2,6,28))
			machdep->machspec->page_protnone = _PAGE_GLOBAL;
		else
			machdep->machspec->page_protnone = _PAGE_PSE;
		STRUCT_SIZE_INIT(note_buf, "note_buf_t");
		STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus");
		MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg");
		STRUCT_SIZE_INIT(percpu_data, "percpu_data");
		GART_init();
		/* 5.8+ renamed the asm exception entry points. */
		if (kernel_symbol_exists("asm_exc_divide_error"))
			machdep->machspec->exception_functions =
				(char **)exception_functions_5_8;
		else
			machdep->machspec->exception_functions =
				(char **)exception_functions_orig;
		break;

	case POST_VM:
		init_unwind_table();
		break;

	case POST_INIT:
		x86_64_post_init();
		x86_64_get_active_set();
		break;

	case LOG_ONLY:
		machdep->machspec = &x86_64_machine_specific;
		x86_64_calc_phys_base();
		break;
	}
}

void
x86_64_dump_machdep_table(ulong arg)
{
	int c, i, cpus;
	int others;
	struct machine_specific *ms;

	ms = machdep->machspec;

	others = 0;
	fprintf(fp, " flags: %lx (", machdep->flags);
	if (machdep->flags & KSYMS_START)
		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
	if (machdep->flags & PT_REGS_INIT)
		fprintf(fp, "%sPT_REGS_INIT", others++ ? "|" : "");
	if (machdep->flags & MACHDEP_BT_TEXT)
		fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : "");
	if (machdep->flags & VM_ORIG)
		fprintf(fp, "%sVM_ORIG", others++ ? "|" : "");
	if (machdep->flags & VM_2_6_11)
		fprintf(fp, "%sVM_2_6_11", others++ ? "|" : "");
	if (machdep->flags & VM_XEN)
		fprintf(fp, "%sVM_XEN", others++ ? "|" : "");
	if (machdep->flags & VM_XEN_RHEL4)
		fprintf(fp, "%sVM_XEN_RHEL4", others++ ? "|" : "");
	if (machdep->flags & VM_5LEVEL)
		fprintf(fp, "%sVM_5LEVEL", others++ ? "|" : "");
	if (machdep->flags & VMEMMAP)
		fprintf(fp, "%sVMEMMAP", others++ ? "|" : "");
	if (machdep->flags & NO_TSS)
		fprintf(fp, "%sNO_TSS", others++ ? "|" : "");
	if (machdep->flags & SCHED_TEXT)
		fprintf(fp, "%sSCHED_TEXT", others++ ? "|" : "");
	if (machdep->flags & PHYS_BASE)
		fprintf(fp, "%sPHYS_BASE", others++ ? "|" : "");
	if (machdep->flags & FRAMESIZE_DEBUG)
		fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : "");
	if (machdep->flags & ORC)
		fprintf(fp, "%sORC", others++ ? "|" : "");
	if (machdep->flags & ORC_6_4)
		fprintf(fp, "%sORC_6_4", others++ ?
"|" : ""); if (machdep->flags & FRAMEPOINTER) fprintf(fp, "%sFRAMEPOINTER", others++ ? "|" : ""); if (machdep->flags & GART_REGION) fprintf(fp, "%sGART_REGION", others++ ? "|" : ""); if (machdep->flags & NESTED_NMI) fprintf(fp, "%sNESTED_NMI", others++ ? "|" : ""); if (machdep->flags & RANDOMIZED) fprintf(fp, "%sRANDOMIZED", others++ ? "|" : ""); if (machdep->flags & KPTI) fprintf(fp, "%sKPTI", others++ ? "|" : ""); if (machdep->flags & L1TF) fprintf(fp, "%sL1TF", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->kvbase); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %llu (0x%llx)\n", (ulonglong)machdep->memsize, (ulonglong)machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: x86_64_eframe_search()\n"); if (machdep->back_trace == x86_64_back_trace_cmd) fprintf(fp, " back_trace: x86_64_back_trace_cmd()\n"); else if (machdep->back_trace == x86_64_low_budget_back_trace_cmd) fprintf(fp, " back_trace: x86_64_low_budget_back_trace_cmd() %s\n", kt->flags & DWARF_UNWIND ? "-> x86_64_dwarf_back_trace_cmd()" : ""); else if (machdep->back_trace == x86_64_dwarf_back_trace_cmd) fprintf(fp, " back_trace: x86_64_dwarf_back_trace_cmd() %s\n", kt->flags & DWARF_UNWIND ? 
"" : "->x86_64_low_budget_back_trace_cmd()"); else fprintf(fp, " back_trace: %lx\n", (ulong)machdep->back_trace); fprintf(fp, " processor_speed: x86_64_processor_speed()\n"); if (machdep->uvtop == x86_64_uvtop) fprintf(fp, " uvtop: x86_64_uvtop()\n"); else if (machdep->uvtop == x86_64_uvtop_level4) { fprintf(fp, " uvtop: x86_64_uvtop_level4()"); if (machdep->flags & VM_5LEVEL) fprintf(fp, " (uses 5-level page tables)"); fprintf(fp, "\n"); } else if (machdep->uvtop == x86_64_uvtop_level4_xen_wpt) fprintf(fp, " uvtop: x86_64_uvtop_level4_xen_wpt()\n"); else if (machdep->uvtop == x86_64_uvtop_level4_rhel4_xen_wpt) fprintf(fp, " uvtop: x86_64_uvtop_level4_rhel4_xen_wpt()\n"); else fprintf(fp, " uvtop: %lx\n", (ulong)machdep->uvtop); fprintf(fp, " kvtop: x86_64_kvtop()"); if (machdep->flags & VM_5LEVEL) fprintf(fp, " -> x86_64_kvtop_5level()"); else if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) fprintf(fp, " -> x86_64_kvtop_xen_wpt()"); fprintf(fp, "\n"); fprintf(fp, " get_task_pgd: x86_64_get_task_pgd()\n"); fprintf(fp, " dump_irq: x86_64_dump_irq()\n"); fprintf(fp, " get_irq_affinity: x86_64_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: x86_64_show_interrupts()\n"); fprintf(fp, " get_stack_frame: x86_64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: x86_64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: x86_64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: x86_64_is_task_addr()\n"); fprintf(fp, " verify_symbol: x86_64_verify_symbol()\n"); fprintf(fp, " dis_filter: x86_64_dis_filter()\n"); fprintf(fp, " cmd_mach: x86_64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: x86_64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: x86_64_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: x86_64_is_uvaddr()\n"); fprintf(fp, " is_page_ptr: x86_64_is_page_ptr()\n"); fprintf(fp, " verify_paddr: 
x86_64_verify_paddr()\n"); fprintf(fp, " get_kvaddr_ranges: x86_64_get_kvaddr_ranges()\n"); fprintf(fp, "get_current_task_reg: x86_64_get_current_task_reg()\n"); fprintf(fp, " init_kernel_pgd: x86_64_init_kernel_pgd()\n"); fprintf(fp, "clear_machdep_cache: x86_64_clear_machdep_cache()\n"); fprintf(fp, " xendump_p2m_create: %s\n", PVOPS_XEN() ? "x86_64_pvops_xendump_p2m_create()" : "x86_64_xendump_p2m_create()"); fprintf(fp, " get_xendump_regs: x86_64_get_xendump_regs()\n"); fprintf(fp, " xendump_panic_task: x86_64_xendump_panic_task()\n"); fprintf(fp, "xen_kdump_p2m_create: x86_64_xen_kdump_p2m_create()\n"); fprintf(fp, " line_number_hooks: %s\n", machdep->line_number_hooks ? "x86_64_line_number_hooks" : "(unused)"); fprintf(fp, " verify_line_number: x86_64_verify_line_number()\n"); fprintf(fp, " value_to_symbol: x86_64_value_to_symbol()\n"); fprintf(fp, " in_alternate_stack: x86_64_in_alternate_stack()\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? 
machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " machspec: %016lx\n", (ulong)machdep->machspec); fprintf(fp, " userspace_top: %016lx\n", (ulong)ms->userspace_top); fprintf(fp, " page_offset: %016lx\n", (ulong)ms->page_offset); fprintf(fp, " page_offset_force: "); if (ms->page_offset_force) fprintf(fp, "%016lx\n", (ulong)ms->page_offset_force); else fprintf(fp, "(unused)\n"); fprintf(fp, " vmalloc_start_addr: %016lx\n", (ulong)ms->vmalloc_start_addr); fprintf(fp, " vmalloc_end: %016lx\n", (ulong)ms->vmalloc_end); fprintf(fp, " modules_vaddr: %016lx\n", (ulong)ms->modules_vaddr); fprintf(fp, " modules_end: %016lx\n", (ulong)ms->modules_end); fprintf(fp, " vmemmap_vaddr: %016lx %s\n", (ulong)ms->vmemmap_vaddr, machdep->flags & VMEMMAP ? "" : "(unused)"); fprintf(fp, " vmemmap_end: %016lx %s\n", (ulong)ms->vmemmap_end, machdep->flags & VMEMMAP ? "" : "(unused)"); fprintf(fp, " phys_base: %lx\n", (ulong)ms->phys_base); fprintf(fp, " kernel_image_size: "); if (ms->kernel_image_size) fprintf(fp, "%lx (%ldMB)\n", ms->kernel_image_size, ms->kernel_image_size/MEGABYTES(1)); else fprintf(fp, "(uninitialized)\n"); fprintf(fp, " sme_mask: %lx\n", ms->sme_mask); fprintf(fp, " physical_mask_shift: %ld\n", ms->physical_mask_shift); fprintf(fp, " pgdir_shift: %ld\n", ms->pgdir_shift); fprintf(fp, " GART_start: %lx\n", ms->GART_start); fprintf(fp, " GART_end: %lx\n", ms->GART_end); /* pml4 and upml is legacy for extension modules */ if (ms->pml4) { fprintf(fp, " pml4: %lx\n", (ulong)ms->pml4); fprintf(fp, " last_pml4_read: %lx\n", (ulong)ms->last_pml4_read); } else { fprintf(fp, " pml4: (unused)\n"); fprintf(fp, " last_pml4_read: (unused)\n"); } if (ms->upml) { fprintf(fp, " upml: %lx\n", (ulong)ms->upml); fprintf(fp, " last_upml_read: %lx\n", (ulong)ms->last_upml_read); } else { fprintf(fp, " GART_end: %lx\n", ms->GART_end); fprintf(fp, " upml: (unused)\n"); fprintf(fp, " last_upml_read: (unused)\n"); } if (ms->p4d) { fprintf(fp, " p4d: %lx\n", (ulong)ms->p4d); fprintf(fp, " 
last_p4d_read: %lx\n", (ulong)ms->last_p4d_read); } else { fprintf(fp, " p4d: (unused)\n"); fprintf(fp, " last_p4d_read: (unused)\n"); } fprintf(fp, " ORC_data: %s", machdep->flags & ORC ? "\n" : "(unused)\n"); if (machdep->flags & ORC) { fprintf(fp, " module_ORC: %s\n", ms->orc.module_ORC ? "TRUE" : "FALSE"); fprintf(fp, " has_signal: %s\n", ms->orc.has_signal ? "TRUE" : "FALSE"); fprintf(fp, " has_end: %s\n", ms->orc.has_end ? "TRUE" : "FALSE"); fprintf(fp, " lookup_num_blocks: %d\n", ms->orc.lookup_num_blocks); fprintf(fp, " __start_orc_unwind_ip: %lx\n", ms->orc.__start_orc_unwind_ip); fprintf(fp, " __stop_orc_unwind_ip: %lx\n", ms->orc.__stop_orc_unwind_ip); fprintf(fp, " __start_orc_unwind: %lx\n", ms->orc.__start_orc_unwind); fprintf(fp, " __stop_orc_unwind: %lx\n", ms->orc.__stop_orc_unwind); fprintf(fp, " orc_lookup: %lx\n", ms->orc.orc_lookup); fprintf(fp, " ip_entry: %lx\n", ms->orc.ip_entry); fprintf(fp, " orc_entry: %lx\n", ms->orc.orc_entry); fprintf(fp, " orc_entry_data:\n"); fprintf(fp, " sp_offset: %d\n", ms->orc.orc_entry_data.sp_offset); fprintf(fp, " bp_offset: %d\n", ms->orc.orc_entry_data.bp_offset); fprintf(fp, " sp_reg: %d\n", ms->orc.orc_entry_data.sp_reg); fprintf(fp, " bp_reg: %d\n", ms->orc.orc_entry_data.bp_reg); fprintf(fp, " type: %d\n", ms->orc.orc_entry_data.type); if (ms->orc.has_signal) fprintf(fp, " signal: %d\n", ms->orc.orc_entry_data.signal); else fprintf(fp, " signal: (n/a)\n"); if (ms->orc.has_end) fprintf(fp, " end: %d\n", ms->orc.orc_entry_data.end); else fprintf(fp, " end: (n/a)\n"); } fprintf(fp, " pto: %s", machdep->flags & PT_REGS_INIT ? 
"\n" : "(uninitialized)\n"); if (machdep->flags & PT_REGS_INIT) { fprintf(fp, " r15: %ld\n", ms->pto.r15); fprintf(fp, " r14: %ld\n", ms->pto.r14); fprintf(fp, " r13: %ld\n", ms->pto.r13); fprintf(fp, " r12: %ld\n", ms->pto.r12); fprintf(fp, " rbp: %ld\n", ms->pto.rbp); fprintf(fp, " rbx: %ld\n", ms->pto.rbx); fprintf(fp, " r11: %ld\n", ms->pto.r11); fprintf(fp, " r10: %ld\n", ms->pto.r10); fprintf(fp, " r9: %ld\n", ms->pto.r9); fprintf(fp, " r8: %ld\n", ms->pto.r8); fprintf(fp, " rax: %ld\n", ms->pto.rax); fprintf(fp, " rcx: %ld\n", ms->pto.rcx); fprintf(fp, " rdx: %ld\n", ms->pto.rdx); fprintf(fp, " rsi: %ld\n", ms->pto.rsi); fprintf(fp, " rdi: %ld\n", ms->pto.rdi); fprintf(fp, " orig_rax: %ld\n", ms->pto.orig_rax); fprintf(fp, " rip: %ld\n", ms->pto.rip); fprintf(fp, " cs: %ld\n", ms->pto.cs); fprintf(fp, " eflags: %ld\n", ms->pto.eflags); fprintf(fp, " rsp: %ld\n", ms->pto.rsp); fprintf(fp, " ss: %ld\n", ms->pto.ss); } #define CPU_SPACES(C) \ ((C) < 10 ? 3 : (C) < 100 ? 2 : (C) < 1000 ? 1 : 0) fprintf(fp, "%s current[%d]:%s", space(CPU_SPACES(kt->cpus)), kt->cpus, ms->current ? "\n " : " (unused)\n"); for (c = 0; ms->current && (c < kt->cpus); c++) { if (c && !(c%4)) fprintf(fp, "\n "); fprintf(fp, "%016lx ", ms->current[c]); } if (ms->current) fprintf(fp, "\n"); fprintf(fp, "%s crash_nmi_rsp[%d]:%s", space(CPU_SPACES(kt->cpus)), kt->cpus, ms->crash_nmi_rsp ? 
"\n " : " (unused)\n"); for (c = 0; ms->crash_nmi_rsp && (c < kt->cpus); c++) { if (c && !(c%4)) fprintf(fp, "\n "); fprintf(fp, "%016lx ", ms->crash_nmi_rsp[c]); } if (ms->crash_nmi_rsp) fprintf(fp, "\n"); fprintf(fp, " vsyscall_page: %lx\n", ms->vsyscall_page); fprintf(fp, " thread_return: %lx\n", ms->thread_return); fprintf(fp, " page_protnone: %lx\n", ms->page_protnone); fprintf(fp, " irqstack: %lx\n", (ulong)ms->irqstack); fprintf(fp, " irq_eframe_link: %ld\n", ms->irq_eframe_link); fprintf(fp, " irq_stack_gap: %ld\n", ms->irq_stack_gap); fprintf(fp, " stkinfo: isize: %d\n", ms->stkinfo.isize); fprintf(fp, " esize[%d]: %d,%d,%d,%d,%d,%d,%d%s\n", MAX_EXCEPTION_STACKS, ms->stkinfo.esize[0], ms->stkinfo.esize[1], ms->stkinfo.esize[2], ms->stkinfo.esize[3], ms->stkinfo.esize[4], ms->stkinfo.esize[5], ms->stkinfo.esize[6], machdep->flags & NO_TSS ? " (NO TSS) " : " "); fprintf(fp, " NMI_stack_index: %d\n", ms->stkinfo.NMI_stack_index); fprintf(fp, " exception_stacks:\n"); for (i = 0; i < MAX_EXCEPTION_STACKS; i++) fprintf(fp, " [%d]: %s\n", i, ms->stkinfo.exception_stacks[i]); fprintf(fp, " ebase[%s][%d]:", arg ? "NR_CPUS" : "cpus", MAX_EXCEPTION_STACKS); cpus = arg ? NR_CPUS : kt->cpus; for (c = 0; c < cpus; c++) { fprintf(fp, "\n %s[%d]: ", c < 10 ? " " : "", c); for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { fprintf(fp, "%016lx ", ms->stkinfo.ebase[c][i]); if (i == 3) fprintf(fp, "\n "); } } fprintf(fp, "\n ibase[%s]:\n ", arg ? 
"NR_CPUS" : "cpus"); for (c = 0; c < cpus; c++) { if (c && !(c%4)) fprintf(fp, "\n "); fprintf(fp, "%016lx ", ms->stkinfo.ibase[c]); } fprintf(fp, "\n kpti_entry_stack_size: "); if (ms->kpti_entry_stack_size) fprintf(fp, "%ld", ms->kpti_entry_stack_size); else fprintf(fp, "(unused)"); fprintf(fp, "\n kpti_entry_stack: "); if (machdep->flags & KPTI) { fprintf(fp, "(percpu: %lx):\n ", ms->kpti_entry_stack); for (c = 0; c < cpus; c++) { if (c && !(c%4)) fprintf(fp, "\n "); fprintf(fp, "%016lx ", ms->kpti_entry_stack + kt->__per_cpu_offset[c]); } fprintf(fp, "\n"); } else fprintf(fp, "(unused)\n"); fprintf(fp, " cpu_entry_area_start: "); if (ms->cpu_entry_area_start) fprintf(fp, "%016lx\n", (ulong)ms->cpu_entry_area_start); else fprintf(fp, "(unused)\n"); fprintf(fp, " cpu_entry_area_end: "); if (ms->cpu_entry_area_end) fprintf(fp, "%016lx\n", (ulong)ms->cpu_entry_area_end); else fprintf(fp, "(unused)\n"); fprintf(fp, " excpetion_functions: "); if (ms->exception_functions == (char **)exception_functions_5_8) fprintf(fp, "excpetion_functions_5_8\n"); else fprintf(fp, "excpetion_functions_orig\n"); } /* * Gather the cpu_pda array info, updating any smp-related items that * were possibly bypassed or improperly initialized in kernel_init(). 
*/ static void x86_64_cpu_pda_init(void) { int i, cpus, nr_pda, cpunumber, _cpu_pda, _boot_cpu_pda; char *cpu_pda_buf; ulong level4_pgt, data_offset, cpu_pda_addr; struct syment *sp, *nsp; ulong offset, istacksize; _boot_cpu_pda = FALSE; level4_pgt = 0; STRUCT_SIZE_INIT(x8664_pda, "x8664_pda"); MEMBER_OFFSET_INIT(x8664_pda_pcurrent, "x8664_pda", "pcurrent"); MEMBER_OFFSET_INIT(x8664_pda_data_offset, "x8664_pda", "data_offset"); MEMBER_OFFSET_INIT(x8664_pda_kernelstack, "x8664_pda", "kernelstack"); MEMBER_OFFSET_INIT(x8664_pda_irqrsp, "x8664_pda", "irqrsp"); MEMBER_OFFSET_INIT(x8664_pda_irqstackptr, "x8664_pda", "irqstackptr"); MEMBER_OFFSET_INIT(x8664_pda_level4_pgt, "x8664_pda", "level4_pgt"); MEMBER_OFFSET_INIT(x8664_pda_cpunumber, "x8664_pda", "cpunumber"); MEMBER_OFFSET_INIT(x8664_pda_me, "x8664_pda", "me"); cpu_pda_buf = GETBUF(SIZE(x8664_pda)); if (LKCD_KERNTYPES()) { if (symbol_exists("_cpu_pda")) _cpu_pda = TRUE; else _cpu_pda = FALSE; nr_pda = get_cpus_possible(); } else { if (symbol_exists("_cpu_pda")) { if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0))) nr_pda = NR_CPUS; _cpu_pda = TRUE; } else { if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) nr_pda = NR_CPUS; _cpu_pda = FALSE; } } if (_cpu_pda) { if (symbol_exists("_boot_cpu_pda")) _boot_cpu_pda = TRUE; else _boot_cpu_pda = FALSE; } if (DUMPFILE() && !(machdep->machspec->current = calloc(nr_pda, sizeof(ulong)))) error(FATAL, "cannot calloc %d x86_64 current pointers!\n", nr_pda); for (i = cpus = 0; i < nr_pda; i++) { if (_cpu_pda) { if (_boot_cpu_pda) { if (!_CPU_PDA_READ2(i, cpu_pda_buf)) break; } else { if (!_CPU_PDA_READ(i, cpu_pda_buf)) break; } } else { if (!CPU_PDA_READ(i, cpu_pda_buf)) break; } if (VALID_MEMBER(x8664_pda_level4_pgt)) { level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); if (!VALID_LEVEL4_PGT_ADDR(level4_pgt)) break; } cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); if (cpunumber != cpus) break; cpus++; if (VALID_MEMBER(x8664_pda_data_offset)) { 
data_offset = ULONG(cpu_pda_buf + OFFSET(x8664_pda_data_offset)); kt->__per_cpu_offset[i] = data_offset; kt->flags |= PER_CPU_OFF; } else data_offset = 0; machdep->machspec->stkinfo.ibase[i] = ULONG(cpu_pda_buf + OFFSET(x8664_pda_irqstackptr)); if (DUMPFILE()) machdep->machspec->current[i] = ULONG(cpu_pda_buf + OFFSET(x8664_pda_pcurrent)); if (CRASHDEBUG(2)) fprintf(fp, "CPU%d: level4_pgt: %lx " "data_offset: %lx pcurrent: %lx\n", i, level4_pgt, data_offset, DUMPFILE() ? machdep->machspec->current[i] : 0); } if (!LKCD_KERNTYPES() && (i = get_array_length("boot_cpu_stack", NULL, 0))) { istacksize = i; } else if ((sp = symbol_search("boot_cpu_stack")) && (nsp = next_symbol(NULL, sp))) { istacksize = (nsp->value - sp->value) & ~(PAGESIZE()-1); if (istacksize != 16384) error(WARNING, "calculated irqstack size of %ld != 16K?\n\n", istacksize); } else istacksize = 16384; machdep->machspec->stkinfo.isize = istacksize; /* * Adjust the kernel top-of-stack values down to their base. */ for (i = 0; i < NR_CPUS; i++) { if (machdep->machspec->stkinfo.ibase[i]) machdep->machspec->stkinfo.ibase[i] -= (istacksize-64); else break; } /* * Sanity check cpu 0's IRQ stack, which should be located at * the address of &boot_cpu_stack[0]. 
*/ sp = value_search(machdep->machspec->stkinfo.ibase[0], &offset); nsp = symbol_search("boot_cpu_stack"); if (!sp || offset || !nsp || (sp->value != nsp->value)) { if (symbol_exists("boot_cpu_stack")) { error(WARNING, "cpu 0 IRQ stack: %lx\n boot_cpu_stack: %lx\n\n", machdep->machspec->stkinfo.ibase[0], symbol_value("boot_cpu_stack")); if (!machdep->machspec->stkinfo.ibase[0]) machdep->machspec->stkinfo.ibase[0] = symbol_value("boot_cpu_stack"); } else error(WARNING, "boot_cpu_stack: symbol does not exist in this kernel!\n"); } kt->cpus = cpus; if (kt->cpus > 1) kt->flags |= SMP; verify_spinlock(); FREEBUF(cpu_pda_buf); } static void x86_64_per_cpu_init(void) { int i, cpus, cpunumber; struct machine_specific *ms; struct syment *irq_sp, *curr_sp, *cpu_sp, *hardirq_stack_ptr_sp, *pcpu_sp; ulong hardirq_stack_ptr; ulong __per_cpu_load = 0; long hardirq_addr = 0, cpu_addr = 0, curr_addr = 0; ms = machdep->machspec; pcpu_sp = per_cpu_symbol_search("pcpu_hot"); hardirq_stack_ptr_sp = per_cpu_symbol_search("hardirq_stack_ptr"); irq_sp = per_cpu_symbol_search("per_cpu__irq_stack_union"); cpu_sp = per_cpu_symbol_search("per_cpu__cpu_number"); curr_sp = per_cpu_symbol_search("per_cpu__current_task"); if (!(kt->flags & PER_CPU_OFF)) { /* * Presume kernel is !CONFIG_SMP. 
*/ if (irq_sp || (irq_sp = symbol_search("irq_stack_union"))) { ms->stkinfo.ibase[0] = irq_sp->value; if ((ms->stkinfo.isize = MEMBER_SIZE("irq_stack_union", "irq_stack")) <= 0) ms->stkinfo.isize = 16384; } if (DUMPFILE() && curr_sp) { if (!(ms->current = calloc(kt->cpus, sizeof(ulong)))) error(FATAL, "cannot calloc" " %d x86_64 current pointers!\n", kt->cpus); get_symbol_data(curr_sp->name, sizeof(ulong), &ms->current[0]); } return; } if (!pcpu_sp && (!cpu_sp || (!irq_sp && !hardirq_stack_ptr_sp))) return; if (MEMBER_EXISTS("irq_stack_union", "irq_stack")) ms->stkinfo.isize = MEMBER_SIZE("irq_stack_union", "irq_stack"); else if (MEMBER_EXISTS("irq_stack", "stack")) ms->stkinfo.isize = MEMBER_SIZE("irq_stack", "stack"); else if (!ms->stkinfo.isize) ms->stkinfo.isize = 16384; if (kernel_symbol_exists("__per_cpu_load")) __per_cpu_load = symbol_value("__per_cpu_load"); if (pcpu_sp) { hardirq_addr = pcpu_sp->value + MEMBER_OFFSET("pcpu_hot", "hardirq_stack_ptr"); cpu_addr = pcpu_sp->value + MEMBER_OFFSET("pcpu_hot", "cpu_number"); curr_addr = pcpu_sp->value + MEMBER_OFFSET("pcpu_hot", "current_task"); } else { if (hardirq_stack_ptr_sp) hardirq_addr = hardirq_stack_ptr_sp->value; cpu_addr = cpu_sp->value; curr_addr = curr_sp->value; } for (i = cpus = 0; i < NR_CPUS; i++) { if (__per_cpu_load && kt->__per_cpu_offset[i] == __per_cpu_load) break; if (!readmem(cpu_addr + kt->__per_cpu_offset[i], KVADDR, &cpunumber, sizeof(int), "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR)) break; if (cpunumber != cpus) break; cpus++; if (pcpu_sp || hardirq_stack_ptr_sp) { if (!readmem(hardirq_addr + kt->__per_cpu_offset[i], KVADDR, &hardirq_stack_ptr, sizeof(void *), "hardirq_stack_ptr (per_cpu)", QUIET|RETURN_ON_ERROR)) continue; if (hardirq_stack_ptr != PAGEBASE(hardirq_stack_ptr)) hardirq_stack_ptr += 8; ms->stkinfo.ibase[i] = hardirq_stack_ptr - ms->stkinfo.isize; } else if (irq_sp) ms->stkinfo.ibase[i] = irq_sp->value + kt->__per_cpu_offset[i]; } if (CRASHDEBUG(2)) fprintf(fp, 
"x86_64_per_cpu_init: " "setup_percpu areas: %d\n", cpus); if (cpus > 1) kt->flags |= SMP; if ((i = get_cpus_present()) && (!cpus || (i < cpus))) kt->cpus = get_highest_cpu_present() + 1; else kt->cpus = cpus; if (DUMPFILE() && (pcpu_sp || curr_sp)) { if ((ms->current = calloc(kt->cpus, sizeof(ulong))) == NULL) error(FATAL, "cannot calloc %d x86_64 current pointers!\n", kt->cpus); for (i = 0; i < kt->cpus; i++) if (!readmem(curr_addr + kt->__per_cpu_offset[i], KVADDR, &ms->current[i], sizeof(ulong), "current_task (per_cpu)", RETURN_ON_ERROR)) continue; } verify_spinlock(); } /* * Gather the ist addresses for each CPU. */ static void x86_64_ist_init(void) { int c, i, cpus, esize; ulong vaddr, offset; ulong init_tss; struct machine_specific *ms; struct syment *boot_sp, *tss_sp, *ist_sp; char *exc_stack_struct_name = NULL; ms = machdep->machspec; if (!(tss_sp = per_cpu_symbol_search("per_cpu__init_tss"))) { if (!(tss_sp = per_cpu_symbol_search("per_cpu__cpu_tss"))) tss_sp = per_cpu_symbol_search("per_cpu__cpu_tss_rw"); } ist_sp = per_cpu_symbol_search("per_cpu__orig_ist"); x86_64_exception_stacks_init(); if (!tss_sp && symbol_exists("init_tss")) { init_tss = symbol_value("init_tss"); for (c = cpus = 0; c < NR_CPUS; c++) { vaddr = init_tss + (c * SIZE(tss_struct)) + OFFSET(tss_struct_ist); readmem(vaddr, KVADDR, &ms->stkinfo.ebase[c][0], sizeof(ulong) * MAX_EXCEPTION_STACKS, "tss_struct ist array", FAULT_ON_ERROR); if (ms->stkinfo.ebase[c][0] == 0) break; } } else if (tss_sp) { for (c = 0; c < kt->cpus; c++) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { if (kt->__per_cpu_offset[c] == 0) break; vaddr = tss_sp->value + kt->__per_cpu_offset[c]; } else vaddr = tss_sp->value; vaddr += OFFSET(tss_struct_ist); readmem(vaddr, KVADDR, &ms->stkinfo.ebase[c][0], sizeof(ulong) * MAX_EXCEPTION_STACKS, "tss_struct ist array", FAULT_ON_ERROR); if (ms->stkinfo.ebase[c][0] == 0) break; } if (ist_sp) { for (c = 0; c < kt->cpus; c++) { ulong estacks[MAX_EXCEPTION_STACKS]; if 
((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { if (kt->__per_cpu_offset[c] == 0) break; vaddr = ist_sp->value + kt->__per_cpu_offset[c]; } else vaddr = ist_sp->value; readmem(vaddr, KVADDR, &estacks[0], sizeof(ulong) * MAX_EXCEPTION_STACKS, "orig_ist array", FAULT_ON_ERROR); for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[c][i] && estacks[i] && (ms->stkinfo.ebase[c][i] != estacks[i])) error(WARNING, "cpu %d %s stack: init_tss: %lx orig_ist: %lx\n", c, ms->stkinfo.exception_stacks[i], ms->stkinfo.ebase[c][i], estacks[i]); ms->stkinfo.ebase[c][i] = estacks[i]; } } } } else if (!symbol_exists("boot_exception_stacks")) { machdep->flags |= NO_TSS; if (CRASHDEBUG(1)) error(NOTE, "CONFIG_X86_NO_TSS\n"); return; } if (MEMBER_EXISTS("cea_exception_stacks", "NMI_stack")) { /* The effective cpu entry area mapping with guard pages. */ exc_stack_struct_name = "cea_exception_stacks"; } else if (MEMBER_EXISTS("exception_stacks", "NMI_stack")) { /* The exception stacks' physical storage. No guard pages and no VC stack. */ exc_stack_struct_name = "exception_stacks"; } if (exc_stack_struct_name) { for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (STREQ(ms->stkinfo.exception_stacks[i], "DEBUG")) ms->stkinfo.esize[i] = MEMBER_SIZE(exc_stack_struct_name, "DB_stack"); else if (STREQ(ms->stkinfo.exception_stacks[i], "NMI")) ms->stkinfo.esize[i] = MEMBER_SIZE(exc_stack_struct_name, "NMI_stack"); else if (STREQ(ms->stkinfo.exception_stacks[i], "DOUBLEFAULT")) ms->stkinfo.esize[i] = MEMBER_SIZE(exc_stack_struct_name, "DF_stack"); else if (STREQ(ms->stkinfo.exception_stacks[i], "MCE")) ms->stkinfo.esize[i] = MEMBER_SIZE(exc_stack_struct_name, "MCE_stack"); else if (STREQ(ms->stkinfo.exception_stacks[i], "VC")) ms->stkinfo.esize[i] = MEMBER_SIZE(exc_stack_struct_name, "VC_stack"); } /* * Adjust the top-of-stack addresses down to the base stack address * and set stack page availabilituy flag. 
*/ for (c = 0; c < kt->cpus; c++) { for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[c][i]) ms->stkinfo.ebase[c][i] -= ms->stkinfo.esize[i]; /* VC stack can be unmapped if SEV-ES is disabled or not supported. */ ms->stkinfo.available[c][i] = accessible(ms->stkinfo.ebase[c][i]); } } return; } else if (ms->stkinfo.ebase[0][0] && ms->stkinfo.ebase[0][1]) esize = ms->stkinfo.ebase[0][1] - ms->stkinfo.ebase[0][0]; else esize = 4096; /* * Knowing the size, now adjust the top-of-stack addresses back down * to the base stack address. */ for (c = 0; c < kt->cpus; c++) { for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[c][i] == 0) break; if ((THIS_KERNEL_VERSION >= LINUX(2,6,18)) && STREQ(ms->stkinfo.exception_stacks[i], "DEBUG")) ms->stkinfo.esize[i] = esize*2; else ms->stkinfo.esize[i] = esize; ms->stkinfo.ebase[c][i] -= ms->stkinfo.esize[i]; ms->stkinfo.available[c][i] = TRUE; } } /* * Sanity check cpu 0's first exception stack, which should be * located at: &boot_exception_stacks[0] */ boot_sp = value_search(ms->stkinfo.ebase[0][0], &offset); if (!boot_sp || offset || !STREQ(boot_sp->name, "boot_exception_stacks")) { if ((boot_sp = symbol_search("boot_exception_stacks"))) { error(WARNING, "cpu 0 first exception stack: %lx\n boot_exception_stacks: %lx\n\n", ms->stkinfo.ebase[0][0], boot_sp->value); if (!ms->stkinfo.ebase[0][0]) ms->stkinfo.ebase[0][0] = boot_sp->value; } else if (STRUCT_EXISTS("x8664_pda")) error(WARNING, "boot_exception_stacks: symbol does not exist in this kernel!\n"); } } /* * Determine whether the unused gap at the top of the IRQ stack exists, * and store its size (either 0 or 64 bytes). 
*/
static void
x86_64_irq_stack_gap_init(void)
{
	int c, cpus;
	struct syment *sp;
	ulong irq_stack_ptr;
	struct machine_specific *ms = machdep->machspec;

	/* Only compute the gap once. */
	if (ms->irq_stack_gap != UNINITIALIZED)
		return;

	/* Kernels >= 4.9 never have the 64-byte gap. */
	if (THIS_KERNEL_VERSION >= LINUX(4,9,0)) {
		ms->irq_stack_gap = 0;
		return;
	}

	/* Default for older kernels; may be cleared below. */
	ms->irq_stack_gap = 64;

	/*
	 * Check for backports of this commit:
	 *
	 * commit 4950d6d48a0c43cc61d0bbb76fb10e0214b79c66
	 * Author: Josh Poimboeuf
	 * Date: Thu Aug 18 10:59:08 2016 -0500
	 *
	 * x86/dumpstack: Remove 64-byte gap at end of irq stack
	 */
	if (!(sp = per_cpu_symbol_search("per_cpu__irq_stack_ptr")))
		return;

	/*
	 * CONFIG_SMP=n
	 */
	if (!(kt->flags & PER_CPU_OFF)) {
		get_symbol_data(sp->name, sizeof(ulong), &irq_stack_ptr);
		/* A page-aligned pointer means the gap was removed. */
		if ((irq_stack_ptr & 0xfff) == 0)
			ms->irq_stack_gap = 0;
		return;
	}

	/*
	 * Check the per-cpu irq_stack_ptr of the first possible cpu.
	 */
	if (!cpu_map_addr("possible"))
		return;

	cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS;
	for (c = 0; c < cpus; c++) {
		if (!in_cpu_map(POSSIBLE, c))
			continue;
		if (readmem(sp->value + kt->__per_cpu_offset[c], KVADDR,
		    &irq_stack_ptr, sizeof(void *), "irq_stack_ptr",
		    QUIET|RETURN_ON_ERROR)) {
			/* Page-aligned => backport present => no gap. */
			if ((irq_stack_ptr & 0xfff) == 0)
				ms->irq_stack_gap = 0;
			break;
		}
	}
}

/*
 * Check kernel version and/or backport for L1TF
 */
static void
x86_64_l1tf_init(void)
{
	/* 4.18.1+, or any kernel exporting the mitigation symbol. */
	if (THIS_KERNEL_VERSION >= LINUX(4,18,1) ||
	    kernel_symbol_exists("l1tf_mitigation"))
		machdep->flags |= L1TF;
}

/*
 * Post-initialization pass: scan each cpu's NMI exception stack for
 * clues indicating the cpu was stopped by an NMI at crash time.
 */
static void
x86_64_post_init(void)
{
	int c, i, clues;
	struct machine_specific *ms;
	ulong *up;
	struct syment *spt, *spc;
	ulong offset;

	/*
	 * Check whether each cpu was stopped by an NMI.
*/ ms = machdep->machspec; if (DUMPFILE() && (ms->crash_nmi_rsp = calloc(kt->cpus, sizeof(ulong))) == NULL) error(FATAL, "cannot calloc %d x86_64 NMI rsp values\n", kt->cpus); for (c = 0; DUMPFILE() && (c < kt->cpus); c++) { if (ms->stkinfo.ebase[c][NMI_STACK] == 0) break; if (!readmem(ms->stkinfo.ebase[c][NMI_STACK], KVADDR, ms->irqstack, ms->stkinfo.esize[NMI_STACK], "NMI exception stack contents", RETURN_ON_ERROR|QUIET)) continue; for (i = clues = 0; i < (ms->stkinfo.esize[NMI_STACK])/sizeof(ulong); i++){ up = (ulong *)(&ms->irqstack[i*sizeof(ulong)]); if (!is_kernel_text(*up) || !(spt = value_search(*up, &offset))) continue; if (STREQ(spt->name, "try_crashdump") || STREQ(spt->name, "die_nmi")) clues++; if ((STREQ(spt->name, "nmi_watchdog_tick") || STREQ(spt->name, "default_do_nmi"))) { spc = x86_64_function_called_by((*up)-5); if (spc && STREQ(spc->name, "die_nmi")) clues += 2; } if (STREQ(spt->name, "crash_nmi_callback")) { up = (ulong *)(&ms->irqstack[ms->stkinfo.esize[NMI_STACK]]); up -= 2; ms->crash_nmi_rsp[c] = *up; } } if (clues >= 2) kt->cpu_flags[c] |= NMI; } if (symbol_exists("__sched_text_start") && (symbol_value("__sched_text_start") == symbol_value("schedule"))) machdep->flags |= SCHED_TEXT; } /* * No x86_64 swapper_pg_dir; initialize the vt->kernel_pgd[NR_CPUS] array * with the lazily-sync'd init_level4_pgt page address. The level4 page * could be taken from the per-cpu cpu_pda.level4_pgt pointer, but since * the kernel pgd_offset_k() is defined as shown below, we'll derive * the third-level pgd in the same manner: * * /@ This accesses the reference page table of the boot cpu. * Other CPUs get synced lazily via the page fault handler. 
@/ * * static inline pgd_t *pgd_offset_k(unsigned long address) * { * unsigned long addr; * * addr = pml4_val(init_level4_pgt[pml4_index(address)]); * addr &= PHYSICAL_PAGE_MASK; * return __pgd_offset_k((pgd_t *)__va(addr), address); * } */ static void x86_64_init_kernel_pgd(void) { int i; ulong kernel_pgt = 0; if (kernel_symbol_exists("init_level4_pgt")) kernel_pgt = symbol_value("init_level4_pgt"); else if (kernel_symbol_exists("init_top_pgt")) kernel_pgt = symbol_value("init_top_pgt"); else error(WARNING, "neither \"init_level4_pgt\" or \"init_top_pgt\" exist\n"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = kernel_pgt; FILL_TOP_PGD(); } /* * x86_64 __pa() clone. */ ulong x86_64_VTOP(ulong vaddr) { if (vaddr >= __START_KERNEL_map) return ((vaddr) - (ulong)__START_KERNEL_map + machdep->machspec->phys_base); else return ((vaddr) - PAGE_OFFSET); } /* * Include both vmalloc'd and module address space as VMALLOC space. */ int x86_64_IS_VMALLOC_ADDR(ulong vaddr) { return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END) || ((machdep->flags & VMEMMAP) && (vaddr >= VMEMMAP_VADDR && vaddr <= VMEMMAP_END)) || (vaddr >= MODULES_VADDR && vaddr <= MODULES_END) || (vaddr >= VSYSCALL_START && vaddr < VSYSCALL_END) || (machdep->machspec->cpu_entry_area_start && vaddr >= machdep->machspec->cpu_entry_area_start && vaddr <= machdep->machspec->cpu_entry_area_end) || ((machdep->flags & VM_5LEVEL) && vaddr > VMALLOC_END && vaddr < VMEMMAP_VADDR)); } static int x86_64_is_module_addr(ulong vaddr) { return (vaddr >= MODULES_VADDR && vaddr <= MODULES_END); } /* * Refining this may cause more problems than just doing it this way. 
*/ static int x86_64_is_kvaddr(ulong addr) { if (machdep->flags & VM_XEN_RHEL4) return (addr >= VMALLOC_START); else return (addr >= PAGE_OFFSET); } static int x86_64_is_uvaddr(ulong addr, struct task_context *tc) { return (addr < USERSPACE_TOP); } static int x86_64_is_page_ptr(ulong addr, physaddr_t *phys) { ulong pfn, nr; if (IS_SPARSEMEM() && (machdep->flags & VMEMMAP) && (addr >= VMEMMAP_VADDR && addr <= VMEMMAP_END) && !((addr - VMEMMAP_VADDR) % SIZE(page))) { pfn = (addr - VMEMMAP_VADDR) / SIZE(page); nr = pfn_to_section_nr(pfn); if (valid_section_nr(nr)) { if (phys) *phys = PTOB(pfn); return TRUE; } } return FALSE; } /* * Find the kernel pgd entry.. * pgd = pgd_offset_k(addr); */ static ulong * x86_64_kpgd_offset(ulong kvaddr, int verbose, int IS_XEN) { ulong *pgd; FILL_TOP_PGD(); pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); if (verbose) { fprintf(fp, "PGD DIRECTORY: %lx\n", vt->kernel_pgd[0]); if (IS_XEN) fprintf(fp, "PAGE DIRECTORY: %lx [machine]\n", *pgd); else fprintf(fp, "PAGE DIRECTORY: %lx\n", *pgd & ~machdep->machspec->sme_mask); } return pgd; } /* * In x86 64 bit system, Linux uses the 4-level page table as the default both * in Kernel page tables and user page tables. * * But in some old versions(pre-2.6.11), the 3-level page table is used for * user page tables. * * So reuse the PUD and find the user pgd entry for this older version Linux.. 
* pgd = pgd_offset(mm, address); */ static ulong x86_64_upgd_offset_legacy(struct task_context *tc, ulong uvaddr, int verbose, int IS_XEN) { ulong *pud; ulong pud_paddr; ulong pud_pte; if (task_mm(tc->task, TRUE)) pud = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pud, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); pud_paddr = x86_64_VTOP((ulong)pud); FILL_PUD(pud_paddr, PHYSADDR, PAGESIZE()); pud = ((ulong *)pud_paddr) + pud_index(uvaddr); pud_pte = ULONG(machdep->pud + PAGEOFFSET(pud)); if (verbose) { if (IS_XEN) fprintf(fp, " PGD: %lx => %lx [machine]\n", (ulong)pud, pud_pte); else fprintf(fp, " PGD: %lx => %lx\n", (ulong)pud, pud_pte & ~machdep->machspec->sme_mask); } return pud_pte; } /* * Find the user pgd entry.. * pgd = pgd_offset(mm, address); */ static ulong x86_64_upgd_offset(struct task_context *tc, ulong uvaddr, int verbose, int IS_XEN) { ulong *pgd; ulong pgd_paddr; ulong pgd_pte; if (task_mm(tc->task, TRUE)) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); pgd_paddr = x86_64_VTOP((ulong)pgd); FILL_PGD(pgd_paddr, PHYSADDR, PAGESIZE()); pgd = ((ulong *)pgd_paddr) + pgd_index(uvaddr); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(pgd)); if (verbose) { if (IS_XEN) fprintf(fp, " PGD: %lx => %lx [machine]\n", (ulong)pgd, pgd_pte); else fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd, pgd_pte & ~machdep->machspec->sme_mask); } return pgd_pte; } /* * Find an entry in the fourth-level page table.. 
* p4d = p4d_offset(pgd, address); */ static ulong x86_64_p4d_offset(ulong pgd_pte, ulong vaddr, int verbose, int IS_XEN) { ulong *p4d; ulong p4d_paddr; ulong p4d_pte; p4d_paddr = pgd_pte & PHYSICAL_PAGE_MASK; p4d_paddr &= ~machdep->machspec->sme_mask; FILL_P4D(p4d_paddr, PHYSADDR, PAGESIZE()); p4d = ((ulong *)p4d_paddr) + p4d_index(vaddr); p4d_pte = ULONG(machdep->machspec->p4d + PAGEOFFSET(p4d)); p4d_pte &= ~machdep->machspec->sme_mask; if (verbose) { if (IS_XEN) fprintf(fp, " P4D: %lx => %lx [machine]\n", (ulong)p4d, p4d_pte); else fprintf(fp, " P4D: %lx => %lx\n", (ulong)p4d, p4d_pte); } return p4d_pte; } /* * Find an entry in the third-level page table.. * pud = pud_offset(pgd, address); */ static ulong x86_64_pud_offset(ulong pgd_pte, ulong vaddr, int verbose, int IS_XEN) { ulong *pud; ulong pud_paddr; ulong pud_pte; pud_paddr = pgd_pte & PHYSICAL_PAGE_MASK; pud_paddr &= ~machdep->machspec->sme_mask; if (IS_XEN) { pud_paddr = xen_m2p(pud_paddr); if (verbose) fprintf(fp, " PGD: %lx\n", pud_paddr); } FILL_PUD(pud_paddr, PHYSADDR, PAGESIZE()); pud = ((ulong *)pud_paddr) + pud_index(vaddr); pud_pte = ULONG(machdep->pud + PAGEOFFSET(pud)); pud_pte &= ~machdep->machspec->sme_mask; if (verbose) { if (IS_XEN) fprintf(fp, " PUD: %lx => %lx [machine]\n", (ulong)pud, pud_pte); else fprintf(fp, " PUD: %lx => %lx\n", (ulong)pud, pud_pte); } return pud_pte; } /* * Find an entry in the middle page table.. 
* pmd = pmd_offset(pud, address); */ static ulong x86_64_pmd_offset(ulong pud_pte, ulong vaddr, int verbose, int IS_XEN) { ulong *pmd; ulong pmd_paddr; ulong pmd_pte; pmd_paddr = pud_pte & PHYSICAL_PAGE_MASK; pmd_paddr &= ~machdep->machspec->sme_mask; if (IS_XEN) { pmd_paddr = xen_m2p(pmd_paddr); if (verbose) fprintf(fp, " PUD: %lx\n", pmd_paddr); } FILL_PMD(pmd_paddr, PHYSADDR, PAGESIZE()); pmd = ((ulong *)pmd_paddr) + pmd_index(vaddr); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(pmd)); pmd_pte &= ~machdep->machspec->sme_mask; if (verbose) { if (IS_XEN) fprintf(fp, " PMD: %lx => %lx [machine]\n", (ulong)pmd, pmd_pte); else fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd, pmd_pte); } return pmd_pte; } /* * Find an entry in the pet page table.. * pmd = pmd_offset(pud, address); */ static ulong x86_64_pte_offset(ulong pmd_pte, ulong vaddr, int verbose, int IS_XEN) { ulong *ptep; ulong pte_paddr; ulong pte; pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK; pte_paddr &= ~machdep->machspec->sme_mask; if (IS_XEN) { pte_paddr = xen_m2p(pte_paddr); if (verbose) fprintf(fp, " PMD: %lx\n", pte_paddr); } FILL_PTBL(pte_paddr, PHYSADDR, PAGESIZE()); ptep = ((ulong *)pte_paddr) + pte_index(vaddr); pte = ULONG(machdep->ptbl + PAGEOFFSET(ptep)); pte &= ~machdep->machspec->sme_mask; if (verbose) { if (IS_XEN) fprintf(fp, " PTE: %lx => %lx [machine]\n", (ulong)ptep, pte); else fprintf(fp, " PTE: %lx => %lx\n", (ulong)ptep, pte); } return pte; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(), just pass it to x86_64_kvtop(). 
*/ static int x86_64_uvtop_level4(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong pgd_pte; ulong pud_pte; ulong pmd_pte; ulong pte; physaddr_t physpage; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); pgd_pte = x86_64_upgd_offset(tc, uvaddr, verbose, FALSE); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* If the VM is in 5-level page table */ if (machdep->flags & VM_5LEVEL) { ulong p4d_pte; /* * p4d = p4d_offset(pgd, address); */ p4d_pte = x86_64_p4d_offset(pgd_pte, uvaddr, verbose, FALSE); if (!(p4d_pte & _PAGE_PRESENT)) goto no_upage; /* * pud = pud_offset(p4d, address); */ pud_pte = x86_64_pud_offset(p4d_pte, uvaddr, verbose, FALSE); } else { /* * pud = pud_offset(pgd, address); */ pud_pte = x86_64_pud_offset(pgd_pte, uvaddr, verbose, FALSE); } if (!(pud_pte & _PAGE_PRESENT)) goto no_upage; if (pud_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (1GB)\n\n", PAGEBASE(pud_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pud_pte, 0, 0); } physpage = (PAGEBASE(pud_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_1GB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * pmd = pmd_offset(pud, address); */ pmd_pte = x86_64_pmd_offset(pud_pte, uvaddr, verbose, FALSE); if (!(pmd_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (2MB%s)\n\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK, IS_ZEROPAGE(PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) ? 
", ZERO PAGE" : ""); x86_64_translate_pte(pmd_pte, 0, 0); } physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte = x86_64_pte_offset(pmd_pte, uvaddr, verbose, FALSE); if (!(pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK, IS_ZEROPAGE(PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK) ? "(ZERO PAGE)" : ""); x86_64_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_64_uvtop_level4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong pgd_pte; ulong pud_pte; ulong pmd_pte; ulong pseudo_pmd_pte; ulong pte; ulong pseudo_pte; physaddr_t physpage; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); pgd_pte = x86_64_upgd_offset(tc, uvaddr, verbose, TRUE); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; pud_pte = x86_64_pud_offset(pgd_pte, uvaddr, verbose, TRUE); if (!(pud_pte & _PAGE_PRESENT)) goto no_upage; /* * pmd = pmd_offset(pud, address); */ pmd_pte = x86_64_pmd_offset(pud_pte, uvaddr, verbose, TRUE); if (!(pmd_pte & _PAGE_PRESENT)) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) fprintf(fp, " PAGE: %lx (2MB) [machine]\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); if (verbose) { fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(PAGEBASE(pseudo_pmd_pte) & 
PHYSICAL_PAGE_MASK))); x86_64_translate_pte(pseudo_pmd_pte, 0, 0); } physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte = x86_64_pte_offset(pmd_pte, uvaddr, verbose, TRUE); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); if (verbose) fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx [machine]\n", PAGEBASE(pte) & PHYSICAL_PAGE_MASK); fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_64_uvtop_level4_rhel4_xen_wpt(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong pgd_pte; ulong pmd_pte; ulong pseudo_pmd_pte; ulong pte; ulong pseudo_pte; physaddr_t physpage; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); pgd_pte = x86_64_upgd_offset_legacy(tc, uvaddr, verbose, TRUE); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* * pmd = pmd_offset(pgd, address); */ pmd_pte = x86_64_pmd_offset(pgd_pte, uvaddr, verbose, TRUE); if (!(pmd_pte & _PAGE_PRESENT)) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) fprintf(fp, " PAGE: %lx (2MB) [machine]\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); if (verbose) { fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, 
MKSTR(PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK))); x86_64_translate_pte(pseudo_pmd_pte, 0, 0); } physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte = x86_64_pte_offset(pmd_pte, uvaddr, verbose, TRUE); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); if (verbose) fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx [machine]\n", PAGEBASE(pte) & PHYSICAL_PAGE_MASK); fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong pgd_pte; ulong pmd_pte; ulong pte; physaddr_t physpage; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return x86_64_kvtop(tc, uvaddr, paddr, verbose); /* * pgd = pgd_offset(mm, address); */ pgd_pte = x86_64_upgd_offset_legacy(tc, uvaddr, verbose, FALSE); if (!(pgd_pte & _PAGE_PRESENT)) goto no_upage; /* * pmd = pmd_offset(pgd, address); */ pmd_pte = x86_64_pmd_offset(pgd_pte, uvaddr, verbose, FALSE); if (!(pmd_pte & _PAGE_PRESENT)) goto no_upage; if (pmd_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (2MB)\n\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pmd_pte, 0, 0); } physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + (uvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, address); * pte = *ptep; */ pte = x86_64_pte_offset(pmd_pte, uvaddr, verbose, FALSE); if (!(pte & (_PAGE_PRESENT))) { *paddr = pte; if (pte && verbose) { 
fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(uvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. */ static int x86_64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; ulong pud_pte; ulong pmd_pte; ulong pte; physaddr_t physpage; if ((SADUMP_DUMPFILE() || QEMU_MEM_DUMP_NO_VMCOREINFO() || VMSS_DUMPFILE()) && !(machdep->flags & KSYMS_START)) { /* * In the case of sadump, to calculate kaslr_offset and * phys_base, kvtop is called during symtab_init(). In this * stage phys_base is not initialized yet and x86_64_VTOP() * does not work. Jump to the code of pagetable translation. 
*/ pgd = x86_64_kpgd_offset(kvaddr, verbose, FALSE); goto start_vtop_with_pagetable; } if (!IS_KVADDR(kvaddr)) return FALSE; if (XEN_HYPER_MODE()) { if (XEN_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - XEN_VIRT_START + xen_phys_start(); return TRUE; } if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } FILL_TOP_PGD_HYPER(); pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); if (verbose) { fprintf(fp, "PGD DIRECTORY: %lx\n", vt->kernel_pgd[0]); fprintf(fp, "PAGE DIRECTORY: %lx\n", *pgd); } } else { if (!vt->vmalloc_start) { *paddr = x86_64_VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = x86_64_VTOP(kvaddr); if (!verbose) return TRUE; } if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) return (x86_64_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); /* * pgd = pgd_offset_k(addr); */ pgd = x86_64_kpgd_offset(kvaddr, verbose, FALSE); } start_vtop_with_pagetable: if (!(*pgd & _PAGE_PRESENT)) goto no_kpage; /* If the VM is in 5-level page table */ if (machdep->flags & VM_5LEVEL) { ulong p4d_pte; /* * p4d = p4d_offset(pgd, address); */ p4d_pte = x86_64_p4d_offset(*pgd, kvaddr, verbose, FALSE); if (!(p4d_pte & _PAGE_PRESENT)) goto no_kpage; /* * pud = pud_offset(p4d, address); */ pud_pte = x86_64_pud_offset(p4d_pte, kvaddr, verbose, FALSE); } else { pud_pte = x86_64_pud_offset(*pgd, kvaddr, verbose, FALSE); } if (!(pud_pte & _PAGE_PRESENT)) goto no_kpage; if (pud_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (1GB)\n\n", PAGEBASE(pud_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pud_pte, 0, 0); } physpage = (PAGEBASE(pud_pte) & PHYSICAL_PAGE_MASK) + (kvaddr & ~_1GB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * pmd = pmd_offset(pud, address); */ pmd_pte = x86_64_pmd_offset(pud_pte, kvaddr, verbose, FALSE); if (!(pmd_pte & _PAGE_PRESENT)) goto no_kpage; if (pmd_pte & _PAGE_PSE) { if (verbose) { fprintf(fp, " PAGE: %lx (2MB)\n\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pmd_pte, 0, 0); 
} physpage = (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK) + (kvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, addr); * pte = *ptep; */ pte = x86_64_pte_offset(pmd_pte, kvaddr, verbose, FALSE); if (!(pte & (_PAGE_PRESENT))) { if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_kpage; } *paddr = (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pte, 0, 0); } return TRUE; no_kpage: return FALSE; } static int x86_64_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; ulong pud_pte; ulong pmd_pte; ulong pseudo_pmd_pte; ulong pte; ulong pseudo_pte; physaddr_t physpage; char buf[BUFSIZE]; /* * pgd = pgd_offset_k(addr); */ pgd = x86_64_kpgd_offset(kvaddr, verbose, TRUE); if (!(*pgd & _PAGE_PRESENT)) goto no_kpage; pud_pte = x86_64_pud_offset(*pgd, kvaddr, verbose, TRUE); if (!(pud_pte & _PAGE_PRESENT)) goto no_kpage; /* * pmd = pmd_offset(pgd, addr); */ pmd_pte = x86_64_pmd_offset(pud_pte, kvaddr, verbose, TRUE); if (!(pmd_pte & _PAGE_PRESENT)) goto no_kpage; if (pmd_pte & _PAGE_PSE) { if (verbose) fprintf(fp, " PAGE: %lx (2MB) [machine]\n", PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK); pseudo_pmd_pte = xen_m2p(PAGEBASE(pmd_pte)); if (pseudo_pmd_pte == XEN_MACHADDR_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pmd_pte |= PAGEOFFSET(pmd_pte); if (verbose) { fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK))); x86_64_translate_pte(pseudo_pmd_pte, 0, 0); } physpage = (PAGEBASE(pseudo_pmd_pte) & PHYSICAL_PAGE_MASK) + (kvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } /* * ptep = pte_offset_map(pmd, addr); * pte = *ptep; */ pte = x86_64_pte_offset(pmd_pte, kvaddr, verbose, TRUE); if 
(!(pte & (_PAGE_PRESENT))) { if (pte && verbose) { fprintf(fp, "\n"); x86_64_translate_pte(pte, 0, 0); } goto no_kpage; } pseudo_pte = xen_m2p(pte & PHYSICAL_PAGE_MASK); if (verbose) fprintf(fp, " PTE: %lx\n", pseudo_pte + PAGEOFFSET(pte)); *paddr = (PAGEBASE(pseudo_pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(kvaddr); if (verbose) { fprintf(fp, " PAGE: %lx [machine]\n", PAGEBASE(pte) & PHYSICAL_PAGE_MASK); fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr) & PHYSICAL_PAGE_MASK); x86_64_translate_pte(pseudo_pte + PAGEOFFSET(pte), 0, 0); } return TRUE; no_kpage: return FALSE; } /* * Determine where vmalloc'd memory starts. */ static ulong x86_64_vmalloc_start(void) { return ((ulong)VMALLOC_START); } /* * thread_info implementation makes for less accurate results here. */ static int x86_64_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * easy enough... */ static ulong x86_64_processor_speed(void) { unsigned long cpu_khz = 0; if (machdep->mhz) return (machdep->mhz); if (symbol_exists("cpu_khz")) { get_symbol_data("cpu_khz", sizeof(int), &cpu_khz); if (cpu_khz) return(machdep->mhz = cpu_khz/1000); } return 0; } /* * Accept or reject a symbol from the kernel namelist. 
*/ static int x86_64_verify_symbol(const char *name, ulong value, char type) { if (!name || !strlen(name)) return FALSE; if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift")) return TRUE; if (!(machdep->flags & KSYMS_START)) { if (STREQ(name, "_text") || STREQ(name, "_stext")) { machdep->flags |= KSYMS_START; if (!st->first_ksymbol) st->first_ksymbol = value; return TRUE; } else if (STREQ(name, "__per_cpu_start")) { st->flags |= PERCPU_SYMS; return TRUE; } else if (st->flags & PERCPU_SYMS) { if (STRNEQ(name, "per_cpu") || STREQ(name, "__per_cpu_end")) return TRUE; if ((type == 'V') || (type == 'd') || (type == 'D')) return TRUE; } return FALSE; } return TRUE; } /* * Prevent base kernel pc section ranges that end with a * vsyscall address from being accepted for kernel module * addresses. */ static int x86_64_verify_line_number(ulong pc, ulong low, ulong high) { if (IS_MODULE_VADDR(pc) && !IS_MODULE_VADDR(low) && is_vsyscall_addr(high)) return FALSE; return TRUE; } /* * Get the relevant page directory pointer from a task structure. */ static ulong x86_64_get_task_pgd(ulong task) { return (error(FATAL, "x86_64_get_task_pgd: N/A\n")); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. 
*/ static int x86_64_translate_pte(ulong pte, void *physaddr, ulonglong unused) { int c, others, len1, len2, len3; ulong paddr; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; int page_present; paddr = pte & PHYSICAL_PAGE_MASK; page_present = pte & (_PAGE_PRESENT | _PAGE_PROTNONE); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte & _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (pte & _PAGE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (pte & _PAGE_PWT) fprintf(fp, "%sPWT", others++ ? "|" : ""); if (pte & _PAGE_PCD) fprintf(fp, "%sPCD", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? 
"|" : ""); if ((pte & _PAGE_PSE) && (pte & _PAGE_PRESENT)) fprintf(fp, "%sPSE", others++ ? "|" : ""); if ((pte & _PAGE_PROTNONE) && !(pte & _PAGE_PRESENT)) fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); if (pte & _PAGE_GLOBAL) fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); if (pte & _PAGE_NX) fprintf(fp, "%sNX", others++ ? "|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return (page_present); } /* * Look for likely exception frames in a stack. */ static int x86_64_eframe_search(struct bt_info *bt) { int i, c, cnt, estack_index; ulong estack, irqstack, stacksize; ulong *up; struct machine_specific *ms; struct bt_info bt_local; if (bt->flags & BT_EFRAME_SEARCH2) { BCOPY(bt, &bt_local, sizeof(struct bt_info)); bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2; ms = machdep->machspec; for (c = 0; c < kt->cpus; c++) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; if (ms->stkinfo.ibase[c] == 0) break; bt->hp->esp = ms->stkinfo.ibase[c]; fprintf(fp, "CPU %d IRQ STACK:", c); if (hide_offline_cpu(c)) { fprintf(fp, " [OFFLINE]\n\n"); continue; } else fprintf(fp, "\n"); if ((cnt = x86_64_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } for (c = 0; c < kt->cpus; c++) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[c][i] == 0 || !ms->stkinfo.available[c][i]) break; bt->hp->esp = ms->stkinfo.ebase[c][i]; fprintf(fp, "CPU %d %s EXCEPTION STACK:", c, ms->stkinfo.exception_stacks[i]); if (hide_offline_cpu(c)) { fprintf(fp, " [OFFLINE]\n\n"); continue; } else fprintf(fp, "\n"); if ((cnt = x86_64_eframe_search(bt))) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } } return 0; } if (bt->hp && bt->hp->esp) { ms = machdep->machspec; bt->stkptr = bt->hp->esp; if ((estack = x86_64_in_exception_stack(bt, &estack_index))) { stacksize = ms->stkinfo.esize[estack_index]; bt->stackbase = estack; bt->stacktop = estack 
+ ms->stkinfo.esize[estack_index]; bt->stackbuf = ms->irqstack; alter_stackbuf(bt); } else if ((irqstack = x86_64_in_irqstack(bt))) { stacksize = ms->stkinfo.isize; bt->stackbase = irqstack; bt->stacktop = irqstack + ms->stkinfo.isize; bt->stackbuf = ms->irqstack; alter_stackbuf(bt); } else if (!INSTACK(bt->stkptr, bt)) error(FATAL, "unrecognized stack address for this task: %lx\n", bt->hp->esp); } stacksize = bt->stacktop - bt->stackbase - SIZE(pt_regs); if (bt->stkptr) i = (bt->stkptr - bt->stackbase)/sizeof(ulong); else i = 0; for (cnt = 0; i <= stacksize/sizeof(ulong); i++) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (x86_64_exception_frame(EFRAME_SEARCH|EFRAME_PRINT| EFRAME_VERIFY, 0, (char *)up, bt, fp)) cnt++; } return cnt; } static void x86_64_display_full_frame(struct bt_info *bt, ulong rsp, FILE *ofp) { int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; if (rsp < bt->frameptr) return; if (!INSTACK(rsp, bt) || !INSTACK(bt->frameptr, bt)) return; words = (rsp - bt->frameptr) / sizeof(ulong) + 1; addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(ofp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); } /* * Check a frame for a requested reference. 
*/ static void x86_64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) { ulong offset; struct syment *sp = NULL; if (!name) sp = value_search(text, &offset); else if (!text) sp = symbol_search(name); switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (name) { if (STREQ(name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } else { if (sp && !offset && STREQ(sp->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } break; case BT_REF_HEXVAL: if (text) { if (bt->ref->hexval == text) bt->ref->cmdflags |= BT_REF_FOUND; } else if (sp && (bt->ref->hexval == sp->value)) bt->ref->cmdflags |= BT_REF_FOUND; else if (!name && !text && (bt->ref->hexval == 0)) bt->ref->cmdflags |= BT_REF_FOUND; break; } } /* * Determine the function containing a .text.lock. reference. */ static ulong text_lock_function(char *name, struct bt_info *bt, ulong locktext) { int c, reterror, instr, arg; char buf[BUFSIZE]; char *arglist[MAXARGS]; char *p1; ulong locking_func; instr = arg = -1; locking_func = 0; open_tmpfile2(); if (STREQ(name, ".text.lock.spinlock")) sprintf(buf, "x/4i 0x%lx", locktext); else sprintf(buf, "x/1i 0x%lx", locktext); if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { close_tmpfile2(); bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { c = parse_line(buf, arglist); if (instr == -1) { /* * Check whether are * in the output string. 
*/ if (LASTCHAR(arglist[0]) == ':') { instr = 1; arg = 2; } else { instr = 2; arg = 3; } } if (c < (arg+1)) break; if (STREQ(arglist[instr], "jmpq") || STREQ(arglist[instr], "jmp")) { p1 = arglist[arg]; reterror = 0; locking_func = htol(p1, RETURN_ON_ERROR, &reterror); if (reterror) locking_func = 0; break; } } close_tmpfile2(); if (!locking_func) bt->flags |= BT_FRAMESIZE_DISABLE; return locking_func; } /* * As of 2.6.29, the handy check for the "error_exit:" label * no longer applies; it became an entry point that was jmp'd to * after the exception handler was called. Therefore, if the * return address is an offset from any of these functions, * then the exception frame should be checked for: * * .macro errorentry sym do_sym * errorentry invalid_TSS do_invalid_TSS * errorentry segment_not_present do_segment_not_present * errorentry alignment_check do_alignment_check * errorentry xen_stack_segment do_stack_segment * errorentry general_protection do_general_protection * errorentry page_fault do_page_fault * * .macro zeroentry sym do_sym * zeroentry divide_error do_divide_error * zeroentry overflow do_overflow * zeroentry bounds do_bounds * zeroentry invalid_op do_invalid_op * zeroentry device_not_available do_device_not_available * zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun * zeroentry spurious_interrupt_bug do_spurious_interrupt_bug * zeroentry coprocessor_error do_coprocessor_error * zeroentry simd_coprocessor_error do_simd_coprocessor_error * zeroentry xen_hypervisor_callback xen_do_hypervisor_callback * zeroentry xen_debug do_debug * zeroentry xen_int3 do_int3 */ static const char *exception_functions_orig[] = { "invalid_TSS", "segment_not_present", "alignment_check", "xen_stack_segment", "general_protection", "page_fault", "divide_error", "overflow", "bounds", "invalid_op", "device_not_available", "coprocessor_segment_overrun", "spurious_interrupt_bug", "coprocessor_error", "simd_coprocessor_error", "xen_hypervisor_callback", 
"xen_debug", "xen_int3", "async_page_fault", NULL, }; static const char *exception_functions_5_8[] = { "asm_exc_invalid_tss", "asm_exc_segment_not_present", "asm_exc_alignment_check", "asm_exc_general_protection", "asm_exc_page_fault", "asm_exc_divide_error", "asm_exc_overflow", "asm_exc_bounds", "asm_exc_invalid_op", "asm_exc_device_not_available", "asm_exc_coproc_segment_overrun", "asm_exc_spurious_interrupt_bug", "asm_exc_coprocessor_error", "asm_exc_simd_coprocessor_error", "asm_exc_debug", "xen_asm_exc_stack_segment", "xen_asm_exc_xen_hypervisor_callback", "xen_asm_exc_int3", NULL, }; /* * print one entry of a stack trace */ #define BACKTRACE_COMPLETE (1) #define BACKTRACE_ENTRY_IGNORED (2) #define BACKTRACE_ENTRY_DISPLAYED (3) #define BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED (4) static int x86_64_print_stack_entry(struct bt_info *bt, FILE *ofp, int level, int stkindex, ulong text) { ulong rsp, offset, locking_func; struct syment *sp, *spl; char *name, *name_plus_offset; int i, result; long eframe_check; char buf1[BUFSIZE]; char buf2[BUFSIZE]; struct load_module *lm; eframe_check = -1; if (!(bt->flags & BT_SAVE_EFRAME_IP)) bt->eframe_ip = 0; offset = 0; sp = value_search(text, &offset); if (!sp) return BACKTRACE_ENTRY_IGNORED; name = sp->name; if (offset && (bt->flags & BT_SYMBOL_OFFSET)) name_plus_offset = value_to_symstr(text, buf2, bt->radix); else name_plus_offset = NULL; if (bt->flags & BT_TEXT_SYMBOLS) { if (bt->flags & BT_EXCEPTION_FRAME) rsp = bt->stkptr; else rsp = bt->stackbase + (stkindex * sizeof(long)); fprintf(ofp, " [%s] %s at %lx", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(rsp)), name_plus_offset ? 
name_plus_offset : name, text); if (module_symbol(text, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); fprintf(ofp, "\n"); if (BT_REFERENCE_CHECK(bt)) x86_64_do_bt_reference_check(bt, text, name); return BACKTRACE_ENTRY_DISPLAYED; } if (!offset && !(bt->flags & BT_EXCEPTION_FRAME) && !(bt->flags & BT_START)) { if (STREQ(name, "child_rip")) { if (symbol_exists("kernel_thread")) name = "kernel_thread"; else if (symbol_exists("arch_kernel_thread")) name = "arch_kernel_thread"; } else if (!(bt->flags & BT_SCHEDULE)) { if (STREQ(name, "error_exit")) eframe_check = 8; else { if (CRASHDEBUG(2)) fprintf(ofp, "< ignoring text symbol with no offset: %s() >\n", sp->name); return BACKTRACE_ENTRY_IGNORED; } } } if ((THIS_KERNEL_VERSION >= LINUX(2,6,29)) && (eframe_check == -1) && offset && !(bt->flags & (BT_EXCEPTION_FRAME|BT_START|BT_SCHEDULE))) { for (i = 0; machdep->machspec->exception_functions[i]; i++) { if (STREQ(name, machdep->machspec->exception_functions[i])) { eframe_check = 8; break; } } if (x86_64_in_irqstack(bt) && strstr(name, "_interrupt")) eframe_check = 0; } if (bt->flags & BT_SCHEDULE) name = "schedule"; if (STREQ(name, "child_rip")) { if (symbol_exists("kernel_thread")) name = "kernel_thread"; else if (symbol_exists("arch_kernel_thread")) name = "arch_kernel_thread"; result = BACKTRACE_COMPLETE; } else if (STREQ(name, "cpu_idle") || STREQ(name, "system_call_fastpath")) result = BACKTRACE_COMPLETE; else result = BACKTRACE_ENTRY_DISPLAYED; if (bt->flags & BT_EXCEPTION_FRAME) rsp = bt->stkptr; else if (bt->flags & BT_START) rsp = bt->stkptr; else rsp = bt->stackbase + (stkindex * sizeof(long)); if ((bt->flags & BT_FULL)) { if (bt->frameptr) x86_64_display_full_frame(bt, rsp, ofp); bt->frameptr = rsp + sizeof(ulong); } fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, rsp, name_plus_offset ? 
name_plus_offset : name, text); if (STREQ(name, "tracesys")) fprintf(ofp, " (via system_call)"); else if (STRNEQ(name, ".text.lock.")) { if ((locking_func = text_lock_function(name, bt, text)) && (spl = value_search(locking_func, &offset))) fprintf(ofp, " (via %s)", spl->name); } if (module_symbol(text, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); if (bt->flags & BT_FRAMESIZE_DISABLE) fprintf(ofp, " *"); fprintf(ofp, "\n"); if (bt->flags & BT_LINE_NUMBERS) { get_line_number(text, buf1, FALSE); if (strlen(buf1)) fprintf(ofp, " %s\n", buf1); } if (eframe_check >= 0) { if (x86_64_exception_frame(EFRAME_PRINT|EFRAME_VERIFY, bt->stackbase + (stkindex*sizeof(long)) + eframe_check, NULL, bt, ofp)) result = BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED; } if (BT_REFERENCE_CHECK(bt)) x86_64_do_bt_reference_check(bt, text, name); bt->call_target = name; /* * The caller check below does not work correctly for some kernels, * so skip it if ORC unwinder is available. */ if (machdep->flags & ORC) return result; if (is_direct_call_target(bt)) { if (CRASHDEBUG(2)) fprintf(ofp, "< enable BT_CHECK_CALLER for %s >\n", bt->call_target); bt->flags |= BT_CHECK_CALLER; } else { if (CRASHDEBUG(2) && (bt->flags & BT_CHECK_CALLER)) fprintf(ofp, "< disable BT_CHECK_CALLER for %s >\n", bt->call_target); if (bt->flags & BT_CHECK_CALLER) { if (CRASHDEBUG(2)) fprintf(ofp, "< set BT_NO_CHECK_CALLER >\n"); bt->flags |= BT_NO_CHECK_CALLER; } bt->flags &= ~(ulonglong)BT_CHECK_CALLER; } return result; } /* * Unroll a kernel stack. */ static void x86_64_back_trace_cmd(struct bt_info *bt) { error(FATAL, "x86_64_back_trace_cmd: TBD\n"); } /* * Determine whether the initial stack pointer is located in one of the * exception stacks. 
*/ static ulong x86_64_in_exception_stack(struct bt_info *bt, int *estack_index) { int c, i; ulong rsp; ulong estack; struct machine_specific *ms; rsp = bt->stkptr; ms = machdep->machspec; estack = 0; for (c = 0; !estack && (c < kt->cpus); c++) { for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[c][i] == 0 || !ms->stkinfo.available[c][i]) break; if ((rsp >= ms->stkinfo.ebase[c][i]) && (rsp < (ms->stkinfo.ebase[c][i] + ms->stkinfo.esize[i]))) { estack = ms->stkinfo.ebase[c][i]; if (estack_index) *estack_index = i; if (CRASHDEBUG(1) && (c != bt->tc->processor)) error(INFO, "task cpu: %d exception stack cpu: %d\n", bt->tc->processor, c); break; } } } return estack; } /* * Determine whether the current stack pointer is in a cpu's irqstack. */ static ulong x86_64_in_irqstack(struct bt_info *bt) { int c; ulong rsp; ulong irqstack; struct machine_specific *ms; rsp = bt->stkptr; ms = machdep->machspec; irqstack = 0; for (c = 0; !irqstack && (c < kt->cpus); c++) { if (ms->stkinfo.ibase[c] == 0) break; if ((rsp >= ms->stkinfo.ibase[c]) && (rsp < (ms->stkinfo.ibase[c] + ms->stkinfo.isize))) { irqstack = ms->stkinfo.ibase[c]; if (CRASHDEBUG(1) && (c != bt->tc->processor)) error(INFO, "task cpu: %d IRQ stack cpu: %d\n", bt->tc->processor, c); break; } } return irqstack; } static int x86_64_in_alternate_stack(int cpu, ulong rsp) { int i; struct machine_specific *ms; if (cpu >= NR_CPUS) return FALSE; ms = machdep->machspec; if (ms->stkinfo.ibase[cpu] && (rsp >= ms->stkinfo.ibase[cpu]) && (rsp < (ms->stkinfo.ibase[cpu] + ms->stkinfo.isize))) return TRUE; for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (ms->stkinfo.ebase[cpu][i] && (rsp >= ms->stkinfo.ebase[cpu][i]) && (rsp < (ms->stkinfo.ebase[cpu][i] + ms->stkinfo.esize[i]))) return TRUE; } return FALSE; } static char * x86_64_exception_RIP_message(struct bt_info *bt, ulong rip) { physaddr_t phys; if (IS_VMALLOC_ADDR(rip) && machdep->kvtop(bt->tc, rip, &phys, 0)) return ("no symbolic reference"); return ("unknown 
or invalid address"); } #define STACK_TRANSITION_ERRMSG_E_I_P \ "cannot transition from exception stack to IRQ stack to current process stack:\n exception stack pointer: %lx\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" #define STACK_TRANSITION_ERRMSG_E_P \ "cannot transition from exception stack to current process stack:\n exception stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" #define STACK_TRANSITION_ERRMSG_I_P \ "cannot transition from IRQ stack to current process stack:\n IRQ stack pointer: %lx\n process stack pointer: %lx\n current stack base: %lx\n" #define SET_REG_BITMAP(REGMAP, TYPE, MEMBER) \ SET_BIT(REGMAP, REG_SEQ(TYPE, MEMBER)) /* * Low-budget back tracer -- dump text return addresses, following call chain * when possible, along with any verifiable exception frames. */ static void x86_64_low_budget_back_trace_cmd(struct bt_info *bt_in) { int i, level, done, framesize, estack_index; ulong rsp, offset, stacktop; ulong *up; long cs; struct syment *sp, *spt; FILE *ofp; ulong estack, irqstack; ulong irq_eframe, kpti_eframe; struct bt_info bt_local, *bt; struct machine_specific *ms; ulong last_process_stack_eframe; ulong user_mode_eframe; char *rip_symbol; char buf[BUFSIZE]; /* * User may have made a run-time switch. */ if (kt->flags & DWARF_UNWIND) { machdep->back_trace = x86_64_dwarf_back_trace_cmd; x86_64_dwarf_back_trace_cmd(bt_in); return; } bt = &bt_local; BCOPY(bt_in, bt, sizeof(struct bt_info)); if (bt->flags & BT_FRAMESIZE_DEBUG) { x86_64_framesize_debug(bt); return; } level = 0; done = FALSE; irq_eframe = 0; last_process_stack_eframe = 0; bt->call_target = NULL; extra_stacks_idx = 0; rsp = bt->stkptr; ms = machdep->machspec; if (BT_REFERENCE_CHECK(bt)) ofp = pc->nullfp; else ofp = fp; /* If rsp is in user stack, the memory may not be included in vmcore, and * we only output the register's value. So it's not necessary to check * whether it can be accessible. 
*/ if (!(bt->flags & BT_USER_SPACE) && (!rsp || !accessible(rsp))) { error(INFO, "cannot determine starting stack pointer\n"); if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, ofp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, ofp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, ofp); else if (VMSS_DUMPFILE()) vmware_vmss_display_regs(bt->tc->processor, ofp); return; } if (bt->flags & BT_TEXT_SYMBOLS) { if ((bt->flags & BT_USER_SPACE) && !(bt->flags & BT_TEXT_SYMBOLS_ALL)) return; if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) fprintf(ofp, "%sSTART: %s%s at %lx\n", space(VADDR_PRLEN > 8 ? 14 : 6), closest_symbol(bt->instptr), STREQ(closest_symbol(bt->instptr), "thread_return") ? " (schedule)" : "", bt->instptr); } else if (bt->flags & BT_USER_SPACE) { fprintf(ofp, " [exception RIP: user space]\n"); if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, ofp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, ofp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, ofp); else if (VMSS_DUMPFILE()) vmware_vmss_display_regs(bt->tc->processor, ofp); else if (pc->flags2 & QEMU_MEM_DUMP_ELF) display_regs_from_elf_notes(bt->tc->processor, ofp); return; } else if ((bt->flags & BT_KERNEL_SPACE) && (KVMDUMP_DUMPFILE() || (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) || SADUMP_DUMPFILE() || (pc->flags2 & QEMU_MEM_DUMP_ELF) || VMSS_DUMPFILE())) { fprintf(ofp, " [exception RIP: "); if ((sp = value_search(bt->instptr, &offset))) { fprintf(ofp, "%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? 
"+0x%lx" : "+%ld", offset); } else fprintf(ofp, "%s", x86_64_exception_RIP_message(bt, bt->instptr)); fprintf(ofp, "]\n"); if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, ofp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, ofp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, ofp); else if (VMSS_DUMPFILE()) vmware_vmss_display_regs(bt->tc->processor, ofp); else if (pc->flags2 & QEMU_MEM_DUMP_ELF) display_regs_from_elf_notes(bt->tc->processor, ofp); } else if (bt->flags & BT_START) { x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr); bt->flags &= ~BT_START; level++; } if (is_task_active(bt->task) && bt->flags & BT_DUMPFILE_SEARCH && !(bt->flags & BT_TEXT_SYMBOLS_ALL)) { if (!extra_stacks_regs[extra_stacks_idx]) { extra_stacks_regs[extra_stacks_idx] = (struct user_regs_bitmap_struct *) malloc(sizeof(struct user_regs_bitmap_struct)); } memset(extra_stacks_regs[extra_stacks_idx], 0, sizeof(struct user_regs_bitmap_struct)); extra_stacks_regs[extra_stacks_idx]->ur.ip = bt->instptr; extra_stacks_regs[extra_stacks_idx]->ur.sp = bt->stkptr + 8; SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, ip); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, sp); /* Sometimes bp is needed for stack unwinding, so we try to get it. The bt->instptr usually points to a inst after a call inst, let's check the previous call inst. Note the call inst len is 5 */ open_tmpfile2(); sprintf(buf, "x/1i 0x%lx", bt->instptr - 5); gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR); rewind(pc->tmpfile2); fgets(buf, BUFSIZE, pc->tmpfile2); if (strstr(buf, "call")) { if (strstr(buf, "") || strstr(buf, "")) { /* OK, we are calling relocate_kernel(), which * is written in assembly and hasn't changed for * years, so we get some extra regs out of it. 
*/ readmem(bt->stkptr - sizeof(ulong) * 6, KVADDR, buf, sizeof(ulong) * 6, "relocate_kernel", FAULT_ON_ERROR); extra_stacks_regs[extra_stacks_idx]->ur.r15 = *(ulong *)(buf + sizeof(ulong) * 0); extra_stacks_regs[extra_stacks_idx]->ur.r14 = *(ulong *)(buf + sizeof(ulong) * 1); extra_stacks_regs[extra_stacks_idx]->ur.r13 = *(ulong *)(buf + sizeof(ulong) * 2); extra_stacks_regs[extra_stacks_idx]->ur.r12 = *(ulong *)(buf + sizeof(ulong) * 3); extra_stacks_regs[extra_stacks_idx]->ur.bp = *(ulong *)(buf + sizeof(ulong) * 4); extra_stacks_regs[extra_stacks_idx]->ur.bx = *(ulong *)(buf + sizeof(ulong) * 5); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, r15); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, r14); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, r13); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, r12); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, bp); SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, bx); } else { /* This is a try-best effort. Usually the call inst will result in a next-inst addr pushed in and a rbp push of the calling function. 
So we can get rbp here */ readmem(extra_stacks_regs[extra_stacks_idx]->ur.sp - sizeof(ulong) * 2, KVADDR, &extra_stacks_regs[extra_stacks_idx]->ur.bp, sizeof(ulong), "extra_stacks_regs.bp", FAULT_ON_ERROR); if (INSTACK(extra_stacks_regs[extra_stacks_idx]->ur.bp, bt)) { SET_REG_BITMAP(extra_stacks_regs[extra_stacks_idx]->bitmap, x86_64_user_regs_struct, bp); extra_stacks_regs[extra_stacks_idx]->ur.ip -= 5; } } } close_tmpfile2(); /* * bt->machdep is handled at x86_64_get_stack_frame(), so skip it */ if (!bt->machdep || (extra_stacks_regs[extra_stacks_idx]->ur.sp != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.sp && extra_stacks_regs[extra_stacks_idx]->ur.ip != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.ip)) { gdb_add_substack(extra_stacks_idx++); } } if ((estack = x86_64_in_exception_stack(bt, &estack_index))) { in_exception_stack: bt->flags |= BT_EXCEPTION_STACK; /* * The stack buffer will have been loaded with the process * stack, so switch to the indicated exception stack. */ bt->stackbase = estack; bt->stacktop = estack + ms->stkinfo.esize[estack_index]; bt->stackbuf = ms->irqstack; if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, bt->hp && (bt->hp->esp == bt->stkptr) ? "irqstack contents via hook" : "irqstack contents", RETURN_ON_ERROR)) error(FATAL, "read of exception stack at %lx failed\n", bt->stackbase); /* * If irq_eframe is set, we've jumped back here from the * IRQ stack dump below. Do basically the same thing as if * had come from the processor stack, but presume that we * must have been in kernel mode, i.e., took an exception * while operating on an IRQ stack. 
(untested) */ if (irq_eframe) { bt->flags |= BT_EXCEPTION_FRAME; i = (irq_eframe - bt->stackbase)/sizeof(ulong); x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); rsp += SIZE(pt_regs); /* guaranteed kernel mode */ if (bt->eframe_ip && ((framesize = x86_64_get_framesize(bt, bt->eframe_ip, rsp, NULL)) >= 0)) rsp += framesize; level++; irq_eframe = 0; } stacktop = bt->stacktop - SIZE(pt_regs); if ((machdep->flags & NESTED_NMI) && estack_index == NMI_STACK) stacktop -= 12*sizeof(ulong); bt->flags &= ~BT_FRAMESIZE_DISABLE; for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) { case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: rsp += SIZE(pt_regs); i += SIZE(pt_regs)/sizeof(ulong); if (!bt->eframe_ip) { level++; break; } /* else fall through */ case BACKTRACE_ENTRY_DISPLAYED: level++; if ((framesize = x86_64_get_framesize(bt, bt->eframe_ip ? 
bt->eframe_ip : *up, rsp, NULL)) >= 0) { rsp += framesize; i += framesize/sizeof(ulong); } break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, bt->stackbuf + (stacktop - bt->stackbase), bt, ofp); if (!BT_REFERENCE_CHECK(bt)) fprintf(fp, "--- <%s exception stack> ---\n", ms->stkinfo.exception_stacks[estack_index]); /* * Find the CPU-saved, or handler-saved registers */ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); up -= 5; if ((machdep->flags & NESTED_NMI) && estack_index == NMI_STACK && bt->stkptr <= bt->stacktop - 17*sizeof(ulong)) { up -= 12; /* Copied and saved regs are swapped in pre-3.8 kernels */ if (*up == symbol_value("repeat_nmi")) up += 5; } /* Registers (as saved by CPU): * * up[4] SS * up[3] RSP * up[2] RFLAGS * up[1] CS * up[0] RIP */ rsp = bt->stkptr = up[3]; bt->instptr = up[0]; if (cs & 3) done = TRUE; /* user-mode exception */ else done = FALSE; /* kernel-mode exception */ bt->frameptr = 0; /* * Print the return values from the estack end. */ if (!done) { bt->flags |= BT_START|BT_SAVE_EFRAME_IP; x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr); bt->flags &= ~(BT_START|BT_SAVE_EFRAME_IP|BT_FRAMESIZE_DISABLE); /* * Protect against exception stack recursion. */ if (x86_64_in_exception_stack(bt, NULL) == estack) { fprintf(ofp, " [ %s exception stack recursion: " "prior stack location overwritten ]\n", ms->stkinfo.exception_stacks[estack_index]); return; } level++; if ((framesize = x86_64_get_framesize(bt, bt->instptr, rsp, NULL)) >= 0) rsp += framesize; } } /* * IRQ stack entry always comes in via the process stack, regardless * whether it happened while running in user or kernel space. */ if (!done && (irqstack = x86_64_in_irqstack(bt))) { bt->flags |= BT_IRQSTACK; /* * Until coded otherwise, the stackbase will be pointing to * either the exception stack or, more likely, the process * stack base. Switch it to the IRQ stack. 
*/ bt->stackbase = irqstack; bt->stacktop = irqstack + ms->stkinfo.isize; bt->stackbuf = ms->irqstack; if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, bt->hp && (bt->hp->esp == bt_in->stkptr) ? "irqstack contents via hook" : "irqstack contents", RETURN_ON_ERROR)) error(FATAL, "read of IRQ stack at %lx failed\n", bt->stackbase); stacktop = bt->stacktop - ms->irq_stack_gap; bt->flags &= ~BT_FRAMESIZE_DISABLE; for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) { case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: rsp += SIZE(pt_regs); i += SIZE(pt_regs)/sizeof(ulong); if (!bt->eframe_ip) { level++; break; } /* else fall through */ case BACKTRACE_ENTRY_DISPLAYED: level++; if ((framesize = x86_64_get_framesize(bt, bt->eframe_ip ? bt->eframe_ip : *up, rsp, NULL)) >= 0) { rsp += framesize; i += framesize/sizeof(ulong); } break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } if (!BT_REFERENCE_CHECK(bt)) fprintf(fp, "--- ---\n"); /* * stack = (unsigned long *) (irqstack_end[-1]); * (where irqstack_end is 64 bytes below page end) */ up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]); up -= 1; irq_eframe = rsp = bt->stkptr = x86_64_irq_eframe_link(*up, bt, ofp); up -= 1; bt->instptr = *up; /* * No exception frame when coming from do_softirq, * do_softirq_own_stack or call_softirq. 
*/ if ((sp = value_search(bt->instptr, &offset)) && (STREQ(sp->name, "do_softirq") || STREQ(sp->name, "do_softirq_own_stack") || STREQ(sp->name, "call_softirq"))) irq_eframe = 0; bt->frameptr = 0; done = FALSE; } else irq_eframe = 0; if (!done && (estack = x86_64_in_exception_stack(bt, &estack_index))) goto in_exception_stack; if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) { /* * Verify that the rsp pointer taken from either the * exception or IRQ stack points into the process stack. */ bt->stackbase = GET_STACKBASE(bt->tc->task); bt->stacktop = GET_STACKTOP(bt->tc->task); if (!INSTACK(rsp, bt)) { /* * If the exception occurred while on the KPTI entry trampoline stack, * just print the entry exception frame and bail out. */ if ((kpti_eframe = x86_64_in_kpti_entry_stack(bt->tc->processor, rsp))) { x86_64_exception_frame(EFRAME_PRINT, kpti_eframe, 0, bt, ofp); fprintf(fp, "--- ---\n"); return; } switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK)) { case (BT_EXCEPTION_STACK|BT_IRQSTACK): error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P, bt_in->stkptr, bt->stkptr, rsp, bt->stackbase); case BT_EXCEPTION_STACK: if (in_user_stack(bt->tc->task, rsp)) { done = TRUE; break; } if (STREQ(closest_symbol(bt->instptr), "ia32_sysenter_target")) { /* * RSP 0 from MSR_IA32_SYSENTER_ESP? */ if (rsp == 0) return; done = TRUE; break; } error(FATAL, STACK_TRANSITION_ERRMSG_E_P, bt_in->stkptr, rsp, bt->stackbase); case BT_IRQSTACK: error(FATAL, STACK_TRANSITION_ERRMSG_I_P, bt_in->stkptr, rsp, bt->stackbase); } } /* * Now fill the local stack buffer from the process stack. */ if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, "irqstack contents", RETURN_ON_ERROR)) error(FATAL, "read of process stack at %lx failed\n", bt->stackbase); } /* * For a normally blocked task, hand-create the first level(s). * associated with __schedule() and/or schedule(). 
*/ if (!done && !(bt->flags & (BT_TEXT_SYMBOLS|BT_EXCEPTION_STACK|BT_IRQSTACK)) && (rip_symbol = closest_symbol(bt->instptr)) && (STREQ(rip_symbol, "thread_return") || STREQ(rip_symbol, "schedule") || STREQ(rip_symbol, "__schedule"))) { if ((machdep->flags & ORC) && VALID_MEMBER(inactive_task_frame_ret_addr)) { /* * %rsp should have the address of inactive_task_frame, so * skip the registers before ret_addr to adjust rsp. */ if (CRASHDEBUG(1)) fprintf(fp, "rsp: %lx rbp: %lx\n", rsp, bt->bptr); rsp += OFFSET(inactive_task_frame_ret_addr); } else { if (STREQ(rip_symbol, "__schedule")) { i = (rsp - bt->stackbase)/sizeof(ulong); x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); level++; rsp = __schedule_frame_adjust(rsp, bt); if (STREQ(closest_symbol(bt->instptr), "schedule")) bt->flags |= BT_SCHEDULE; } else bt->flags |= BT_SCHEDULE; if (bt->flags & BT_SCHEDULE) { i = (rsp - bt->stackbase)/sizeof(ulong); x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); bt->flags &= ~(ulonglong)BT_SCHEDULE; rsp += sizeof(ulong); level++; } } } /* * Dump the IRQ exception frame from the process stack. * If the CS register indicates a user exception frame, * then set done to TRUE to avoid the process stack walk-through. * Otherwise, bump up the rsp past the kernel-mode eframe. 
*/ if (irq_eframe) { bt->flags |= BT_EXCEPTION_FRAME; i = (irq_eframe - bt->stackbase)/sizeof(ulong); if (symbol_exists("asm_common_interrupt")) { i -= 1; up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); bt->instptr = *up; } x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); if (cs & 3) done = TRUE; /* IRQ from user-mode */ else { if (x86_64_print_eframe_location(rsp, level, ofp)) level++; rsp += SIZE(pt_regs); irq_eframe = 0; bt->flags |= BT_EFRAME_TARGET; if (bt->eframe_ip && ((framesize = x86_64_get_framesize(bt, bt->eframe_ip, rsp, NULL)) >= 0)) rsp += framesize; bt->flags &= ~BT_EFRAME_TARGET; } level++; } /* * Walk the process stack. */ bt->flags &= ~BT_FRAMESIZE_DISABLE; for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; if ((bt->flags & BT_CHECK_CALLER)) { /* * A non-zero offset value from the value_search() * lets us know if it's a real text return address. */ if (!(spt = value_search(*up, &offset))) continue; if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE)) continue; /* * sp gets the syment of the function that the text * routine above called before leaving its return * address on the stack -- if it can be determined. */ sp = x86_64_function_called_by((*up)-5); if (sp == NULL) { /* * We were unable to get the called function. * If the text address had an offset, then * it must have made an indirect call, and * can't have called our target function. 
*/ if (offset) { if (CRASHDEBUG(1)) fprintf(ofp, "< ignoring %s() -- makes indirect call and NOT %s()>\n", spt->name, bt->call_target); continue; } } else if ((machdep->flags & SCHED_TEXT) && STREQ(bt->call_target, "schedule") && STREQ(sp->name, "__sched_text_start")) { ; /* bait and switch */ } else if (!STREQ(sp->name, bt->call_target)) { /* * We got function called by the text routine, * but it's not our target function. */ if (CRASHDEBUG(2)) fprintf(ofp, "< ignoring %s() -- calls %s() and NOT %s()>\n", spt->name, sp->name, bt->call_target); continue; } } switch (x86_64_print_stack_entry(bt, ofp, level, i,*up)) { case BACKTRACE_ENTRY_AND_EFRAME_DISPLAYED: last_process_stack_eframe = rsp + 8; if (x86_64_print_eframe_location(last_process_stack_eframe, level, ofp)) level++; rsp += SIZE(pt_regs); i += SIZE(pt_regs)/sizeof(ulong); if (!bt->eframe_ip) { level++; break; } /* else fall through */ case BACKTRACE_ENTRY_DISPLAYED: level++; if ((framesize = x86_64_get_framesize(bt, bt->eframe_ip ? bt->eframe_ip : *up, rsp, (char *)up)) >= 0) { rsp += framesize; i += framesize/sizeof(ulong); } break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } if (!irq_eframe && !is_kernel_thread(bt->tc->task) && (GET_STACKBASE(bt->tc->task) == bt->stackbase)) { long stack_padding_size = VALID_SIZE(fred_frame) ? (2*8) : 0; user_mode_eframe = bt->stacktop - SIZE(pt_regs); if (last_process_stack_eframe < user_mode_eframe) x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf + (bt->stacktop - stack_padding_size - bt->stackbase) - SIZE(pt_regs), bt, ofp); } if (bt->flags & BT_TEXT_SYMBOLS) { if (BT_REFERENCE_FOUND(bt)) { print_task_header(fp, task_to_context(bt->task), 0); BCOPY(bt_in, bt, sizeof(struct bt_info)); bt->ref = NULL; machdep->back_trace(bt); fprintf(fp, "\n"); } } } /* * Use dwarf CFI encodings to correctly follow the call chain. 
*/ static void x86_64_dwarf_back_trace_cmd(struct bt_info *bt_in) { int i, level, done, estack_index; ulong rsp, offset, stacktop; ulong *up; long cs; struct syment *sp; FILE *ofp; ulong estack, irqstack; ulong irq_eframe, kpti_eframe; struct bt_info bt_local, *bt; struct machine_specific *ms; ulong last_process_stack_eframe; ulong user_mode_eframe; /* * User may have made a run-time switch. */ if (!(kt->flags & DWARF_UNWIND)) { machdep->back_trace = x86_64_low_budget_back_trace_cmd; x86_64_low_budget_back_trace_cmd(bt_in); return; } bt = &bt_local; BCOPY(bt_in, bt, sizeof(struct bt_info)); if (bt->flags & BT_FRAMESIZE_DEBUG) { dwarf_debug(bt); return; } level = 0; done = FALSE; irq_eframe = 0; last_process_stack_eframe = 0; bt->call_target = NULL; bt->bptr = 0; extra_stacks_idx = 0; rsp = bt->stkptr; if (!rsp) { error(INFO, "cannot determine starting stack pointer\n"); return; } ms = machdep->machspec; if (BT_REFERENCE_CHECK(bt)) ofp = pc->nullfp; else ofp = fp; if (bt->flags & BT_TEXT_SYMBOLS) { if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) fprintf(ofp, "%sSTART: %s%s at %lx\n", space(VADDR_PRLEN > 8 ? 14 : 6), closest_symbol(bt->instptr), STREQ(closest_symbol(bt->instptr), "thread_return") ? " (schedule)" : "", bt->instptr); } else if (bt->flags & BT_START) { x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr); bt->flags &= ~BT_START; level++; } if ((estack = x86_64_in_exception_stack(bt, &estack_index))) { in_exception_stack: bt->flags |= BT_EXCEPTION_STACK; /* * The stack buffer will have been loaded with the process * stack, so switch to the indicated exception stack. */ bt->stackbase = estack; bt->stacktop = estack + ms->stkinfo.esize[estack_index]; bt->stackbuf = ms->irqstack; if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, bt->hp && (bt->hp->esp == bt->stkptr) ? 
"irqstack contents via hook" : "irqstack contents", RETURN_ON_ERROR)) error(FATAL, "read of exception stack at %lx failed\n", bt->stackbase); /* * If irq_eframe is set, we've jumped back here from the * IRQ stack dump below. Do basically the same thing as if * had come from the processor stack, but presume that we * must have been in kernel mode, i.e., took an exception * while operating on an IRQ stack. (untested) */ if (irq_eframe) { bt->flags |= BT_EXCEPTION_FRAME; i = (irq_eframe - bt->stackbase)/sizeof(ulong); x86_64_print_stack_entry(bt, ofp, level, i, bt->instptr); bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME; cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp); rsp += SIZE(pt_regs); /* guaranteed kernel mode */ level++; irq_eframe = 0; } stacktop = bt->stacktop - SIZE(pt_regs); if ((machdep->flags & NESTED_NMI) && estack_index == NMI_STACK) stacktop -= 12*sizeof(ulong); if (!done) { level = dwarf_backtrace(bt, level, stacktop); done = TRUE; } cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0, bt->stackbuf + (stacktop - bt->stackbase), bt, ofp); if (!BT_REFERENCE_CHECK(bt)) fprintf(fp, "--- ---\n"); /* * Find the CPU-saved, or handler-saved registers */ up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); up -= 5; if ((machdep->flags & NESTED_NMI) && estack_index == NMI_STACK && bt->stkptr <= bt->stacktop - 17*sizeof(ulong)) { up -= 12; /* Copied and saved regs are swapped in pre-3.8 kernels */ if (*up == symbol_value("repeat_nmi")) up += 5; } /* Registers (as saved by CPU): * * up[4] SS * up[3] RSP * up[2] RFLAGS * up[1] CS * up[0] RIP */ rsp = bt->stkptr = up[3]; bt->instptr = up[0]; if (cs & 3) done = TRUE; /* user-mode exception */ else done = FALSE; /* kernel-mode exception */ bt->frameptr = 0; /* * Print the return values from the estack end. 
 */
	/*
	 * NOTE(review): this is the tail of a backtrace-command function whose
	 * start lies above this chunk; presumably the DWARF-unwind variant of
	 * the x86_64 backtrace command (it calls dwarf_backtrace()) — confirm
	 * against the full file.
	 */
	if (!done) {
		bt->flags |= BT_START;
		x86_64_print_stack_entry(bt, ofp, level, 0, bt->instptr);
		bt->flags &= ~BT_START;
		level++;
	}
	}

	/*
	 * IRQ stack entry always comes in via the process stack, regardless
	 * whether it happened while running in user or kernel space.
	 */
	if (!done && (irqstack = x86_64_in_irqstack(bt))) {
		bt->flags |= BT_IRQSTACK;
		/*
		 * Until coded otherwise, the stackbase will be pointing to
		 * either the exception stack or, more likely, the process
		 * stack base.  Switch it to the IRQ stack.
		 */
		bt->stackbase = irqstack;
		bt->stacktop = irqstack + ms->stkinfo.isize;
		bt->stackbuf = ms->irqstack;

		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    bt->hp && (bt->hp->esp == bt_in->stkptr) ?
		    "irqstack contents via hook" : "irqstack contents",
		    RETURN_ON_ERROR))
			error(FATAL, "read of IRQ stack at %lx failed\n",
				bt->stackbase);

		/* irq_stack_gap: unusable bytes at the top of the IRQ stack */
		stacktop = bt->stacktop - ms->irq_stack_gap;

		if (!done) {
			level = dwarf_backtrace(bt, level, stacktop);
			done = TRUE;
		}

		if (!BT_REFERENCE_CHECK(bt))
			fprintf(fp, "--- ---\n");

		/*
		 * stack = (unsigned long *) (irqstack_end[-1]);
		 * (where irqstack_end is 64 bytes below page end)
		 */
		up = (ulong *)(&bt->stackbuf[stacktop - bt->stackbase]);
		up -= 1;
		irq_eframe = rsp = bt->stkptr = (*up) - ms->irq_eframe_link;
		up -= 1;
		bt->instptr = *up;
		/*
		 * No exception frame when coming from call_softirq.
		 */
		if ((sp = value_search(bt->instptr, &offset)) &&
		    STREQ(sp->name, "call_softirq"))
			irq_eframe = 0;
		bt->frameptr = 0;
		done = FALSE;
	} else
		irq_eframe = 0;

	if (!done && (estack = x86_64_in_exception_stack(bt, &estack_index)))
		goto in_exception_stack;

	if (!done && (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))) {
		/*
		 * Verify that the rsp pointer taken from either the
		 * exception or IRQ stack points into the process stack.
		 */
		bt->stackbase = GET_STACKBASE(bt->tc->task);
		bt->stacktop = GET_STACKTOP(bt->tc->task);

		if (!INSTACK(rsp, bt)) {
			/*
			 * If the exception occurred while on the KPTI entry trampoline stack,
			 * just print the entry exception frame and bail out.
			 */
			if ((kpti_eframe = x86_64_in_kpti_entry_stack(bt->tc->processor, rsp))) {
				x86_64_exception_frame(EFRAME_PRINT, kpti_eframe, 0, bt, ofp);
				fprintf(fp, "--- ---\n");
				return;
			}
			/*
			 * error(FATAL, ...) does not return, so the fallthrough
			 * between these cases is unreachable by construction.
			 */
			switch (bt->flags & (BT_EXCEPTION_STACK|BT_IRQSTACK))
			{
			case (BT_EXCEPTION_STACK|BT_IRQSTACK):
				error(FATAL, STACK_TRANSITION_ERRMSG_E_I_P,
					bt_in->stkptr, bt->stkptr, rsp,
					bt->stackbase);

			case BT_EXCEPTION_STACK:
				error(FATAL, STACK_TRANSITION_ERRMSG_E_P,
					bt_in->stkptr, rsp, bt->stackbase);

			case BT_IRQSTACK:
				error(FATAL, STACK_TRANSITION_ERRMSG_I_P,
					bt_in->stkptr, rsp, bt->stackbase);
			}
		}

		/*
		 * Now fill the local stack buffer from the process stack.
		 */
		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    "irqstack contents", RETURN_ON_ERROR))
			error(FATAL, "read of process stack at %lx failed\n",
				bt->stackbase);
	}

	/*
	 * Dump the IRQ exception frame from the process stack.
	 * If the CS register indicates a user exception frame,
	 * then set done to TRUE to avoid the process stack walk-through.
	 * Otherwise, bump up the rsp past the kernel-mode eframe.
	 */
	if (irq_eframe) {
		bt->flags |= BT_EXCEPTION_FRAME;
		level = dwarf_print_stack_entry(bt, level);
		bt->flags &= ~(ulonglong)BT_EXCEPTION_FRAME;
		cs = x86_64_exception_frame(EFRAME_PRINT|EFRAME_CS, 0,
			bt->stackbuf + (irq_eframe - bt->stackbase), bt, ofp);
		if (cs & 3)
			done = TRUE;	/* IRQ from user-mode */
		else {
			if (x86_64_print_eframe_location(rsp, level, ofp))
				level++;
			rsp += SIZE(pt_regs);
			irq_eframe = 0;
		}
		level++;
	}

	/*
	 * Walk the process stack.
	 */
	if (!done) {
		level = dwarf_backtrace(bt, level, bt->stacktop);
		done = TRUE;
	}

	if (!irq_eframe && !is_kernel_thread(bt->tc->task) &&
	    (GET_STACKBASE(bt->tc->task) == bt->stackbase)) {
		/*
		 * When the kernel has a FRED stack frame type, the user-mode
		 * eframe sits above two extra padding words (2*8 bytes).
		 */
		long stack_padding_size = VALID_SIZE(fred_frame) ?
		    (2*8) : 0;
		user_mode_eframe = bt->stacktop - SIZE(pt_regs);
		if (last_process_stack_eframe < user_mode_eframe)
			x86_64_exception_frame(EFRAME_PRINT, 0, bt->stackbuf +
				(bt->stacktop - stack_padding_size - bt->stackbase) -
				SIZE(pt_regs), bt, ofp);
	}

	if (bt->flags & BT_TEXT_SYMBOLS) {
		if (BT_REFERENCE_FOUND(bt)) {
			print_task_header(fp, task_to_context(bt->task), 0);
			BCOPY(bt_in, bt, sizeof(struct bt_info));
			bt->ref = NULL;
			machdep->back_trace(bt);
			fprintf(fp, "\n");
		}
	}
}

/*
 *  Functions that won't be called indirectly.
 *  Add more to this as they are discovered.
 */
static const char *direct_call_targets[] = {
	"schedule",
	"schedule_timeout",
	NULL			/* sentinel for the lookup loop below */
};

/*
 *  Return TRUE if bt->call_target is one of the known direct-call-only
 *  targets above, meaning the caller-verification heuristics should apply.
 *  Active tasks sitting in any schedule*() variant are excluded, as are
 *  backtraces flagged BT_NO_CHECK_CALLER.
 */
static int
is_direct_call_target(struct bt_info *bt)
{
	int i;

	if (!bt->call_target || (bt->flags & BT_NO_CHECK_CALLER))
		return FALSE;

	if (strstr(bt->call_target, "schedule") &&
	    is_task_active(bt->task))
		return FALSE;

	for (i = 0; direct_call_targets[i]; i++) {
		if (STREQ(direct_call_targets[i], bt->call_target))
			return TRUE;
	}

	return FALSE;
}

/*
 *  Given a return address, determine which function was called by the
 *  "callq" instruction immediately preceding it: read the opcode byte at
 *  rip, and if it is a relative call (0xe8), disassemble it via gdb and
 *  extract the target symbol.  Returns NULL if no call can be resolved.
 */
static struct syment *
x86_64_function_called_by(ulong rip)
{
	struct syment *sp;
	char buf[BUFSIZE], *p1;
	ulong value, offset;
	unsigned char byte;

	value = 0;
	sp = NULL;

	if (!readmem(rip, KVADDR, &byte, sizeof(unsigned char), "call byte",
	    QUIET|RETURN_ON_ERROR))
		return sp;

	if (byte != 0xe8)	/* not a relative near call */
		return sp;

	sprintf(buf, "x/i 0x%lx", rip);

	open_tmpfile2();
	if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
		rewind(pc->tmpfile2);
		while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
			if ((p1 = strstr(buf, " call")) ||
			    (p1 = strstr(buf, "\tcall"))) {
				if (extract_hex(p1, &value, NULLCHAR, TRUE))
					break;
			}
		}
	}
	close_tmpfile2();

	if (value)
		sp = value_search(value, &offset);

	/*
	 * Functions that jmp to schedule() or schedule_timeout().
 */
	if (sp) {
		/*
		 * Map thin wrappers that tail-jump into the real scheduler
		 * entry points back onto those entry points.
		 */
		if ((STREQ(sp->name, "schedule_timeout_interruptible") ||
		     STREQ(sp->name, "schedule_timeout_uninterruptible")))
			sp = symbol_search("schedule_timeout");

		if (STREQ(sp->name, "__cond_resched"))
			sp = symbol_search("schedule");
	}

	return sp;
}

/*
 *  Unroll the kernel stack using a minimal amount of gdb services.
 *  NOTE(review): intentionally unused on x86_64 -- reaching it is treated
 *  as a fatal internal error.
 */
static void
x86_64_back_trace(struct gnu_request *req, struct bt_info *bt)
{
	error(FATAL, "x86_64_back_trace: unused\n");
}

/*
 *  Print exception frame information for x86_64.
 *
 *    Pid: 0, comm: swapper Not tainted 2.6.5-1.360phro.rootsmp
 *    RIP: 0010:[] {default_idle+36}
 *    RSP: 0018:ffffffff8048bfd8 EFLAGS: 00000246
 *    RAX: 0000000000000000 RBX: ffffffff8010f510 RCX: 0000000000000018
 *    RDX: 0000010001e37280 RSI: ffffffff803ac0a0 RDI: 000001007f43c400
 *    RBP: 0000000000000000 R08: ffffffff8048a000 R09: 0000000000000000
 *    R10: ffffffff80482188 R11: 0000000000000001 R12: 0000000000000000
 *    R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
 *    FS: 0000002a96e14fc0(0000) GS:ffffffff80481d80(0000) GS:0000000055578aa0
 *    CS: 0010 DS: 0018 ES: 0018 CR0: 000000008005003b
 *    CR2: 0000002a9556b000 CR3: 0000000000101000 CR4: 00000000000006e0
 *
 *  The frame may be addressed either by kernel virtual address (kvaddr)
 *  or by a pointer into a previously-read stack buffer (local).  The
 *  flags argument selects verification, printing, initialization and/or
 *  which value to return (CS, verification result, or 0).
 */
long
x86_64_exception_frame(ulong flags, ulong kvaddr, char *local,
	struct bt_info *bt, FILE *ofp)
{
	long rip, rsp, cs, ss, rflags, orig_rax, rbp;
	long rax, rbx, rcx, rdx, rsi, rdi;
	long r8, r9, r10, r11, r12, r13, r14, r15;
	struct machine_specific *ms;
	struct syment *sp;
	ulong offset, verify_addr;
	char *pt_regs_buf;
	long verified;
	long err;
	char buf[BUFSIZE];

	if (flags & EFRAME_VERIFY) {
		/* Reject frames whose first or last word is not accessible. */
		if (kvaddr)
			verify_addr = kvaddr;
		else
			verify_addr = (local - bt->stackbuf) + bt->stackbase;

		if (!accessible(verify_addr) ||
		    !accessible(verify_addr + SIZE(pt_regs) - sizeof(long)))
			return FALSE;
	}

	ms = machdep->machspec;
	sp = NULL;

	/*
	 * One-time discovery of the pt_regs member offsets, cached in
	 * ms->pto; EFRAME_INIT forces a (re)initialization pass.
	 */
	if (!(machdep->flags & PT_REGS_INIT) || (flags == EFRAME_INIT)) {
		err = 0;
		err |= ((ms->pto.r15 = MEMBER_OFFSET("pt_regs", "r15")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r14 = MEMBER_OFFSET("pt_regs", "r14")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r13 = MEMBER_OFFSET("pt_regs", "r13")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r12 = MEMBER_OFFSET("pt_regs", "r12")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r11 = MEMBER_OFFSET("pt_regs", "r11")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r10 = MEMBER_OFFSET("pt_regs", "r10")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r9 = MEMBER_OFFSET("pt_regs", "r9")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.r8 = MEMBER_OFFSET("pt_regs", "r8")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.cs = MEMBER_OFFSET("pt_regs", "cs")) ==
			INVALID_OFFSET);
		err |= ((ms->pto.ss = MEMBER_OFFSET("pt_regs", "ss")) ==
			INVALID_OFFSET);
		/*
		 * x86/x86_64 merge changed traditional register names:
		 * try the old "rXX" name first, then the merged "XX" name.
		 */
		if (((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "rbp")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rbp = MEMBER_OFFSET("pt_regs", "bp")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rax = MEMBER_OFFSET("pt_regs", "rax")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rax = MEMBER_OFFSET("pt_regs", "ax")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "rbx")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rbx = MEMBER_OFFSET("pt_regs", "bx")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "rcx")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rcx = MEMBER_OFFSET("pt_regs", "cx")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "rdx")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rdx = MEMBER_OFFSET("pt_regs", "dx")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "rsi")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rsi = MEMBER_OFFSET("pt_regs", "si")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "rdi")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rdi = MEMBER_OFFSET("pt_regs", "di")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rip = MEMBER_OFFSET("pt_regs", "rip")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rip = MEMBER_OFFSET("pt_regs", "ip")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "rsp")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.rsp = MEMBER_OFFSET("pt_regs", "sp")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "eflags")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.eflags = MEMBER_OFFSET("pt_regs", "flags")) ==
		    INVALID_OFFSET))
			err++;
		if (((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_rax")) ==
		    INVALID_OFFSET) &&
		    ((ms->pto.orig_rax = MEMBER_OFFSET("pt_regs", "orig_ax")) ==
		    INVALID_OFFSET))
			err++;

		if (err)
			error(WARNING, "pt_regs structure has changed\n");

		machdep->flags |= PT_REGS_INIT;

		if (flags == EFRAME_INIT)
			return err;
	}

	if (kvaddr) {
		pt_regs_buf = GETBUF(SIZE(pt_regs));
		readmem(kvaddr, KVADDR, pt_regs_buf,
			SIZE(pt_regs), "pt_regs", FAULT_ON_ERROR);
	} else
		pt_regs_buf = local;

	rip = ULONG(pt_regs_buf + ms->pto.rip);
	rsp = ULONG(pt_regs_buf + ms->pto.rsp);
	cs = ULONG(pt_regs_buf + ms->pto.cs);
	ss = ULONG(pt_regs_buf + ms->pto.ss);
	rflags = ULONG(pt_regs_buf + ms->pto.eflags);
	orig_rax = ULONG(pt_regs_buf + ms->pto.orig_rax);
	rbp = ULONG(pt_regs_buf + ms->pto.rbp);
	rax = ULONG(pt_regs_buf + ms->pto.rax);
	rbx = ULONG(pt_regs_buf + ms->pto.rbx);
	rcx = ULONG(pt_regs_buf + ms->pto.rcx);
	rdx = ULONG(pt_regs_buf + ms->pto.rdx);
	rsi = ULONG(pt_regs_buf + ms->pto.rsi);
	rdi = ULONG(pt_regs_buf + ms->pto.rdi);
	r8 = ULONG(pt_regs_buf + ms->pto.r8);
	r9 = ULONG(pt_regs_buf + ms->pto.r9);
	r10 = ULONG(pt_regs_buf + ms->pto.r10);
	r11 = ULONG(pt_regs_buf + ms->pto.r11);
	r12 = ULONG(pt_regs_buf + ms->pto.r12);
	r13 = ULONG(pt_regs_buf + ms->pto.r13);
	r14 = ULONG(pt_regs_buf + ms->pto.r14);
	r15 = ULONG(pt_regs_buf + ms->pto.r15);

	verified = x86_64_eframe_verify(bt,
		kvaddr ? kvaddr : (local - bt->stackbuf) + bt->stackbase,
		cs, ss, rip, rsp, rflags);

	/*
	 * If it's print-if-verified request, don't print bogus eframes.
	 */
	if (!verified && ((flags & (EFRAME_VERIFY|EFRAME_PRINT)) ==
	    (EFRAME_VERIFY|EFRAME_PRINT)))
		flags &= ~EFRAME_PRINT;
	else if (CRASHDEBUG(1) && verified && (flags != EFRAME_VERIFY))
		fprintf(ofp, "< exception frame at: %lx >\n", kvaddr ?
			kvaddr : (local - bt->stackbuf) + bt->stackbase);

	if (flags & EFRAME_PRINT) {
		if (flags & EFRAME_SEARCH) {
			fprintf(ofp, "\n %s-MODE EXCEPTION FRAME AT: %lx\n",
				cs & 3 ? "USER" : "KERNEL",
				kvaddr ? kvaddr :
				(local - bt->stackbuf) + bt->stackbase);
			if (!(cs & 3)) {
				fprintf(ofp, " [exception RIP: ");
				if ((sp = value_search(rip, &offset))) {
					fprintf(ofp, "%s", sp->name);
					if (offset)
						fprintf(ofp,
						    (*gdb_output_radix == 16) ?
						    "+0x%lx" : "+%ld", offset);
				} else
					fprintf(ofp, "%s",
					    x86_64_exception_RIP_message(bt, rip));
				fprintf(ofp, "]\n");
			}
		} else if (!(cs & 3)) {
			fprintf(ofp, " [exception RIP: ");
			if ((sp = value_search(rip, &offset))) {
				fprintf(ofp, "%s", sp->name);
				if (offset)
					fprintf(ofp,
					    (*gdb_output_radix == 16) ?
					    "+0x%lx" : "+%ld", offset);
				bt->eframe_ip = rip;
			} else
				fprintf(ofp, "%s",
				    x86_64_exception_RIP_message(bt, rip));
			fprintf(ofp, "]\n");
		}
		fprintf(ofp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n",
			rip, rsp, rflags);
		fprintf(ofp, " RAX: %016lx RBX: %016lx RCX: %016lx\n",
			rax, rbx, rcx);
		fprintf(ofp, " RDX: %016lx RSI: %016lx RDI: %016lx\n",
			rdx, rsi, rdi);
		fprintf(ofp, " RBP: %016lx R8: %016lx R9: %016lx\n",
			rbp, r8, r9);
		fprintf(ofp, " R10: %016lx R11: %016lx R12: %016lx\n",
			r10, r11, r12);
		fprintf(ofp, " R13: %016lx R14: %016lx R15: %016lx\n",
			r13, r14, r15);
		fprintf(ofp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n",
			orig_rax, cs, ss);

		if (!(cs & 3) && sp && (bt->flags & BT_LINE_NUMBERS)) {
			get_line_number(rip, buf, FALSE);
			if (strlen(buf))
				fprintf(ofp, " %s\n", buf);
		}

		/* Quiet at runtime unless debugging; always warn pre-runtime. */
		if (!verified && CRASHDEBUG((pc->flags & RUNTIME) ? 0 : 1))
			error(WARNING, "possibly bogus exception frame\n");
	}

	if ((flags & EFRAME_PRINT) && BT_REFERENCE_CHECK(bt)) {
		x86_64_do_bt_reference_check(bt, rip, NULL);
		if ((sp = value_search(rip, &offset)))
			x86_64_do_bt_reference_check(bt, 0, sp->name);
		x86_64_do_bt_reference_check(bt, rsp, NULL);
		x86_64_do_bt_reference_check(bt, cs, NULL);
		x86_64_do_bt_reference_check(bt, ss, NULL);
		x86_64_do_bt_reference_check(bt, rflags, NULL);
		x86_64_do_bt_reference_check(bt, orig_rax, NULL);
		x86_64_do_bt_reference_check(bt, rbp, NULL);
		x86_64_do_bt_reference_check(bt, rax, NULL);
		x86_64_do_bt_reference_check(bt, rbx, NULL);
		x86_64_do_bt_reference_check(bt, rcx, NULL);
		x86_64_do_bt_reference_check(bt, rdx, NULL);
		x86_64_do_bt_reference_check(bt, rsi, NULL);
		x86_64_do_bt_reference_check(bt, rdi, NULL);
		x86_64_do_bt_reference_check(bt, r8, NULL);
		x86_64_do_bt_reference_check(bt, r9, NULL);
		x86_64_do_bt_reference_check(bt, r10, NULL);
		x86_64_do_bt_reference_check(bt, r11, NULL);
		x86_64_do_bt_reference_check(bt, r12, NULL);
		x86_64_do_bt_reference_check(bt, r13, NULL);
		x86_64_do_bt_reference_check(bt, r14, NULL);
		x86_64_do_bt_reference_check(bt, r15, NULL);
	}

	/* Remember the rip and rsp for unwinding the process stack */
	if (kt->flags & DWARF_UNWIND){
		bt->instptr = rip;
		bt->stkptr = rsp;
		bt->bptr = rbp;
	} else if (machdep->flags & ORC)
		bt->bptr = rbp;

	/*
	 * Preserve registers set for each additional in-kernel stack
	 * (verified kernel-mode frames only), so gdb can be handed the
	 * per-stack register state via gdb_add_substack().
	 */
	if (!(cs & 3) && verified && flags & EFRAME_PRINT &&
	    extra_stacks_idx < MAX_EXCEPTION_STACKS &&
	    !(bt->flags & BT_EFRAME_SEARCH)) {
		if (!extra_stacks_regs[extra_stacks_idx]) {
			extra_stacks_regs[extra_stacks_idx] =
				(struct user_regs_bitmap_struct *)
				malloc(sizeof(struct user_regs_bitmap_struct));
		}
		memset(extra_stacks_regs[extra_stacks_idx], 0,
			sizeof(struct user_regs_bitmap_struct));
		memcpy(&extra_stacks_regs[extra_stacks_idx]->ur,
			pt_regs_buf, SIZE(pt_regs));
		for (int i = 0; i < SIZE(pt_regs)/sizeof(long); i++)
			SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, i);
		/*
		 * Skip the frame if it duplicates the sp/ip already recorded
		 * in bt->machdep.
		 */
		if (!bt->machdep ||
		    (extra_stacks_regs[extra_stacks_idx]->ur.sp !=
		     ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.sp &&
		     extra_stacks_regs[extra_stacks_idx]->ur.ip !=
		     ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.ip)) {
			gdb_add_substack(extra_stacks_idx++);
		}
	}

	if (kvaddr)
		FREEBUF(pt_regs_buf);

	if (flags & EFRAME_CS)
		return cs;
	else if (flags & EFRAME_VERIFY)
		return verified;

	return 0;
}

/*
 *  Stubbed out: always reports that no eframe location line was printed.
 *  The previous implementation is preserved under NOTDEF.
 */
static int
x86_64_print_eframe_location(ulong eframe, int level, FILE *ofp)
{
	return FALSE;

#ifdef NOTDEF
	ulong rip;
	char *pt_regs_buf;
	struct machine_specific *ms;
	struct syment *sp;

	ms = machdep->machspec;

	pt_regs_buf = GETBUF(SIZE(pt_regs));
	if (!readmem(eframe, KVADDR, pt_regs_buf, SIZE(pt_regs),
	    "pt_regs", RETURN_ON_ERROR|QUIET)) {
		FREEBUF(pt_regs_buf);
		return FALSE;
	}

	rip = ULONG(pt_regs_buf + ms->pto.rip);
	FREEBUF(pt_regs_buf);

	if (!(sp = value_search(rip, NULL)))
		return FALSE;

	fprintf(ofp, "%s#%d [%8lx] %s at %lx\n",
		level < 10 ? " " : "", level+1, eframe, sp->name, rip);

	return TRUE;
#endif
}

/*
 *  Check whether an RIP is in the FIXMAP vsyscall page.
 */
static int
is_vsyscall_addr(ulong rip)
{
	ulong page;

	if ((page = machdep->machspec->vsyscall_page))
		if ((rip >= page) && (rip < (page+PAGESIZE())))
			return TRUE;

	return FALSE;
}

/*
 *  Resolve a value to a symbol, preferring base-kernel symbols for
 *  addresses inside the vsyscall page; otherwise fall back to the
 *  generic machdep resolver.
 */
struct syment *
x86_64_value_to_symbol(ulong vaddr, ulong *offset)
{
	struct syment *sp;

	if (is_vsyscall_addr(vaddr) &&
	    (sp = value_search_base_kernel(vaddr, offset)))
		return sp;

	return generic_machdep_value_to_symbol(vaddr, offset);
}

/*
 *  Check that the verifiable registers contain reasonable data.
 */
#define RAZ_MASK 0xffffffffffc08028    /* return-as-zero bits */

/*
 *  Heuristically validate a candidate exception frame by sanity-checking
 *  RFLAGS (reserved bits must be zero, bit 1 must be set) and then the
 *  CS:SS segment selector pair against known kernel-mode (0x10/0x18),
 *  user-mode (0x33/0x2b, 0x23/0x2b) and Xen selector combinations,
 *  cross-checking RIP/RSP plausibility for each.  Returns TRUE/FALSE.
 *
 *  NOTE(review): several near-identical "(cs == 0x10) && kvaddr" clauses
 *  follow one another; they test progressively looser RSP relationships
 *  and appear intentional.
 */
static int
x86_64_eframe_verify(struct bt_info *bt, long kvaddr, long cs, long ss,
	long rip, long rsp, long rflags)
{
	int estack;
	struct syment *sp;
	ulong offset, exception;
	physaddr_t phys;

	if ((rflags & RAZ_MASK) || !(rflags & 0x2))
		return FALSE;

	if ((cs == 0x10) && (ss == 0x18)) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp))
			return TRUE;
		if (x86_64_is_module_addr(rip) &&
		    IS_KVADDR(rsp) &&
		    (rsp == (kvaddr + SIZE(pt_regs))))
			return TRUE;
		if (is_kernel_text(rip) &&
		    (bt->flags & BT_EXCEPTION_STACK) &&
		    in_user_stack(bt->tc->task, rsp))
			return TRUE;
		if (is_kernel_text(rip) && !IS_KVADDR(rsp) &&
		    (bt->flags & BT_EFRAME_SEARCH) &&
		    x86_64_in_exception_stack(bt, NULL))
			return TRUE;
		if (is_kernel_text(rip) &&
		    x86_64_in_exception_stack(bt, &estack) &&
		    (estack <= 1))
			return TRUE;
		/*
		 * RSP may be 0 from MSR_IA32_SYSENTER_ESP.
		 */
		if (STREQ(closest_symbol(rip), "ia32_sysenter_target"))
			return TRUE;
		if ((rip == 0) && INSTACK(rsp, bt) &&
		    STREQ(bt->call_target, "ret_from_fork"))
			return TRUE;
		/* the word just below the frame holds the exception type */
		if (readmem(kvaddr - 8, KVADDR, &exception, sizeof(ulong),
		    "exception type", RETURN_ON_ERROR|QUIET) &&
		    (sp = value_search(exception, &offset)) &&
		    STREQ(sp->name, "page_fault"))
			return TRUE;
		if ((kvaddr + SIZE(pt_regs)) == rsp)
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
		    (rsp == (kvaddr + SIZE(pt_regs) + 8)))
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
		    (rsp == (kvaddr + SIZE(pt_regs))))
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp) &&
		    x86_64_in_exception_stack(bt, NULL))
			return TRUE;
	}

	if ((cs == 0x10) && kvaddr) {
		if (IS_KVADDR(rsp) && IS_VMALLOC_ADDR(rip) &&
		    machdep->kvtop(bt->tc, rip, &phys, 0))
			return TRUE;
	}

	if ((cs == 0x33) && (ss == 0x2b)) {
		if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
		if (is_vsyscall_addr(rip) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
	}

	if (XEN() && ((cs == 0x33) || (cs == 0xe033)) &&
	    ((ss == 0x2b) || (ss == 0xe02b))) {
		if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
	}

	if (XEN() && ((cs == 0x10000e030) || (cs == 0xe030)) &&
	    (ss == 0xe02b)) {
		if (is_kernel_text(rip) && IS_KVADDR(rsp))
			return TRUE;
	}

	/*
	 * 32-bit segments
	 */
	if ((cs == 0x23) && (ss == 0x2b)) {
		if (IS_UVADDR(rip, bt->tc) && IS_UVADDR(rsp, bt->tc))
			return TRUE;
	}

	return FALSE;
}

/*
 *  Read one saved register out of a sleeping task's inactive_task_frame:
 *  the frame lives at the task's saved thread_struct rsp, and reg_offset
 *  selects the member within it.  reg_name is used only for the readmem
 *  diagnostic string.
 */
static ulong
get_reg_from_inactive_task_frame(struct bt_info *bt, char *reg_name,
	ulong reg_offset)
{
	ulong reg_value = 0, rsp = 0;
	char reg_info[64];

	snprintf(reg_info, sizeof(reg_info), "inactive_task_frame.%s", reg_name);

	readmem(bt->task + OFFSET(task_struct_thread) +
		OFFSET(thread_struct_rsp), KVADDR, &rsp, sizeof(ulong),
		"thread_struct.rsp", FAULT_ON_ERROR);
	readmem(rsp + reg_offset, KVADDR, &reg_value, sizeof(ulong),
		reg_info, FAULT_ON_ERROR);

	return reg_value;
}

/*
 *  Get a stack frame combination of pc and ra from the most relevent spot.
 */
static void
x86_64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	struct user_regs_bitmap_struct *ur_bitmap;
	ulong sp = 0;

	if (bt->flags & BT_SKIP_IDLE)
		bt->flags &= ~BT_SKIP_IDLE;

	ur_bitmap = (struct user_regs_bitmap_struct *)GETBUF(sizeof(*ur_bitmap));
	memset(ur_bitmap, 0, sizeof(*ur_bitmap));

	if (VALID_MEMBER(inactive_task_frame_bp)) {
		if (!is_task_active(bt->task)) {
			/*
			 * For inactive tasks in live and dumpfile, regs can be
			 * get from inactive_task_frame struct.
			 */
			ur_bitmap->ur.r15 = get_reg_from_inactive_task_frame(bt,
				"r15", OFFSET(inactive_task_frame_r15));
			ur_bitmap->ur.r14 = get_reg_from_inactive_task_frame(bt,
				"r14", OFFSET(inactive_task_frame_r14));
			ur_bitmap->ur.r13 = get_reg_from_inactive_task_frame(bt,
				"r13", OFFSET(inactive_task_frame_r13));
			ur_bitmap->ur.r12 = get_reg_from_inactive_task_frame(bt,
				"r12", OFFSET(inactive_task_frame_r12));
			ur_bitmap->ur.bx = get_reg_from_inactive_task_frame(bt,
				"bx", OFFSET(inactive_task_frame_bx));
			ur_bitmap->ur.bp = get_reg_from_inactive_task_frame(bt,
				"bp", OFFSET(inactive_task_frame_bp));

			/* For inactive tasks:
			   crash> task -x 1|grep sp
			     sp = 0xffffc90000013d00
			   crash> rd ffffc90000013d00 32
			   ffffc90000013d00: ffff888104dad4a8 0000000000000000  r15,r14
			   ffffc90000013d10: ffff888100280000 ffff888100216500  r13,r12
			   ffffc90000013d20: ffff888100217018 ffff88817fd2c800  rbx,rbp
			   ffffc90000013d30: ffffffff81a6a1b3 ffffc90000013de0  saved_rip,...
			   ffffc90000013d40: ffff888100000004 99ccbf53ea493000
			   ffffc90000013d50: ffff888100216500 ffff888100216500
			   crash> dis __schedule
			   ...
			     0xffffffff81a6a1ab <__schedule+507>: mov %r13,%rsi
			     0xffffffff81a6a1ae <__schedule+510>: call 0xffffffff81003490 <__switch_to_asm>
			     0xffffffff81a6a1b3 <__schedule+515>: mov %rax,%rdi  <<=== saved_rip
			   ...
			   crash> dis __switch_to_asm
			     0xffffffff81003490 <__switch_to_asm>:    push %rbp
			     0xffffffff81003491 <__switch_to_asm+1>:  push %rbx
			     0xffffffff81003492 <__switch_to_asm+2>:  push %r12
			     0xffffffff81003494 <__switch_to_asm+4>:  push %r13
			     0xffffffff81003496 <__switch_to_asm+6>:  push %r14
			     0xffffffff81003498 <__switch_to_asm+8>:  push %r15
			     0xffffffff8100349a <__switch_to_asm+10>: mov %rsp,0x14d8(%rdi)
			   ...

			   Now saved_rip = ffffffff81a6a1b3, and we are starting
			   the stack unwind at saved_rip, which is function
			   __schedule() instead of function __switch_to_asm(),
			   so the stack pointer should be rewind from
			   ffffc90000013d00 back to ffffc90000013d38, aka
			   *spp += 7 * reg_len.  Otherwise we are unwinding
			   function __schedule() but with __switch_to_asm()'s
			   stack frame, which will fail.
			 */
			sp += 7 * sizeof(unsigned long);

			SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, r15);
			SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, r14);
			SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, r13);
			SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, r12);
			SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, bx);
			SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, bp);
		} else {
			/*
			 * For active tasks in dumpfile, we get regs through the
			 * original way. For active tasks in live, we only get
			 * ip and sp in the end of the function.
			 */
			if (bt->flags & BT_DUMPFILE_SEARCH) {
				FREEBUF(ur_bitmap);
				bt->need_free = FALSE;
				return x86_64_get_dumpfile_stack_frame(bt, pcp, spp);
			}
		}
	} else {
		if (!is_task_active(bt->task)) {
			if (spp) {
				/* saved bp is the word at the saved sp */
				*spp = x86_64_get_sp(bt);
				readmem(*spp, KVADDR, &(ur_bitmap->ur.bp),
					sizeof(ulong), "ur_bitmap->ur.bp",
					FAULT_ON_ERROR);
				SET_REG_BITMAP(ur_bitmap->bitmap,
					x86_64_user_regs_struct, bp);
			}
		} else {
			if (bt->flags & BT_DUMPFILE_SEARCH) {
				FREEBUF(ur_bitmap);
				bt->need_free = FALSE;
				return x86_64_get_dumpfile_stack_frame(bt, pcp, spp);
			}
		}
	}

	if (pcp) {
		*pcp = x86_64_get_pc(bt);
		ur_bitmap->ur.ip = *pcp;
		SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, ip);
	}
	if (spp) {
		*spp = x86_64_get_sp(bt);
		ur_bitmap->ur.sp = sp + *spp;
		SET_REG_BITMAP(ur_bitmap->bitmap, x86_64_user_regs_struct, sp);
	}

	/* caller takes ownership of ur_bitmap (freed when need_free is set) */
	bt->machdep = ur_bitmap;
	bt->need_free = TRUE;
}

/*
 *  Get the starting point for the active cpus in a diskdump/netdump.
 */
static void
x86_64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *rip, ulong *rsp)
{
	int panic_task;
	int i, j, estack, panic, stage, in_nmi_stack;
	char *sym;
	struct syment *sp;
	ulong *up, *up2;
	struct bt_info bt_local, *bt;
	struct machine_specific *ms;
	char *user_regs;
	ulong ur_rip, ur_rsp;
	ulong halt_rip, halt_rsp;
	ulong crash_kexec_rip, crash_kexec_rsp;
	ulong call_function_rip, call_function_rsp;
	ulong sysrq_c_rip, sysrq_c_rsp;
	ulong notify_die_rip, notify_die_rsp;

#define STACKTOP_INDEX(BT) (((BT)->stacktop - (BT)->stackbase)/sizeof(ulong))

	/* work on a local copy; only flag updates go back through bt_in */
	bt = &bt_local;
	BCOPY(bt_in, bt, sizeof(struct bt_info));
	ms = machdep->machspec;
	ur_rip = ur_rsp = 0;
	halt_rip = halt_rsp = 0;
	crash_kexec_rip = crash_kexec_rsp = 0;
	call_function_rip = call_function_rsp = 0;
	notify_die_rsp = notify_die_rip = 0;
	sysrq_c_rip = sysrq_c_rsp = 0;
	in_nmi_stack = stage = 0;
	estack = -1;
	panic = FALSE;

	if (bt_in->flags & BT_SKIP_IDLE)
		bt_in->flags &= ~BT_SKIP_IDLE;

	panic_task = tt->panic_task == bt->task ? TRUE : FALSE;

	if (panic_task && bt->machdep) {
		user_regs = bt->machdep;

		if (x86_64_eframe_verify(bt,
		    0,
		    ULONG(user_regs + OFFSET(user_regs_struct_cs)),
		    ULONG(user_regs + OFFSET(user_regs_struct_ss)),
		    ULONG(user_regs + OFFSET(user_regs_struct_rip)),
		    ULONG(user_regs + OFFSET(user_regs_struct_rsp)),
		    ULONG(user_regs + OFFSET(user_regs_struct_eflags)))) {
			bt->stkptr = ULONG(user_regs +
				OFFSET(user_regs_struct_rsp));
			if (x86_64_in_irqstack(bt)) {
				ur_rip = ULONG(user_regs +
					OFFSET(user_regs_struct_rip));
				ur_rsp = ULONG(user_regs +
					OFFSET(user_regs_struct_rsp));
				goto skip_stage;
			}
		}
	} else if (ELF_NOTES_VALID() && bt->machdep) {
		user_regs = bt->machdep;
		ur_rip = ULONG(user_regs + OFFSET(user_regs_struct_rip));
		ur_rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp));
	}

	/*
	 * Check the process stack first.  Each pass below scans one stack
	 * (process, then IRQ, then each exception stack) word-by-word for
	 * return addresses into known panic/dump entry points.
	 */
next_stack:
	for (i = 0, up = (ulong *)bt->stackbuf;
	     i < STACKTOP_INDEX(bt); i++, up++) {
		sym = closest_symbol(*up);

		if (XEN_CORE_DUMPFILE()) {
			if (STREQ(sym, "crash_kexec")) {
				sp = x86_64_function_called_by((*up)-5);
				if (sp && STREQ(sp->name, "machine_kexec")) {
					*rip = *up;
					*rsp = bt->stackbase +
						((char *)(up) - bt->stackbuf);
					return;
				}
			}
			if (STREQ(sym, "xen_machine_kexec")) {
				*rip = *up;
				*rsp = bt->stackbase +
					((char *)(up) - bt->stackbuf);
				return;
			}
		} else if (STREQ(sym, "netconsole_netdump") ||
		    STREQ(sym, "netpoll_start_netdump") ||
		    STREQ(sym, "start_disk_dump") ||
		    STREQ(sym, "disk_dump") ||
		    STREQ(sym, "crash_kexec") ||
		    STREQ(sym, "machine_kexec") ||
		    STREQ(sym, "try_crashdump")) {
			if (STREQ(sym, "crash_kexec")) {
				sp = x86_64_function_called_by((*up)-5);
				if (sp && STREQ(sp->name, "machine_kexec")) {
					*rip = *up;
					*rsp = bt->stackbase +
						((char *)(up) - bt->stackbuf);
					return;
				}
			}
			/*
			 * Use second instance of crash_kexec if it exists.
			 */
			if (!(bt->flags & BT_TEXT_SYMBOLS) &&
			    STREQ(sym, "crash_kexec") && !crash_kexec_rip) {
				crash_kexec_rip = *up;
				crash_kexec_rsp = bt->stackbase +
					((char *)(up) - bt->stackbuf);
				continue;
			}
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			return;
		}

		/* estack >= 0 means we are scanning an exception stack */
		if ((estack >= 0) &&
		    (STREQ(sym, "nmi_watchdog_tick") ||
		     STREQ(sym, "default_do_nmi"))) {
			sp = x86_64_function_called_by((*up)-5);
			if (!sp || !STREQ(sp->name, "die_nmi"))
				continue;
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			bt_in->flags |= BT_START;
			*rip = symbol_value("die_nmi");
			*rsp = (*rsp) - (7*sizeof(ulong));
			return;
		}

		if (STREQ(sym, "panic")) {
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			panic = TRUE;
			continue;	/* keep looking for die */
		}

		if (STREQ(sym, "die")) {
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			j = i; up2 = up;
			for (j++, up2++; j < STACKTOP_INDEX(bt); j++, up2++) {
				sym = closest_symbol(*up2);
				if (STREQ(sym, "sysrq_handle_crash"))
					goto next_sysrq;
			}
			return;
		}

		if (STREQ(sym, "sysrq_handle_crash")) {
			j = i; up2 = up;
next_sysrq:
			/* remember the deepest sysrq_handle_crash instance */
			sysrq_c_rip = *up2;
			sysrq_c_rsp = bt->stackbase +
				((char *)(up2) - bt->stackbuf);
			pc->flags |= SYSRQ;
			for (j++, up2++; j < STACKTOP_INDEX(bt); j++, up2++) {
				sym = closest_symbol(*up2);
				if (STREQ(sym, "sysrq_handle_crash"))
					goto next_sysrq;
			}
		}

		if (!panic_task && (stage > 0) &&
		    (STREQ(sym, "smp_call_function_interrupt") ||
		     STREQ(sym, "stop_this_cpu"))) {
			call_function_rip = *up;
			call_function_rsp = bt->stackbase +
				((char *)(up) - bt->stackbuf);
		}

		if (!panic_task && STREQ(sym, "crash_nmi_callback")) {
			*rip = *up;
			*rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
			if ((bt->flags & BT_SKIP_IDLE) &&
			    is_idle_thread(bt->task))
				bt_in->flags |= BT_SKIP_IDLE;
			return;
		}

		if (!panic_task && in_nmi_stack &&
		    (pc->flags2 & VMCOREINFO) && STREQ(sym, "notify_die")) {
			notify_die_rip = *up;
			notify_die_rsp = bt->stackbase +
				((char *)(up) - bt->stackbuf);
		}

		if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
		    (stage == 0) && STREQ(sym, "safe_halt")) {
			halt_rip = *up;
			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
		}

		if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
		    !halt_rip && (stage == 0) && STREQ(sym, "xen_idle")) {
			halt_rip = *up;
			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
		}

		if (!XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) &&
		    !halt_rip && (stage == 0) && STREQ(sym, "cpu_idle")) {
			halt_rip = *up;
			halt_rsp = bt->stackbase + ((char *)(up) - bt->stackbuf);
		}
	}

	if (panic)
		return;

	if (crash_kexec_rip) {
		*rip = crash_kexec_rip;
		*rsp = crash_kexec_rsp;
		return;
	}

skip_stage:
	switch (stage)
	{
	/*
	 * Now check the processor's interrupt stack.
	 */
	case 0:
		bt->stackbase = ms->stkinfo.ibase[bt->tc->processor];
		bt->stacktop = ms->stkinfo.ibase[bt->tc->processor] +
			ms->stkinfo.isize;
		console("x86_64_get_dumpfile_stack_frame: searching IRQ stack at %lx\n",
			bt->stackbase);
		bt->stackbuf = ms->irqstack;
		alter_stackbuf(bt);
		stage = 1;
		goto next_stack;

	/*
	 * Check the exception stacks.
	 */
	case 1:
		if (++estack == MAX_EXCEPTION_STACKS)
			break;
		bt->stackbase = ms->stkinfo.ebase[bt->tc->processor][estack];
		bt->stacktop = ms->stkinfo.ebase[bt->tc->processor][estack] +
			ms->stkinfo.esize[estack];
		console("x86_64_get_dumpfile_stack_frame: searching %s estack at %lx\n",
			ms->stkinfo.exception_stacks[estack], bt->stackbase);
		if (!(bt->stackbase &&
		    ms->stkinfo.available[bt->tc->processor][estack]))
			goto skip_stage;
		bt->stackbuf = ms->irqstack;
		alter_stackbuf(bt);
		in_nmi_stack = STREQ(ms->stkinfo.exception_stacks[estack], "NMI");
		goto next_stack;
	}

	/* fallbacks, in decreasing order of confidence */
	if (sysrq_c_rip) {
		*rip = sysrq_c_rip;
		*rsp = sysrq_c_rsp;
		return;
	}

	if (notify_die_rip) {
		*rip = notify_die_rip;
		*rsp = notify_die_rsp;
		return;
	}

	/*
	 * We didn't find what we were looking for, so just use what was
	 * passed in from the ELF header.
	 */
	if (ur_rip && ur_rsp) {
		*rip = ur_rip;
		*rsp = ur_rsp;
		if (is_kernel_text(ur_rip) &&
		    (INSTACK(ur_rsp, bt_in) ||
		     in_alternate_stack(bt->tc->processor, ur_rsp)))
			bt_in->flags |= BT_KERNEL_SPACE;
		if (!is_kernel_text(ur_rip) &&
		    in_user_stack(bt->tc->task, ur_rsp))
			bt_in->flags |= BT_USER_SPACE;
		return;
	}

	if (call_function_rip && call_function_rsp) {
		*rip = call_function_rip;
		*rsp = call_function_rsp;
		return;
	}

	if (halt_rip && halt_rsp) {
		*rip = halt_rip;
		*rsp = halt_rsp;
		if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE() ||
		    (VMSS_DUMPFILE() && vmware_vmss_valid_regs(bt)))
			bt_in->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
		return;
	}

	/*
	 * Use what was (already) saved in the panic task's
	 * registers found in the ELF header.
	 */
	if (bt->flags & BT_KDUMP_ELF_REGS) {
		user_regs = bt->machdep;
		ur_rip = ULONG(user_regs + OFFSET(user_regs_struct_rip));
		ur_rsp = ULONG(user_regs + OFFSET(user_regs_struct_rsp));
		if (!in_alternate_stack(bt->tc->processor, ur_rsp) &&
		    !stkptr_to_task(ur_rsp)) {
			if (CRASHDEBUG(1))
				error(INFO,
				    "x86_64_get_dumpfile_stack_frame: "
				    "ELF mismatch: RSP: %lx task: %lx\n",
				    ur_rsp, bt->task);
		} else {
			if (is_kernel_text(ur_rip) &&
			    (INSTACK(ur_rsp, bt_in) ||
			     in_alternate_stack(bt->tc->processor, ur_rsp)))
				bt_in->flags |= BT_KERNEL_SPACE;
			if (!is_kernel_text(ur_rip) &&
			    in_user_stack(bt->tc->task, ur_rsp))
				bt_in->flags |= BT_USER_SPACE;
			return;
		}
	}

	if (CRASHDEBUG(1))
		error(INFO,
		    "x86_64_get_dumpfile_stack_frame: cannot find anything useful (task: %lx)\n",
		    bt->task);

	if (XEN_CORE_DUMPFILE() && !panic_task && is_task_active(bt->task) &&
	    !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)))
		error(FATAL,
		    "starting backtrace locations of the active (non-crashing) "
		    "xen tasks\n cannot be determined: try -t or -T options\n");

	/* last resort: fall back to the generic stack-frame getter */
	bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
	machdep->get_stack_frame(bt, rip, rsp);

	if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE() ||
	    (VMSS_DUMPFILE() && vmware_vmss_valid_regs(bt)))
		bt_in->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH;
}

/*
 *  Get the saved RSP from the task's thread_struct.
 */
static ulong
x86_64_get_sp(struct bt_info *bt)
{
	ulong offset, rsp;

	if (tt->flags & THREAD_INFO) {
		readmem(bt->task + OFFSET(task_struct_thread) +
			OFFSET(thread_struct_rsp), KVADDR,
			&rsp, sizeof(void *),
			"thread_struct rsp", FAULT_ON_ERROR);
		/* with ORC, also capture the saved frame pointer */
		if ((machdep->flags & ORC) &&
		    VALID_MEMBER(inactive_task_frame_bp)) {
			readmem(rsp + OFFSET(inactive_task_frame_bp), KVADDR,
				&bt->bptr, sizeof(void *),
				"inactive_task_frame.bp", FAULT_ON_ERROR);
		}
		return rsp;
	}

	offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rsp);

	return GET_STACK_ULONG(offset);
}

/*
 *  Get the saved PC from the task's thread_struct if it exists;
 *  otherwise just use the pre-determined thread_return value.
 */
static ulong
x86_64_get_pc(struct bt_info *bt)
{
	ulong offset, rip;

	/* modern kernels dropped thread_struct.rip entirely */
	if (INVALID_MEMBER(thread_struct_rip))
		return machdep->machspec->thread_return;

	if (tt->flags & THREAD_INFO) {
		readmem(bt->task + OFFSET(task_struct_thread) +
			OFFSET(thread_struct_rip), KVADDR,
			&rip, sizeof(void *),
			"thread_struct rip", FAULT_ON_ERROR);
		if (rip)
			return rip;
		else
			return machdep->machspec->thread_return;
	}

	offset = OFFSET(task_struct_thread) + OFFSET(thread_struct_rip);

	return GET_STACK_ULONG(offset);
}

/*
 *  Do the work for x86_64_get_sp() and x86_64_get_pc().
 *  NOTE(review): never implemented -- reaching it is a fatal error.
 */
static void
get_x86_64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
{
	error(FATAL, "get_x86_64_frame: TBD\n");
}

/*
 *  Do the work for cmd_irq().  On first successful use, rebind the
 *  machdep hook directly to the generic handler.
 */
static void
x86_64_dump_irq(int irq)
{
	if (kernel_symbol_exists("sparse_irqs") ||
	    symbol_exists("irq_desc") ||
	    kernel_symbol_exists("irq_desc_ptrs") ||
	    kernel_symbol_exists("irq_desc_tree")) {
		machdep->dump_irq = generic_dump_irq;
		return(generic_dump_irq(irq));
	}

	error(FATAL,
	    "x86_64_dump_irq: irq_desc[] or irq_desc_tree do not exist?\n");
}

/*
 *  Delegate IRQ affinity display to the generic handler when the kernel's
 *  irq_desc bookkeeping exists; rebind the hook on first use.
 */
static void
x86_64_get_irq_affinity(int irq)
{
	if (kernel_symbol_exists("sparse_irqs") ||
	    symbol_exists("irq_desc") ||
	    kernel_symbol_exists("irq_desc_ptrs") ||
	    kernel_symbol_exists("irq_desc_tree")) {
		machdep->get_irq_affinity = generic_get_irq_affinity;
		return(generic_get_irq_affinity(irq));
	}

	error(FATAL,
	    "x86_64_get_irq_affinity: irq_desc[] or irq_desc_tree do not exist?\n");
}

/*
 *  Delegate per-cpu interrupt display to the generic handler when the
 *  kernel's irq_desc bookkeeping exists; rebind the hook on first use.
 */
static void
x86_64_show_interrupts(int irq, ulong *cpus)
{
	if (kernel_symbol_exists("sparse_irqs") ||
	    symbol_exists("irq_desc") ||
	    kernel_symbol_exists("irq_desc_ptrs") ||
	    kernel_symbol_exists("irq_desc_tree")) {
		machdep->show_interrupts = generic_show_interrupts;
		return(generic_show_interrupts(irq, cpus));
	}

	error(FATAL,
	    "x86_64_show_interrupts: irq_desc[] or irq_desc_tree do not exist?\n");
}

/*
 *  Do the work for irq -d
 */
void
x86_64_display_idt_table(void)
{
	int i;
	char *idt_table_buf;
	char buf[BUFSIZE];
	ulong *ip;

	if (INVALID_SIZE(gate_struct)) {
		option_not_supported('d');
		return;
	}
	idt_table_buf = GETBUF(SIZE(gate_struct) * 256);
	readmem(symbol_value("idt_table"), KVADDR, idt_table_buf,
		SIZE(gate_struct) * 256, "idt_table", FAULT_ON_ERROR);
	ip = (ulong *)idt_table_buf;

	/* one gate_struct per vector; two ulongs of it decoded per entry */
	for (i = 0; i < 256; i++, ip += 2) {
		if (i < 10)
			fprintf(fp, " ");
		else if (i < 100)
			fprintf(fp, " ");
		fprintf(fp, "[%d] %s\n",
			i, x86_64_extract_idt_function(ip, buf, NULL));
	}

	FREEBUF(idt_table_buf);
}

/*
 *  Identify which IST (Interrupt Stack Table) slot each exception vector
 *  uses by decoding the IDT, and name the corresponding exception stacks
 *  in ms->stkinfo.exception_stacks[] (NMI/DEBUG/STACKFAULT/DOUBLEFAULT/
 *  MCE/VC) based on the handler's symbol name.
 */
static void
x86_64_exception_stacks_init(void)
{
	char *idt_table_buf;
	char buf[BUFSIZE];
	int i;
	ulong *ip, ist;
	long size;
	struct machine_specific *ms;

	ms = machdep->machspec;

	ms->stkinfo.NMI_stack_index = -1;
	for (i = 0; i < MAX_EXCEPTION_STACKS; i++)
		ms->stkinfo.exception_stacks[i] = "(unknown)";

	if (!kernel_symbol_exists("idt_table"))
		return;

	/* fall back to the architectural 16-byte gate size if unknown */
	if (INVALID_SIZE(gate_struct))
		size = 16;
	else
		size = SIZE(gate_struct);

	idt_table_buf = GETBUF(size * 256);
	readmem(symbol_value("idt_table"), KVADDR, idt_table_buf,
		size * 256, "idt_table", FAULT_ON_ERROR);
	ip = (ulong *)idt_table_buf;

	if (CRASHDEBUG(1))
		fprintf(fp, "exception IST:\n");

	for (i = 0; i < 256; i++, ip += 2) {
		/* IST field: bits 32-34 of the gate's first 8 bytes */
		ist = ((*ip) >> 32) & 0x7;
		if (ist) {
			x86_64_extract_idt_function(ip, buf, NULL);
			if (CRASHDEBUG(1))
				fprintf(fp, " %ld: %s\n", ist, buf);
			if (strstr(buf, "nmi")) {
				ms->stkinfo.NMI_stack_index = ist-1;
				ms->stkinfo.exception_stacks[ist-1] = "NMI";
			}
			if (strstr(buf, "debug"))
				ms->stkinfo.exception_stacks[ist-1] = "DEBUG";
			if (strstr(buf, "stack"))
				ms->stkinfo.exception_stacks[ist-1] = "STACKFAULT";
			if (strstr(buf, "double"))
				ms->stkinfo.exception_stacks[ist-1] = "DOUBLEFAULT";
			if (strstr(buf, "machine"))
				ms->stkinfo.exception_stacks[ist-1] = "MCE";
			if (strstr(buf, "vmm"))
				ms->stkinfo.exception_stacks[ist-1] = "VC";
		}
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "exception stacks:\n");
		for (i = 0; i < MAX_EXCEPTION_STACKS; i++)
			fprintf(fp, " [%d]: %s\n", i,
				ms->stkinfo.exception_stacks[i]);
	}

	FREEBUF(idt_table_buf);
}

/*
 *  Extract the function name out of the IDT entry.
*/ static char * x86_64_extract_idt_function(ulong *ip, char *buf, ulong *retaddr) { ulong i1, i2, addr; char locbuf[BUFSIZE]; physaddr_t phys; if (buf) BZERO(buf, BUFSIZE); i1 = *ip; i2 = *(ip+1); i2 <<= 32; addr = i2 & 0xffffffff00000000; addr |= (i1 & 0xffff); i1 >>= 32; addr |= (i1 & 0xffff0000); if (retaddr) *retaddr = addr; if (!buf) return NULL; value_to_symstr(addr, locbuf, 0); if (strlen(locbuf)) sprintf(buf, "%s", locbuf); else { sprintf(buf, "%016lx", addr); if (kvtop(NULL, addr, &phys, 0)) { addr = machdep->kvbase + (ulong)phys; if (value_to_symstr(addr, locbuf, 0)) { strcat(buf, " <"); strcat(buf, locbuf); strcat(buf, ">"); } } } return buf; } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int x86_64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on x86_64) so this routine both fixes the * references as well as imposing the current output radix on the translations. 
*/ console(" IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } else if ((STREQ(argv[argc-2], "callq") || (argv[argc-2][0] == 'j')) && hexadecimal(argv[argc-1], 0)) { /* * Update code of the form: * * callq * jmp * jCC * * to show a translated, bracketed, target. */ p1 = &LASTCHAR(inbuf); if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { sprintf(buf1, " <%s>\n", value_to_symstr(value, buf2, output_radix)); if (!strstr(buf1, "<>")) sprintf(p1, "%s", buf1); } } if (value_symbol(vaddr) && (strstr(inbuf, "nopl 0x0(%rax,%rax,1)") || strstr(inbuf, "data32 data32 data32 xchg %ax,%ax"))) { strip_line_end(inbuf); strcat(inbuf, " [FTRACE NOP]\n"); } console("OUT: %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. 
*/ int x86_64_get_smp_cpus(void) { int i, cpus, nr_pda, cpunumber, _cpu_pda, _boot_cpu_pda; char *cpu_pda_buf; ulong level4_pgt, cpu_pda_addr; struct syment *sp; ulong __per_cpu_load = 0, cpu_addr; if (!VALID_STRUCT(x8664_pda)) { if (!(kt->flags & PER_CPU_OFF)) return 1; if ((sp = per_cpu_symbol_search("pcpu_hot")) && (cpu_addr = MEMBER_OFFSET("pcpu_hot", "cpu_number")) != INVALID_OFFSET) cpu_addr += sp->value; else if ((sp = per_cpu_symbol_search("per_cpu__cpu_number"))) cpu_addr = sp->value; else return 1; if (kernel_symbol_exists("__per_cpu_load")) __per_cpu_load = symbol_value("__per_cpu_load"); for (i = cpus = 0; i < NR_CPUS; i++) { if (__per_cpu_load && kt->__per_cpu_offset[i] == __per_cpu_load) break; if (!readmem(cpu_addr + kt->__per_cpu_offset[i], KVADDR, &cpunumber, sizeof(int), "cpu number (per_cpu)", QUIET|RETURN_ON_ERROR)) break; if (cpunumber != cpus) break; cpus++; } if ((i = get_cpus_present()) && (!cpus || (i < cpus))) cpus = get_highest_cpu_present() + 1; return cpus; } _boot_cpu_pda = FALSE; cpu_pda_buf = GETBUF(SIZE(x8664_pda)); if (LKCD_KERNTYPES()) { if (symbol_exists("_cpu_pda")) _cpu_pda = TRUE; else _cpu_pda = FALSE; nr_pda = get_cpus_possible(); } else { if (symbol_exists("_cpu_pda")) { if (!(nr_pda = get_array_length("_cpu_pda", NULL, 0))) nr_pda = NR_CPUS; _cpu_pda = TRUE; } else { if (!(nr_pda = get_array_length("cpu_pda", NULL, 0))) nr_pda = NR_CPUS; _cpu_pda = FALSE; } } if (_cpu_pda) { if (symbol_exists("_boot_cpu_pda")) _boot_cpu_pda = TRUE; else _boot_cpu_pda = FALSE; } for (i = cpus = 0; i < nr_pda; i++) { if (_cpu_pda) { if (_boot_cpu_pda) { if (!_CPU_PDA_READ2(i, cpu_pda_buf)) break; } else { if (!_CPU_PDA_READ(i, cpu_pda_buf)) break; } } else { if (!CPU_PDA_READ(i, cpu_pda_buf)) break; } if (VALID_MEMBER(x8664_pda_level4_pgt)) { level4_pgt = ULONG(cpu_pda_buf + OFFSET(x8664_pda_level4_pgt)); if (!VALID_LEVEL4_PGT_ADDR(level4_pgt)) break; } cpunumber = INT(cpu_pda_buf + OFFSET(x8664_pda_cpunumber)); if (cpunumber != cpus) break; 
cpus++; } FREEBUF(cpu_pda_buf); return cpus; } /* * Machine dependent command. */ void x86_64_cmd_mach(void) { int c, cflag, mflag; unsigned int radix; cflag = mflag = radix = 0; while ((c = getopt(argcnt, args, "cmxd")) != EOF) { switch(c) { case 'c': cflag++; break; case 'm': mflag++; x86_64_display_memmap(); break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (cflag) x86_64_display_cpu_data(radix); if (!cflag && !mflag) x86_64_display_machine_stats(); } /* * "mach" command output. */ static void x86_64_display_machine_stats(void) { int i, c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d", kt->cpus); if (kt->cpus - get_cpus_to_display()) fprintf(fp, " [OFFLINE: %d]\n", kt->cpus - get_cpus_to_display()); else fprintf(fp, "\n"); if (!STREQ(kt->hypervisor, "(undetermined)") && !STREQ(kt->hypervisor, "bare hardware")) fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, " KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, " KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); if (machdep->flags & VMEMMAP) fprintf(fp, " KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); fprintf(fp, " KERNEL START MAP: %lx\n", __START_KERNEL_map); fprintf(fp, " KERNEL MODULES BASE: %lx\n", MODULES_VADDR); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " IRQ STACK SIZE: %d\n", 
machdep->machspec->stkinfo.isize); fprintf(fp, " IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { sprintf(buf, "CPU %d", c); fprintf(fp, "%22s: %016lx", buf, machdep->machspec->stkinfo.ibase[c]); if (hide_offline_cpu(c)) fprintf(fp, " [OFFLINE]\n"); else fprintf(fp, "\n"); } for (i = 0; i < MAX_EXCEPTION_STACKS; i++) { if (machdep->machspec->stkinfo.ebase[0][i] == 0) break; fprintf(fp, "%11s STACK SIZE: %d\n", machdep->machspec->stkinfo.exception_stacks[i], machdep->machspec->stkinfo.esize[i]); sprintf(buf, "%s STACKS:\n", machdep->machspec->stkinfo.exception_stacks[i]); fprintf(fp, "%24s", buf); for (c = 0; c < kt->cpus; c++) { if (machdep->machspec->stkinfo.ebase[c][i] == 0) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%22s: %016lx", buf, machdep->machspec->stkinfo.ebase[c][i]); if (!machdep->machspec->stkinfo.available[c][i]) fprintf(fp, " [unavailable]"); if (hide_offline_cpu(c)) fprintf(fp, " [OFFLINE]\n"); else fprintf(fp, "\n"); } } } /* * "mach -c" */ static void x86_64_display_cpu_data(unsigned int radix) { int cpu, cpus, boot_cpu, _cpu_pda; ulong cpu_data; ulong cpu_pda, cpu_pda_addr; struct syment *per_cpu; boot_cpu = _cpu_pda = FALSE; cpu_data = cpu_pda = 0; cpus = 0; per_cpu = NULL; if (symbol_exists("cpu_data")) { cpu_data = symbol_value("cpu_data"); cpus = kt->cpus; boot_cpu = FALSE; } else if ((per_cpu = per_cpu_symbol_search("per_cpu__cpu_info"))) { cpus = kt->cpus; boot_cpu = FALSE; } else if (symbol_exists("boot_cpu_data")) { cpu_data = symbol_value("boot_cpu_data"); boot_cpu = TRUE; cpus = 1; } if (symbol_exists("_cpu_pda")) { cpu_pda = symbol_value("_cpu_pda"); _cpu_pda = TRUE; } else if (symbol_exists("cpu_pda")) { cpu_pda = symbol_value("cpu_pda"); _cpu_pda = FALSE; } for (cpu = 0; cpu < cpus; cpu++) { if (boot_cpu) fprintf(fp, "BOOT CPU:\n"); else { if (hide_offline_cpu(cpu)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", cpu ? "\n" : "", cpu); continue; } else fprintf(fp, "%sCPU %d:\n", cpu ? 
"\n" : "", cpu); } if (per_cpu) cpu_data = per_cpu->value + kt->__per_cpu_offset[cpu]; dump_struct("cpuinfo_x86", cpu_data, radix); if (_cpu_pda) { readmem(cpu_pda, KVADDR, &cpu_pda_addr, sizeof(unsigned long), "_cpu_pda addr", FAULT_ON_ERROR); fprintf(fp, "\n"); dump_struct("x8664_pda", cpu_pda_addr, radix); cpu_pda += sizeof(void *); } else if (VALID_STRUCT(x8664_pda)) { fprintf(fp, "\n"); dump_struct("x8664_pda", cpu_pda, radix); cpu_pda += SIZE(x8664_pda); } if (!per_cpu) cpu_data += SIZE(cpuinfo_x86); } } /* * "mach -m" */ static char *e820type[] = { "(invalid type)", "E820_RAM", "E820_RESERVED", "E820_ACPI", "E820_NVS", "E820_UNUSABLE", }; static void x86_64_display_memmap(void) { ulong e820; int nr_map, i; char *buf, *e820entry_ptr; ulonglong addr, size; uint type; if (kernel_symbol_exists("e820")) { if (get_symbol_type("e820", NULL, NULL) == TYPE_CODE_PTR) get_symbol_data("e820", sizeof(void *), &e820); else e820 = symbol_value("e820"); } else if (kernel_symbol_exists("e820_table")) get_symbol_data("e820_table", sizeof(void *), &e820); else error(FATAL, "neither e820 or e820_table symbols exist\n"); if (CRASHDEBUG(1)) { if (STRUCT_EXISTS("e820map")) dump_struct("e820map", e820, RADIX(16)); else if (STRUCT_EXISTS("e820_table")) dump_struct("e820_table", e820, RADIX(16)); } buf = (char *)GETBUF(SIZE(e820map)); readmem(e820, KVADDR, &buf[0], SIZE(e820map), "e820map", FAULT_ON_ERROR); nr_map = INT(buf + OFFSET(e820map_nr_map)); fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); for (i = 0; i < nr_map; i++) { e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); type = UINT(e820entry_ptr + OFFSET(e820entry_type)); fprintf(fp, "%016llx - %016llx ", addr, addr+size); if (type >= (sizeof(e820type)/sizeof(char *))) { if (type == 12) fprintf(fp, "E820_PRAM\n"); else if (type == 128) fprintf(fp, "E820_RESERVED_KERN\n"); else fprintf(fp, "type %d\n", 
type); } else fprintf(fp, "%s\n", e820type[type]); } } static const char *hook_files[] = { "arch/x86_64/kernel/entry.S", "arch/x86_64/kernel/head.S", "arch/x86_64/kernel/semaphore.c" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) #define SEMAPHORE_C ((char **)&hook_files[2]) static struct line_number_hook x86_64_line_number_hooks[] = { {"ret_from_fork", ENTRY_S}, {"system_call", ENTRY_S}, {"int_ret_from_sys_call", ENTRY_S}, {"ptregscall_common", ENTRY_S}, {"stub_execve", ENTRY_S}, {"stub_rt_sigreturn", ENTRY_S}, {"common_interrupt", ENTRY_S}, {"ret_from_intr", ENTRY_S}, {"load_gs_index", ENTRY_S}, {"arch_kernel_thread", ENTRY_S}, {"execve", ENTRY_S}, {"page_fault", ENTRY_S}, {"coprocessor_error", ENTRY_S}, {"simd_coprocessor_error", ENTRY_S}, {"device_not_available", ENTRY_S}, {"debug", ENTRY_S}, {"nmi", ENTRY_S}, {"int3", ENTRY_S}, {"overflow", ENTRY_S}, {"bounds", ENTRY_S}, {"invalid_op", ENTRY_S}, {"coprocessor_segment_overrun", ENTRY_S}, {"reserved", ENTRY_S}, {"double_fault", ENTRY_S}, {"invalid_TSS", ENTRY_S}, {"segment_not_present", ENTRY_S}, {"stack_segment", ENTRY_S}, {"general_protection", ENTRY_S}, {"alignment_check", ENTRY_S}, {"divide_error", ENTRY_S}, {"spurious_interrupt_bug", ENTRY_S}, {"machine_check", ENTRY_S}, {"call_debug", ENTRY_S}, {NULL, NULL} /* list must be NULL-terminated */ }; static void x86_64_dump_line_number(ulong callpc) { error(FATAL, "x86_64_dump_line_number: TBD\n"); } void x86_64_compiler_warning_stub(void) { struct line_number_hook *lhp; char **p ATTRIBUTE_UNUSED; lhp = &x86_64_line_number_hooks[0]; lhp++; p = ENTRY_S; x86_64_back_trace(NULL, NULL); get_x86_64_frame(NULL, NULL, NULL); x86_64_dump_line_number(0); } /* * Force the VM address-range selection via: * * --machdep vm=orig * --machdep vm=2.6.11 * * Force the phys_base address via: * * --machdep phys_base=
* * Force the IRQ stack back-link via: * * --machdep irq_eframe_link= * * Force the IRQ stack gap size via: * * --machdep irq_stack_gap= * * Force max_physmem_bits via: * * --machdep max_physmem_bits= */ void parse_cmdline_args(void) { int index, i, c, errflag; char *p; char buf[BUFSIZE]; char *arglist[MAXARGS]; int megabytes, gigabytes; int lines = 0; int vm_flag; ulong value; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %s\n\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = vm_flag = 0; i < c; i++) { errflag = 0; if (STRNEQ(arglist[i], "vm=")) { vm_flag++; p = arglist[i] + strlen("vm="); if (strlen(p)) { if (STREQ(p, "orig")) { machdep->flags |= VM_ORIG; continue; } else if (STREQ(p, "2.6.11")) { machdep->flags |= VM_2_6_11; continue; } else if (STREQ(p, "xen")) { machdep->flags |= VM_XEN; continue; } else if (STREQ(p, "xen-rhel4")) { machdep->flags |= VM_XEN_RHEL4; continue; } else if (STREQ(p, "5level")) { machdep->flags |= VM_5LEVEL; continue; } } } else if (STRNEQ(arglist[i], "phys_base=")) { megabytes = FALSE; if ((LASTCHAR(arglist[i]) == 'm') || (LASTCHAR(arglist[i]) == 'M')) { LASTCHAR(arglist[i]) = NULLCHAR; megabytes = TRUE; } p = arglist[i] + strlen("phys_base="); if (strlen(p)) { if (hexadecimal(p, 0) && !decimal(p, 0) && !STRNEQ(p, "0x") && !STRNEQ(p, "0X")) string_insert("0x", p); errno = 0; value = strtoull(p, NULL, 0); if (!errno) { if (megabytes) value = MEGABYTES(value); machdep->machspec->phys_base = value; error(NOTE, "setting phys_base to: 0x%lx\n\n", machdep->machspec->phys_base); machdep->flags |= PHYS_BASE; continue; } } } else if (STRNEQ(arglist[i], "kernel_image_size=")) { megabytes = gigabytes = FALSE; if ((LASTCHAR(arglist[i]) == 'm') || (LASTCHAR(arglist[i]) == 'M')) 
{ LASTCHAR(arglist[i]) = NULLCHAR; megabytes = TRUE; } if ((LASTCHAR(arglist[i]) == 'g') || (LASTCHAR(arglist[i]) == 'G')) { LASTCHAR(arglist[i]) = NULLCHAR; gigabytes = TRUE; } p = arglist[i] + strlen("kernel_image_size="); if (strlen(p)) { if (megabytes || gigabytes) { value = dtol(p, RETURN_ON_ERROR|QUIET, &errflag); } else value = htol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { if (megabytes) value = MEGABYTES(value); else if (gigabytes) value = GIGABYTES(value); machdep->machspec->kernel_image_size = value; error(NOTE, "setting kernel_image_size to: 0x%lx\n\n", machdep->machspec->kernel_image_size); continue; } } } else if (STRNEQ(arglist[i], "irq_eframe_link=")) { p = arglist[i] + strlen("irq_eframe_link="); if (strlen(p)) { value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { machdep->machspec->irq_eframe_link = value; continue; } } } else if (STRNEQ(arglist[i], "irq_stack_gap=")) { p = arglist[i] + strlen("irq_stack_gap="); if (strlen(p)) { value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { machdep->machspec->irq_stack_gap = value; continue; } } } else if (STRNEQ(arglist[i], "max_physmem_bits=")) { p = arglist[i] + strlen("max_physmem_bits="); if (strlen(p)) { value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { machdep->max_physmem_bits = value; error(NOTE, "setting max_physmem_bits to: %ld\n\n", machdep->max_physmem_bits); continue; } } } else if (STRNEQ(arglist[i], "page_offset=")) { p = arglist[i] + strlen("page_offset="); if (strlen(p)) { value = htol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { machdep->machspec->page_offset_force = value; error(NOTE, "setting PAGE_OFFSET to: 0x%lx\n\n", machdep->machspec->page_offset_force); continue; } } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); lines++; } if (vm_flag) { switch (machdep->flags & VM_FLAGS) { case 0: break; case VM_ORIG: error(NOTE, "using original x86_64 VM address ranges\n"); lines++; break; case VM_2_6_11: error(NOTE, 
"using 2.6.11 x86_64 VM address ranges\n"); lines++; break; case VM_XEN: error(NOTE, "using xen x86_64 VM address ranges\n"); lines++; break; case VM_XEN_RHEL4: error(NOTE, "using RHEL4 xen x86_64 VM address ranges\n"); lines++; break; case VM_5LEVEL: error(NOTE, "using 5-level pagetable x86_64 VM address ranges\n"); lines++; break; default: error(WARNING, "cannot set multiple vm values\n"); lines++; machdep->flags &= ~VM_FLAGS; break; } } if (lines) fprintf(fp, "\n"); } } void x86_64_clear_machdep_cache(void) { if (machdep->last_pgd_read != vt->kernel_pgd[0]) machdep->last_pgd_read = 0; } #define PUSH_RBP_MOV_RSP_RBP 0xe5894855 static void x86_64_framepointer_init(void) { unsigned int push_rbp_mov_rsp_rbp; int i, check; char *checkfuncs[] = {"sys_open", "sys_fork", "sys_read", "__x64_sys_open", "__x64_sys_fork", "__x64_sys_read", "do_futex", "do_fork", "_do_fork", "sys_write", "vfs_read", "__schedule"}; if (pc->flags & KERNEL_DEBUG_QUERY) return; for (i = check = 0; i < 12; i++) { if (!kernel_symbol_exists(checkfuncs[i])) continue; if (!readmem(symbol_value(checkfuncs[i]), KVADDR, &push_rbp_mov_rsp_rbp, sizeof(uint), "framepointer check", RETURN_ON_ERROR)) return; if ((push_rbp_mov_rsp_rbp == 0x66666666) || (push_rbp_mov_rsp_rbp == 0x00441f0f)) { if (!readmem(symbol_value(checkfuncs[i]) + 5, KVADDR, &push_rbp_mov_rsp_rbp, sizeof(uint), "framepointer check", RETURN_ON_ERROR)) return; } if (push_rbp_mov_rsp_rbp == PUSH_RBP_MOV_RSP_RBP) { if (++check > 2) { machdep->flags |= FRAMEPOINTER; break; } } } } static void x86_64_ORC_init(void) { int i; char *ORC_symbols[] = { "lookup_num_blocks", "__start_orc_unwind_ip", "__stop_orc_unwind_ip", "__start_orc_unwind", "__stop_orc_unwind", "orc_lookup", NULL }; struct ORC_data *orc; MEMBER_OFFSET_INIT(inactive_task_frame_bp, "inactive_task_frame", "bp"); MEMBER_OFFSET_INIT(inactive_task_frame_ret_addr, "inactive_task_frame", "ret_addr"); MEMBER_OFFSET_INIT(inactive_task_frame_r15, "inactive_task_frame", "r15"); 
MEMBER_OFFSET_INIT(inactive_task_frame_r14, "inactive_task_frame", "r14"); MEMBER_OFFSET_INIT(inactive_task_frame_r13, "inactive_task_frame", "r13"); MEMBER_OFFSET_INIT(inactive_task_frame_r12, "inactive_task_frame", "r12"); MEMBER_OFFSET_INIT(inactive_task_frame_flags, "inactive_task_frame", "flags"); MEMBER_OFFSET_INIT(inactive_task_frame_si, "inactive_task_frame", "si"); MEMBER_OFFSET_INIT(inactive_task_frame_di, "inactive_task_frame", "di"); MEMBER_OFFSET_INIT(inactive_task_frame_bx, "inactive_task_frame", "bx"); if (machdep->flags & FRAMEPOINTER) return; STRUCT_SIZE_INIT(orc_entry, "orc_entry"); if (!VALID_STRUCT(orc_entry)) return; if (!MEMBER_EXISTS("orc_entry", "sp_offset") || !MEMBER_EXISTS("orc_entry", "bp_offset") || !MEMBER_EXISTS("orc_entry", "sp_reg") || !MEMBER_EXISTS("orc_entry", "bp_reg") || !MEMBER_EXISTS("orc_entry", "type") || SIZE(orc_entry) != sizeof(kernel_orc_entry)) { error(WARNING, "ORC unwinder: orc_entry structure has changed\n"); return; } for (i = 0; ORC_symbols[i]; i++) { if (!symbol_exists(ORC_symbols[i])) { error(WARNING, "ORC unwinder: %s does not exist in this kernel\n", ORC_symbols[i]); return; } } orc = &machdep->machspec->orc; MEMBER_OFFSET_INIT(module_arch, "module", "arch"); MEMBER_OFFSET_INIT(mod_arch_specific_num_orcs, "mod_arch_specific", "num_orcs"); MEMBER_OFFSET_INIT(mod_arch_specific_orc_unwind_ip, "mod_arch_specific", "orc_unwind_ip"); MEMBER_OFFSET_INIT(mod_arch_specific_orc_unwind, "mod_arch_specific", "orc_unwind"); /* * Nice to have, but not required. 
*/ if (VALID_MEMBER(module_arch) && VALID_MEMBER(mod_arch_specific_num_orcs) && VALID_MEMBER(mod_arch_specific_orc_unwind_ip) && VALID_MEMBER(mod_arch_specific_orc_unwind)) { orc->module_ORC = TRUE; } else { orc->module_ORC = FALSE; error(WARNING, "ORC unwinder: module orc_entry structures have changed\n"); } if (!readmem(symbol_value("lookup_num_blocks"), KVADDR, &orc->lookup_num_blocks, sizeof(unsigned int), "lookup_num_blocks", RETURN_ON_ERROR|QUIET)) { error(WARNING, "ORC unwinder: cannot read lookup_num_blocks\n"); return; } orc->__start_orc_unwind_ip = symbol_value("__start_orc_unwind_ip"); orc->__stop_orc_unwind_ip = symbol_value("__stop_orc_unwind_ip"); orc->__start_orc_unwind = symbol_value("__start_orc_unwind"); orc->__stop_orc_unwind = symbol_value("__stop_orc_unwind"); orc->orc_lookup = symbol_value("orc_lookup"); orc->has_signal = MEMBER_EXISTS("orc_entry", "signal"); /* added at 6.3 */ orc->has_end = MEMBER_EXISTS("orc_entry", "end"); /* removed at 6.4 */ if (orc->has_signal && !orc->has_end) machdep->flags |= ORC_6_4; machdep->flags |= ORC; } static ulong search_for_switch_to(ulong start, ulong end) { ulong max_instructions, address; char buf1[BUFSIZE]; char search_string1[BUFSIZE]; char search_string2[BUFSIZE]; char search_string3[BUFSIZE]; char search_string4[BUFSIZE]; int found; max_instructions = end - start; found = FALSE; search_string1[0] = search_string2[0] = NULLCHAR; search_string3[0] = search_string4[0] = NULLCHAR; sprintf(buf1, "x/%ldi 0x%lx", max_instructions, start); if (symbol_exists("__switch_to")) { sprintf(search_string1, "callq 0x%lx", symbol_value("__switch_to")); sprintf(search_string2, "call 0x%lx", symbol_value("__switch_to")); } if (symbol_exists("__switch_to_asm")) { sprintf(search_string3, "callq 0x%lx", symbol_value("__switch_to_asm")); sprintf(search_string4, "call 0x%lx", symbol_value("__switch_to_asm")); } open_tmpfile(); if (!gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR)) return FALSE; rewind(pc->tmpfile); 
while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (found) break; if (strstr(buf1, "<__switch_to>")) found = TRUE; if (strlen(search_string1) && strstr(buf1, search_string1)) found = TRUE; if (strlen(search_string2) && strstr(buf1, search_string2)) found = TRUE; if (strlen(search_string3) && strstr(buf1, search_string3)) found = TRUE; if (strlen(search_string4) && strstr(buf1, search_string4)) found = TRUE; } close_tmpfile(); if (found && extract_hex(buf1, &address, ':', TRUE)) return address; return 0; } static void x86_64_thread_return_init(void) { struct syment *sp, *spn; ulong address; if ((sp = kernel_symbol_search("thread_return"))) { machdep->machspec->thread_return = sp->value; return; } if ((sp = kernel_symbol_search("schedule")) && (spn = next_symbol(NULL, sp)) && (address = search_for_switch_to(sp->value, spn->value))) { machdep->machspec->thread_return = address; return; } if ((sp = kernel_symbol_search("__schedule")) && (spn = next_symbol(NULL, sp)) && (address = search_for_switch_to(sp->value, spn->value))) { machdep->machspec->thread_return = address; return; } error(INFO, "cannot determine thread return address\n"); machdep->machspec->thread_return = (sp = kernel_symbol_search("schedule")) ? 
sp->value : 0; } static void x86_64_irq_eframe_link_init(void) { int c; struct syment *sp, *spn; char buf[BUFSIZE]; char link_register[BUFSIZE]; char *arglist[MAXARGS]; if (machdep->machspec->irq_eframe_link == UNINITIALIZED) machdep->machspec->irq_eframe_link = 0; else return; if (symbol_exists("asm_common_interrupt")) { if (symbol_exists("asm_call_on_stack")) machdep->machspec->irq_eframe_link = -64; else machdep->machspec->irq_eframe_link = -32; return; } if (THIS_KERNEL_VERSION < LINUX(2,6,9)) return; if (!(sp = symbol_search("common_interrupt")) || !(spn = next_symbol(NULL, sp))) { return; } open_tmpfile(); sprintf(buf, "disassemble 0x%lx, 0x%lx", sp->value, spn->value); if (!gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR)) return; link_register[0] = NULLCHAR; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "Dump of assembler code")) continue; if (!strstr(buf, sp->name)) break; if ((c = parse_line(buf, arglist)) < 4) continue; if (strstr(arglist[2], "push")) strcpy(link_register, arglist[3]); } close_tmpfile(); if (CRASHDEBUG(1)) fprintf(fp, "IRQ stack link register: %s\n", strlen(link_register) ? link_register : "undetermined"); if (STREQ(link_register, "%rbp")) machdep->machspec->irq_eframe_link = 40; else if (THIS_KERNEL_VERSION >= LINUX(2,6,29)) machdep->machspec->irq_eframe_link = 40; } /* * Calculate and verify the IRQ exception frame location from the * stack reference at the top of the IRQ stack, keep ms->irq_eframe_link * as the most likely value, and try a few sizes around it. 
*/ static ulong x86_64_irq_eframe_link(ulong stkref, struct bt_info *bt, FILE *ofp) { ulong irq_eframe; int i, try[] = { 8, -8, 16, -16 }; if (x86_64_exception_frame(EFRAME_VERIFY, stkref, 0, bt, ofp)) return stkref; irq_eframe = stkref - machdep->machspec->irq_eframe_link; if (x86_64_exception_frame(EFRAME_VERIFY, irq_eframe, 0, bt, ofp)) return irq_eframe; for (i = 0; i < sizeof(try)/sizeof(int); i++) { if (x86_64_exception_frame(EFRAME_VERIFY, irq_eframe+try[i], 0, bt, ofp)) return (irq_eframe + try[i]); } return irq_eframe; } #include "netdump.h" #include "xen_dom0.h" /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. */ #define MAX_X86_64_FRAMES (512) #define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) static int x86_64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) { int i, j; ulong kvaddr; ulong *up; ulong frames; ulong frame_mfn[MAX_X86_64_FRAMES] = { 0 }; int mfns[MAX_X86_64_FRAMES] = { 0 }; struct syment *sp; /* * Temporarily read physical (machine) addresses from vmcore. 
*/ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); if (xkd->flags & KDUMP_CR3) goto use_cr3; if (CRASHDEBUG(1)) fprintf(fp, "x86_64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), "xen kdump p2m mfn page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn page\n"); if (CRASHDEBUG(2)) x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list"); for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_64_FRAMES; i++, up++) frame_mfn[i] = *up; for (i = 0; i < MAX_X86_64_FRAMES; i++) { if (!frame_mfn[i]) break; if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn list page\n"); for (j = 0, up = (ulong *)xkd->page; j < MFNS_PER_FRAME; j++, up++) if (*up) mfns[i]++; xkd->p2m_frames += mfns[i]; if (CRASHDEBUG(7)) x86_64_debug_dump_page(fp, xkd->page, "pfn_to_mfn_frame_list page"); } if (CRASHDEBUG(1)) fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); if ((xkd->p2m_mfn_frame_list = (ulong *) malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) error(FATAL, "cannot malloc p2m_frame_index_list"); for (i = 0, frames = xkd->p2m_frames; frames; i++) { if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn list page\n"); frames -= mfns[i]; } if (CRASHDEBUG(2)) { for (i = 0; i < xkd->p2m_frames; i++) fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); fprintf(fp, "\n"); } pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; use_cr3: if (CRASHDEBUG(1)) fprintf(fp, "x86_64_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->pgd, PAGESIZE(), "xen kdump cr3 page", 
RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump cr3 page\n"); if (CRASHDEBUG(7)) x86_64_debug_dump_page(fp, machdep->pgd, "contents of PML4 page:"); /* * kernel version < 2.6.27 => end_pfn * kernel version >= 2.6.27 => max_pfn */ if ((sp = symbol_search("end_pfn"))) kvaddr = sp->value; else kvaddr = symbol_value("max_pfn"); if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) return FALSE; up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); if (CRASHDEBUG(1)) fprintf(fp, "end_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", kvaddr, *up, *up, xkd->p2m_frames); if ((xkd->p2m_mfn_frame_list = (ulong *) malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) error(FATAL, "cannot malloc p2m_frame_index_list"); kvaddr = symbol_value("phys_to_machine_mapping"); if (!x86_64_xen_kdump_load_page(kvaddr, xkd->page)) return FALSE; up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); kvaddr = *up; if (CRASHDEBUG(1)) fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr); machdep->last_pud_read = BADADDR; machdep->last_pmd_read = BADADDR; machdep->last_ptbl_read = BADADDR; for (i = 0; i < xkd->p2m_frames; i++) { xkd->p2m_mfn_frame_list[i] = x86_64_xen_kdump_page_mfn(kvaddr); kvaddr += PAGESIZE(); } if (CRASHDEBUG(1)) { for (i = 0; i < xkd->p2m_frames; i++) fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); fprintf(fp, "\n"); } machdep->last_pud_read = 0; machdep->last_ptbl_read = 0; machdep->last_pmd_read = 0; pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; } static char * x86_64_xen_kdump_load_page(ulong kvaddr, char *pgbuf) { ulong mfn; ulong *pgd, *pud, *pmd, *ptep; pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if (CRASHDEBUG(3)) fprintf(fp, "[%lx] pgd: %lx mfn: %lx pgd_index: %lx\n", kvaddr, *pgd, mfn, pgd_index(kvaddr)); if (!readmem(PTOB(mfn), PHYSADDR, 
machdep->pud, PAGESIZE(), "xen kdump pud page", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pud page\n"); machdep->last_pud_read = mfn; if (CRASHDEBUG(7)) x86_64_debug_dump_page(fp, machdep->pud, "contents of page upper directory page:"); pud = ((ulong *)machdep->pud) + pud_index(kvaddr); mfn = ((*pud) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if (CRASHDEBUG(3)) fprintf(fp, "[%lx] pud: %lx mfn: %lx pud_index: %lx\n", kvaddr, *pgd, mfn, pud_index(kvaddr)); if (!readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), "xen kdump pmd page", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pmd page\n"); machdep->last_pmd_read = mfn; if (CRASHDEBUG(7)) x86_64_debug_dump_page(fp, machdep->pmd, "contents of page middle directory page:"); pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if (CRASHDEBUG(3)) fprintf(fp, "[%lx] pmd: %lx mfn: %lx pmd_index: %lx\n", kvaddr, *pmd, mfn, pmd_index(kvaddr)); if (!readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), "xen kdump page table page", RETURN_ON_ERROR)) error(FATAL, "cannot read/find page table page\n"); machdep->last_ptbl_read = mfn; if (CRASHDEBUG(7)) x86_64_debug_dump_page(fp, machdep->ptbl, "contents of page table page:"); ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if (CRASHDEBUG(3)) fprintf(fp, "[%lx] ptep: %lx mfn: %lx pte_index: %lx\n", kvaddr, *ptep, mfn, pte_index(kvaddr)); if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(), "xen kdump page table page", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pte page\n"); if (CRASHDEBUG(7)) x86_64_debug_dump_page(fp, pgbuf, "contents of page:"); return pgbuf; } static ulong x86_64_xen_kdump_page_mfn(ulong kvaddr) { ulong mfn; ulong *pgd, *pud, *pmd, *ptep; pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr); mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if ((mfn != machdep->last_pud_read) && !readmem(PTOB(mfn), PHYSADDR, machdep->pud, PAGESIZE(), "xen 
kdump pud entry", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pud page\n"); machdep->last_pud_read = mfn; pud = ((ulong *)machdep->pud) + pud_index(kvaddr); mfn = ((*pud) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if ((mfn != machdep->last_pmd_read) && !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(), "xen kdump pmd entry", RETURN_ON_ERROR)) error(FATAL, "cannot read/find pmd page\n"); machdep->last_pmd_read = mfn; pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr); mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); if ((mfn != machdep->last_ptbl_read) && !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(), "xen kdump page table page", RETURN_ON_ERROR)) error(FATAL, "cannot read/find page table page\n"); machdep->last_ptbl_read = mfn; ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr); mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT(); return mfn; } #include "xendump.h" static int in_START_KERNEL_map(ulong vaddr) { if (machdep->machspec->kernel_image_size && ((vaddr >= __START_KERNEL_map) && (vaddr < (__START_KERNEL_map + machdep->machspec->kernel_image_size)))) return TRUE; if ((vaddr >= __START_KERNEL_map) && (vaddr < highest_bss_symbol())) return TRUE; return FALSE; } /* * Determine the physical address base for relocatable kernels. 
*/ static void x86_64_calc_phys_base(void) { int i; FILE *iomem; char buf[BUFSIZE]; char *p1; ulong phys_base, text_start, kernel_code_start; int errflag; struct vmcore_data *vd; static struct xendump_data *xd; Elf64_Phdr *phdr; if (machdep->flags & PHYS_BASE) /* --machdep override */ return; machdep->machspec->phys_base = 0; /* default/traditional */ if (pc->flags2 & GET_LOG) text_start = BADADDR; else { if (!kernel_symbol_exists("phys_base")) return; if (!symbol_exists("_text")) return; else text_start = symbol_value("_text"); if (REMOTE()) { phys_base = get_remote_phys_base(text_start, symbol_value("phys_base")); if (phys_base) { machdep->machspec->phys_base = phys_base; if (CRASHDEBUG(1)) { fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "phys_base: %lx\n\n", machdep->machspec->phys_base); } return; } } } /* * Linux 4.10 exports it in VMCOREINFO (finally). */ if ((p1 = pc->read_vmcoreinfo("NUMBER(phys_base)"))) { if (*p1 == '-') machdep->machspec->phys_base = dtol(p1+1, QUIET, NULL) * -1; else machdep->machspec->phys_base = dtol(p1, QUIET, NULL); if (CRASHDEBUG(1)) fprintf(fp, "VMCOREINFO: NUMBER(phys_base): %s -> %lx\n", p1, machdep->machspec->phys_base); free(p1); return; } if (LOCAL_ACTIVE()) { if ((iomem = fopen("/proc/iomem", "r")) == NULL) return; errflag = 1; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, ": Kernel code")) { clean_line(buf); errflag = 0; break; } } fclose(iomem); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; else *p1 = NULLCHAR; errflag = 0; kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) return; machdep->machspec->phys_base = kernel_code_start - (text_start - __START_KERNEL_map); if (CRASHDEBUG(1)) { fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); fprintf(fp, "phys_base: %lx\n\n", machdep->machspec->phys_base); } return; } /* * Get relocation value from whatever dumpfile format is being used. 
*/ if (QEMU_MEM_DUMP_NO_VMCOREINFO()) { if ((KDUMP_DUMPFILE() && kdump_phys_base(&phys_base)) || (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_base))) machdep->machspec->phys_base = phys_base; if (!x86_64_virt_phys_base()) error(WARNING, "cannot determine physical base address:" " defaulting to %lx\n\n", machdep->machspec->phys_base); return; } if (VMSS_DUMPFILE()) { if (vmware_vmss_phys_base(&phys_base)) { machdep->machspec->phys_base = phys_base; if (!x86_64_virt_phys_base()) error(WARNING, "cannot determine physical base address:" " defaulting to %lx\n\n", machdep->machspec->phys_base); if (CRASHDEBUG(1)) fprintf(fp, "compressed kdump: phys_base: %lx\n", phys_base); } return; } if (DISKDUMP_DUMPFILE()) { if (diskdump_phys_base(&phys_base)) { machdep->machspec->phys_base = phys_base; if ((pc->flags2 & QEMU_MEM_DUMP_COMPRESSED) && !x86_64_virt_phys_base()) error(WARNING, "cannot determine physical base address:" " defaulting to %lx\n\n", machdep->machspec->phys_base); if (CRASHDEBUG(1)) fprintf(fp, "compressed kdump: phys_base: %lx\n", phys_base); } return; } if (KVMDUMP_DUMPFILE()) { if (kvmdump_phys_base(&phys_base)) { machdep->machspec->phys_base = phys_base; if (CRASHDEBUG(1)) fprintf(fp, "kvmdump: phys_base: %lx\n", phys_base); } else { machdep->machspec->phys_base = phys_base; if (!x86_64_virt_phys_base()) error(WARNING, "cannot determine physical base address:" " defaulting to %lx\n\n", phys_base); } return; } if (SADUMP_DUMPFILE()) { if (sadump_phys_base(&phys_base)) { machdep->machspec->phys_base = phys_base; if (CRASHDEBUG(1)) fprintf(fp, "sadump: phys_base: %lx\n", phys_base); } else { machdep->machspec->phys_base = phys_base; if (!x86_64_virt_phys_base()) error(WARNING, "cannot determine physical base address:" " defaulting to %lx\n\n", phys_base); } return; } if ((vd = get_kdump_vmcore_data())) { for (i = 0; i < vd->num_pt_load_segments; i++) { phdr = vd->load64 + i; if ((phdr->p_vaddr >= __START_KERNEL_map) && (in_START_KERNEL_map(phdr->p_vaddr) 
|| !(IS_VMALLOC_ADDR(phdr->p_vaddr)))) { machdep->machspec->phys_base = phdr->p_paddr - (phdr->p_vaddr & ~(__START_KERNEL_map)); if (CRASHDEBUG(1)) { fprintf(fp, "p_vaddr: %lx p_paddr: %lx -> ", phdr->p_vaddr, phdr->p_paddr); fprintf(fp, "phys_base: %lx\n\n", machdep->machspec->phys_base); } break; } } if ((pc->flags2 & QEMU_MEM_DUMP_ELF) && !x86_64_virt_phys_base()) error(WARNING, "cannot determine physical base address:" " defaulting to %lx\n\n", machdep->machspec->phys_base); return; } if ((xd = get_xendump_data())) { if (text_start == __START_KERNEL_map) { /* * Xen kernels are not relocable (yet) and don't have * the "phys_base" entry point, so this is most likely * a xendump of a fully-virtualized relocatable kernel. * No clues exist in the xendump header, so hardwire * phys_base to 2MB and hope for the best. */ machdep->machspec->phys_base = 0x200000; if (CRASHDEBUG(1)) fprintf(fp, "default relocatable phys_base: %lx\n", machdep->machspec->phys_base); } else if (text_start > __START_KERNEL_map) { switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) { /* * If this is a new ELF-style xendump with no * p2m information, then it also must be a * fully-virtualized relocatable kernel. Again, * the xendump header is useless, and we don't * have /proc/iomem, so presume that the kernel * code starts at 2MB. */ case (XC_CORE_ELF|XC_CORE_NO_P2M): machdep->machspec->phys_base = 0x200000 - (text_start - __START_KERNEL_map); if (CRASHDEBUG(1)) fprintf(fp, "default relocatable " "phys_base: %lx\n", machdep->machspec->phys_base); break; default: break; } } if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM) x86_64_virt_phys_base(); } } /* * Verify, or possibly override, the xendump/kvmdump phys_base * calculation by trying to read linux_banner from a range of * typical physical offsets. 
*/ static int x86_64_virt_phys_base(void) { char buf[BUFSIZE]; struct syment *sp; ulong phys, linux_banner_phys; if (!(sp = symbol_search("linux_banner")) || !((sp->type == 'R') || (sp->type == 'r') || (sp->type == 'D'))) return FALSE; linux_banner_phys = sp->value - __START_KERNEL_map; if (readmem(linux_banner_phys + machdep->machspec->phys_base, PHYSADDR, buf, strlen("Linux version"), "linux_banner verify", QUIET|RETURN_ON_ERROR) && STRNEQ(buf, "Linux version")) return TRUE; for (phys = (ulong)(-MEGABYTES(32)); phys != 0xfffff00000; phys += MEGABYTES(1)) { if (readmem(linux_banner_phys + phys, PHYSADDR, buf, strlen("Linux version"), "linux_banner search", QUIET|RETURN_ON_ERROR) && STRNEQ(buf, "Linux version")) { if (CRASHDEBUG(1)) fprintf(fp, "virtual dump phys_base: %lx %s\n", phys, machdep->machspec->phys_base != phys ? "override" : ""); machdep->machspec->phys_base = phys; return TRUE; } } return FALSE; } /* * Create an index of mfns for each page that makes up the * kernel's complete phys_to_machine_mapping[max_pfn] array. */ static int x86_64_xendump_p2m_create(struct xendump_data *xd) { int i, idx; ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; ulong *up; off_t offset; struct syment *sp; /* * Check for pvops Xen kernel before presuming it's HVM. 
*/ if (symbol_exists("pv_init_ops") && (symbol_exists("xen_patch") || symbol_exists("paravirt_patch_default")) && (xd->xc_core.header.xch_magic == XC_CORE_MAGIC)) return x86_64_pvops_xendump_p2m_create(xd); if (!symbol_exists("phys_to_machine_mapping")) { xd->flags |= XC_CORE_NO_P2M; return TRUE; } if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == INVALID_OFFSET) error(FATAL, "cannot determine vcpu_guest_context.ctrlreg offset\n"); else if (CRASHDEBUG(1)) fprintf(xd->ofp, "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", ctrlreg_offset); offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_ctxt_offset\n"); if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg)) error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); for (i = 0; CRASHDEBUG(1) && (i < 8); i++) fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]); mfn = ctrlreg[3] >> PAGESHIFT(); if (!xc_core_mfn_to_page(mfn, machdep->pgd)) error(FATAL, "cannot read/find cr3 page\n"); if (CRASHDEBUG(7)) x86_64_debug_dump_page(xd->ofp, machdep->pgd, "contents of PGD page:"); /* * kernel version < 2.6.27 => end_pfn * kernel version >= 2.6.27 => max_pfn */ if ((sp = symbol_search("end_pfn"))) kvaddr = sp->value; else kvaddr = symbol_value("max_pfn"); if (!x86_64_xendump_load_page(kvaddr, xd)) return FALSE; up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (CRASHDEBUG(1)) fprintf(xd->ofp, "end pfn: %lx\n", *up); xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); if ((xd->xc_core.p2m_frame_index_list = (ulong *) malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL) error(FATAL, "cannot malloc p2m_frame_list"); kvaddr = symbol_value("phys_to_machine_mapping"); if (!x86_64_xendump_load_page(kvaddr, xd)) return FALSE; up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (CRASHDEBUG(1)) fprintf(fp, "phys_to_machine_mapping: %lx\n", *up); kvaddr = *up; machdep->last_ptbl_read = BADADDR; for (i = 0; i < xd->xc_core.p2m_frames; i++) { if ((idx = x86_64_xendump_page_index(kvaddr, xd)) == MFN_NOT_FOUND) return FALSE; xd->xc_core.p2m_frame_index_list[i] = idx; kvaddr += PAGESIZE(); } machdep->last_ptbl_read = 0; return TRUE; } static int x86_64_pvops_xendump_p2m_create(struct xendump_data *xd) { int i; ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset; ulong *up; off_t offset; struct syment *sp; if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) == INVALID_OFFSET) error(FATAL, "cannot determine vcpu_guest_context.ctrlreg offset\n"); else if (CRASHDEBUG(1)) fprintf(xd->ofp, "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n", ctrlreg_offset); offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_ctxt_offset\n"); if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg)) error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n"); for (i = 0; CRASHDEBUG(1) && (i < 8); i++) fprintf(xd->ofp, "ctrlreg[%d]: %lx\n", i, ctrlreg[i]); mfn = ctrlreg[3] >> PAGESHIFT(); if (!xc_core_mfn_to_page(mfn, machdep->pgd)) error(FATAL, "cannot read/find cr3 page\n"); if (CRASHDEBUG(7)) x86_64_debug_dump_page(xd->ofp, machdep->pgd, "contents of PGD page:"); /* * kernel version < 2.6.27 => end_pfn * kernel version >= 2.6.27 => max_pfn */ if ((sp = symbol_search("end_pfn"))) kvaddr = sp->value; else kvaddr = symbol_value("max_pfn"); if (!x86_64_xendump_load_page(kvaddr, xd)) return FALSE; up = (ulong *)(xd->page + PAGEOFFSET(kvaddr)); if 
(CRASHDEBUG(1)) fprintf(xd->ofp, "end pfn: %lx\n", *up); xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0); if ((xd->xc_core.p2m_frame_index_list = (ulong *) malloc(xd->xc_core.p2m_frames * sizeof(ulong))) == NULL) error(FATAL, "cannot malloc p2m_frame_list"); if (symbol_exists("p2m_mid_missing")) return x86_64_pvops_xendump_p2m_l3_create(xd); else return x86_64_pvops_xendump_p2m_l2_create(xd); } static int x86_64_pvops_xendump_p2m_l2_create(struct xendump_data *xd) { int i, idx, p; ulong kvaddr, *up; machdep->last_ptbl_read = BADADDR; kvaddr = symbol_value("p2m_top"); for (p = 0; p < xd->xc_core.p2m_frames; p += XEN_PFNS_PER_PAGE) { if (!x86_64_xendump_load_page(kvaddr, xd)) return FALSE; if (CRASHDEBUG(7)) x86_64_debug_dump_page(xd->ofp, xd->page, "contents of page:"); up = (ulong *)(xd->page); for (i = 0; i < XEN_PFNS_PER_PAGE; i++, up++) { if ((p+i) >= xd->xc_core.p2m_frames) break; if ((idx = x86_64_xendump_page_index(*up, xd)) == MFN_NOT_FOUND) return FALSE; xd->xc_core.p2m_frame_index_list[p+i] = idx; } kvaddr += PAGESIZE(); } machdep->last_ptbl_read = 0; return TRUE; } static int x86_64_pvops_xendump_p2m_l3_create(struct xendump_data *xd) { int i, idx, j, p2m_frame, ret = FALSE; ulong kvaddr, *p2m_mid, p2m_mid_missing, p2m_missing, *p2m_top; p2m_top = NULL; machdep->last_ptbl_read = BADADDR; kvaddr = symbol_value("p2m_missing"); if (!x86_64_xendump_load_page(kvaddr, xd)) goto err; p2m_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr)); kvaddr = symbol_value("p2m_mid_missing"); if (!x86_64_xendump_load_page(kvaddr, xd)) goto err; p2m_mid_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr)); kvaddr = symbol_value("p2m_top"); if (!x86_64_xendump_load_page(kvaddr, xd)) goto err; kvaddr = *(ulong *)(xd->page + PAGEOFFSET(kvaddr)); if (!x86_64_xendump_load_page(kvaddr, xd)) goto err; if (CRASHDEBUG(7)) x86_64_debug_dump_page(xd->ofp, xd->page, "contents of p2m_top page:"); p2m_top = (ulong *)GETBUF(PAGESIZE()); 
memcpy(p2m_top, xd->page, PAGESIZE()); for (i = 0; i < XEN_P2M_TOP_PER_PAGE; ++i) { p2m_frame = i * XEN_P2M_MID_PER_PAGE; if (p2m_frame >= xd->xc_core.p2m_frames) break; if (p2m_top[i] == p2m_mid_missing) continue; if (!x86_64_xendump_load_page(p2m_top[i], xd)) goto err; if (CRASHDEBUG(7)) x86_64_debug_dump_page(xd->ofp, xd->page, "contents of p2m_mid page:"); p2m_mid = (ulong *)xd->page; for (j = 0; j < XEN_P2M_MID_PER_PAGE; ++j, ++p2m_frame) { if (p2m_frame >= xd->xc_core.p2m_frames) break; if (p2m_mid[j] == p2m_missing) continue; idx = x86_64_xendump_page_index(p2m_mid[j], xd); if (idx == MFN_NOT_FOUND) goto err; xd->xc_core.p2m_frame_index_list[p2m_frame] = idx; } } machdep->last_ptbl_read = 0; ret = TRUE; err: if (p2m_top) FREEBUF(p2m_top); return ret; } static void x86_64_debug_dump_page(FILE *ofp, char *page, char *name) { int i; ulong *up; fprintf(ofp, "%s\n", name); up = (ulong *)page; for (i = 0; i < 256; i++) { fprintf(ofp, "%016lx: %016lx %016lx\n", (ulong)((i * 2) * sizeof(ulong)), *up, *(up+1)); up += 2; } } /* * Find the page associate with the kvaddr, and read its contents * into the passed-in buffer. 
*/
static char *
x86_64_xendump_load_page(ulong kvaddr, struct xendump_data *xd)
{
	ulong mfn;
	ulong *pgd, *pud, *pmd, *ptep;

	/*
	 *  Walk the guest's 4-level page tables -- PGD, PUD, PMD, PTE --
	 *  reading each directory page from the xendump via
	 *  xc_core_mfn_to_page(), and finally read the target page itself
	 *  into xd->page.  Assumes machdep->pgd already holds the PGD page
	 *  (loaded from cr3 by the p2m_create code -- see
	 *  x86_64_xendump_p2m_create).  Any read failure is FATAL.
	 */
	pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] pgd: %lx  mfn: %lx  pgd_index: %lx\n",
			kvaddr, *pgd, mfn, pgd_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, machdep->pud))
		error(FATAL, "cannot read/find pud page\n");
	/* remember which mfn is cached so later walks can skip the read */
	machdep->last_pud_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->pud,
			"contents of page upper directory page:");

	pud = ((ulong *)machdep->pud) + pud_index(kvaddr);
	mfn = ((*pud) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] pud: %lx  mfn: %lx  pud_index: %lx\n",
			kvaddr, *pud, mfn, pud_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, machdep->pmd))
		error(FATAL, "cannot read/find pmd page\n");
	machdep->last_pmd_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->pmd,
			"contents of page middle directory page:");

	pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] pmd: %lx  mfn: %lx  pmd_index: %lx\n",
			kvaddr, *pmd, mfn, pmd_index(kvaddr));

	if (!xc_core_mfn_to_page(mfn, machdep->ptbl))
		error(FATAL, "cannot read/find page table page\n");
	machdep->last_ptbl_read = mfn;

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, machdep->ptbl,
			"contents of page table page:");

	ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if (CRASHDEBUG(3))
		fprintf(xd->ofp,
		    "[%lx] ptep: %lx  mfn: %lx  pte_index: %lx\n",
			kvaddr, *ptep, mfn, pte_index(kvaddr));

	/* final level: the data page goes into the caller-visible buffer */
	if (!xc_core_mfn_to_page(mfn, xd->page))
		error(FATAL, "cannot read/find pte page\n");

	if (CRASHDEBUG(7))
		x86_64_debug_dump_page(xd->ofp, xd->page,
			"contents of page:");

	return xd->page;
}

/*
 *  Find the dumpfile page index associated with the kvaddr.
*/
static int
x86_64_xendump_page_index(ulong kvaddr, struct xendump_data *xd)
{
	int idx;
	ulong mfn;
	ulong *pgd, *pud, *pmd, *ptep;

	/*
	 *  Same 4-level walk as x86_64_xendump_load_page(), but instead of
	 *  reading the final page it returns the dumpfile page index of
	 *  the leaf mfn.  Each directory level is re-read only when its
	 *  mfn differs from the last_*_read cache; the cache variables
	 *  must therefore be updated in step with the reads below.
	 */
	pgd = ((ulong *)machdep->pgd) + pgd_index(kvaddr);
	mfn = ((*pgd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_pud_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->pud))
		error(FATAL, "cannot read/find pud page\n");
	machdep->last_pud_read = mfn;

	pud = ((ulong *)machdep->pud) + pud_index(kvaddr);
	mfn = ((*pud) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_pmd_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->pmd))
		error(FATAL, "cannot read/find pmd page\n");
	machdep->last_pmd_read = mfn;

	pmd = ((ulong *)machdep->pmd) + pmd_index(kvaddr);
	mfn = ((*pmd) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	if ((mfn != machdep->last_ptbl_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->ptbl))
		error(FATAL, "cannot read/find page table page\n");
	machdep->last_ptbl_read = mfn;

	ptep = ((ulong *)machdep->ptbl) + pte_index(kvaddr);
	mfn = ((*ptep) & PHYSICAL_PAGE_MASK) >> PAGESHIFT();

	/* INFO (not FATAL): caller checks for MFN_NOT_FOUND */
	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
		error(INFO, "cannot determine page index for %lx\n",
			kvaddr);

	return idx;
}

/*
 *  Pull the rsp from the cpu_user_regs struct in the header
 *  turn it into a task, and match it with the active_set.
 *  Unfortunately, the registers in the vcpu_guest_context
 *  are not necessarily those of the panic task, so for now
 *  let get_active_set_panic_task() get the right task.
*/
static ulong
x86_64_xendump_panic_task(struct xendump_data *xd)
{
	int i;
	ulong rsp;
	off_t offset;
	ulong task;

	/*
	 *  BUGFIX: the original guard tested INVALID_MEMBER(cpu_user_regs_esp)
	 *  but the code below reads OFFSET(cpu_user_regs_rsp); validate the
	 *  member that is actually used, consistent with
	 *  x86_64_get_xendump_regs().
	 */
	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
	    INVALID_MEMBER(cpu_user_regs_rsp))
		return NO_TASK;

	/* seek to the saved rsp within the dumpfile's vcpu context */
	offset = xd->xc_core.header.xch_ctxt_offset +
		(off_t)OFFSET(vcpu_guest_context_user_regs) +
		(off_t)OFFSET(cpu_user_regs_rsp);

	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		return NO_TASK;

	if (read(xd->xfd, &rsp, sizeof(ulong)) != sizeof(ulong))
		return NO_TASK;

	/*
	 *  Map the stack pointer to a task, and accept it only if that
	 *  task is in the active set.
	 */
	if (IS_KVADDR(rsp) && (task = stkptr_to_task(rsp))) {

		for (i = 0; i < NR_CPUS; i++) {
			if (task == tt->active_set[i]) {
				if (CRASHDEBUG(0))
					error(INFO,
					    "x86_64_xendump_panic_task: rsp: %lx -> task: %lx\n",
						rsp, task);
				return task;
			}
		}

		/*
		 *  BUGFIX: the original passed only "rsp" for two %lx
		 *  conversions (undefined behavior); "task" is now supplied.
		 */
		error(WARNING,
		    "x86_64_xendump_panic_task: rsp: %lx -> task: %lx (not active)\n",
			rsp, task);
	}

	return NO_TASK;
}

/*
 *  Because of an off-by-one vcpu bug in early xc_domain_dumpcore()
 *  instantiations, the registers in the vcpu_guest_context are not
 *  necessarily those of the panic task.  Furthermore, the rsp is
 *  seemingly unassociated with the task, presumably due a hypervisor
 *  callback, so only accept the contents if they refer to the panic
 *  task's stack.
*/
static void
x86_64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp)
{
	ulong task, xrip, xrsp;
	off_t offset;
	struct syment *sp;
	char *rip_symbol;
	int cpu;

	/* fall back to the generic path if the needed members are unknown */
	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
	    INVALID_MEMBER(cpu_user_regs_rip) ||
	    INVALID_MEMBER(cpu_user_regs_rsp))
		goto generic;

	/* read the saved rsp from the dumpfile's vcpu_guest_context */
	offset = xd->xc_core.header.xch_ctxt_offset +
		(off_t)OFFSET(vcpu_guest_context_user_regs) +
		(off_t)OFFSET(cpu_user_regs_rsp);
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		goto generic;
	if (read(xd->xfd, &xrsp, sizeof(ulong)) != sizeof(ulong))
		goto generic;

	/* ... and the saved rip */
	offset = xd->xc_core.header.xch_ctxt_offset +
		(off_t)OFFSET(vcpu_guest_context_user_regs) +
		(off_t)OFFSET(cpu_user_regs_rip);
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		goto generic;
	if (read(xd->xfd, &xrip, sizeof(ulong)) != sizeof(ulong))
		goto generic;

	/*
	 *  This works -- comes from smp_send_stop call in panic.
	 *  But xendump_panic_hook() will forestall this function
	 *  from being called (for now).
	 *
	 *  Only trust the saved registers if the rsp maps back to the
	 *  requested task's stack.
	 */
	if (IS_KVADDR(xrsp) && (task = stkptr_to_task(xrsp)) &&
	    (task == bt->task)) {
		if (CRASHDEBUG(1))
			fprintf(xd->ofp,
			    "hooks from vcpu_guest_context: rip: %lx rsp: %lx\n",
				xrip, xrsp);
		*rip = xrip;
		*rsp = xrsp;
		return;
	}

generic:
	machdep->get_stack_frame(bt, rip, rsp);

	/*
	 *  If this is an active task showing itself in schedule(),
	 *  then the thread_struct rsp is stale.  It has to be coming
	 *  from a callback via the interrupt stack.
	 */
	if (is_task_active(bt->task) && (rip_symbol = closest_symbol(*rip)) &&
	    (STREQ(rip_symbol, "thread_return") ||
	     STREQ(rip_symbol, "schedule"))) {
		cpu = bt->tc->processor;
		/* scan the cpu's interrupt stack from the top downwards */
		xrsp = machdep->machspec->stkinfo.ibase[cpu] +
			machdep->machspec->stkinfo.isize - sizeof(ulong);

		while (readmem(xrsp, KVADDR, &xrip,
	    	    sizeof(ulong), "xendump rsp", RETURN_ON_ERROR)) {
			/* accept the first smp_really_stop_cpu return address */
			if ((sp = value_search(xrip, (ulong *)&offset)) &&
			    STREQ(sp->name, "smp_really_stop_cpu") && offset) {
				*rip = xrip;
				*rsp = xrsp;
				if (CRASHDEBUG(1))
					error(INFO,
					    "switch thread_return to smp_call_function_interrupt\n");
				break;
			}
			xrsp -= sizeof(ulong);
			if (xrsp <= machdep->machspec->stkinfo.ibase[cpu])
				break;
		}
	}
}

/* for XEN Hypervisor analysis */

/* a kernel virtual address is anything inside the hypervisor's range */
static int
x86_64_is_kvaddr_hyper(ulong addr)
{
	return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END);
}

/*
 *  Derive the stack base for a vcpu from its physical cpu's rsp0,
 *  rounded down to a STACKSIZE() boundary.
 */
static ulong
x86_64_get_stackbase_hyper(ulong task)
{
	struct xen_hyper_vcpu_context *vcc;
	struct xen_hyper_pcpu_context *pcc;
	ulong rsp0, base;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(task);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	pcc = xen_hyper_id_to_pcpu_context(vcc->processor);
	if (!pcc)
		error(FATAL, "invalid pcpu number\n");

	rsp0 = pcc->sp.rsp0;
	base = rsp0 & (~(STACKSIZE() - 1));

	return base;
}

/* stack top is one STACKSIZE() above the base */
static ulong
x86_64_get_stacktop_hyper(ulong task)
{
	return x86_64_get_stackbase_hyper(task) + STACKSIZE();
}

#define EXCEPTION_STACKSIZE_HYPER (1024UL)

/*
 *  If rsp falls inside one of the pcpu's IST exception stacks, return
 *  that stack's base address; otherwise return 0.  Each IST entry
 *  points at the top of an EXCEPTION_STACKSIZE_HYPER-sized stack.
 */
static ulong
x86_64_in_exception_stack_hyper(ulong vcpu, ulong rsp)
{
	struct xen_hyper_vcpu_context *vcc;
	struct xen_hyper_pcpu_context *pcc;
	int i;
	ulong stackbase;

	vcc = xen_hyper_vcpu_to_vcpu_context(vcpu);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	pcc = xen_hyper_id_to_pcpu_context(vcc->processor);
	if (!pcc)
		error(FATAL, "invalid pcpu number\n");

	for (i = 0; i < XEN_HYPER_TSS_IST_MAX; i++) {
		if (pcc->ist[i] == 0) {
			continue;
		}
		stackbase = pcc->ist[i] - EXCEPTION_STACKSIZE_HYPER;
		if ((rsp & ~(EXCEPTION_STACKSIZE_HYPER - 1)) == stackbase) {
			return stackbase;
		}
	}

	return 0;
}

static
void
x86_64_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	struct xen_hyper_vcpu_context *vcc;
	int pcpu;
	ulong *regs;
	ulong rsp, rip;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	pcpu = vcc->processor;
	if (!xen_hyper_test_pcpu_id(pcpu)) {
		error(FATAL, "invalid pcpu number\n");
	}

	/* for "bt -t/-T"-style requests, just hand back the stack base */
	if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
		if (spp)
			*spp = x86_64_get_stackbase_hyper(bt->task);
		if (pcp)
			*pcp = 0;
		bt->flags &= ~BT_TEXT_SYMBOLS_ALL;
		return;
	}

	/* pull rsp/rip out of the pcpu's ELF note register set */
	regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr;
	rsp = XEN_HYPER_X86_64_NOTE_RSP(regs);
	rip = XEN_HYPER_X86_64_NOTE_RIP(regs);

	if (spp) {
		/* keep rsp if it's on an exception stack or within the
		 * vcpu's own stack bounds; otherwise fall back to the base */
		if (x86_64_in_exception_stack_hyper(bt->task, rsp))
			*spp = rsp;
		else if (rsp < x86_64_get_stackbase_hyper(bt->task) ||
			rsp >= x86_64_get_stacktop_hyper(bt->task))
			*spp = x86_64_get_stackbase_hyper(bt->task);
		else
			*spp = rsp;
	}
	if (pcp) {
		/* only report rip if it's a kernel text address */
		if (is_kernel_text(rip))
			*pcp = rip;
		else
			*pcp = 0;
	}
}

/*
 *  Print one backtrace line for the xen hypervisor back tracer.
 *  Returns BACKTRACE_ENTRY_IGNORED when the text address has no symbol,
 *  BACKTRACE_COMPLETE at syscall_enter, BACKTRACE_ENTRY_DISPLAYED
 *  otherwise.
 */
static int
x86_64_print_stack_entry_hyper(struct bt_info *bt, FILE *ofp, int level,
	int stkindex, ulong text)
{
	ulong rsp, offset;
	struct syment *sp;
	char *name, *name_plus_offset;
	int result;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	offset = 0;
	sp = value_search(text, &offset);
	if (!sp)
		return BACKTRACE_ENTRY_IGNORED;

	name = sp->name;

	/* "bt -s" wants symbol+offset rather than the bare name */
	if (offset && (bt->flags & BT_SYMBOL_OFFSET))
		name_plus_offset = value_to_symstr(text, buf2, bt->radix);
	else
		name_plus_offset = NULL;

	/* syscall_enter terminates the trace */
	if (STREQ(name, "syscall_enter"))
		result = BACKTRACE_COMPLETE;
	else
		result = BACKTRACE_ENTRY_DISPLAYED;

	rsp = bt->stackbase + (stkindex * sizeof(long));

	if ((bt->flags & BT_FULL)) {
		if (bt->frameptr)
			x86_64_display_full_frame(bt, rsp, ofp);
		bt->frameptr = rsp + sizeof(ulong);
	}

	fprintf(ofp, "%s#%d [%8lx] %s at %lx\n", level < 10 ? " " : "", level,
		rsp, name_plus_offset ? name_plus_offset : name, text);

	if (bt->flags & BT_LINE_NUMBERS) {
		get_line_number(text, buf1, FALSE);
		if (strlen(buf1))
			fprintf(ofp, " %s\n", buf1);
	}

	if (BT_REFERENCE_CHECK(bt))
		x86_64_do_bt_reference_check(bt, text, name);

	return result;
}

/*
 *  Dump the exception-frame register set found at the top of the
 *  backtrace stack buffer.  The frame is assumed to be the 21-word
 *  layout indexed below (up[16] = RIP, up[19] = RSP, etc.) --
 *  presumably the hypervisor's saved cpu_user_regs; the indices are
 *  trusted as-is from the original code.
 */
static void
x86_64_print_eframe_regs_hyper(struct bt_info *bt)
{
	ulong *up;
	ulong offset;
	struct syment *sp;

	up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]);
	up -= 21;

	fprintf(fp, " [exception RIP: ");
	if ((sp = value_search(up[16], &offset))) {
		fprintf(fp, "%s", sp->name);
		if (offset)
			fprintf(fp, (*gdb_output_radix == 16) ?
				"+0x%lx" : "+%ld", offset);
	} else
		fprintf(fp, "unknown or invalid address");
	fprintf(fp, "]\n");

	fprintf(fp, " RIP: %016lx RSP: %016lx RFLAGS: %08lx\n",
		up[16], up[19], up[18]);
	fprintf(fp, " RAX: %016lx RBX: %016lx RCX: %016lx\n",
		up[10], up[5], up[11]);
	fprintf(fp, " RDX: %016lx RSI: %016lx RDI: %016lx\n",
		up[12], up[13], up[14]);
	fprintf(fp, " RBP: %016lx R8: %016lx R9: %016lx\n",
		up[4], up[9], up[8]);
	fprintf(fp, " R10: %016lx R11: %016lx R12: %016lx\n",
		up[7], up[6], up[3]);
	fprintf(fp, " R13: %016lx R14: %016lx R15: %016lx\n",
		up[2], up[1], up[0]);
	fprintf(fp, " ORIG_RAX: %016lx CS: %04lx SS: %04lx\n",
		up[15], up[17], up[20]);
	fprintf(fp, "--- ---\n");
}

/*
 *  simple back tracer for xen hypervisor
 *  irq stack does not exist. so relative easy.
*/ static void x86_64_simple_back_trace_cmd_hyper(struct bt_info *bt_in) { int i, level, done; ulong rsp, estack, stacktop; ulong *up; FILE *ofp; struct bt_info bt_local, *bt; char ebuf[EXCEPTION_STACKSIZE_HYPER]; bt = &bt_local; BCOPY(bt_in, bt, sizeof(struct bt_info)); if (bt->flags & BT_FRAMESIZE_DEBUG) { error(INFO, "-F not support\n"); return; } level = 0; done = FALSE; bt->call_target = NULL; rsp = bt->stkptr; if (!rsp) { error(INFO, "cannot determine starting stack pointer\n"); return; } if (BT_REFERENCE_CHECK(bt)) ofp = pc->nullfp; else ofp = fp; while ((estack = x86_64_in_exception_stack_hyper(bt->task, rsp))) { bt->flags |= BT_EXCEPTION_STACK; bt->stackbase = estack; bt->stacktop = estack + EXCEPTION_STACKSIZE_HYPER; bt->stackbuf = ebuf; if (!readmem(bt->stackbase, KVADDR, bt->stackbuf, bt->stacktop - bt->stackbase, "exception stack contents", RETURN_ON_ERROR)) error(FATAL, "read of exception stack at %lx failed\n", bt->stackbase); stacktop = bt->stacktop - 168; for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; switch (x86_64_print_stack_entry_hyper(bt, ofp, level, i,*up)) { case BACKTRACE_ENTRY_DISPLAYED: level++; break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } if (!BT_REFERENCE_CHECK(bt)) x86_64_print_eframe_regs_hyper(bt); up = (ulong *)(&bt->stackbuf[bt->stacktop - bt->stackbase]); up -= 2; rsp = bt->stkptr = *up; up -= 3; bt->instptr = *up; done = FALSE; bt->frameptr = 0; } if (bt->flags & BT_EXCEPTION_STACK) { bt->flags &= ~BT_EXCEPTION_STACK; bt->stackbase = bt_in->stackbase; bt->stacktop = bt_in->stacktop; bt->stackbuf = bt_in->stackbuf; } for (i = (rsp - bt->stackbase)/sizeof(ulong); !done && (rsp < bt->stacktop); i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; switch (x86_64_print_stack_entry_hyper(bt, 
ofp, level, i,*up)) { case BACKTRACE_ENTRY_DISPLAYED: level++; break; case BACKTRACE_ENTRY_IGNORED: break; case BACKTRACE_COMPLETE: done = TRUE; break; } } } static void x86_64_init_hyper(int when) { switch (when) { case PRE_SYMTAB: machdep->verify_symbol = x86_64_verify_symbol; machdep->machspec = &x86_64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = machdep->pagesize * 8; if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; if (machdep->cmdline_args[0]) parse_cmdline_args(); break; case PRE_GDB: machdep->machspec->page_offset = PAGE_OFFSET_XEN_HYPER; machdep->kvbase = (ulong)HYPERVISOR_VIRT_START; machdep->identity_map_base = (ulong)PAGE_OFFSET_XEN_HYPER; machdep->is_kvaddr = x86_64_is_kvaddr_hyper; machdep->is_uvaddr = x86_64_is_uvaddr; machdep->eframe_search = x86_64_eframe_search; machdep->back_trace = x86_64_simple_back_trace_cmd_hyper; machdep->processor_speed = x86_64_processor_speed; machdep->kvtop = x86_64_kvtop; machdep->get_task_pgd = x86_64_get_task_pgd; machdep->get_stack_frame = x86_64_get_stack_frame_hyper; machdep->get_stackbase = x86_64_get_stackbase_hyper; machdep->get_stacktop = x86_64_get_stacktop_hyper; machdep->translate_pte = x86_64_translate_pte; machdep->memory_size = 
xen_hyper_x86_memory_size; /* KAK add */ machdep->is_task_addr = x86_64_is_task_addr; machdep->dis_filter = x86_64_dis_filter; machdep->cmd_mach = x86_64_cmd_mach; machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus; /* KAK add */ machdep->line_number_hooks = x86_64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = x86_64_init_kernel_pgd; machdep->clear_machdep_cache = x86_64_clear_machdep_cache; /* machdep table for Xen Hypervisor */ xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init; break; case POST_GDB: XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86"); if (symbol_exists("per_cpu__tss_page")) { XEN_HYPER_STRUCT_SIZE_INIT(tss, "tss64"); XEN_HYPER_ASSIGN_OFFSET(tss_rsp0) = MEMBER_OFFSET("tss64", "rsp0"); XEN_HYPER_MEMBER_OFFSET_INIT(tss_ist, "tss64", "ist"); } else { XEN_HYPER_STRUCT_SIZE_INIT(tss, "tss_struct"); XEN_HYPER_MEMBER_OFFSET_INIT(tss_ist, "tss_struct", "ist"); if (MEMBER_EXISTS("tss_struct", "__blh")) { XEN_HYPER_ASSIGN_OFFSET(tss_rsp0) = MEMBER_OFFSET("tss_struct", "__blh") + sizeof(short unsigned int); } else { XEN_HYPER_ASSIGN_OFFSET(tss_rsp0) = MEMBER_OFFSET("tss_struct", "rsp0"); } } if (symbol_exists("cpu_data")) { xht->cpu_data_address = symbol_value("cpu_data"); } /* KAK Can this be calculated? 
*/ if (!machdep->hz) { machdep->hz = XEN_HYPER_HZ; } break; case POST_INIT: break; } } struct framesize_cache { ulong textaddr; int framesize; int exception; }; static struct framesize_cache *x86_64_framesize_cache = NULL; static int framesize_cache_entries = 0; #define FRAMESIZE_QUERY (1) #define FRAMESIZE_ENTER (2) #define FRAMESIZE_DUMP (3) #define FRAMESIZE_CACHE_INCR (50) static int x86_64_framesize_cache_resize(void) { int i; struct framesize_cache *new_fc, *fc; if ((new_fc = realloc(x86_64_framesize_cache, (framesize_cache_entries+FRAMESIZE_CACHE_INCR) * sizeof(struct framesize_cache))) == NULL) { error(INFO, "cannot realloc x86_64_framesize_cache space!\n"); return FALSE; } fc = new_fc + framesize_cache_entries; for (i = framesize_cache_entries; i < (framesize_cache_entries+FRAMESIZE_CACHE_INCR); fc++, i++) { fc->textaddr = 0; fc->framesize = 0; fc->exception = 0; } x86_64_framesize_cache = new_fc; framesize_cache_entries += FRAMESIZE_CACHE_INCR; return TRUE; } ulong *x86_64_framesize_no_cache = NULL; static int framesize_no_cache_entries = 0; #define FRAMESIZE_NO_CACHE_INCR (10) static int x86_64_do_not_cache_framesize(struct syment *sp, ulong textaddr) { int c, instr, arg; char buf[BUFSIZE]; char *arglist[MAXARGS]; ulong *new_fnc; if (x86_64_framesize_no_cache[framesize_no_cache_entries-1]) { if ((new_fnc = realloc(x86_64_framesize_no_cache, (framesize_no_cache_entries+FRAMESIZE_NO_CACHE_INCR) * sizeof(ulong))) == NULL) { error(INFO, "cannot realloc x86_64_framesize_no_cache space!\n"); return FALSE; } x86_64_framesize_no_cache = new_fnc; for (c = framesize_no_cache_entries; c < framesize_no_cache_entries + FRAMESIZE_NO_CACHE_INCR; c++) x86_64_framesize_no_cache[c] = 0; framesize_no_cache_entries += FRAMESIZE_NO_CACHE_INCR; } for (c = 0; c < framesize_no_cache_entries; c++) if (x86_64_framesize_no_cache[c] == sp->value) return TRUE; if (!accessible(sp->value)) return FALSE; sprintf(buf, "disassemble 0x%lx,0x%lx", sp->value, textaddr); open_tmpfile2(); if 
(!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { close_tmpfile2(); return FALSE; } rewind(pc->tmpfile2); instr = arg = -1; while (fgets(buf, BUFSIZE, pc->tmpfile2)) { if (STRNEQ(buf, "Dump of assembler code")) continue; else if (STRNEQ(buf, "End of assembler dump.")) break; else if ((c = parse_line(buf, arglist)) < 3) continue; if (instr == -1) { if (LASTCHAR(arglist[0]) == ':') { instr = 1; arg = 2; } else { instr = 2; arg = 3; } } if (STREQ(arglist[instr], "and") && STREQ(arglist[arg], "$0xfffffffffffffff0,%rsp")) { close_tmpfile2(); for (c = 0; c < framesize_no_cache_entries; c++) { if (x86_64_framesize_no_cache[c] == 0) { x86_64_framesize_no_cache[c] = sp->value; break; } } return TRUE; } if (STREQ(arglist[instr], "callq") || STREQ(arglist[instr], "call")) break; } close_tmpfile2(); return FALSE; } static int x86_64_framesize_cache_func(int cmd, ulong textaddr, int *framesize, int exception, struct syment *sp) { int i, n; struct framesize_cache *fc; char buf[BUFSIZE]; if (!x86_64_framesize_cache) { framesize_cache_entries = FRAMESIZE_CACHE_INCR; if ((x86_64_framesize_cache = calloc(framesize_cache_entries, sizeof(struct framesize_cache))) == NULL) error(FATAL, "cannot calloc x86_64_framesize_cache space!\n"); framesize_no_cache_entries = FRAMESIZE_NO_CACHE_INCR; if ((x86_64_framesize_no_cache = calloc(framesize_no_cache_entries, sizeof(ulong))) == NULL) error(FATAL, "cannot calloc x86_64_framesize_no_cache space!\n"); } switch (cmd) { case FRAMESIZE_QUERY: fc = &x86_64_framesize_cache[0]; for (i = 0; i < framesize_cache_entries; i++, fc++) { if (fc->textaddr == textaddr) { if (fc->exception != exception) return FALSE; *framesize = fc->framesize; return TRUE; } } return FALSE; case FRAMESIZE_ENTER: if (sp && x86_64_do_not_cache_framesize(sp, textaddr)) return *framesize; retry: fc = &x86_64_framesize_cache[0]; for (i = 0; i < framesize_cache_entries; i++, fc++) { if ((fc->textaddr == 0) || (fc->textaddr == textaddr)) { if (*framesize == -1) { 
fc->textaddr = 0; fc->framesize = 0; fc->exception = 0; for (n = i+1; n < framesize_cache_entries; i++, n++) x86_64_framesize_cache[i] = x86_64_framesize_cache[n]; return 0; } fc->textaddr = textaddr; fc->framesize = *framesize; fc->exception = exception; return fc->framesize; } } if (x86_64_framesize_cache_resize()) goto retry; return *framesize; case FRAMESIZE_DUMP: fprintf(fp, "framesize_cache_entries:\n"); fc = &x86_64_framesize_cache[0]; for (i = 0; i < framesize_cache_entries; i++, fc++) { if (fc->textaddr == 0) { if (i < (framesize_cache_entries-1)) { fprintf(fp, " [%d-%d]: (unused)\n", i, framesize_cache_entries-1); } break; } fprintf(fp, " [%3d]: %lx %3d %s (%s)\n", i, fc->textaddr, fc->framesize, fc->exception ? "EX" : "CF", value_to_symstr(fc->textaddr, buf, 0)); } fprintf(fp, "\nframesize_no_cache_entries:\n"); for (i = 0; i < framesize_no_cache_entries; i++) { if (x86_64_framesize_no_cache[i]) fprintf(fp, " [%3d]: %lx (%s)\n", i, x86_64_framesize_no_cache[i], value_to_symstr(x86_64_framesize_no_cache[i], buf, 0)); else { fprintf(fp, " [%d-%d]: (unused)\n", i, framesize_no_cache_entries-1); break; } } break; } return TRUE; } ulong x86_64_get_framepointer(struct bt_info *bt, ulong rsp) { ulong stackptr, framepointer, retaddr; framepointer = 0; stackptr = rsp - sizeof(ulong); if (!INSTACK(stackptr, bt)) return 0; if (!readmem(stackptr, KVADDR, &framepointer, sizeof(ulong), "framepointer", RETURN_ON_ERROR|QUIET)) return 0; if (!INSTACK(framepointer, bt)) return 0; if (framepointer <= (rsp+sizeof(ulong))) return 0; if (!readmem(framepointer + sizeof(ulong), KVADDR, &retaddr, sizeof(ulong), "return address", RETURN_ON_ERROR|QUIET)) return 0; if (!is_kernel_text(retaddr)) return 0; return framepointer; } int search_for_eframe_target_caller(struct bt_info *bt, ulong stkptr, int *framesize) { int i; ulong *up, offset, rsp; struct syment *sp1, *sp2; char *called_function; if ((sp1 = value_search(bt->eframe_ip, &offset))) called_function = sp1->name; else return 
FALSE; rsp = stkptr; for (i = (rsp - bt->stackbase)/sizeof(ulong); rsp < bt->stacktop; i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text(*up)) continue; if (!(sp1 = value_search(*up, &offset))) continue; if (!offset && !(bt->flags & BT_FRAMESIZE_DISABLE)) continue; /* * Get the syment of the function that the text * routine above called before leaving its return * address on the stack -- if it can be determined. */ if ((sp2 = x86_64_function_called_by((*up)-5))) { if (STREQ(sp2->name, called_function)) { if (CRASHDEBUG(1)) { fprintf(fp, "< %lx/%s rsp: %lx caller: %s >\n", bt->eframe_ip, called_function, stkptr, sp1->name); } *framesize = rsp - stkptr; return TRUE; } } } return FALSE; } #define BT_FRAMESIZE_IGNORE_MASK \ (BT_OLD_BACK_TRACE|BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL|BT_FRAMESIZE_DISABLE) static int x86_64_get_framesize(struct bt_info *bt, ulong textaddr, ulong rsp, char *stack_ptr) { int c, framesize, instr, arg, max; struct syment *sp; long max_instructions; ulong offset; char buf[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS]; ulong locking_func, textaddr_save, current, framepointer; char *p1, *p2; int reterror; int arg_exists; int exception; orc_entry *korc; if (!(bt->flags & BT_FRAMESIZE_DEBUG)) { if ((bt->flags & BT_FRAMESIZE_IGNORE_MASK) || (kt->flags & USE_OLD_BT)) return 0; } if (!(sp = value_search(textaddr, &offset))) { if (!(bt->flags & BT_FRAMESIZE_DEBUG)) bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } exception = bt->eframe_ip == textaddr ? TRUE : FALSE; if ((bt->flags & BT_EFRAME_TARGET) && search_for_eframe_target_caller(bt, rsp, &framesize)) return framesize; if (!(bt->flags & BT_FRAMESIZE_DEBUG) && x86_64_framesize_cache_func(FRAMESIZE_QUERY, textaddr, &framesize, exception, NULL)) { if (framesize == -1) bt->flags |= BT_FRAMESIZE_DISABLE; return framesize; } /* * Bait and switch an incoming .text.lock address * with the containing function's address. 
*/ if (STRNEQ(sp->name, ".text.lock.") && (locking_func = text_lock_function(sp->name, bt, textaddr))) { if (!(sp = value_search(locking_func, &offset))) { bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } textaddr_save = textaddr; textaddr = locking_func; } else textaddr_save = 0; /* * As of 2.6.29, "irq_entries_start" replaced the range of IRQ * entry points named IRQ0x00_interrupt through IRQ0x##_interrupt. * Each IRQ entry point in the list of non-symbolically-named * entry stubs consists of a single pushq and a jmp. */ if (STREQ(sp->name, "irq_entries_start")) { #define PUSH_IMM8 0x6a if (readmem(textaddr, KVADDR, &instr, sizeof(short), "irq_entries_start instruction", QUIET|RETURN_ON_ERROR) && ((instr & 0xff) == PUSH_IMM8)) framesize = 0; else framesize = 8; return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize, exception, NULL)); } if ((machdep->flags & FRAMEPOINTER) && rsp && !exception && !textaddr_save) { framepointer = x86_64_get_framepointer(bt, rsp); if (CRASHDEBUG(3)) { if (framepointer) fprintf(fp, " rsp: %lx framepointer: %lx -> %ld\n", rsp, framepointer, framepointer - rsp); else fprintf(fp, " rsp: %lx framepointer: (unknown)\n", rsp); } if (framepointer) { framesize = framepointer - rsp; return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize, 0, sp)); } } if ((sp->value >= kt->init_begin) && (sp->value < kt->init_end)) return 0; if ((machdep->flags & ORC) && (korc = orc_find(textaddr))) { if (CRASHDEBUG(1)) { struct ORC_data *orc = &machdep->machspec->orc; fprintf(fp, "rsp: %lx textaddr: %lx -> spo: %d bpo: %d spr: %d bpr: %d type: %d", rsp, textaddr, korc->sp_offset, korc->bp_offset, korc->sp_reg, korc->bp_reg, korc->type); if (orc->has_signal) fprintf(fp, " signal: %d", korc->signal); if (orc->has_end) fprintf(fp, " end: %d", korc->end); fprintf(fp, "\n"); } if (korc->type == ORC_TYPE_CALL) { ulong prev_sp = 0, prev_bp = 0; framesize = -1; if (korc->sp_reg == ORC_REG_SP) { framesize = (korc->sp_offset - 8); /* 
rsp points to a return address, so +8 to use sp_offset */ prev_sp = (rsp + 8) + korc->sp_offset; if (CRASHDEBUG(1)) fprintf(fp, "rsp: %lx prev_sp: %lx framesize: %d\n", rsp, prev_sp, framesize); } else if ((korc->sp_reg == ORC_REG_BP) && bt->bptr && INSTACK(bt->bptr, bt)) { prev_sp = bt->bptr + korc->sp_offset; framesize = (prev_sp - (rsp + 8) - 8); if (CRASHDEBUG(1)) fprintf(fp, "rsp: %lx rbp: %lx prev_sp: %lx framesize: %d\n", rsp, bt->bptr, prev_sp, framesize); } if ((korc->bp_reg == ORC_REG_PREV_SP) && prev_sp) { prev_bp = prev_sp + korc->bp_offset; if (stack_ptr && INSTACK(prev_bp, bt)) { bt->bptr = ULONG(stack_ptr + (prev_bp - rsp)); if (CRASHDEBUG(1)) fprintf(fp, "rsp: %lx prev_sp: %lx prev_bp: %lx -> %lx\n", rsp, prev_sp, prev_bp, bt->bptr); } else bt->bptr = 0; } else if ((korc->bp_reg != ORC_REG_UNDEFINED)) bt->bptr = 0; if (framesize >= 0) /* Do not cache this, possibly it may be variable. */ return framesize; } } framesize = max = 0; max_instructions = textaddr - sp->value; instr = arg = -1; open_tmpfile2(); sprintf(buf, "x/%ldi 0x%lx", max_instructions, sp->value); if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { close_tmpfile2(); bt->flags |= BT_FRAMESIZE_DISABLE; return 0; } rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { if (STRNEQ(buf, "=>")) shift_string_left(buf, 2); strcpy(buf2, buf); if (CRASHDEBUG(3)) fprintf(fp, "%s", buf2); c = parse_line(buf, arglist); if (instr == -1) { /* * Check whether are * in the output string. 
*/ if (LASTCHAR(arglist[0]) == ':') { instr = 1; arg = 2; } else { instr = 2; arg = 3; } } if (c < (instr+1)) continue; else if (c >= (arg+1)) arg_exists = TRUE; else arg_exists = FALSE; reterror = 0; current = htol(strip_ending_char(arglist[0], ':'), RETURN_ON_ERROR, &reterror); if (reterror) continue; if (current > textaddr) break; else if ((current == textaddr) && !exception) break; if (STRNEQ(arglist[instr], "push")) { framesize += 8; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); max = framesize; } else if (STRNEQ(arglist[instr], "pop") || STRNEQ(arglist[instr], "leaveq")) { if (framesize > 0) framesize -= 8; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); } else if (arg_exists && STRNEQ(arglist[instr], "add") && (p1 = strstr(arglist[arg], ",%rsp"))) { *p1 = NULLCHAR; p2 = arglist[arg]; reterror = 0; offset = htol(p2+1, RETURN_ON_ERROR, &reterror); if (reterror) continue; if (framesize > 0) framesize -= offset; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); } else if (arg_exists && STRNEQ(arglist[instr], "sub") && (p1 = strstr(arglist[arg], ",%rsp"))) { *p1 = NULLCHAR; p2 = arglist[arg]; reterror = 0; offset = htol(p2+1, RETURN_ON_ERROR|QUIET, &reterror); if (reterror) continue; framesize += offset; max = framesize; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: %d]\n", strip_linefeeds(buf2), framesize); } else if (STRNEQ(arglist[instr], "retq") || STRNEQ(arglist[instr], "ret")) { if (!exception) { framesize = max; if (CRASHDEBUG(2) || (bt->flags & BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize restored to: %d]\n", strip_linefeeds(buf2), max); } } else if (STRNEQ(arglist[instr], "retq_NOT_CHECKED")) { bt->flags |= BT_FRAMESIZE_DISABLE; framesize = -1; if (CRASHDEBUG(2) || (bt->flags & 
BT_FRAMESIZE_DEBUG)) fprintf(fp, "%s\t[framesize: DISABLED]\n", strip_linefeeds(buf2)); break; } } close_tmpfile2(); if (textaddr_save) textaddr = textaddr_save; return (x86_64_framesize_cache_func(FRAMESIZE_ENTER, textaddr, &framesize, exception, NULL)); } static void x86_64_framesize_debug(struct bt_info *bt) { int framesize; int exception; exception = (bt->flags & BT_EFRAME_SEARCH); switch (bt->hp->esp) { case 1: /* "dump" */ x86_64_framesize_cache_func(FRAMESIZE_DUMP, 0, NULL, 0, NULL); break; case 0: if (bt->hp->eip) { /* clear one entry */ framesize = -1; x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, &framesize, exception, NULL); } else { /* clear all entries */ BZERO(&x86_64_framesize_cache[0], sizeof(struct framesize_cache)*framesize_cache_entries); BZERO(&x86_64_framesize_no_cache[0], sizeof(ulong)*framesize_no_cache_entries); fprintf(fp, "framesize caches cleared\n"); } break; case -1: if (!bt->hp->eip) error(INFO, "x86_64_framesize_debug: ignoring command\n"); else x86_64_get_framesize(bt, bt->hp->eip, 0, NULL); break; case -3: machdep->flags |= FRAMEPOINTER; BZERO(&x86_64_framesize_cache[0], sizeof(struct framesize_cache)*framesize_cache_entries); BZERO(&x86_64_framesize_no_cache[0], sizeof(ulong)*framesize_no_cache_entries); fprintf(fp, "framesize caches cleared and FRAMEPOINTER turned ON\n"); break; case -4: machdep->flags &= ~FRAMEPOINTER; BZERO(&x86_64_framesize_cache[0], sizeof(struct framesize_cache)*framesize_cache_entries); BZERO(&x86_64_framesize_no_cache[0], sizeof(ulong)*framesize_no_cache_entries); fprintf(fp, "framesize caches cleared and FRAMEPOINTER turned OFF\n"); break; case -5: if (!bt->hp->eip) error(INFO, "x86_64_framesize_debug: ignoring command (no ip)\n"); else orc_dump(bt->hp->eip); break; default: if (bt->hp->esp > 1) { framesize = bt->hp->esp; if (bt->hp->eip) x86_64_framesize_cache_func(FRAMESIZE_ENTER, bt->hp->eip, &framesize, exception, NULL); } else error(INFO, "x86_64_framesize_debug: ignoring command\n"); 
break; } } /* * The __schedule() framesize should only have to be calculated * one time, but always verify that the previously-determined * framesize applies to this task, and if it doesn't, recalculate. * Update the bt->instptr here, and return the new stack pointer. */ static ulong __schedule_frame_adjust(ulong rsp_in, struct bt_info *bt) { int i, found; ulong rsp, *up; struct syment *sp; int framesize; if (!INSTACK(rsp_in, bt)) error(FATAL, "invalid RSP: %lx bt->stackbase/stacktop: %lx/%lx cpu: %d\n", rsp_in, bt->stackbase, bt->stacktop, bt->tc->processor); if (x86_64_framesize_cache_func(FRAMESIZE_QUERY, machdep->machspec->thread_return, &framesize, 0, NULL)) { rsp = rsp_in + framesize; i = (rsp - bt->stackbase)/sizeof(ulong); up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (is_kernel_text_offset(*up) && (sp = x86_64_function_called_by((*up)-5)) && STREQ(sp->name, "__schedule")) { bt->instptr = *up; return (rsp); } } rsp = rsp_in; for (found = FALSE, i = (rsp - bt->stackbase)/sizeof(ulong); rsp < bt->stacktop; i++, rsp += sizeof(ulong)) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (!is_kernel_text_offset(*up)) continue; if ((sp = x86_64_function_called_by((*up)-5)) && (STREQ(sp->name, "__schedule"))) { framesize = (int)(rsp - rsp_in); bt->instptr = *up; x86_64_framesize_cache_func(FRAMESIZE_ENTER, machdep->machspec->thread_return, &framesize, 0, NULL); bt->instptr = *up; found = TRUE; break; } } if (CRASHDEBUG(1) && !found) error(INFO, "cannot determine __schedule() caller\n"); return (found ? rsp : rsp_in); } static void x86_64_get_active_set(void) { int c; ulong current; struct task_context *actctx, *curctx; struct machine_specific *ms; if (ACTIVE()) return; ms = machdep->machspec; if (!ms->current) return; if (CRASHDEBUG(1)) fprintf(fp, "x86_64_get_active_set: runqueue vs. %s\n", VALID_STRUCT(x8664_pda) ? 
"x8664_pda" : "current_task"); for (c = 0; c < kt->cpus; c++) { if (!tt->active_set[c]) continue; current = ms->current[c]; curctx = task_to_context(current); actctx = task_to_context(tt->active_set[c]); if (CRASHDEBUG(1)) fprintf(fp, " [%d]: %016lx %016lx %s%s\n", c, tt->active_set[c], current, curctx ? "" : "(invalid task)", curctx && (curctx->processor != c) ? "(wrong processor)" : ""); if (!curctx || (curctx->processor != c)) continue; if (tt->active_set[c] == current) continue; if (tt->active_set[c] == tt->panic_task) continue; if (stkptr_to_task(ms->crash_nmi_rsp[c]) == curctx->task) tt->active_set[c] = tt->panic_threads[c] = current; error(INFO, "inconsistent active task indications for CPU %d:\n", c); error(CONT, " %srunqueue: %lx \"%s\" (default)\n", VALID_STRUCT(x8664_pda) ? "" : " ", actctx->task, actctx->comm); error(CONT, "%s: %lx \"%s\" %s\n%s", VALID_STRUCT(x8664_pda) ? " x8664_pda" : "current_task", current, curctx->comm, tt->active_set[c] == current ? "(reassigned)" : "", CRASHDEBUG(1) ? "" : "\n"); } } static int compare_kvaddr(const void *v1, const void *v2) { struct vaddr_range *r1, *r2; r1 = (struct vaddr_range *)v1; r2 = (struct vaddr_range *)v2; return (r1->start < r2->start ? -1 : r1->start == r2->start ? 0 : 1); } /* * Populate the vaddr_range array with a sorted list of * kernel virtual address ranges. The caller is responsible * for ensuring that the array is large enough, so it should * first call this function with a NULL vaddr_range pointer, * which will return the count of kernel virtual address * space ranges. 
*/ static int x86_64_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; ulong start; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->machspec->page_offset; vrp[cnt++].end = vt->high_memory; vrp[cnt].type = KVADDR_START_MAP; vrp[cnt].start = __START_KERNEL_map; vrp[cnt++].end = kt->end; vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = machdep->machspec->vmalloc_start_addr; vrp[cnt++].end = last_vmalloc_address(); /* * Verify that these two regions stand alone. */ if (st->mods_installed) { start = lowest_module_address(); if (!in_vmlist_segment(start)) { vrp[cnt].type = KVADDR_MODULES; vrp[cnt].start = start; vrp[cnt++].end = roundup(highest_module_address(), PAGESIZE()); } } if (machdep->flags & VMEMMAP) { start = machdep->machspec->vmemmap_vaddr; if (!in_vmlist_segment(start)) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = start; vrp[cnt++].end = vt->node_table[vt->numnodes-1].mem_map + (vt->node_table[vt->numnodes-1].size * SIZE(page)); } } qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); return cnt; } #define CHECK_REG_CASE(R, r) \ case R##_REGNUM: \ if (!NUM_IN_BITMAP(ur_bitmap->bitmap, \ REG_SEQ(x86_64_user_regs_struct, r))) { \ if (!sid) \ FREEBUF(ur_bitmap); \ return FALSE; \ } \ break; #define COPY_REG_CASE(R, r) \ case R##_REGNUM: \ if (size != sizeof(ur_bitmap->ur.r)) \ break; \ memcpy(value, &ur_bitmap->ur.r, size); \ ret = TRUE; \ break; static int x86_64_get_current_task_reg(int regno, const char *name, int size, void *value, int sid) { struct bt_info bt_info, bt_setup; struct task_context *tc; struct user_regs_bitmap_struct *ur_bitmap; ulong ip, sp; bool ret = FALSE; switch (regno) { case RAX_REGNUM ... GS_REGNUM: case FS_BASE_REGNUM ... 
ORIG_RAX_REGNUM: break; default: return FALSE; } tc = CURRENT_CONTEXT(); if (!tc) return FALSE; /* Non zero stack ID, use extra stacks regs */ if (sid && sid <= extra_stacks_idx) { ur_bitmap = extra_stacks_regs[sid - 1]; goto get_sub; } /* * Task is active, grab CPU's registers */ if (is_task_active(tc->task) && VMSS_DUMPFILE()) return vmware_vmss_get_cpu_reg(tc->processor, regno, name, size, value); BZERO(&bt_setup, sizeof(struct bt_info)); clone_bt_info(&bt_setup, &bt_info, tc); if (bt_info.stackbase == 0) return FALSE; fill_stackbuf(&bt_info); // reusing the get_dumpfile_regs function to get pt regs structure get_dumpfile_regs(&bt_info, &sp, &ip); if (bt_info.stackbuf) FREEBUF(bt_info.stackbuf); ur_bitmap = (struct user_regs_bitmap_struct *)bt_info.machdep; if (!ur_bitmap) return FALSE; /* Get all registers from elf notes*/ if (!bt_info.need_free) { goto get_all; } /* Get subset registers from stack frame*/ get_sub: switch (regno) { CHECK_REG_CASE(RAX, ax); CHECK_REG_CASE(RBX, bx); CHECK_REG_CASE(RCX, cx); CHECK_REG_CASE(RDX, dx); CHECK_REG_CASE(RSI, si); CHECK_REG_CASE(RDI, di); CHECK_REG_CASE(RBP, bp); CHECK_REG_CASE(RSP, sp); CHECK_REG_CASE(R8, r8); CHECK_REG_CASE(R9, r9); CHECK_REG_CASE(R10, r10); CHECK_REG_CASE(R11, r11); CHECK_REG_CASE(R12, r12); CHECK_REG_CASE(R13, r13); CHECK_REG_CASE(R14, r14); CHECK_REG_CASE(R15, r15); CHECK_REG_CASE(RIP, ip); CHECK_REG_CASE(EFLAGS, flags); CHECK_REG_CASE(CS, cs); CHECK_REG_CASE(SS, ss); CHECK_REG_CASE(DS, ds); CHECK_REG_CASE(ES, es); CHECK_REG_CASE(FS, fs); CHECK_REG_CASE(GS, gs); CHECK_REG_CASE(FS_BASE, fs_base); CHECK_REG_CASE(GS_BASE, gs_base); CHECK_REG_CASE(ORIG_RAX, orig_ax); } get_all: switch (regno) { COPY_REG_CASE(RAX, ax); COPY_REG_CASE(RBX, bx); COPY_REG_CASE(RCX, cx); COPY_REG_CASE(RDX, dx); COPY_REG_CASE(RSI, si); COPY_REG_CASE(RDI, di); COPY_REG_CASE(RBP, bp); COPY_REG_CASE(RSP, sp); COPY_REG_CASE(R8, r8); COPY_REG_CASE(R9, r9); COPY_REG_CASE(R10, r10); COPY_REG_CASE(R11, r11); COPY_REG_CASE(R12, r12); 
COPY_REG_CASE(R13, r13); COPY_REG_CASE(R14, r14); COPY_REG_CASE(R15, r15); COPY_REG_CASE(RIP, ip); COPY_REG_CASE(EFLAGS, flags); COPY_REG_CASE(CS, cs); COPY_REG_CASE(SS, ss); COPY_REG_CASE(DS, ds); COPY_REG_CASE(ES, es); COPY_REG_CASE(FS, fs); COPY_REG_CASE(GS, gs); COPY_REG_CASE(FS_BASE, fs_base); COPY_REG_CASE(GS_BASE, gs_base); COPY_REG_CASE(ORIG_RAX, orig_ax); } if (!sid && bt_info.need_free) { FREEBUF(ur_bitmap); bt_info.need_free = FALSE; } return ret; } /* * Determine the physical memory range reserved for GART. */ static void GART_init(void) { char resource[BUFSIZE]; struct syment *sp; struct machine_specific *ms; if (!(sp = kernel_symbol_search("gart_resource"))) return; STRUCT_SIZE_INIT(resource, "resource"); MEMBER_OFFSET_INIT(resource_start, "resource", "start"); MEMBER_OFFSET_INIT(resource_end, "resource", "end"); if (VALID_STRUCT(resource) && VALID_MEMBER(resource_start) && VALID_MEMBER(resource_end)) { if (!readmem(sp->value, KVADDR, resource, SIZE(resource), "GART resource", RETURN_ON_ERROR)) return; ms = machdep->machspec; ms->GART_start = ULONG(resource + OFFSET(resource_start)); ms->GART_end = ULONG(resource + OFFSET(resource_end)); if (ms->GART_start && ms->GART_end) { machdep->flags |= GART_REGION; if (CRASHDEBUG(1)) fprintf(fp, "GART address range: %lx - %lx\n", ms->GART_start, ms->GART_end); } } } static int x86_64_verify_paddr(uint64_t paddr) { struct machine_specific *ms; if (machdep->flags & GART_REGION) { ms = machdep->machspec; if (ms->GART_start && ms->GART_end && (paddr >= ms->GART_start) && (paddr <= ms->GART_end)) return FALSE; } return TRUE; } static ulong orc_ip(ulong ip) { int ip_entry; if (!readmem((ulong)ip, KVADDR, &ip_entry, sizeof(int), "orc_ip", QUIET|RETURN_ON_ERROR)) return 0; return (ip + ip_entry); } static orc_entry * orc_get_entry(struct ORC_data *orc) { struct orc_entry *entry = &orc->orc_entry_data; if (machdep->flags & ORC_6_4) { kernel_orc_entry_6_4 korc; if (!readmem(orc->orc_entry, KVADDR, &korc, 
sizeof(kernel_orc_entry_6_4), "kernel orc_entry", RETURN_ON_ERROR|QUIET)) return NULL; entry->sp_offset = korc.sp_offset; entry->bp_offset = korc.bp_offset; entry->sp_reg = korc.sp_reg; entry->bp_reg = korc.bp_reg; entry->type = korc.type; entry->signal = korc.signal; } else { kernel_orc_entry korc; if (!readmem(orc->orc_entry, KVADDR, &korc, sizeof(kernel_orc_entry), "kernel orc_entry", RETURN_ON_ERROR|QUIET)) return NULL; entry->sp_offset = korc.sp_offset; entry->bp_offset = korc.bp_offset; entry->sp_reg = korc.sp_reg; entry->bp_reg = korc.bp_reg; entry->type = korc.type; if (orc->has_end) { /* * orc_entry.signal was inserted before orc_entry.end. * see ffb1b4a41016. */ if (orc->has_signal) { entry->signal = korc.signal; entry->end = korc.end; } else entry->end = korc.signal; /* on purpose */ } } return entry; } static orc_entry * __orc_find(ulong ip_table_ptr, ulong u_table_ptr, uint num_entries, ulong ip) { int index; int *first = (int *)ip_table_ptr; int *last = (int *)ip_table_ptr + num_entries - 1; int *mid = first, *found = first; int *ip_table = (int *)ip_table_ptr; struct ORC_data *orc = &machdep->machspec->orc; ulong vaddr; orc_entry *korc; if (CRASHDEBUG(2)) { int i, ip_entry; ulong ptr; ulong offset; struct syment *sp; fprintf(fp, "__orc_find:\n ip: %lx num_entries: %d\n", ip, num_entries); for (i = 0; i < num_entries; i++) { ptr = ip_table_ptr + (i*4); if (!readmem((ulong)ptr, KVADDR, &ip_entry, sizeof(int), "ip entry", RETURN_ON_ERROR)) return NULL; if (!(vaddr = orc_ip(ptr))) return NULL; fprintf(fp, " orc_ip(%lx): %x -> %lx / ", ptr, ip_entry, vaddr); if ((sp = value_search(vaddr, &offset))) { fprintf(fp, "%s+%ld -> ", sp->name, offset); fprintf(fp, "%lx\n", u_table_ptr + (i * SIZE(orc_entry))); } else fprintf(fp, "(unknown symbol value)\n"); } } while (first <= last) { mid = first + ((last - first) / 2); if (!(vaddr = orc_ip((ulong)mid))) return NULL; if (vaddr <= ip) { found = mid; first = mid + 1; } else last = mid - 1; } index = found - 
ip_table; orc->ip_entry = (ulong)found; orc->orc_entry = u_table_ptr + (index * SIZE(orc_entry)); if (!orc_get_entry(orc)) return NULL; korc = &orc->orc_entry_data; if (CRASHDEBUG(2)) { fprintf(fp, " found: %lx index: %d\n", (ulong)found, index); fprintf(fp, " orc_entry: %lx sp_offset: %d bp_offset: %d sp_reg: %d bp_reg: %d type: %d", orc->orc_entry, korc->sp_offset, korc->bp_offset, korc->sp_reg, korc->bp_reg, korc->type); if (orc->has_signal) fprintf(fp, " signal: %d", korc->signal); if (orc->has_end) fprintf(fp, " end: %d", korc->end); fprintf(fp, "\n"); } return korc; } #define LOOKUP_BLOCK_ORDER 8 #define LOOKUP_BLOCK_SIZE (1 << LOOKUP_BLOCK_ORDER) #define LOOKUP_START_IP (unsigned long)kt->stext #define LOOKUP_STOP_IP (unsigned long)kt->etext static orc_entry * orc_find(ulong ip) { unsigned int idx, start, stop; struct ORC_data *orc = &machdep->machspec->orc; if ((ip < LOOKUP_START_IP) || (ip >= LOOKUP_STOP_IP)) { if ((ip >= MODULES_VADDR) && (ip < MODULES_END)) return orc_module_find(ip); error(WARNING, "%lx: ip is outside kernel and module text ranges\n", ip); return NULL; } idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; if (idx >= orc->lookup_num_blocks-1) { if (CRASHDEBUG(1)) { error(INFO, "bad lookup: idx: %u lookup_num_blocks: %u ip: %lx\n", idx, orc->lookup_num_blocks, ip); } return NULL; } if (!readmem(orc->orc_lookup + (sizeof(unsigned int) * idx), KVADDR, &start, sizeof(unsigned int), "orc_lookup start", RETURN_ON_ERROR|QUIET)) { if (CRASHDEBUG(1)) error(INFO, "cannot read \"start\" orc_lookup entry at %lx\n", orc->orc_lookup + (sizeof(unsigned int) * idx)); return NULL; } if (!readmem(orc->orc_lookup + (sizeof(unsigned int) * (idx+1)), KVADDR, &stop, sizeof(unsigned int), "orc_lookup stop", RETURN_ON_ERROR|QUIET)) { if (CRASHDEBUG(1)) error(INFO, "cannot read \"stop\" orc_lookup entry at %lx\n", orc->orc_lookup + (sizeof(unsigned int) * (idx+1))); return NULL; } stop += 1; if (CRASHDEBUG(2)) { fprintf(fp, "orc_find:\n ip: %lx idx: %d\n", ip, 
idx); fprintf(fp, " start = orc_lookup[%d]: %d\n" " stop = orc_lookup[%d] + 1: %d\n", idx, start, idx+1, stop); fprintf(fp, " ip table start: %lx\n", orc->__start_orc_unwind_ip + (start * sizeof(int))); fprintf(fp, " unwind table start: %lx\n", orc->__start_orc_unwind + (start * SIZE(orc_entry))); } if ((orc->__start_orc_unwind + (start * SIZE(orc_entry))) >= orc->__stop_orc_unwind) { if (CRASHDEBUG(1)) error(INFO, "bad unwind lookup start: idx: %u num: %u start: %u stop: %u ip: %lx\n", idx, orc->lookup_num_blocks, start, stop, ip); return NULL; } if ((orc->__start_orc_unwind + (stop * SIZE(orc_entry))) > orc->__stop_orc_unwind) { if (CRASHDEBUG(1)) error(INFO, "bad unwind lookup stop: idx: %u num: %u start: %u stop: %u ip: %lx\n", idx, orc->lookup_num_blocks, start, stop, ip); return NULL; } return __orc_find(orc->__start_orc_unwind_ip + (start * sizeof(int)), orc->__start_orc_unwind + (start * SIZE(orc_entry)), stop - start, ip); } static orc_entry * orc_module_find(ulong ip) { struct load_module *lm; uint num_orcs; ulong orc_unwind_ip, orc_unwind, module_arch; struct ORC_data *orc = &machdep->machspec->orc; if (!(orc->module_ORC) || !module_symbol(ip, NULL, &lm, NULL, 0)) return NULL; module_arch = lm->module_struct + OFFSET(module_arch); if (!readmem(module_arch + OFFSET(mod_arch_specific_num_orcs), KVADDR, &num_orcs, sizeof(int), "module num_orcs", RETURN_ON_ERROR|QUIET)) return NULL; if (!readmem(module_arch + OFFSET(mod_arch_specific_orc_unwind_ip), KVADDR, &orc_unwind_ip, sizeof(void *), "module orc_unwind_ip", RETURN_ON_ERROR|QUIET)) return NULL; if (!readmem(module_arch + OFFSET(mod_arch_specific_orc_unwind), KVADDR, &orc_unwind, sizeof(void *), "module orc_unwind", RETURN_ON_ERROR|QUIET)) return NULL; if (CRASHDEBUG(2)) { fprintf(fp, "orc_module_find:\n"); fprintf(fp, " num_orcs: %d orc_unwind_ip: %lx orc_unwind: %lx\n", num_orcs, orc_unwind_ip, orc_unwind); } return __orc_find(orc_unwind_ip, orc_unwind, num_orcs, ip); } static ulong 
ip_table_to_vaddr(ulong ip_table) { int ip_entry; if (!readmem((ulong)ip_table, KVADDR, &ip_entry, sizeof(int), "ip entry", RETURN_ON_ERROR)) error(FATAL, "ip_table_to_vaddr: cannot read ip_table: %lx\n", ip_table); return (ip_table + ip_entry); } static void orc_dump(ulong ip) { struct ORC_data *orc = &machdep->machspec->orc; orc_entry *korc; ulong vaddr, offset; struct syment *sp, *orig; fprintf(fp, "orc_dump: %lx / ", ip); if ((sp = value_search(ip, &offset))) fprintf(fp, "%s+%ld\n--------\n", sp->name, offset); else fprintf(fp, "(unresolved)\n--------\n"); orig = sp; if (!orc_find(ip)) { fprintf(fp, "%lx: ip not found\n", ip); return; } next_in_func: fprintf(fp, "ip: %lx -> %lx / ", orc->ip_entry, vaddr = ip_table_to_vaddr(orc->ip_entry)); if ((sp = value_search(vaddr, &offset))) fprintf(fp, "%s+%ld -> ", sp->name, offset); else fprintf(fp, "(unresolved) -> "); if (!orc_get_entry(orc)) error(FATAL, "cannot read orc_entry\n"); korc = &orc->orc_entry_data; fprintf(fp, "orc: %lx spo: %d bpo: %d spr: %d bpr: %d type: %d", orc->orc_entry, korc->sp_offset, korc->bp_offset, korc->sp_reg, korc->bp_reg, korc->type); if (orc->has_signal) fprintf(fp, " signal: %d", korc->signal); if (orc->has_end) fprintf(fp, " end: %d", korc->end); fprintf(fp, "\n"); orc->ip_entry += sizeof(int); orc->orc_entry += sizeof(kernel_orc_entry); vaddr = ip_table_to_vaddr(orc->ip_entry); if ((sp = value_search(vaddr, &offset))) if (sp == orig) goto next_in_func; } /* * KPTI entry stack initialization. May vary signficantly * between upstream and distribution backports. 
*/ static void x86_64_entry_trampoline_init(void) { struct machine_specific *ms; struct syment *sp; ms = machdep->machspec; if (!kernel_symbol_exists("pti_init") && !kernel_symbol_exists("kaiser_init")) return; /* * 4.15 */ if (MEMBER_EXISTS("entry_stack", "words") && MEMBER_EXISTS("entry_stack_page", "stack") && (sp = per_cpu_symbol_search("per_cpu__entry_stack_storage"))) { ms->kpti_entry_stack = sp->value + MEMBER_OFFSET("entry_stack_page", "stack"); ms->kpti_entry_stack_size = MEMBER_SIZE("entry_stack", "words"); machdep->flags |= KPTI; return; } /* * RHEL */ if (MEMBER_EXISTS("tss_struct", "stack")) { if (!(sp = per_cpu_symbol_search("per_cpu__init_tss"))) sp = per_cpu_symbol_search("per_cpu__cpu_tss"); ms->kpti_entry_stack = sp->value + MEMBER_OFFSET("tss_struct", "stack"); ms->kpti_entry_stack_size = MEMBER_SIZE("tss_struct", "stack"); machdep->flags |= KPTI; return; } } static ulong x86_64_in_kpti_entry_stack(int cpu, ulong rsp) { ulong stack_base, stack_end; struct machine_specific *ms; if (!(machdep->flags & KPTI)) return 0; ms = machdep->machspec; if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) { if (kt->__per_cpu_offset[cpu] == 0) return 0; stack_base = ms->kpti_entry_stack + kt->__per_cpu_offset[cpu]; } else stack_base = ms->kpti_entry_stack; stack_end = stack_base + (ms->kpti_entry_stack_size > 0 ? ms->kpti_entry_stack_size : 512); if ((rsp >= stack_base) && (rsp < stack_end)) return(stack_end - SIZE(pt_regs)); return 0; } /* * Original: * * #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) * #define SWP_OFFSET(entry) ((entry) >> 8) * * 4.8: * | OFFSET (14-63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| * * l1tf: * | ... | 11| 10| 9|8|7|6|5| 4| 3|2| 1|0| <- bit number * | ... 
|SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names * | TYPE (59-63) | ~OFFSET (9-58) |0|0|X|X| X| X|X|SD|0| <- swp entry */ ulong x86_64_swp_type(ulong entry) { if (machdep->flags & L1TF) return(entry >> 59); if (THIS_KERNEL_VERSION >= LINUX(4,8,0)) return((entry >> 9) & 0x1f); return SWP_TYPE(entry); } ulong x86_64_swp_offset(ulong entry) { if (machdep->flags & L1TF) return((~entry << 5) >> 14); if (THIS_KERNEL_VERSION >= LINUX(4,8,0)) return(entry >> 14); return SWP_OFFSET(entry); } #endif /* X86_64 */ crash-utility-crash-9cd43f5/ci-tests/0000775000372000037200000000000015107550337017127 5ustar juerghjuerghcrash-utility-crash-9cd43f5/ci-tests/main.fmf0000664000372000037200000000123415107550337020545 0ustar juerghjuerghprovision: - name: client hardware: memory: ">= 4 GiB" cpu: processors: ">= 4" prepare: # Set root password to log in as root in the console - name: Set root password how: shell script: - echo root:kdump | chpasswd - name: Use custom mirror how: shell script: - test -v CUSTOM_MIRROR && sed -e 's/^metalink=/#metalink=/g' -e "s|^#baseurl=http://download.example/pub/fedora/linux|baseurl=${CUSTOM_MIRROR}|g" -i.bak /etc/yum.repos.d/fedora{,-updates}.repo || true execute: how: tmt exit-first: true discover: how: fmf url: https://github.com/crash-utility/crash-test.git ref: main crash-utility-crash-9cd43f5/ci-tests/local.fmf0000664000372000037200000000044215107550337020713 0ustar juerghjuerghsummary: Generate vmcore and analyze it with crash tool environment: KDUMP_UTILS_RPM: kdump-utils discover+: test: - /kdump/default_crashkernel - /kdump/config-any - /kdump/crash-sysrq-c - /kdump/build-crash-utility - /kdump/analyse-crash-cmd/common_analyse crash-utility-crash-9cd43f5/ci-tests/.fmf/0000775000372000037200000000000015107550337017755 5ustar juerghjuerghcrash-utility-crash-9cd43f5/ci-tests/.fmf/version0000664000372000037200000000000215107550337021355 0ustar juerghjuergh1 
crash-utility-crash-9cd43f5/xen_hyper_dump_tables.c0000664000372000037200000011340215107550337022121 0ustar juerghjuergh/* * xen_hyper_dump_tables.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "defs.h" #ifdef XEN_HYPERVISOR_ARCH #include "xen_hyper_defs.h" static void xen_hyper_dump_xen_hyper_table(int verbose); static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose); static void xen_hyper_dump_xen_hyper_domain_table(int verbose); static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose); static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose); static void xen_hyper_dump_xen_hyper_sched_table(int verbose); static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct); static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct); static void xen_hyper_dump_mem(void *mem, ulong len, int dsz); /* * Get help for a command, to dump an internal table, or the GNU public * license copying/warranty information. 
*/
void
xen_hyper_cmd_help(void)
{
	int c;
	int oflag;	/* set by -o: dump offset/size table data for remaining args */

	oflag = 0;

	while ((c = getopt(argcnt, args, "aBbcDgHhM:mnOopszX:")) != EOF) {
		switch(c)
		{
		case 'a':
			dump_alias_data();
			return;
		case 'b':
			dump_shared_bufs();
			return;
		case 'B':
			dump_build_data();
			return;
		case 'c':
			dump_numargs_cache();
			return;
		case 'n':
		case 'D':
			dumpfile_memory(DUMPFILE_MEM_DUMP);
			return;
		case 'g':
			dump_gdb_data();
			return;
		case 'H':
			dump_hash_table(VERBOSE);
			return;
		case 'h':
			dump_hash_table(!VERBOSE);
			return;
		case 'M':
			dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL));
			return;
		case 'm':
			dump_machdep_table(0);
			return;
		case 'O':
			dump_offset_table(NULL, TRUE);
			return;
		case 'o':
			oflag = TRUE;
			break;
		case 'p':
			dump_program_context();
			return;
		case 's':
			dump_symbol_table();
			return;
		case 'X':
			/* -X takes a 3-character table selector; upper-case
			   first letter selects the verbose variant */
			if (strlen(optarg) != 3) {
				argerrs++;
				break;
			}
			if (!strncmp("Xen", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_table(VERBOSE);
			else if (!strncmp("xen", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_table(!VERBOSE);
			else if (!strncmp("Dmp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_dumpinfo_table(VERBOSE);
			else if (!strncmp("dmp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_dumpinfo_table(!VERBOSE);
			else if (!strncmp("Dom", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_domain_table(VERBOSE);
			else if (!strncmp("dom", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_domain_table(!VERBOSE);
			else if (!strncmp("Vcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_vcpu_table(VERBOSE);
			else if (!strncmp("vcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_vcpu_table(!VERBOSE);
			else if (!strncmp("Pcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_pcpu_table(VERBOSE);
			else if (!strncmp("pcp", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_pcpu_table(!VERBOSE);
			else if (!strncmp("Sch", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_sched_table(VERBOSE);
			else if (!strncmp("sch", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_sched_table(!VERBOSE);
			else if (!strncmp("siz", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_size_table(NULL, TRUE);
			else if (!strncmp("ofs", optarg, strlen(optarg)))
				xen_hyper_dump_xen_hyper_offset_table(NULL, TRUE);
			else {
				argerrs++;
				break;
			}
			return;
		case 'z':
			fprintf(fp, "help options:\n");
			fprintf(fp, " -a - alias data\n");
			fprintf(fp, " -b - shared buffer data\n");
			fprintf(fp, " -B - build data\n");
			fprintf(fp, " -c - numargs cache\n");
			fprintf(fp, " -M machine specific\n");
			fprintf(fp, " -m - machdep_table\n");
			fprintf(fp, " -s - symbol table data\n");
			fprintf(fp, " -o - offset_table and size_table\n");
			fprintf(fp, " -p - program_context\n");
			fprintf(fp, " -h - hash_table data\n");
			fprintf(fp, " -H - hash_table data (verbose)\n");
			fprintf(fp, " -X Xen - xen table data (verbose)\n");
			fprintf(fp, " -X xen - xen table data\n");
			fprintf(fp, " -X Dmp - dumpinfo table data (verbose)\n");
			fprintf(fp, " -X dmp - dumpinfo table data\n");
			fprintf(fp, " -X Dom - domain table data (verbose)\n");
			fprintf(fp, " -X dom - domain table data\n");
			fprintf(fp, " -X Vcp - vcpu table data (verbose)\n");
			fprintf(fp, " -X vcp - vcpu table data\n");
			fprintf(fp, " -X Pcp - pcpu table data (verbose)\n");
			fprintf(fp, " -X pcp - pcpu table data\n");
			fprintf(fp, " -X Sch - schedule table data (verbose)\n");
			fprintf(fp, " -X sch - schedule table data\n");
			fprintf(fp, " -X siz - size table data\n");
			fprintf(fp, " -X ofs - offset table data\n");
			return;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, COMPLETE_HELP);

	/* no non-option args: dump the offset table (-o) or the help menu */
	if (!args[optind]) {
		if (oflag)
			dump_offset_table(NULL, FALSE);
		else
			display_help_screen("");
		return;
	}

	/* per-argument: offset table entry (-o) or command help */
	do {
		if (oflag)
			dump_offset_table(args[optind], FALSE);
		else
			cmd_usage(args[optind], COMPLETE_HELP);
		optind++;
	} while (args[optind]);
}

/*
 * "help -x xen" output
 */
static void
xen_hyper_dump_xen_hyper_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	uint cpuid;
	int len, flag, i;

	len = 14;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "cpu_data_address: ", buf, flag, (buf, "%lu\n",
xht->cpu_data_address));
	XEN_HYPER_PRI(fp, len, "cpu_curr: ", buf, flag, (buf, "%u\n",
		xht->cpu_curr));
	XEN_HYPER_PRI(fp, len, "max_cpus: ", buf, flag, (buf, "%u\n",
		xht->max_cpus));
	XEN_HYPER_PRI(fp, len, "cores: ", buf, flag, (buf, "%d\n",
		xht->cores));
	XEN_HYPER_PRI(fp, len, "pcpus: ", buf, flag, (buf, "%d\n",
		xht->pcpus));
	XEN_HYPER_PRI(fp, len, "vcpus: ", buf, flag, (buf, "%d\n",
		xht->vcpus));
	XEN_HYPER_PRI(fp, len, "domains: ", buf, flag, (buf, "%d\n",
		xht->domains));
	XEN_HYPER_PRI(fp, len, "sys_pages: ", buf, flag, (buf, "%lu\n",
		xht->sys_pages));
	XEN_HYPER_PRI(fp, len, "crashing_cpu: ", buf, flag, (buf, "%d\n",
		xht->crashing_cpu));
	XEN_HYPER_PRI(fp, len, "crashing_vcc: ", buf, flag, (buf, "%p\n",
		xht->crashing_vcc));
	XEN_HYPER_PRI(fp, len, "max_page: ", buf, flag, (buf, "%lu\n",
		xht->max_page));
	XEN_HYPER_PRI(fp, len, "total_pages: ", buf, flag, (buf, "%lu\n",
		xht->total_pages));
	XEN_HYPER_PRI(fp, len, "cpumask: ", buf, flag, (buf, "%p\n",
		xht->cpumask));
	/* verbose: also hexdump the cpumask backing memory */
	if (verbose && xht->cpumask) {
		xen_hyper_dump_mem(xht->cpumask,
			XEN_HYPER_SIZE(cpumask_t), sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "cpu_idxs: ", buf, flag, (buf, "%p\n",
		xht->cpu_idxs));
	if (verbose) {
		for_cpu_indexes(i, cpuid)
			fprintf(fp, "%03d : %d\n", i, cpuid);
	}
}

/*
 * "help -x dmp" output
 *
 * Dump the dumpinfo table; verbose mode also hexdumps the per-cpu
 * context arrays and crash note buffers.
 */
static void
xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 25;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "note_ver: ", buf, flag, (buf, "%u\n",
		xhdit->note_ver));
	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n",
		xhdit->context_array));
	if (verbose && xhdit->context_array) {
		xen_hyper_dump_mem((long *)xhdit->context_array,
			sizeof(struct xen_hyper_dumpinfo_context) * XEN_HYPER_MAX_CPUS(),
			sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "context_xen_core_array: ", buf, flag, (buf, "%p\n",
		xhdit->context_xen_core_array));
	if (verbose && xhdit->context_xen_core_array) {
		xen_hyper_dump_mem((long *)xhdit->context_xen_core_array,
			sizeof(struct xen_hyper_dumpinfo_context_xen_core) * XEN_HYPER_MAX_CPUS(),
			sizeof(long));
	}
	XEN_HYPER_PRI_CONST(fp, len, "context_xen_info: ", flag|XEN_HYPER_PRI_LF);
	XEN_HYPER_PRI(fp, len, "note: ", buf, flag, (buf, "%lx\n",
		xhdit->context_xen_info.note));
	XEN_HYPER_PRI(fp, len, "pcpu_id: ", buf, flag, (buf, "%u\n",
		xhdit->context_xen_info.pcpu_id));
	XEN_HYPER_PRI(fp, len, "crash_xen_info_ptr: ", buf, flag, (buf, "%p\n",
		xhdit->context_xen_info.crash_xen_info_ptr));
	XEN_HYPER_PRI(fp, len, "crash_note_core_array: ", buf, flag, (buf, "%p\n",
		xhdit->crash_note_core_array));
	if (verbose && xhdit->crash_note_core_array) {
		xen_hyper_dump_mem((long *)xhdit->crash_note_core_array,
			xhdit->core_size * XEN_HYPER_NR_PCPUS(),
			sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_array: ", buf, flag, (buf, "%p\n",
		xhdit->crash_note_xen_core_array));
	if (verbose && xhdit->crash_note_xen_core_array) {
		xen_hyper_dump_mem(
			xhdit->crash_note_xen_core_array,
			xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(),
			sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_ptr: ", buf, flag, (buf, "%p\n",
		xhdit->crash_note_xen_info_ptr));
	if (verbose && xhdit->crash_note_xen_info_ptr) {
		xen_hyper_dump_mem(
			xhdit->crash_note_xen_info_ptr,
			xhdit->xen_info_size, sizeof(long));
	}
	XEN_HYPER_PRI(fp, len, "xen_info_cpu: ", buf, flag, (buf, "%u\n",
		xhdit->xen_info_cpu));
	XEN_HYPER_PRI(fp, len, "note_size: ", buf, flag, (buf, "%u\n",
		xhdit->note_size));
	XEN_HYPER_PRI(fp, len, "core_offset: ", buf, flag, (buf, "%u\n",
		xhdit->core_offset));
	XEN_HYPER_PRI(fp, len, "core_size: ", buf, flag, (buf, "%u\n",
		xhdit->core_size));
	XEN_HYPER_PRI(fp, len, "xen_core_offset: ", buf, flag, (buf, "%u\n",
		xhdit->xen_core_offset));
	XEN_HYPER_PRI(fp, len, "xen_core_size: ", buf, flag, (buf, "%u\n",
		xhdit->xen_core_size));
	XEN_HYPER_PRI(fp, len, "xen_info_offset: ", buf, flag, (buf, "%u\n",
		xhdit->xen_info_offset));
	XEN_HYPER_PRI(fp, len, "xen_info_size: ", buf, flag, (buf, "%u\n",
		xhdit->xen_info_size));
}

/*
 * "help -x dom" output
 *
 * Dump the domain table; verbose mode also walks every cached
 * domain context.
 */
static void
xen_hyper_dump_xen_hyper_domain_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	struct xen_hyper_domain_context *dcca;
	int len, flag, i;

	len = 22;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n",
		xhdt->context_array));
	if (verbose) {
		char buf1[XEN_HYPER_CMD_BUFSIZE];
		int j;

		for (i = 0, dcca = xhdt->context_array;
		    i < xhdt->context_array_cnt; i++, dcca++) {
			snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", i);
			XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
			XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%lx\n",
				dcca->domain));
			XEN_HYPER_PRI(fp, len, "domain_id: ", buf, flag, (buf, "%d\n",
				dcca->domain_id));
			XEN_HYPER_PRI(fp, len, "tot_pages: ", buf, flag, (buf, "%x\n",
				dcca->tot_pages));
			XEN_HYPER_PRI(fp, len, "max_pages: ", buf, flag, (buf, "%x\n",
				dcca->max_pages));
			XEN_HYPER_PRI(fp, len, "xenheap_pages: ", buf, flag, (buf, "%x\n",
				dcca->xenheap_pages));
			XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, (buf, "%lx\n",
				dcca->shared_info));
			XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n",
				dcca->sched_priv));
			XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, (buf, "%lx\n",
				dcca->next_in_list));
			XEN_HYPER_PRI(fp, len, "domain_flags: ", buf, flag, (buf, "%lx\n",
				dcca->domain_flags));
			XEN_HYPER_PRI(fp, len, "evtchn: ", buf, flag, (buf, "%lx\n",
				dcca->evtchn));
			XEN_HYPER_PRI(fp, len, "vcpu_cnt: ", buf, flag, (buf, "%d\n",
				dcca->vcpu_cnt));
			for (j = 0; j < XEN_HYPER_MAX_VIRT_CPUS; j++) {
				snprintf(buf1, XEN_HYPER_CMD_BUFSIZE, "vcpu[%d]: ", j);
				XEN_HYPER_PRI(fp, len, buf1, buf, flag, (buf, "%lx\n",
					dcca->vcpu[j]));
			}
			XEN_HYPER_PRI(fp, len, "vcpu_context_array: ", buf, flag, (buf, "%p\n",
				dcca->vcpu_context_array));
		}
	}
	XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, (buf, "%d\n",
		xhdt->context_array_cnt));
	XEN_HYPER_PRI(fp, len, "running_domains: ", buf, flag, (buf, "%lu\n",
		xhdt->running_domains));
	XEN_HYPER_PRI(fp, len, "dom_io: ", buf, flag, (buf, "%p\n",
		xhdt->dom_io));
	XEN_HYPER_PRI(fp, len, "dom_xen: ", buf, flag, (buf, "%p\n",
		xhdt->dom_xen));
	XEN_HYPER_PRI(fp, len, "dom0: ", buf, flag, (buf, "%p\n",
		xhdt->dom0));
	XEN_HYPER_PRI(fp, len, "idle_domain: ", buf, flag, (buf, "%p\n",
		xhdt->idle_domain));
	XEN_HYPER_PRI(fp, len, "curr_domain: ", buf, flag, (buf, "%p\n",
		xhdt->curr_domain));
	XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n",
		xhdt->last));
	XEN_HYPER_PRI(fp, len, "domain_struct: ", buf, flag, (buf, "%p\n",
		xhdt->domain_struct));
	XEN_HYPER_PRI(fp, len, "domain_struct_verify: ", buf, flag, (buf, "%p\n",
		xhdt->domain_struct_verify));
}

/*
 * "help -x vcp" output
 */
static void
xen_hyper_dump_xen_hyper_vcpu_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 25;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "vcpu_context_arrays: ", buf, flag, (buf, "%p\n",
		xhvct->vcpu_context_arrays));
	XEN_HYPER_PRI(fp, len, "vcpu_context_arrays_cnt: ", buf, flag, (buf, "%d\n",
		xhvct->vcpu_context_arrays_cnt));
	if (verbose) {
		struct xen_hyper_vcpu_context_array *vcca;
		struct xen_hyper_vcpu_context *vca;
		int i, j;

		for (i = 0, vcca = xhvct->vcpu_context_arrays;
		    i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) {
			snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "vcpu_context_arrays[%d]: ", i);
			XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
			if (vcca->context_array) {
				XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n",
					vcca->context_array));
			} else {
				XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "NULL\n"));
			}
			XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, (buf, "%d\n",
				vcca->context_array_cnt));
			XEN_HYPER_PRI(fp, len, "context_array_valid: ", buf, flag, (buf, "%d\n",
				vcca->context_array_valid));
			for (j = 0, vca = vcca->context_array;
			    j < vcca->context_array_cnt; j++, vca++) {
				snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", j);
				XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
				XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, (buf, "%lx\n",
vca->vcpu));
				XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag, (buf, "%d\n",
					vca->vcpu_id));
				XEN_HYPER_PRI(fp, len, "processor: ", buf, flag, (buf, "%d\n",
					vca->processor));
				XEN_HYPER_PRI(fp, len, "vcpu_info: ", buf, flag, (buf, "%lx\n",
					vca->vcpu_info));
				XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%lx\n",
					vca->domain));
				XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, (buf, "%lx\n",
					vca->next_in_list));
				XEN_HYPER_PRI(fp, len, "sleep_tick: ", buf, flag, (buf, "%lx\n",
					vca->sleep_tick));
				XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n",
					vca->sched_priv));
				XEN_HYPER_PRI(fp, len, "state: ", buf, flag, (buf, "%d\n",
					vca->state));
				/* NOTE(review): "%llux" prints a decimal value followed by a
				   literal 'x'; "%llx" looks intended -- confirm upstream */
				XEN_HYPER_PRI(fp, len, "state_entry_time: ", buf, flag, (buf, "%llux\n",
					(unsigned long long)(vca->state_entry_time)));
				XEN_HYPER_PRI(fp, len, "runstate_guest: ", buf, flag, (buf, "%lx\n",
					vca->runstate_guest));
				XEN_HYPER_PRI(fp, len, "vcpu_flags: ", buf, flag, (buf, "%lx\n",
					vca->vcpu_flags));
			}
		}
	}
	XEN_HYPER_PRI(fp, len, "idle_vcpu: ", buf, flag, (buf, "%lx\n",
		xhvct->idle_vcpu));
	XEN_HYPER_PRI(fp, len, "idle_vcpu_context_array: ", buf, flag, (buf, "%p\n",
		xhvct->idle_vcpu_context_array));
	XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n",
		xhvct->last));
	XEN_HYPER_PRI(fp, len, "vcpu_struct: ", buf, flag, (buf, "%p\n",
		xhvct->vcpu_struct));
	XEN_HYPER_PRI(fp, len, "vcpu_struct_verify: ", buf, flag, (buf, "%p\n",
		xhvct->vcpu_struct_verify));
}

/*
 * "help -x pcp" output
 *
 * Dump the physical-cpu table; verbose mode walks every per-cpu
 * context entry (including the x86_64 IST array).
 */
static void
xen_hyper_dump_xen_hyper_pcpu_table(int verbose)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	struct xen_hyper_pcpu_context *pcca;
	int len, flag, i;
#ifdef X86_64
	uint64_t *ist_p;
	int j;
#endif

	len = 21;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n",
		xhpct->context_array));
	if (verbose) {
		for (i = 0, pcca = xhpct->context_array;
		    i < XEN_HYPER_MAX_CPUS(); i++, pcca++) {
			snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array %d: ", i);
			XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF);
			XEN_HYPER_PRI(fp, len, "pcpu: ", buf, flag, (buf, "%lx\n",
				pcca->pcpu));
			XEN_HYPER_PRI(fp, len, "processor_id: ", buf, flag, (buf, "%u\n",
				pcca->processor_id));
			XEN_HYPER_PRI(fp, len, "guest_cpu_user_regs: ", buf, flag, (buf, "%lx\n",
				pcca->guest_cpu_user_regs));
			XEN_HYPER_PRI(fp, len, "current_vcpu: ", buf, flag, (buf, "%lx\n",
				pcca->current_vcpu));
			XEN_HYPER_PRI(fp, len, "init_tss: ", buf, flag, (buf, "%lx\n",
				pcca->init_tss));
#ifdef X86
			XEN_HYPER_PRI(fp, len, "sp.esp0: ", buf, flag, (buf, "%x\n",
				pcca->sp.esp0));
#endif
#ifdef X86_64
			XEN_HYPER_PRI(fp, len, "sp.rsp0: ", buf, flag, (buf, "%lx\n",
				pcca->sp.rsp0));
			for (j = 0, ist_p = pcca->ist;
			    j < XEN_HYPER_TSS_IST_MAX; j++, ist_p++) {
				XEN_HYPER_PRI(fp, len, "ist: ", buf, flag, (buf, "%lx\n",
					*ist_p));
			}
#endif
		}
	}
	XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n",
		xhpct->last));
	XEN_HYPER_PRI(fp, len, "pcpu_struct: ", buf, flag, (buf, "%p\n",
		xhpct->pcpu_struct));
}

/*
 * "help -x sch" output
 *
 * Dump the scheduler table; verbose mode walks the per-pcpu
 * schedule context array.
 */
static void
xen_hyper_dump_xen_hyper_sched_table(int verbose)
{
	struct xen_hyper_sched_context *schc;
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag, i;

	len = 21;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "name: ", buf, flag, (buf, "%s\n",
		xhscht->name));
	XEN_HYPER_PRI(fp, len, "opt_sched: ", buf, flag, (buf, "%s\n",
		xhscht->opt_sched));
	XEN_HYPER_PRI(fp, len, "sched_id: ", buf, flag, (buf, "%d\n",
		xhscht->sched_id));
	XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, (buf, "%lx\n",
		xhscht->scheduler));
	XEN_HYPER_PRI(fp, len, "scheduler_struct: ", buf, flag, (buf, "%p\n",
		xhscht->scheduler_struct));
	XEN_HYPER_PRI(fp, len, "sched_context_array: ", buf, flag, (buf, "%p\n",
		xhscht->sched_context_array));
	if (verbose) {
		for (i = 0, schc = xhscht->sched_context_array;
		    i < xht->pcpus; i++, schc++) {
			XEN_HYPER_PRI(fp, len, "sched_context_array[", buf, flag,
				(buf, "%d]\n", i));
			XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, (buf, "%lx\n",
				schc->schedule_data));
			XEN_HYPER_PRI(fp, len, "sched_resource: ", buf, flag, (buf, "%lx\n",
				schc->sched_resource));
			XEN_HYPER_PRI(fp, len, "curr: ", buf, flag, (buf, "%lx\n",
				schc->curr));
			XEN_HYPER_PRI(fp, len, "idle: ", buf, flag, (buf, "%lx\n",
				schc->idle));
			XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n",
				schc->sched_priv));
			XEN_HYPER_PRI(fp, len, "tick: ", buf, flag, (buf, "%lx\n",
				schc->tick));
		}
	}
}

/*
 * "help -x siz" output
 */
static void
xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 23;
	flag = XEN_HYPER_PRI_R;
	XEN_HYPER_PRI(fp, len, "ELF_Prstatus: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.ELF_Prstatus));
	XEN_HYPER_PRI(fp, len, "ELF_Signifo: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.ELF_Signifo));
	XEN_HYPER_PRI(fp, len, "ELF_Gregset: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.ELF_Gregset));
	XEN_HYPER_PRI(fp, len, "ELF_Timeval: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.ELF_Timeval));
	XEN_HYPER_PRI(fp, len, "arch_domain: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.arch_domain));
	XEN_HYPER_PRI(fp, len, "arch_shared_info: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.arch_shared_info));
	XEN_HYPER_PRI(fp, len, "cpu_info: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.cpu_info));
	XEN_HYPER_PRI(fp, len, "cpu_time: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.cpu_time));
	XEN_HYPER_PRI(fp, len, "cpu_user_regs: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.cpu_user_regs));
	XEN_HYPER_PRI(fp, len, "cpumask_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.cpumask_t));
	XEN_HYPER_PRI(fp, len, "cpuinfo_ia64: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.cpuinfo_ia64));
	XEN_HYPER_PRI(fp, len, "cpuinfo_x86: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.cpuinfo_x86));
	XEN_HYPER_PRI(fp, len, "crash_note_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_note_t));
	XEN_HYPER_PRI(fp, len, "crash_note_core_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_note_core_t));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_note_xen_t));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_note_xen_core_t));
	XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_note_xen_info_t));
	XEN_HYPER_PRI(fp, len, "crash_xen_core_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_xen_core_t));
	XEN_HYPER_PRI(fp, len, "crash_xen_info_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.crash_xen_info_t));
	XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.domain));
#ifdef IA64
	XEN_HYPER_PRI(fp, len, "mm_struct: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.mm_struct));
#endif
	XEN_HYPER_PRI(fp, len, "note_buf_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.note_buf_t));
	XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.schedule_data));
	XEN_HYPER_PRI(fp, len, "sched_resource: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.sched_resource));
	XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.scheduler));
	XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.shared_info));
	XEN_HYPER_PRI(fp, len, "timer: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.timer));
	XEN_HYPER_PRI(fp, len, "tss: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.tss));
	XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.vcpu));
	XEN_HYPER_PRI(fp, len, "vcpu_runstate_info: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.vcpu_runstate_info));
	XEN_HYPER_PRI(fp, len, "xen_crash_xen_regs_t: ", buf, flag, (buf, "%ld\n",
		xen_hyper_size_table.xen_crash_xen_regs_t));
}

/*
 * "help -x ofs" output
 */
static void
xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	int len, flag;

	len = 45;
	flag = XEN_HYPER_PRI_R;
XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_info)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cursig: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cursig)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sigpend: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sigpend)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sighold: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sighold)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pid)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_ppid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_ppid)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pgrp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pgrp)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sid)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_stime: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_stime)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cutime: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cutime)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cstime: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cstime)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_reg: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_reg)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_fpvalid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_fpvalid)); XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_sec: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_sec)); XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_usec: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_usec)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "arch_domain_mm: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_domain_mm)); #endif XEN_HYPER_PRI(fp, 
len, "arch_shared_info_max_pfn: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_max_pfn)); XEN_HYPER_PRI(fp, len, "arch_shared_info_pfn_to_mfn_frame_list_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_pfn_to_mfn_frame_list_list)); XEN_HYPER_PRI(fp, len, "arch_shared_info_nmi_reason: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_nmi_reason)); XEN_HYPER_PRI(fp, len, "cpu_info_guest_cpu_user_regs: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_info_guest_cpu_user_regs)); XEN_HYPER_PRI(fp, len, "cpu_info_processor_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_info_processor_id)); XEN_HYPER_PRI(fp, len, "cpu_info_current_vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_info_current_vcpu)); XEN_HYPER_PRI(fp, len, "cpu_time_local_tsc_stamp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_local_tsc_stamp)); XEN_HYPER_PRI(fp, len, "cpu_time_stime_local_stamp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_local_stamp)); XEN_HYPER_PRI(fp, len, "cpu_time_stime_master_stamp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_master_stamp)); XEN_HYPER_PRI(fp, len, "cpu_time_tsc_scale: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_tsc_scale)); XEN_HYPER_PRI(fp, len, "cpu_time_calibration_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_calibration_timer)); XEN_HYPER_PRI(fp, len, "crash_note_t_core: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_core)); XEN_HYPER_PRI(fp, len, "crash_note_t_xen: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen)); XEN_HYPER_PRI(fp, len, "crash_note_t_xen_regs: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_regs)); XEN_HYPER_PRI(fp, len, "crash_note_t_xen_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_info)); XEN_HYPER_PRI(fp, len, "crash_note_core_t_note: ", buf, flag, (buf, 
"%ld\n", xen_hyper_offset_table.crash_note_core_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_core_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_desc)); XEN_HYPER_PRI(fp, len, "crash_note_xen_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_xen_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_desc)); XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_desc)); XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_desc)); XEN_HYPER_PRI(fp, len, "domain_page_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_page_list)); XEN_HYPER_PRI(fp, len, "domain_xenpage_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_xenpage_list)); XEN_HYPER_PRI(fp, len, "domain_domain_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_domain_id)); XEN_HYPER_PRI(fp, len, "domain_tot_pages: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_tot_pages)); XEN_HYPER_PRI(fp, len, "domain_max_pages: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_max_pages)); XEN_HYPER_PRI(fp, len, "domain_xenheap_pages: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_xenheap_pages)); XEN_HYPER_PRI(fp, len, "domain_shared_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_shared_info)); XEN_HYPER_PRI(fp, len, "domain_sched_priv: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_sched_priv)); XEN_HYPER_PRI(fp, len, "domain_next_in_list: ", buf, flag, (buf, "%ld\n", 
xen_hyper_offset_table.domain_next_in_list)); XEN_HYPER_PRI(fp, len, "domain_domain_flags: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_domain_flags)); XEN_HYPER_PRI(fp, len, "domain_evtchn: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_evtchn)); XEN_HYPER_PRI(fp, len, "domain_is_hvm: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_hvm)); XEN_HYPER_PRI(fp, len, "domain_guest_type: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_guest_type)); XEN_HYPER_PRI(fp, len, "domain_is_privileged: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_privileged)); XEN_HYPER_PRI(fp, len, "domain_debugger_attached: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_debugger_attached)); if (XEN_HYPER_VALID_MEMBER(domain_is_polling)) { XEN_HYPER_PRI(fp, len, "domain_is_polling: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_polling)); } XEN_HYPER_PRI(fp, len, "domain_is_dying: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_dying)); /* Only one of next both exists but print both, ones value is -1. 
*/ XEN_HYPER_PRI(fp, len, "domain_is_paused_by_controller: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_paused_by_controller)); XEN_HYPER_PRI(fp, len, "domain_controller_pause_count: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_controller_pause_count)); XEN_HYPER_PRI(fp, len, "domain_is_shutting_down: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_shutting_down)); XEN_HYPER_PRI(fp, len, "domain_is_shut_down: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_shut_down)); XEN_HYPER_PRI(fp, len, "domain_vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_vcpu)); XEN_HYPER_PRI(fp, len, "domain_arch: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_arch)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "mm_struct_pgd: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.mm_struct_pgd)); #endif XEN_HYPER_PRI(fp, len, "schedule_data_schedule_lock: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_schedule_lock)); XEN_HYPER_PRI(fp, len, "schedule_data_curr: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_curr)); XEN_HYPER_PRI(fp, len, "schedule_data_idle: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_idle)); XEN_HYPER_PRI(fp, len, "schedule_data_sched_priv: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_sched_priv)); XEN_HYPER_PRI(fp, len, "schedule_data_s_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_s_timer)); XEN_HYPER_PRI(fp, len, "schedule_data_tick: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_tick)); XEN_HYPER_PRI(fp, len, "scheduler_name: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_name)); XEN_HYPER_PRI(fp, len, "scheduler_opt_name: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_opt_name)); XEN_HYPER_PRI(fp, len, "scheduler_sched_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_sched_id)); XEN_HYPER_PRI(fp, len, "scheduler_init: 
", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_init)); XEN_HYPER_PRI(fp, len, "scheduler_tick: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_tick)); XEN_HYPER_PRI(fp, len, "scheduler_init_vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_init_vcpu)); XEN_HYPER_PRI(fp, len, "scheduler_destroy_domain: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_destroy_domain)); XEN_HYPER_PRI(fp, len, "scheduler_sleep: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_sleep)); XEN_HYPER_PRI(fp, len, "scheduler_wake: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_wake)); XEN_HYPER_PRI(fp, len, "scheduler_set_affinity: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_set_affinity)); XEN_HYPER_PRI(fp, len, "scheduler_do_schedule: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_do_schedule)); XEN_HYPER_PRI(fp, len, "scheduler_adjust: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_adjust)); XEN_HYPER_PRI(fp, len, "scheduler_dump_settings: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_settings)); XEN_HYPER_PRI(fp, len, "scheduler_dump_cpu_state: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_cpu_state)); XEN_HYPER_PRI(fp, len, "shared_info_vcpu_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_vcpu_info)); XEN_HYPER_PRI(fp, len, "shared_info_evtchn_pending: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_pending)); XEN_HYPER_PRI(fp, len, "shared_info_evtchn_mask: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_mask)); XEN_HYPER_PRI(fp, len, "shared_info_arch: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_arch)); XEN_HYPER_PRI(fp, len, "timer_expires: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_expires)); XEN_HYPER_PRI(fp, len, "timer_cpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_cpu)); XEN_HYPER_PRI(fp, 
len, "timer_function: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_function)); XEN_HYPER_PRI(fp, len, "timer_data: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_data)); XEN_HYPER_PRI(fp, len, "timer_heap_offset: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_heap_offset)); XEN_HYPER_PRI(fp, len, "timer_killed: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_killed)); XEN_HYPER_PRI(fp, len, "tss_struct_rsp0: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.tss_rsp0)); XEN_HYPER_PRI(fp, len, "tss_struct_esp0: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.tss_esp0)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_id)); XEN_HYPER_PRI(fp, len, "vcpu_processor: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_processor)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_info)); XEN_HYPER_PRI(fp, len, "vcpu_domain: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_domain)); XEN_HYPER_PRI(fp, len, "vcpu_next_in_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_next_in_list)); XEN_HYPER_PRI(fp, len, "vcpu_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_timer)); XEN_HYPER_PRI(fp, len, "vcpu_sleep_tick: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_sleep_tick)); XEN_HYPER_PRI(fp, len, "vcpu_poll_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_poll_timer)); XEN_HYPER_PRI(fp, len, "vcpu_sched_priv: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_sched_priv)); XEN_HYPER_PRI(fp, len, "vcpu_runstate: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_guest: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_guest)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_flags: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_flags)); XEN_HYPER_PRI(fp, len, "vcpu_pause_count: ", buf, 
flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_pause_count)); XEN_HYPER_PRI(fp, len, "vcpu_virq_to_evtchn: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_virq_to_evtchn)); XEN_HYPER_PRI(fp, len, "vcpu_cpu_affinity: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_cpu_affinity)); XEN_HYPER_PRI(fp, len, "vcpu_nmi_addr: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_nmi_addr)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_dirty_cpumask: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_dirty_cpumask)); XEN_HYPER_PRI(fp, len, "vcpu_arch: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_arch)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state_entry_time: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state_entry_time)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_time: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_time)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "vcpu_thread_ksp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_thread_ksp)); #endif } /* * dump specified memory with specified size. */ #define DSP_BYTE_SIZE 16 static void xen_hyper_dump_mem(void *mem, ulong len, int dsz) { long i, max; void *mem_w = mem; if (!len || (dsz != SIZEOF_8BIT && dsz != SIZEOF_16BIT && dsz != SIZEOF_32BIT && dsz != SIZEOF_64BIT)) return; max = len / dsz + (len % dsz ? 
1 : 0); for (i = 0; i < max; i++) { if (i != 0 && !(i % (DSP_BYTE_SIZE / dsz))) fprintf(fp, "\n"); if (i == 0 || !(i % (DSP_BYTE_SIZE / dsz))) fprintf(fp, "%p : ", mem_w); if (dsz == SIZEOF_8BIT) fprintf(fp, "%02x ", *(uint8_t *)mem_w); else if (dsz == SIZEOF_16BIT) fprintf(fp, "%04x ", *(uint16_t *)mem_w); else if (dsz == SIZEOF_32BIT) fprintf(fp, "%08x ", *(uint32_t *)mem_w); else if (dsz == SIZEOF_64BIT) fprintf(fp, "%016llx ", *(unsigned long long *)mem_w); mem_w = (char *)mem_w + dsz; } fprintf(fp, "\n"); } #endif crash-utility-crash-9cd43f5/crash.80000664000372000037200000005565515107550337016605 0ustar juerghjuergh.\" .de CO \dB\\$1\fP \fI\\$2\fP .. .TH CRASH 8 .SH NAME crash \- Analyze Linux crash dump data or a live system .SH SYNOPSIS .B crash [\fIOPTION\fR]... \fINAMELIST MEMORY-IMAGE[@ADDRESS] (dumpfile form)\fR .br .B crash [\fIOPTION\fR]... \fI[NAMELIST] (live system form)\fR .SH DESCRIPTION .B Crash is a tool for interactively analyzing the state of the Linux system while it is running, or after a kernel crash has occurred and a core dump has been created by the .I netdump, .I diskdump, .I LKCD, .I kdump, .I xendump .I kvmdump or .I VMware facilities. It is loosely based on the SVR4 UNIX crash command, but has been significantly enhanced by completely merging it with the .B gdb(1) debugger. The marriage of the two effectively combines the kernel-specific nature of the traditional UNIX crash utility with the source code level debugging capabilities of .B gdb(1). In the .I dumpfile form, both a NAMELIST and a MEMORY-IMAGE argument must be entered. In the .I live system form, the NAMELIST argument must be entered if the kernel's .I vmlinux file is not located in a known location, such as the .I /usr/lib/debug/lib/modules/ directory. The .B crash utility has also been extended to support the analysis of dumpfiles generated by a crash of the Xen hypervisor. In that case, the NAMELIST argument must be that of the .I xen-syms binary. 
Live system analysis is not supported for the Xen hypervisor. The .B crash utility command set consists of common kernel core analysis tools such as kernel stack back traces of all processes, source code disassembly, formatted kernel structure and variable displays, virtual memory data, dumps of linked-lists, etc., along with several commands that delve deeper into specific kernel subsystems. Appropriate .B gdb commands may also be entered, which in turn are passed on to the .B gdb module for execution. If desired, commands may be placed in either a .I $HOME/.crashrc file and/or in a .I .crashrc file in the current directory. During initialization, the commands in .I $HOME/.crashrc are executed first, followed by those in the .I ./.crashrc file. The .B crash utility is designed to be independent of Linux version dependencies. When new kernel source code impacts the correct functionality of .B crash and its command set, the utility will be updated to recognize new kernel code changes, while maintaining backwards compatibility with earlier releases. .SH OPTIONS .de BS \fB\\$1\fP\ \fR\\$2\fP .. .TP .BI NAMELIST This is a pathname to an uncompressed kernel image (a .I vmlinux file), or a Xen hypervisor image (a .I xen-syms file) which has been compiled with the "-g" option. If using the .I dumpfile form, a .I vmlinux file may be compressed in either gzip or bzip2 formats. .TP .BI MEMORY-IMAGE[@ADDRESS] A kernel core dump file created by the .I netdump, .I diskdump, .I LKCD .I kdump, .I xendump .I kvmdump or .I VMware facilities. If a MEMORY-IMAGE argument is not entered, the session will be invoked on the live system, which typically requires root privileges because of the device file used to access system RAM. By default, .I /dev/crash will be used if it exists. If it does not exist, then .I /dev/mem will be used; but if the kernel has been configured with .B CONFIG_STRICT_DEVMEM, then .I /proc/kcore will be used. 
It is permissible to explicitly enter
.I /dev/crash,
.I /dev/mem
or
.I /proc/kcore.
An @ADDRESS value must be appended to the MEMORY-IMAGE if the dumpfile
is a raw RAM dumpfile that has no header information describing the file
contents.  Multiple MEMORY-IMAGE@ADDRESS ordered pairs may be entered,
with each dumpfile containing a contiguous block of RAM, where the
ADDRESS value is the physical start address of the block expressed in
hexadecimal.  The physical address value(s) will be used to create a
temporary ELF header in /var/tmp, which will only exist during the
crash session.  If a raw RAM dumpfile represents a live memory source,
such as that specified by the QEMU mem-path argument of a
memory-backend-file object, then "live:" must be prepended to the
MEMORY-IMAGE name.
As for the VMware facility, the
.B crash
utility is able to process a VMware VM memory dump generated by VM
suspend or guest core dump.  In that case, a .vmss or .guest file
should be used as the MEMORY-IMAGE, and the .vmem file must be located
in the same folder.
.TP
.BI mapfile
If the NAMELIST file is not the same kernel that is running
(live system form), or the kernel that was running when the system
crashed (dumpfile form), then the
.I System.map
file of the original kernel should be entered on the command line.
.P
.BI -h \ [option]
.br
.BI \--help \ [option]
.RS
Without an
.I option
argument, display a
.B crash
usage help message.
If the
.I option
argument is a
.B crash
command name, the help page for that command is displayed.
If it is the string "input", a page describing the various
.B crash
command line input options is displayed.
If it is the string "output", a page describing command line output
options is displayed.
If it is the string "all", then all of the possible help messages
are displayed.
After the help message is displayed,
.B crash
exits.
.RE .TP .B \-s Silently proceed directly to the "crash>" prompt without displaying any version, GPL, or .B crash initialization data during startup, and by default, runtime command output is not passed to any scrolling command. .TP .BI \-i \ file Execute the command(s) contained in .I file prior to displaying the "crash>" prompt for interactive user input. .TP .BI \-d \ num Set the internal debug level. The higher the number, the more debugging data will be printed when .B crash initializes and runs. .TP .B \-S Use .I /boot/System.map as the .I mapfile\fP. .TP .B \-e \fIvi | emacs\fR Set the .B readline(3) command line editing mode to "vi" or "emacs". The default editing mode is "vi". .TP .B \-f Force the usage of a compressed .I vmlinux file if its original name does not start with "vmlinux". .TP .B \-k Indicate that the NAMELIST file is an LKCD "Kerntypes" debuginfo file. .TP .BI -g \ [namelist] Determine if a .I vmlinux or .I xen-syms namelist file contains debugging data. .TP .B \-t Display the system-crash timestamp and exit. .TP .B \-L Attempt to lock all of its virtual address space into memory by calling mlockall(MCL_CURRENT|MCL_FUTURE) during initialization. If the system call fails, an error message will be displayed, but the session continues. .TP .BI \-c \ tty-device Open the .I tty-device as the console used for debug messages. .TP .BI \-p \ page-size If a processor's page size cannot be determined by the dumpfile, and the processor default cannot be used, use .I page-size. .TP .BI \-o \ filename Only used with the MEMORY-IMAGE@ADDRESS format for raw RAM dumpfiles, specifies a filename of a new ELF vmcore that will be created and used as the dumpfile. It will be saved to allow future use as a standalone vmcore, replacing the original raw RAM dumpfile. .P .B -m \fIoption=value\fR .br .B --machdep \fIoption=value\fR .RS Pass an option and value pair to machine-dependent code. 
These architecture-specific option/pairs should only be required in very rare circumstances: .P .nf X86_64: phys_base= irq_eframe_link= irq_stack_gap= max_physmem_bits= kernel_image_size= vm=orig (pre-2.6.11 virtual memory address ranges) vm=2.6.11 (2.6.11 and later virtual memory address ranges) vm=xen (Xen kernel virtual memory address ranges) vm=xen-rhel4 (RHEL4 Xen kernel virtual address ranges) vm=5level (5-level page tables) page_offset= PPC64: vm=orig vm=2.6.14 (4-level page tables) IA64: phys_start= init_stack_size= vm=4l (4-level page tables) ARM: phys_base= ARM64: phys_offset= kimage_voffset= max_physmem_bits= vabits_actual= X86: page_offset= .fi .RE .TP .B \-x Automatically load extension modules from a particular directory. If a directory is specified in the .B CRASH_EXTENSIONS shell environment variable, then that directory will be used. Otherwise .I /usr/lib64/crash/extensions (64-bit architectures) or .I /usr/lib/crash/extensions (32-bit architectures) will be used; if they do not exist, then the .I ./extensions directory will be used. .TP .BI --active Track only the active task on each cpu. .TP .BI --buildinfo Display the crash binary's build date, the user ID of the builder, the hostname of the machine where the build was done, the target architecture, the version number, and the compiler version. .TP .BI --memory_module \ modname Use the .I modname as an alternative kernel module to the .I crash.ko module that creates the .I /dev/crash device. .TP .BI --memory_device \ device Use .I device as an alternative device to the .I /dev/crash, /dev/mem or .I /proc/kcore devices. .TP .BI --log \ dumpfile Dump the contents of the kernel log buffer. A kernel namelist argument is not necessary, but the dumpfile must contain the VMCOREINFO data taken from the original /proc/vmcore ELF header. Note: this option is deprecated and will no longer work for kernel(>=v5.10). 
.TP .B --no_kallsyms Do not use kallsyms-generated symbol information contained within kernel module object files. .TP .B --no_modules Do not access or display any kernel module related information. .TP .B --no_ikconf Do not attempt to read configuration data that was built into kernels configured with .B CONFIG_IKCONFIG. .TP .B --no_data_debug Do not verify the validity of all structure member offsets and structure sizes that it uses. .TP .B --no_kmem_cache Do not initialize the kernel's slab cache infrastructure, and commands that use kmem_cache-related data will not work. .TP .B --no_elf_notes Do not use the registers from the ELF NT_PRSTATUS notes saved in a compressed kdump header for backtraces. .TP .B --kmem_cache_delay Delay the initialization of the kernel's slab cache infrastructure until it is required by a run-time command. .TP .B --readnow Pass this flag to the embedded .B gdb module, which will override its two-stage strategy that it uses for reading symbol tables from the NAMELIST. .TP .B --smp Specify that the system being analyzed is an SMP kernel. .P .B -v .br .B --version .RS Display the version of the .B crash utility, the version of the embedded .B gdb module, GPL information, and copyright notices. .RE .TP .BI --cpus \ number Specify the .I number of cpus in the SMP system being analyzed. .TP .BI --osrelease \ dumpfile Display the OSRELEASE vmcoreinfo string from a kdump .I dumpfile header. .TP .BI --hyper Force the session to be that of a Xen hypervisor. .TP .BI --p2m_mfn \ pfn When a Xen Hypervisor or its dom0 kernel crashes, the dumpfile is typically analyzed with either the Xen hypervisor or the dom0 kernel. It is also possible to analyze any of the guest domU kernels if the pfn_to_mfn_list_list .I pfn value of the guest kernel is passed on the command line along with its NAMELIST and the dumpfile. 
.TP .BI --xen_phys_start \ physical-address Supply the base physical address of the Xen hypervisor's text and static data for older xendump dumpfiles that did not pass that information in the dumpfile header. .TP .B --zero_excluded If the makedumpfile(8) facility has filtered a compressed kdump dumpfile to exclude various types of non-essential pages, or has marked a compressed or ELF kdump dumpfile as incomplete due to an ENOSPC or other error during its creation, any attempt to read missing pages will fail. With this flag, reads from any of those pages will return zero-filled memory. .TP .B --no_panic Do not attempt to find the task that was running when the kernel crashed. Set the initial context to that of the "swapper" task on cpu 0. .TP .B --more Use .I /bin/more as the command output scroller, overriding the default of .I /usr/bin/less and any settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --less Use .I /usr/bin/less as the command output scroller, overriding any settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --hex Set the default command output radix to 16, overriding the default radix of 10, and any radix settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --dec Set the default command output radix to 10, overriding any radix settings in either .I ./.crashrc or .I $HOME/.crashrc. This is the default radix setting. .TP .B --CRASHPAGER Use the output paging command defined in the .B CRASHPAGER shell environment variable, overriding any settings in either .I ./.crashrc or .I $HOME/.crashrc. .TP .B --no_scroll Do not pass run-time command output to any scrolling command. .TP .B --no_strip Do not strip cloned kernel text symbol names. .TP .B --no_crashrc Do not execute the commands in either .I $HOME/.crashrc or .I ./.crashrc. .TP .BI --mod \ directory When loading the debuginfo data of kernel modules with the .I mod -S command, search for their object files in .I directory instead of in the standard location. 
.TP .BI --src \ directory Search for the kernel source code in directory instead of in the standard location that is compiled into the debuginfo data. .TP .BI --kaslr \ offset | auto If an x86, x86_64, s390x or loongarch64 kernel was configured with .B CONFIG_RANDOMIZE_BASE, the offset value is equal to the difference between the symbol values compiled into the vmlinux file and their relocated KASLR values. If set to auto, the KASLR offset value will be automatically calculated. .TP .BI --reloc \ size When analyzing live x86 kernels that were configured with a .B CONFIG_PHYSICAL_START value that is larger than its .B CONFIG_PHYSICAL_ALIGN value, then it will be necessary to enter a relocation size equal to the difference between the two values. .TP .BI --hash \ count Set the number of internal hash queue heads used for list gathering and verification. The default count is 32768. .TP .B --minimal Bring up a session that is restricted to the .I log, dis, rd, sym, eval, set and .I exit commands. This option may provide a way to extract some minimal/quick information from a corrupted or truncated dumpfile, or in situations where one of the several kernel subsystem initialization routines would abort the .B crash session. .TP .BI --kvmhost \ [32|64] When examining an x86 KVM guest dumpfile, this option specifies that the KVM host that created the dumpfile was an x86 (32-bit) or an x86_64 (64-bit) machine, overriding the automatically determined value. .TP .BI --kvmio \ override the automatically-calculated KVM guest I/O hole size. .TP .BI --offline \ [show|hide] Show or hide command output that is related to offline cpus. The default setting is show. .SH COMMANDS Each .B crash command generally falls into one of the following categories: .TP .I Symbolic display Displays of kernel text/data, which take full advantage of the power of .B gdb to format and display data structures symbolically. 
.TP .I System state The majority of .B crash commands consist of a set of "kernel-aware" commands, which delve into various kernel subsystems on a system-wide or per-task basis. .TP .I Utility functions A set of useful helper commands serving various purposes, some simple, others quite powerful. .TP .I Session control Commands that control the .B crash session itself. .PP The following alphabetical list consists of a very simple overview of each .B crash command. However, since individual commands often have several options resulting in significantly different output, it is suggested that the full description of each command be viewed by executing .I crash\ -h\ \fI\fP, or during a .B crash session by simply entering .B \fIhelp command\fP. .TP .I * "pointer to" is shorthand for either the .I struct or .I union commands. It displays the contents of a kernel structure or union. .TP .I alias creates a single-word alias for a command. .TP .I ascii displays an ascii chart or translates a numeric value into its ascii components. .TP .I bpf provides information on currently-loaded eBPF programs and maps. .TP .I bt displays a task's kernel-stack backtrace. If it is given the .I \-a option, it displays the stack traces of the active tasks on all CPUs. It is often used with the .I foreach command to display the backtraces of all tasks with one command. .TP .I btop translates a byte value (physical offset) to its page number. .TP .I dev displays data concerning the character and block device assignments, I/O port usage, I/O memory usage, and PCI device data. .TP .I dis disassembles memory, either entire kernel functions, from a location for a specified number of instructions, or from the start of a function up to a specified memory location. .TP .I eval evaluates an expression or numeric type and displays the result in hexadecimal, decimal, octal and binary. .TP .I exit causes .B crash to exit. 
.TP .I extend dynamically loads or unloads .B crash shared object extension modules. .TP .I files displays information about open files in a context. .TP .I foreach repeats a specified command for the specified (or all) tasks in the system. .TP .I fuser displays the tasks using the specified file or socket. .TP .I gdb passes its argument to the embedded .B gdb module. It is useful for executing .B gdb commands that have the same name as .B crash commands. .TP .I help alone displays the command menu; if followed by a command name, a full description of a command, its options, and examples are displayed. Its output is far more complete and useful than this man page. .TP .I ipcs displays data about the System V IPC facilities. .TP .I irq displays data concerning interrupt request numbers and bottom-half interrupt handling. .TP .I kmem displays information about the use of kernel memory. .TP .I list displays the contents of a linked list. .TP .I log displays the kernel log_buf contents in chronological order. .TP .I mach displays data specific to the machine type. .TP .I mod displays information about the currently installed kernel modules, or adds or deletes symbolic or debugging information about specified kernel modules. .TP .I mount displays information about the currently-mounted filesystems. .TP .I net display various network related data. .TP .I p passes its arguments to the .B gdb "print" command for evaluation and display. .TP .I ps displays process status for specified, or all, processes in the system. .TP .I pte translates the hexadecimal contents of a PTE into its physical page address and page bit settings. .TP .I ptob translates a page frame number to its byte value. .TP .I ptov translates a hexadecimal physical address into a kernel virtual address. .TP .I q is an alias for the "exit" command. .TP .I rd displays the contents of memory, with the output formatted in several different manners. 
.TP .I repeat repeats a command indefinitely, optionally delaying a given number of seconds between each command execution. .TP .I runq displays the tasks on the run queue. .TP .I sbitmapq dumps the contents of the sbitmap_queue structure and the used bits in the bitmap. Also, it shows the dump of a structure array associated with the sbitmap_queue. .TP .I search searches a range of user or kernel memory space for given value. .TP .I set either sets a new context, or gets the current context for display. .TP .I sig displays signal-handling data of one or more tasks. .TP .I struct displays either a structure definition or the contents of a kernel structure at a specified address. .TP .I swap displays information about each configured swap device. .TP .I sym translates a symbol to its virtual address, or a static kernel virtual address to its symbol -- or to a symbol-plus-offset value, if appropriate. .TP .I sys displays system-specific data. .TP .I task displays the contents of a task_struct. .TP .I tree displays the contents of a red-black tree or a radix tree. .TP .I timer displays the timer queue entries, both old- and new-style, in chronological order. .TP .I union is similar to the .I struct command, except that it works on kernel unions. .TP .I vm displays basic virtual memory information of a context. .TP .I vtop translates a user or kernel virtual address to its physical address. .TP .I waitq walks the wait queue list displaying the tasks which are blocked on the specified wait queue. .TP .I whatis displays the definition of structures, unions, typedefs or text/data symbols. .TP .I wr modifies the contents of memory on a live system. It can only be used if .I /dev/mem is the device file being used to access system RAM, and should obviously be used with great care. .PP When .B crash is invoked with a Xen hypervisor binary as the NAMELIST, the command set is slightly modified. 
The .I *, alias, ascii, bt, dis, eval, exit, extend, .I gdb, help, list, log, p, pte, rd, repeat, .I search, set, struct, sym, sys, union, .I whatis, wr and .I q commands are the same as above. The following commands are specific to the Xen hypervisor: .TP .I domain displays the contents of the domain structure for selected, or all, domains. .TP .I doms displays domain status for selected, or all, domains. .TP .I dumpinfo displays Xen dump information for selected, or all, cpus. .TP .I pcpus displays physical cpu information for selected, or all, cpus. .TP .I vcpus displays vcpu status for selected, or all, vcpus. .SH FILES .TP .I .crashrc Initialization commands. The file can be located in the user's .B HOME directory and/or the current directory. Commands found in the .I .crashrc file in the .B HOME directory are executed before those in the current directory's .I .crashrc file. .SH ENVIRONMENT .TP .B EDITOR Command input is read using .BR readline(3). If .B EDITOR is set to .I emacs or .I vi then suitable keybindings are used. If .B EDITOR is not set, then .I vi is used. This can be overridden by .B set vi or .B set emacs commands located in a .IR .crashrc file, or by entering .B -e emacs on the .B crash command line. .TP .B CRASHPAGER If .B CRASHPAGER is set, its value is used as the name of the program to which command output will be sent. If not, then command output is sent to .B /usr/bin/less -E -X by default. .TP .B CRASH_MODULE_PATH Specifies an alternative directory tree to search for kernel module object files. .TP .B CRASH_EXTENSIONS Specifies a directory containing extension modules that will be loaded automatically if the .B -x command line option is used. .SH NOTES .PP If .B crash does not work, look for a newer version: kernel evolution frequently makes .B crash updates necessary. .PP The command .B set scroll off will cause output to be sent directly to the terminal rather than through a paging program. 
This is useful, for example, if you are running .B crash in a window of .BR emacs . .SH AUTHOR Dave Anderson wrote .B crash. .TP Jay Fenlason and Dave Anderson wrote this man page. .SH "SEE ALSO" .PP The .I help command within .B crash provides more complete and accurate documentation than this man page. .PP .I https://github.com/crash-utility - the home page of the .B crash utility. .PP .BR netdump (8), .BR gdb (1), .BR makedumpfile(8) crash-utility-crash-9cd43f5/lkcd_fix_mem.c0000664000372000037200000000464415107550337020171 0ustar juerghjuergh/* lkcd_fix_mem.c * * Copyright (C) 2004 Hewlett-Packard Development Company, L.P. * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/

#ifdef IA64

#define LKCD_COMMON
#include "defs.h"
#include "lkcd_dump_v8.h"

/* Shared worker for the v7/v8 entry points below. */
static int fix_addr(dump_header_asm_t *);

/*
 * v8 dump-header entry point: cache the per-cpu fixup addresses from an
 * already-read architecture-specific dump header.  Always returns 0.
 */
int
fix_addr_v8(dump_header_asm_t *dha)
{
	fix_addr(dha);
	return 0;
}

/*
 * v7 dump-header entry point: read the architecture-specific dump header
 * from the LKCD dump file, then cache the per-cpu fixup addresses.
 * Returns -1 on a short read, 0 otherwise.
 *
 * NOTE(review): the fd parameter is unused -- the read goes through
 * lkcd->fd.  Presumably intentional, but worth confirming with callers.
 */
int
fix_addr_v7(int fd)
{
	static dump_header_asm_t dump_header_asm_v7 = { 0 };
	dump_header_asm_t *dha;

	dha = &dump_header_asm_v7;

	if (read(lkcd->fd, dha, sizeof(dump_header_asm_t)) !=
	    sizeof(dump_header_asm_t))
		return -1;

	fix_addr(dha);
	return 0;
}

/*
 * Stash the dump header pointer in the lkcd descriptor and, for headers
 * with a valid magic number and version > 3, record for every cpu that
 * has both a saved stack and a current task:  the task address, the
 * stack address (saddr) and the saved stack pointer (sw).
 * lkcd->fix_addr_num tracks one past the highest populated entry.
 * Entries for cpus without data get a zero task address.
 * Always returns 0 (the malloc failure path simply leaves
 * fix_addr_num at 0).
 */
static int
fix_addr(dump_header_asm_t *dha)
{
	lkcd->dump_header_asm = dha;

	if (dha->dha_magic_number == DUMP_ASM_MAGIC_NUMBER &&
	    dha->dha_version > 3) {
		int num;
		int i = 0;

		num = dha->dha_smp_num_cpus;
		lkcd->fix_addr_num = 0;
		if (num &&
		    (lkcd->fix_addr = malloc(num * sizeof(struct fix_addrs)))) {
			while (i < num) {
				if (dha->dha_stack[i] &&
				    dha->dha_smp_current_task[i]) {
					lkcd->fix_addr[i].task =
					    (ulong)dha->dha_smp_current_task[i];
					lkcd->fix_addr[i].saddr =
					    (ulong)dha->dha_stack[i];
					lkcd->fix_addr[i].sw =
					    (ulong)dha->dha_stack_ptr[i];
					/* remember the highest non-zero entry */
					lkcd->fix_addr_num = i + 1;
				} else {
					lkcd->fix_addr[i].task = (ulong)0;
				}
				i++;
			}
		}
	}
	return 0;
}

/*
 * Look up the saved stack pointer (sw) cached by fix_addr() for the
 * given task address.  Returns 0 if no entry matches or none were cached.
 */
ulong
get_lkcd_switch_stack(ulong task)
{
	int i;

	if (lkcd->fix_addr_num == 0)
		return 0;

	for (i = 0; i < lkcd->fix_addr_num; i++) {
		if (task == lkcd->fix_addr[i].task) {
			return lkcd->fix_addr[i].sw;
		}
	}
	return 0;
}

/*
 * Retrieve the kernel start address from the cached v8 dump header.
 * Returns 1 on success with *addr filled in, 0 if addr is NULL.
 */
int
lkcd_get_kernel_start_v8(ulong *addr)
{
	if (!addr)
		return 0;

	*addr = ((dump_header_asm_t *)lkcd->dump_header_asm)->dha_kernel_addr;
	return 1;
}

#endif // IA64
crash-utility-crash-9cd43f5/netdump.c0000664000372000037200000044733615107550337017225 0ustar juerghjuergh/* netdump.c
 *
 * Copyright (C) 2002-2019 David Anderson
 * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
*
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: David Anderson
 */

#define _LARGEFILE64_SOURCE 1  /* stat64() */

#include "defs.h"
#include "netdump.h"
#include "sadump.h"
#include "xen_dom0.h"

/* File-scope state describing the current ELF vmcore. */
static struct vmcore_data vmcore_data = { 0 };
static struct vmcore_data *nd = &vmcore_data;
/* State for live sessions -- presumably backs /proc/kcore access; confirm at use sites. */
static struct proc_kcore_data proc_kcore_data = { 0 };
static struct proc_kcore_data *pkd = &proc_kcore_data;

/* Forward declarations for file-local helpers. */
static void netdump_print(char *, ...);
static size_t resize_elf_header(int, char *, char **, char **, ulong);
static void dump_Elf32_Ehdr(Elf32_Ehdr *);
static void dump_Elf32_Phdr(Elf32_Phdr *, int);
static size_t dump_Elf32_Nhdr(Elf32_Off offset, int);
static void dump_Elf64_Ehdr(Elf64_Ehdr *);
static void dump_Elf64_Phdr(Elf64_Phdr *, int);
static void dump_Elf64_Shdr(Elf64_Shdr *shdr);
static size_t dump_Elf64_Nhdr(Elf64_Off offset, int);
static void get_netdump_regs_32(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_ppc(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_arm(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_arm64(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_mips(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_riscv(struct bt_info *, ulong *, ulong *);
static void get_netdump_regs_loongarch64(struct bt_info *, ulong *, ulong *);
static void check_dumpfile_size(char *);
static int proc_kcore_init_32(FILE *, int);
static int proc_kcore_init_64(FILE *, int);
static char *get_regs_from_note(char *, ulong *, ulong *);
static void kdump_get_osrelease(void);
static char *vmcoreinfo_read_string(const char *);

/* Store-vs-read mode flags -- presumably select whether ELF note data is
   cached or merely parsed; confirm at the dump_Elf*_Nhdr call sites. */
#define ELFSTORE 1
#define ELFREAD  0

/* Smallest page size assumed for PT_LOAD alignment checks below. */
#define MIN_PAGE_SIZE (4096)

/*
 * Architectures that have configurable page sizes,
 * can differ from the host machine's page size.
 */
#define READ_PAGESIZE_FROM_VMCOREINFO() \
	(machine_type("IA64") || machine_type("PPC64") || machine_type("PPC") || machine_type("ARM64"))

/*
 * kdump installs NT_PRSTATUS elf notes only to the cpus
 * that were online during dumping. Hence we call into
 * this function after reading the cpu map from the kernel,
 * to remap the NT_PRSTATUS notes only to the online cpus.
 */
void
map_cpus_to_prstatus(void)
{
	void **nt_ptr;
	int online, i, j, nrcpus;
	size_t size;

	/* QEMU ELF memory dumps carry a note for every cpu -- no remap needed. */
	if (pc->flags2 & QEMU_MEM_DUMP_ELF) /* notes exist for all cpus */
		return;

	/* Nothing to do if no cpus are online or all of them are. */
	if (!(online = get_cpus_online()) || (online == kt->cpus))
		return;

	if (CRASHDEBUG(1))
		error(INFO,
		    "cpus: %d online: %d NT_PRSTATUS notes: %d (remapping)\n",
			kt->cpus, online, nd->num_prstatus_notes);

	size = NR_CPUS * sizeof(void *);

	/* Snapshot the note pointers, then clear and rebuild the array. */
	nt_ptr = (void **)GETBUF(size);
	BCOPY(nd->nt_prstatus_percpu, nt_ptr, size);
	BZERO(nd->nt_prstatus_percpu, size);

	/*
	 * Re-populate the array with the notes mapping to online cpus
	 */
	nrcpus = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS);

	for (i = 0, j = 0; i < nrcpus; i++) {
		if (in_cpu_map(ONLINE_MAP, i) &&
		    machdep->is_cpu_prstatus_valid(i)) {
			nd->nt_prstatus_percpu[i] = nt_ptr[j++];
			nd->num_prstatus_notes =
				MAX(nd->num_prstatus_notes, i+1);
		}
	}

	FREEBUF(nt_ptr);
}

/*
 * Determine whether a file is a netdump/diskdump/kdump creation,
 * and if TRUE, initialize the vmcore_data structure.
 */
int
is_netdump(char *file, ulong source_query)
{
	int i, fd, swap;
	Elf32_Ehdr *elf32;
	Elf32_Phdr *load32;
	Elf64_Ehdr *elf64;
	Elf64_Phdr *load64;
	char *eheader, *sect0;
	char buf[BUFSIZE];
	ssize_t size;
	size_t len, tot;
	Elf32_Off offset32;
	Elf64_Off offset64;
	ulong format;

	/* Prefer read-write (write_netdump support); fall back to read-only. */
	if ((fd = open(file, O_RDWR)) < 0) {
		if ((fd = open(file, O_RDONLY)) < 0) {
			sprintf(buf, "%s: open", file);
			perror(buf);
			return FALSE;
		}
	}

	size = SAFE_NETDUMP_ELF_HEADER_SIZE;
	if ((eheader = (char *)malloc(size)) == NULL) {
		fprintf(stderr, "cannot malloc ELF header buffer\n");
		clean_exit(1);
	}

	if (FLAT_FORMAT()) {
		if (!read_flattened_format(fd, 0, eheader, size))
			goto bailout;
	} else {
		size = read(fd, eheader, size);
		if (size < 0) {
			sprintf(buf, "%s: ELF header read", file);
			perror(buf);
			goto bailout;
		} else if (size < MIN_NETDUMP_ELF_HEADER_SIZE) {
			fprintf(stderr, "%s: file too small!\n", file);
			goto bailout;
		}
	}

	load32 = NULL;
	load64 = NULL;
	format = 0;
	elf32 = (Elf32_Ehdr *)&eheader[0];
	elf64 = (Elf64_Ehdr *)&eheader[0];

	/*
	 * Verify the ELF header, and determine the dumpfile format.
	 *
	 * For now, kdump vmcores differ from netdump/diskdump like so:
	 *
	 *   1. The first kdump PT_LOAD segment is packed just after
	 *      the ELF header, whereas netdump/diskdump page-align
	 *      the first PT_LOAD segment.
	 *   2. Each kdump PT_LOAD segment has a p_align field of zero,
	 *      whereas netdump/diskdump have their p_align fields set
	 *      to the system page-size.
	 *
	 * If either kdump difference is seen, presume kdump -- this
	 * is obviously subject to change.
	 */
	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
		goto bailout;

	/* TRUE when the dumpfile byte order differs from the host's. */
	swap = (((eheader[EI_DATA] == ELFDATA2LSB) &&
	     (__BYTE_ORDER == __BIG_ENDIAN)) ||
	    ((eheader[EI_DATA] == ELFDATA2MSB) &&
	     (__BYTE_ORDER == __LITTLE_ENDIAN)));

	if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
	    (swap16(elf32->e_type, swap) == ET_CORE) &&
	    (swap32(elf32->e_version, swap) == EV_CURRENT) &&
	    (swap16(elf32->e_phnum, swap) >= 2)) {
		switch (swap16(elf32->e_machine, swap))
		{
		case EM_386:
			if (machine_type_mismatch(file, "X86", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_ARM:
			if (machine_type_mismatch(file, "ARM", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_PPC:
			if (machine_type_mismatch(file, "PPC", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_MIPS:
			if (machine_type_mismatch(file, "MIPS", NULL,
			    source_query))
				goto bailout;
			break;

		default:
			if (machine_type_mismatch(file, "(unknown)", NULL,
			    source_query))
				goto bailout;
		}

		if (endian_mismatch(file, elf32->e_ident[EI_DATA],
		    source_query))
			goto bailout;

		if (elf32->e_phoff != sizeof(Elf32_Ehdr)) {
			if (CRASHDEBUG(1))
				error(WARNING, "%s: first PHdr not following "
					"EHdr (PHdr offset = %u)\n",
					file, elf32->e_phoff);
			/* it's okay as long as we've read enough data */
			if (elf32->e_phoff > size - 2 * sizeof(Elf32_Phdr)) {
				error(WARNING, "%s: PHdr to far into file!\n",
					file);
				goto bailout;
			}
		}

		/* skip the NOTE program header */
		load32 = (Elf32_Phdr *)
			&eheader[elf32->e_phoff+sizeof(Elf32_Phdr)];

		if ((load32->p_offset & (MIN_PAGE_SIZE-1)) ||
		    (load32->p_align == 0))
			format = KDUMP_ELF32;
		else
			format = NETDUMP_ELF32;
	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
	    (swap16(elf64->e_type, swap) == ET_CORE) &&
	    (swap32(elf64->e_version, swap) == EV_CURRENT) &&
	    (swap16(elf64->e_phnum, swap) >= 2)) {
		switch (swap16(elf64->e_machine, swap))
		{
		case EM_IA_64:
			if (machine_type_mismatch(file, "IA64", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_PPC64:
			if (machine_type_mismatch(file, "PPC64", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_X86_64:
			if (machine_type_mismatch(file, "X86_64", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_S390:
			if (machine_type_mismatch(file, "S390X", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_386:
			if (machine_type_mismatch(file, "X86", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_ARM:
			if (machine_type_mismatch(file, "ARM", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_AARCH64:
			if (machine_type_mismatch(file, "ARM64", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_MIPS:
			if (machine_type_mismatch(file, "MIPS", "MIPS64",
			    source_query))
				goto bailout;
			break;

		case EM_RISCV:
			if (machine_type_mismatch(file, "RISCV64", NULL,
			    source_query))
				goto bailout;
			break;

		case EM_LOONGARCH:
			if (machine_type_mismatch(file, "LOONGARCH64", NULL,
			    source_query))
				goto bailout;
			break;

		default:
			if (machine_type_mismatch(file, "(unknown)", NULL,
			    source_query))
				goto bailout;
		}

		if (endian_mismatch(file, elf64->e_ident[EI_DATA],
		    source_query))
			goto bailout;

		if (elf64->e_phoff != sizeof(Elf64_Ehdr)) {
			/* NOTE(review): e_phoff is 64-bit here but printed
			   with %u -- confirm against upstream format usage. */
			if (CRASHDEBUG(1))
				error(WARNING, "%s: first PHdr not following "
					"EHdr (PHdr offset = %u)\n",
					file, elf64->e_phoff);
			/* it's okay as long as we've read enough data */
			if (elf64->e_phoff > size - 2 * sizeof(Elf64_Phdr)) {
				error(WARNING, "%s: PHdr to far into file!\n",
					file);
				goto bailout;
			}
		}

		/* skip the NOTE program header */
		load64 = (Elf64_Phdr *)
			&eheader[elf64->e_phoff+sizeof(Elf64_Phdr)];

		if ((load64->p_offset & (MIN_PAGE_SIZE-1)) ||
		    (load64->p_align == 0))
			format = KDUMP_ELF64;
		else
			format = NETDUMP_ELF64;
	} else {
		if (CRASHDEBUG(2))
			error(INFO, "%s: not a %s ELF dumpfile\n",
				file, source_query == NETDUMP_LOCAL ?
				"netdump" : "kdump");
		goto bailout;
	}

	/* /proc/kcore probing only needs the format check above. */
	if (source_query == KCORE_LOCAL) {
		close(fd);
		return TRUE;
	}

	/* The detected format must match what the caller asked about. */
	switch (format)
	{
	case NETDUMP_ELF32:
	case NETDUMP_ELF64:
		if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE))
			break;
		else
			goto bailout;

	case KDUMP_ELF32:
	case KDUMP_ELF64:
		if (source_query & KDUMP_LOCAL)
			break;
		else
			goto bailout;
	}

	/* Shrink/grow the buffer to exactly cover the header region. */
	sect0 = NULL;
	if (!(size = resize_elf_header(fd, file, &eheader, &sect0, format)))
		goto bailout;

	nd->ndfd = fd;
	nd->elf_header = eheader;
	nd->flags = format | source_query;

	switch (format)
	{
	case NETDUMP_ELF32:
	case KDUMP_ELF32:
		nd->header_size = size;
		nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0];
		nd->num_pt_load_segments = nd->elf32->e_phnum - 1;
		if ((nd->pt_load_segments = (struct pt_load_segment *)
		    malloc(sizeof(struct pt_load_segment) *
		    nd->num_pt_load_segments)) == NULL) {
			fprintf(stderr,
				"cannot malloc PT_LOAD segment buffers\n");
			clean_exit(1);
		}
		nd->notes32 = (Elf32_Phdr *)
		    &nd->elf_header[nd->elf32->e_phoff];
		nd->load32 = nd->notes32 + 1;
		if (format == NETDUMP_ELF32)
			nd->page_size = (uint)nd->load32->p_align;
		dump_Elf32_Ehdr(nd->elf32);
		dump_Elf32_Phdr(nd->notes32, ELFREAD);
		/* ELFSTORE+i stores segment i into nd->pt_load_segments[] */
		for (i = 0; i < nd->num_pt_load_segments; i++)
			dump_Elf32_Phdr(nd->load32 + i, ELFSTORE+i);
		offset32 = nd->notes32->p_offset;
		for (tot = 0; tot < nd->notes32->p_filesz; tot += len) {
			if (!(len = dump_Elf32_Nhdr(offset32, ELFSTORE)))
				break;
			offset32 += len;
		}
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF64:
		nd->header_size = size;
		nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0];

		/*
		 * Extended Numbering support
		 * See include/uapi/linux/elf.h and elf(5) for more information
		 */
		if (nd->elf64->e_phnum == PN_XNUM) {
			nd->sect0_64 = (Elf64_Shdr *)sect0;
			nd->num_pt_load_segments =
				nd->sect0_64->sh_info - 1;
		} else
			nd->num_pt_load_segments = nd->elf64->e_phnum - 1;

		if ((nd->pt_load_segments = (struct pt_load_segment *)
		    malloc(sizeof(struct pt_load_segment) *
		    nd->num_pt_load_segments)) == NULL) {
			fprintf(stderr,
				"cannot malloc PT_LOAD segment buffers\n");
			clean_exit(1);
		}
		nd->notes64 = (Elf64_Phdr *)
		    &nd->elf_header[nd->elf64->e_phoff];
		nd->load64 = nd->notes64 + 1;
		if (format == NETDUMP_ELF64)
			nd->page_size = (uint)nd->load64->p_align;
		dump_Elf64_Ehdr(nd->elf64);
		dump_Elf64_Phdr(nd->notes64, ELFREAD);
		for (i = 0; i < nd->num_pt_load_segments; i++)
			dump_Elf64_Phdr(nd->load64 + i, ELFSTORE+i);
		offset64 = nd->notes64->p_offset;
		for (tot = 0; tot < nd->notes64->p_filesz; tot += len) {
			if (!(len = dump_Elf64_Nhdr(offset64, ELFSTORE)))
				break;
			offset64 += len;
		}
		break;
	}

	if (CRASHDEBUG(1))
		netdump_memory_dump(fp);

	pc->read_vmcoreinfo = vmcoreinfo_read_string;

	if ((source_query == KDUMP_LOCAL) &&
	    (pc->flags2 & GET_OSRELEASE))
		kdump_get_osrelease();

	if ((source_query == KDUMP_LOCAL) &&
	    (pc->flags2 & GET_LOG)) {
		pc->dfd = nd->ndfd;
		pc->readmem = read_kdump;
		nd->flags |= KDUMP_LOCAL;
		pc->flags |= KDUMP;
		get_log_from_vmcoreinfo(file);
	}

	return nd->header_size;

bailout:
	close(fd);
	free(eheader);
	return FALSE;
}

/*
 * Search through all PT_LOAD segments to determine the
 * file offset where the physical memory segment(s) start
 * in the vmcore, and consider everything prior to that as
 * header contents.
 */
static size_t
resize_elf_header(int fd, char *file, char **eheader_ptr, char **sect0_ptr,
	ulong format)
{
	int i;
	char buf[BUFSIZE];
	char *eheader;
	Elf32_Ehdr *elf32;
	Elf32_Phdr *load32;
	Elf64_Ehdr *elf64;
	Elf64_Phdr *load64;
	Elf32_Off p_offset32;
	Elf64_Off p_offset64;
	size_t header_size;
	uint num_pt_load_segments;

	eheader = *eheader_ptr;
	header_size = num_pt_load_segments = 0;
	elf32 = (Elf32_Ehdr *)&eheader[0];
	elf64 = (Elf64_Ehdr *)&eheader[0];

	/* Pass 1: size of Ehdr plus all program headers. */
	switch (format)
	{
	case NETDUMP_ELF32:
	case KDUMP_ELF32:
		num_pt_load_segments = elf32->e_phnum - 1;
		header_size = MAX(sizeof(Elf32_Ehdr), elf32->e_phoff) +
			(sizeof(Elf32_Phdr) * (num_pt_load_segments + 1));
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF64:
		/*
		 * Extended Numbering support
		 * See include/uapi/linux/elf.h and elf(5) for more information
		 */
		if (elf64->e_phnum == PN_XNUM) {
			/* Real phnum lives in section header 0's sh_info. */
			Elf64_Shdr *shdr64;

			shdr64 = (Elf64_Shdr *)malloc(sizeof(*shdr64));
			if (!shdr64) {
				fprintf(stderr,
				    "cannot malloc a section header buffer\n");
				return 0;
			}
			if (FLAT_FORMAT()) {
				if (!read_flattened_format(fd, elf64->e_shoff,
				    shdr64, elf64->e_shentsize))
					return 0;
			} else {
				if (lseek(fd, elf64->e_shoff, SEEK_SET) !=
				    elf64->e_shoff) {
					sprintf(buf,
					    "%s: section header lseek", file);
					perror(buf);
					return 0;
				}
				if (read(fd, shdr64, elf64->e_shentsize) !=
				    elf64->e_shentsize) {
					sprintf(buf,
					    "%s: section header read", file);
					perror(buf);
					return 0;
				}
			}
			num_pt_load_segments = shdr64->sh_info - 1;
			/* caller owns shdr64 via *sect0_ptr */
			*sect0_ptr = (char *)shdr64;
		} else
			num_pt_load_segments = elf64->e_phnum - 1;

		header_size = MAX(sizeof(Elf64_Ehdr), elf64->e_phoff) +
			(sizeof(Elf64_Phdr) * (num_pt_load_segments + 1));
		break;
	}

	if ((eheader = (char *)realloc(eheader, header_size)) == NULL) {
		fprintf(stderr, "cannot realloc interim ELF header buffer\n");
		clean_exit(1);
	} else
		*eheader_ptr = eheader;

	/* realloc may have moved the buffer -- refresh the aliases */
	elf32 = (Elf32_Ehdr *)&eheader[0];
	elf64 = (Elf64_Ehdr *)&eheader[0];

	if (FLAT_FORMAT()) {
		if (!read_flattened_format(fd, 0, eheader, header_size))
			return 0;
	} else {
		if (lseek(fd, 0, SEEK_SET) != 0) {
			sprintf(buf, "%s: lseek", file);
			perror(buf);
			return 0;
		}
		if (read(fd, eheader, header_size) != header_size) {
			sprintf(buf, "%s: ELF header read", file);
			perror(buf);
			return 0;
		}
	}

	/*
	 * Pass 2: the real header size is everything before the lowest
	 * non-zero PT_LOAD p_offset (the start of memory image data).
	 */
	switch (format)
	{
	case NETDUMP_ELF32:
	case KDUMP_ELF32:
		load32 = (Elf32_Phdr *)
			&eheader[elf32->e_phoff+sizeof(Elf32_Phdr)];
		p_offset32 = load32->p_offset;
		for (i = 0; i < num_pt_load_segments; i++, load32 += 1) {
			if (load32->p_offset &&
			    (p_offset32 > load32->p_offset))
				p_offset32 = load32->p_offset;
		}
		header_size = (size_t)p_offset32;
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF64:
		load64 = (Elf64_Phdr *)
			&eheader[elf64->e_phoff+sizeof(Elf64_Phdr)];
		p_offset64 = load64->p_offset;
		for (i = 0; i < num_pt_load_segments; i++, load64 += 1) {
			if (load64->p_offset &&
			    (p_offset64 > load64->p_offset))
				p_offset64 = load64->p_offset;
		}
		header_size = (size_t)p_offset64;
		break;
	}

	if ((eheader = (char *)realloc(eheader, header_size)) == NULL) {
		perror("realloc");
		fprintf(stderr, "cannot realloc resized ELF header buffer\n");
		clean_exit(1);
	} else
		*eheader_ptr = eheader;

	/* Re-read the final header region into the resized buffer. */
	if (FLAT_FORMAT()) {
		if (!read_flattened_format(fd, 0, eheader, header_size))
			return 0;
	} else {
		if (lseek(fd, 0, SEEK_SET) != 0) {
			sprintf(buf, "%s: lseek", file);
			perror(buf);
			return 0;
		}
		if (read(fd, eheader, header_size) != header_size) {
			sprintf(buf, "%s: ELF header read", file);
			perror(buf);
			return 0;
		}
	}

	return header_size;
}

/*
 * Return the e_version number of an ELF file
 * (or -1 if its not readable ELF file)
 */
int
file_elf_version(char *file)
{
	int fd, size;
	Elf32_Ehdr *elf32;
	Elf64_Ehdr *elf64;
	char header[MIN_NETDUMP_ELF_HEADER_SIZE];
	char buf[BUFSIZE];

	if ((fd = open(file, O_RDONLY)) < 0) {
		sprintf(buf, "%s: open", file);
		perror(buf);
		return -1;
	}

	size = MIN_NETDUMP_ELF_HEADER_SIZE;
	if (read(fd, header, size) != size) {
		sprintf(buf, "%s: read", file);
		perror(buf);
		close(fd);
		return -1;
	}
	close(fd);

	elf32 = (Elf32_Ehdr *)&header[0];
	elf64 = (Elf64_Ehdr *)&header[0];

	/* NOTE(review): the 32-bit branch requires ELFDATA2LSB but the
	   64-bit branch checks no data encoding -- confirm intentional. */
	if (STRNEQ(elf32->e_ident, ELFMAG) &&
	    (elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
	    (elf32->e_ident[EI_DATA] == ELFDATA2LSB) &&
	    (elf32->e_ident[EI_VERSION] == EV_CURRENT)) {
		return (elf32->e_version);
	} else if (STRNEQ(elf64->e_ident, ELFMAG) &&
	    (elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
	    (elf64->e_ident[EI_VERSION] == EV_CURRENT)) {
		return (elf64->e_version);
	}

	return -1;
}

/*
 * Check whether any PT_LOAD segment goes beyond the file size.
 */
static void
check_dumpfile_size(char *file)
{
	int i;
	struct stat64 stat;
	struct pt_load_segment *pls;
	uint64_t segment_end;

	if (is_ramdump_image())
		return;

	if (stat64(file, &stat) < 0)
		return;

	if (S_ISBLK(stat.st_mode)) {
		error(NOTE,
		    "%s: No dump complete check for block devices\n", file);
		return;
	}

	for (i = 0; i < nd->num_pt_load_segments; i++) {
		pls = &nd->pt_load_segments[i];

		segment_end = pls->file_offset +
			(pls->phys_end - pls->phys_start);

		/* Warn (once) if a segment extends past end of file. */
		if (segment_end > stat.st_size) {
			error(WARNING, "%s: may be truncated or incomplete\n"
				" PT_LOAD p_offset: %lld\n"
				" p_filesz: %lld\n"
				" bytes required: %lld\n"
				" dumpfile size: %lld\n\n",
				file, pls->file_offset,
				pls->phys_end - pls->phys_start,
				segment_end, stat.st_size);
			return;
		}
	}
}

/*
 * Perform any post-dumpfile determination stuff here.
 */
int
netdump_init(char *unused, FILE *fptr)
{
	if (!VMCORE_VALID())
		return FALSE;

	machdep->is_cpu_prstatus_valid = diskdump_is_cpu_prstatus_valid;

	nd->ofp = fptr;

	check_dumpfile_size(pc->dumpfile);

	return TRUE;
}

/*
 * Read from a netdump-created dumpfile.
 */
int
read_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	off_t offset;
	ssize_t read_ret;
	struct pt_load_segment *pls;
	int i;

	offset = 0;

	/*
	 * The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and
	 * p_memsz, so for now, multiple PT_LOAD segment support is
	 * restricted to 64-bit machines for netdump/diskdump vmcores.
	 * However, kexec/kdump has introduced the optional use of a
	 * 64-bit ELF header for 32-bit processors.
	 */
	switch (DUMPFILE_FORMAT(nd->flags))
	{
	case NETDUMP_ELF32:
		offset = (off_t)paddr + (off_t)nd->header_size;
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF32:
	case KDUMP_ELF64:
		/* Single segment: simple linear mapping after the header. */
		if (nd->num_pt_load_segments == 1) {
			offset = (off_t)paddr + (off_t)nd->header_size -
				(off_t)nd->pt_load_segments[0].phys_start;
			break;
		}

		for (i = offset = 0; i < nd->num_pt_load_segments; i++) {
			pls = &nd->pt_load_segments[i];
			if ((paddr >= pls->phys_start) &&
			    (paddr < pls->phys_end)) {
				offset = (off_t)(paddr - pls->phys_start) +
					pls->file_offset;
				break;
			}
			/* Excluded tail of a segment reads back as zeroes. */
			if (pls->zero_fill && (paddr >= pls->phys_end) &&
			    (paddr < pls->zero_fill)) {
				memset(bufptr, 0, cnt);
				if (CRASHDEBUG(8))
					fprintf(fp, "read_netdump: zero-fill: "
					    "addr: %lx paddr: %llx cnt: %d\n",
						addr, (ulonglong)paddr, cnt);
				return cnt;
			}
		}

		if (!offset) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: READ_ERROR: "
				    "offset not found for paddr: %llx\n",
					(ulonglong)paddr);
			return READ_ERROR;
		}
		break;
	}

	if (CRASHDEBUG(8))
		fprintf(fp,
		    "read_netdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n",
			addr, (ulonglong)paddr, cnt, (ulonglong)offset);

	if (FLAT_FORMAT()) {
		if (!read_flattened_format(nd->ndfd, offset, bufptr, cnt)) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: READ_ERROR: "
				    "read_flattened_format failed for offset:"
				    " %llx\n",
					(ulonglong)offset);
			return READ_ERROR;
		}
	} else {
		if (lseek(nd->ndfd, offset, SEEK_SET) == -1) {
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: SEEK_ERROR: "
				    "offset: %llx\n", (ulonglong)offset);
			return SEEK_ERROR;
		}

		read_ret = read(nd->ndfd, bufptr, cnt);
		if (read_ret != cnt) {
			/*
			 * First check whether zero_excluded has been set.
			 */
			if ((read_ret >= 0) &&
			    (*diskdump_flags & ZERO_EXCLUDED)) {
				if (CRASHDEBUG(8))
					fprintf(fp, "read_netdump: zero-fill: "
					    "addr: %lx paddr: %llx cnt: %d\n",
						addr + read_ret,
						(ulonglong)paddr + read_ret,
						cnt - (int)read_ret);
				bufptr += read_ret;
				bzero(bufptr, cnt - read_ret);
				return cnt;
			}
			if (CRASHDEBUG(8))
				fprintf(fp, "read_netdump: READ_ERROR: "
				    "offset: %llx\n", (ulonglong)offset);
			return READ_ERROR;
		}
	}

	return cnt;
}

/*
 * Write to a netdump-created dumpfile.  Note that cmd_wr() does not
 * allow writes to dumpfiles, so you can't get here from there.
 * But, if it would ever be helpful, here it is...
 */
int
write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	off_t offset;
	struct pt_load_segment *pls;
	int i;

	offset = 0;

	switch (DUMPFILE_FORMAT(nd->flags))
	{
	case NETDUMP_ELF32:
		offset = (off_t)paddr + (off_t)nd->header_size;
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF32:
	case KDUMP_ELF64:
		/* NOTE(review): unlike read_netdump, the single-segment
		   case does not subtract phys_start -- confirm intended. */
		if (nd->num_pt_load_segments == 1) {
			offset = (off_t)paddr + (off_t)nd->header_size;
			break;
		}

		for (i = offset = 0; i < nd->num_pt_load_segments; i++) {
			pls = &nd->pt_load_segments[i];
			if ((paddr >= pls->phys_start) &&
			    (paddr < pls->phys_end)) {
				offset = (off_t)(paddr - pls->phys_start) +
					pls->file_offset;
				break;
			}
		}

		if (!offset)
			return READ_ERROR;
		break;
	}

	if (lseek(nd->ndfd, offset, SEEK_SET) == -1)
		return SEEK_ERROR;

	if (write(nd->ndfd, bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 * Set the file pointer for debug output.
 */
FILE *
set_netdump_fp(FILE *fp)
{
	if (!VMCORE_VALID())
		return NULL;

	nd->ofp = fp;
	return fp;
}

/*
 * Generic print routine to handle integral and remote daemon output.
 */
static void
netdump_print(char *fmt, ...)
{
	char buf[BUFSIZE];
	va_list ap;

	if (!fmt || !strlen(fmt) || !VMCORE_VALID())
		return;

	va_start(ap, fmt);
	(void)vsnprintf(buf, BUFSIZE, fmt, ap);
	va_end(ap);

	/* Route to the vmcore's output FILE, or the console fallback. */
	if (nd->ofp)
		fprintf(nd->ofp, "%s", buf);
	else
		console(buf);
}

/* Page size recorded from the dumpfile's PT_LOAD p_align. */
uint
netdump_page_size(void)
{
	if (!VMCORE_VALID())
		return 0;

	return nd->page_size;
}

int
netdump_free_memory(void)
{
	return (VMCORE_VALID() ? 0 : 0);
}

int
netdump_memory_used(void)
{
	return (VMCORE_VALID() ? 0 : 0);
}

/*
 * The netdump server will eventually use the NT_TASKSTRUCT section
 * to pass the task address.  Until such time, look at the ebp of the
 * user_regs_struct, which is located at the end of the NT_PRSTATUS
 * elf_prstatus structure, minus one integer:
 *
 *    struct elf_prstatus
 *    {
 *            ...
 *            elf_gregset_t pr_reg;   (maps to user_regs_struct)
 *            int pr_fpvalid;
 *    };
 *
 * If it's a kernel stack address who's adjusted task_struct value is
 * equal to one of the active set tasks, we'll presume it's legit.
 *
 */
ulong
get_netdump_panic_task(void)
{
#ifdef DAEMON
	return nd->task_struct;
#else
	int i, crashing_cpu;
	size_t len;
	char *user_regs;
	ulong ebp, esp, task;

	if (!VMCORE_VALID() || !get_active_set())
		goto panic_task_undetermined;

	/* An explicit NT_TASKSTRUCT note wins outright. */
	if (nd->task_struct) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n",
				nd->task_struct);
		return nd->task_struct;
	}

	switch (DUMPFILE_FORMAT(nd->flags))
	{
	case NETDUMP_ELF32:
	case NETDUMP_ELF64:
		crashing_cpu = -1;
		break;

	case KDUMP_ELF32:
	case KDUMP_ELF64:
		crashing_cpu = -1;
		if (kernel_symbol_exists("crashing_cpu")) {
			get_symbol_data("crashing_cpu", sizeof(int), &i);
			if ((i >= 0) && in_cpu_map(ONLINE_MAP, i)) {
				crashing_cpu = i;
				if (CRASHDEBUG(1))
					error(INFO,
		"get_netdump_panic_task: active_set[crashing_cpu: %d]: %lx\n",
						crashing_cpu,
						tt->active_set[crashing_cpu]);
			}
		}

		if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1))
			goto panic_task_undetermined;
		break;

	default:
		crashing_cpu = -1;
		break;
	}

	if (nd->elf32 && (nd->elf32->e_machine == EM_386)) {
		Elf32_Nhdr *note32 = NULL;

		if (nd->num_prstatus_notes > 1) {
			if (crashing_cpu != -1)
				note32 = (Elf32_Nhdr *)
					nd->nt_prstatus_percpu[crashing_cpu];
		} else
			note32 = (Elf32_Nhdr *)nd->nt_prstatus;

		if (!note32)
			goto panic_task_undetermined;

		/* Walk past the note header + name + desc to the regs. */
		len = sizeof(Elf32_Nhdr);
		len = roundup(len + note32->n_namesz, 4);
		len = roundup(len + note32->n_descsz, 4);

		user_regs = ((char *)note32 + len)
			- SIZE(user_regs_struct) - sizeof(int);
		ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp));
		esp = ULONG(user_regs + OFFSET(user_regs_struct_esp));
check_ebp_esp:
		if (CRASHDEBUG(1))
			error(INFO,
		    "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n",
				esp, ebp);
		/* Try esp first, then ebp; match against active tasks. */
		if (IS_KVADDR(esp)) {
			task = stkptr_to_task(esp);
			if (CRASHDEBUG(1))
				error(INFO,
			    "get_netdump_panic_task: esp: %lx -> task: %lx\n",
					esp, task);
			for (i = 0; task && (i < NR_CPUS); i++) {
				if (task == tt->active_set[i])
					return task;
			}
		}
		if (IS_KVADDR(ebp)) {
			task = stkptr_to_task(ebp);
			if (CRASHDEBUG(1))
				error(INFO,
			    "get_netdump_panic_task: ebp: %lx -> task: %lx\n",
					ebp, task);
			for (i = 0; task && (i < NR_CPUS); i++) {
				if (task == tt->active_set[i])
					return task;
			}
		}
	} else if (nd->elf64) {
		Elf64_Nhdr *note64 = NULL;

		if (nd->num_prstatus_notes > 1) {
			if (crashing_cpu != -1)
				note64 = (Elf64_Nhdr *)
					nd->nt_prstatus_percpu[crashing_cpu];
		} else
			note64 = (Elf64_Nhdr *)nd->nt_prstatus;

		if (!note64)
			goto panic_task_undetermined;

		len = sizeof(Elf64_Nhdr);
		len = roundup(len + note64->n_namesz, 4);
		user_regs = (char *)((char *)note64 + len +
			MEMBER_OFFSET("elf_prstatus", "pr_reg"));

		if (nd->elf64->e_machine == EM_386) {
			ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp));
			esp = ULONG(user_regs + OFFSET(user_regs_struct_esp));
			goto check_ebp_esp;
		}

		if (nd->elf64->e_machine == EM_PPC64) {
			/*
			 * Get the GPR1 register value.
			 */
			esp = *(ulong *)((char *)user_regs + 8);
			if (CRASHDEBUG(1))
				error(INFO,
			    "get_netdump_panic_task: NT_PRSTATUS esp: %lx\n",
					esp);
			if (IS_KVADDR(esp)) {
				task = stkptr_to_task(esp);
				if (CRASHDEBUG(1))
					error(INFO,
			    "get_netdump_panic_task: esp: %lx -> task: %lx\n",
						esp, task);
				for (i = 0; task && (i < NR_CPUS); i++) {
					if (task == tt->active_set[i])
						return task;
				}
			}
		}

		if (nd->elf64->e_machine == EM_X86_64) {
			/* NOTE(review): "<= kt->cpus" permits index
			   kt->cpus into active_set[] -- looks like an
			   off-by-one; confirm against kt->cpus semantics. */
			if ((crashing_cpu != -1) &&
			    (crashing_cpu <= kt->cpus))
				return (tt->active_set[crashing_cpu]);
		}
	}

panic_task_undetermined:

	if (CRASHDEBUG(1))
		error(INFO, "get_netdump_panic_task: failed\n");

	return NO_TASK;
#endif
}

/*
 * Get the switch_stack address of the passed-in task.  Currently only
 * the panicking task reports its switch-stack address.
 */
ulong
get_netdump_switch_stack(ulong task)
{
#ifdef DAEMON
	if (nd->task_struct == task)
		return nd->switch_stack;
	return 0;
#else
	if (!VMCORE_VALID() || !get_active_set())
		return 0;

	if (nd->task_struct == task)
		return nd->switch_stack;

	return 0;
#endif
}

/*
 * Dump the vmcore_data contents (and the embedded ELF structures)
 * to the passed-in FILE, temporarily redirecting netdump_print().
 */
int
netdump_memory_dump(FILE *fp)
{
	int i, others, wrap, flen;
	size_t len, tot;
	FILE *fpsave;
	Elf32_Off offset32;
	/* NOTE(review): declared Elf32_Off but used for 64-bit note
	   offsets below; is_netdump() uses Elf64_Off -- confirm. */
	Elf32_Off offset64;
	struct pt_load_segment *pls;

	if (!VMCORE_VALID())
		return FALSE;

	fpsave = nd->ofp;
	nd->ofp = fp;

	if (FLAT_FORMAT())
		dump_flat_header(nd->ofp);

	netdump_print("vmcore_data: \n");
	netdump_print(" flags: %lx (", nd->flags);
	others = 0;
	if (nd->flags & NETDUMP_LOCAL)
		netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : "");
	if (nd->flags & KDUMP_LOCAL)
		netdump_print("%sKDUMP_LOCAL", others++ ? "|" : "");
	if (nd->flags & NETDUMP_REMOTE)
		netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : "");
	if (nd->flags & NETDUMP_ELF32)
		netdump_print("%sNETDUMP_ELF32", others++ ? "|" : "");
	if (nd->flags & NETDUMP_ELF64)
		netdump_print("%sNETDUMP_ELF64", others++ ? "|" : "");
	if (nd->flags & KDUMP_ELF32)
		netdump_print("%sKDUMP_ELF32", others++ ? "|" : "");
	if (nd->flags & KDUMP_ELF64)
		netdump_print("%sKDUMP_ELF64", others++ ? "|" : "");
	if (nd->flags & PARTIAL_DUMP)
		netdump_print("%sPARTIAL_DUMP", others++ ? "|" : "");
	if (nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP)
		netdump_print("%sQEMU_MEM_DUMP_KDUMP_BACKUP",
			others++ ? "|" : "");
	netdump_print(") %s\n", FLAT_FORMAT() ? "[FLAT]" : "");

	if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) {
		int dump_level;

		if (readmem(symbol_value("dump_level"), KVADDR, &dump_level,
		    sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) {
			netdump_print(" dump_level: %d (0x%x) %s",
				dump_level, dump_level,
				dump_level > 0 ? "(" : "");

#define DUMP_EXCLUDE_CACHE 0x00000001   /* Exclude LRU & SwapCache pages*/
#define DUMP_EXCLUDE_CLEAN 0x00000002   /* Exclude all-zero pages */
#define DUMP_EXCLUDE_FREE 0x00000004   /* Exclude free pages */
#define DUMP_EXCLUDE_ANON 0x00000008   /* Exclude Anon pages */
#define DUMP_SAVE_PRIVATE 0x00000010   /* Save private pages */

			others = 0;
			if (dump_level & DUMP_EXCLUDE_CACHE)
				netdump_print("%sDUMP_EXCLUDE_CACHE",
					others++ ? "|" : "");
			if (dump_level & DUMP_EXCLUDE_CLEAN)
				netdump_print("%sDUMP_EXCLUDE_CLEAN",
					others++ ? "|" : "");
			if (dump_level & DUMP_EXCLUDE_FREE)
				netdump_print("%sDUMP_EXCLUDE_FREE",
					others++ ? "|" : "");
			if (dump_level & DUMP_EXCLUDE_ANON)
				netdump_print("%sDUMP_EXCLUDE_ANON",
					others++ ? "|" : "");
			if (dump_level & DUMP_SAVE_PRIVATE)
				netdump_print("%sDUMP_SAVE_PRIVATE",
					others++ ? "|" : "");
			netdump_print("%s\n", dump_level > 0 ? ")" : "");
		} else
			netdump_print(" dump_level: (unknown)\n");
	} else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level"))
		netdump_print(" dump_level: (undetermined)\n");

	netdump_print(" ndfd: %d\n", nd->ndfd);
	netdump_print(" ofp: %lx\n", nd->ofp);
	netdump_print(" header_size: %d\n", nd->header_size);
	netdump_print(" num_pt_load_segments: %d\n", nd->num_pt_load_segments);
	for (i = 0; i < nd->num_pt_load_segments; i++) {
		pls = &nd->pt_load_segments[i];
		netdump_print(" pt_load_segment[%d]:\n", i);
		netdump_print(" file_offset: %lx\n", pls->file_offset);
		netdump_print(" phys_start: %llx\n", pls->phys_start);
		netdump_print(" phys_end: %llx\n", pls->phys_end);
		netdump_print(" zero_fill: %llx\n", pls->zero_fill);
	}
	netdump_print(" elf_header: %lx\n", nd->elf_header);
	netdump_print(" elf32: %lx\n", nd->elf32);
	netdump_print(" notes32: %lx\n", nd->notes32);
	netdump_print(" load32: %lx\n", nd->load32);
	netdump_print(" elf64: %lx\n", nd->elf64);
	netdump_print(" notes64: %lx\n", nd->notes64);
	netdump_print(" load64: %lx\n", nd->load64);
	netdump_print(" sect0_64: %lx\n", nd->sect0_64);
	netdump_print(" nt_prstatus: %lx\n", nd->nt_prstatus);
	netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo);
	netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct);
	netdump_print(" task_struct: %lx\n", nd->task_struct);
	netdump_print(" arch_data1: ");
	if (nd->arch_data1) {
		/* arch_data1 meaning is per-architecture */
		if (machine_type("X86_64"))
			netdump_print("%lx (relocate)\n", nd->arch_data1);
		else if (machine_type("ARM64"))
			netdump_print("%lx (kimage_voffset)\n",
				nd->arch_data1);
	} else
		netdump_print("(unused)\n");
	netdump_print(" arch_data2: ");
	if (nd->arch_data2) {
		if (machine_type("ARM64"))
			netdump_print("%016lx\n"
			    " CONFIG_ARM64_VA_BITS: %ld\n"
			    " VA_BITS_ACTUAL: %lld\n",
				nd->arch_data2, nd->arch_data2 & 0xffffffff,
				((ulonglong)nd->arch_data2 >> 32));
		else
			netdump_print("%016lx (?)\n", nd->arch_data2);
	} else
		netdump_print("(unused)\n");
	netdump_print(" switch_stack: %lx\n", nd->switch_stack);
	netdump_print(" page_size: %d\n", nd->page_size);
	dump_xen_kdump_data(fp);
	netdump_print(" num_prstatus_notes: %d\n", nd->num_prstatus_notes);
	netdump_print(" num_qemu_notes: %d\n", nd->num_qemu_notes);
	netdump_print(" vmcoreinfo: %lx\n", (ulong)nd->vmcoreinfo);
	netdump_print(" size_vmcoreinfo: %d\n", nd->size_vmcoreinfo);
	netdump_print(" nt_prstatus_percpu: ");
	/* wrap: pointers per line; flen: hex field width */
	wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4;
	flen = sizeof(void *) == SIZEOF_32BIT ? 8 : 16;
	if (nd->num_prstatus_notes == 1)
		netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]);
	else {
		for (i = 0; i < nd->num_prstatus_notes; i++) {
			if ((i % wrap) == 0)
				netdump_print("\n ");
			netdump_print("%.*lx ", flen,
				nd->nt_prstatus_percpu[i]);
		}
	}
	netdump_print("\n");
	netdump_print(" nt_qemu_percpu: ");
	if (nd->num_qemu_notes == 1)
		netdump_print("%.*lx\n", flen, nd->nt_qemu_percpu[0]);
	else {
		for (i = 0; i < nd->num_qemu_notes; i++) {
			if ((i % wrap) == 0)
				netdump_print("\n ");
			netdump_print("%.*lx ", flen,
				nd->nt_qemu_percpu[i]);
		}
	}
	netdump_print("\n");
	netdump_print(" backup_src_start: %llx\n", nd->backup_src_start);
	netdump_print(" backup_src_size: %lx\n", nd->backup_src_size);
	netdump_print(" backup_offset: %llx\n", nd->backup_offset);
	netdump_print("\n");

	/* Now dump the embedded ELF structures themselves. */
	switch (DUMPFILE_FORMAT(nd->flags))
	{
	case NETDUMP_ELF32:
	case KDUMP_ELF32:
		dump_Elf32_Ehdr(nd->elf32);
		dump_Elf32_Phdr(nd->notes32, ELFREAD);
		for (i = 0; i < nd->num_pt_load_segments; i++)
			dump_Elf32_Phdr(nd->load32 + i, ELFREAD);
		offset32 = nd->notes32->p_offset;
		for (tot = 0; tot < nd->notes32->p_filesz; tot += len) {
			if (!(len = dump_Elf32_Nhdr(offset32, ELFREAD)))
				break;
			offset32 += len;
		}
		break;

	case NETDUMP_ELF64:
	case KDUMP_ELF64:
		dump_Elf64_Ehdr(nd->elf64);
		dump_Elf64_Phdr(nd->notes64, ELFREAD);
		for (i = 0; i < nd->num_pt_load_segments; i++)
			dump_Elf64_Phdr(nd->load64 + i, ELFREAD);
		if (nd->sect0_64)
			dump_Elf64_Shdr(nd->sect0_64);
		offset64 = nd->notes64->p_offset;
		for (tot = 0; tot < nd->notes64->p_filesz; tot += len) {
			if (!(len = dump_Elf64_Nhdr(offset64, ELFREAD)))
				break;
			offset64 += len;
		}
		break;
	}

	dump_ramdump_data();

	nd->ofp = fpsave;
	return TRUE;
}

/*
 * Dump an ELF file header.
 */
static void
dump_Elf32_Ehdr(Elf32_Ehdr *elf)
{
	char buf[BUFSIZE];

	BZERO(buf, BUFSIZE);
	BCOPY(elf->e_ident, buf, SELFMAG);
	netdump_print("Elf32_Ehdr:\n");
	netdump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]);
	netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
	switch (elf->e_ident[EI_CLASS])
	{
	case ELFCLASSNONE:
		netdump_print("(ELFCLASSNONE)");
		break;
	case ELFCLASS32:
		netdump_print("(ELFCLASS32)\n");
		break;
	case ELFCLASS64:
		netdump_print("(ELFCLASS64)\n");
		break;
	case ELFCLASSNUM:
		netdump_print("(ELFCLASSNUM)\n");
		break;
	default:
		netdump_print("(?)\n");
		break;
	}
	netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
	switch (elf->e_ident[EI_DATA])
	{
	case ELFDATANONE:
		netdump_print("(ELFDATANONE)\n");
		break;
	case ELFDATA2LSB:
		netdump_print("(ELFDATA2LSB)\n");
		break;
	case ELFDATA2MSB:
		netdump_print("(ELFDATA2MSB)\n");
		break;
	case ELFDATANUM:
		netdump_print("(ELFDATANUM)\n");
		break;
	default:
		netdump_print("(?)\n");
	}
	netdump_print(" e_ident[EI_VERSION]: %d ",
		elf->e_ident[EI_VERSION]);
	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
		netdump_print("(EV_CURRENT)\n");
	else
		netdump_print("(?)\n");
	netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
	switch (elf->e_ident[EI_OSABI])
	{
	case ELFOSABI_SYSV:
		netdump_print("(ELFOSABI_SYSV)\n");
		break;
	case ELFOSABI_HPUX:
		netdump_print("(ELFOSABI_HPUX)\n");
		break;
	case ELFOSABI_ARM:
		netdump_print("(ELFOSABI_ARM)\n");
		break;
	case ELFOSABI_STANDALONE:
		netdump_print("(ELFOSABI_STANDALONE)\n");
		break;
	case ELFOSABI_LINUX:
		netdump_print("(ELFOSABI_LINUX)\n");
		break;
	default:
		netdump_print("(?)\n");
	}
	netdump_print(" e_ident[EI_ABIVERSION]: %d\n",
		elf->e_ident[EI_ABIVERSION]);
	netdump_print(" e_type: %d ", elf->e_type);
	switch (elf->e_type)
	{
	case ET_NONE:
		netdump_print("(ET_NONE)\n");
		break;
	case ET_REL:
		netdump_print("(ET_REL)\n");
		break;
	case ET_EXEC:
		netdump_print("(ET_EXEC)\n");
		break;
	case ET_DYN:
		netdump_print("(ET_DYN)\n");
		break;
	case ET_CORE:
		netdump_print("(ET_CORE)\n");
		break;
	case ET_NUM:
		netdump_print("(ET_NUM)\n");
		break;
	case ET_LOOS:
		netdump_print("(ET_LOOS)\n");
		break;
	case ET_HIOS:
		netdump_print("(ET_HIOS)\n");
		break;
	case ET_LOPROC:
		netdump_print("(ET_LOPROC)\n");
		break;
	case ET_HIPROC:
		netdump_print("(ET_HIPROC)\n");
		break;
	default:
		netdump_print("(?)\n");
	}
	netdump_print(" e_machine: %d ", elf->e_machine);
	switch (elf->e_machine)
	{
	case EM_ARM:
		netdump_print("(EM_ARM)\n");
		break;
	case EM_386:
		netdump_print("(EM_386)\n");
		break;
	case EM_MIPS:
		netdump_print("(EM_MIPS)\n");
		break;
	case EM_LOONGARCH:
		netdump_print("(EM_LOONGARCH)\n");
		break;
	default:
		netdump_print("(unsupported)\n");
		break;
	}
	netdump_print(" e_version: %ld ", elf->e_version);
	netdump_print("%s\n", elf->e_version == EV_CURRENT ?
		"(EV_CURRENT)" : "");
	netdump_print(" e_entry: %lx\n", elf->e_entry);
	netdump_print(" e_phoff: %lx\n", elf->e_phoff);
	netdump_print(" e_shoff: %lx\n", elf->e_shoff);
	netdump_print(" e_flags: %lx\n", elf->e_flags);
	/* makedumpfile marks truncated dumps via e_flags */
	if ((elf->e_flags & DUMP_ELF_INCOMPLETE) &&
	    (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32))
		pc->flags2 |= INCOMPLETE_DUMP;
	netdump_print(" e_ehsize: %x\n", elf->e_ehsize);
	netdump_print(" e_phentsize: %x\n", elf->e_phentsize);
	netdump_print(" e_phnum: %x\n", elf->e_phnum);
	netdump_print(" e_shentsize: %x\n", elf->e_shentsize);
	netdump_print(" e_shnum: %x\n", elf->e_shnum);
	netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx);
}

/*
 * Dump a 64-bit ELF file header (mirrors dump_Elf32_Ehdr).
 */
static void
dump_Elf64_Ehdr(Elf64_Ehdr *elf)
{
	char buf[BUFSIZE];

	BZERO(buf, BUFSIZE);
	BCOPY(elf->e_ident, buf, SELFMAG);
	netdump_print("Elf64_Ehdr:\n");
	netdump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]);
	netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
	switch (elf->e_ident[EI_CLASS])
	{
	case ELFCLASSNONE:
		netdump_print("(ELFCLASSNONE)");
		break;
	case ELFCLASS32:
		netdump_print("(ELFCLASS32)\n");
		break;
	case ELFCLASS64:
		netdump_print("(ELFCLASS64)\n");
		break;
	case ELFCLASSNUM:
		netdump_print("(ELFCLASSNUM)\n");
		break;
	default:
		netdump_print("(?)\n");
		break;
	}
	netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
	switch (elf->e_ident[EI_DATA])
	{
	case ELFDATANONE:
		netdump_print("(ELFDATANONE)\n");
		break;
	case ELFDATA2LSB:
		netdump_print("(ELFDATA2LSB)\n");
		break;
	case ELFDATA2MSB:
		netdump_print("(ELFDATA2MSB)\n");
		break;
	case ELFDATANUM:
		netdump_print("(ELFDATANUM)\n");
		break;
	default:
		netdump_print("(?)\n");
	}
	netdump_print(" e_ident[EI_VERSION]: %d ",
		elf->e_ident[EI_VERSION]);
	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
		netdump_print("(EV_CURRENT)\n");
	else
		netdump_print("(?)\n");
	netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
	switch (elf->e_ident[EI_OSABI])
	{
	case ELFOSABI_SYSV:
		netdump_print("(ELFOSABI_SYSV)\n");
		break;
	case ELFOSABI_HPUX:
		netdump_print("(ELFOSABI_HPUX)\n");
		break;
	case ELFOSABI_ARM:
		netdump_print("(ELFOSABI_ARM)\n");
		break;
	case ELFOSABI_STANDALONE:
		netdump_print("(ELFOSABI_STANDALONE)\n");
		break;
	case ELFOSABI_LINUX:
		netdump_print("(ELFOSABI_LINUX)\n");
		break;
	default:
		netdump_print("(?)\n");
	}
	netdump_print(" e_ident[EI_ABIVERSION]: %d\n",
		elf->e_ident[EI_ABIVERSION]);
	netdump_print(" e_type: %d ", elf->e_type);
	switch (elf->e_type)
	{
	case ET_NONE:
		netdump_print("(ET_NONE)\n");
		break;
	case ET_REL:
		netdump_print("(ET_REL)\n");
		break;
	case ET_EXEC:
		netdump_print("(ET_EXEC)\n");
		break;
	case ET_DYN:
		netdump_print("(ET_DYN)\n");
		break;
	case ET_CORE:
		netdump_print("(ET_CORE)\n");
		break;
	case ET_NUM:
		netdump_print("(ET_NUM)\n");
		break;
	case ET_LOOS:
		netdump_print("(ET_LOOS)\n");
		break;
	case ET_HIOS:
		netdump_print("(ET_HIOS)\n");
		break;
	case ET_LOPROC:
		netdump_print("(ET_LOPROC)\n");
		break;
	case ET_HIPROC:
		netdump_print("(ET_HIPROC)\n");
		break;
	default:
		netdump_print("(?)\n");
	}
	netdump_print(" e_machine: %d ", elf->e_machine);
	switch (elf->e_machine)
	{
	case EM_386:
		netdump_print("(EM_386)\n");
		break;
	case EM_IA_64:
		netdump_print("(EM_IA_64)\n");
		break;
	case EM_PPC64:
		netdump_print("(EM_PPC64)\n");
		break;
	case EM_X86_64:
		netdump_print("(EM_X86_64)\n");
		break;
	case EM_S390:
		netdump_print("(EM_S390)\n");
		break;
	case EM_ARM:
		netdump_print("(EM_ARM)\n");
		break;
	case EM_AARCH64:
		netdump_print("(EM_AARCH64)\n");
		break;
	case EM_LOONGARCH:
		netdump_print("(EM_LOONGARCH)\n");
		break;
	default:
		netdump_print("(unsupported)\n");
		break;
	}
	netdump_print(" e_version: %ld ", elf->e_version);
	netdump_print("%s\n", elf->e_version == EV_CURRENT ?
		"(EV_CURRENT)" : "");
	netdump_print(" e_entry: %lx\n", elf->e_entry);
	netdump_print(" e_phoff: %lx\n", elf->e_phoff);
	netdump_print(" e_shoff: %lx\n", elf->e_shoff);
	netdump_print(" e_flags: %lx\n", elf->e_flags);
	/* makedumpfile marks truncated dumps via e_flags */
	if ((elf->e_flags & DUMP_ELF_INCOMPLETE) &&
	    (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64))
		pc->flags2 |= INCOMPLETE_DUMP;
	netdump_print(" e_ehsize: %x\n", elf->e_ehsize);
	netdump_print(" e_phentsize: %x\n", elf->e_phentsize);
	netdump_print(" e_phnum: %x\n", elf->e_phnum);
	netdump_print(" e_shentsize: %x\n", elf->e_shentsize);
	netdump_print(" e_shnum: %x\n", elf->e_shnum);
	netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx);
}

/*
 * Dump a program segment header
 */
static void
dump_Elf32_Phdr(Elf32_Phdr *prog, int store_pt_load_data)
{
	int others;
	struct pt_load_segment *pls;

	/* Sanity: the Phdr must lie within the cached header region. */
	if ((char *)prog > (nd->elf_header + nd->header_size))
		error(FATAL,
		    "Elf32_Phdr pointer: %lx ELF header end: %lx\n\n",
			(char *)prog, nd->elf_header + nd->header_size);

	if (store_pt_load_data)
		pls = &nd->pt_load_segments[store_pt_load_data-1];
	else
		pls = NULL;

	netdump_print("Elf32_Phdr:\n");
	netdump_print(" p_type: %lx ", prog->p_type);
	switch (prog->p_type)
	{
	case PT_NULL:
		netdump_print("(PT_NULL)\n");
		break;
	case PT_LOAD:
		netdump_print("(PT_LOAD)\n");
		break;
	case PT_DYNAMIC:
		netdump_print("(PT_DYNAMIC)\n");
		break;
	case PT_INTERP:
		netdump_print("(PT_INTERP)\n");
		break;
	case PT_NOTE:
		netdump_print("(PT_NOTE)\n");
		break;
	case PT_SHLIB:
		netdump_print("(PT_SHLIB)\n");
		break;
	case PT_PHDR:
		netdump_print("(PT_PHDR)\n");
		break;
	case
PT_NUM: netdump_print("(PT_NUM)\n"); break; case PT_LOOS: netdump_print("(PT_LOOS)\n"); break; case PT_HIOS: netdump_print("(PT_HIOS)\n"); break; case PT_LOPROC: netdump_print("(PT_LOPROC)\n"); break; case PT_HIPROC: netdump_print("(PT_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" p_offset: %ld (%lx)\n", prog->p_offset, prog->p_offset); if (store_pt_load_data) pls->file_offset = prog->p_offset; netdump_print(" p_vaddr: %lx\n", prog->p_vaddr); netdump_print(" p_paddr: %lx\n", prog->p_paddr); if (store_pt_load_data) pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, prog->p_filesz); if (store_pt_load_data) { pls->phys_end = pls->phys_start + prog->p_filesz; pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 0 : pls->phys_start + prog->p_memsz; } netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; if (prog->p_flags & PF_X) netdump_print("PF_X", others++); if (prog->p_flags & PF_W) netdump_print("%sPF_W", others++ ? "|" : ""); if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? 
"|" : ""); netdump_print(")\n"); netdump_print(" p_align: %ld\n", prog->p_align); } static void dump_Elf64_Phdr(Elf64_Phdr *prog, int store_pt_load_data) { int others; struct pt_load_segment *pls; if (store_pt_load_data) pls = &nd->pt_load_segments[store_pt_load_data-1]; else pls = NULL; if ((char *)prog > (nd->elf_header + nd->header_size)) error(FATAL, "Elf64_Phdr pointer: %lx ELF header end: %lx\n\n", (char *)prog, nd->elf_header + nd->header_size); netdump_print("Elf64_Phdr:\n"); netdump_print(" p_type: %lx ", prog->p_type); switch (prog->p_type) { case PT_NULL: netdump_print("(PT_NULL)\n"); break; case PT_LOAD: netdump_print("(PT_LOAD)\n"); break; case PT_DYNAMIC: netdump_print("(PT_DYNAMIC)\n"); break; case PT_INTERP: netdump_print("(PT_INTERP)\n"); break; case PT_NOTE: netdump_print("(PT_NOTE)\n"); break; case PT_SHLIB: netdump_print("(PT_SHLIB)\n"); break; case PT_PHDR: netdump_print("(PT_PHDR)\n"); break; case PT_NUM: netdump_print("(PT_NUM)\n"); break; case PT_LOOS: netdump_print("(PT_LOOS)\n"); break; case PT_HIOS: netdump_print("(PT_HIOS)\n"); break; case PT_LOPROC: netdump_print("(PT_LOPROC)\n"); break; case PT_HIPROC: netdump_print("(PT_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" p_offset: %lld (%llx)\n", prog->p_offset, prog->p_offset); if (store_pt_load_data) pls->file_offset = prog->p_offset; netdump_print(" p_vaddr: %llx\n", prog->p_vaddr); netdump_print(" p_paddr: %llx\n", prog->p_paddr); if (store_pt_load_data) pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %llu (%llx)\n", prog->p_filesz, prog->p_filesz); if (store_pt_load_data) { pls->phys_end = pls->phys_start + prog->p_filesz; pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 
0 : pls->phys_start + prog->p_memsz; } netdump_print(" p_memsz: %llu (%llx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; if (prog->p_flags & PF_X) netdump_print("PF_X", others++); if (prog->p_flags & PF_W) netdump_print("%sPF_W", others++ ? "|" : ""); if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? "|" : ""); netdump_print(")\n"); netdump_print(" p_align: %lld\n", prog->p_align); } static void dump_Elf64_Shdr(Elf64_Shdr *shdr) { netdump_print("Elf64_Shdr:\n"); netdump_print(" sh_name: %x\n", shdr->sh_name); netdump_print(" sh_type: %x ", shdr->sh_type); switch (shdr->sh_type) { case SHT_NULL: netdump_print("(SHT_NULL)\n"); break; default: netdump_print("\n"); break; } netdump_print(" sh_flags: %lx\n", shdr->sh_flags); netdump_print(" sh_addr: %lx\n", shdr->sh_addr); netdump_print(" sh_offset: %lx\n", shdr->sh_offset); netdump_print(" sh_size: %lx\n", shdr->sh_size); netdump_print(" sh_link: %x\n", shdr->sh_link); netdump_print(" sh_info: %x (%u)\n", shdr->sh_info, shdr->sh_info); netdump_print(" sh_addralign: %lx\n", shdr->sh_addralign); netdump_print(" sh_entsize: %lx\n", shdr->sh_entsize); } /* * VMCOREINFO * * This is a ELF note intented for makedumpfile that is exported by the * kernel that crashes and presented as ELF note to the /proc/vmcore * of the panic kernel. */ #define VMCOREINFO_NOTE_NAME "VMCOREINFO" #define VMCOREINFO_NOTE_NAME_BYTES (sizeof(VMCOREINFO_NOTE_NAME)) /* * Reads a string value from VMCOREINFO. * * Returns a string (that has to be freed by the caller) that contains the * value for key or NULL if the key has not been found. */ static char * vmcoreinfo_read_string(const char *key) { int i, j, end; size_t value_length; size_t key_length = strlen(key); char *vmcoreinfo; uint size_vmcoreinfo; char *value = NULL; /* * Borrow this function for ELF vmcores created by the snap.so * extension module, where arch-specific data may be passed in * the NT_TASKSTRUCT note. 
*/ if ((pc->flags2 & SNAP)) { if (STREQ(key, "NUMBER(kimage_voffset)") && nd->arch_data1) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%lx", nd->arch_data1); if (nd->arch_data2 == 0) pc->read_vmcoreinfo = no_vmcoreinfo; return value; } if (STREQ(key, "NUMBER(VA_BITS)") && nd->arch_data2) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%ld", nd->arch_data2 & 0xffffffff); return value; } if ((STREQ(key, "NUMBER(TCR_EL1_T1SZ)") || STREQ(key, "NUMBER(tcr_el1_t1sz)")) && nd->arch_data2) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%lld", ((ulonglong)nd->arch_data2 >> 32) & 0xffffffff); pc->read_vmcoreinfo = no_vmcoreinfo; return value; } if (STREQ(key, "relocate") && nd->arch_data1) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%lx", nd->arch_data1); pc->read_vmcoreinfo = no_vmcoreinfo; return value; } return NULL; } if (nd->vmcoreinfo) { vmcoreinfo = (char *)nd->vmcoreinfo; size_vmcoreinfo = nd->size_vmcoreinfo; } else if (ACTIVE() && pkd->vmcoreinfo) { vmcoreinfo = (char *)pkd->vmcoreinfo; size_vmcoreinfo = pkd->size_vmcoreinfo; } else { vmcoreinfo = NULL; size_vmcoreinfo = 0; } if (!vmcoreinfo) return NULL; /* the '+ 1' is the equal sign */ for (i = 0; i < (int)(size_vmcoreinfo - key_length + 1); i++) { /* * We must also check if we're at the beginning of VMCOREINFO * or the separating newline is there, and of course if we * have a equal sign after the key. */ if ((strncmp(vmcoreinfo+i, key, key_length) == 0) && (i == 0 || vmcoreinfo[i-1] == '\n') && (vmcoreinfo[i+key_length] == '=')) { end = -1; /* Found -- search for the next newline. */ for (j = i + key_length + 1; j < size_vmcoreinfo; j++) { if (vmcoreinfo[j] == '\n') { end = j; break; } } /* * If we didn't find an end, we assume it's the end * of VMCOREINFO data. */ if (end == -1) { /* Point after the end. 
*/ end = size_vmcoreinfo + 1; } value_length = end - (1+ i + key_length); value = calloc(value_length+1, sizeof(char)); if (value) strncpy(value, vmcoreinfo + i + key_length + 1, value_length); break; } } return value; } /* * Reads an integer value from VMCOREINFO. */ static long vmcoreinfo_read_integer(const char *key, long default_value) { char *string; long retval = default_value; string = vmcoreinfo_read_string(key); if (string) { retval = atol(string); free(string); } return retval; } void display_vmcoredd_note(void *ptr, FILE *ofp) { int sp; unsigned int dump_size; struct vmcoredd_header *vh; sp = VMCORE_VALID() ? 25 : 22; vh = (struct vmcoredd_header *)ptr; dump_size = vh->n_descsz - VMCOREDD_MAX_NAME_BYTES; fprintf(ofp, "%sname: \"%s\"\n", space(sp), vh->dump_name); fprintf(ofp, "%ssize: %u\n", space(sp), dump_size); } /* * Dump a note section header -- the actual data is defined by netdump */ static size_t dump_Elf32_Nhdr(Elf32_Off offset, int store) { int i, lf; Elf32_Nhdr *note; size_t len; char buf[BUFSIZE]; char *ptr; ulong *uptr; int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; uint64_t remaining, notesize; note = (Elf32_Nhdr *)((char *)nd->elf32 + offset); BZERO(buf, BUFSIZE); xen_core = vmcoreinfo = eraseinfo = qemuinfo = FALSE; ptr = (char *)note + sizeof(Elf32_Nhdr); if (ptr > (nd->elf_header + nd->header_size)) { error(WARNING, "Elf32_Nhdr pointer: %lx ELF header end: %lx\n", (char *)note, nd->elf_header + nd->header_size); return 0; } else remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { error(WARNING, "possibly corrupt Elf32_Nhdr: " "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", note->n_namesz, note->n_descsz, note->n_type, note->n_namesz || note->n_descsz || !remaining ? 
"\n" : ""); if (note->n_namesz || note->n_descsz || !remaining) return 0; } netdump_print("Elf32_Nhdr:\n"); netdump_print(" n_namesz: %ld ", note->n_namesz); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); netdump_print(" n_descsz: %ld\n", note->n_descsz); netdump_print(" n_type: %lx ", note->n_type); switch (note->n_type) { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); if (store) { if (!nd->nt_prstatus) nd->nt_prstatus = (void *)note; for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_prstatus_percpu[i]) { nd->nt_prstatus_percpu[i] = (void *)note; nd->num_prstatus_notes++; break; } } } if (machine_type("PPC") && (nd->num_prstatus_notes > 0)) pc->flags2 |= ELF_NOTES; break; case NT_PRPSINFO: netdump_print("(NT_PRPSINFO)\n"); if (store) nd->nt_prpsinfo = (void *)note; break; case NT_TASKSTRUCT: netdump_print("(NT_TASKSTRUCT)\n"); if (store) { nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); } break; case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); uptr = (ulong *)(ptr + note->n_namesz); if (*uptr && store) nd->flags |= PARTIAL_DUMP; break; #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ case NT_KDUMPINFO: netdump_print("(NT_KDUMPINFO)\n"); if (store) { uptr = (note->n_namesz == 5) ? 
(ulong *)(ptr + ((note->n_namesz + 3) & ~3)) : (ulong *)(ptr + note->n_namesz); nd->page_size = (uint)(1 << *uptr); uptr++; nd->task_struct = *uptr; } break; #endif case NT_VMCOREDD: netdump_print("(NT_VMCOREDD)\n"); if (store) { for (i = 0; i < NR_DEVICE_DUMPS; i++) { if (!nd->nt_vmcoredd_array[i]) { nd->nt_vmcoredd_array[i] = (void *)note; nd->num_vmcoredd_notes++; break; } } } break; default: xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); if (STRNEQ(buf, "VMCOREINFO_XEN")) vmcoreinfo_xen = TRUE; else vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); eraseinfo = STRNEQ(buf, "ERASEINFO"); qemuinfo = STRNEQ(buf, "QEMU"); if (xen_core) { netdump_print("(unknown Xen n_type)\n"); if (store) error(WARNING, "unknown Xen n_type: %lx\n\n", note->n_type); } else if (vmcoreinfo) { netdump_print("(unused)\n"); nd->vmcoreinfo = (char *)(ptr + note->n_namesz + 1); nd->size_vmcoreinfo = note->n_descsz; if (READ_PAGESIZE_FROM_VMCOREINFO() && store) nd->page_size = (uint) vmcoreinfo_read_integer("PAGESIZE", 0); pc->flags2 |= VMCOREINFO; } else if (eraseinfo) { netdump_print("(unused)\n"); if (note->n_descsz) pc->flags2 |= ERASEINFO_DATA; } else if (qemuinfo) { pc->flags2 |= QEMU_MEM_DUMP_ELF; netdump_print("(QEMUCPUState)\n"); } else if (vmcoreinfo_xen) netdump_print("(unused)\n"); else netdump_print("(?)\n"); break; case NT_XEN_KDUMP_CR3: netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); /* FALL THROUGH */ case XEN_ELFNOTE_CRASH_INFO: /* * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure */ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); xen_core = TRUE; if (store) process_xen_note(note->n_type, ptr + roundup(note->n_namesz, 4), note->n_descsz); break; case XEN_ELFNOTE_CRASH_REGS: /* * x86 and x86_64: cr0, cr2, cr3, cr4 */ xen_core = TRUE; netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); break; } uptr = (ulong *)(ptr + note->n_namesz); /* * kdumps are off-by-1, because their n_namesz is 5 for "CORE". 
*/ if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5)) uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3)); if (xen_core) uptr = (ulong *)roundup((ulong)uptr, 4); if (store && qemuinfo) { for(i = 0; i < NR_CPUS; i++) { if (!nd->nt_qemu_percpu[i]) { nd->nt_qemu_percpu[i] = (void *)uptr; nd->num_qemu_notes++; break; } } } if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { netdump_print(" "); ptr += note->n_namesz + 1; for (i = 0; i < note->n_descsz; i++, ptr++) { netdump_print("%c", *ptr); if (*ptr == '\n') netdump_print(" "); } lf = 0; } else if (note->n_type == NT_VMCOREDD) { if (nd->ofp) display_vmcoredd_note(note, nd->ofp); } else { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); } } for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { if (((i%4)==0)) { netdump_print("%s ", i ? 
"\n" : ""); lf++; } else lf = 0; netdump_print("%08lx ", *uptr++); } } if (!lf || (note->n_type == NT_TASKSTRUCT) || (note->n_type == NT_DISKDUMP) || xen_core) netdump_print("\n"); len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); return len; } static size_t dump_Elf64_Nhdr(Elf64_Off offset, int store) { int i = 0, lf = 0; Elf64_Nhdr *note; size_t len; char buf[BUFSIZE]; char *ptr; ulonglong *uptr; int *iptr; int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; uint64_t remaining, notesize; note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); BZERO(buf, BUFSIZE); ptr = (char *)note + sizeof(Elf64_Nhdr); xen_core = vmcoreinfo = vmcoreinfo_xen = eraseinfo = qemuinfo = FALSE; if (ptr > (nd->elf_header + nd->header_size)) { error(WARNING, "Elf64_Nhdr pointer: %lx ELF header end: %lx\n\n", (char *)note, nd->elf_header + nd->header_size); return 0; } else remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { error(WARNING, "possibly corrupt Elf64_Nhdr: " "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", note->n_namesz, note->n_descsz, note->n_type, note->n_namesz || note->n_descsz || !remaining ? 
"\n" : ""); if (note->n_namesz || note->n_descsz || !remaining) return 0; } netdump_print("Elf64_Nhdr:\n"); netdump_print(" n_namesz: %ld ", note->n_namesz); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); netdump_print(" n_descsz: %ld\n", note->n_descsz); netdump_print(" n_type: %lx ", note->n_type); switch (note->n_type) { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); if (store) { if (!nd->nt_prstatus) nd->nt_prstatus = (void *)note; for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_prstatus_percpu[i]) { nd->nt_prstatus_percpu[i] = (void *)note; nd->num_prstatus_notes++; break; } } } break; case NT_PRPSINFO: netdump_print("(NT_PRPSINFO)\n"); if (store) nd->nt_prpsinfo = (void *)note; break; case NT_FPREGSET: netdump_print("(NT_FPREGSET)\n"); break; case NT_S390_TIMER: netdump_print("(NT_S390_TIMER)\n"); break; case NT_S390_TODCMP: netdump_print("(NT_S390_TODCMP)\n"); break; case NT_S390_TODPREG: netdump_print("(NT_S390_TODPREG)\n"); break; case NT_S390_CTRS: netdump_print("(NT_S390_CTRS)\n"); break; case NT_S390_PREFIX: netdump_print("(NT_S390_PREFIX)\n"); break; case NT_S390_VXRS_LOW: netdump_print("(NT_S390_VXRS_LOW)\n"); break; case NT_S390_VXRS_HIGH: netdump_print("(NT_S390_VXRS_HIGH)\n"); break; case NT_TASKSTRUCT: netdump_print("(NT_TASKSTRUCT)\n"); if (STRNEQ(buf, "SNAP")) pc->flags2 |= (LIVE_DUMP|SNAP); if (store) { nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); if (pc->flags2 & SNAP) { if (note->n_descsz >= 16) nd->arch_data1 = *((ulong *) (ptr + note->n_namesz + sizeof(ulong))); if (note->n_descsz >= 24) nd->arch_data2 = *((ulong *) (ptr + note->n_namesz + sizeof(ulong) + sizeof(ulong))); } else if (machine_type("IA64")) nd->switch_stack = *((ulong *) (ptr + note->n_namesz + sizeof(ulong))); } break; case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); iptr = (int *)(ptr + note->n_namesz); if (*iptr && store) nd->flags |= PARTIAL_DUMP; if (note->n_descsz < sizeof(ulonglong)) netdump_print(" 
%08x", *iptr); break; #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ case NT_KDUMPINFO: netdump_print("(NT_KDUMPINFO)\n"); if (store) { uint32_t *u32ptr; if (nd->elf64->e_machine == EM_386) { u32ptr = (note->n_namesz == 5) ? (uint *)(ptr + ((note->n_namesz + 3) & ~3)) : (uint *)(ptr + note->n_namesz); nd->page_size = 1 << *u32ptr; u32ptr++; nd->task_struct = *u32ptr; } else { uptr = (note->n_namesz == 5) ? (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) : (ulonglong *)(ptr + note->n_namesz); nd->page_size = (uint)(1 << *uptr); uptr++; nd->task_struct = *uptr; } } break; #endif case NT_VMCOREDD: netdump_print("(NT_VMCOREDD)\n"); if (store) { for (i = 0; i < NR_DEVICE_DUMPS; i++) { if (!nd->nt_vmcoredd_array[i]) { nd->nt_vmcoredd_array[i] = (void *)note; nd->num_vmcoredd_notes++; break; } } } break; default: xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); if (STRNEQ(buf, "VMCOREINFO_XEN")) vmcoreinfo_xen = TRUE; else vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); eraseinfo = STRNEQ(buf, "ERASEINFO"); qemuinfo = STRNEQ(buf, "QEMU"); if (xen_core) { netdump_print("(unknown Xen n_type)\n"); if (store) error(WARNING, "unknown Xen n_type: %lx\n\n", note->n_type); } else if (vmcoreinfo) { netdump_print("(unused)\n"); nd->vmcoreinfo = (char *)nd->elf64 + offset + (sizeof(Elf64_Nhdr) + ((note->n_namesz + 3) & ~3)); nd->size_vmcoreinfo = note->n_descsz; if (READ_PAGESIZE_FROM_VMCOREINFO() && store) nd->page_size = (uint) vmcoreinfo_read_integer("PAGESIZE", 0); pc->flags2 |= VMCOREINFO; } else if (eraseinfo) { netdump_print("(unused)\n"); if (note->n_descsz) pc->flags2 |= ERASEINFO_DATA; } else if (qemuinfo) { pc->flags2 |= QEMU_MEM_DUMP_ELF; netdump_print("(QEMUCPUState)\n"); } else if (vmcoreinfo_xen) netdump_print("(unused)\n"); else netdump_print("(?)\n"); break; case NT_XEN_KDUMP_CR3: netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); /* FALL THROUGH */ case XEN_ELFNOTE_CRASH_INFO: /* 
* x86 and x86_64: p2m mfn appended to crash_xen_info_t structure */ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); xen_core = TRUE; if (store) process_xen_note(note->n_type, ptr + roundup(note->n_namesz, 4), note->n_descsz); break; case XEN_ELFNOTE_CRASH_REGS: /* * x86 and x86_64: cr0, cr2, cr3, cr4 */ xen_core = TRUE; netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); break; } if (machine_type("S390X")) { if (store) machdep->dumpfile_init(nd->num_prstatus_notes, note); uptr = (ulonglong *) ((void *)note + roundup(sizeof(*note) + note->n_namesz, 4)); } else { uptr = (ulonglong *)(ptr + note->n_namesz); /* * kdumps are off-by-1, because their n_namesz is 5 for "CORE". */ if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5)) uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)); if (xen_core) uptr = (ulonglong *)roundup((ulong)uptr, 4); } if (store && qemuinfo) { for(i=0; int_qemu_percpu[i]) { nd->nt_qemu_percpu[i] = (void *)uptr; nd->num_qemu_notes++; break; } } } if (note->n_type == NT_VMCOREDD) { if (nd->ofp) display_vmcoredd_note(note, nd->ofp); } else if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS) || qemuinfo)) { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); } } iptr = (int *)uptr; for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { if (((i%4)==0)) { netdump_print("%s ", i ? 
"\n" : ""); lf++; } else lf = 0; netdump_print("%08lx ", *iptr++); } } else if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { netdump_print(" "); ptr += note->n_namesz + 1; for (i = 0; i < note->n_descsz; i++, ptr++) { netdump_print("%c", *ptr); if (*ptr == '\n') netdump_print(" "); } lf = 0; } else if (note->n_descsz == 4) { i = 0; lf = 1; iptr = (int *)uptr; netdump_print(" %08lx\n", *iptr); } else { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86_64")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_X86_64, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_X86_64, QEMU_NOTE, note, nd->ofp); } if (machine_type("PPC64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_PPC64, PRSTATUS_NOTE, note, nd->ofp); if (machine_type("ARM64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_AARCH64, PRSTATUS_NOTE, note, nd->ofp); if (machine_type("RISCV64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_RISCV, PRSTATUS_NOTE, note, nd->ofp); } for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { if (((i%2)==0)) { netdump_print("%s ", i ? "\n" : ""); lf++; } else lf = 0; netdump_print("%016llx ", *uptr++); } } if (!lf) netdump_print("\n"); else if (i && (i&1)) netdump_print("\n"); len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); return len; } void * netdump_get_prstatus_percpu(int cpu) { int online; if ((cpu < 0) || (cpu >= nd->num_prstatus_notes)) return NULL; /* * If no cpu mapping was done, then there must be * a one-to-one relationship between the number * of online cpus and the number of notes. */ if ((online = get_cpus_online()) && (online == kt->cpus) && (online != nd->num_prstatus_notes)) return NULL; return nd->nt_prstatus_percpu[cpu]; } /* * Send the request to the proper architecture hander. 
*/ void get_netdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { int e_machine; if (nd->elf32) e_machine = nd->elf32->e_machine; else if (nd->elf64) e_machine = nd->elf64->e_machine; else e_machine = EM_NONE; switch (e_machine) { case EM_386: return get_netdump_regs_x86(bt, eip, esp); break; case EM_IA_64: /* For normal backtraces, this information will be obtained * frome the switch_stack structure, which is pointed to by * the thread.ksp field of the task_struct. But it's still * needed by the "bt -t" option. */ machdep->get_stack_frame(bt, eip, esp); break; case EM_PPC: return get_netdump_regs_ppc(bt, eip, esp); break; case EM_PPC64: return get_netdump_regs_ppc64(bt, eip, esp); break; case EM_X86_64: return get_netdump_regs_x86_64(bt, eip, esp); break; case EM_S390: machdep->get_stack_frame(bt, eip, esp); break; case EM_ARM: return get_netdump_regs_arm(bt, eip, esp); break; case EM_AARCH64: return get_netdump_regs_arm64(bt, eip, esp); break; case EM_MIPS: return get_netdump_regs_mips(bt, eip, esp); break; case EM_RISCV: get_netdump_regs_riscv(bt, eip, esp); break; case EM_LOONGARCH: return get_netdump_regs_loongarch64(bt, eip, esp); break; default: error(FATAL, "support for ELF machine type %d not available\n", e_machine); } } /* * get regs from elf note, and return the address of user_regs. 
*/ static char * get_regs_from_note(char *note, ulong *ip, ulong *sp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t len; char *user_regs; long offset_sp, offset_ip; if (machine_type("X86_64")) { note64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); offset_sp = OFFSET(user_regs_struct_rsp); offset_ip = OFFSET(user_regs_struct_rip); } else if (machine_type("X86")) { note32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); offset_sp = OFFSET(user_regs_struct_esp); offset_ip = OFFSET(user_regs_struct_eip); } else return NULL; user_regs = note + len - SIZE(user_regs_struct) - sizeof(long); *sp = ULONG(user_regs + offset_sp); *ip = ULONG(user_regs + offset_ip); return user_regs; } void display_regs_from_elf_notes(int cpu, FILE *ofp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t len; char *user_regs; int c, skipped_count; /* * Kdump NT_PRSTATUS notes are only related to online cpus, * so offline cpus should be skipped. 
*/ if (pc->flags2 & QEMU_MEM_DUMP_ELF) skipped_count = 0; else { for (c = skipped_count = 0; c < cpu; c++) { if (check_offline_cpu(c)) skipped_count++; } } if (((cpu < 0 ) || ((!nd->nt_prstatus_percpu[cpu]) && (!nd->nt_prstatus)) || (cpu - skipped_count) >= nd->num_prstatus_notes) && !machine_type("MIPS")) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } if (machine_type("X86_64")) { if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); user_regs = ((char *)note64) + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r15)), USHORT(user_regs + 
OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)) ); } else if (machine_type("X86")) { if (nd->num_prstatus_notes > 1) note32 = (Elf32_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note32 = (Elf32_Nhdr *)nd->nt_prstatus; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = ((char *)note32) + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " EAX: %08x EBX: %08x ECX: %08x EDX: %08x\n" " ESP: %08x EIP: %08x ESI: %08x EDI: %08x\n" " CS: %04x DS: %04x ES: %04x FS: %04x\n" " GS: %04x SS: %04x\n" " EBP: %08x EFLAGS: %08x\n", UINT(user_regs + OFFSET(user_regs_struct_eax)), UINT(user_regs + OFFSET(user_regs_struct_ebx)), UINT(user_regs + OFFSET(user_regs_struct_ecx)), UINT(user_regs + OFFSET(user_regs_struct_edx)), UINT(user_regs + OFFSET(user_regs_struct_esp)), UINT(user_regs + OFFSET(user_regs_struct_eip)), UINT(user_regs + OFFSET(user_regs_struct_esi)), UINT(user_regs + OFFSET(user_regs_struct_edi)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ds)), USHORT(user_regs + OFFSET(user_regs_struct_es)), USHORT(user_regs + OFFSET(user_regs_struct_fs)), USHORT(user_regs + OFFSET(user_regs_struct_gs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)), UINT(user_regs + OFFSET(user_regs_struct_ebp)), UINT(user_regs + OFFSET(user_regs_struct_eflags)) ); } else if (machine_type("PPC64")) { struct ppc64_elf_prstatus *prs; struct ppc64_pt_regs *pr; if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *)nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; prs = (struct ppc64_elf_prstatus *) ((char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz); prs = (struct ppc64_elf_prstatus *)roundup((ulong)prs, 4); pr = &prs->pr_reg; fprintf(ofp, " R0: %016lx R1: %016lx R2: %016lx\n" " R3: %016lx R4: %016lx R5: %016lx\n" " R6: %016lx R7: %016lx R8: %016lx\n" " R9: %016lx R10: %016lx R11: %016lx\n" " R12: %016lx R13: %016lx R14: 
%016lx\n" " R15: %016lx R16: %016lx R16: %016lx\n" " R18: %016lx R19: %016lx R20: %016lx\n" " R21: %016lx R22: %016lx R23: %016lx\n" " R24: %016lx R25: %016lx R26: %016lx\n" " R27: %016lx R28: %016lx R29: %016lx\n" " R30: %016lx R31: %016lx\n" " NIP: %016lx MSR: %016lx\n" " OGPR3: %016lx CTR: %016lx\n" " LINK: %016lx XER: %016lx\n" " CCR: %016lx MQ: %016lx\n" " TRAP: %016lx DAR: %016lx\n" " DSISR: %016lx RESULT: %016lx\n", pr->gpr[0], pr->gpr[1], pr->gpr[2], pr->gpr[3], pr->gpr[4], pr->gpr[5], pr->gpr[6], pr->gpr[7], pr->gpr[8], pr->gpr[9], pr->gpr[10], pr->gpr[11], pr->gpr[12], pr->gpr[13], pr->gpr[14], pr->gpr[15], pr->gpr[16], pr->gpr[17], pr->gpr[18], pr->gpr[19], pr->gpr[20], pr->gpr[21], pr->gpr[22], pr->gpr[23], pr->gpr[24], pr->gpr[25], pr->gpr[26], pr->gpr[27], pr->gpr[28], pr->gpr[29], pr->gpr[30], pr->gpr[31], pr->nip, pr->msr, pr->orig_gpr3, pr->ctr, pr->link, pr->xer, pr->ccr, pr->mq, pr->trap, pr->dar, pr->dsisr, pr->result); } else if (machine_type("ARM64")) { if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); user_regs = (char *)note64 + len - SIZE(elf_prstatus) + OFFSET(elf_prstatus_pr_reg); fprintf(ofp, " X0: %016lx X1: %016lx X2: %016lx\n" " X3: %016lx X4: %016lx X5: %016lx\n" " X6: %016lx X7: %016lx X8: %016lx\n" " X9: %016lx X10: %016lx X11: %016lx\n" " X12: %016lx X13: %016lx X14: %016lx\n" " X15: %016lx X16: %016lx X17: %016lx\n" " X18: %016lx X19: %016lx X20: %016lx\n" " X21: %016lx X22: %016lx X23: %016lx\n" " X24: %016lx X25: %016lx X26: %016lx\n" " X27: %016lx X28: %016lx X29: %016lx\n" " LR: %016lx SP: %016lx PC: %016lx\n" " PSTATE: %08lx FPVALID: %08x\n", ULONG(user_regs + sizeof(ulong) * 0), ULONG(user_regs + sizeof(ulong) * 1), ULONG(user_regs + sizeof(ulong) * 2), ULONG(user_regs + sizeof(ulong) * 3), ULONG(user_regs + sizeof(ulong) * 4), 
ULONG(user_regs + sizeof(ulong) * 5), ULONG(user_regs + sizeof(ulong) * 6), ULONG(user_regs + sizeof(ulong) * 7), ULONG(user_regs + sizeof(ulong) * 8), ULONG(user_regs + sizeof(ulong) * 9), ULONG(user_regs + sizeof(ulong) * 10), ULONG(user_regs + sizeof(ulong) * 11), ULONG(user_regs + sizeof(ulong) * 12), ULONG(user_regs + sizeof(ulong) * 13), ULONG(user_regs + sizeof(ulong) * 14), ULONG(user_regs + sizeof(ulong) * 15), ULONG(user_regs + sizeof(ulong) * 16), ULONG(user_regs + sizeof(ulong) * 17), ULONG(user_regs + sizeof(ulong) * 18), ULONG(user_regs + sizeof(ulong) * 19), ULONG(user_regs + sizeof(ulong) * 20), ULONG(user_regs + sizeof(ulong) * 21), ULONG(user_regs + sizeof(ulong) * 22), ULONG(user_regs + sizeof(ulong) * 23), ULONG(user_regs + sizeof(ulong) * 24), ULONG(user_regs + sizeof(ulong) * 25), ULONG(user_regs + sizeof(ulong) * 26), ULONG(user_regs + sizeof(ulong) * 27), ULONG(user_regs + sizeof(ulong) * 28), ULONG(user_regs + sizeof(ulong) * 29), ULONG(user_regs + sizeof(ulong) * 30), ULONG(user_regs + sizeof(ulong) * 31), ULONG(user_regs + sizeof(ulong) * 32), ULONG(user_regs + sizeof(ulong) * 33), UINT(user_regs + sizeof(ulong) * 34)); } else if (machine_type("MIPS")) { mips_display_regs_from_elf_notes(cpu, ofp); } else if (machine_type("MIPS64")) { mips64_display_regs_from_elf_notes(cpu, ofp); } else if (machine_type("RISCV64")) { riscv64_display_regs_from_elf_notes(cpu, ofp); } else if (machine_type("LOONGARCH64")) { loongarch64_display_regs_from_elf_notes(cpu, ofp); } } void dump_registers_for_elf_dumpfiles(void) { int c; if (!(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64") || machine_type("PPC64") || machine_type("MIPS") || machine_type("MIPS64") || machine_type("RISCV64") || machine_type("LOONGARCH64"))) error(FATAL, "-r option not supported for this dumpfile\n"); if (NETDUMP_DUMPFILE()) { display_regs_from_elf_notes(0, fp); return; } for (c = 0; c < kt->cpus; c++) { if (check_offline_cpu(c)) { fprintf(fp, "%sCPU %d: 
[OFFLINE]\n", c ? "\n" : "", c); continue; } fprintf(fp, "%sCPU %d:\n", c ? "\n" : "", c); display_regs_from_elf_notes(c, fp); } } struct x86_64_user_regs_struct { unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; unsigned long rip,cs,eflags; unsigned long rsp,ss; unsigned long fs_base, gs_base; unsigned long ds,es,fs,gs; }; struct x86_64_prstatus { int si_signo; int si_code; int si_errno; short cursig; unsigned long sigpend; unsigned long sighold; int pid; int ppid; int pgrp; int sid; struct timeval utime; struct timeval stime; struct timeval cutime; struct timeval cstime; struct x86_64_user_regs_struct regs; int fpvalid; }; static void display_prstatus_x86_64(void *note_ptr, FILE *ofp) { struct x86_64_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct x86_64_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct x86_64_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%sORIG_RAX: %lx fpvalid: %d\n" "%s R15: %016lx R14: %016lx\n" "%s R13: %016lx R12: %016lx\n" "%s RBP: %016lx RBX: %016lx\n" "%s R11: %016lx R10: %016lx\n" "%s R9: %016lx R8: %016lx\n" "%s RAX: %016lx RCX: %016lx\n" "%s RDX: %016lx RSI: %016lx\n" "%s RDI: %016lx RIP: %016lx\n" "%s RFLAGS: %016lx RSP: %016lx\n" "%s FS_BASE: %016lx\n" "%s GS_BASE: %016lx\n" "%s CS: %04lx SS: %04lx DS: %04lx\n" "%s ES: %04lx FS: %04lx GS: %04lx\n", space(sp), pr->si_signo, pr->si_code, pr->si_errno, space(sp), pr->cursig, pr->sigpend, pr->sighold, space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, space(sp), (long long)pr->cutime.tv_sec, 
(int)pr->cutime.tv_usec, (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, space(sp), pr->regs.orig_rax, pr->fpvalid, space(sp), pr->regs.r15, pr->regs.r14, space(sp), pr->regs.r13, pr->regs.r12, space(sp), pr->regs.rbp, pr->regs.rbx, space(sp), pr->regs.r11, pr->regs.r10, space(sp), pr->regs.r9, pr->regs.r8, space(sp), pr->regs.rax, pr->regs.rcx, space(sp), pr->regs.rdx, pr->regs.rsi, space(sp), pr->regs.rdi, pr->regs.rip, space(sp), pr->regs.eflags, pr->regs.rsp, space(sp), pr->regs.fs_base, space(sp), pr->regs.gs_base, space(sp), pr->regs.cs, pr->regs.ss, pr->regs.ds, space(sp), pr->regs.es, pr->regs.fs, pr->regs.gs); } struct x86_user_regs_struct { unsigned long ebx,ecx,edx,esi,edi,ebp,eax; unsigned long ds,es,fs,gs,orig_eax; unsigned long eip,cs,eflags; unsigned long esp,ss; }; struct x86_prstatus { int si_signo; int si_code; int si_errno; short cursig; unsigned long sigpend; unsigned long sighold; int pid; int ppid; int pgrp; int sid; struct timeval utime; struct timeval stime; struct timeval cutime; struct timeval cstime; struct x86_user_regs_struct regs; int fpvalid; }; static void display_prstatus_x86(void *note_ptr, FILE *ofp) { struct x86_prstatus *pr; Elf32_Nhdr *note; int sp; note = (Elf32_Nhdr *)note_ptr; pr = (struct x86_prstatus *)( (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); pr = (struct x86_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold : %lx\n" "%spid: %d ppid: %d pgrp: %d sid: %d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%sORIG_EAX: %lx fpvalid: %d\n" "%s EBX: %08lx ECX: %08lx\n" "%s EDX: %08lx ESI: %08lx\n" "%s EDI: %08lx EBP: %08lx\n" "%s EAX: %08lx EIP: %08lx\n" "%s EFLAGS: %08lx ESP: %08lx\n" "%s DS: %04lx ES: %04lx FS: %04lx\n" "%s GS: %04lx CS: %04lx SS: %04lx\n", space(sp), pr->si_signo, pr->si_code, pr->si_errno, space(sp), pr->cursig, pr->sigpend, pr->sighold, space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, space(sp), pr->regs.orig_eax, pr->fpvalid, space(sp), pr->regs.ebx, pr->regs.ecx, space(sp), pr->regs.edx, pr->regs.esi, space(sp), pr->regs.edi, pr->regs.ebp, space(sp), pr->regs.eax, pr->regs.eip, space(sp), pr->regs.eflags, pr->regs.esp, space(sp), pr->regs.ds, pr->regs.es, pr->regs.fs, space(sp), pr->regs.gs, pr->regs.cs, pr->regs.ss); } static void display_qemu_x86_64(void *note_ptr, FILE *ofp) { int i, sp; Elf64_Nhdr *note; QEMUCPUState *ptr; QEMUCPUSegment *seg; char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", "GDT", "IDT"}; note = (Elf64_Nhdr *)note_ptr; ptr = (QEMUCPUState *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); seg = &(ptr->cs); sp = VMCORE_VALID()? 
25 : 22; fprintf(ofp, "%sversion: %d size: %d\n" "%sRAX: %016llx RBX: %016llx\n" "%sRCX: %016llx RDX: %016llx\n" "%sRSI: %016llx RDI: %016llx\n" "%sRSP: %016llx RBP: %016llx\n" "%sRIP: %016llx RFLAGS: %016llx\n" "%s R8: %016llx R9: %016llx\n" "%sR10: %016llx R11: %016llx\n" "%sR12: %016llx R13: %016llx\n" "%sR14: %016llx R15: %016llx\n", space(sp), ptr->version, ptr->size, space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags, space(sp), (ulonglong)ptr->r8, (ulonglong)ptr->r9, space(sp), (ulonglong)ptr->r10, (ulonglong)ptr->r11, space(sp), (ulonglong)ptr->r12, (ulonglong)ptr->r13, space(sp), (ulonglong)ptr->r14, (ulonglong)ptr->r15); for (i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? "" : " "); fprintf(ofp, "%s: " "selector: %04x limit: %08x flags: %08x\n" "%spad: %08x base: %016llx\n", seg_names[i], seg->selector, seg->limit, seg->flags, space(sp+5), seg->pad, (ulonglong)seg->base); seg++; } fprintf(ofp, "%sCR0: %016llx CR1: %016llx\n" "%sCR2: %016llx CR3: %016llx\n" "%sCR4: %016llx\n", space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], space(sp), (ulonglong)ptr->cr[4]); } static void display_qemu_x86(void *note_ptr, FILE *ofp) { int i, sp; Elf32_Nhdr *note; QEMUCPUState *ptr; QEMUCPUSegment *seg; char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", "GDT", "IDT"}; note = (Elf32_Nhdr *)note_ptr; ptr = (QEMUCPUState *)( (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); seg = &(ptr->cs); sp = VMCORE_VALID()? 
25 : 22; fprintf(ofp, "%sversion: %d size: %d\n" "%sEAX: %016llx EBX: %016llx\n" "%sECX: %016llx EDX: %016llx\n" "%sESI: %016llx EDI: %016llx\n" "%sESP: %016llx EBP: %016llx\n" "%sEIP: %016llx EFLAGS: %016llx\n", space(sp), ptr->version, ptr->size, space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags); for(i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? "" : " "); fprintf(ofp, "%s: " "selector: %04x limit: %08x flags: %08x\n" "%spad: %08x base: %016llx\n", seg_names[i], seg->selector, seg->limit, seg->flags, space(sp+5), seg->pad, (ulonglong)seg->base); seg++; } fprintf(ofp, "%sCR0: %016llx CR1: %016llx\n" "%sCR2: %016llx CR3: %016llx\n" "%sCR4: %016llx\n", space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], space(sp), (ulonglong)ptr->cr[4]); } static void display_prstatus_ppc64(void *note_ptr, FILE *ofp) { struct ppc64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct ppc64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct ppc64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22;

	/* Register rows print gpr[0..31] three per line; the row for
	 * gpr[15..17] previously carried a duplicated "R16" label where
	 * the third column is in fact R17. */
	fprintf(ofp,
	    "%ssi.signo: %d si.code: %d si.errno: %d\n"
	    "%scursig: %d sigpend: %lx sighold: %lx\n"
	    "%spid: %d ppid: %d pgrp: %d sid:%d\n"
	    "%sutime: %01lld.%06d stime: %01lld.%06d\n"
	    "%scutime: %01lld.%06d cstime: %01lld.%06d\n"
	    "%s R0: %016lx R1: %016lx R2: %016lx\n"
	    "%s R3: %016lx R4: %016lx R5: %016lx\n"
	    "%s R6: %016lx R7: %016lx R8: %016lx\n"
	    "%s R9: %016lx R10: %016lx R11: %016lx\n"
	    "%sR12: %016lx R13: %016lx R14: %016lx\n"
	    "%sR15: %016lx R16: %016lx R17: %016lx\n"
	    "%sR18: %016lx R19: %016lx R20: %016lx\n"
	    "%sR21: %016lx R22: %016lx R23: %016lx\n"
	    "%sR24: %016lx R25: %016lx R26: %016lx\n"
	    "%sR27: %016lx R28: %016lx R29: %016lx\n"
	    "%sR30: %016lx R31: %016lx\n"
	    "%s NIP: %016lx MSR: %016lx\n"
	    "%sOGPR3: %016lx CTR: %016lx\n"
	    "%s LINK: %016lx XER: %016lx\n"
	    "%s CCR: %016lx MQ: %016lx\n"
	    "%s TRAP: %016lx DAR: %016lx\n"
	    "%sDSISR: %016lx RESULT: %016lx\n",
	    space(sp), pr->pr_info.si_signo, pr->pr_info.si_code,
	    pr->pr_info.si_errno,
	    space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold,
	    space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid,
	    space(sp), (long long)pr->pr_utime.tv_sec,
	    (int)pr->pr_utime.tv_usec,
	    (long long)pr->pr_stime.tv_sec,
	    (int)pr->pr_stime.tv_usec,
	    space(sp), (long long)pr->pr_cutime.tv_sec,
	    (int)pr->pr_cutime.tv_usec,
	    (long long)pr->pr_cstime.tv_sec,
	    (int)pr->pr_cstime.tv_usec,
	    space(sp), pr->pr_reg.gpr[0], pr->pr_reg.gpr[1],
	    pr->pr_reg.gpr[2],
	    space(sp), pr->pr_reg.gpr[3], pr->pr_reg.gpr[4],
	    pr->pr_reg.gpr[5],
	    space(sp), pr->pr_reg.gpr[6], pr->pr_reg.gpr[7],
	    pr->pr_reg.gpr[8],
	    space(sp), pr->pr_reg.gpr[9], pr->pr_reg.gpr[10],
	    pr->pr_reg.gpr[11],
	    space(sp), pr->pr_reg.gpr[12], pr->pr_reg.gpr[13],
	    pr->pr_reg.gpr[14],
	    space(sp), pr->pr_reg.gpr[15], pr->pr_reg.gpr[16],
	    pr->pr_reg.gpr[17],
	    space(sp), pr->pr_reg.gpr[18], pr->pr_reg.gpr[19],
	    pr->pr_reg.gpr[20],
	    space(sp), pr->pr_reg.gpr[21], pr->pr_reg.gpr[22],
	    pr->pr_reg.gpr[23],
	    space(sp), pr->pr_reg.gpr[24], pr->pr_reg.gpr[25],
	    pr->pr_reg.gpr[26],
	    space(sp), pr->pr_reg.gpr[27],
pr->pr_reg.gpr[28], pr->pr_reg.gpr[29], space(sp), pr->pr_reg.gpr[30], pr->pr_reg.gpr[31], space(sp), pr->pr_reg.nip, pr->pr_reg.msr, space(sp), pr->pr_reg.orig_gpr3, pr->pr_reg.ctr, space(sp), pr->pr_reg.link, pr->pr_reg.xer, space(sp), pr->pr_reg.ccr, pr->pr_reg.mq, space(sp), pr->pr_reg.trap, pr->pr_reg.dar, space(sp), pr->pr_reg.dsisr, pr->pr_reg.result); } struct arm64_elf_siginfo { int si_signo; int si_code; int si_errno; }; struct arm64_elf_prstatus { struct arm64_elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct timeval pr_utime; struct timeval pr_stime; struct timeval pr_cutime; struct timeval pr_cstime; /* arm64_elf_gregset_t pr_reg; -> typedef unsigned long [34] arm64_elf_gregset_t */ unsigned long pr_reg[34]; int pr_fpvalid; }; /* Note that the ARM64 elf_gregset_t includes the 31 numbered registers plus the sp, pc and pstate: typedef unsigned long [34] elf_gregset_t; struct pt_regs { union { struct user_pt_regs user_regs; struct { u64 regs[31]; u64 sp; u64 pc; u64 pstate; }; }; u64 orig_x0; u64 syscallno; } */ static void display_prstatus_arm64(void *note_ptr, FILE *ofp) { struct arm64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct arm64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct arm64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n", space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec); fprintf(ofp, "%s X0: %016lx X1: %016lx X2: %016lx\n" "%s X3: %016lx X4: %016lx X5: %016lx\n" "%s X6: %016lx X7: %016lx X8: %016lx\n" "%s X9: %016lx X10: %016lx X11: %016lx\n" "%sX12: %016lx X13: %016lx X14: %016lx\n" "%sX15: %016lx X16: %016lx X17: %016lx\n" "%sX18: %016lx X19: %016lx X20: %016lx\n" "%sX21: %016lx X22: %016lx X23: %016lx\n" "%sX24: %016lx X25: %016lx X26: %016lx\n" "%sX27: %016lx X28: %016lx X29: %016lx\n" "%s LR: %016lx SP: %016lx PC: %016lx\n" "%sPSTATE: %08lx FPVALID: %08x\n", space(sp), pr->pr_reg[0], pr->pr_reg[1], pr->pr_reg[2], space(sp), pr->pr_reg[3], pr->pr_reg[4], pr->pr_reg[5], space(sp), pr->pr_reg[6], pr->pr_reg[7], pr->pr_reg[8], space(sp), pr->pr_reg[9], pr->pr_reg[10], pr->pr_reg[11], space(sp), pr->pr_reg[12], pr->pr_reg[13], pr->pr_reg[14], space(sp), pr->pr_reg[15], pr->pr_reg[16], pr->pr_reg[17], space(sp), pr->pr_reg[18], pr->pr_reg[19], pr->pr_reg[20], space(sp), pr->pr_reg[21], pr->pr_reg[22], pr->pr_reg[23], space(sp), pr->pr_reg[24], pr->pr_reg[25], pr->pr_reg[26], space(sp), pr->pr_reg[27], pr->pr_reg[28], pr->pr_reg[29], space(sp), pr->pr_reg[30], pr->pr_reg[31], pr->pr_reg[32], space(sp), pr->pr_reg[33], pr->pr_fpvalid); } struct riscv64_elf_siginfo { int si_signo; int si_code; int si_errno; }; struct riscv64_elf_prstatus { struct 
riscv64_elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct timeval pr_utime; struct timeval pr_stime; struct timeval pr_cutime; struct timeval pr_cstime; /* elf_gregset_t pr_reg; => typedef struct user_regs_struct elf_gregset_t; */ unsigned long pr_reg[32]; int pr_fpvalid; }; static void display_prstatus_riscv64(void *note_ptr, FILE *ofp) { struct riscv64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct riscv64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct riscv64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n", space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec); fprintf(ofp, "%sepc: %016lx ra: %016lx sp: %016lx\n" "%s gp: %016lx tp: %016lx t0: %016lx\n" "%s t1: %016lx t2: %016lx s0: %016lx\n" "%s s1: %016lx a0: %016lx a1: %016lx\n" "%s a2: %016lx a3: %016lx a4: %016lx\n" "%s a5: %016lx a6: %016lx a7: %016lx\n" "%s s2: %016lx s3: %016lx s4: %016lx\n" "%s s5: %016lx s6: %016lx s7: %016lx\n" "%s s8: %016lx s9: %016lx s10: %016lx\n" "%ss11: %016lx t3: %016lx t4: %016lx\n" "%s t5: %016lx t6: %016lx\n", space(sp), pr->pr_reg[0], pr->pr_reg[1], pr->pr_reg[2], space(sp), pr->pr_reg[3], pr->pr_reg[4], pr->pr_reg[5], space(sp), pr->pr_reg[6], 
pr->pr_reg[7], pr->pr_reg[8], space(sp), pr->pr_reg[9], pr->pr_reg[10], pr->pr_reg[11], space(sp), pr->pr_reg[12], pr->pr_reg[13], pr->pr_reg[14], space(sp), pr->pr_reg[15], pr->pr_reg[16], pr->pr_reg[17], space(sp), pr->pr_reg[18], pr->pr_reg[19], pr->pr_reg[20], space(sp), pr->pr_reg[21], pr->pr_reg[22], pr->pr_reg[23], space(sp), pr->pr_reg[24], pr->pr_reg[25], pr->pr_reg[26], space(sp), pr->pr_reg[27], pr->pr_reg[28], pr->pr_reg[29], space(sp), pr->pr_reg[30], pr->pr_reg[31]); } void display_ELF_note(int machine, int type, void *note, FILE *ofp) { if (note == NULL) return; switch (machine) { case EM_386: switch (type) { case PRSTATUS_NOTE: display_prstatus_x86(note, ofp); break; case QEMU_NOTE: display_qemu_x86(note, ofp); break; } break; case EM_X86_64: switch (type) { case PRSTATUS_NOTE: display_prstatus_x86_64(note, ofp); break; case QEMU_NOTE: display_qemu_x86_64(note, ofp); break; } break; case EM_PPC64: switch (type) { case PRSTATUS_NOTE: display_prstatus_ppc64(note, ofp); break; } break; case EM_AARCH64: switch (type) { case PRSTATUS_NOTE: display_prstatus_arm64(note, ofp); break; } break; case EM_RISCV: switch (type) { case PRSTATUS_NOTE: display_prstatus_riscv64(note, ofp); break; } break; default: return; } } void get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp) { Elf64_Nhdr *note; size_t len; char *user_regs; ulong regs_size, rsp_offset, rip_offset; ulong rip, rsp; if (is_task_active(bt->task)) bt->flags |= BT_DUMPFILE_SEARCH; if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) && VALID_STRUCT(user_regs_struct) && ((bt->task == tt->panic_task) || (pc->flags2 & QEMU_MEM_DUMP_ELF))) || (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && (bt->flags & BT_DUMPFILE_SEARCH))) { if (nd->num_prstatus_notes > 1) note = (Elf64_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + 
note->n_descsz, 4); regs_size = VALID_STRUCT(user_regs_struct) ? SIZE(user_regs_struct) : sizeof(struct x86_64_user_regs_struct); rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ? OFFSET(user_regs_struct_rsp) : offsetof(struct x86_64_user_regs_struct, rsp); rip_offset = VALID_MEMBER(user_regs_struct_rip) ? OFFSET(user_regs_struct_rip) : offsetof(struct x86_64_user_regs_struct, rip); user_regs = ((char *)note + len) - regs_size - sizeof(long); rsp = ULONG(user_regs + rsp_offset); rip = ULONG(user_regs + rip_offset); if (INSTACK(rsp, bt) || in_alternate_stack(bt->tc->processor, rsp)) { if (CRASHDEBUG(1)) netdump_print("ELF prstatus rsp: %lx rip: %lx\n", rsp, rip); if (KDUMP_DUMPFILE()) { *rspp = rsp; *ripp = rip; if (*ripp && *rspp) bt->flags |= BT_KDUMP_ELF_REGS; } bt->machdep = (void *)user_regs; } } if (ELF_NOTES_VALID() && (bt->flags & BT_DUMPFILE_SEARCH) && DISKDUMP_DUMPFILE() && (note = (Elf64_Nhdr *) diskdump_get_prstatus_percpu(bt->tc->processor))) { if (!note) goto no_nt_prstatus_exists; user_regs = get_regs_from_note((char *)note, &rip, &rsp); if (INSTACK(rsp, bt) || in_alternate_stack(bt->tc->processor, rsp)) { if (CRASHDEBUG(1)) netdump_print("ELF prstatus rsp: %lx rip: %lx\n", rsp, rip); *rspp = rsp; *ripp = rip; if (*ripp && *rspp) bt->flags |= BT_KDUMP_ELF_REGS; bt->machdep = (void *)user_regs; } } no_nt_prstatus_exists: machdep->get_stack_frame(bt, ripp, rspp); } /* * Netdump doesn't save state of the active tasks in the TSS, so poke around * the raw stack for some reasonable hooks. 
*/ void get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp) { int i, search, panic, panic_task, altered; char *sym; ulong *up; ulong ipintr_eip, ipintr_esp, ipintr_func; ulong halt_eip, halt_esp, panic_eip, panic_esp; int check_hardirq, check_softirq; ulong stackbase, stacktop; Elf32_Nhdr *note; char *user_regs ATTRIBUTE_UNUSED; ulong ip, sp; if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, eip, esp); return; } panic_task = tt->panic_task == bt->task ? TRUE : FALSE; ipintr_eip = ipintr_esp = ipintr_func = panic = altered = 0; halt_eip = halt_esp = panic_eip = panic_esp = 0; check_hardirq = check_softirq = tt->flags & IRQSTACKS ? TRUE : FALSE; search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE)) || (machdep->flags & OMIT_FRAME_PTR); stackbase = bt->stackbase; stacktop = bt->stacktop; retry: for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ sym = closest_symbol(*up); if (XEN_CORE_DUMPFILE()) { if (STREQ(sym, "xen_machine_kexec")) { *eip = *up; *esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); return; } if (STREQ(sym, "crash_kexec")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } } else if (STREQ(sym, "netconsole_netdump") || STREQ(sym, "netpoll_start_netdump") || STREQ(sym, "start_disk_dump") || (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) || STREQ(sym, "disk_dump")) { crash_kexec: *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "panic")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); panic_eip = *eip; panic_esp = *esp; panic = TRUE; continue; /* keep looking for die */ } if (STREQ(sym, "die")) { *eip = *up; *esp = search ? 
bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "sysrq_handle_crash")) goto next_sysrq; } return; } if (STREQ(sym, "sysrq_handle_crash")) { next_sysrq: *eip = *up; *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); pc->flags |= SYSRQ; for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) goto crash_kexec; if (STREQ(sym, "sysrq_handle_crash")) goto next_sysrq; } if (!panic) return; } /* * Obsolete -- replaced by sysrq_handle_crash */ if (STREQ(sym, "sysrq_handle_netdump")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); pc->flags |= SYSRQ; return; } if (STREQ(sym, "crash_nmi_callback")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "stop_this_cpu")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "smp_call_function_interrupt")) { if (ipintr_eip && IS_VMALLOC_ADDR(ipintr_func) && IS_KERNEL_STATIC_TEXT(*(up - 2))) continue; ipintr_eip = *up; ipintr_esp = search ? 
bt->stackbase + ((char *)(up+1) - bt->stackbuf) : bt->stackbase + ((char *)(up-1) - bt->stackbuf); ipintr_func = *(up - 2); } if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && STREQ(sym, "safe_halt")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && !halt_eip && STREQ(sym, "xen_idle")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } } if (panic) { *eip = panic_eip; *esp = panic_esp; return; } if (ipintr_eip) { *eip = ipintr_eip; *esp = ipintr_esp; return; } if (halt_eip && halt_esp) { *eip = halt_eip; *esp = halt_esp; return; } bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ); if (check_hardirq && (tt->hardirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->hardirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_HARDIRQ; check_hardirq = FALSE; altered = TRUE; goto retry; } if (check_softirq && (tt->softirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->softirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_SOFTIRQ; check_softirq = FALSE; altered = TRUE; goto retry; } if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE() && (note = (Elf32_Nhdr *) diskdump_get_prstatus_percpu(bt->tc->processor))) { user_regs = get_regs_from_note((char *)note, &ip, &sp); if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { bt->flags |= BT_KERNEL_SPACE; *eip = ip; *esp = sp; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) { bt->flags |= BT_USER_SPACE; *eip = ip; *esp = sp; return; } } if (CRASHDEBUG(1)) error(INFO, "get_netdump_regs_x86: cannot find anything useful (task: %lx)\n", bt->task); if (altered) { bt->stackbase = stackbase; bt->stacktop = stacktop; alter_stackbuf(bt); } if (XEN_CORE_DUMPFILE() && 
!panic_task && is_task_active(bt->task) && !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS))) error(FATAL, "starting backtrace locations of the active (non-crashing) " "xen tasks\n cannot be determined: try -t or -T options\n"); if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE()) bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_32(struct bt_info *bt, ulong *eip, ulong *esp) { Elf32_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) || (is_task_active(bt->task) && nd->num_prstatus_notes)) { /* * Registers are saved during the dump process for the * panic task. Whereas in kdump, regs are captured for all * CPUs if they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { if (!nd->nt_prstatus_percpu[bt->tc->processor]) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? "panic" : "active", bt->task); note = (Elf32_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; } else note = (Elf32_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } no_nt_prstatus_exists: machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_ppc(struct bt_info *bt, ulong *eip, ulong *esp) { ppc_relocate_nt_prstatus_percpu(nd->nt_prstatus_percpu, &nd->num_prstatus_notes); get_netdump_regs_32(bt, eip, esp); } static void get_netdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) { Elf64_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) || (is_task_active(bt->task) && nd->num_prstatus_notes > 1)) { /* * Registers are saved during the dump process for the * panic task. Whereas in kdump, regs are captured for all * CPUs if they responded to an IPI. 
*/ if (nd->num_prstatus_notes > 1) { if (!nd->nt_prstatus_percpu[bt->tc->processor]) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? "panic" : "active", bt->task); note = (Elf64_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; } else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } no_nt_prstatus_exists: machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_arm(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_arm64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_mips(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_riscv(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_loongarch64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } int is_partial_netdump(void) { return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE); } /* * kexec/kdump generated vmcore files are similar enough in * nature to netdump/diskdump such that most vmcore access * functionality may be borrowed from the equivalent netdump * function. If not, re-work them here. 
*/ int is_kdump(char *file, ulong source_query) { return is_netdump(file, source_query); } int kdump_init(char *unused, FILE *fptr) { return netdump_init(unused, fptr); } ulong get_kdump_panic_task(void) { return get_netdump_panic_task(); } int read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { physaddr_t paddr_in = paddr; if ((nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) && (paddr >= nd->backup_src_start) && (paddr < nd->backup_src_start + nd->backup_src_size)) { paddr += nd->backup_offset - nd->backup_src_start; if (CRASHDEBUG(1)) error(INFO, "qemu_mem_dump: kdump backup region: %#llx => %#llx\n", paddr_in, paddr); } if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) { if (CRASHDEBUG(8)) fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): " "P2M_FAILURE\n", (ulonglong)paddr_in); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): %llx\n", (ulonglong)paddr_in, (ulonglong)paddr); } return read_netdump(fd, bufptr, cnt, addr, paddr); } int write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return write_netdump(fd, bufptr, cnt, addr, paddr); } void get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { get_netdump_regs(bt, eip, esp); } uint kdump_page_size(void) { uint pagesz; if (!VMCORE_VALID()) return 0; if (!(pagesz = nd->page_size)) pagesz = (uint)getpagesize(); return pagesz; } int kdump_free_memory(void) { return netdump_free_memory(); } int kdump_memory_used(void) { return netdump_memory_used(); } int kdump_memory_dump(FILE *fp) { return netdump_memory_dump(fp); } struct vmcore_data * get_kdump_vmcore_data(void) { if (!VMCORE_VALID() || !KDUMP_DUMPFILE()) return NULL; return &vmcore_data; } /* * The following set of functions are not used by the crash * source code, but are available to extension modules for * gathering register sets from ELF NT_PRSTATUS note sections. 
 *
 * Contributed by: Sharyathi Nagesh (sharyath@in.ibm.com)
 */

static void *get_ppc_regs_from_elf_notes(struct task_context *);
static void *get_ppc64_regs_from_elf_notes(struct task_context *);
static void *get_x86_regs_from_elf_notes(struct task_context *);
static void *get_x86_64_regs_from_elf_notes(struct task_context *);
static void *get_arm_regs_from_elf_notes(struct task_context *);

/* Report the ELF e_machine of the loaded vmcore, or EM_NONE if no
   ELF header has been mapped. */
int
get_netdump_arch(void)
{
	int e_machine;

	if (nd->elf32)
		e_machine = nd->elf32->e_machine;
	else if (nd->elf64)
		e_machine = nd->elf64->e_machine;
	else
		e_machine = EM_NONE;

	return e_machine;
}

/*
 * TRUE when a register set can be found for "tc": always for the panic
 * task, and for other active tasks only when per-CPU NT_PRSTATUS notes
 * cover the task's CPU.
 */
int
exist_regs_in_elf_notes(struct task_context *tc)
{
	if ((tc->task == tt->panic_task) ||
	    (is_task_active(tc->task) &&
	     (nd->num_prstatus_notes > 1) &&
	     (tc->processor < nd->num_prstatus_notes)))
		return TRUE;
	else
		return FALSE;
}

/*
 * Extension-module entry point: return a pointer to the pt_regs-style
 * register area saved in the task's NT_PRSTATUS note, dispatching on
 * the dumpfile's ELF machine type.  FATALs on unsupported machines or
 * when no register set exists for the task.
 */
void *
get_regs_from_elf_notes(struct task_context *tc)
{
	int e_machine = get_netdump_arch();

	/* First pass: reject unsupported architectures outright. */
	switch (e_machine) {
	case EM_386:
	case EM_PPC:
	case EM_PPC64:
	case EM_X86_64:
	case EM_ARM:
		break;
	case EM_AARCH64:
		error(FATAL,
		    "get_regs_from_elf_notes: ARM64 support TBD\n");
	default:
		error(FATAL,
		    "support for ELF machine type %d not available\n",
		    e_machine);
	}

	if (!exist_regs_in_elf_notes(tc))
		error(FATAL, "cannot determine register set "
		    "for active task: %lx comm: \"%s\"\n",
		    tc->task, tc->comm);

	/* Second pass: dispatch to the per-architecture extractor. */
	switch(e_machine) {
	case EM_386:
		return get_x86_regs_from_elf_notes(tc);
	case EM_PPC:
		return get_ppc_regs_from_elf_notes(tc);
	case EM_PPC64:
		return get_ppc64_regs_from_elf_notes(tc);
	case EM_X86_64:
		return get_x86_64_regs_from_elf_notes(tc);
	case EM_ARM:
		return get_arm_regs_from_elf_notes(tc);
	case EM_AARCH64:
		break;	/* TBD */
	}

	return NULL;
}

static void *
get_x86_regs_from_elf_notes(struct task_context *tc)
{
	Elf32_Nhdr *note_32;
	Elf64_Nhdr *note_64;
	void *note;
	size_t len;
	void *pt_regs;

	len = 0;
	pt_regs = NULL;

	if (nd->num_prstatus_notes > 1)
		note = (void *)nd->nt_prstatus_percpu[tc->processor];
	else
		note = (void *)nd->nt_prstatus;

	if (!note)
		goto no_nt_prstatus_exists;

	/* Header + aligned name size differs between ELF32 and ELF64. */
	if (nd->elf32) {
		note_32 = (Elf32_Nhdr *)note;
		len = sizeof(Elf32_Nhdr);
		len = roundup(len + note_32->n_namesz, 4);
	} else if (nd->elf64) {
		note_64 = (Elf64_Nhdr *)note;
		len = sizeof(Elf64_Nhdr);
		len = roundup(len + note_64->n_namesz, 4);
	}

	pt_regs = (void *)((char *)note + len +
		MEMBER_OFFSET("elf_prstatus", "pr_reg"));
	/* NEED TO BE FIXED: Hack to get the proper alignment */
	/* NOTE(review): arithmetic on a void * is a GCC extension. */
	pt_regs +=4;

no_nt_prstatus_exists:
	return pt_regs;
}

static void *
get_x86_64_regs_from_elf_notes(struct task_context *tc)
{
	Elf64_Nhdr *note;
	size_t len;
	void *pt_regs;

	pt_regs = NULL;

	if (nd->num_prstatus_notes > 1)
		note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor];
	else
		note = (Elf64_Nhdr *)nd->nt_prstatus;

	if (!note)
		goto no_nt_prstatus_exists;

	/* Skip the note header plus the 4-byte-aligned name to reach the
	   elf_prstatus payload, then offset to pr_reg. */
	len = sizeof(Elf64_Nhdr);
	len = roundup(len + note->n_namesz, 4);
	pt_regs = (void *)((char *)note + len +
		MEMBER_OFFSET("elf_prstatus", "pr_reg"));

no_nt_prstatus_exists:
	return pt_regs;
}

static void *
get_ppc_regs_from_elf_notes(struct task_context *tc)
{
	Elf32_Nhdr *note;
	size_t len;
	void *pt_regs;
	extern struct vmcore_data *nd;

	pt_regs = NULL;

	/*
	 * Registers are always saved during the dump process for the
	 * panic task. Kdump also captures registers for all CPUs if
	 * they responded to an IPI.
	 */
	if (nd->num_prstatus_notes > 1) {
		note = (Elf32_Nhdr *)nd->nt_prstatus_percpu[tc->processor];
	} else
		note = (Elf32_Nhdr *)nd->nt_prstatus;

	if (!note)
		goto no_nt_prstatus_exists;

	len = sizeof(Elf32_Nhdr);
	len = roundup(len + note->n_namesz, 4);
	pt_regs = (void *)((char *)note + len +
		MEMBER_OFFSET("elf_prstatus", "pr_reg"));

no_nt_prstatus_exists:
	return pt_regs;
}

static void *
get_ppc64_regs_from_elf_notes(struct task_context *tc)
{
	Elf64_Nhdr *note;
	size_t len;
	void *pt_regs;
	extern struct vmcore_data *nd;

	pt_regs = NULL;

	/*
	 * Registers are always saved during the dump process for the
	 * panic task. Kdump also captures registers for all CPUs if
	 * they responded to an IPI.
 */
	/* Tail of get_ppc64_regs_from_elf_notes(): locate this task's
	   NT_PRSTATUS note and return a pointer to its pr_reg area. */
	if (nd->num_prstatus_notes > 1) {
		note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor];
	} else
		note = (Elf64_Nhdr *)nd->nt_prstatus;

	if (!note)
		goto no_nt_prstatus_exists;

	len = sizeof(Elf64_Nhdr);
	len = roundup(len + note->n_namesz, 4);
	pt_regs = (void *)((char *)note + len +
		MEMBER_OFFSET("elf_prstatus", "pr_reg"));

no_nt_prstatus_exists:
	return pt_regs;
}

/*
 * Return the dump's physical base in *phys_base.  Only applies when a
 * KASLR offset must be computed (see kdump_kaslr_check()); FALSE otherwise.
 */
int
kdump_phys_base(ulong *phys_base)
{
	if (!kdump_kaslr_check())
		return FALSE;

	*phys_base = nd->phys_base;

	return TRUE;
}

/* Counterpart of kdump_phys_base(): store a computed physical base. */
int
kdump_set_phys_base(ulong phys_base)
{
	if (!kdump_kaslr_check())
		return FALSE;

	nd->phys_base = phys_base;

	return TRUE;
}

/*
 * In case of ARM we need to determine correct PHYS_OFFSET from the kdump file.
 * This is done by taking lowest physical address (LMA) from given load
 * segments. Normally this is the right one.
 *
 * Alternative would be to store phys_base in VMCOREINFO but current kernel
 * kdump doesn't do that yet.
 */
int arm_kdump_phys_base(ulong *phys_base)
{
	struct pt_load_segment *pls;
	ulong paddr = ULONG_MAX;
	int i;

	/* The lowest PT_LOAD physical start is taken as PHYS_OFFSET. */
	for (i = 0; i < nd->num_pt_load_segments; i++) {
		pls = &nd->pt_load_segments[i];
		if (pls->phys_start < paddr)
			paddr = pls->phys_start;
	}

	if (paddr != ULONG_MAX) {
		*phys_base = paddr;
		return TRUE;
	}
	return FALSE;
}

/*
 * physical memory size, calculated by given load segments
 */
int arm_kdump_phys_end(ulong *phys_end)
{
	struct pt_load_segment *pls;
	ulong paddr = 0;
	int i;

	/* The highest PT_LOAD physical end bounds physical memory. */
	for (i = 0; i < nd->num_pt_load_segments; i++) {
		pls = &nd->pt_load_segments[i];
		if (pls->phys_end > paddr)
			paddr = pls->phys_end;
	}

	if (paddr != 0) {
		*phys_end = paddr;
		return TRUE;
	}
	return FALSE;
}

/*
 * Return a pointer to the pr_reg area of the task's NT_PRSTATUS note
 * (body continues past this chunk).
 */
static void *
get_arm_regs_from_elf_notes(struct task_context *tc)
{
	Elf32_Nhdr *note_32;
	Elf64_Nhdr *note_64;
	void *note;
	size_t len;
	void *pt_regs;

	len = 0;
	pt_regs = NULL;

	if (nd->num_prstatus_notes > 1)
		note = (void *)nd->nt_prstatus_percpu[tc->processor];
	else
		note = (void *)nd->nt_prstatus;

	if (!note)
		goto no_nt_prstatus_exists;

	if (nd->elf32) {
		note_32 = (Elf32_Nhdr *)note;
		len =
sizeof(Elf32_Nhdr); len = roundup(len + note_32->n_namesz, 4); } else if (nd->elf64) { note_64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note_64->n_namesz, 4); } pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } /* * Read from /proc/kcore. */ int read_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int i, ret; size_t readcnt; ulong kvaddr; Elf32_Phdr *lp32; Elf64_Phdr *lp64; off_t offset; if (paddr != KCORE_USE_VADDR) { if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } } /* * Unless specified otherwise, turn the physical address into * a unity-mapped kernel virtual address, which should work * for 64-bit architectures, and for lowmem access for 32-bit * architectures. */ if (paddr == KCORE_USE_VADDR) kvaddr = addr; else kvaddr = PTOV((ulong)paddr); offset = UNINITIALIZED; readcnt = cnt; switch (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) { case KCORE_ELF32: for (i = 0; i < pkd->segments; i++) { lp32 = pkd->load32 + i; if ((kvaddr >= lp32->p_vaddr) && (kvaddr < (lp32->p_vaddr + lp32->p_memsz))) { offset = (off_t)(kvaddr - lp32->p_vaddr) + (off_t)lp32->p_offset; break; } } /* * If it's not accessible via unity-mapping, check whether * it's a request for a vmalloc address that can be found * in the header. */ if (pc->curcmd_flags & MEMTYPE_KVADDR) pc->curcmd_flags &= ~MEMTYPE_KVADDR; else break; for (i = 0; i < pkd->segments; i++) { lp32 = pkd->load32 + i; if ((addr >= lp32->p_vaddr) && (addr < (lp32->p_vaddr + lp32->p_memsz))) { offset = (off_t)(addr - lp32->p_vaddr) + (off_t)lp32->p_offset; break; } } break; case KCORE_ELF64: /* * If KASLR, the PAGE_OFFSET may be unknown early on, so try * the (hopefully) mapped kernel address first. 
*/ if (!(pc->flags & RUNTIME) && (pc->curcmd_flags & MEMTYPE_KVADDR) && (kvaddr != addr)) { pc->curcmd_flags &= ~MEMTYPE_KVADDR; for (i = 0; i < pkd->segments; i++) { lp64 = pkd->load64 + i; if ((addr >= lp64->p_vaddr) && (addr < (lp64->p_vaddr + lp64->p_memsz))) { offset = (off_t)(addr - lp64->p_vaddr) + (off_t)lp64->p_offset; break; } } if (offset != UNINITIALIZED) break; } for (i = 0; i < pkd->segments; i++) { lp64 = pkd->load64 + i; if ((kvaddr >= lp64->p_vaddr) && (kvaddr < (lp64->p_vaddr + lp64->p_memsz))) { offset = (off_t)(kvaddr - lp64->p_vaddr) + (off_t)lp64->p_offset; break; } } break; } if (offset == UNINITIALIZED) return SEEK_ERROR; if (offset < 0) { if (CRASHDEBUG(8)) fprintf(fp, "read_proc_kcore: invalid offset: %lx\n", offset); return SEEK_ERROR; } if ((ret = pread(fd, bufptr, readcnt, offset)) != readcnt) { if (ret == -1 && CRASHDEBUG(8)) fprintf(fp, "read_proc_kcore: pread error: %s\n", strerror(errno)); return READ_ERROR; } return cnt; } /* * place holder -- cannot write to /proc/kcore */ int write_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { error(FATAL, "cannot write to /proc/kcore\n"); return FALSE; } int is_proc_kcore(char *file, ulong source_query) { if (STREQ(file, "/proc/kcore") || same_file(file, "/proc/kcore")) { if (!is_netdump(file, source_query)) error(FATAL, "cannot translate the ELF header of /proc/kcore\n"); pkd->flags |= KCORE_LOCAL; return TRUE; } else return FALSE; } int proc_kcore_init(FILE *fp, int kcore_fd) { if (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) return TRUE; if (BITS32()) return proc_kcore_init_32(fp, kcore_fd); else return proc_kcore_init_64(fp, kcore_fd); } static int proc_kcore_init_32(FILE *fp, int kcore_fd) { int fd; Elf32_Ehdr *elf32; Elf32_Phdr *load32; Elf32_Phdr *notes32; char eheader[MAX_KCORE_ELF_HEADER_SIZE]; char buf[BUFSIZE]; size_t load_size, notes_size; if (kcore_fd == UNUSED) { if ((fd = open("/proc/kcore", O_RDONLY)) < 0) { error(INFO, "/proc/kcore: %s\n", 
strerror(errno)); return FALSE; } } else fd = kcore_fd; if (read(fd, eheader, MAX_KCORE_ELF_HEADER_SIZE) != MAX_KCORE_ELF_HEADER_SIZE) { sprintf(buf, "/proc/kcore: read"); perror(buf); goto bailout; } if (lseek(fd, 0, SEEK_SET) != 0) { sprintf(buf, "/proc/kcore: lseek"); perror(buf); goto bailout; } if (fd != kcore_fd) close(fd); elf32 = (Elf32_Ehdr *)&eheader[0]; if (elf32->e_phoff > sizeof(eheader) - 2 * sizeof(Elf32_Phdr)) { error(INFO, "/proc/kcore: ELF program header offset too big!\n"); return FALSE; } notes32 = (Elf32_Phdr *)&eheader[elf32->e_phoff]; load32 = notes32 + 1; pkd->segments = elf32->e_phnum - 1; notes_size = load_size = 0; if (notes32->p_type == PT_NOTE) notes_size = notes32->p_offset + notes32->p_filesz; if (notes32->p_type == PT_LOAD) load_size = (ulong)(load32+(elf32->e_phnum)) - (ulong)elf32; pkd->header_size = MAX(notes_size, load_size); if (!pkd->header_size) pkd->header_size = MAX_KCORE_ELF_HEADER_SIZE; if ((pkd->elf_header = (char *)malloc(pkd->header_size)) == NULL) { error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(&eheader[0], &pkd->elf_header[0], pkd->header_size); pkd->notes32 = (Elf32_Phdr *)&pkd->elf_header[elf32->e_phoff]; pkd->load32 = pkd->notes32 + 1; pkd->flags |= KCORE_ELF32; kcore_memory_dump(CRASHDEBUG(1) ? 
fp : pc->nullfp); return TRUE; bailout: if (fd != kcore_fd) close(fd); return FALSE; } static int proc_kcore_init_64(FILE *fp, int kcore_fd) { int fd; Elf64_Ehdr *elf64; Elf64_Phdr *load64; Elf64_Phdr *notes64; char eheader[MAX_KCORE_ELF_HEADER_SIZE]; char buf[BUFSIZE]; size_t load_size, notes_size; if (kcore_fd == UNUSED) { if ((fd = open("/proc/kcore", O_RDONLY)) < 0) { error(INFO, "/proc/kcore: %s\n", strerror(errno)); return FALSE; } } else fd = kcore_fd; if (read(fd, eheader, MAX_KCORE_ELF_HEADER_SIZE) != MAX_KCORE_ELF_HEADER_SIZE) { sprintf(buf, "/proc/kcore: read"); perror(buf); goto bailout; } if (lseek(fd, 0, SEEK_SET) != 0) { sprintf(buf, "/proc/kcore: lseek"); perror(buf); goto bailout; } if (fd != kcore_fd) close(fd); elf64 = (Elf64_Ehdr *)&eheader[0]; if (elf64->e_phoff > sizeof(eheader) - 2 * sizeof(Elf64_Phdr)) { error(INFO, "/proc/kcore: ELF program header offset too big!\n"); return FALSE; } notes64 = (Elf64_Phdr *)&eheader[elf64->e_phoff]; load64 = notes64 + 1; pkd->segments = elf64->e_phnum - 1; notes_size = load_size = 0; if (notes64->p_type == PT_NOTE) notes_size = notes64->p_offset + notes64->p_filesz; if (notes64->p_type == PT_LOAD) load_size = (ulong)(load64+(elf64->e_phnum)) - (ulong)elf64; pkd->header_size = MAX(notes_size, load_size); if (!pkd->header_size) pkd->header_size = MAX_KCORE_ELF_HEADER_SIZE; if ((pkd->elf_header = (char *)malloc(pkd->header_size)) == NULL) { error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(&eheader[0], &pkd->elf_header[0], pkd->header_size); pkd->notes64 = (Elf64_Phdr *)&pkd->elf_header[elf64->e_phoff]; pkd->load64 = pkd->notes64 + 1; pkd->flags |= KCORE_ELF64; kcore_memory_dump(CRASHDEBUG(1) ? 
fp : pc->nullfp); return TRUE; bailout: if (fd != kcore_fd) close(fd); return FALSE; } int kcore_memory_dump(FILE *ofp) { int i, others; Elf32_Phdr *ph32; Elf64_Phdr *ph64; Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t tot, len; char *name, *ptr, buf[BUFSIZE]; fprintf(ofp, "proc_kcore_data:\n"); fprintf(ofp, " flags: %x (", pkd->flags); others = 0; if (pkd->flags & KCORE_LOCAL) fprintf(ofp, "%sKCORE_LOCAL", others++ ? "|" : ""); if (pkd->flags & KCORE_ELF32) fprintf(ofp, "%sKCORE_ELF32", others++ ? "|" : ""); if (pkd->flags & KCORE_ELF64) fprintf(ofp, "%sKCORE_ELF64", others++ ? "|" : ""); fprintf(ofp, ")\n"); fprintf(ofp, " segments: %d\n", pkd->segments); fprintf(ofp, " elf_header: %lx\n", (ulong)pkd->elf_header); fprintf(ofp, " header_size: %ld\n", (ulong)pkd->header_size); fprintf(ofp, " notes64: %lx\n", (ulong)pkd->notes64); fprintf(ofp, " load64: %lx\n", (ulong)pkd->load64); fprintf(ofp, " notes32: %lx\n", (ulong)pkd->notes32); fprintf(ofp, " load32: %lx\n", (ulong)pkd->load32); fprintf(ofp, " vmcoreinfo: %lx\n", (ulong)pkd->vmcoreinfo); fprintf(ofp, " size_vmcoreinfo: %d\n\n", pkd->size_vmcoreinfo); if (pkd->flags & KCORE_ELF32) { ph32 = pkd->notes32; fprintf(ofp, " Elf32_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph32->p_type); switch (ph32->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph32->p_flags); fprintf(ofp, " p_offset: %x\n", ph32->p_offset); fprintf(ofp, " p_vaddr: %x\n", ph32->p_vaddr); fprintf(ofp, " p_paddr: %x\n", ph32->p_paddr); fprintf(ofp, " p_filesz: %d\n", ph32->p_filesz); fprintf(ofp, " p_memsz: %d\n", ph32->p_memsz); fprintf(ofp, " p_align: %d\n", ph32->p_align); fprintf(ofp, "\n"); for (i = 0; i < pkd->segments; i++) { ph32 = pkd->load32 + i; fprintf(ofp, " Elf32_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph32->p_type); switch (ph32->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case 
PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph32->p_flags); fprintf(ofp, " p_offset: %x\n", ph32->p_offset); fprintf(ofp, " p_vaddr: %x\n", ph32->p_vaddr); fprintf(ofp, " p_paddr: %x\n", ph32->p_paddr); fprintf(ofp, " p_filesz: %d\n", ph32->p_filesz); fprintf(ofp, " p_memsz: %d\n", ph32->p_memsz); fprintf(ofp, " p_align: %d\n", ph32->p_align); fprintf(ofp, "\n"); } note32 = (Elf32_Nhdr *)(pkd->elf_header + pkd->notes32->p_offset); for (tot = 0; tot < pkd->notes32->p_filesz; tot += len) { name = (char *)((ulong)note32 + sizeof(Elf32_Nhdr)); snprintf(buf, note32->n_namesz, "%s", name); fprintf(ofp, " Elf32_Nhdr:\n"); fprintf(ofp, " n_namesz: %d (\"%s\")\n", note32->n_namesz, buf); fprintf(ofp, " n_descsz: %d\n", note32->n_descsz); fprintf(ofp, " n_type: %d ", note32->n_type); switch (note32->n_type) { case NT_PRSTATUS: fprintf(ofp, "(NT_PRSTATUS)\n"); break; case NT_PRPSINFO: fprintf(ofp, "(NT_PRPSINFO)\n"); break; case NT_TASKSTRUCT: fprintf(ofp, "(NT_TASKSTRUCT)\n"); break; default: fprintf(ofp, "(unknown)\n"); if (STRNEQ(name, "VMCOREINFO")) { ptr = (char *)note32 + sizeof(Elf32_Nhdr) + note32->n_namesz + 1; pkd->vmcoreinfo = (void *)ptr; pkd->size_vmcoreinfo = note32->n_descsz; pc->read_vmcoreinfo = vmcoreinfo_read_string; fprintf(ofp, "\n "); for (i = 0; i < note32->n_descsz; i++, ptr++) { fprintf(ofp, "%c%s", *ptr, *ptr == '\n' ? 
" " : ""); } } break; } fprintf(ofp, "\n"); len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); note32 = (Elf32_Nhdr *)((ulong)note32 + len); } } if (pkd->flags & KCORE_ELF64) { ph64 = pkd->notes64; fprintf(ofp, " Elf64_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph64->p_type); switch (ph64->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph64->p_flags); fprintf(ofp, " p_offset: %llx\n", (ulonglong)ph64->p_offset); fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)ph64->p_vaddr); fprintf(ofp, " p_paddr: %llx\n", (ulonglong)ph64->p_paddr); fprintf(ofp, " p_filesz: %lld\n", (ulonglong)ph64->p_filesz); fprintf(ofp, " p_memsz: %lld\n", (ulonglong)ph64->p_memsz); fprintf(ofp, " p_align: %lld\n", (ulonglong)ph64->p_align); fprintf(ofp, "\n"); for (i = 0; i < pkd->segments; i++) { ph64 = pkd->load64 + i; fprintf(ofp, " Elf64_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph64->p_type); switch (ph64->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph64->p_flags); fprintf(ofp, " p_offset: %llx\n", (ulonglong)ph64->p_offset); fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)ph64->p_vaddr); fprintf(ofp, " p_paddr: %llx\n", (ulonglong)ph64->p_paddr); fprintf(ofp, " p_filesz: %lld\n", (ulonglong)ph64->p_filesz); fprintf(ofp, " p_memsz: %lld\n", (ulonglong)ph64->p_memsz); fprintf(ofp, " p_align: %lld\n", (ulonglong)ph64->p_align); fprintf(ofp, "\n"); } note64 = (Elf64_Nhdr *)(pkd->elf_header + pkd->notes64->p_offset); for (tot = 0; tot < pkd->notes64->p_filesz; tot += len) { name = (char *)((ulong)note64 + sizeof(Elf64_Nhdr)); snprintf(buf, note64->n_namesz, "%s", name); fprintf(ofp, " Elf64_Nhdr:\n"); fprintf(ofp, " n_namesz: %d (\"%s\")\n", note64->n_namesz, buf); 
fprintf(ofp, " n_descsz: %d\n", note64->n_descsz); fprintf(ofp, " n_type: %d ", note64->n_type); switch (note64->n_type) { case NT_PRSTATUS: fprintf(ofp, "(NT_PRSTATUS)\n"); break; case NT_PRPSINFO: fprintf(ofp, "(NT_PRPSINFO)\n"); break; case NT_TASKSTRUCT: fprintf(ofp, "(NT_TASKSTRUCT)\n"); break; default: fprintf(ofp, "(unknown)\n"); if (STRNEQ(name, "VMCOREINFO")) { ptr = (char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz + 1; pkd->vmcoreinfo = (void *)ptr; pkd->size_vmcoreinfo = note64->n_descsz; pc->read_vmcoreinfo = vmcoreinfo_read_string; fprintf(ofp, "\n "); for (i = 0; i < note64->n_descsz; i++, ptr++) { fprintf(ofp, "%c%s", *ptr, *ptr == '\n' ? " " : ""); } } break; } fprintf(ofp, "\n"); len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); note64 = (Elf64_Nhdr *)((ulong)note64 + len); } } return TRUE; } static void kdump_get_osrelease(void) { char *string; if ((string = vmcoreinfo_read_string("OSRELEASE"))) { fprintf(fp, "%s\n", string); free(string); } else pc->flags2 &= ~GET_OSRELEASE; } void dump_registers_for_qemu_mem_dump(void) { int i; QEMUCPUState *ptr; FILE *fpsave; fpsave = nd->ofp; nd->ofp = fp; for (i = 0; i < nd->num_qemu_notes; i++) { ptr = (QEMUCPUState *)nd->nt_qemu_percpu[i]; if (i) netdump_print("\n"); if (hide_offline_cpu(i)) { netdump_print("CPU %d: [OFFLINE]\n", i); continue; } else netdump_print("CPU %d:\n", i); if (CRASHDEBUG(1)) netdump_print(" version:%d size:%d\n", ptr->version, ptr->size); netdump_print(" RAX: %016llx RBX: %016llx RCX: %016llx\n", ptr->rax, ptr->rbx, ptr->rcx); netdump_print(" RDX: %016llx RSI: %016llx RDI:%016llx\n", ptr->rdx, ptr->rsi, ptr->rdi); netdump_print(" RSP: %016llx RBP: %016llx ", ptr->rsp, ptr->rbp); if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64) { netdump_print(" R8: %016llx\n", ptr->r8); netdump_print(" R9: %016llx R10: %016llx R11: %016llx\n", ptr->r9, ptr->r10, ptr->r11); netdump_print(" R12: %016llx R13: %016llx R14: %016llx\n", 
ptr->r12, ptr->r13, ptr->r14); netdump_print(" R15: %016llx", ptr->r15); } else netdump_print("\n"); netdump_print(" RIP: %016llx RFLAGS: %08llx\n", ptr->rip, ptr->rflags); netdump_print(" CS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->cs.selector, ptr->cs.limit, ptr->cs.flags, ptr->cs.pad, ptr->cs.base); netdump_print(" DS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ds.selector, ptr->ds.limit, ptr->ds.flags, ptr->ds.pad, ptr->ds.base); netdump_print(" ES: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->es.selector, ptr->es.limit, ptr->es.flags, ptr->es.pad, ptr->es.base); netdump_print(" FS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->fs.selector, ptr->fs.limit, ptr->fs.flags, ptr->fs.pad, ptr->fs.base); netdump_print(" GS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->gs.selector, ptr->gs.limit, ptr->gs.flags, ptr->gs.pad, ptr->gs.base); netdump_print(" SS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ss.selector, ptr->ss.limit, ptr->ss.flags, ptr->ss.pad, ptr->ss.base); netdump_print(" LDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ldt.selector, ptr->ldt.limit, ptr->ldt.flags, ptr->ldt.pad, ptr->ldt.base); netdump_print(" TR: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->tr.selector, ptr->tr.limit, ptr->tr.flags, ptr->tr.pad, ptr->tr.base); netdump_print(" GDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->gdt.selector, ptr->gdt.limit, ptr->gdt.flags, ptr->gdt.pad, ptr->gdt.base); netdump_print(" IDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->idt.selector, ptr->idt.limit, ptr->idt.flags, ptr->idt.pad, ptr->idt.base); netdump_print(" CR0: %016llx CR1: %016llx CR2: %016llx\n", ptr->cr[0], ptr->cr[1], ptr->cr[2]); 
netdump_print(" CR3: %016llx CR4: %016llx\n", ptr->cr[3], ptr->cr[4]); } nd->ofp = fpsave; } /* * kdump saves the first 640kB physical memory for BIOS to use the * range on boot of 2nd kernel. Read request to the 640k should be * translated to the back up region. This function searches kexec * resources for the backup region. */ void kdump_backup_region_init(void) { char buf[BUFSIZE]; ulong i, total, kexec_crash_image_p, elfcorehdr_p; Elf32_Off e_phoff32; Elf64_Off e_phoff64; uint16_t e_phnum, e_phentsize; ulonglong backup_offset; ulonglong backup_src_start; ulong backup_src_size; int kimage_segment_len; size_t bufsize; struct vmcore_data *vd; struct sadump_data *sd; int is_32_bit; char typename[BUFSIZE]; e_phoff32 = e_phoff64 = 0; vd = NULL; sd = NULL; if (SADUMP_DUMPFILE()) { sd = get_sadump_data(); is_32_bit = FALSE; sprintf(typename, "sadump"); } else if (pc->flags2 & QEMU_MEM_DUMP_ELF) { vd = get_kdump_vmcore_data(); if (vd->flags & KDUMP_ELF32) is_32_bit = TRUE; else is_32_bit = FALSE; sprintf(typename, "qemu mem dump"); } else return; if (symbol_exists("kexec_crash_image")) { if (!readmem(symbol_value("kexec_crash_image"), KVADDR, &kexec_crash_image_p, sizeof(ulong), "kexec backup region: kexec_crash_image", QUIET|RETURN_ON_ERROR)) goto error; } else kexec_crash_image_p = 0; if (!kexec_crash_image_p) { if (CRASHDEBUG(1)) error(INFO, "%s: kexec_crash_image not loaded\n", typename); return; } kimage_segment_len = get_array_length("kimage.segment", NULL, STRUCT_SIZE("kexec_segment")); if (!readmem(kexec_crash_image_p + MEMBER_OFFSET("kimage", "segment"), KVADDR, buf, MEMBER_SIZE("kimage", "segment"), "kexec backup region: kexec_crash_image->segment", QUIET|RETURN_ON_ERROR)) goto error; elfcorehdr_p = 0; for (i = 0; i < kimage_segment_len; ++i) { char e_ident[EI_NIDENT]; ulong mem; mem = ULONG(buf + i * STRUCT_SIZE("kexec_segment") + MEMBER_OFFSET("kexec_segment", "mem")); if (!mem) continue; if (!readmem(mem, PHYSADDR, e_ident, SELFMAG, "elfcorehdr: e_ident", 
QUIET|RETURN_ON_ERROR)) goto error; if (strncmp(ELFMAG, e_ident, SELFMAG) == 0) { elfcorehdr_p = mem; break; } } if (!elfcorehdr_p) { if (CRASHDEBUG(1)) error(INFO, "%s: elfcorehdr not found in segments of kexec_crash_image\n", typename); goto error; } if (is_32_bit) { if (!readmem(elfcorehdr_p, PHYSADDR, buf, STRUCT_SIZE("elf32_hdr"), "elfcorehdr", QUIET|RETURN_ON_ERROR)) goto error; e_phnum = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phnum")); e_phentsize = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phentsize")); e_phoff32 = ULONG(buf + MEMBER_OFFSET("elf32_hdr", "e_phoff")); } else { if (!readmem(elfcorehdr_p, PHYSADDR, buf, STRUCT_SIZE("elf64_hdr"), "elfcorehdr", QUIET|RETURN_ON_ERROR)) goto error; e_phnum = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phnum")); e_phentsize = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phentsize")); e_phoff64 = ULONG(buf + MEMBER_OFFSET("elf64_hdr", "e_phoff")); } backup_src_start = backup_src_size = backup_offset = 0; for (i = 0; i < e_phnum; ++i) { uint32_t p_type; Elf32_Off p_offset32; Elf64_Off p_offset64; Elf32_Addr p_paddr32; Elf64_Addr p_paddr64; uint32_t p_memsz32; uint64_t p_memsz64; if (is_32_bit) { if (!readmem(elfcorehdr_p + e_phoff32 + i * e_phentsize, PHYSADDR, buf, e_phentsize, "elfcorehdr: program header", QUIET|RETURN_ON_ERROR)) goto error; p_type = UINT(buf+MEMBER_OFFSET("elf32_phdr","p_type")); p_offset32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_offset")); p_paddr32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_paddr")); p_memsz32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_memsz")); } else { if (!readmem(elfcorehdr_p + e_phoff64 + i * e_phentsize, PHYSADDR, buf, e_phentsize, "elfcorehdr: program header", QUIET|RETURN_ON_ERROR)) goto error; p_type = UINT(buf+MEMBER_OFFSET("elf64_phdr","p_type")); p_offset64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_offset")); p_paddr64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_paddr")); p_memsz64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_memsz")); } /* * kexec marks backup region 
PT_LOAD by assigning * backup region address in p_offset, and p_addr in * p_offsets for other PT_LOAD entries. */ if (is_32_bit) { if (p_type == PT_LOAD && p_paddr32 <= KEXEC_BACKUP_SRC_END && p_paddr32 != p_offset32) { backup_src_start = p_paddr32; backup_src_size = p_memsz32; backup_offset = p_offset32; if (CRASHDEBUG(1)) error(INFO, "%s: kexec backup region found: " "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n", typename, backup_src_start, backup_src_size, backup_offset); break; } } else { if (p_type == PT_LOAD && p_paddr64 <= KEXEC_BACKUP_SRC_END && p_paddr64 != p_offset64) { backup_src_start = p_paddr64; backup_src_size = p_memsz64; backup_offset = p_offset64; if (CRASHDEBUG(1)) error(INFO, "%s: kexec backup region found: " "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n", typename, backup_src_start, backup_src_size, backup_offset); break; } } } if (!backup_offset) { if (CRASHDEBUG(1)) error(WARNING, "%s: backup region not found in elfcorehdr\n", typename); return; } bufsize = BUFSIZE; for (total = 0; total < backup_src_size; total += bufsize) { char backup_buf[BUFSIZE]; int j; if (backup_src_size - total < BUFSIZE) bufsize = backup_src_size - total; if (!readmem(backup_offset + total, PHYSADDR, backup_buf, bufsize, "backup source", QUIET|RETURN_ON_ERROR)) goto error; /* * We're assuming the backup region is initialized * with 0 filled if kdump has not run. 
 */
	/* Tail of kdump_backup_region_init(): if any byte in the backup
	   region is non-zero, kdump has run and the region is in use. */
	for (j = 0; j < bufsize; ++j) {
		if (backup_buf[j]) {
			if (SADUMP_DUMPFILE()) {
				sd->flags |= SADUMP_KDUMP_BACKUP;
				sd->backup_src_start = backup_src_start;
				sd->backup_src_size = backup_src_size;
				sd->backup_offset = backup_offset;
			} else if (pc->flags2 & QEMU_MEM_DUMP_ELF) {
				vd->flags |= QEMU_MEM_DUMP_KDUMP_BACKUP;
				vd->backup_src_start = backup_src_start;
				vd->backup_src_size = backup_src_size;
				vd->backup_offset = backup_offset;
			}
			if (CRASHDEBUG(1))
				error(INFO,
				    "%s: backup region is used: %llx\n",
				    typename, backup_offset + total + j);
			return;
		}
	}
	}

	if (CRASHDEBUG(1))
		error(INFO, "%s: kexec backup region not used\n", typename);

	return;

error:
	error(WARNING, "failed to init kexec backup region\n");
}

/*
 * TRUE when a KASLR offset must be computed: only for QEMU memory
 * dumps that carry QEMU CPU notes but no VMCOREINFO note.
 */
int
kdump_kaslr_check(void)
{
	if (!QEMU_MEM_DUMP_NO_VMCOREINFO())
		return FALSE;

	/* If vmcore has QEMU note, need to calculate kaslr offset */
	if (nd->num_qemu_notes)
		return TRUE;
	else
		return FALSE;
}

/*
 * Derive the CPU count from whichever per-CPU note type the dump
 * carries; default to 1 when none are present.
 */
int
kdump_get_nr_cpus(void)
{
	if (nd->num_prstatus_notes)
		return nd->num_prstatus_notes;
	else if (nd->num_qemu_notes)
		return nd->num_qemu_notes;
	else if (nd->num_vmcoredd_notes)
		return nd->num_vmcoredd_notes;

	return 1;
}

/*
 * Return the QEMU-saved CPU state note for "cpu", or NULL when the
 * index is out of range or the dump is not an x86_64 ELF64 vmcore.
 */
QEMUCPUState *
kdump_get_qemucpustate(int cpu)
{
	if (cpu >= nd->num_qemu_notes) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "Invalid index for QEMU Note: %d (>= %d)\n",
			    cpu, nd->num_qemu_notes);
		return NULL;
	}

	if (!nd->elf64 || (nd->elf64->e_machine != EM_X86_64)) {
		if (CRASHDEBUG(1))
			error(INFO, "Only x86_64 64bit is supported.\n");
		return NULL;
	}

	return (QEMUCPUState *)nd->nt_qemu_percpu[cpu];
}

/*
 * Base address the vmcoredd note pointers are relative to, i.e. the
 * mapped ELF header; FATALs when the dump is neither ELF32 nor ELF64.
 */
static void *
get_kdump_device_dump_offset(void)
{
	void *elf_base = NULL;

	if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64)
		elf_base = (void *)nd->elf64;
	else if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32)
		elf_base = (void *)nd->elf32;
	else
		error(FATAL, "no device dumps found in this dumpfile\n");

	return elf_base;
}

/*
 * extract hardware specific device dumps from coredump.
*/ void kdump_device_dump_extract(int index, char *outfile, FILE *ofp) { ulonglong offset; void *elf_base; if (!nd->num_vmcoredd_notes) error(FATAL, "no device dumps found in this dumpfile\n"); else if (index >= nd->num_vmcoredd_notes) error(FATAL, "no device dump found at index: %d", index); elf_base = get_kdump_device_dump_offset(); offset = nd->nt_vmcoredd_array[index] - elf_base; devdump_extract(nd->nt_vmcoredd_array[index], offset, outfile, ofp); } /* * list all hardware specific device dumps present in coredump. */ void kdump_device_dump_info(FILE *ofp) { ulonglong offset; char buf[BUFSIZE]; void *elf_base; ulong i; if (!nd->num_vmcoredd_notes) error(FATAL, "no device dumps found in this dumpfile\n"); fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), LJUST, "INDEX")); fprintf(fp, " %s ", mkstring(buf, LONG_LONG_PRLEN, LJUST, "OFFSET")); fprintf(fp, " %s ", mkstring(buf, LONG_PRLEN, LJUST, "SIZE")); fprintf(fp, "NAME\n"); elf_base = get_kdump_device_dump_offset(); for (i = 0; i < nd->num_vmcoredd_notes; i++) { fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), CENTER | INT_DEC, MKSTR(i))); offset = nd->nt_vmcoredd_array[i] - elf_base; devdump_info(nd->nt_vmcoredd_array[i], offset, ofp); } } crash-utility-crash-9cd43f5/lkcd_x86_trace.c0000664000372000037200000042346215107550337020353 0ustar juerghjuergh/* * Copyright 1999 Silicon Graphics, Inc. All rights reserved. */ /* * lkcd_x86_trace.c * * Copyright (C) 2002-2012, 2017-2018 David Anderson * Copyright (C) 2002-2012, 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
 * GNU General Public License for more details.
 *
 * Adapted as noted from the following LKCD files:
 *
 *  lkcdutils-4.1/lcrash/arch/i386/lib/dis.c
 *  lkcdutils-4.1/lcrash/arch/i386/lib/trace.c
 *  lkcdutils-4.1/libutil/kl_queue.c
 */

#ifdef X86
#ifdef REDHAT

#include "lkcd_x86_trace.h"

/*
 * Replace the header macro with a predicate: TRUE when this crash session
 * is analyzing a Xen hypervisor rather than a Linux kernel.
 */
#undef XEN_HYPER_MODE
static int XEN_HYPER_MODE(void) { return (pc->flags & XEN_HYPER) != 0; }

/* Forward declarations: LKCD "kl_" compatibility wrappers. */
static void *kl_alloc_block(int, int);
static void kl_free_block(void *);
static void GET_BLOCK(kaddr_t, unsigned, void *);
static void kl_get_kaddr(kaddr_t, void *);
static char *kl_funcname(kaddr_t);
static kaddr_t kl_funcaddr(kaddr_t);
static syment_t *kl_lkup_symaddr(kaddr_t);
static k_error_t kl_get_task_struct(kaddr_t, int, void *);
static kaddr_t kl_kernelstack(kaddr_t);
/* Forward declarations: instruction-stream and frame-size analysis. */
static kaddr_t get_call_pc(kaddr_t);
static kaddr_t get_call_pc_v2(kaddr_t);
static int get_jmp_instr(kaddr_t, kaddr_t, kaddr_t *, char *, char **);
static int is_push(unsigned int);
static int is_pop(unsigned int);
static int get_framesize(kaddr_t, struct bt_info *);
static int cache_framesize(int, kaddr_t funcaddr, int *, void **);
struct framesize_cache;
static int framesize_modify(struct framesize_cache *);
struct framesize_mods;
static int compiler_matches(struct framesize_mods *);
/* Forward declarations: trace record management and back trace engine. */
static sframe_t *alloc_sframe(trace_t *, int);
static void free_sframes(trace_t *);
static void free_trace_rec(trace_t *);
static void clean_trace_rec(trace_t *);
static int setup_trace_rec(kaddr_t, kaddr_t, int, trace_t *);
static int valid_ra(kaddr_t);
static int valid_ra_function(kaddr_t, char *);
static int eframe_incr(kaddr_t, char *);
static int find_trace(kaddr_t, kaddr_t, kaddr_t, kaddr_t, trace_t *, int);
static void dump_stack_frame(trace_t *, sframe_t *, FILE *);
static void print_trace(trace_t *, int, FILE *);
static int eframe_type(uaddr_t *);
static char *funcname_display(char *, ulong, struct bt_info *, char *);
static void print_eframe(FILE *, uaddr_t *);
static void trace_banner(FILE *);
static void print_kaddr(kaddr_t, FILE *, int);
int do_text_list(kaddr_t, int, FILE *);
int print_traces(struct bt_info *, int, int, FILE *);
static int get_instr_info(kaddr_t, instr_rec_t *);
static instr_rec_t *get_instr_stream(kaddr_t, int, int);
static void free_instr_stream(instr_rec_t *);
static trace_t *alloc_trace_rec(int);
static void kl_enqueue(element_t**, element_t*);
static element_t *kl_dequeue(element_t**);
static void handle_trace_error(struct bt_info *, int, FILE *);
static int verify_back_trace(struct bt_info *);
static int recoverable(struct bt_info *, FILE *);
static void fill_instr_cache(kaddr_t, char *);
static void do_bt_reference_check(struct bt_info *, sframe_t *);
static void print_stack_entry(struct bt_info *, int, ulong, ulong, char *,
	sframe_t *, FILE *);
static struct syment *eframe_label(char *, ulong);
static int dump_framesize_cache(FILE *, struct framesize_cache *);
static int modify_framesize_cache_entry(FILE *, ulong, int);
static int framesize_debug(struct bt_info *, FILE *);
static int kernel_entry_from_user_space(sframe_t *, struct bt_info *);

/* Last LKCD-style error code; checked via KL_ERROR throughout this file. */
k_error_t klib_error = 0;

/*
 * Map the LKCD allocator onto crash's temporary buffer pool.
 * The size/flags arguments are accepted for interface compatibility only.
 */
static void *
kl_alloc_block(int size, int flags)
{
	return ((void *)GETBUF(size));
}

/* Release a block obtained from kl_alloc_block(); NULL is a no-op. */
static void
kl_free_block(void *blk)
{
	if (blk)
		FREEBUF(blk);
}

/*
 * Read size bytes from kernel virtual address addr into buffer.
 * On failure KL_ERROR is set to KLE_INVALID_READ instead of returning
 * a status -- callers test KL_ERROR.
 */
static void
GET_BLOCK(kaddr_t addr, unsigned size, void *buffer)
{
	KL_ERROR = 0;

	if (!readmem(addr, KVADDR, (void *)buffer, (ulong)size,
	    "GET_BLOCK", RETURN_ON_ERROR|QUIET)) {
		console("GET_BLOCK: %lx (%d/0x%x)\n", addr, size, size);
		KL_ERROR = KLE_INVALID_READ;
	}
}

/* Read one 32-bit pointer-sized word (x86) from kernel address addr. */
static void
kl_get_kaddr(kaddr_t addr, void *bp)
{
	KL_ERROR = 0;
	GET_BLOCK(addr, 4, bp);
}

/*
 * Return the name of the function containing text address pc, or NULL.
 *
 * Accepts 't'/'T' text symbols, plus a few special read-only-section
 * entry points ("*_interrupt", "call_do_IRQ").  Module addresses get a
 * parenthesized "(modname)" string.
 */
static char *
kl_funcname(kaddr_t pc)
{
	struct syment *sp;
	char *buf, *name;
	struct load_module *lm;

	if ((sp = value_search(pc, NULL))) {
		/* Skip a "_stext" alias that shares its value with the next symbol. */
		if (STREQ(sp->name, "_stext") && (sp->value == (sp+1)->value))
			sp++;
		switch (sp->type)
		{
		case 'r':
			if (strstr(sp->name, "_interrupt") ||
			    STREQ(sp->name, "call_do_IRQ"))
				return sp->name;
			break;
		case 't':
		case 'T':
			return sp->name;
		}
		if (is_kernel_text(pc))
			return sp->name;
	}

	if (IS_MODULE_VADDR(pc)) {
		/*
		 * NOTE(review): the returned name lives in a GETBUF buffer that
		 * this function never frees -- presumably reclaimed with the
		 * rest of the temporary buffer pool by the command loop.
		 */
		buf = GETBUF(BUFSIZE);
		name = &buf[BUFSIZE/2];
		if (module_symbol(pc, NULL, NULL, buf, output_radix)) {
			sprintf(name, "(%s)", buf);
			return name;
		} else {
			FREEBUF(buf);
			return "(unknown module)";
		}
	}

	if ((lm = init_module_function(pc)))
		return ("init_module");

	return NULL;
}

/*
 * Return the starting address of the function containing text address pc,
 * or NULL -- same symbol-type acceptance rules as kl_funcname().
 */
static kaddr_t
kl_funcaddr(kaddr_t pc)
{
	struct syment *sp;
	struct load_module *lm;

	if ((sp = value_search(pc, NULL))) {
		switch (sp->type)
		{
		case 'r':
			if (strstr(sp->name, "_interrupt") ||
			    STREQ(sp->name, "call_do_IRQ"))
				return sp->value;
			break;
		case 't':
		case 'T':
			return sp->value;
		}
		if (is_kernel_text(pc))
			return sp->value;
	}

	if ((lm = init_module_function(pc)))
		return lm->mod_init_module_ptr;

	return((kaddr_t)NULL);
}

/* Shared pseudo-symbol returned for addresses inside a module's init text. */
static struct syment init_module_syment = {
	.name = "init_module",
	.type = 't',
};

/*
 * Symbol-table lookup by address; falls back to the init_module
 * pseudo-symbol (with its value patched in) for module init text.
 */
static syment_t *
kl_lkup_symaddr(kaddr_t addr)
{
	struct syment *sp;
	struct load_module *lm;

	if ((sp = value_search(addr, NULL)))
		return sp;

	if ((lm = init_module_function(addr))) {
		init_module_syment.value = lm->mod_init_module_ptr;
		return &init_module_syment;
	}

	return NULL;
}

/*
 * Copy the task_struct at kernel address value into tsp, using the
 * task-struct cache when it already holds this task.  Returns KL_ERROR
 * (0 on success); the mode argument is unused.
 */
static k_error_t
kl_get_task_struct(kaddr_t value, int mode, void *tsp)
{
	KL_ERROR = 0;

	if (value == tt->last_task_read)
		BCOPY(tt->task_struct, tsp, TASK_STRUCT_SZ);
	else
		GET_BLOCK(value, TASK_STRUCT_SZ, tsp);

	return KL_ERROR;
}

/* Top of the kernel stack for a task (task address + stack size). */
static kaddr_t
kl_kernelstack(kaddr_t task)
{
	kaddr_t saddr;

	return (saddr = (task + KSTACK_SIZE));
}

/* Print a kernel address in hex; the flag argument is unused. */
static void
print_kaddr(kaddr_t kaddr, FILE *ofp, int flag)
{
	fprintf(ofp, "%lx", (ulong)kaddr);
}
#endif /* REDHAT */

/*
 * lkcdutils-4.1/lcrash/arch/i386/lib/trace.c
 */

#ifndef REDHAT
/*
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 */
#include
#include
#include
#endif /* !REDHAT */

/*
 * get_call_pc()
 *
 * Given a return address, find the address of the "call" instruction
 * that produced it; zero if the preceding instruction is not a call.
 */
kaddr_t
get_call_pc(kaddr_t ra)
{
	kaddr_t addr = 0;
	instr_rec_t *irp;

	if (!(irp = get_instr_stream(ra, 1, 0))) {
		return((kaddr_t)NULL);
	}
	if (!irp->prev) {
		free_instr_stream(irp);
		return((kaddr_t)NULL);
	}
	/* 0x00e8: direct call; 0xff02: indirect call via register/memory. */
	if ((irp->prev->opcode == 0x00e8) ||
			(irp->prev->opcode == 0xff02)) {
		addr = irp->prev->addr;
	}
	free_instr_stream(irp);
	/*
	 * If the old LKCD code fails, try disassembling...
	 */
	if (!addr)
		return get_call_pc_v2(ra);

	return(addr);
}

/*
 * get_call_pc_v2()
 *
 * Fallback: use the embedded gdb disassembler.  For each candidate call
 * length (2..7 bytes), disassemble two instructions at ra-len and accept
 * the candidate when the first is a "call" and the second starts at ra.
 */
kaddr_t
get_call_pc_v2(kaddr_t ra)
{
	int c ATTRIBUTE_UNUSED;
	int line, len;
	kaddr_t addr, addr2;
	ulong offset;
	struct syment *sp;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];

	/* A return address at a function's entry point cannot follow a call. */
	if ((sp = value_search(ra, &offset))) {
		if (offset == 0)
			return 0;
	} else
		return 0;

	addr = 0;

	for (len = 2; len < 8; len++) {
		open_tmpfile2();
		sprintf(buf, "x/2i 0x%x", ra - len);
		if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
			close_tmpfile2();
			return 0;
		}
		rewind(pc->tmpfile2);
		line = 1;
		while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
			c = parse_line(buf, arglist);
			if ((line == 1) && !STREQ(arglist[2], "call"))
				break;
			if (line == 2) {
				addr2 = (kaddr_t)htol(arglist[0],
					RETURN_ON_ERROR|QUIET, 0);
				if (addr2 == ra) {
					addr = ra - len;
					break;
				}
			}
			line++;
		}
		close_tmpfile2();

		if (addr) {
			if (CRASHDEBUG(1)) {
				fprintf(fp, "get_call_pc_v2(ra: %x) -> %x -> ",
					ra, addr);
				if (value_to_symstr(addr, buf, 0))
					fprintf(fp, "%s", buf);
				fprintf(fp, "\n");
			}
			break;
		}
	}

	return addr;
}

/*
 * get_jmp_instr()
 *
 * Inspect the call instruction preceding addr; on a recognized direct or
 * indirect call, store the callee address/name via caddr/cfname.
 * Returns 0 on success, 1 when no call can be decoded.
 */
int
get_jmp_instr(kaddr_t addr, kaddr_t isp, kaddr_t *caddr, char *fname,
	char **cfname)
{
	kaddr_t a;
	int offset;
	instr_rec_t *irp;

	if (!(irp = get_instr_stream(addr, 1, 0))) {
		return(1);
	}
	if (!irp->prev) {
		free_instr_stream(irp);
		return(1);
	}
	irp = irp->prev;
	if (!(irp->opcode == 0x00e8) && !(irp->opcode == 0xff02)) {
		free_instr_stream(irp);
		return(1);
	}

	/* Check for the easiest case first...
	 */
	if (irp->opcode == 0xe8) {
		/* Direct call: the decoded operand is the callee address. */
		a = irp->operand[0].op_addr;
		if ((*cfname = kl_funcname(a))) {
			*caddr = a;
		}
	} else if (irp->opcode == 0xff02) {
		/* Indirect call: resolve only the patterns we understand. */
		switch (irp->modrm)
		{
		case 0x14:
			if (irp->sib == 0x85) {
				/* call *table(,%eax,4) -- system call dispatch. */
				kl_get_kaddr(addr - 4, &a);
				if (KL_ERROR) {
					free_instr_stream(irp);
					return(1);
				}
				if (strstr(fname, "system_call")) {
					/* Syscall number saved at isp+28 indexes the table. */
					GET_BLOCK(isp + 28, 4, &offset);
					a += (offset * 4);
					kl_get_kaddr(a, &a);
					if ((*cfname = kl_funcname(a))) {
						*caddr = a;
					}
				}
			}
			break;
		case 0xc2: /* EAX */
		case 0xca: /* ECX */
		case 0xd2: /* EDX */
		case 0xda: /* EBX */
		case 0xea: /* EBP */
		case 0xf2: /* ESI */
		case 0xfa: /* EDI */
			/* Register-indirect call: target not recoverable statically. */
			break;
		}
	}
	free_instr_stream(irp);
	return(0);
}

/*
 * is_push()
 *
 * Classify an opcode as a push: 1 for single-word pushes (incl. segment
 * pushes, pushf), 2 for pusha (eight words), 0 otherwise.
 */
int
is_push(unsigned int opcode)
{
	switch(opcode)
	{
	case 0x0006:
	case 0x000e:
	case 0x0016:
	case 0x001e:
	case 0x0050:
	case 0x0051:
	case 0x0052:
	case 0x0053:
	case 0x0054:
	case 0x0055:
	case 0x0056:
	case 0x0057:
	case 0x0068:
	case 0x006a:
	case 0x009c:
	case 0x0fa0:
	case 0x0fa8:
	case 0xff06:
		return(1);
	case 0x0060:
		return(2);
	}
	return(0);
}

/*
 * is_pop()
 *
 * Classify an opcode as a pop: 1 for single-word pops, 2 for popa
 * (eight words), 0 otherwise.
 */
int
is_pop(unsigned int opcode)
{
	switch(opcode)
	{
	case 0x0007:
	case 0x0017:
	case 0x001f:
	case 0x0058:
	case 0x0059:
	case 0x005a:
	case 0x005b:
	case 0x005c:
	case 0x005d:
	case 0x005e:
	case 0x005f:
	case 0x008f:
	case 0x009d:
	case 0x0fa1:
	case 0x0fa9:
		return(1);
	case 0x0061:
		return(2);
	}
	return(0);
}

#ifdef REDHAT

/* Entry flag: the RA found for this function must call the expected callee. */
#define FRAMESIZE_VALIDATE (0x1)

/* One cached frame-size computation, keyed by function text address. */
struct framesize_cache {
	kaddr_t pc;
	int flags;
	int frmsize;
	int bp_adjust;
};

#define FRAMESIZE_CACHE (200)
static struct framesize_cache framesize_cache[FRAMESIZE_CACHE] = {{0}};
/* Returned by FSZ_VALIDATE when no entry exists -- all-zero, harmless. */
static struct framesize_cache framesize_cache_empty = {0};

#define FSZ_QUERY    (1)
#define FSZ_VALIDATE (2)
#define FSZ_ENTER    (3)

#define FRAMESIZE_CACHE_QUERY(pc,szp) cache_framesize(FSZ_QUERY, pc, szp, NULL)
#define FRAMESIZE_CACHE_ENTER(pc,szp) cache_framesize(FSZ_ENTER, pc, szp, NULL)
#define FRAMESIZE_CACHE_VALIDATE(pc,fcpp) cache_framesize(FSZ_VALIDATE, pc, NULL, fcpp)

/*
 * cache_framesize()
 *
 * Single entry point for the frame-size cache:
 *   FSZ_QUERY    -- fetch cached size into *fsize; FALSE if absent.
 *   FSZ_VALIDATE -- point *ptr at the entry (or the empty entry); FALSE if absent.
 *   FSZ_ENTER    -- insert *fsize (applying framesize_modify() kludges),
 *                   or return the already-cached value.  When full, evicts
 *                   round-robin via last_cleared and retries.
 */
static int
cache_framesize(int cmd, kaddr_t funcaddr, int *fsize, void **ptr)
{
	int i;
	static ulong last_cleared = 0;

retry:
	for (i = 0; i < FRAMESIZE_CACHE; i++) {
		if (framesize_cache[i].pc == funcaddr) {
			switch (cmd)
			{
			case FSZ_VALIDATE:
				*ptr = &framesize_cache[i];
				return TRUE;
			case FSZ_QUERY:
				*fsize = framesize_cache[i].frmsize;
				return TRUE;
			case FSZ_ENTER:
				/* Already cached: hand back the cached size. */
				*fsize = framesize_cache[i].frmsize;
				return TRUE;
			}
		}
		/*
		 * The entry does not exist.
		 *
		 * If FSZ_QUERY or FSZ_VALIDATE, return their
		 * no-such-entry indications.
		 *
		 * Otherwise, load up the entry with the new data,
		 * and modify it with known kludgery.
		 */
		if (framesize_cache[i].pc == 0) {
			switch (cmd)
			{
			case FSZ_QUERY:
				return FALSE;
			case FSZ_VALIDATE:
				*ptr = &framesize_cache_empty;
				return FALSE;
			case FSZ_ENTER:
				framesize_cache[i].pc = funcaddr;
				framesize_cache[i].frmsize = *fsize;
				framesize_cache[i].bp_adjust = 0;
				framesize_modify(&framesize_cache[i]);
				*fsize = framesize_cache[i].frmsize;
				return TRUE;
			}
		}
	}

	console("framesize_cache is full\n");

	/*
	 * No place to put it, or it doesn't exist.
	 */
	switch (cmd)
	{
	case FSZ_VALIDATE:
		*ptr = &framesize_cache_empty;
		return FALSE;
	case FSZ_QUERY:
		return FALSE;
	case FSZ_ENTER:
		BZERO(&framesize_cache[last_cleared % FRAMESIZE_CACHE],
			sizeof(struct framesize_cache));
		last_cleared++;
		goto retry;
	}

	return FALSE;  /* can't get here -- for compiler happiness */
}

/*
 * More kludgery for compiler oddities.
 */
#define COMPILER_VERSION_MASK  (1)  /* deprecated -- usable up to 3.3.3 */
#define COMPILER_VERSION_EQUAL (2)
#define COMPILER_VERSION_START (3)
#define COMPILER_VERSION_RANGE (4)

/*
 * Table of per-function frame-size fixups, each gated on the gcc version
 * that built the kernel.  pre_adjust tweaks the computed frame size;
 * post_adjust is stored as a bp_adjust applied when the RA looks bad;
 * called_function (when set) enables RA validation against that callee.
 */
struct framesize_mods {
	char *funcname;
	char *called_function;
	ulong compiler_flag;
	ulong compiler1;
	ulong compiler2;
	int pre_adjust;
	int post_adjust;
} framesize_mods[] = {
	{ "do_select", "schedule_timeout",
		COMPILER_VERSION_START, GCC(3,3,2), 0, 0, 0 },
	{ "svc_recv", "schedule_timeout",
		COMPILER_VERSION_START, GCC(3,3,2), 0, 0, 0 },
	{ "__down_interruptible", "schedule",
		COMPILER_VERSION_START, GCC(3,3,2), 0, 0, 0 },
	{ "netconsole_netdump", NULL,
		COMPILER_VERSION_START, GCC(3,3,2), 0, 0, -28 },
	{ "generic_file_write", NULL,
		COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 20 },
	{ "block_prepare_write", NULL,
		COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 72 },
	{ "receive_chars", NULL,
		COMPILER_VERSION_EQUAL, GCC(2,96,0), 0, 0, 48 },
	{ "default_idle", NULL,
		COMPILER_VERSION_START, GCC(2,96,0), 0, -4, 0 },
	{ "hidinput_hid_event", NULL,
		COMPILER_VERSION_START, GCC(4,1,2), 0, 0, 28 },
	{ NULL, NULL, 0, 0, 0, 0, 0 },
};

/*
 * framesize_modify()
 *
 * Apply any framesize_mods[] fixup matching this cache entry's function
 * and the kernel's compiler version.  Also sanitizes a negative computed
 * frame size to zero.  Returns TRUE when a fixup was applied.
 */
static int
framesize_modify(struct framesize_cache *fc)
{
	char *funcname;
	struct framesize_mods *fmp;

	if (!(funcname = kl_funcname(fc->pc)))
		return FALSE;

	if (fc->frmsize < 0) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "bogus framesize: %d for pc: %lx (%s)\n",
				fc->frmsize, fc->pc, funcname);
		fc->frmsize = 0;
	}

	for (fmp = &framesize_mods[0]; fmp->funcname; fmp++) {
		if (STREQ(funcname, fmp->funcname) &&
		    compiler_matches(fmp))
			break;
	}

	if (!fmp->funcname)
		return FALSE;

	if (fmp->pre_adjust)
		fc->frmsize += fmp->pre_adjust;

	if (fmp->post_adjust)
		fc->bp_adjust = fmp->post_adjust;

	if (fmp->called_function) {
		if (STREQ(fmp->called_function,x86_function_called_by(fc->pc)))
			fc->flags |= FRAMESIZE_VALIDATE;
	}

	return TRUE;
}

/*
 * compiler_matches()
 *
 * TRUE when the kernel's gcc version satisfies the entry's constraint
 * (mask, exact, minimum, or inclusive range).
 */
static int
compiler_matches(struct framesize_mods *fmp)
{
	switch (fmp->compiler_flag)
	{
	case COMPILER_VERSION_MASK:
		if (fmp->compiler1 & (kt->flags & GCC_VERSION_DEPRECATED))
			return TRUE;
break; case COMPILER_VERSION_EQUAL: if (THIS_GCC_VERSION == fmp->compiler1) return TRUE; break; case COMPILER_VERSION_START: if (THIS_GCC_VERSION >= fmp->compiler1) return TRUE; break; case COMPILER_VERSION_RANGE: if ((THIS_GCC_VERSION >= fmp->compiler1) && (THIS_GCC_VERSION <= fmp->compiler2)) return TRUE; break; } return FALSE; } static int dump_framesize_cache(FILE *ofp, struct framesize_cache *fcp) { int i, count; struct syment *sp, *spm; ulong offset; int once; for (i = once = count = 0; i < FRAMESIZE_CACHE; i++) { if (framesize_cache[i].pc == 0) break; count++; if (fcp && (fcp != &framesize_cache[i])) continue; if (!once) { fprintf(ofp, "RET ADDR FSZ BPA V FUNCTION\n"); once++; } fprintf(ofp, "%8x %4d %4d %s ", framesize_cache[i].pc, framesize_cache[i].frmsize, framesize_cache[i].bp_adjust, framesize_cache[i].flags & FRAMESIZE_VALIDATE ? "V" : "-"); if ((sp = value_search(framesize_cache[i].pc, &offset)) || (spm = kl_lkup_symaddr(framesize_cache[i].pc))) { if (sp) fprintf(ofp, "(%s+", sp->name); else { fprintf(ofp, "(%s+", spm->name); offset = framesize_cache[i].pc - spm->value; } switch (pc->output_radix) { case 10: fprintf(ofp, "%ld)", offset); break; default: case 16: fprintf(ofp, "%lx)", offset); break; } } fprintf(ofp, "\n"); if (fcp) return 0; } if (!count) fprintf(ofp, "framesize cache emtpy\n"); if (kt->flags & RA_SEEK) fprintf(ofp, "RA_SEEK: ON\n"); if (kt->flags & NO_RA_SEEK) fprintf(ofp, "NO_RA_SEEK: ON\n"); return count; } static int modify_framesize_cache_entry(FILE *ofp, ulong eip, int framesize) { int i, found, all_cleared; for (i = found = all_cleared = 0; i < FRAMESIZE_CACHE; i++) { if (!eip) { switch (framesize) { case -1: framesize_cache[i].flags |= FRAMESIZE_VALIDATE; break; case -2: framesize_cache[i].flags &= ~FRAMESIZE_VALIDATE; break; default: framesize_cache[i].pc = 0; framesize_cache[i].frmsize = 0; framesize_cache[i].flags = 0; all_cleared = TRUE; break; } continue; } if (framesize_cache[i].pc == 0) break; if (framesize_cache[i].pc 
		    == eip) {
			found++;
			switch (framesize)
			{
			case -1:
				framesize_cache[i].flags |= FRAMESIZE_VALIDATE;
				break;
			case -2:
				framesize_cache[i].flags &= ~FRAMESIZE_VALIDATE;
				break;
			default:
				framesize_cache[i].frmsize = framesize;
				break;
			}
			dump_framesize_cache(ofp, &framesize_cache[i]);
			return TRUE;
		}
	}

	if (eip && !found)
		fprintf(ofp, "eip: %lx not found in framesize cache\n", eip);
	if (all_cleared)
		fprintf(ofp, "framesize cache cleared\n");

	return FALSE;
}

/*
 * If eip, look for it and replace its frmsize with the passed-in value.
 * If no eip, frmsize of zero means clear the cache, non-zero displays it.
 */
static int
framesize_debug(struct bt_info *bt, FILE *ofp)
{
	ulong eip;
	int frmsize;

	/* Arguments are smuggled in through the bt hook: hp->eip / hp->esp. */
	eip = bt->hp->eip;
	frmsize = (int)bt->hp->esp;

	if (!eip) {
		switch (frmsize)
		{
		case 0:
		case -1:
		case -2:
			return modify_framesize_cache_entry(ofp, 0, frmsize);
		default:
			return dump_framesize_cache(ofp, NULL);
		}
	}

	return modify_framesize_cache_entry(ofp, eip, frmsize);
}
#endif /* REDHAT */

/*
#define FRMSIZE_DBG 1
#define FRMSIZE2_DBG 1
*/

/*
 * get_framesize()
 *
 * Compute a function's stack frame size at a given pc by walking its
 * instructions from the function entry and accumulating the SP effect of
 * push/pop/pusha/popa and add/sub-on-%esp instructions.  REDHAT builds
 * cache the result (see cache_framesize) and compensate for embedded
 * "ret" instructions, do_IRQ stack switches, and entry-trampoline pcs.
 */
int
#ifdef REDHAT
get_framesize(kaddr_t pc, struct bt_info *bt)
#else
get_framesize(kaddr_t pc)
#endif
{
	int size, ret, frmsize = 0;
	kaddr_t addr;
	instr_rec_t irp;
	syment_t *sp;
#ifdef REDHAT
	int check_IRQ_stack_switch = 0;
	syment_t *jmpsp, *trampsp;
	ulong offset;
	int frmsize_restore = 0;   /* SP effect undone by an embedded "ret" */
	int last_add = 0;

	if (FRAMESIZE_CACHE_QUERY(pc, &frmsize))
		return frmsize;
	frmsize = 0;
#endif
	if (!(sp = kl_lkup_symaddr(pc))) {
		return(0);
	}
#ifdef REDHAT
	if (STREQ(sp->name, "do_IRQ") && (tt->flags & IRQSTACKS))
		check_IRQ_stack_switch++;

	/* Out-of-line lock section: analyze the function it jumps back into. */
	if (STREQ(sp->name, "stext_lock") ||
	    STRNEQ(sp->name, ".text.lock.")) {
		jmpsp = x86_text_lock_jmp(pc, &offset);
		if (jmpsp) {
			console("get_framesize: stext_lock %lx => %s\n",
				pc, jmpsp->name);
			pc = jmpsp->value + offset;
			sp = jmpsp;
		}
	}

	/* Map an entry-trampoline address back to its real text address. */
	if ((trampsp = x86_is_entry_tramp_address(pc, &offset))) {
		if (STREQ(sp->name, "system_call"))
			return 0;
		pc = trampsp->value + offset;
	}
#endif
#ifdef FRMSIZE_DBG
	fprintf(stderr, "get_framesize(): pc=0x%x (0x%x:%s)\n",
		pc, sp->s_addr, sp->s_name);
#endif
	addr = sp->s_addr;
	/* Walk forward from the function entry up to (and including) pc. */
	while (addr <= pc) {
		bzero(&irp, sizeof(irp));
		irp.aflag = 1;
		irp.dflag = 1;
		if (!(size = get_instr_info(addr, &irp))) {
			fprintf(stderr, "ZERO SIZE!!\n");
			return(-1);
		}
		if (size != irp.size) {
			fprintf(stderr, "SIZE DOES NOT MATCH!!\n");
		}
#ifdef REDHAT
		/*
		 * Account for do_IRQ() stack switch.
		 */
		if (check_IRQ_stack_switch && (irp.opcode == 0xff02) &&
		    (irp.operand[0].op_reg == 0x7))
			break;
		/*
		 * Account for embedded "ret" instructions screwing up
		 * the frame size calculation.
		 */
		if (irp.opcode == 0xc3) {
			frmsize += frmsize_restore;
			frmsize_restore = 0;
			last_add = FALSE;
		} else if ((irp.opcode == 0x8300) &&
			(irp.operand[0].op_reg == R_eSP)) {
			frmsize_restore += irp.operand[1].op_addr;
			last_add = TRUE;
		} else if ((irp.opcode == 0x8100) &&
			(irp.operand[0].op_reg == R_eSP)) {
			frmsize_restore += irp.operand[1].op_addr;
			last_add = TRUE;
		} else if ((ret = is_pop(irp.opcode))) {
			if (ret == 2)
				frmsize_restore += (8 * 4);
			else
				frmsize_restore += 4;
			last_add = FALSE;
		} else {
			if (last_add)
				last_add = FALSE;
			else
				frmsize_restore = 0;
		}
#endif /* REDHAT */
#ifdef REDHAT
		if ((irp.opcode == 0x8300) || (irp.opcode == 0x8100)) {
#else
		if (irp.opcode == 0x8300) {
#endif
			/* e.g., addl $0x8,%esp */
			if (irp.operand[0].op_reg == R_eSP) {
				frmsize -= irp.operand[1].op_addr;
#ifdef FRMSIZE_DBG
				fprintf(stderr, " addl --> 0x%x: -%d\n",
					addr, irp.operand[1].op_addr);
#endif
			}
		} else if ((irp.opcode == 0x8305) || (irp.opcode == 0x8105)) {
			/* e.g., subl $0x40,%esp */
			if (irp.operand[0].op_reg == R_eSP) {
				frmsize += irp.operand[1].op_addr;
#ifdef FRMSIZE_DBG
				fprintf(stderr, " subl --> 0x%x: +%d\n",
					addr, irp.operand[1].op_addr);
#endif
			}
		} else if ((ret = is_push(irp.opcode))) {
			if (ret == 2) {
				frmsize += (8 * 4);
#ifdef FRMSIZE_DBG
				fprintf(stderr, " pusha --> 0x%x: +%d\n",
					addr, (8 * 4));
#endif
			} else {
				frmsize += 4;
#ifdef FRMSIZE_DBG
				fprintf(stderr, " pushl --> 0x%x: +%d\n"
					, addr, 4);
#endif
			}
		} else if ((ret = is_pop(irp.opcode))) {
			if (ret == 2) {
				frmsize -= (8 * 4);
#ifdef FRMSIZE_DBG
				fprintf(stderr, " popa --> 0x%x: -%d\n",
					addr, (8 * 4));
#endif
			} else {
				frmsize -= 4;
#ifdef FRMSIZE_DBG
				fprintf(stderr, " popl --> 0x%x: -%d\n",
					addr, 4);
#endif
			}
#ifdef FRMSIZE2_DBG
		} else {
			fprintf(stderr, " 0x%x: opcode=0x%x\n",
				addr, irp.opcode);
#endif
		}
		addr += size;
	}
#ifdef REDHAT
	/*
	 * Account for fact that schedule may not "call" anybody, plus
	 * the difference between gcc 3.2 and earlier compilers.
	 */
	if (STREQ(kl_funcname(pc), "schedule") &&
	    !(bt->flags & BT_CONTEXT_SWITCH))
		frmsize -= THIS_GCC_VERSION == GCC(3,2,0) ? 4 : 8;

	FRAMESIZE_CACHE_ENTER(pc, &frmsize);
#endif
	return(frmsize);
}

#ifndef REDHAT
/*
 * print_pc()
 */
void
print_pc(kaddr_t addr, FILE *ofp)
{
	int offset = 0;
	syment_t *sp;

	if ((sp = kl_lkup_symaddr(addr))) {
		offset = addr - sp->s_addr;
	}

	/* Print out address */
	fprintf(ofp, "0x%x", addr);

	/* Print out symbol name */
	if (sp) {
		if (offset) {
			fprintf(ofp, " <%s+%d>", sp->s_name, offset);
		} else {
			fprintf(ofp, " <%s>", sp->s_name);
		}
	}
}
#endif /* !REDHAT */

/*
 * alloc_sframe() -- Allocate a stack frame record
 */
sframe_t *
alloc_sframe(trace_t *trace, int flags)
{
	sframe_t *f;

	if (flags & C_PERM) {
		f = (sframe_t *)kl_alloc_block(sizeof(sframe_t), K_PERM);
	} else {
		f = (sframe_t *)kl_alloc_block(sizeof(sframe_t), K_TEMP);
	}
	if (!f) {
		return((sframe_t *)NULL);
	}
	/* Frame level is its position in the trace so far. */
	f->level = trace->nframes;
	return(f);
}

/*
 * free_sframes() -- Free all stack frames allocated to a trace record.
 */
void
free_sframes(trace_t *t)
{
	sframe_t *sf;

	t->nframes = 0;
	sf = t->frame;
	/* Dequeue and free every frame, including any attached srcfile string. */
	while(t->frame) {
		sf = (sframe_t *)kl_dequeue((element_t **)&t->frame);
		if (sf->srcfile) {
			kl_free_block((void *)sf->srcfile);
		}
		kl_free_block((void *)sf);
	}
	t->frame = (sframe_t *)NULL;
}

/*
 * alloc_trace_rec() -- Allocate stack trace header
 */
trace_t *
alloc_trace_rec(int flags)
{
	trace_t *t;

	if (flags & C_PERM) {
		t = (trace_t *)kl_alloc_block(sizeof(trace_t), K_PERM);
	} else {
		t = (trace_t *)kl_alloc_block(sizeof(trace_t), K_TEMP);
	}
	return(t);
}

/*
 * free_trace_rec() -- Free memory associated with stack trace header
 */
void
free_trace_rec(trace_t *t)
{
	int i;

	if (t->tsp) {
		kl_free_block(t->tsp);
	}
	for (i = 0; i < STACK_SEGMENTS; i++) {
		if (t->stack[i].ptr) {
			kl_free_block((void *)t->stack[i].ptr);
		}
	}
	free_sframes(t);
	kl_free_block((void *)t);
}

/*
 * clean_trace_rec() -- Clean up stack trace record without releasing
 *                      any of the allocated memory (except sframes).
 */
void
clean_trace_rec(trace_t *t)
{
	int i;

	t->flags = 0;
	t->task = 0;
	if (t->tsp) {
		kl_free_block(t->tsp);
		t->tsp = 0;
	}
	t->stackcnt = 0;
	for (i = 0; i < STACK_SEGMENTS; i++) {
		if (t->stack[i].ptr) {
			t->stack[i].type = 0;
			t->stack[i].size = 0;
			t->stack[i].addr = (kaddr_t)NULL;
			kl_free_block((void *)t->stack[i].ptr);
			t->stack[i].ptr = (uaddr_t *)NULL;
		}
	}
	free_sframes(t);
}

/*
 * setup_trace_rec()
 *
 * Prime a trace record for the task whose kernel stack tops out at saddr:
 * optionally snapshot the task_struct, then describe and buffer the
 * kernel stack segment.  Returns 0 on success, 1 on failure (record
 * cleaned up).
 */
int
setup_trace_rec(kaddr_t saddr, kaddr_t task, int flag, trace_t *trace)
{
	int aflag = K_TEMP;

#ifdef REDHAT
	KL_ERROR = 0;
#else
	kl_reset_error();
#endif

	if (flag & C_PERM) {
		aflag = K_PERM;
	}
	if (task) {
		trace->tsp = kl_alloc_block(TASK_STRUCT_SZ, aflag);
		if (kl_get_task_struct(task, 2, trace->tsp)) {
			kl_free_block(trace->tsp);
			trace->tsp = NULL;
			return(1);
		}
	}
	trace->stack[0].type = S_KERNELSTACK;
	trace->stack[0].size = STACK_SIZE;

	/* Get the base address of the stack */
	trace->stack[0].addr = saddr - trace->stack[0].size;
	trace->stack[0].ptr = kl_alloc_block(STACK_SIZE, aflag);
	if (KL_ERROR) {
		clean_trace_rec(trace);
		return(1);
	}
#ifdef REDHAT
	/* The stack contents were already read into bt->stackbuf. */
	BCOPY(trace->bt->stackbuf, trace->stack[0].ptr, STACK_SIZE);
#else
	GET_BLOCK(trace->stack[0].addr, STACK_SIZE, trace->stack[0].ptr);
#endif
	if (KL_ERROR) {
		clean_trace_rec(trace);
		return(1);
	}
	return(0);
}

/*
 * valid_ra()
 *
 * A return address is valid when it lies in kernel space, resolves to a
 * known function, and is immediately preceded by a call instruction.
 */
int
valid_ra(kaddr_t ra)
{
	kaddr_t pc;

	if ((ra < KL_PAGE_OFFSET) || !kl_funcaddr(ra))
		return(0);

	if ((pc = get_call_pc(ra)))
		return(1);

	return(0);
}

/*
 * valid_ra_function()
 *
 * Same as above, but ensure that it calls the funcname passed in.
 */
int
valid_ra_function(kaddr_t ra, char *funcname)
{
	kaddr_t pc;

	if ((ra < KL_PAGE_OFFSET) || !kl_funcaddr(ra))
		return(0);

	if (!(pc = get_call_pc(ra)))
		return(0);

	/* 5 == size of a direct call instruction on x86. */
	if (STREQ(x86_function_called_by(ra-5), funcname))
		return(1);

	return(0);
}

#ifndef REDHAT
#include
#endif

#define KERNEL_EFRAME 0
#define USER_EFRAME 1
#define KERNEL_EFRAME_SZ 13  /* no ss and esp */
#define USER_EFRAME_SZ 15

#ifdef REDHAT
/* Classic i386 segment selector values used for eframe classification. */
#undef __KERNEL_CS
#undef __KERNEL_DS
#undef __USER_CS
#undef __USER_DS
#define __KERNEL_CS 0x10
#define __KERNEL_DS 0x18
#define __USER_CS 0x23
#define __USER_DS 0x2B
#endif

/*
 * Check if the exception frame is of kernel or user type
 * Is checking only DS and CS values sufficient ?
 */
int
eframe_type(uaddr_t *int_eframe)
{
	ushort xcs, xds;

	xcs = (ushort)(int_eframe[INT_EFRAME_CS] & 0xffff);
	xds = (ushort)(int_eframe[INT_EFRAME_DS] & 0xffff);

	if ((xcs == __KERNEL_CS) && (xds == __KERNEL_DS))
		return KERNEL_EFRAME;
#ifdef REDHAT
	/* Alternate selector layouts seen on later kernels and Xen guests. */
	else if ((xcs == 0x60) && (xds == 0x68))
		return KERNEL_EFRAME;
	else if ((xcs == 0x60) && (xds == 0x7b))
		return KERNEL_EFRAME;
	else if (XEN() && (xcs == 0x61) && (xds == 0x7b))
		return KERNEL_EFRAME;
#endif
	else if ((xcs == __USER_CS) && (xds == __USER_DS))
		return USER_EFRAME;
#ifdef REDHAT
	else if ((xcs == 0x73) && (xds == 0x7b))
		return USER_EFRAME;
#endif
	return -1;
}

/* Display an exception frame's registers. */
void
print_eframe(FILE *ofp, uaddr_t *regs)
{
	int type = eframe_type(regs);

#ifdef REDHAT
	x86_dump_eframe_common(NULL, (ulong *)regs, (type == KERNEL_EFRAME));
#else
	fprintf(ofp, " ebx: %08lx ecx: %08lx edx: %08lx esi: %08lx\n",
		regs->ebx, regs->ecx, regs->edx, regs->esi);
	fprintf(ofp, " edi: %08lx ebp: %08lx eax: %08lx ds: %04x\n",
		regs->edi, regs->ebp, regs->eax, regs->xds & 0xffff);
	fprintf(ofp, " es: %04x eip: %08lx cs: %04x eflags: %08lx\n",
		regs->xes & 0xffff, regs->eip, regs->xcs & 0xffff,
		regs->eflags);
	if (type == USER_EFRAME)
		fprintf(ofp, " esp: %08lx ss: %04x\n", regs->esp,
			regs->xss);
#endif
}

#ifdef REDHAT
/*
 * Scan up the stack (word by word) from bp until the word at bp+4 looks
 * like a valid return address; both macros update bp and ra in place.
 */
#define SEEK_VALID_RA()						\
{								\
	while (!valid_ra(ra)) {					\
		if ((bp + 4) < bt->stacktop) {			\
			bp += 4;				\
			ra = GET_STACK_ULONG(bp + 4);		\
		} else						\
			break;					\
	}							\
}

/* As above, but the RA must specifically be a call to function F. */
#define SEEK_VALID_RA_FUNCTION(F)				\
{								\
	while (!valid_ra_function(ra, (F))) {			\
		if ((bp + 4) < bt->stacktop) {			\
			bp += 4;				\
			ra = GET_STACK_ULONG(bp + 4);		\
		} else						\
			break;					\
	}							\
}
#endif

/*
 * Determine how much to increment the stack pointer to find the
 * exception frame associated with a generic "error_code" or "nmi"
 * exception.
 *
 * The incoming addr is that of the call to the generic error_code
 * or nmi exception handler function.  Until later 2.6 kernels, the next
 * instruction had always been an "addl $8,%esp".  However, with later
 * 2.6 kernels, that esp adjustment is no longer valid, and there will be
 * an immediate "jmp" instruction.  Returns 4 or 12, whichever is appropriate.
 * Cache the value the first time, and allow for future changes or additions.
 */

#define NMI_ADJ        (0)
#define ERROR_CODE_ADJ (1)
#define EFRAME_ADJUSTS (ERROR_CODE_ADJ+1)

static int eframe_adjust[EFRAME_ADJUSTS] = { 0 };

static int
eframe_incr(kaddr_t addr, char *funcname)
{
	instr_rec_t irp;
	kaddr_t next;
	int size, adj, val;

	/* Pick the cache slot for this exception flavor. */
	if (STRNEQ(funcname, "nmi")) {
		adj = NMI_ADJ;
		val = eframe_adjust[NMI_ADJ];
	} else if (strstr(funcname, "error_code")) {
		adj = ERROR_CODE_ADJ;
		val = eframe_adjust[ERROR_CODE_ADJ];
	} else {
		adj = -1;
		val = 0;
		error(INFO,
		    "unexpected exception frame marker: %lx (%s)\n",
			addr, funcname);
	}

	if (val) {
		console("eframe_incr(%lx, %s): eframe_adjust[%d]: %d\n",
			addr, funcname, adj, val);
		return val;
	}
	console("eframe_incr(%lx, %s): TBD:\n", addr, funcname);

	/* Decode the call instruction at addr... */
	bzero(&irp, sizeof(irp));
	irp.aflag = 1;
	irp.dflag = 1;
	if (!(size = get_instr_info(addr, &irp))) {
		if (CRASHDEBUG(1))
			error(INFO,
		    "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n",
				addr, funcname, addr);
		return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12);
	}
	console(" addr: %lx size: %d opcode: 0x%x insn: \"%s\"\n",
		addr, size, irp.opcode, irp.opcodep->name);

	/* ...then the instruction following it: jmp/nop => 4, else 12. */
	next = addr + size;
	bzero(&irp, sizeof(irp));
	irp.aflag = 1;
	irp.dflag = 1;
	if (!(size = get_instr_info(next, &irp))) {
		if (CRASHDEBUG(1))
			error(INFO,
		    "eframe_incr(%lx, %s): get_instr_info(%lx) failed\n",
				addr, funcname, next);
		return((THIS_KERNEL_VERSION > LINUX(2,6,9)) ? 4 : 12);
	}
	console(" next: %lx size: %d opcode: 0x%x insn: \"%s\"\n",
		next, size, irp.opcode, irp.opcodep->name);

	if (STREQ(irp.opcodep->name, "jmp") || STREQ(irp.opcodep->name, "nop"))
		val = 4;
	else
		val = 12;

	if (adj >= 0)
		eframe_adjust[adj] = val;

	return val;
}

/*
 * TRUE when funcname is the function at the top of this task's stack --
 * i.e., no valid return address appears above its occurrence.
 */
static int
xen_top_of_stack(struct bt_info *bt, char *funcname)
{
	ulong stkptr, contents;

	for (stkptr = bt->stacktop-4; stkptr > bt->stackbase; stkptr--) {
		contents = GET_STACK_ULONG(stkptr);
		if (kl_funcname(contents) == funcname)
			return TRUE;
		if (valid_ra(contents))
			break;
	}
	return FALSE;
}

/*
 * Like kl_funcname(), but collapse any top-of-stack pc inside the
 * hypercall text range to the generic name "hypercall".
 */
static char *
xen_funcname(struct bt_info *bt, ulong pc)
{
	char *funcname = kl_funcname(pc);

	if (xen_top_of_stack(bt, funcname) &&
	    (pc >= symbol_value("hypercall")) &&
	    (pc < symbol_value("ret_from_intr")))
		return "hypercall";

	return funcname;
}

/*
 * Heuristic: TRUE when the stack region between frame and the user-mode
 * exception frame (located via thread_struct.esp0/sp0, or assumed at the
 * stack top) contains no kernel text addresses -- i.e., the trace has
 * reached the kernel-entry frame from user space.
 */
static int
userspace_return(kaddr_t frame, struct bt_info *bt)
{
	ulong esp0, eframe_addr;
	uint32_t *stkptr, *eframeptr;

	if (INVALID_MEMBER(task_struct_thread) ||
	    (((esp0 = MEMBER_OFFSET("thread_struct", "esp0")) < 0) &&
	     ((esp0 = MEMBER_OFFSET("thread_struct", "sp0")) < 0)))
		eframe_addr = bt->stacktop - SIZE(pt_regs);
	else
		eframe_addr = ULONG(tt->task_struct +
			OFFSET(task_struct_thread) + esp0) - SIZE(pt_regs);

	if (!INSTACK(eframe_addr, bt))
		return FALSE;

	stkptr = (uint32_t *)(bt->stackbuf + ((ulong)frame - bt->stackbase));
	eframeptr = (uint32_t *)(bt->stackbuf + (eframe_addr - bt->stackbase));

	while (stkptr < eframeptr) {
		if (is_kernel_text_offset(*stkptr))
			return FALSE;
		stkptr++;
	}

	return TRUE;
}

/*
 * find_trace()
 *
 * Given a starting pc (start_cp), starting stack pointer (start_sp),
 * and stack address, check to see if a valid trace is possible. A
 * trace is considered valid if no errors are encountered (bad PC,
 * bad SP, etc.) Certain errors are tolerated however.
For example, * if the current stack frame is an exception frame (e.g., VEC_*), * go ahead and return success -- even if PC and SP obtained from * the exception frame are bad (a partial trace is better than no * trace).. * * Return zero if no valid trace was found. Otherwise, return the * number of frames found. If the C_ALL flag is passed in, then * return a trace even if it is a subtrace of a trace that was * previously found. * * Parameters: * * start_pc starting program counter * start_sp starting stack pointer * check_pc if non-NULL, check to see if check_pc/check_sp * check_sp are a sub-trace of trace beginning with spc/ssp * trace structure containing all trace related info (frames, * pages, page/frame counts, etc. * flags */ int find_trace( kaddr_t start_pc, kaddr_t start_sp, kaddr_t check_pc, kaddr_t check_sp, trace_t *trace, int flags) { int curstkidx = 0, frame_size, frame_type; kaddr_t sp, pc, ra, bp, sbase, saddr, func_addr; sframe_t *curframe; char *func_name; uaddr_t *sbp, *asp; #ifdef REDHAT struct syment *sp1; ulong offset; int flag; int interrupted_system_call = FALSE; struct bt_info *bt = trace->bt; uaddr_t *pt; curframe = NULL; #endif sbp = trace->stack[curstkidx].ptr; sbase = trace->stack[curstkidx].addr; saddr = sbase + trace->stack[curstkidx].size; #ifdef REDHAT bp = start_sp + get_framesize(start_pc, bt); #else bp = start_sp + get_framesize(start_pc); #endif if (KL_ERROR || (bp < sbase) || (bp >= saddr)) { return(0); } pc = start_pc; sp = start_sp; func_name = kl_funcname(pc); #ifdef REDHAT if (STREQ(func_name, "context_switch")) bt->flags |= BT_CONTEXT_SWITCH; #endif while (pc) { /* LOOP TRAP! Make sure we are not just looping on the * same frame forever. 
*/ if ((trace->nframes > 1) && (curframe->funcname == curframe->prev->funcname) && (curframe->sp == curframe->prev->sp)) { curframe->error = 1; #ifdef REDHAT bt->flags |= BT_LOOP_TRAP; #endif return(trace->nframes); } #ifdef REDHAT /* * If we wrap back to a lower stack location, we're cooked. */ if ((trace->nframes > 1) && (curframe->sp < curframe->prev->sp)) { curframe->error = 1; bt->flags |= BT_WRAP_TRAP; return(trace->nframes); } #endif /* Allocate space for a stack frame rec */ curframe = alloc_sframe(trace, flags); if (!(func_addr = kl_funcaddr(pc))) { curframe->error = KLE_BAD_PC; UPDATE_FRAME(0, pc, 0, 0, 0, 0, 0, 0, 0, 0); return(trace->nframes); } /* Check to see if check_pc/check_sp points to a sub-trace * of spc/ssp. If it does then don't return a trace (unless * C_ALL). Make sure we free the curframe block since we * wont be linking it in to the trace rec. */ if (check_pc && ((pc == check_pc) && (sp == check_sp))) { kl_free_block((void *)curframe); if (flags & C_ALL) { return(trace->nframes); } else { return(0); } } asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); #ifdef REDHAT if (XEN_HYPER_MODE()) { func_name = xen_funcname(bt, pc); if (STREQ(func_name, "idle_loop") || STREQ(func_name, "hypercall") || STREQ(func_name, "process_softirqs") || STREQ(func_name, "tracing_off") || STREQ(func_name, "page_fault") || STREQ(func_name, "handle_exception") || xen_top_of_stack(bt, func_name)) { UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); return(trace->nframes); } } else if (STREQ(closest_symbol(pc), "cpu_idle")) { func_name = kl_funcname(pc); UPDATE_FRAME(func_name, pc, 0, sp, bp, asp, 0, 0, bp - sp, 0); return(trace->nframes); } ra = GET_STACK_ULONG(bp + 4); /* * HACK: The get_framesize() function can return the proper * value -- as verified by disassembling the function -- but * in rare circumstances there's more to the stack frame than * meets the eye. 
Until I can figure out why, extra space * can be added here for any "known" anomolies. gcc version * restrictions are also added rather than assuming anything. * See framesize_modify() for kludgery. */ if (!valid_ra(ra)) { char *funcname; struct framesize_cache *fcp; funcname = kl_funcname(pc); FRAMESIZE_CACHE_VALIDATE(pc, (void **)&fcp); bp += fcp->bp_adjust; ra = GET_STACK_ULONG(bp + 4); /* * This anomoly would be caught by the recovery * speculation, but since we know it's an issue * just catch it here first. */ if (STREQ(funcname, "schedule") && (THIS_GCC_VERSION >= GCC(3,2,3))) { SEEK_VALID_RA(); /* * else FRAMESIZE_VALIDATE has been turned on */ } else if (fcp->flags & FRAMESIZE_VALIDATE) { SEEK_VALID_RA_FUNCTION(funcname); /* * Generic speculation continues the search for * a valid RA at a higher stack address. */ } else if ((bt->flags & BT_SPECULATE) && !STREQ(funcname, "context_switch") && !STREQ(funcname, "die") && !(bt->frameptr && ((bp+4) < bt->frameptr))) SEEK_VALID_RA(); } #else kl_get_kaddr(bp + 4, &ra); #endif /* Make sure that the ra we have is a valid one. If not * then back up in the frame, word by word, until we find * one that is good. 
*/ if (!valid_ra(ra)) { int i; i = ((bp - sp + 8) / 4); while (i) { bp -= 4; #ifdef REDHAT ra = GET_STACK_ULONG(bp + 4); #else kl_get_kaddr(bp + 4, &ra); #endif if (valid_ra(ra)) { break; } i--; } if (i == 0) { #ifdef REDHAT if (interrupted_system_call) { if ((sp1 = x86_is_entry_tramp_address (pc, &offset))) pc = sp1->value + offset; flag = EX_FRAME; } else { if (!XEN_HYPER_MODE() && !is_kernel_thread(bt->task) && (bt->stacktop == machdep->get_stacktop(bt->task))) { if (((ulong)(bp+4) + SIZE(pt_regs)) > bt->stacktop) flag = INCOMPLETE_EX_FRAME; else if ((sp1 = eframe_label(NULL, pc)) && STREQ(sp1->name, "system_call")) flag = EX_FRAME|SET_EX_FRAME_ADDR; else if (STREQ(closest_symbol(pc), "ret_from_fork")) flag = EX_FRAME|SET_EX_FRAME_ADDR; else if (userspace_return(bp, bt)) flag = EX_FRAME|SET_EX_FRAME_ADDR; else { curframe->error = KLE_BAD_RA; flag = 0; } } else { curframe->error = KLE_BAD_RA; flag = 0; } } #else curframe->error = KLE_BAD_RA; #endif UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, 0, flag); return(trace->nframes); } } UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, 0, 0); curframe->frame_size = curframe->fp - curframe->sp + 4; /* Gather starting information for the next frame */ pc = get_call_pc(ra); #ifdef USE_FRAMEPTRS kl_get_kaddr(bp, &bp); if (KL_ERROR) { curframe->error = 2; return(trace->nframes); } #else /* It's possible for get_framesize() to return a size * that is larger than the actual frame size (because * all it does is count the push, pop, addl, and subl * instructions that effect the SP). If we are real near * the top of the stack, this might cause bp to overflow. * This will be fixed above, but we need to bring bp * back into the legal range so we don't crap out * before we can get to it... 
*/ #ifdef REDHAT frame_size = get_framesize(pc, bt); interrupted_system_call = FALSE; #else frame_size = get_framesize(pc); #endif if ((curframe->fp + frame_size) >= saddr) { bp = saddr - 4; } else { bp = curframe->fp + frame_size; } #endif func_name = kl_funcname(pc); if (func_name && !XEN_HYPER_MODE()) { if (strstr(func_name, "kernel_thread")) { ra = 0; bp = saddr - 4; asp = (uaddr_t*) ((uaddr_t)sbp + (STACK_SIZE - 12)); curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 16, 0); return(trace->nframes); } else if (strstr(func_name, "is386")) { ra = 0; bp = sp = saddr - 4; asp = curframe->asp; curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 0, 0); return(trace->nframes); } else if (STREQ(func_name, "ret_from_fork")) { ra = 0; bp = sp = saddr - 4; asp = curframe->asp; curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 0, EX_FRAME|SET_EX_FRAME_ADDR); return(trace->nframes); #ifdef REDHAT } else if (STREQ(func_name, "cpu_idle") || STREQ(func_name, "cpu_startup_entry") || STREQ(func_name, "start_secondary")) { ra = 0; bp = sp = saddr - 4; asp = curframe->asp; curframe = alloc_sframe(trace, flags); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, 0, 0); return(trace->nframes); } else if (strstr(func_name, "system_call") || strstr(func_name, "sysenter_past_esp") || eframe_label(func_name, pc) || strstr(func_name, "syscall_call") || strstr(func_name, "signal_return") || strstr(func_name, "reschedule") || kernel_entry_from_user_space(curframe, bt)) { #else } else if (strstr(func_name, "system_call")) { #endif /* * user exception frame, kernel stack ends * here. 
*/ bp = saddr - 4; sp = curframe->fp + 4; #ifdef REDHAT ra = GET_STACK_ULONG(bp-16); #else kl_get_kaddr(bp-16, &ra); #endif curframe = alloc_sframe(trace, flags); asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, (bp - sp + 4), EX_FRAME); return(trace->nframes); #ifdef REDHAT } else if (strstr(func_name, "error_code") || STREQ(func_name, "nmi_stack_correct") || STREQ(func_name, "nmi")) { #else } else if (strstr(func_name, "error_code")) { #endif /* an exception frame */ sp = curframe->fp + eframe_incr(pc, func_name); bp = sp + (KERNEL_EFRAME_SZ-1)*4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); curframe = alloc_sframe(trace, flags); ra = asp[INT_EFRAME_EIP]; frame_type = eframe_type(asp); UPDATE_FRAME(func_name, pc, ra, sp, bp, asp, 0, 0, (bp - sp + 4), EX_FRAME); /* prepare for next kernel frame, if present */ if (frame_type == KERNEL_EFRAME) { pc = asp[INT_EFRAME_EIP]; sp = curframe->fp+4; #ifdef REDHAT bp = sp + get_framesize(pc, bt); #else bp = sp + get_framesize(pc); #endif func_name = kl_funcname(pc); continue; } else { return(trace->nframes); } } else if (is_task_active(bt->task) && (strstr(func_name, "call_do_IRQ") || strstr(func_name, "common_interrupt") || strstr(func_name, "reboot_interrupt") || strstr(func_name, "call_function_interrupt"))) { /* Interrupt frame */ sp = curframe->fp + 4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); frame_type = eframe_type(asp); if (frame_type == KERNEL_EFRAME) bp = curframe->fp+(KERNEL_EFRAME_SZ-1)*4; else bp = curframe->fp+(USER_EFRAME_SZ-1)*4; curframe = alloc_sframe(trace, flags); ra = asp[INT_EFRAME_EIP]; UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, curframe->fp - curframe->sp+4, EX_FRAME); /* prepare for next kernel frame, if present */ if (frame_type == KERNEL_EFRAME) { sp = curframe->fp + 4; pc = asp[INT_EFRAME_EIP]; #ifdef REDHAT bp = sp + get_framesize(pc, bt); #else bp = sp + get_framesize(pc); #endif 
func_name = kl_funcname(pc); #ifdef REDHAT /* interrupted system_call entry */ if (STREQ(func_name, "system_call")) interrupted_system_call = TRUE; #endif continue; } else { return trace->nframes; } } } if (func_name && XEN_HYPER_MODE()) { if (STREQ(func_name, "continue_nmi") || STREQ(func_name, "vmx_asm_vmexit_handler") || STREQ(func_name, "common_interrupt") || STREQ(func_name, "handle_nmi_mce") || STREQ(func_name, "deferred_nmi")) { /* Interrupt frame */ sp = curframe->fp + 4; asp = (uaddr_t*)((uaddr_t)sbp + (STACK_SIZE - (saddr - sp))); bp = curframe->fp + (12 * 4); curframe = alloc_sframe(trace, flags); ra = *(asp + 9); UPDATE_FRAME(func_name, pc, ra, sp, bp + 4, asp, 0, 0, curframe->fp - curframe->sp+4, 12 * 4); /* contunue next frame */ pc = ra; sp = curframe->fp + 4; bp = sp + get_framesize(pc, bt); func_name = kl_funcname(pc); if (!func_name) return trace->nframes; continue; } } /* * Check for hypervisor_callback from user-space. */ if ((bt->flags & BT_XEN_STOP_THIS_CPU) && bt->tc->mm_struct && STREQ(kl_funcname(curframe->pc), "hypervisor_callback")) { pt = curframe->asp+1; if (eframe_type(pt) == USER_EFRAME) { if (program_context.debug >= 1) /* pc above */ error(INFO, "hypervisor_callback from user space\n"); curframe->asp++; curframe->flag |= EX_FRAME; return(trace->nframes); } } /* Make sure our next frame pointer is valid (in the stack). 
*/ if ((bp < sbase) || (bp >= saddr)) { curframe->error = 3; return(trace->nframes); } sp = curframe->fp + 4; } return(trace->nframes); } static int kernel_entry_from_user_space(sframe_t *curframe, struct bt_info *bt) { ulong stack_segment; if (is_kernel_thread(bt->tc->task)) return FALSE; stack_segment = GET_STACK_ULONG(curframe->fp + 4 + SIZE(pt_regs) - sizeof(kaddr_t)); if ((curframe->fp + 4 + SIZE(pt_regs)) == GET_STACKTOP(bt->task)) { if ((stack_segment == 0x7b) || (stack_segment == 0x2b)) return TRUE; } if ((curframe->fp + 4 + SIZE(pt_regs) + 8) == GET_STACKTOP(bt->task)) { if ((stack_segment == 0x7b) || (stack_segment == 0x2b)) return TRUE; } if (userspace_return(curframe->fp+4, bt)) return TRUE; else return FALSE; } #ifndef REDHAT /* * pc_offset() */ int pc_offset(kaddr_t pc) { kaddr_t func_addr; if ((func_addr = kl_funcaddr(pc))) { return(pc - func_addr); } return(-1); } #endif /* !REDHAT */ /* * dump_stack_frame() */ void dump_stack_frame(trace_t *trace, sframe_t *curframe, FILE *ofp) { int i, first_time = 1; kaddr_t sp; uaddr_t *asp; char buf[BUFSIZE]; sp = curframe->sp; asp = curframe->asp; for (i = 0; i < curframe->frame_size / 4; i++) { if (!(i % 4)) { if (first_time) { first_time = 0; #ifdef REDHAT fprintf(ofp, " %x: %s ", sp, format_stack_entry(trace->bt, buf, *asp++, 0)); #else fprintf(ofp, " %x: %08x ", sp, *asp++); #endif } else { #ifdef REDHAT fprintf(ofp, "\n %x: ", sp); #else fprintf(ofp, "\n %x: ", sp); #endif fprintf(ofp, "%s ", format_stack_entry(trace->bt, buf, *asp++, 0)); } sp += 16; } else { fprintf(ofp, "%s ", format_stack_entry(trace->bt, buf, *asp++, 0)); } } if (curframe->frame_size) { #ifdef REDHAT fprintf(ofp, "\n"); #else fprintf(ofp, "\n\n"); #endif } } /* * eframe_address() */ static uaddr_t * eframe_address(sframe_t *frmp, struct bt_info *bt) { ulong esp0, pt; if (!(frmp->flag & SET_EX_FRAME_ADDR) || INVALID_MEMBER(task_struct_thread) || (((esp0 = MEMBER_OFFSET("thread_struct", "esp0")) < 0) && ((esp0 = 
MEMBER_OFFSET("thread_struct", "sp0")) < 0))) return frmp->asp; /* * Work required in rarely-seen SET_EX_FRAME_ADDR circumstances. */ pt = ULONG(tt->task_struct + OFFSET(task_struct_thread) + esp0) - SIZE(pt_regs); if (!INSTACK(pt, bt)) return frmp->asp; return ((uint32_t *)(bt->stackbuf + (pt - bt->stackbase))); } /* * print_trace() */ void print_trace(trace_t *trace, int flags, FILE *ofp) { sframe_t *frmp; #ifdef REDHAT kaddr_t fp = 0; kaddr_t last_fp ATTRIBUTE_UNUSED; kaddr_t last_pc, next_fp, next_pc; struct bt_info *bt; bt = trace->bt; last_fp = last_pc = next_fp = next_pc = 0; #else int offset; #endif if ((frmp = trace->frame)) { do { #ifdef REDHAT if (trace->bt->flags & BT_LOOP_TRAP) { if (frmp->prev && frmp->error && (frmp->pc == frmp->prev->pc) && (frmp->fp == frmp->prev->fp)) goto print_trace_error; } if ((trace->bt->flags & BT_WRAP_TRAP) && frmp->error) goto print_trace_error; /* * We're guaranteed to run into an error when unwinding * a hard or soft IRQ stack, so just bail with success. */ if ((frmp->next != trace->frame) && frmp->next->error && (bt->flags & (BT_LOOP_TRAP|BT_WRAP_TRAP)) && (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ))) return; if ((frmp->level == 0) && (bt->flags & BT_XEN_STOP_THIS_CPU)) { print_stack_entry(trace->bt, 0, trace->bt->stkptr, symbol_value("stop_this_cpu"), value_symbol(symbol_value("stop_this_cpu")), frmp, ofp); } print_stack_entry(trace->bt, (trace->bt->flags & (BT_BUMP_FRAME_LEVEL|BT_XEN_STOP_THIS_CPU)) ? frmp->level + 1 : frmp->level, fp ? (ulong)fp : trace->bt->stkptr, (ulong)frmp->pc, frmp->funcname, frmp, ofp); if (trace->bt->flags & BT_LOOP_TRAP) { last_fp = fp ? 
(ulong)fp : trace->bt->stkptr; last_pc = frmp->pc; } fp = frmp->fp; #else fprintf(ofp, "%2d %s", frmp->level, frmp->funcname); offset = pc_offset(frmp->pc); if (offset > 0) { fprintf(ofp, "+%d", offset); } else if (offset < 0) { fprintf(ofp, "+"); } fprintf(ofp, " [0x%x]\n", frmp->pc); #endif if (frmp->flag & EX_FRAME) { if (CRASHDEBUG(1)) fprintf(ofp, " EXCEPTION FRAME: %lx\n", (unsigned long)frmp->sp); print_eframe(ofp, eframe_address(frmp, bt)); } #ifdef REDHAT if (CRASHDEBUG(1) && (frmp->flag & INCOMPLETE_EX_FRAME)) { fprintf(ofp, " INCOMPLETE EXCEPTION FRAME:\n"); fprintf(ofp, " user stacktop: %lx frame #%d: %lx (+pt_regs: %lx)\n", bt->stacktop, frmp->level, (ulong)frmp->fp, (ulong)frmp->fp + SIZE(pt_regs)); } if (trace->bt->flags & BT_FULL) { fprintf(ofp, " [RA: %x SP: %x FP: %x " "SIZE: %d]\n", frmp->ra, frmp->sp, frmp->fp, frmp->frame_size); dump_stack_frame(trace, frmp, ofp); } #else if (flags & C_FULL) { fprintf(ofp, "\n"); fprintf(ofp, " RA=0x%x, SP=0x%x, FP=0x%x, " "SIZE=%d\n\n", frmp->ra, frmp->sp, frmp->fp, frmp->frame_size); #ifdef FRMSIZE_DBG fprintf(ofp, "\n FRAMESIZE=%d\n\n", #ifdef REDHAT get_framesize(frmp->pc, bt)); #else get_framesize(frmp->pc)); #endif #endif dump_stack_frame(trace, frmp, ofp); } #endif /* !REDHAT */ if (frmp->error) { #ifdef REDHAT print_trace_error: KL_ERROR = KLE_PRINT_TRACE_ERROR; if (CRASHDEBUG(1) || trace->bt->debug) fprintf(ofp, "TRACE ERROR: 0x%llx %llx\n", frmp->error, trace->bt->flags); if (trace->bt->flags & BT_WRAP_TRAP) return; #else fprintf(ofp, "TRACE ERROR: 0x%llx\n", frmp->error); #endif } frmp = frmp->next; } while (frmp != trace->frame); } } /* * trace_banner() */ void trace_banner(FILE *ofp) { fprintf(ofp, "====================================================" "============\n"); } /* * task_trace() */ int #ifdef REDHAT lkcd_x86_back_trace(struct bt_info *bt, int flags, FILE *ofp) #else task_trace(kaddr_t task, int flags, FILE *ofp) #endif { void *tsp; kaddr_t saddr, eip, esp; ulong contents; trace_t 
*trace; #ifdef REDHAT int nframes = 0; kaddr_t task = bt->task; KL_ERROR = 0; tsp = NULL; if (bt->flags & BT_FRAMESIZE_DEBUG) return(framesize_debug(bt, ofp)); if (kt->flags & RA_SEEK) bt->flags |= BT_SPECULATE; if (XENDUMP_DUMPFILE() && XEN() && is_task_active(bt->task) && STREQ(kl_funcname(bt->instptr), "stop_this_cpu")) { /* * bt->instptr of "stop_this_cpu" is not a return * address -- replace it with the actual return * address found at the bt->stkptr location. */ if (readmem((ulong)bt->stkptr, KVADDR, &eip, sizeof(ulong), "xendump eip", RETURN_ON_ERROR)) bt->instptr = eip; bt->flags |= BT_XEN_STOP_THIS_CPU; if (CRASHDEBUG(1)) error(INFO, "replacing stop_this_cpu with %s\n", kl_funcname(bt->instptr)); } if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && is_task_active(bt->task) && !(kt->xen_flags & XEN_SUSPEND) && STREQ(kl_funcname(bt->instptr), "schedule")) { /* * This is an invalid (stale) schedule reference * left in the task->thread. Move down the stack * until the smp_call_function_interrupt return * address is found. */ saddr = bt->stkptr; while (readmem(saddr, KVADDR, &eip, sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { bt->instptr = eip; bt->stkptr = saddr; bt->flags |= BT_XEN_STOP_THIS_CPU; if (CRASHDEBUG(1)) error(INFO, "switch schedule to smp_call_function_interrupt\n"); break; } saddr -= sizeof(void *); if (saddr <= bt->stackbase) break; } } if (XENDUMP_DUMPFILE() && XEN() && is_idle_thread(bt->task) && is_task_active(bt->task) && (kt->xen_flags & XEN_SUSPEND) && STREQ(kl_funcname(bt->instptr), "schedule")) { int framesize = 0; /* * This is an invalid (stale) schedule reference * left in the task->thread. Move down the stack * until the hypercall_page() return address is * found, and fix up its framesize as we go. 
*/ saddr = bt->stacktop; while (readmem(saddr, KVADDR, &eip, sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { if (STREQ(kl_funcname(eip), "xen_idle")) framesize += sizeof(ulong); else if (framesize) framesize += sizeof(ulong); if (STREQ(kl_funcname(eip), "hypercall_page")) { int framesize = 24; bt->instptr = eip; bt->stkptr = saddr; if (CRASHDEBUG(1)) error(INFO, "switch schedule to hypercall_page (framesize: %d)\n", framesize); FRAMESIZE_CACHE_ENTER(eip, &framesize); break; } saddr -= sizeof(void *); if (saddr <= bt->stackbase) break; } } if (XENDUMP_DUMPFILE() && XEN() && !is_idle_thread(bt->task) && is_task_active(bt->task) && STREQ(kl_funcname(bt->instptr), "schedule")) { /* * This is an invalid (stale) schedule reference * left in the task->thread. Move down the stack * until the smp_call_function_interrupt return * address is found. */ saddr = bt->stacktop; while (readmem(saddr, KVADDR, &eip, sizeof(ulong), "xendump esp", RETURN_ON_ERROR)) { if (STREQ(kl_funcname(eip), "smp_call_function_interrupt")) { bt->instptr = eip; bt->stkptr = saddr; bt->flags |= BT_XEN_STOP_THIS_CPU; if (CRASHDEBUG(1)) error(INFO, "switch schedule to smp_call_function_interrupt\n"); break; } saddr -= sizeof(void *); if (saddr <= bt->stackbase) break; } } if (STREQ(kl_funcname(bt->instptr), "crash_kexec") || STREQ(kl_funcname(bt->instptr), "crash_nmi_callback")) { if (readmem(bt->stkptr-4, KVADDR, &contents, sizeof(ulong), "stkptr-4 contents", RETURN_ON_ERROR|QUIET) && (contents == bt->instptr)) bt->stkptr -= 4; } if (!verify_back_trace(bt) && !recoverable(bt, ofp) && !BT_REFERENCE_CHECK(bt)) error(INFO, "cannot resolve stack trace:\n"); if (BT_REFERENCE_CHECK(bt)) return(0); #endif if (!XEN_HYPER_MODE()) { if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP))) { return(1); } if (kl_get_task_struct(task, 2, tsp)) { kl_free_block(tsp); return(1); } } trace = (trace_t *)alloc_trace_rec(C_TEMP); if (!trace) { #ifdef REDHAT error(INFO, "Could not alloc trace rec!\n"); #else 
fprintf(KL_ERRORFP, "Could not alloc trace rec!\n"); #endif return(1); } else { #ifdef REDHAT saddr = kl_kernelstack(bt->stackbase); eip = bt->instptr; esp = bt->stkptr; trace->bt = bt; #else saddr = kl_kernelstack(task); if (kl_smp_dumptask(task)) { eip = kl_dumpeip(task); esp = kl_dumpesp(task); } else { if (LINUX_2_2_X(KL_LINUX_RELEASE)) { eip = KL_UINT(K_PTR(tsp, "task_struct", "tss"), "thread_struct", "eip"); esp = KL_UINT(K_PTR(tsp, "task_struct", "tss"), "thread_struct", "esp"); } else { eip = KL_UINT( K_PTR(tsp, "task_struct", "thread"), "thread_struct", "eip"); esp = KL_UINT( K_PTR(tsp, "task_struct", "thread"), "thread_struct", "esp"); } } #endif if (esp < KL_PAGE_OFFSET || eip < KL_PAGE_OFFSET) { #ifdef REDHAT error(INFO, "Task in user space -- no backtrace\n"); #else fprintf(KL_ERRORFP, "Task in user space, No backtrace\n"); #endif return 1; } setup_trace_rec(saddr, 0, 0, trace); if (KL_ERROR) { #ifdef REDHAT error(INFO, "Error setting up trace rec!\n"); #else fprintf(KL_ERRORFP, "Error setting up trace rec!\n"); #endif free_trace_rec(trace); return(1); } #ifdef REDHAT nframes = find_trace(eip, esp, 0, 0, trace, 0); #else find_trace(eip, esp, 0, 0, trace, 0); trace_banner(ofp); fprintf(ofp, "STACK TRACE FOR TASK: 0x%x", task); if (KL_TYPEINFO()) { fprintf(ofp, "(%s)\n\n", (char *)K_PTR(tsp, "task_struct", "comm")); } else { fprintf(ofp, "(%s)\n\n", (char *)K_PTR(tsp, "task_struct", "comm")); } #endif print_trace(trace, flags, ofp); } if (!XEN_HYPER_MODE()) kl_free_block(tsp); free_trace_rec(trace); #ifdef REDHAT if (KL_ERROR == KLE_PRINT_TRACE_ERROR) { handle_trace_error(bt, nframes, ofp); return(1); } #endif return(0); } #ifdef REDHAT /* * Run find_trace() and check for any errors encountered. 
 */
static int
verify_back_trace(struct bt_info *bt)
{
	void *tsp;
	kaddr_t saddr, eip, esp;
	int errcnt;
	trace_t *trace;
	sframe_t *frmp;

	errcnt = 0;
	KL_ERROR = 0;
	tsp = NULL;

	/* Kernel mode needs the task_struct contents; Xen hypervisor
	 * mode does not. */
	if (!XEN_HYPER_MODE()) {
		if (!(tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP)))
			return FALSE;
		if (kl_get_task_struct(bt->task, 2, tsp)) {
			kl_free_block(tsp);
			return FALSE;
		}
	}

	trace = (trace_t *)alloc_trace_rec(C_TEMP);
	if (!trace)
		return FALSE;

	saddr = kl_kernelstack(bt->stackbase);
	eip = bt->instptr;
	esp = bt->stkptr;
	trace->bt = bt;

	/* A user-space eip/esp cannot be unwound here. */
	if (esp < KL_PAGE_OFFSET || eip < KL_PAGE_OFFSET)
		return FALSE;

	setup_trace_rec(saddr, 0, 0, trace);
	if (KL_ERROR) {
		free_trace_rec(trace);
		return FALSE;
	}

	find_trace(eip, esp, 0, 0, trace, 0);

	/* Walk the circular frame list counting frame errors. */
	if ((frmp = trace->frame)) {
		do {
			if (frmp->error) {
				/*
				 * We're guaranteed to run into an error when
				 * unwinding an IRQ stack, so bail out without
				 * reporting the error.
				 */
				if ((bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) &&
				    (bt->flags & (BT_LOOP_TRAP|BT_WRAP_TRAP)))
					break;
				errcnt++;
				/* Remember where the first failure occurred. */
				if (!(bt->flags & BT_SPECULATE) &&
				    !bt->frameptr)
					bt->frameptr = frmp->fp;
			}
			if (BT_REFERENCE_CHECK(bt))
				do_bt_reference_check(bt, frmp);
			frmp = frmp->next;
		} while (frmp != trace->frame);
	}

	if (!XEN_HYPER_MODE())
		kl_free_block(tsp);
	free_trace_rec(trace);

	/* TRUE only when the whole trace unwound without error. */
	return (errcnt ? FALSE : TRUE);
}

/*
 * Check a frame for a requested reference.
 *
 * Matches bt->ref by symbol name or by hex value against the frame's
 * pc and, for frames reached through the common "error_code" stub,
 * against the real exception handler behind the jump; sets
 * BT_REF_FOUND on bt->ref->cmdflags on a match.
 */
static void
do_bt_reference_check(struct bt_info *bt, sframe_t *frmp)
{
	int type;
	struct syment *sp;

	/* For an "error_code" frame, also consider the exception
	 * handler the preceding frame jumped through. */
	sp = frmp->prev && STREQ(frmp->funcname, "error_code") ?
		x86_jmp_error_code((ulong)frmp->prev->pc) : NULL;

	switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL))
	{
	case BT_REF_SYMBOL:
		if (STREQ(kl_funcname(frmp->pc), bt->ref->str) ||
		    (sp && STREQ(sp->name, bt->ref->str)))
			bt->ref->cmdflags |= BT_REF_FOUND;
		break;

	case BT_REF_HEXVAL:
		if ((bt->ref->hexval == frmp->pc) ||
		    (sp && (bt->ref->hexval == sp->value)))
			bt->ref->cmdflags |= BT_REF_FOUND;
		/* For exception frames, also run the common eframe
		 * handler over the saved register set. */
		if (frmp->flag & EX_FRAME) {
			type = eframe_type(frmp->asp);
			x86_dump_eframe_common(bt, (ulong *)frmp->asp,
				(type == KERNEL_EFRAME));
		}
		break;
	}
}

/*
 * This function is a repository for "known" find_trace() failures that
 * can be "fixed" on the fly.
 *
 * Currently the routine only deals with BT_LOOP_TRAP/BT_WRAP_TRAP errors
 * where get_framesize() leaves the bp in an invalid location, those where
 * schedule() coming from schedule_timeout() is interrupted by a
 * false return address in between, those where the cpu_idle() trail
 * cannot be followed, and where the functions called by kernel_thread()
 * can't find their way back to kernel_thread().  As new fixable trace
 * instances are discovered, add them in.
 *
 * NOTE: the schedule() BT_LOOP_TRAP may have been subsequently fixed
 * by the get_framesize() adjustment for schedule(), but it's worth
 * keeping it around if a new schedule framesize anomaly pops up in
 * the future.
*/
static int
recoverable(struct bt_info *bt, FILE *ofp)
{
	ulong esp, eip;
	sframe_t sframe;
	struct stack_hook *hp;
	struct bt_info btloc;
	ulong kernel_thread;
	int calls_schedule;

	/*
	 * First try a return-address seek: re-verify the trace on a
	 * local copy with BT_SPECULATE set, and promote the flag to
	 * the caller's bt on success.
	 */
	if (!(kt->flags & NO_RA_SEEK)) {
		BCOPY(bt, &btloc, sizeof(struct bt_info));
		btloc.flags &= ~(ulonglong)BT_ERROR_MASK;
		btloc.flags |= BT_SPECULATE;
		if (verify_back_trace(&btloc)) {
			bt->flags &= ~(ulonglong)BT_ERROR_MASK;
			bt->flags |= BT_SPECULATE;
			if (CRASHDEBUG(1) || bt->debug)
				error(INFO,
				    "recovered back trace with RA seek\n");
			return TRUE;
		}
	}

	/* The remaining fixups only apply to traces that start in
	 * schedule() and have text symbols to work with. */
	if (!gather_text_list(bt) ||
	    !STREQ(kl_funcname(bt->instptr), "schedule"))
		return FALSE;

	if (!is_idle_thread(bt->task) && !(bt->flags & BT_ERROR_MASK))
		return FALSE;

	esp = eip = 0;
	calls_schedule = FALSE;
	kernel_thread = 0;

	/*
	 * Scan the stack's text-symbol list for a known restart point:
	 * schedule_timeout(), cpu_idle() (pid 0 only), or evidence of
	 * a kernel_thread() trail.
	 */
	for (hp = bt->textlist; hp->esp; hp++) {
		if (STREQ(kl_funcname(hp->eip), "kernel_thread")) {
			kernel_thread = hp->eip;
			continue;
		}

		if (!calls_schedule &&
		    STREQ(x86_function_called_by(hp->eip-5), "schedule"))
			calls_schedule = TRUE;

		if (STREQ(kl_funcname(hp->eip), "schedule_timeout")) {
			esp = hp->esp;
			eip = hp->eip;
			break;
		}

		if (STREQ(kl_funcname(hp->eip), "cpu_idle") &&
		    (bt->tc->pid == 0)) {
			esp = hp->esp;
			eip = hp->eip;
			bt->flags |= BT_CPU_IDLE;
			/* For reference checks, also test the boot-path
			 * entries above cpu_idle(). */
			for ( ; BT_REFERENCE_CHECK(bt) && hp->esp; hp++) {
				if (STREQ(kl_funcname(hp->eip),
				    "rest_init") ||
				    STREQ(kl_funcname(hp->eip),
				    "start_kernel")) {
					BZERO(&sframe, sizeof(sframe_t));
					sframe.pc = hp->eip;
					do_bt_reference_check(bt, &sframe);
				}
			}
			break;
		}
	}

	BCOPY(bt, &btloc, sizeof(struct bt_info));
	btloc.flags &= ~(ulonglong)BT_ERROR_MASK;

	if (esp && eip) {
		/* Re-verify the trace from the recovered restart point,
		 * and on success switch bt over to it. */
		btloc.instptr = eip;
		btloc.stkptr = esp;
		if (verify_back_trace(&btloc)) {
			if (CRASHDEBUG(1) || bt->debug)
				error(INFO, "recovered stack trace:\n");
			if (!BT_REFERENCE_CHECK(bt))
				fprintf(ofp, " #0 [%08lx] %s at %lx\n",
					bt->stkptr,
					kl_funcname(bt->instptr),
					bt->instptr);
			bt->instptr = eip;
			bt->stkptr = esp;
			bt->flags &= ~(ulonglong)BT_ERROR_MASK;
			bt->flags |= BT_BUMP_FRAME_LEVEL;
			FREEBUF(bt->textlist);
			return TRUE;
		}
		if (bt->flags & BT_CPU_IDLE)
		{
			if (CRASHDEBUG(1) || bt->debug)
				error(INFO, "recovered stack trace:\n");
			return TRUE;
		}
	}

	/* Last resort: a kernel thread whose stack shows a call into
	 * schedule() and a kernel_thread() return address. */
	if (kernel_thread && calls_schedule &&
	    is_kernel_thread(bt->tc->task)) {
		if (CRASHDEBUG(1) || bt->debug)
			error(INFO, "recovered stack trace:\n");
		if (BT_REFERENCE_CHECK(bt)) {
			BZERO(&sframe, sizeof(sframe_t));
			sframe.pc = kernel_thread;
			do_bt_reference_check(bt, &sframe);
		}
		bt->flags |= BT_KERNEL_THREAD;
		return TRUE;
	}

	return FALSE;
}

/*
 * If a trace is recoverable from this point finish it here.  Otherwise,
 * if a back trace fails and is unrecoverable, dump the text symbols along
 * with any possible exception frames that can be found on the stack.
 */
static void
handle_trace_error(struct bt_info *bt, int nframes, FILE *ofp)
{
	int cnt, level;
	struct stack_hook *hp;

	/* With high debug, show each stack caller/callee pair. */
	if (CRASHDEBUG(2) || (bt->debug >= 2)) {
		for (hp = bt->textlist; hp->esp; hp++) {
			char *func;

			if ((func = x86_function_called_by(hp->eip-5)))
				fprintf(ofp, "%lx %s calls %s\n",
					hp->eip, kl_funcname(hp->eip), func);
		}
	}

	/* cpu_idle() recovery: print the boot-path entries and return. */
	if (bt->flags & BT_CPU_IDLE) {
		for (hp = bt->textlist, level = 2; hp->esp; hp++) {
			if (STREQ(kl_funcname(hp->eip), "rest_init") ||
			    STREQ(kl_funcname(hp->eip), "start_kernel"))
				print_stack_entry(bt, level++,
					hp->esp, hp->eip,
					kl_funcname(hp->eip), NULL, ofp);
		}
		FREEBUF(bt->textlist);
		return;
	}

	/* kernel_thread() recovery: print the kernel_thread entry. */
	if (bt->flags & BT_KERNEL_THREAD) {
		for (hp = bt->textlist; hp->esp; hp++) {
			if (STREQ(kl_funcname(hp->eip), "kernel_thread"))
				print_stack_entry(bt, nframes-1,
					hp->esp, hp->eip,
					"kernel_thread", NULL, ofp);
		}
		FREEBUF(bt->textlist);
		return;
	}

	/* Unrecoverable: fall back to a raw text-symbol dump followed
	 * by an exception-frame search. */
	error(INFO, "text symbols on stack:\n");
	bt->flags |= BT_TEXT_SYMBOLS_PRINT|BT_ERROR_MASK;
	back_trace(bt);

	if (!XEN_HYPER_MODE()) {
		/* Count pass first, then the displaying pass. */
		bt->flags = BT_EFRAME_COUNT;
		if ((cnt = machdep->eframe_search(bt))) {
			error(INFO, "possible exception frame%s:\n",
				cnt > 1 ? "s" : "");
			bt->flags &= ~(ulonglong)BT_EFRAME_COUNT;
			machdep->eframe_search(bt);
		}
	}
}

/*
 * Print a stack entry, and its line number if requested.
 */
static void
print_stack_entry(struct bt_info *bt, int level, ulong esp, ulong eip,
	char *funcname, sframe_t *frmp, FILE *ofp)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	struct syment *sp;
	struct load_module *lm;

	/* Annotate frames reached through the "error_code" stub or a
	 * .text.lock section with the function they jump through. */
	if (frmp && frmp->prev && STREQ(frmp->funcname, "error_code") &&
	    (sp = x86_jmp_error_code((ulong)frmp->prev->pc)))
		sprintf(buf1, " (via %s)", sp->name);
	else if (frmp && (STREQ(frmp->funcname, "stext_lock") ||
		STRNEQ(frmp->funcname, ".text.lock")) &&
		(sp = x86_text_lock_jmp(eip, NULL)))
		sprintf(buf1, " (via %s)", sp->name);
	else
		buf1[0] = NULLCHAR;

	/* Translate kernel-entry labels to the real entry point name. */
	if ((sp = eframe_label(funcname, eip)))
		funcname = sp->name;

	fprintf(ofp, "%s#%d [%8lx] %s%s at %lx",
		level < 10 ? " " : "", level, esp,
		funcname_display(funcname, eip, bt, buf2),
		strlen(buf1) ? buf1 : "", eip);
	if (module_symbol(eip, NULL, &lm, NULL, 0))
		fprintf(ofp, " [%s]", lm->mod_name);
	fprintf(ofp, "\n");

	if (bt->flags & BT_LINE_NUMBERS) {
		get_line_number(eip, buf1, FALSE);
		if (strlen(buf1))
			fprintf(ofp, " %s\n", buf1);
	}
}

/*
 * The new process accounting stuff installs a label between system_call and
 * ret_from_sys_call, confusing the code that recognizes exception frame
 * symbols.  This function has been put in place to catch that anomaly, as
 * well as serving as a template for any future labels that get placed in the
 * kernel entry point code.  It returns the syment of the "real" kernel entry
 * point.
 */
#define EFRAME_LABELS 10

/* Lazily-initialized cache of label addresses seen inside the kernel
 * entry-point code, plus the symbols bounding each entry region. */
static struct eframe_labels {
	int init;
	ulong syscall_labels[EFRAME_LABELS];
	struct syment *syscall;
	struct syment *syscall_end;
	ulong tracesys_labels[EFRAME_LABELS];
	struct syment *tracesys;
	struct syment *tracesys_exit;
	ulong sysenter_labels[EFRAME_LABELS];
	struct syment *sysenter;
	struct syment *sysenter_end;
} eframe_labels = { 0 };

static struct syment *
eframe_label(char *funcname, ulong eip)
{
	int i;
	struct eframe_labels *efp;
	struct syment *sp;

	if (XEN_HYPER_MODE())
		return NULL;	/* ODA: need support ? */

	efp = &eframe_labels;

	/* One-time setup: resolve the symbols bounding each kernel
	 * entry region, tolerating version-dependent symbol names. */
	if (!efp->init) {
		if (!(efp->syscall = symbol_search("system_call"))) {
			if (CRASHDEBUG(1))
				error(WARNING,
				    "\"system_call\" symbol does not exist\n");
		}
		if ((sp = symbol_search("ret_from_sys_call")))
			efp->syscall_end = sp;
		else if ((sp = symbol_search("syscall_badsys")))
			efp->syscall_end = sp;
		else {
			if (CRASHDEBUG(1))
				error(WARNING, "neither \"ret_from_sys_call\" nor \"syscall_badsys\" symbols exist\n");
		}

		if (efp->syscall) {
			efp->tracesys = symbol_search("tracesys");
			efp->tracesys_exit = symbol_search("tracesys_exit");
		}

		if ((efp->sysenter = symbol_search("sysenter_entry")) ||
		    (efp->sysenter = symbol_search("ia32_sysenter_target"))) {
			if ((sp = symbol_search("sysexit_ret_end_marker")))
				efp->sysenter_end = sp;
			else if (THIS_KERNEL_VERSION >= LINUX(2,6,32)) {
				if ((sp = symbol_search("sysexit_audit")) ||
				    (sp = symbol_search("sysenter_exit")))
					efp->sysenter_end =
						next_symbol(NULL, sp);
				else
					error(WARNING, "cannot determine end of %s function\n", efp->sysenter->name);
			} else if ((sp = symbol_search("system_call")))
				efp->sysenter_end = sp;
			else
				error(WARNING, "neither \"sysexit_ret_end_marker\" nor \"system_call\" symbols exist\n");
		}

		efp->init = TRUE;
	}

	/*
	 * First search for the currently-known system_call labels.
	 */
	for (i = 0; (i < EFRAME_LABELS) && efp->syscall_labels[i]; i++) {
		if (efp->syscall_labels[i] == eip)
			return efp->syscall;
	}
	for (i = 0; (i < EFRAME_LABELS) && efp->tracesys_labels[i]; i++) {
		if (efp->tracesys_labels[i] == eip)
			return efp->syscall;
	}
	for (i = 0; (i < EFRAME_LABELS) && efp->sysenter_labels[i]; i++) {
		if (efp->sysenter_labels[i] == eip)
			return efp->sysenter;
	}

	/*
	 * If the eip fits in any of the label arrays, try to store it,
	 * but always return the real function it's referencing.
	 *
	 * NOTE(review): each store loop below fills EVERY remaining
	 * empty slot with the same eip rather than only the first --
	 * verify whether a break was intended after the first store.
	 */
	if (efp->syscall && efp->syscall_end) {
		if (((eip >= efp->syscall->value) &&
		     (eip < efp->syscall_end->value))) {
			for (i = 0; i < EFRAME_LABELS; i++)
				if (!efp->syscall_labels[i])
					efp->syscall_labels[i] = eip;
			return efp->syscall;
		}
	}

	if (efp->tracesys && efp->tracesys_exit) {
		if (((eip >= efp->tracesys->value) &&
		     (eip < efp->tracesys_exit->value))) {
			for (i = 0; i < EFRAME_LABELS; i++)
				if (!efp->tracesys_labels[i])
					efp->tracesys_labels[i] = eip;
			return efp->syscall;
		}
	}

	if (efp->sysenter && efp->sysenter_end) {
		if (((eip >= efp->sysenter->value) &&
		     (eip < efp->sysenter_end->value))) {
			for (i = 0; i < EFRAME_LABELS; i++)
				if (!efp->sysenter_labels[i])
					efp->sysenter_labels[i] = eip;
			return efp->sysenter;
		}
	}

	return NULL;
}

/*
 * If it makes sense to display a different function/label name
 * in a stack entry, it can be done here.  Unlike eframe_label(),
 * this routine won't cause the passed-in function name pointer
 * to be changed -- this is strictly for display purposes only.
 */
static char *
funcname_display(char *funcname, ulong eip, struct bt_info *bt, char *buf)
{
	struct syment *sp;
	ulong offset;

	/* When symbol offsets were requested (BT_SYMBOL_OFFSET),
	 * show symbol+offset instead of the bare name. */
	if (bt->flags & BT_SYMBOL_OFFSET) {
		sp = value_search(eip, &offset);
		if (sp && offset)
			return value_to_symstr(eip, buf, bt->radix);
	}

	if (STREQ(funcname, "nmi_stack_correct") &&
	    (sp = symbol_search("nmi")))
		return sp->name;

	return funcname;
}

/*
 * Cache 2k starting from the passed-in text address.  This sits on top
 * of the instrbuf 256-byte cache, but we don't want to extend its size
 * because we can run off the end of a module segment -- if this routine
 * does so, it's benign.  Tests of "foreach bt" result in more than an
 * 80% cache-hit rate.
 */
#define TEXT_BLOCK_SIZE (2048)

/*
 * Copy 256 bytes of kernel text starting at pc into buf, backed by a
 * static TEXT_BLOCK_SIZE cache; falls back to an uncached 256-byte
 * read when the large read fails.
 */
static void
fill_instr_cache(kaddr_t pc, char *buf)
{
	static kaddr_t last_block = 0;
	static char block[TEXT_BLOCK_SIZE];
	ulong offset;

	/* Cache hit: pc+256 lies entirely inside the cached block. */
	if ((pc >= last_block) &&
	    ((pc+256) < (last_block+TEXT_BLOCK_SIZE))) {
		offset = pc - last_block;
	} else {
		if (readmem(pc, KVADDR, block, TEXT_BLOCK_SIZE,
		    "fill_instr_cache", RETURN_ON_ERROR|QUIET)) {
			last_block = pc;
			offset = 0;
		} else {
			/* Large read failed (e.g. off the end of a
			 * module segment): uncached 256-byte read. */
			GET_BLOCK(pc, 256, block);
			last_block = 0;
			offset = 0;
		}
	}
	BCOPY(&block[offset], buf, 256);
}
#endif /* REDHAT */

/*
 * print_traces()
 *
 * Output a list of all valid code addresses contained in a stack
 * along with their function name and stack location.
 */
int
#ifdef REDHAT
print_traces(struct bt_info *bt, int level, int flags, FILE *ofp)
#else
print_traces(kaddr_t saddr, int level, int flags, FILE *ofp)
#endif
{
	int nfrms;
	char *fname, *cfname;
	uaddr_t *wordp, *stackp;
	trace_t *trace;
	kaddr_t addr, isp, caddr, sbase;
#ifdef REDHAT
	kaddr_t saddr = bt->stkptr;
#endif

	stackp = (uaddr_t*)kl_alloc_block(STACK_SIZE, K_TEMP);
	sbase = saddr - STACK_SIZE;
	GET_BLOCK(sbase, STACK_SIZE, stackp);
	if (KL_ERROR) {
		kl_free_block(stackp);
		return(1);
	}

	if (!(trace = (trace_t *)alloc_trace_rec(K_TEMP))) {
#ifdef REDHAT
		error(INFO, "Could not alloc trace rec!\n");
#else
		fprintf(KL_ERRORFP, "Could not alloc trace rec!\n");
#endif
		kl_free_block(stackp);
		return(1);
	}
	setup_trace_rec(saddr, 0, 0, trace);
#ifdef REDHAT
	trace->bt = bt;
#endif

	/* Walk the stack a word at a time looking for text addresses. */
	wordp = stackp;
	while (wordp < (stackp + (STACK_SIZE / 4))) {
		if ((addr = (kaddr_t)(*(uaddr_t*)wordp))) {
			/* check to see if this is a valid code address */
			if ((fname = kl_funcname(addr))) {
				/* Now use the instruction to back up and
				 * see if this RA was saved after a call.
				 * If it was, then try to determine what
				 * function was called. At the very least,
				 * only print out info for true return
				 * addresses (coming right after a call
				 * instruction -- even if we can't tell
				 * what function was called).
				 */
				isp = sbase +
				    (((uaddr_t)wordp) - ((uaddr_t)stackp));
				cfname = (char *)NULL;
				caddr = 0;
				if (get_jmp_instr(addr, isp,
					&caddr, fname, &cfname)) {
					wordp++;
					continue;
				}

				/* We have found a valid jump address. Now,
				 * try and get a backtrace.
				 */
				nfrms = find_trace(addr, isp, 0, 0, trace, 0);
				if (nfrms) {
					if ((nfrms >= level) &&
					    (!trace->frame->prev->error ||
					     (flags & C_ALL))) {
						fprintf(ofp, "\nPC=");
						print_kaddr(addr, ofp, 0);
						fprintf(ofp, " SP=");
						print_kaddr(isp, ofp, 0);
						fprintf(ofp, " SADDR=");
						print_kaddr(saddr, ofp, 0);
						fprintf(ofp, "\n");
						trace_banner(ofp);
						print_trace(trace, flags, ofp);
						trace_banner(ofp);
					}
					free_sframes(trace);
				}
			}
			wordp++;
		} else {
			wordp++;
		}
	}
	/* NOTE(review): the trace record allocated above is not released
	 * on this path -- verify against free_trace_rec() usage. */
	kl_free_block(stackp);
	return(0);
}

/*
 * do_list()
 *
 * Output a list of all valid code addresses contained in a stack
 * along with their function name and stack location.
 */
int
#ifdef REDHAT
do_text_list(kaddr_t saddr, int size, FILE *ofp)
#else
do_list(kaddr_t saddr, int size, FILE *ofp)
#endif
{
	char *fname, *cfname;
	uaddr_t *wordp, *stackp;
	kaddr_t addr, isp, caddr, sbase;

	stackp = (uaddr_t*)kl_alloc_block(size, K_TEMP);
	sbase = saddr - size;
	GET_BLOCK(sbase, size, stackp);
	if (KL_ERROR) {
		kl_free_block(stackp);
		return(1);
	}

	/* Walk the stack a word at a time looking for text addresses. */
	wordp = stackp;
	while (wordp < (stackp + (size / 4))) {
		if ((addr = (kaddr_t)(*(uaddr_t*)wordp))) {
			/* check to see if this is a valid code address */
			if ((fname = kl_funcname(addr))) {
				/* Now use the instruction to back up and
				 * see if this RA was saved after a call.
				 * If it was, then try to determine what
				 * function was called. At the very least,
				 * only print out info for true return
				 * addresses (coming right after a call
				 * instruction -- even if we can't tell
				 * what function was called).
				 */
				isp = sbase +
				    (((uaddr_t)wordp) - ((uaddr_t)stackp));
				cfname = (char *)NULL;
				caddr = 0;
				if (get_jmp_instr(addr, isp,
					&caddr, fname, &cfname)) {
					wordp++;
					continue;
				}
				fprintf(ofp, "0x%x -- 0x%x (%s)",
					isp, addr, fname);
				if (cfname) {
					fprintf(ofp, " --> 0x%x (%s)\n",
						caddr, cfname);
				} else {
					fprintf(ofp, "\n");
				}
			}
			wordp++;
		} else {
			wordp++;
		}
	}
	kl_free_block(stackp);
	return(0);
}

#ifndef REDHAT
/*
 * add_frame()
 *
 * Insert a new frame (fp/ra) into the trace's circular frame list,
 * keeping it sorted by ascending frame pointer.  Returns 0 when
 * inserted before an existing frame, 1 when appended at the end.
 */
int
add_frame(trace_t *trace, kaddr_t fp, kaddr_t ra)
{
	sframe_t *cf, *sf;

	/* Check to make sure that sp is from the stack in the trace
	 * record.
	 *
	 * XXX -- todo
	 */

	sf = (sframe_t *)alloc_sframe(trace, C_PERM);
	sf->fp = fp;
	sf->ra = ra;
	if ((cf = trace->frame)) {
		do {
			if (cf->fp && (sf->fp < cf->fp)) {
				if (cf->next == cf) {
					/* Single-entry list: the new frame
					 * becomes the list head. */
					cf->prev = sf;
					sf->next = cf;
					cf->next = sf;
					sf->prev = cf;
					trace->frame = sf;
				} else {
					cf->prev->next = sf;
					sf->prev = cf->prev;
					cf->prev = sf;
					sf->next = cf;
				}
				return(0);
			}
			cf = cf->next;
		} while (cf != trace->frame);
		cf = 0;
	}
	if (!cf) {
		kl_enqueue((element_t **)&trace->frame, (element_t *)sf);
	}
	return(1);
}

/*
 * finish_trace()
 *
 * Fill in level, sp, pc, frame_size, funcname and asp for frames that
 * were added without them, then append a final frame for the oldest
 * caller (kernel_thread or otherwise).
 */
void
finish_trace(trace_t *trace)
{
	int level = 0, curstkidx = 0;
	uaddr_t *sbp;
	kaddr_t sbase, saddr;
	sframe_t *sf;

	sbp = trace->stack[curstkidx].ptr;
	sbase = trace->stack[curstkidx].addr;
	saddr = sbase + trace->stack[curstkidx].size;

	if ((sf = trace->frame)) {
		do {
			if (!sf->pc) {
				/* Derive sp/pc from the previous frame. */
				if (sf != trace->frame) {
					sf->sp = sf->prev->fp + 4;
					sf->pc = get_call_pc(sf->prev->ra);
				}
				if (!sf->pc) {
					sf = sf->next;
					continue;
				}
			}
			sf->level = level++;
			sf->frame_size = sf->fp - sf->sp + 4;
			sf->funcname = kl_funcname(sf->pc);
			sf->asp = (uaddr_t*)((uaddr_t)sbp +
				(STACK_SIZE - (saddr - sf->sp)));
			sf = sf->next;
		} while (sf != trace->frame);

		if (level > 0) {
			/* Append a frame for the oldest caller. */
			sf = (sframe_t *)alloc_sframe(trace, C_PERM);
			sf->level = level;
			sf->sp = trace->frame->prev->fp + 4;
			sf->pc = get_call_pc(trace->frame->prev->ra);
			sf->funcname = kl_funcname(sf->pc);
			if (sf->funcname &&
			    strstr(sf->funcname, "kernel_thread")) {
				sf->ra = 0;
				sf->fp = saddr - 4;
				sf->asp = (uaddr_t*)((uaddr_t)sbp +
					(STACK_SIZE - 12));
			} else {
				sf->fp = saddr - 20;
				kl_get_kaddr(sf->fp, &sf->ra);
				sf->asp = (uaddr_t*)((uaddr_t)sbp +
					(STACK_SIZE - (saddr - sf->sp)));
			}
			sf->frame_size = sf->fp - sf->sp + 4;
			kl_enqueue((element_t **)&trace->frame,
				(element_t *)sf);
		}
	}
}

/*
 * dumptask_trace()
 *
 * Generate and print a trace for curtask using the eip/esp saved for
 * its cpu in the dump header's register array.
 */
int
dumptask_trace(
	kaddr_t curtask,
	dump_header_asm_t *dha,
	int flags,
	FILE *ofp)
{
	kaddr_t eip, esp, saddr;
	void *tsp;
	trace_t *trace;
	int i;

	/* Locate the cpu whose current task is curtask.
	 * NOTE(review): eip/esp stay uninitialized when no cpu matches;
	 * callers presumably guarantee a match -- verify. */
	for (i = 0; i < dha->dha_smp_num_cpus; i++) {
		if (curtask == (kaddr_t)dha->dha_smp_current_task[i]) {
			eip = dha->dha_smp_regs[i].eip;
			esp = dha->dha_smp_regs[i].esp;
			break;
		}
	}

	tsp = kl_alloc_block(TASK_STRUCT_SZ, K_TEMP);
	if (!tsp) {
		return(1);
	}
	if (kl_get_task_struct(curtask, 2, tsp)) {
		kl_free_block(tsp);
		return(1);
	}
	if (!(trace = alloc_trace_rec(K_TEMP))) {
		fprintf(KL_ERRORFP, "Could not alloc trace rec!\n");
	} else {
		saddr = kl_kernelstack(curtask);
		setup_trace_rec(saddr, 0, 0, trace);
		find_trace(eip, esp, 0, 0, trace, 0);
		trace_banner(ofp);
		fprintf(ofp, "STACK TRACE FOR TASK: 0x%"FMTPTR"x (%s)\n\n",
			curtask, (char*)K_PTR(tsp, "task_struct", "comm"));
		print_trace(trace, flags, ofp);
		trace_banner(ofp);
		free_trace_rec(trace);
	}
	/* NOTE(review): tsp is not released on the success paths --
	 * possible leak; verify against kl_free_block() usage. */
	return(0);
}
#endif /* !REDHAT */

/*
 * lkcdutils-4.1/lcrash/arch/i386/lib/dis.c
 */
/*
 * Copyright 1999 Silicon Graphics, Inc. All rights reserved.
 */
*/ #ifndef REDHAT #include #include #include #endif /* !REDHAT */ static int instr_buf_init = 1; static instr_buf_t instrbuf; static unsigned char *codeptr; /* Forward declarations for local functions */ static int seg_prefix(int); static int op_e(int, int, instr_rec_t *); static opcode_rec_t op_386[] = { /* 0x00 */ { "addb", Eb, Gb }, { "addS", Ev, Gv }, { "addb", Gb, Eb }, { "addS", Gv, Ev }, { "addb", AL, Ib }, { "addS", eAX, Iv }, { "pushS", es }, { "popS", es }, /* 0x08 */ { "orb", Eb, Gb }, { "orS", Ev, Gv }, { "orb", Gb, Eb }, { "orS", Gv, Ev }, { "orb", AL, Ib }, { "orS", eAX, Iv }, { "pushS", cs }, { "(bad)", BAD }, /* 0x10 */ { "adcb", Eb, Gb }, { "adcS", Ev, Gv }, { "adcb", Gb, Eb }, { "adcS", Gv, Ev }, { "adcb", AL, Ib }, { "adcS", eAX, Iv }, { "pushS", ss }, { "popS", ss }, /* 0x18 */ { "sbbb", Eb, Gb }, { "sbbS", Ev, Gv }, { "sbbb", Gb, Eb }, { "sbbS", Gv, Ev }, { "sbbb", AL, Ib }, { "sbbS", eAX, Iv }, { "pushS", ds }, { "popS", ds }, /* 0x20 */ { "andb", Eb, Gb }, { "andS", Ev, Gv }, { "andb", Gb, Eb }, { "andS", Gv, Ev }, { "andb", AL, Ib }, { "andS", eAX, Iv }, { "(bad)", BAD }, /* SEG ES prefix */ { "daa", NONE }, /* 0x28 */ { "subb", Eb, Gb }, { "subS", Ev, Gv }, { "subb", Gb, Eb }, { "subS", Gv, Ev }, { "subb", AL, Ib }, { "subS", eAX, Iv }, { "(bad)", BAD }, /* SEG CS prefix */ { "das", NONE }, /* 0x30 */ { "xorb", Eb, Gb }, { "xorS", Ev, Gv }, { "xorb", Gb, Eb }, { "xorS", Gv, Ev }, { "xorb", AL, Ib }, { "xorS", eAX, Iv }, { "(bad)", BAD }, /* SEG SS prefix */ { "aaa", NONE }, /* 0x38 */ { "cmpb", Eb, Gb }, { "cmpS", Ev, Gv }, { "cmpb", Gb, Eb }, { "cmpS", Gv, Ev }, { "cmpb", AL, Ib }, { "cmpS", eAX, Iv }, { "(bad)", BAD }, /* SEG DS previx */ { "aas", NONE }, /* 0x40 */ { "incS", eAX }, { "incS", eCX }, { "incS", eDX }, { "incS", eBX }, { "incS", eSP }, { "incS", eBP }, { "incS", eSI }, { "incS", eDI }, /* 0x48 */ { "decS", eAX }, { "decS", eCX }, { "decS", eDX }, { "decS", eBX }, { "decS", eSP }, { "decS", eBP }, { "decS", eSI }, { "decS", 
eDI }, /* 0x50 */ { "pushS", eAX }, { "pushS", eCX }, { "pushS", eDX }, { "pushS", eBX }, { "pushS", eSP }, { "pushS", eBP }, { "pushS", eSI }, { "pushS", eDI }, /* 0x58 */ { "popS", eAX }, { "popS", eCX }, { "popS", eDX }, { "popS", eBX }, { "popS", eSP }, { "popS", eBP }, { "popS", eSI }, { "popS", eDI }, /* 0x60 */ { "pusha", NONE }, { "popa", NONE }, { "boundS", Gv, Ma }, { "arpl", Ew, Gw }, { "(bad)", BAD }, /* seg fs */ { "(bad)", BAD }, /* seg gs */ { "(bad)", BAD }, /* op size prefix */ { "(bad)", BAD }, /* adr size prefix */ /* 0x68 */ { "pushS", Iv }, { "imulS", Gv, Ev, Iv }, { "pushS", sIb }, /* push of byte really pushes 2 or 4 bytes */ { "imulS", Gv, Ev, Ib }, { "insb", Yb, indirDX }, { "insS", Yv, indirDX }, { "outsb", indirDX, Xb }, { "outsS", indirDX, Xv }, /* 0x70 */ { "jo", Jb }, { "jno", Jb }, { "jb", Jb }, { "jae", Jb }, { "je", Jb }, { "jne", Jb }, { "jbe", Jb }, { "ja", Jb }, /* 0x78 */ { "js", Jb }, { "jns", Jb }, { "jp", Jb }, { "jnp", Jb }, { "jl", Jb }, { "jnl", Jb }, { "jle", Jb }, { "jg", Jb }, /* 0x80 */ { GRP1b }, { GRP1S }, { "(bad)", BAD }, { GRP1Ss }, { "testb", Eb, Gb }, { "testS", Ev, Gv }, { "xchgb", Eb, Gb }, { "xchgS", Ev, Gv }, /* 0x88 */ { "movb", Eb, Gb }, { "movS", Ev, Gv }, { "movb", Gb, Eb }, { "movS", Gv, Ev }, { "movw", Ew, Sw }, { "leaS", Gv, M }, { "movw", Sw, Ew }, { "popS", Ev }, /* 0x90 */ { "nop", NONE }, { "xchgS", eCX, eAX }, { "xchgS", eDX, eAX }, { "xchgS", eBX, eAX }, { "xchgS", eSP, eAX }, { "xchgS", eBP, eAX }, { "xchgS", eSI, eAX }, { "xchgS", eDI, eAX }, /* 0x98 */ { "cWtS", NONE }, { "cStd", NONE }, { "lcall", Ap }, { "(bad)", BAD }, /* fwait */ { "pushf", NONE }, { "popf", NONE }, { "sahf", NONE }, { "lahf", NONE }, /* 0xa0 */ { "movb", AL, Ob }, { "movS", eAX, Ov }, { "movb", Ob, AL }, { "movS", Ov, eAX }, { "movsb", Yb, Xb }, { "movsS", Yv, Xv }, { "cmpsb", Yb, Xb }, { "cmpsS", Yv, Xv }, /* 0xa8 */ { "testb", AL, Ib }, { "testS", eAX, Iv }, { "stosb", Yb, AL }, { "stosS", Yv, eAX }, { "lodsb", AL, Xb 
}, { "lodsS", eAX, Xv }, { "scasb", AL, Yb }, { "scasS", eAX, Yv }, /* 0xb0 */ { "movb", AL, Ib }, { "movb", CL, Ib }, { "movb", DL, Ib }, { "movb", BL, Ib }, { "movb", AH, Ib }, { "movb", CH, Ib }, { "movb", DH, Ib }, { "movb", BH, Ib }, /* 0xb8 */ { "movS", eAX, Iv }, { "movS", eCX, Iv }, { "movS", eDX, Iv }, { "movS", eBX, Iv }, { "movS", eSP, Iv }, { "movS", eBP, Iv }, { "movS", eSI, Iv }, { "movS", eDI, Iv }, /* 0xc0 */ { GRP2b }, { GRP2S }, { "ret", Iw }, { "ret", NONE }, { "lesS", Gv, Mp }, { "ldsS", Gv, Mp }, { "movb", Eb, Ib }, { "movS", Ev, Iv }, /* 0xc8 */ { "enter", Iw, Ib }, { "leave", NONE }, { "lret", Iw }, { "lret", NONE }, { "int3", NONE }, { "int", Ib }, { "into", NONE }, { "iret", NONE }, /* 0xd0 */ { GRP2b_one }, { GRP2S_one }, { GRP2b_cl }, { GRP2S_cl }, { "aam", Ib }, { "aad", Ib }, { "(bad)", BAD }, { "xlat", NONE }, /* 0xd8 */ { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, { FLOAT, NONE }, /* 0xe0 */ { "loopne", Jb }, { "loope", Jb }, { "loop", Jb }, { "jCcxz", Jb }, { "inb", AL, Ib }, { "inS", eAX, Ib }, { "outb", Ib, AL }, { "outS", Ib, eAX }, /* 0xe8 */ { "call", Av }, { "jmp", Jv }, { "ljmp", Ap }, { "jmp", Jb }, { "inb", AL, indirDX }, { "inS", eAX, indirDX }, { "outb", indirDX, AL }, { "outS", indirDX, eAX }, /* 0xf0 */ { "(bad)", BAD }, /* lock prefix */ { "(bad)", BAD }, { "(bad)", BAD }, /* repne */ { "(bad)", BAD }, /* repz */ { "hlt", NONE }, { "cmc", NONE }, { GRP3b }, { GRP3S }, /* 0xf8 */ { "clc", NONE }, { "stc", NONE }, { "cli", NONE }, { "sti", NONE }, { "cld", NONE }, { "std", NONE }, { GRP4 }, { GRP5 }, }; static opcode_rec_t op_386_twobyte[] = { /* 0x00 */ { GRP6 }, { GRP7 }, { "larS", Gv, Ew }, { "lslS", Gv, Ew }, { "(bad)", BAD }, { "(bad)", BAD }, { "clts", NONE }, { "(bad)", BAD }, /* 0x08 */ { "invd", NONE }, { "wbinvd", NONE }, { "(bad)", BAD }, { "ud2a", NONE }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x10 */ 
{ "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x18 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x20 */ /* these are all backward in appendix A of the intel book */ { "movl", Rd, Cd }, { "movl", Rd, Dd }, { "movl", Cd, Rd }, { "movl", Dd, Rd }, { "movl", Rd, Td }, { "(bad)", BAD }, { "movl", Td, Rd }, { "(bad)", BAD }, /* 0x28 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x30 */ { "wrmsr", NONE }, { "rdtsc", NONE }, { "rdmsr", NONE }, { "rdpmc", NONE }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x38 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x40 */ { "cmovo", Gv,Ev }, { "cmovno", Gv,Ev }, { "cmovb", Gv,Ev }, { "cmovae", Gv,Ev }, { "cmove", Gv,Ev }, { "cmovne", Gv,Ev }, { "cmovbe", Gv,Ev }, { "cmova", Gv,Ev }, /* 0x48 */ { "cmovs", Gv,Ev }, { "cmovns", Gv,Ev }, { "cmovp", Gv,Ev }, { "cmovnp", Gv,Ev }, { "cmovl", Gv,Ev }, { "cmovge", Gv,Ev }, { "cmovle", Gv,Ev }, { "cmovg", Gv,Ev }, /* 0x50 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x58 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0x60 */ { "punpcklbw", MX, EM }, { "punpcklwd", MX, EM }, { "punpckldq", MX, EM }, { "packsswb", MX, EM }, { "pcmpgtb", MX, EM }, { "pcmpgtw", MX, EM }, { "pcmpgtd", MX, EM }, { "packuswb", MX, EM }, /* 0x68 */ { "punpckhbw", MX, EM }, { "punpckhwd", MX, EM }, { "punpckhdq", MX, EM }, { "packssdw", MX, EM }, { "(bad)", BAD }, { "(bad)", 
BAD }, { "movd", MX, Ev }, { "movq", MX, EM }, /* 0x70 */ { "(bad)", BAD }, { GRP10 }, { GRP11 }, { GRP12 }, { "pcmpeqb", MX, EM }, { "pcmpeqw", MX, EM }, { "pcmpeqd", MX, EM }, { "emms" , NONE }, /* 0x78 */ { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "movd", Ev, MX }, { "movq", EM, MX }, /* 0x80 */ { "jo", Jv }, { "jno", Jv }, { "jb", Jv }, { "jae", Jv }, { "je", Jv }, { "jne", Jv }, { "jbe", Jv }, { "ja", Jv }, /* 0x88 */ { "js", Jv }, { "jns", Jv }, { "jp", Jv }, { "jnp", Jv }, { "jl", Jv }, { "jge", Jv }, { "jle", Jv }, { "jg", Jv }, /* 0x90 */ { "seto", Eb }, { "setno", Eb }, { "setb", Eb }, { "setae", Eb }, { "sete", Eb }, { "setne", Eb }, { "setbe", Eb }, { "seta", Eb }, /* 0x98 */ { "sets", Eb }, { "setns", Eb }, { "setp", Eb }, { "setnp", Eb }, { "setl", Eb }, { "setge", Eb }, { "setle", Eb }, { "setg", Eb }, /* 0xa0 */ { "pushS", fs }, { "popS", fs }, { "cpuid", NONE }, { "btS", Ev, Gv }, { "shldS", Ev, Gv, Ib }, { "shldS", Ev, Gv, CL }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xa8 */ { "pushS", gs }, { "popS", gs }, { "rsm", NONE }, { "btsS", Ev, Gv }, { "shrdS", Ev, Gv, Ib }, { "shrdS", Ev, Gv, CL }, { "(bad)", BAD }, { "imulS", Gv, Ev }, /* 0xb0 */ { "cmpxchgb", Eb, Gb }, { "cmpxchgS", Ev, Gv }, { "lssS", Gv, Mp }, /* 386 lists only Mp */ { "btrS", Ev, Gv }, { "lfsS", Gv, Mp }, /* 386 lists only Mp */ { "lgsS", Gv, Mp }, /* 386 lists only Mp */ { "movzbS", Gv, Eb }, { "movzwS", Gv, Ew }, /* 0xb8 */ { "ud2b", NONE }, { "(bad)", BAD }, { GRP8 }, { "btcS", Ev, Gv }, { "bsfS", Gv, Ev }, { "bsrS", Gv, Ev }, { "movsbS", Gv, Eb }, { "movswS", Gv, Ew }, /* 0xc0 */ { "xaddb", Eb, Gb }, { "xaddS", Ev, Gv }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { GRP9 }, /* 0xc8 */ { "bswap", eAX }, { "bswap", eCX }, { "bswap", eDX }, { "bswap", eBX }, { "bswap", eSP }, { "bswap", eBP }, { "bswap", eSI }, { "bswap", eDI }, /* 0xd0 */ { "(bad)", BAD }, { "psrlw", 
MX, EM }, { "psrld", MX, EM }, { "psrlq", MX, EM }, { "(bad)", BAD }, { "pmullw", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xd8 */ { "psubusb", MX, EM }, { "psubusw", MX, EM }, { "(bad)", BAD }, { "pand", MX, EM }, { "paddusb", MX, EM }, { "paddusw", MX, EM }, { "(bad)", BAD }, { "pandn", MX, EM }, /* 0xe0 */ { "(bad)", BAD }, { "psraw", MX, EM }, { "psrad", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, { "pmulhw", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xe8 */ { "psubsb", MX, EM }, { "psubsw", MX, EM }, { "(bad)", BAD }, { "por", MX, EM }, { "paddsb", MX, EM }, { "paddsw", MX, EM }, { "(bad)", BAD }, { "pxor", MX, EM }, /* 0xf0 */ { "(bad)", BAD }, { "psllw", MX, EM }, { "pslld", MX, EM }, { "psllq", MX, EM }, { "(bad)", BAD }, { "pmaddwd", MX, EM }, { "(bad)", BAD }, { "(bad)", BAD }, /* 0xf8 */ { "psubb", MX, EM }, { "psubw", MX, EM }, { "psubd", MX, EM }, { "(bad)", BAD }, { "paddb", MX, EM }, { "paddw", MX, EM }, { "paddd", MX, EM }, { "(bad)", BAD }, }; static opcode_rec_t grps[][8] = { /* GRP1b */ { { "addb", Eb, Ib }, { "orb", Eb, Ib }, { "adcb", Eb, Ib }, { "sbbb", Eb, Ib }, { "andb", Eb, Ib }, { "subb", Eb, Ib }, { "xorb", Eb, Ib }, { "cmpb", Eb, Ib } }, /* GRP1S */ { { "addS", Ev, Iv }, { "orS", Ev, Iv }, { "adcS", Ev, Iv }, { "sbbS", Ev, Iv }, { "andS", Ev, Iv }, { "subS", Ev, Iv }, { "xorS", Ev, Iv }, { "cmpS", Ev, Iv } }, /* GRP1Ss */ { { "addS", Ev, sIb }, { "orS", Ev, sIb }, { "adcS", Ev, sIb }, { "sbbS", Ev, sIb }, { "andS", Ev, sIb }, { "subS", Ev, sIb }, { "xorS", Ev, sIb }, { "cmpS", Ev, sIb } }, /* GRP2b */ { { "rolb", Eb, Ib }, { "rorb", Eb, Ib }, { "rclb", Eb, Ib }, { "rcrb", Eb, Ib }, { "shlb", Eb, Ib }, { "shrb", Eb, Ib }, { "(bad)", BAD }, { "sarb", Eb, Ib }, }, /* GRP2S */ { { "rolS", Ev, Ib }, { "rorS", Ev, Ib }, { "rclS", Ev, Ib }, { "rcrS", Ev, Ib }, { "shlS", Ev, Ib }, { "shrS", Ev, Ib }, { "(bad)", BAD }, { "sarS", Ev, Ib }, }, /* GRP2b_one */ { { "rolb", Eb }, { "rorb", Eb }, { "rclb", Eb }, { "rcrb", Eb }, { 
"shlb", Eb }, { "shrb", Eb }, { "(bad)", BAD }, { "sarb", Eb }, }, /* GRP2S_one */ { { "rolS", Ev }, { "rorS", Ev }, { "rclS", Ev }, { "rcrS", Ev }, { "shlS", Ev }, { "shrS", Ev }, { "(bad)", BAD }, { "sarS", Ev }, }, /* GRP2b_cl */ { { "rolb", Eb, CL }, { "rorb", Eb, CL }, { "rclb", Eb, CL }, { "rcrb", Eb, CL }, { "shlb", Eb, CL }, { "shrb", Eb, CL }, { "(bad)", BAD }, { "sarb", Eb, CL }, }, /* GRP2S_cl */ { { "rolS", Ev, CL }, { "rorS", Ev, CL }, { "rclS", Ev, CL }, { "rcrS", Ev, CL }, { "shlS", Ev, CL }, { "shrS", Ev, CL }, { "(bad)", BAD }, { "sarS", Ev, CL } }, /* GRP3b */ { { "testb", Eb, Ib }, { "(bad)", Eb }, { "notb", Eb }, { "negb", Eb }, { "mulb", AL, Eb }, { "imulb", AL, Eb }, { "divb", AL, Eb }, { "idivb", AL, Eb } }, /* GRP3S */ { { "testS", Ev, Iv }, { "(bad)", BAD }, { "notS", Ev }, { "negS", Ev }, { "mulS", eAX, Ev }, { "imulS", eAX, Ev }, { "divS", eAX, Ev }, { "idivS", eAX, Ev }, }, /* GRP4 */ { { "incb", Eb }, { "decb", Eb }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, }, /* GRP5 */ { { "incS", Ev }, { "decS", Ev }, { "call", indirEv }, { "lcall", indirEv }, { "jmp", indirEv }, { "ljmp", indirEv }, { "pushS", Ev }, { "(bad)", BAD }, }, /* GRP6 */ { { "sldt", Ew }, { "str", Ew }, { "lldt", Ew }, { "ltr", Ew }, { "verr", Ew }, { "verw", Ew }, { "(bad)", BAD }, { "(bad)", BAD } }, /* GRP7 */ { { "sgdt", Ew }, { "sidt", Ew }, { "lgdt", Ew }, { "lidt", Ew }, { "smsw", Ew }, { "(bad)", BAD }, { "lmsw", Ew }, { "invlpg", Ew }, }, /* GRP8 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "btS", Ev, Ib }, { "btsS", Ev, Ib }, { "btrS", Ev, Ib }, { "btcS", Ev, Ib }, }, /* GRP9 */ { { "(bad)", BAD }, { "cmpxchg8b", Ev }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, }, /* GRP10 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "psrlw", MS, Ib }, { "(bad)", BAD }, { "psraw", MS, Ib }, { "(bad)", BAD }, { "psllw", 
MS, Ib }, { "(bad)", BAD }, }, /* GRP11 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "psrld", MS, Ib }, { "(bad)", BAD }, { "psrad", MS, Ib }, { "(bad)", BAD }, { "pslld", MS, Ib }, { "(bad)", BAD }, }, /* GRP12 */ { { "(bad)", BAD }, { "(bad)", BAD }, { "psrlq", MS, Ib }, { "(bad)", BAD }, { "(bad)", BAD }, { "(bad)", BAD }, { "psllq", MS, Ib }, { "(bad)", BAD }, } }; static opcode_rec_t float_grps[][8] = { /* d8 */ { { "fadd", ST, STi }, { "fmul", ST, STi }, { "fcom", STi }, { "fcomp", STi }, { "fsub", ST, STi }, { "fsubr", ST, STi }, { "fdiv", ST, STi }, { "fdivr", ST, STi }, }, /* d9 */ { { "fld", STi }, { "fxch", STi }, { FGRPd9_2 }, { "(bad)" }, { FGRPd9_4 }, { FGRPd9_5 }, { FGRPd9_6 }, { FGRPd9_7 }, }, /* da */ { { "fcmovb", ST, STi }, { "fcmove", ST, STi }, { "fcmovbe",ST, STi }, { "fcmovu", ST, STi }, { "(bad)" }, { FGRPda_5 }, { "(bad)" }, { "(bad)" }, }, /* db */ { { "fcmovnb",ST, STi }, { "fcmovne",ST, STi }, { "fcmovnbe",ST, STi }, { "fcmovnu",ST, STi }, { FGRPdb_4 }, { "fucomi", ST, STi }, { "fcomi", ST, STi }, { "(bad)" }, }, /* dc */ { { "fadd", STi, ST }, { "fmul", STi, ST }, { "(bad)" }, { "(bad)" }, { "fsub", STi, ST }, { "fsubr", STi, ST }, { "fdiv", STi, ST }, { "fdivr", STi, ST }, }, /* dd */ { { "ffree", STi }, { "(bad)" }, { "fst", STi }, { "fstp", STi }, { "fucom", STi }, { "fucomp", STi }, { "(bad)" }, { "(bad)" }, }, /* de */ { { "faddp", STi, ST }, { "fmulp", STi, ST }, { "(bad)" }, { FGRPde_3 }, { "fsubp", STi, ST }, { "fsubrp", STi, ST }, { "fdivp", STi, ST }, { "fdivrp", STi, ST }, }, /* df */ { { "(bad)" }, { "(bad)" }, { "(bad)" }, { "(bad)" }, { FGRPdf_4 }, { "fucomip",ST, STi }, { "fcomip", ST, STi }, { "(bad)" }, }, }; static char *fgrps[][8] = { /* d9_2 0 */ { "fnop","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, /* d9_4 1 */ { "fchs","fabs","(bad)","(bad)","ftst","fxam","(bad)","(bad)", }, /* d9_5 2 */ { "fld1","fldl2t","fldl2e","fldpi","fldlg2","fldln2","fldz","(bad)", }, /* d9_6 3 */ { 
"f2xm1","fyl2x","fptan","fpatan","fxtract","fprem1","fdecstp","fincstp", }, /* d9_7 4 */ { "fprem","fyl2xp1","fsqrt","fsincos","frndint","fscale","fsin","fcos", }, /* da_5 5 */ { "(bad)","fucompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, /* db_4 6 */ { "feni(287 only)","fdisi(287 only)","fNclex","fNinit", "fNsetpm(287 only)","(bad)","(bad)","(bad)", }, /* de_3 7 */ { "(bad)","fcompp","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, /* df_4 8 */ { "fNstsw","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)","(bad)", }, }; static char *float_mem[] = { /* 0xd8 */ "fadds","fmuls","fcoms","fcomps","fsubs","fsubrs","fdivs","fdivrs", /* 0xd9 */ "flds","(bad)","fsts","fstps","fldenv","fldcw","fNstenv","fNstcw", /* 0xda */ "fiaddl","fimull","ficoml","ficompl","fisubl","fisubrl","fidivl", "fidivrl", /* 0xdb */ "fildl","(bad)","fistl","fistpl","(bad)","fldt","(bad)","fstpt", /* 0xdc */ "faddl","fmull","fcoml","fcompl","fsubl","fsubrl","fdivl","fdivrl", /* 0xdd */ "fldl","(bad)","fstl","fstpl","frstor","(bad)","fNsave","fNstsw", /* 0xde */ "fiadd","fimul","ficom","ficomp","fisub","fisubr","fidiv","fidivr", /* 0xdf */ "fild","(bad)","fist","fistp","fbld","fildll","fbstp","fistpll", }; static const unsigned char onebyte_has_modrm[256] = { /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 }; static const unsigned char twobyte_has_modrm[256] = { /* 00 */ 
1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0, /* 0f */ /* 10 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1f */ /* 20 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* 2f */ /* 30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 3f */ /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */ /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 5f */ /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1, /* 6f */ /* 70 */ 0,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1, /* 7f */ /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */ /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */ /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */ /* b0 */ 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1, /* bf */ /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */ /* d0 */ 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1, /* df */ /* e0 */ 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1, /* ef */ /* f0 */ 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0 /* ff */ }; #ifdef NOT_USED static int reg_num[] = { 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7, }; #endif #ifndef REDHAT static char *reg_name[] = { "%eax","%ecx","%edx","%ebx","%esp","%ebp","%esi","%edi", "%ax","%cx","%dx","%bx","%sp","%bp","%si","%di", "%al","%cl","%dl","%bl","%ah","%ch","%dh","%bh", "%es","%cs","%ss","%ds","%fs","%gs", "bx+si","bx+di","bp+si","bp+di", }; #endif /* !REDHAT */ static int reg_32[] = { R_eAX, R_eCX, R_eDX, R_eBX, R_eSP, R_eBP, R_eSI, R_eDI, }; static int reg_16[] = { R_AX, R_CX, R_DX, R_BX, R_SP, R_BP, R_SI, R_DI, }; static int reg_8[] = { R_AL, R_CL, R_DL, R_BL, R_AH, R_CH, R_DH, R_BH, }; static int reg_seg[] = { R_ES, R_CS, R_SS, R_DS, R_FS, R_GS, R_BAD, R_BAD, }; static int reg_index[] = { R_BX_SI, R_BX_DI, R_BP_SI, R_BP_DI, R_SI, R_DI, R_BP, R_BX, }; #ifndef REDHAT static char *optype_name[] = { "NONE","A","C","D","E","M_indirE","F","G","I","sI","J","M", "O","P","Q","R","S","T","V","W","X","Y","MMX","EM","MS","GRP", "REG", }; static char *opmods[] = { "NONE","a","b","c","d","dg","p","pi", "ps","q","s","ss","si","v","w", }; static char *reg_opname[] = { "eAX","eCX","eDX","eBX","eSP","eBP","eSI","eDI", 
"AX","CX","DX","BX","SP","BP","SI","DI", "AL","CL","DL","BL","AH","CH","DH","BH", "ES","CS","SS","DS","FS","GS", }; static void printaddr(kaddr_t addr, int flag, FILE *ofp) { int offset = 0; syment_t *sp; if ((sp = kl_lkup_symaddr(addr))) { offset = addr - sp->s_addr; } /* Print out address */ fprintf(ofp, "0x%x", addr); /* Print out symbol name */ if (sp) { if (offset) { fprintf(ofp, " <%s+%d>", sp->s_name, offset); } else { fprintf(ofp, " <%s>", sp->s_name); } } /* Line things up properly for current function */ if (flag) { if (offset == 0) { fprintf(ofp, ": "); } else if (offset < 10) { fprintf(ofp, ": "); } else if (offset < 100) { fprintf(ofp, ": "); } else if (offset < 1000) { fprintf(ofp, ": "); } else if (offset < 10000) { fprintf(ofp, ": "); } else { fprintf(ofp, ": "); } } } static void print_optype(int m, int t, FILE *ofp) { if (m >= M_BAD) { fprintf(ofp, "BAD"); } else if (m == M_REG) { if (t >= R_BAD) { fprintf(ofp, "REG_BAD"); } else { fprintf(ofp, "%s", reg_opname[t]); } } else { if (t == T_NONE) { fprintf(ofp, "%s", optype_name[m]); } else if (t >= T_BAD) { fprintf(ofp, "%s(bad)", optype_name[m]); } else { fprintf(ofp, "%s%s", optype_name[m], opmods[t]); } } } #endif /* !REDHAT */ static void get_modrm_info(unsigned char modr, int *mod_rm, int *reg_op) { *mod_rm = ((modr >> 6) << 3) | (modr & 7); *reg_op = (modr >> 3) & 7; } static int is_prefix(unsigned char c) { int prefix = 0; switch(c) { case 0xf3: prefix = PREFIX_REPZ; break; case 0xf2: prefix = PREFIX_REPNZ; break; case 0xf0: prefix = PREFIX_LOCK; break; case 0x2e: prefix = PREFIX_CS; break; case 0x36: prefix = PREFIX_SS; break; case 0x3e: prefix = PREFIX_DS; break; case 0x26: prefix = PREFIX_ES; break; case 0x64: prefix = PREFIX_FS; break; case 0x65: prefix = PREFIX_GS; break; case 0x66: prefix = PREFIX_DATA; break; case 0x67: prefix = PREFIX_ADR; break; case 0x9b: prefix = PREFIX_FWAIT; break; } return(prefix); } static int get_modrm_reg16(int mod_rm, int opdata, instr_rec_t *irp) { int reg, 
mod; mod = irp->modrm >> 6; switch (mod_rm) { case 0x6: break; default: reg = mod_rm - (mod * 8); return(reg_index[reg]); } return(R_BAD); } static int get_modrm_reg32(int mod_rm, int opdata, instr_rec_t *irp) { int reg; switch (mod_rm) { case 0x0: case 0x1: case 0x2: case 0x3: case 0x6: case 0x7: return(mod_rm); case 0x18: case 0x19: case 0x1a: case 0x1b: case 0x1c: case 0x1d: case 0x1e: case 0x1f: reg = mod_rm - 0x18; switch (opdata) { case T_b: return(reg_8[reg]); case T_w: return(reg_16[reg]); case T_v: if (irp->dflag) { return(reg_32[reg]); } else { return(reg_16[reg]); } } } return(R_BAD); } #ifndef REDHAT static void print_instrname(char *name, instr_rec_t *irp, FILE *ofp) { char *cp, *np, name_str[100]; strncpy (name_str, name, 100); np = name; cp = name_str; while (*np) { if (*np == 'C') { /* For jcxz/jecxz */ if (irp->aflag) { *cp++ = 'e'; } } else if (*np == 'N') { if ((irp->prefixes & PREFIX_FWAIT) == 0) { *cp++ = 'n'; } } else if (*np == 'S') { /* operand size flag */ if (irp->dflag) { *cp++ = 'l'; } else { *cp++ = 'w'; } } else if (*np == 'W') { /* operand size flag for cwtl, cbtw */ if (irp->dflag) { *cp++ = 'w'; } else { *cp++ = 'b'; } } else { *cp++ = *np; } np++; } while(*cp) { *cp++ = ' '; } *cp = 0; fprintf(ofp, "%s", name_str); } #endif /* !REDHAT */ static void op_a(int opnum, int opdata, instr_rec_t *irp) { int offset; kaddr_t pc; pc = instrbuf.addr + (instrbuf.ptr - instrbuf.buf); switch(opdata) { case T_p: if (irp->aflag) { irp->operand[opnum].op_addr = *(uint32_t*)codeptr; codeptr += 4; } else { irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; } irp->operand[opnum].op_seg = *(uint16_t*)codeptr; irp->operand[opnum].op_type = O_LPTR; codeptr += 2; break; case T_v: if (irp->aflag) { offset = *(int*)codeptr; irp->operand[opnum].op_addr = pc + offset + 5; codeptr += 4; } else { offset = *(short*)codeptr; irp->operand[opnum].op_addr = pc + offset + 3; codeptr += 2; } irp->operand[opnum].op_type = O_ADDR; break; default: break; } 
} static void op_c(int opnum, int opdata, instr_rec_t *irp) { int reg; reg = (irp->modrm >> 3) & 7; irp->operand[opnum].op_type = (O_REG|O_CR); irp->operand[opnum].op_reg = reg; } static void op_d(int opnum, int opdata, instr_rec_t *irp) { int reg; reg = (irp->modrm >> 3) & 7; irp->operand[opnum].op_type = (O_REG|O_DB); irp->operand[opnum].op_reg = reg; } static void op_indir_e(int opnum, int opdata, instr_rec_t *irp) { op_e(opnum, opdata, irp); irp->operand[opnum].op_type |= O_INDIR; } static void get_modrm_data16(int opnum, int opdata, instr_rec_t *irp) { int mod ATTRIBUTE_UNUSED; int reg, mod_rm, reg_op; get_modrm_info(irp->modrm, &mod_rm, ®_op); mod = irp->modrm >> 6; switch(mod_rm) { case 0: case 1: case 2: case 3: case 4: case 5: case 7: reg = get_modrm_reg16(mod_rm, opdata, irp); irp->operand[opnum].op_reg = reg; irp->operand[opnum].op_type = (O_REG|O_BASE); break; case 6: /* 16-bit displacement */ irp->operand[opnum].op_type = O_DISP; irp->operand[opnum].op_disp = *(uint16_t*)codeptr; codeptr += 2; break; case 8: /* disp8[BX+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_SI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 9: /* disp8[BX+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_DI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 10: /* disp8[BP+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_SI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 11: /* disp8[BP+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_DI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 12: /* disp8[SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_SI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 13: /* disp8[DI] */ irp->operand[opnum].op_type = 
(O_REG|O_DISP); irp->operand[opnum].op_reg = R_DI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 14: /* disp8[BP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 15: /* disp8[BX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 16: /* disp16[BX+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_SI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 17: /* disp16[BX+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX_DI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 18: /* disp16[BP+SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_SI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 19: /* disp16[BP+DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP_DI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 20: /* disp16[SI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_SI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 21: /* disp16[DI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_DI; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 22: /* disp16[BP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BP; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; case 23: /* disp16[BX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_BX; irp->operand[opnum].op_disp = *(short*)codeptr; codeptr += 2; break; } } static void get_modrm_data32(int opnum, int opdata, instr_rec_t *irp) { int mod 
ATTRIBUTE_UNUSED; int reg, mod_rm, reg_op; get_modrm_info(irp->modrm, &mod_rm, ®_op); mod = irp->modrm >> 6; switch(mod_rm) { case 0: case 1: case 2: case 3: case 6: case 7: reg = get_modrm_reg32(mod_rm, opdata, irp); irp->operand[opnum].op_reg = reg; irp->operand[opnum].op_type = (O_REG|O_BASE); break; case 5: /* 32-bit displacement */ irp->operand[opnum].op_type = O_DISP; irp->operand[opnum].op_disp = *(kaddr_t*)codeptr; codeptr += 4; break; case 8: /* disp8[EAX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eAX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 9: /* disp8[ECX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eCX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 10: /* disp8[EDX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 11: /* disp8[EBX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBX; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 13: /* disp8[EBP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBP; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 14: /* disp8[ESI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eSI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 15: /* disp8[EDI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDI; irp->operand[opnum].op_disp = *(signed char*)codeptr; codeptr++; break; case 16: /* disp32[EAX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eAX; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 17: /* disp32[ECX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eCX; 
irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 18: /* disp32[EDX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDX; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 19: /* disp32[EBX] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBX; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 4: /* [..][..] (SIB) */ case 12: /* disp8[..][..] (SIB) */ case 20: { /* disp32[..][..] (SIB) */ int rm ATTRIBUTE_UNUSED; int s, i, b, mod, havebase; s = (irp->sib >> 6) & 3; i = (irp->sib >> 3) & 7; b = irp->sib & 7; mod = irp->modrm >> 6; rm = irp->modrm & 7; havebase = 1; switch (mod) { case 0: if (b == 5) { havebase = 0; irp->operand[opnum].op_disp = *(int*)codeptr; irp->operand[opnum].op_type = O_DISP; codeptr += 4; } break; case 1: irp->operand[opnum].op_disp = *(signed char*) codeptr; codeptr++; irp->operand[opnum].op_type = O_DISP; break; case 2: irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; irp->operand[opnum].op_type = O_DISP; break; } if (havebase) { irp->operand[opnum].op_base = b; irp->operand[opnum].op_type |= O_BASE; } if (i != 4) { irp->operand[opnum].op_index = i; irp->operand[opnum].op_type |= O_INDEX; } if (s) { irp->operand[opnum].op_scale = s; irp->operand[opnum].op_type |= O_SCALE; } break; } case 21: /* disp32[EBP] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eBP; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 22: /* disp32[ESI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eSI; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; case 23: /* disp32[EDI] */ irp->operand[opnum].op_type = (O_REG|O_DISP); irp->operand[opnum].op_reg = R_eDI; irp->operand[opnum].op_disp = *(int*)codeptr; codeptr += 4; break; } } static int op_e(int opnum, int opdata, instr_rec_t *irp) { int reg, mod, mod_rm, reg_op; 
get_modrm_info(irp->modrm, &mod_rm, ®_op); mod = irp->modrm >> 6; if (mod == 3) { /* ((mod_rm >= 24) && (mod_rm <=31)) */ if (opdata == T_NONE) { return(1); } if (irp->aflag) { reg = get_modrm_reg32(mod_rm, opdata, irp); } else { reg = get_modrm_reg16(mod_rm, opdata, irp); } irp->operand[opnum].op_type = O_REG; irp->operand[opnum].op_reg = reg; if ((reg = R_BAD)) { return(1); } else { return(0); } } if (irp->aflag) { get_modrm_data32(opnum, opdata, irp); } else { get_modrm_data16(opnum, opdata, irp); } if (seg_prefix(irp->prefixes)) { irp->operand[opnum].op_type |= O_SEG; irp->operand[opnum].op_seg = seg_prefix(irp->prefixes); } return(0); } static int op_g(int opnum, int opdata, instr_rec_t *irp) { int reg, mod_rm, reg_op; get_modrm_info(irp->modrm, &mod_rm, ®_op); irp->operand[opnum].op_type = O_REG; if ((reg_op < 0) || (reg_op >= 8)){ irp->operand[opnum].op_reg = R_BAD; return(1); } switch(opdata) { case T_b: reg = reg_8[reg_op]; break; case T_w: reg = reg_16[reg_op]; break; case T_d: reg = reg_32[reg_op]; break; case T_v: if (irp->dflag) { reg = reg_32[reg_op]; } else { reg = reg_16[reg_op]; } break; default: irp->operand[opnum].op_reg = R_BAD; return(1); } irp->operand[opnum].op_reg = reg; return(0); } static void op_i(int opnum, int opdata, instr_rec_t *irp) { irp->operand[opnum].op_type = O_IMMEDIATE; switch (opdata) { case T_b: irp->operand[opnum].op_addr = *(unsigned char*)codeptr; codeptr++; break; case T_w: irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; break; case T_v: if (irp->dflag) { irp->operand[opnum].op_addr = *(uint32_t*)codeptr; codeptr += 4; } else { irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; } break; } } static void op_s(int opnum, int opdata, instr_rec_t *irp) { int reg; reg = (irp->modrm >> 3) & 7; irp->operand[opnum].op_reg = reg_seg[reg]; irp->operand[opnum].op_type = O_REG; } static void op_si(int opnum, int opdata, instr_rec_t *irp) { int val; irp->operand[opnum].op_type = O_IMMEDIATE; switch 
(opdata) { case T_b: val = *(signed char*)codeptr++; irp->operand[opnum].op_addr = val; break; case T_v: if (irp->dflag) { irp->operand[opnum].op_addr = *(int*)codeptr; codeptr += 4; } else { val = *(short*)codeptr; irp->operand[opnum].op_addr = val; codeptr += 2; } break; case T_w: val = *(short*)codeptr; irp->operand[opnum].op_addr = val; codeptr += 2; break; } } static void op_j(int opnum, int opdata, instr_rec_t *irp) { kaddr_t pc; pc = instrbuf.addr + (instrbuf.ptr - instrbuf.buf); pc += (codeptr - instrbuf.ptr); switch (opdata) { case T_b: pc++; pc += *(signed char *)codeptr++; break; case T_v: if (irp->dflag) { /* 32-bit */ pc += 4; pc += *(int*)codeptr; codeptr += 4; } else { /* 16-bit */ pc += 2; pc += *(short*)codeptr; codeptr += 2; } break; } irp->operand[opnum].op_type = O_ADDR; irp->operand[opnum].op_addr = pc; } static void op_m(int opnum, int opdata, instr_rec_t *irp) { op_e(opnum, 0, irp); } static void op_o(int opnum, int opdata, instr_rec_t *irp) { if (irp->aflag) { irp->operand[opnum].op_addr = *(uint32_t*)codeptr; codeptr += 4; } else { irp->operand[opnum].op_addr = *(uint16_t*)codeptr; codeptr += 2; } irp->operand[opnum].op_type = O_OFF; } static void op_r(int opnum, int opdata, instr_rec_t *irp) { int rm; rm = irp->modrm & 7; switch (opdata) { case T_d: irp->operand[opnum].op_reg = reg_32[rm]; break; case T_w: irp->operand[opnum].op_reg = reg_16[rm]; break; } irp->operand[opnum].op_type = O_REG; } static void op_x(int opnum, int opdata, instr_rec_t *irp) { irp->operand[opnum].op_seg = R_DS; if (irp->aflag) { irp->operand[opnum].op_reg = R_eSI; } else { irp->operand[opnum].op_reg = R_SI; } irp->operand[opnum].op_type = O_SEG; } static void op_y(int opnum, int opdata, instr_rec_t *irp) { irp->operand[opnum].op_seg = R_ES; if (irp->aflag) { irp->operand[opnum].op_reg = R_eDI; } else { irp->operand[opnum].op_reg = R_DI; } irp->operand[opnum].op_type = O_SEG; } static void get_operand_info(int opnum, instr_rec_t *irp) { int opcode, opdata; opcode = 
opdata = 0; switch(opnum) { case 0: opcode = irp->opcodep->Op1; opdata = irp->opcodep->opdata1; break; case 1: opcode = irp->opcodep->Op2; opdata = irp->opcodep->opdata2; break; case 2: opcode = irp->opcodep->Op3; opdata = irp->opcodep->opdata3; break; } switch (opcode) { case M_A: op_a(opnum, opdata, irp); break; case M_C: op_c(opnum, opdata, irp); break; case M_D: op_d(opnum, opdata, irp); break; case M_E: op_e(opnum, opdata, irp); break; case M_indirE: op_indir_e(opnum, opdata, irp); break; case M_G: op_g(opnum, opdata, irp); break; case M_I: op_i(opnum, opdata, irp); break; case M_sI: op_si(opnum, opdata, irp); break; case M_J: op_j(opnum, opdata, irp); break; case M_M: op_m(opnum, opdata, irp); break; case M_O: op_o(opnum, opdata, irp); break; case M_R: op_r(opnum, opdata, irp); break; case M_S: op_s(opnum, opdata, irp); break; case M_X: op_x(opnum, opdata, irp); break; case M_Y: op_y(opnum, opdata, irp); break; case M_REG: case M_indirREG: irp->operand[opnum].op_type = O_REG; if (opdata >= R_AX) { irp->operand[opnum].op_reg = opdata; } else { if (irp->dflag) { irp->operand[opnum].op_reg = reg_32[opdata]; } else { irp->operand[opnum].op_reg = reg_16[opdata]; } } if (opcode == M_indirREG) { /* The O_BASE gets the right results */ irp->operand[opnum].op_type |= O_BASE; } break; } } /* Temporary opcode_rec_s struct that we keep around for the times * when we have to construct a special case instruction (e.g. some * floating point instructions). */ static opcode_rec_t tempop; static char fwait_name[] = "fwait"; int get_instr_info(kaddr_t pc, instr_rec_t *irp) { int opcode, size = 0, p, prefixes = 0; unsigned char modrm = 0; opcode_rec_t *op; if (instr_buf_init) { bzero(&instrbuf, sizeof(instrbuf)); instr_buf_init = 0; } /* Check to see instrbuf is valid and if there are enough * bytes in our instruction cache to cover the worst case * scenario for this pc. 
*/ if (!instrbuf.addr || (pc < instrbuf.addr) || (pc > (instrbuf.addr + instrbuf.size - 15))) { instrbuf.addr = pc; instrbuf.size = 256; #ifdef REDHAT fill_instr_cache(pc, (char *)instrbuf.buf); #else GET_BLOCK(pc, 256, instrbuf.buf); #endif if (KL_ERROR) { return(0); } } /* Make sure that the instruction pointer points to the * right byte in the buffer. */ instrbuf.ptr = instrbuf.buf + (pc - instrbuf.addr); codeptr = instrbuf.ptr; irp->addr = pc; /* Check for prefixes */ while((p = is_prefix(*codeptr))) { prefixes |= p; codeptr++; if ((prefixes & PREFIX_FWAIT) && ((*codeptr < 0xd8) || (*codeptr > 0xdf))) { /* If there is an fwait prefix that is not * followed by a float instruction, we need to * create a special instruction record so that * the "fwait" gets printed out. */ bzero(&tempop, sizeof(tempop)); tempop.name = fwait_name; irp->opcodep = &tempop; size = ((unsigned)codeptr - (unsigned)instrbuf.ptr); instrbuf.ptr = codeptr; irp->size = size; return(size); } } if (prefixes & PREFIX_DATA) { irp->dflag ^= 1; } if (prefixes & PREFIX_ADR) { irp->aflag ^= 1; } /* Check for one or two byte opcode, capture the opcode and * check for a ModR/M byte. */ if (*codeptr == 0x0f) { opcode = *((unsigned short*)codeptr); codeptr++; op = &op_386_twobyte[*codeptr]; if(twobyte_has_modrm[*codeptr]) { codeptr++; modrm = *codeptr++; } else { codeptr++; } if (STREQ(op->name, "ud2a")) codeptr += kt->BUG_bytes; } else { opcode = *codeptr; op = &op_386[*codeptr]; if(onebyte_has_modrm[*codeptr]) { codeptr++; modrm = *codeptr++; } else { codeptr++; } } /* See if the get_op bits from the modrm are needed to determine * the actual instruction. 
*/ if (op->Op1 == M_GRP) { op = &grps[op->opdata1][(modrm & 0x38) >> 3]; /* Put something unique in opcode */ opcode = ((opcode << 8)|((modrm & 0x38) >> 3)); } else if (op->Op1 == M_FLOAT) { int mod, rm, reg; mod = modrm >> 6; rm = modrm & 7; reg = (modrm >> 3) & 7; bzero(&tempop, sizeof(tempop)); if (mod != 3) { tempop.name = float_mem[(opcode - 0xd8) * 8 + reg]; tempop.Op1 = M_E; tempop.opdata1 = T_v; op = &tempop; } else { op = &float_grps[opcode - 0xd8][reg]; if (op->Op1 == M_FGRP) { tempop.name = fgrps[op->opdata1][rm]; /* instruction fnstsw is only one with * strange arg */ if ((opcode == 0xdf) && (*codeptr == 0xe0)) { irp->operand[1].op_type = O_REG; irp->operand[1].op_reg = R_eAX; } op = &tempop; } } } irp->opcodep = op; irp->opcode = opcode; irp->modrm = modrm; irp->prefixes = prefixes; /* Check to see if this is a bad instruction (per a table entry) */ if (op->opdata1 == T_BAD) { /* Back off the modrm if we grabbed one and return * from here. */ if (modrm) { codeptr--; size = ((unsigned)codeptr - (unsigned)instrbuf.ptr); instrbuf.ptr = codeptr; irp->size = size; return(size); } } /* Check to see if there is an SIB byte. 
*/ if (((modrm & 0xc0) != 0xc0) && ((modrm & 7) == 4)) { /* There is an SIB byte */ irp->sib = *codeptr++; irp->have_sib = 1; } /* Gather information on operands */ if (op->Op1 && (op->Op1 != M_BAD)) { get_operand_info(0, irp); } if (op->Op2 && (op->Op2 != M_BAD)) { get_operand_info(1, irp); } if (op->Op3 && (op->Op3 != M_BAD)) { get_operand_info(2, irp); } /* Determine total instruction size and adjust instrbuf ptr */ size = ((unsigned)codeptr - (unsigned)instrbuf.ptr); instrbuf.ptr = codeptr; irp->size = size; return(size); } static int seg_prefix(int prefixes) { if (prefixes & PREFIX_CS) { return(R_CS); } else if (prefixes & PREFIX_DS) { return(R_DS); } else if (prefixes & PREFIX_SS) { return(R_SS); } else if (prefixes & PREFIX_ES) { return(R_ES); } else if (prefixes & PREFIX_FS) { return(R_FS); } else if (prefixes & PREFIX_GS) { return(R_GS); } return(0); } #ifdef NOT_USED static void print_seg_prefix(instr_rec_t *irp, FILE *ofp) { if (irp->prefixes & PREFIX_CS) { fprintf(ofp, "%%cs:"); } if (irp->prefixes & PREFIX_DS) { fprintf(ofp, "%%ds:"); } if (irp->prefixes & PREFIX_SS) { fprintf(ofp, "%%ss:"); } if (irp->prefixes & PREFIX_ES) { fprintf(ofp, "%%es:"); } if (irp->prefixes & PREFIX_FS) { fprintf(ofp, "%%fs:"); } if (irp->prefixes & PREFIX_GS) { fprintf(ofp, "%%gs:"); } } #endif #ifndef REDHAT static int print_prefixes(instr_rec_t *irp, FILE *ofp) { int cnt = 0; if (irp->prefixes & PREFIX_REPZ) { fprintf(ofp, "repz "); cnt++; } if (irp->prefixes & PREFIX_REPNZ) { fprintf(ofp, "repnz "); cnt++; } if (irp->prefixes & PREFIX_LOCK) { fprintf(ofp, "lock "); cnt++; } if (irp->prefixes & PREFIX_ADR) { if (irp->aflag) { fprintf(ofp, "addr32 "); } else { fprintf(ofp, "addr16 "); } cnt++; } return(cnt); } static void print_sib_value(int opnum, instr_rec_t *irp, FILE *ofp) { if (irp->operand[opnum].op_type & O_REG) { if (irp->operand[opnum].op_type & O_BASE) { fprintf(ofp, "(%s)", reg_name[irp->operand[opnum].op_reg]); } else { fprintf(ofp, "%s", 
reg_name[irp->operand[opnum].op_reg]); } return; } else if (irp->operand[opnum].op_type & O_IMMEDIATE) { fprintf(ofp, "$0x%x", irp->operand[opnum].op_addr); return; } fprintf(ofp, "("); if (irp->operand[opnum].op_type & O_BASE) { fprintf(ofp, "%s,", reg_name[irp->operand[opnum].op_base]); } else { fprintf(ofp, ","); } if (irp->operand[opnum].op_type & O_INDEX) { fprintf(ofp, "%s,", reg_name[irp->operand[opnum].op_index]); } fprintf(ofp, "%d)", (1 << irp->operand[opnum].op_scale)); } static void print_opvalue(int opnum, instr_rec_t *irp, FILE *ofp) { if (irp->operand[opnum].op_type & O_REG) { if (irp->operand[opnum].op_type & (O_BASE|O_DISP)) { fprintf(ofp, "(%s)", reg_name[irp->operand[opnum].op_reg]); } else { fprintf(ofp, "%s", reg_name[irp->operand[opnum].op_reg]); } } else if (irp->operand[opnum].op_type & O_IMMEDIATE) { fprintf(ofp, "$0x%x", irp->operand[opnum].op_addr); } else if (irp->operand[opnum].op_type & O_ADDR) { /* jump or call address */ printaddr(irp->operand[opnum].op_addr, 0, ofp); } else if (irp->operand[opnum].op_type & O_OFF) { fprintf(ofp, "0x%x", irp->operand[opnum].op_addr); } } int print_instr(kaddr_t pc, FILE *ofp, int flag) { int p = 0, i, j, size, print_comma = 0; instr_rec_t irp; opcode_rec_t *op; bzero(&irp, sizeof(irp)); /* XXX -- For now, make aflag and dflag equal to one. Should get * this from some sort of configuration struct (set via * initialization) */ irp.aflag = 1; irp.dflag = 1; size = get_instr_info(pc, &irp); op = irp.opcodep; if (!op) { fprintf(ofp, "BAD INSTR (pc=0x%x)\n", pc); return(0); } printaddr(pc, 1, ofp); if (flag) { fprintf(ofp, "0x%04x ", irp.opcode); } if (irp.prefixes) { p = print_prefixes(&irp, ofp); } print_instrname(op->name, &irp, ofp); /* HACK! but necessary to match i386-dis.c output for fwait. 
*/ if (!strcmp(op->name, "fwait")) { fprintf(ofp, "\n"); return(irp.size); } if (p || (strlen(op->name) >= 7)) { fprintf(ofp, " "); } else { for (i = 0; i < (7 - strlen(op->name)); i++) { fprintf(ofp, " "); } } for (j = 0; j < 3; j++) { if (irp.opcode == 0xc8) { i = j; } else { i = 2 - j; } if(irp.operand[i].op_type) { if (print_comma) { fprintf(ofp, ","); } if (irp.operand[i].op_type & O_LPTR) { fprintf(ofp, "0x%x,0x%x", irp.operand[i].op_seg, irp.operand[i].op_addr); print_comma++; continue; } if (irp.operand[i].op_type & O_CR) { fprintf(ofp, "%%cr%d", irp.operand[i].op_reg); print_comma++; continue; } if (irp.operand[i].op_type & O_DB) { fprintf(ofp, "%%db%d", irp.operand[i].op_reg); print_comma++; continue; } if (irp.operand[i].op_type & O_SEG) { fprintf(ofp, "%s:(%s)", reg_name[irp.operand[i].op_seg], reg_name[irp.operand[i].op_reg]); print_comma++; continue; } if (irp.operand[i].op_type & O_INDIR) { fprintf(ofp, "*"); } if (irp.operand[i].op_type & O_DISP) { fprintf(ofp, "0x%x", irp.operand[i].op_disp); } if (irp.have_sib) { print_sib_value(i, &irp, ofp); } else { print_opvalue(i, &irp, ofp); } print_comma++; } } if (flag) { fprintf(ofp, " (%d %s)\n", irp.size, (irp.size > 1) ? 
"bytes" : "byte"); } else { fprintf(ofp, "\n"); } return(irp.size); } void list_instructions(FILE *ofp) { int i, j, print_comma = 0; fprintf(ofp, "ONE BYTE INSTRUCTIONS:\n\n"); for(i = 0; i < 256; i++) { fprintf(ofp, "0x%04x %s", i, op_386[i].name); for (j = 0; j < (10 - strlen(op_386[i].name)); j++) { fprintf(ofp, " "); } if (op_386[i].Op1) { print_optype(op_386[i].Op1, op_386[i].opdata1, ofp); print_comma++; } if (op_386[i].Op2) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386[i].Op2, op_386[i].opdata2, ofp); print_comma++; } if (op_386[i].Op3) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386[i].Op3, op_386[i].opdata3, ofp); } fprintf(ofp, "\n"); } fprintf(ofp, "\nTWO BYTE INSTRUCTIONS:\n\n"); for(i = 0; i < 256; i++) { fprintf(ofp, "0x0f%02x %s", i, op_386_twobyte[i].name); for (j = 0; j < (10 - strlen(op_386_twobyte[i].name)); j++) { fprintf(ofp, " "); } if (op_386_twobyte[i].Op1) { print_optype(op_386_twobyte[i].Op1, op_386_twobyte[i].opdata1, ofp); print_comma++; } if (op_386_twobyte[i].Op2) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386_twobyte[i].Op2, op_386_twobyte[i].opdata2, ofp); print_comma++; } if (op_386_twobyte[i].Op3) { if (print_comma) { fprintf(ofp, ","); } print_optype(op_386_twobyte[i].Op3, op_386_twobyte[i].opdata3, ofp); } fprintf(ofp, "\n"); } } #endif /* !REDHAT */ void free_instr_stream(instr_rec_t *irp) { instr_rec_t *ptr; if(irp) { while (irp->prev) { irp = irp->prev; } while (irp) { ptr = irp; irp = irp->next; kl_free_block(ptr); } } } instr_rec_t * get_instr_stream(kaddr_t pc, int bcount, int acount) { int size, count = 0; kaddr_t addr, start_addr, end_addr; syment_t *sp1, *sp2; #ifdef REDHAT syment_t *sp, *sp_next, *sp_next_next; ulong offset; #endif instr_rec_t *fst = (instr_rec_t *)NULL, *lst, *ptr, *cur; #ifdef REDHAT cur = NULL; if ((sp = x86_is_entry_tramp_address(pc, &offset))) pc = sp->value + offset; #endif if (!(sp1 = kl_lkup_symaddr(pc))) { return((instr_rec_t *)NULL); } start_addr = 
sp1->s_addr; if (pc <= (sp1->s_addr + (bcount * 15))) { if ((sp2 = kl_lkup_symaddr(sp1->s_addr - 4))) { start_addr = sp2->s_addr; } } #ifdef REDHAT sp_next = next_symbol(NULL, sp1); if (!sp_next) return((instr_rec_t *)NULL); sp_next_next = next_symbol(NULL, sp_next); if (pc > (sp_next->s_addr - (acount * 15))) { if (sp_next_next) { end_addr = sp_next_next->s_addr; } else { end_addr = sp_next->s_addr; } } else { end_addr = sp_next->s_addr; } #else if (pc > (sp1->s_next->s_addr - (acount * 15))) { if (sp1->s_next->s_next) { end_addr = sp1->s_next->s_next->s_addr; } else { end_addr = sp1->s_next->s_addr; } } else { end_addr = sp1->s_next->s_addr; } #endif addr = start_addr; while (addr <= pc) { if (addr >= end_addr) { /* We've gone too far (beyond the end of this * function) The pc most likely was not valid * (it pointed into the middle of an instruction). */ free_instr_stream(cur); return((instr_rec_t *)NULL); } if (count <= bcount) { /* Allocate another record */ cur = (instr_rec_t *) kl_alloc_block(sizeof(instr_rec_t), K_TEMP); count++; cur->aflag = cur->dflag = 1; if ((ptr = fst)) { while (ptr->next) { ptr = ptr->next; } ptr->next = cur; cur->prev = ptr; } else { fst = cur; } } else { /* Pull the last record to the front of the list */ ptr = fst; if (ptr->next) { fst = ptr->next; fst->prev = (instr_rec_t *)NULL; cur->next = ptr; } bzero(ptr, sizeof(*ptr)); ptr->aflag = ptr->dflag = 1; if (ptr != fst) { ptr->prev = cur; } cur = ptr; } size = get_instr_info(addr, cur); if (size == 0) { free_instr_stream(cur); return((instr_rec_t *)NULL); } addr += size; } if (acount) { lst = cur; for (count = 0; count < acount; count++) { ptr = (instr_rec_t *) kl_alloc_block(sizeof(instr_rec_t), K_TEMP); ptr->aflag = ptr->dflag = 1; size = get_instr_info(addr, ptr); if (size == 0) { kl_free_block(ptr); return(cur); } lst->next = ptr; ptr->prev = lst; lst = ptr; addr += size; } } return(cur); } #ifndef REDHAT /* * print_instr_stream() */ kaddr_t print_instr_stream(kaddr_t value, int 
bcount, int acount, int flags, FILE *ofp) { kaddr_t v = value; instr_rec_t *cur_irp, *irp; if ((cur_irp = get_instr_stream(v, bcount, acount))) { irp = cur_irp; /* Walk back to the start of the stream and then * print out all instructions in the stream. */ while (irp->prev) { irp = irp->prev; } while (irp) { if (flags & C_FULL) { print_instr(irp->addr, ofp, 1); } else { print_instr(irp->addr, ofp, 0); } if (irp->addr >= value) { v += irp->size; } irp = irp->next; } free_instr_stream(cur_irp); } return(v); } /* * dump_instr() -- architecture specific instruction dump routine */ void dump_instr(kaddr_t addr, uint64_t count, int flags, FILE *ofp) { fprintf(ofp, "This operation not supported for i386 architecture.\n"); } #endif /* !REDHAT */ /* * lkcdutils-4.1/libutil/kl_queue.c */ /* * Copyright 2002 Silicon Graphics, Inc. All rights reserved. */ #ifndef REDHAT #include #endif /* * kl_enqueue() -- Add a new element to the tail of doubly linked list. */ void kl_enqueue(element_t **list, element_t *new) { element_t *head; /* * If there aren't any elements on the list, then make new element the * head of the list and make it point to itself (next and prev). */ if (!(head = *list)) { new->next = new; new->prev = new; *list = new; } else { head->prev->next = new; new->prev = head->prev; new->next = head; head->prev = new; } } /* * kl_dequeue() -- Remove an element from the head of doubly linked list. 
*/ element_t * kl_dequeue(element_t **list) { element_t *head; /* If there's nothing queued up, just return */ if (!*list) { return((element_t *)NULL); } head = *list; /* If there is only one element on list, just remove it */ if (head->next == head) { *list = (element_t *)NULL; } else { head->next->prev = head->prev; head->prev->next = head->next; *list = head->next; } head->next = 0; return(head); } #ifndef REDHAT /* * kl_findqueue() */ int kl_findqueue(element_t **list, element_t *item) { element_t *e; /* If there's nothing queued up, just return */ if (!*list) { return(0); } e = *list; /* Check to see if there is only one element on the list. */ if (e->next == e) { if (e != item) { return(0); } } else { /* Now walk linked list looking for item */ while(1) { if (e == item) { break; } else if (e->next == *list) { return(0); } e = e->next; } } return(1); } /* * kl_findlist_queue() */ int kl_findlist_queue(list_of_ptrs_t **list, list_of_ptrs_t *item, int (*compare)(void *,void *)) { list_of_ptrs_t *e; /* If there's nothing queued up, just return */ if (!*list) { return(0); } e = *list; /* Check to see if there is only one element on the list. */ if (((element_t *)e)->next == (element_t *)e) { if (compare(e,item)) { return(0); } } else { /* Now walk linked list looking for item */ while(1) { if (!compare(e,item)) { break; } else if (((element_t *)e)->next == (element_t *)*list) { return(0); } e = (list_of_ptrs_t *)((element_t *)e)->next; } } return(1); } /* * kl_remqueue() -- Remove specified element from doubly linked list. 
*/ void kl_remqueue(element_t **list, element_t *item) { /* Check to see if item is first on the list */ if (*list == item) { if (item->next == item) { *list = (element_t *)NULL; return; } else { *list = item->next; } } /* Remove item from list */ item->next->prev = item->prev; item->prev->next = item->next; } #endif /* !REDHAT */ #endif /* X86 */ crash-utility-crash-9cd43f5/va_server_v1.c0000664000372000037200000002276515107550337020156 0ustar juerghjuergh/* va_server_v1.c - kernel crash dump file translation library * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 11/12/99, Dave Winchell, Preserve V1 interface. 
*/ #include #include #include #include #include #include #include #include #include "va_server.h" #include #include #include struct map_hdr_v1 *vas_map_base_v1 = (struct map_hdr_v1 *)0; /* base of tree */ #ifdef NOT_DEF #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(page_size - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(page_size - 1))) #endif extern u_long vas_base_va; extern u_long vas_start_va; u_long vas_end_va; void find_data_v1(u_long va, u_long *buf, u_long *len, u_long *offset); void load_data_v1(struct map_hdr_v1 *hdr, u_long index, u_long *buf, u_long *len); struct map_hdr_v1 *find_header_v1(u_long va); u_long vas_find_start_v1(void); u_long vas_find_end_v1(void); int read_maps_v1(char *crash_file); int read_map_v1(int blk_pos); extern int Page_Size; extern FILE *vas_file_p; extern void *malloc(size_t); int va_server_init_v1(char *crash_file, u_long *start, u_long *end, u_long *stride) { if(read_maps_v1(crash_file)) return -1; vas_base_va = vas_start_va = vas_find_start_v1(); vas_end_va = vas_find_end_v1(); if(start) *start = vas_start_va; if(end) *end = vas_end_va; if(stride) *stride = vas_map_base_v1->va_per_entry; return 0; } int vas_lseek_v1(u_long position, int whence) { if(whence != SEEK_SET) return -1; if(position > (vas_end_va - vas_start_va)) { printf("position 0x%lx beyond dump range of 0x%lx\n", position, (vas_end_va - vas_start_va)); return -1; } vas_base_va = vas_start_va + position; return 0; } size_t vas_read_v1(void *buf_in, size_t count) { u_long len, offset, buf, va; u_long num, output, remaining; if(count > (vas_end_va - vas_base_va)) { printf("count 0x%lx greater than remaining dump of 0x%lx\n", (ulong)count, (vas_end_va - vas_base_va)); return -1; } va = vas_base_va; remaining = count; output = (u_long)buf_in; while(remaining) { find_data_v1(va, &buf, &len, &offset); num = (remaining > (len - offset)) ? 
(len - offset) : remaining; bcopy((const void *)(buf+offset), (void *)output, num); remaining -= num; va += num; output += num; } vas_base_va += count; return count; } size_t vas_write_v1(void *buf_in, size_t count) { u_long len, offset, buf, va; if(count != sizeof(u_long)) { printf("count %d not %d\n", (int)count, (int)sizeof(u_long)); return -1; } va = vas_base_va; find_data_v1(va, &buf, &len, &offset); *(u_long *)(buf+offset) = *(u_long *)buf_in; vas_base_va += count; return count; } void find_data_v1(u_long va, u_long *buf, u_long *len, u_long *offset) { struct map_hdr_v1 *hdr; u_long index, off; hdr = find_header_v1(va); index = (va - hdr->start_va) / hdr->va_per_entry; off = (va - hdr->start_va) % hdr->va_per_entry; load_data_v1(hdr, index, buf, len); if(offset) *offset = off; } void vas_free_data_v1(u_long va) { struct map_hdr_v1 *hdr; u_long index; hdr = find_header_v1(va); index = (va - hdr->start_va) / hdr->va_per_entry; if(hdr->map[index].exp_data) { free((void *)hdr->map[index].exp_data); hdr->map[index].exp_data = 0; } } void load_data_v1(struct map_hdr_v1 *hdr, u_long index, u_long *buf, u_long *len) { char *compr_buf; char *exp_buf; int ret, items; uLongf destLen; if(hdr->map[index].exp_data) goto out; ret = fseek(vas_file_p, (long)((hdr->blk_offset + hdr->map[index].start_blk) * hdr->blk_size), SEEK_SET); if(ret == -1) { printf("load_data: unable to fseek, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } compr_buf = (char *)malloc(2*hdr->va_per_entry); if(!compr_buf) { printf("load_data: bad ret from malloc, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } items = fread((void *)compr_buf, sizeof(char), hdr->map[index].num_blks * hdr->blk_size, vas_file_p); if(items != hdr->map[index].num_blks * hdr->blk_size) { printf("unable to read blocks from errno = %d\n", ferror(vas_file_p)); clean_exit(1); } hdr->map[index].exp_data = exp_buf = (char *)malloc(hdr->va_per_entry); if(!exp_buf) { printf("load_data: bad ret from malloc, errno = %d\n", 
ferror(vas_file_p)); clean_exit(1); } destLen = (uLongf)(2*hdr->va_per_entry); ret = uncompress((Bytef *)exp_buf, &destLen, (const Bytef *)compr_buf, (uLong)items); /* if(destLen != hdr->va_per_entry) { printf("uncompress error\n"); exit(1); } */ if(ret) { if(ret == Z_MEM_ERROR) printf("load_data, bad ret Z_MEM_ERROR from uncompress\n"); else if(ret == Z_BUF_ERROR) printf("load_data, bad ret Z_BUF_ERROR from uncompress\n"); else if(ret == Z_DATA_ERROR) printf("load_data, bad ret Z_DATA_ERROR from uncompress\n"); else printf("load_data, bad ret %d from uncompress\n", ret); clean_exit(1); } free((void *)compr_buf); out: if(buf) *buf = (u_long)hdr->map[index].exp_data; if(len) *len = hdr->va_per_entry; return; } struct map_hdr_v1 *find_header_v1(u_long va) { struct map_hdr_v1 *hdr; int found = 0; for(hdr = vas_map_base_v1; hdr; hdr = hdr->next) if((va >= hdr->start_va) && (va < hdr->end_va)) { found = 1; break; } if(found) return hdr; else return (struct map_hdr_v1 *)0; } u_long vas_find_start_v1(void) { struct map_hdr_v1 *hdr; u_long start; start = vas_map_base_v1->start_va; for(hdr = vas_map_base_v1; hdr; hdr = hdr->next) if(hdr->start_va < start) start = hdr->start_va; return start; } u_long vas_find_end_v1(void) { struct map_hdr_v1 *hdr; u_long end; end = vas_map_base_v1->end_va; for(hdr = vas_map_base_v1; hdr; hdr = hdr->next) if(hdr->end_va > end) end = hdr->end_va; return end; } int read_maps_v1(char *crash_file) { int *cur_entry_p, *cp; int ret, items, blk_pos; cur_entry_p = (int *)malloc(Page_Size); if(!cur_entry_p) { printf("read_maps: bad ret from malloc, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } bzero((void *)cur_entry_p, Page_Size); vas_file_p = fopen(crash_file, "r"); if(vas_file_p == (FILE *)0) { printf("read_maps: bad ret from fopen for %s: %s\n", crash_file, strerror(errno)); free(cur_entry_p); return -1; } ret = fseek(vas_file_p, (long)0, SEEK_SET); if(ret == -1) { printf("read_maps: unable to fseek in %s, errno = %d\n", crash_file, 
ferror(vas_file_p)); free(cur_entry_p); return -1; } items = fread((void *)cur_entry_p, 1, Page_Size, vas_file_p); if(items != Page_Size) { printf("read_maps: unable to read header from %s, errno = %d\n", crash_file, ferror(vas_file_p)); free(cur_entry_p); return -1; } ret = -1; cp = cur_entry_p; while ((blk_pos = *cp++)) { if (read_map_v1(blk_pos)) { free(cur_entry_p); return -1; } ret = 0; } free(cur_entry_p); return ret; } int read_map_v1(int blk_pos) { struct crash_map_hdr_v1 *disk_hdr; int ret, items; struct map_hdr_v1 *hdr, *hdr1; extern int console(char *, ...); hdr = (struct map_hdr_v1 *)malloc(sizeof(struct map_hdr_v1)); if(!hdr) { printf("read_map: unable to malloc mem\n"); return -1; } bzero((void *)hdr, sizeof(struct map_hdr_v1)); disk_hdr = (struct crash_map_hdr_v1 *)malloc(Page_Size); ret = fseek(vas_file_p, (long)(blk_pos*Page_Size), SEEK_SET); if(ret == -1) { console("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(hdr); free(disk_hdr); return -1; } items = fread((void *)disk_hdr, 1, Page_Size, vas_file_p); if(items != Page_Size) { free(hdr); free(disk_hdr); return -1; } if(disk_hdr->magic[0] != CRASH_MAGIC) { console("va_server: bad magic 0x%lx\n", disk_hdr->magic[0]); free(hdr); free(disk_hdr); return -1; } ret = fseek(vas_file_p, (long)((blk_pos + disk_hdr->map_block) * disk_hdr->blk_size), SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(hdr); free(disk_hdr); return -1; } hdr->map_entries = disk_hdr->map_entries; hdr->va_per_entry = disk_hdr->va_per_entry; hdr->blk_offset = blk_pos - CRASH_OFFSET_BLKS; hdr->blk_size = disk_hdr->blk_size; Page_Size = disk_hdr->blk_size; /* over-ride PAGE_SIZE */ hdr->map = (struct crash_map_entry_v1 *)malloc(hdr->map_entries * sizeof(struct crash_map_entry_v1)); items = fread((void *)hdr->map, sizeof(struct crash_map_entry_v1), hdr->map_entries, vas_file_p); if(items != hdr->map_entries) { printf("unable to read map entries, err = %d\n", 
errno); free(hdr); free(disk_hdr); return -1; } hdr->start_va = hdr->map[0].start_va; hdr->end_va = hdr->start_va + hdr->map_entries * hdr->va_per_entry; if(!vas_map_base_v1) { vas_map_base_v1 = hdr; hdr->next = (struct map_hdr_v1 *)0; } else { hdr1 = vas_map_base_v1; while(hdr1->next) hdr1 = hdr1->next; hdr1->next = hdr; hdr->next = (struct map_hdr_v1 *)0; } free((void *)disk_hdr); return 0; } crash-utility-crash-9cd43f5/printk.c0000664000372000037200000002513115107550337017051 0ustar juerghjuergh#include "defs.h" #include #include "demangle.h" /* convenience struct for passing many values to helper functions */ struct prb_map { char *prb; char *desc_ring; unsigned long desc_ring_count; char *descs; char *infos; unsigned int pid_max_chars; char *text_data_ring; unsigned long text_data_ring_size; char *text_data; }; /* * desc_state and DESC_* definitions taken from kernel source: * * kernel/printk/printk_ringbuffer.h */ /* The possible responses of a descriptor state-query. */ enum desc_state { desc_miss = -1, /* ID mismatch (pseudo state) */ desc_reserved = 0x0, /* reserved, in use by writer */ desc_committed = 0x1, /* committed by writer, could get reopened */ desc_finalized = 0x2, /* committed, no further modification allowed */ desc_reusable = 0x3, /* free, not yet used by any writer */ }; #define DESC_SV_BITS (sizeof(unsigned long) * 8) #define DESC_FLAGS_SHIFT (DESC_SV_BITS - 2) #define DESC_FLAGS_MASK (3UL << DESC_FLAGS_SHIFT) #define DESC_STATE(sv) (3UL & (sv >> DESC_FLAGS_SHIFT)) #define DESC_ID_MASK (~DESC_FLAGS_MASK) #define DESC_ID(sv) ((sv) & DESC_ID_MASK) /* * get_desc_state() taken from kernel source: * * kernel/printk/printk_ringbuffer.c */ /* Query the state of a descriptor. 
 */
static enum desc_state get_desc_state(unsigned long id, unsigned long state_val)
{
	/* A state value belonging to a different descriptor generation. */
	if (id != DESC_ID(state_val))
		return desc_miss;

	return DESC_STATE(state_val);
}

/*
 * One-time initialization of the sizes and member offsets of the kernel's
 * printk ringbuffer structures, gathered from the kernel's debuginfo.
 */
static void init_offsets(void)
{
	char *n;

	n = "printk_info";
	STRUCT_SIZE_INIT(printk_info, n);
	MEMBER_OFFSET_INIT(printk_info_seq, n, "seq");
	MEMBER_OFFSET_INIT(printk_info_ts_nsec, n, "ts_nsec");
	MEMBER_OFFSET_INIT(printk_info_text_len, n, "text_len");
	MEMBER_OFFSET_INIT(printk_info_level, n, "level");
	MEMBER_OFFSET_INIT(printk_info_caller_id, n, "caller_id");
	MEMBER_OFFSET_INIT(printk_info_dev_info, n, "dev_info");

	n = "dev_printk_info";
	MEMBER_OFFSET_INIT(dev_printk_info_subsystem, n, "subsystem");
	MEMBER_OFFSET_INIT(dev_printk_info_device, n, "device");

	n = "printk_ringbuffer";
	STRUCT_SIZE_INIT(printk_ringbuffer, n);
	MEMBER_OFFSET_INIT(prb_desc_ring, n, "desc_ring");
	MEMBER_OFFSET_INIT(prb_text_data_ring, n, "text_data_ring");

	n = "prb_desc_ring";
	MEMBER_OFFSET_INIT(prb_desc_ring_count_bits, n, "count_bits");
	MEMBER_OFFSET_INIT(prb_desc_ring_descs, n, "descs");
	MEMBER_OFFSET_INIT(prb_desc_ring_infos, n, "infos");
	MEMBER_OFFSET_INIT(prb_desc_ring_head_id, n, "head_id");
	MEMBER_OFFSET_INIT(prb_desc_ring_tail_id, n, "tail_id");

	n = "prb_desc";
	STRUCT_SIZE_INIT(prb_desc, n);
	MEMBER_OFFSET_INIT(prb_desc_state_var, n, "state_var");
	MEMBER_OFFSET_INIT(prb_desc_text_blk_lpos, n, "text_blk_lpos");

	n = "prb_data_blk_lpos";
	MEMBER_OFFSET_INIT(prb_data_blk_lpos_begin, n, "begin");
	MEMBER_OFFSET_INIT(prb_data_blk_lpos_next, n, "next");

	n = "prb_data_ring";
	MEMBER_OFFSET_INIT(prb_data_ring_size_bits, n, "size_bits");
	MEMBER_OFFSET_INIT(prb_data_ring_data, n, "data");

	n = "atomic_long_t";
	MEMBER_OFFSET_INIT(atomic_long_t_counter, n, "counter");
}

/*
 * Print a single printk record identified by descriptor "id".
 *
 *  m:         pre-read copies of the ringbuffer's descriptor, info and
 *             text-data arrays (filled in by the caller)
 *  id:        descriptor ID of the record to print
 *  msg_flags: SHOW_LOG_* option bits controlling the output format
 *
 * Records whose descriptor is not in the committed or finalized state,
 * and data-less text blocks (begin == next), are silently skipped.
 * Output is written to the global fp.
 */
static void dump_record(struct prb_map *m, unsigned long id, int msg_flags)
{
	unsigned short text_len;
	unsigned long state_var;
	unsigned int caller_id;
	enum desc_state state;
	unsigned char level;
	unsigned long begin;
	unsigned long next;
	char buf[BUFSIZE];
	uint64_t ts_nsec;
	ulonglong nanos;
	ulonglong seq;
	int ilen = 0, i, nlines;	/* ilen: prefix width used to indent continuation lines */
	char *desc, *info, *text, *p;
	ulong rem;

	desc = m->descs + ((id % m->desc_ring_count) * SIZE(prb_desc));

	/* skip non-committed record */
	state_var = ULONG(desc + OFFSET(prb_desc_state_var) + OFFSET(atomic_long_t_counter));
	state = get_desc_state(id, state_var);
	if (state != desc_committed && state != desc_finalized)
		return;

	info = m->infos + ((id % m->desc_ring_count) * SIZE(printk_info));

	seq = ULONGLONG(info + OFFSET(printk_info_seq));
	caller_id = UINT(info + OFFSET(printk_info_caller_id));
	if (CRASHDEBUG(1))
		fprintf(fp, "seq: %llu caller_id: %x (%s: %u)\n", seq, caller_id,
			caller_id & 0x80000000 ? "cpu" : "pid", caller_id & ~0x80000000);

	text_len = USHORT(info + OFFSET(printk_info_text_len));

	/* logical positions are reduced modulo the data ring size */
	begin = ULONG(desc + OFFSET(prb_desc_text_blk_lpos) +
		OFFSET(prb_data_blk_lpos_begin)) % m->text_data_ring_size;
	next = ULONG(desc + OFFSET(prb_desc_text_blk_lpos) +
		OFFSET(prb_data_blk_lpos_next)) % m->text_data_ring_size;

	/* skip data-less text blocks */
	if (begin == next)
		goto out;

	if ((msg_flags & SHOW_LOG_TEXT) == 0) {
		ts_nsec = ULONGLONG(info + OFFSET(printk_info_ts_nsec));
		nanos = (ulonglong)ts_nsec / (ulonglong)1000000000;
		rem = (ulonglong)ts_nsec % (ulonglong)1000000000;
		if (msg_flags & SHOW_LOG_CTIME) {
			time_t t = kt->boot_date.tv_sec + nanos;
			sprintf(buf, "[%s] ", ctime_tz(&t));
		} else
			sprintf(buf, "[%5lld.%06ld] ", nanos, rem/1000);
		ilen += strlen(buf);
		fprintf(fp, "%s", buf);
	}

	/*
	 * The lockless ringbuffer introduced in Linux-5.10 always has
	 * the caller_id field available, so if requested, print it.
	 */
	if (msg_flags & SHOW_LOG_CALLER) {
		const unsigned int cpuid = 0x80000000;
		char cbuf[PID_CHARS_MAX];
		unsigned int cid;

		/* Get id type, isolate id value in cid for print */
		cid = UINT(info + OFFSET(printk_info_caller_id));
		sprintf(cbuf, "%c%d", (cid & cpuid) ? 'C' : 'T', cid & ~cpuid);
		sprintf(buf, "[%*s] ", m->pid_max_chars, cbuf);
		ilen += strlen(buf);
		fprintf(fp, "%s", buf);
	}

	if (msg_flags & SHOW_LOG_LEVEL) {
		/* log level lives in the top 3 bits of the "level" byte */
		level = UCHAR(info + OFFSET(printk_info_level)) >> 5;
		sprintf(buf, "<%x>", level);
		ilen += strlen(buf);
		fprintf(fp, "%s", buf);
	}

	/* handle wrapping data block */
	if (begin > next)
		begin = 0;

	/* skip over descriptor ID */
	begin += sizeof(unsigned long);

	/* handle truncated messages */
	if (next - begin < text_len)
		text_len = next - begin;

	text = m->text_data + begin;

	if ((msg_flags & SHOW_LOG_RUST) && (text_len > BUFSIZE)) {
		error(WARNING, "\nThe messages could be truncated!\n");
		text_len = BUFSIZE;
	}

	/*
	 * NOTE(review): under SHOW_LOG_RUST, "buf" is reused as a line
	 * accumulator below, indexed by the text position "i".  If none of
	 * the prefix branches above wrote to buf (e.g. SHOW_LOG_TEXT set,
	 * no caller/level requested), the strlen(buf) tests below read an
	 * uninitialized buffer -- verify against the callers' flag usage.
	 */
	for (i = 0, nlines = 0, p = text; i < text_len; i++, p++) {
		if (*p == '\n') {
			/*
			 * When printing disassembly code blocks in the log, saw number
			 * of empty lines printed and some disassembly code was missed.
			 * So far I haven't got better solution to handle the current
			 * case(when the input data contains several lines, those '\n'
			 * are written one by one), here try to check if there are multiple
			 * line breaks to decide what to do next.
			 */
			if ((msg_flags & SHOW_LOG_RUST) && (i != text_len - 1)) {
				nlines++;
				if (strlen(buf)) {
					fprintf(fp, "%s", buf);
					memset(buf, 0, strlen(buf));
				}
			}
			fprintf(fp, "\n%s", space(ilen));
		} else if ((msg_flags & SHOW_LOG_RUST) && (isprint(*p) || isspace(*p))) {
			/* first line is accumulated in buf for later demangling */
			if (nlines >= 1)
				fputc(*p, fp);
			else
				sprintf(&buf[i], "%c", *p);
		} else if (isprint(*p) || isspace(*p))
			fputc(*p, fp);
		else
			fputc('.', fp);	/* non-printable bytes shown as '.' */
	}

	/*
	 * Try to demangle a mangled Rust symbol(calltrace) from log buffer
	 */
	if (msg_flags & SHOW_LOG_RUST) {
		char *p1 = strstr(buf, "_R");
		if (!p1)
			p1 = strstr(buf, "_ZN");
		char *p2 = strrchr(buf, '+');

		if (p1 && p2) {
			char mangled[BUFSIZE] = {0};
			char demangled[BUFSIZE] = {0};
			char *res;
			size_t slen = p1 - buf;

			/* keep any text preceding the mangled symbol */
			if (slen)
				memcpy(demangled, buf, slen);
			memcpy(mangled, p1, p2-p1);
			res = rust_demangle(mangled, DMGL_RUST);
			if (res) {
				snprintf(demangled+slen, BUFSIZE-slen, "%s%s", res, p2);
				fprintf(fp, "%s",demangled);
				free(res);	/* rust_demangle() allocates */
			} else
				fprintf(fp, "%s", buf);
		} else
			fprintf(fp, "%s", buf);
	}

	if (msg_flags & SHOW_LOG_DICT) {
		text = info + OFFSET(printk_info_dev_info) + OFFSET(dev_printk_info_subsystem);
		if (strlen(text))
			fprintf(fp, "\n%sSUBSYSTEM=%s", space(ilen), text);

		text = info + OFFSET(printk_info_dev_info) + OFFSET(dev_printk_info_device);
		if (strlen(text))
			fprintf(fp, "\n%sDEVICE=%s", space(ilen), text);
	}

out:
	fprintf(fp, "\n");
}

/*
 * Handle the lockless printk_ringbuffer.
 */
/*
 * Entry point for dumping the lockless printk ringbuffer (Linux 5.10+).
 * Reads the entire descriptor ring, info ring and text data ring into
 * local buffers, then walks the records from tail_id to head_id and
 * prints each one via dump_record().  msg_flags carries the SHOW_LOG_*
 * option bits.  On any readmem() failure, the buffers acquired so far
 * are released via the fall-through goto labels below.
 */
void dump_lockless_record_log(int msg_flags)
{
	unsigned long head_id;
	unsigned long tail_id;
	unsigned long kaddr;
	unsigned long id;
	struct prb_map m;

	/* lazily resolve struct sizes/offsets on first use */
	if (INVALID_SIZE(printk_info))
		init_offsets();

	/* setup printk_ringbuffer */
	get_symbol_data("prb", sizeof(char *), &kaddr);
	m.prb = GETBUF(SIZE(printk_ringbuffer));
	if (!readmem(kaddr, KVADDR, m.prb, SIZE(printk_ringbuffer),
	    "printk_ringbuffer contents", RETURN_ON_ERROR|QUIET)) {
		error(WARNING, "\ncannot read printk_ringbuffer contents\n");
		goto out_prb;
	}

	/* setup descriptor ring */
	m.desc_ring = m.prb + OFFSET(prb_desc_ring);
	m.desc_ring_count = 1 << UINT(m.desc_ring + OFFSET(prb_desc_ring_count_bits));

	kaddr = ULONG(m.desc_ring + OFFSET(prb_desc_ring_descs));
	m.descs = GETBUF(SIZE(prb_desc) * m.desc_ring_count);
	if (!readmem(kaddr, KVADDR, m.descs, SIZE(prb_desc) * m.desc_ring_count,
	    "prb_desc_ring contents", RETURN_ON_ERROR|QUIET)) {
		error(WARNING, "\ncannot read prb_desc_ring contents\n");
		goto out_descs;
	}

	kaddr = ULONG(m.desc_ring + OFFSET(prb_desc_ring_infos));
	m.infos = GETBUF(SIZE(printk_info) * m.desc_ring_count);
	if (!readmem(kaddr, KVADDR, m.infos, SIZE(printk_info) * m.desc_ring_count,
	    "prb_info_ring contents", RETURN_ON_ERROR|QUIET)) {
		error(WARNING, "\ncannot read prb_info_ring contents\n");
		goto out_infos;
	}

	/* setup text data ring */
	m.text_data_ring = m.prb + OFFSET(prb_text_data_ring);
	m.text_data_ring_size = 1 << UINT(m.text_data_ring + OFFSET(prb_data_ring_size_bits));

	kaddr = ULONG(m.text_data_ring + OFFSET(prb_data_ring_data));
	m.text_data = GETBUF(m.text_data_ring_size);
	if (!readmem(kaddr, KVADDR, m.text_data, m.text_data_ring_size,
	    "prb_text_data_ring contents", RETURN_ON_ERROR|QUIET)) {
		error(WARNING, "\ncannot read prb_text_data_ring contents\n");
		goto out_text_data;
	}

	/* If caller_id was requested, get the pid_max value for print */
	if (msg_flags & SHOW_LOG_CALLER) {
		unsigned int pidmax;

		if (!try_get_symbol_data("pid_max", sizeof(pidmax), &pidmax))
			m.pid_max_chars = PID_CHARS_DEFAULT;
		else if (pidmax <= 99999)
			m.pid_max_chars = 6;
		else if (pidmax <= 999999)
			m.pid_max_chars = 7;
		else
			m.pid_max_chars = PID_CHARS_DEFAULT;
	} else {
		m.pid_max_chars = PID_CHARS_DEFAULT;
	}

	/* ready to go */
	tail_id = ULONG(m.desc_ring + OFFSET(prb_desc_ring_tail_id) +
		OFFSET(atomic_long_t_counter));
	head_id = ULONG(m.desc_ring + OFFSET(prb_desc_ring_head_id) +
		OFFSET(atomic_long_t_counter));

	hq_open();
	/* IDs increase monotonically; mask strips the descriptor state bits */
	for (id = tail_id; id != head_id; id = (id + 1) & DESC_ID_MASK)
		dump_record(&m, id, msg_flags);

	/* dump head record */
	dump_record(&m, id, msg_flags);
	hq_close();

	/* fall-through cleanup: each label releases one more buffer */
out_text_data:
	FREEBUF(m.text_data);
out_infos:
	FREEBUF(m.infos);
out_descs:
	FREEBUF(m.descs);
out_prb:
	FREEBUF(m.prb);
}
crash-utility-crash-9cd43f5/sadump.h0000664000372000037200000001471415107550337017045 0ustar juerghjuergh/*
 * sadump.h - core analysis suite
 *
 * Copyright (c) 2011 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: HATAYAMA Daisuke
 */

/* NOTE(review): the include targets were lost in extraction -- TODO restore */
#include
#include

/*
 * On-disk sadump format layouts.  Field order and sizes mirror the dump
 * device format and must not be changed.
 */

/* EFI-style timestamp as stored in the sadump headers. */
typedef struct efi_time {
	uint16_t year;
	uint8_t month;
	uint8_t day;
	uint8_t hour;
	uint8_t minute;
	uint8_t second;
	uint8_t pad1;
	uint32_t nanosecond;
#define EFI_UNSPECIFIED_TIMEZONE 2047
	int16_t timezone;
	uint8_t daylight;
	uint8_t pad2;
} efi_time_t;

/* EFI GUID, used to identify systems, disk sets and volumes. */
typedef struct {
	uint32_t data1;
	uint16_t data2;
	uint16_t data3;
	uint8_t data4[8];
} efi_guid_t;

#define SADUMP_EFI_GUID_TEXT_REPR_LEN 36

/* Per-partition header written at the start of a sadump device. */
struct sadump_part_header {
#define SADUMP_SIGNATURE1 0x75646173
#define SADUMP_SIGNATURE2 0x0000706d
	uint32_t signature1;		/* sadu */
	uint32_t signature2;		/* mp\0\0 */
	uint32_t enable;		/* set sadump service */
	uint32_t reboot;		/* number of seconds until reboot. 1-3600 */
	uint32_t compress;		/* memory image format. */
	uint32_t recycle;		/* dump device recycle */
	uint32_t label[16];		/* reserve */
	efi_guid_t sadump_id;		/* system UUID */
	efi_guid_t disk_set_id;		/* disk set UUID */
	efi_guid_t vol_id;		/* device UUID */
	efi_time_t time_stamp;		/* time stamp */
	uint32_t set_disk_set;		/* device type */
#define SADUMP_MAX_DISK_SET_NUM 16
	uint32_t reserve;		/* Padding for Alignment */
	uint64_t used_device;		/* used device */
#define DUMP_PART_HEADER_MAGICNUM_SIZE 982
	uint32_t magicnum[DUMP_PART_HEADER_MAGICNUM_SIZE]; /* magic number */
};

struct sadump_volume_info {
	efi_guid_t id;			/* volume id */
	uint64_t vol_size;		/* device size */
	uint32_t status;		/* device status */
	uint32_t cache_size;		/* cache size */
};

struct sadump_disk_set_header {
	uint32_t disk_set_header_size;	/* disk set header size */
	uint32_t disk_num;		/* disk number */
	uint64_t disk_set_size;		/* disk set size */
#define DUMP_DEVICE_MAX 16
	struct sadump_volume_info vol_info[DUMP_DEVICE_MAX - 1]; /* struct VOL_INFO array */
};

/* Main dump header describing the memory image. */
struct sadump_header {
#define SADUMP_SIGNATURE "sadump\0\0"
	char signature[8];		/* = "sadump\0\0" */
	uint32_t header_version;	/* Dump header version */
	uint32_t reserve;		/* Padding for Alignment */
	efi_time_t timestamp;		/* Time stamp */
	uint32_t status;		/* Above flags */
	uint32_t compress;		/* Above flags */
	uint32_t block_size;		/* Size of a block in byte */
#define SADUMP_DEFAULT_BLOCK_SIZE 4096
	uint32_t extra_hdr_size;	/* Size of host dependent
					 * header in blocks (reserve) */
	uint32_t sub_hdr_size;		/* Size of arch dependent header in blocks */
	uint32_t bitmap_blocks;		/* Size of Memory bitmap in block */
	uint32_t dumpable_bitmap_blocks; /* Size of Memory bitmap in block */
	uint32_t max_mapnr;		/* = max_mapnr */
	uint32_t total_ram_blocks;	/* Size of Memory in block */
	uint32_t device_blocks;		/* Number of total blocks in the dump device */
	uint32_t written_blocks;	/* Number of written blocks */
	uint32_t current_cpu;		/* CPU# which handles dump */
	uint32_t nr_cpus;		/* Number of CPUs */
	/*
	 * The members from below are supported in header version 1
	 * and later.
	 */
	uint64_t max_mapnr_64;
	uint64_t total_ram_blocks_64;
	uint64_t device_blocks_64;
	uint64_t written_blocks_64;
};

struct sadump_apic_state {
	uint64_t ApicId;		/* Local Apic ID register */
	uint64_t Ldr;			/* Logical Destination Register */
};

/* SMRAM-saved CPU register state (firmware-defined layout). */
struct sadump_smram_cpu_state {
	uint64_t Reserved1[58];
	uint32_t GdtUpper, LdtUpper, IdtUpper;
	uint32_t Reserved2[3];
	uint64_t IoEip;
	uint64_t Reserved3[10];
	uint32_t Cr4;
	uint32_t Reserved4[18];
	uint32_t GdtLower;
	uint32_t GdtLimit;
	uint32_t IdtLower;
	uint32_t IdtLimit;
	uint32_t LdtLower;
	uint32_t LdtLimit;
	uint32_t LdtInfo;
	uint64_t Reserved5[6];
	uint64_t Eptp;
	uint32_t EptpSetting;
	uint32_t Reserved6[5];
	uint32_t Smbase;
	uint32_t SmmRevisionId;
	uint16_t IoInstructionRestart;
	uint16_t AutoHaltRestart;
	uint32_t Reserved7[6];
	uint32_t R15Lower, R15Upper, R14Lower, R14Upper;
	uint32_t R13Lower, R13Upper, R12Lower, R12Upper;
	uint32_t R11Lower, R11Upper, R10Lower, R10Upper;
	uint32_t R9Lower, R9Upper, R8Lower, R8Upper;
	uint32_t RaxLower, RaxUpper, RcxLower, RcxUpper;
	uint32_t RdxLower, RdxUpper, RbxLower, RbxUpper;
	uint32_t RspLower, RspUpper, RbpLower, RbpUpper;
	uint32_t RsiLower, RsiUpper, RdiLower, RdiUpper;
	uint32_t IoMemAddrLower, IoMemAddrUpper;
	uint32_t IoMisc, Es, Cs, Ss, Ds, Fs, Gs;
	uint32_t Ldtr, Tr;
	uint64_t Dr7, Dr6, Rip, Ia32Efer, Rflags;
	uint64_t Cr3, Cr0;
};

struct sadump_page_header {
	uint64_t page_flags;
	uint32_t size;
	uint32_t flags;
};

/* Header of a removable-media sadump. */
struct sadump_media_header {
	efi_guid_t sadump_id;		// system UUID
	efi_guid_t disk_set_id;		// disk set UUID
	efi_time_t time_stamp;		/* time stamp */
	char sequential_num;		// Medium sequential number
	char term_cord;			// Termination cord
	char disk_set_header_size;	// Size of original disk set header
	char disks_in_use;		// Number of used disks of original dump device
	char reserve[4044];		// reserve field
};

#define divideup(x, y) (((x) + ((y) - 1)) / (y))

#define SADUMP_PF_SECTION_NUM 4096

/* Per-member state of a multi-disk (disk set) dump. */
struct sadump_diskset_data {
	char *filename;
	int dfd;
	struct sadump_part_header *header;
	ulong data_offset;
};

/* Runtime state for an open sadump dumpfile. */
struct sadump_data {
	char *filename;
	ulong flags;
	int dfd;			/* dumpfile file descriptor */
	int machine_type;		/* machine type identifier */

	struct sadump_part_header *header;
	struct sadump_header *dump_header;
	struct sadump_disk_set_header *diskset_header;
	struct sadump_media_header *media_header;

	char *bitmap;
	char *dumpable_bitmap;
	size_t sub_hdr_offset;
	uint32_t smram_cpu_state_size;

	ulong data_offset;
	int block_size;
	int block_shift;
	char *page_buf;
	uint64_t *block_table;
	int sd_list_len;
	struct sadump_diskset_data **sd_list;

	/* Backup Region, First 640K of System RAM. */
#define KEXEC_BACKUP_SRC_END 0x0009ffff
	ulonglong backup_src_start;
	ulong backup_src_size;
	ulonglong backup_offset;

	uint64_t max_mapnr;
	ulong phys_base;
};

struct sadump_data *sadump_get_sadump_data(void);
int sadump_cleanup_sadump_data(void);
ulong sadump_identify_format(int *block_size);
int sadump_get_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *smram);
crash-utility-crash-9cd43f5/defs.h0000664000372000037200000076215115107550337016500 0ustar juerghjuergh/* defs.h - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
* Copyright (C) 2002-2020 David Anderson * Copyright (C) 2002-2020 Red Hat, Inc. All rights reserved. * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef GDB_COMMON #include #include #include #include #include #include #include #include #undef basename #if !defined(__USE_GNU) #define __USE_GNU #include #undef __USE_GNU #else #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* backtrace() */ #include #ifdef LZO #include #endif #ifdef SNAPPY #include #endif #ifdef ZSTD #include #endif #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif #undef TRUE #undef FALSE #define TRUE (1) #define FALSE (0) #define STR(x) #x #ifndef offsetof # define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER) #endif #if !defined(X86) && !defined(X86_64) && !defined(ALPHA) && !defined(PPC) && \ !defined(IA64) && !defined(PPC64) && !defined(S390) && !defined(S390X) && \ !defined(ARM) && !defined(ARM64) && !defined(MIPS) && !defined(MIPS64) && \ !defined(RISCV64) && !defined(LOONGARCH64) && !defined(SPARC64) #ifdef __alpha__ #define ALPHA #endif #ifdef __i386__ #define X86 #endif #ifdef __powerpc64__ #define PPC64 #else #ifdef __powerpc__ #define PPC #endif #endif #ifdef __ia64__ #define IA64 #endif #ifdef __s390__ #define S390 #endif #ifdef __s390x__ #define S390X #endif #ifdef __x86_64__ #define X86_64 #endif #ifdef __arm__ #define ARM #endif #ifdef __aarch64__ #define ARM64 
#endif #ifdef __mipsel__ #ifndef __mips64 #define MIPS #else #define MIPS64 #endif #endif #ifdef __sparc_v9__ #define SPARC64 #endif #if defined(__riscv) && (__riscv_xlen == 64) #define RISCV64 #endif #ifdef __loongarch64 #define LOONGARCH64 #endif #endif #ifdef X86 #define NR_CPUS (256) #endif #ifdef X86_64 #define NR_CPUS (8192) #endif #ifdef ALPHA #define NR_CPUS (64) #endif #ifdef PPC #define NR_CPUS (32) #endif #ifdef IA64 #define NR_CPUS (4096) #endif #ifdef PPC64 #define NR_CPUS (8192) #endif #ifdef S390 #define NR_CPUS (512) #endif #ifdef S390X #define NR_CPUS (512) #endif #ifdef ARM #define NR_CPUS (32) #endif #ifdef ARM64 #define NR_CPUS (4096) /* TBD */ #endif #ifdef MIPS #define NR_CPUS (32) #endif #ifdef MIPS64 #define NR_CPUS (256) #endif #ifdef SPARC64 #define NR_CPUS (4096) #endif #ifdef RISCV64 #define NR_CPUS (256) #endif #ifdef LOONGARCH64 #define NR_CPUS (256) #endif #define NR_DEVICE_DUMPS (64) /* Some architectures require memory accesses to be aligned. */ #if defined(SPARC64) #define NEED_ALIGNED_MEM_ACCESS #endif #define BUFSIZE (1500) #define NULLCHAR ('\0') #define MAXARGS (100) /* max number of arguments to one function */ #define MAXARGLEN (40) /* max length of argument */ #define HIST_BLKSIZE (4096) static inline int string_exists(char *s) { return (s ? 
TRUE : FALSE); } #define STREQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ (strcmp((char *)(A), (char *)(B)) == 0)) #define STRNEQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ (strncmp((char *)(A), (char *)(B), strlen((char *)(B))) == 0)) #define BZERO(S, N) (memset(S, NULLCHAR, N)) #define BCOPY(S, D, C) (memcpy(D, S, C)) #define BNEG(S, N) (memset(S, 0xff, N)) #define BEEP() fprintf(stderr, "%c", 0x7) #define LASTCHAR(s) (s[strlen(s)-1]) #define FIRSTCHAR(s) (s[0]) #define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"')) #define SINGLE_QUOTED_STRING(s) ((FIRSTCHAR(s) == '\'') && (LASTCHAR(s) == '\'')) #define PATHEQ(A, B) ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0)) #ifdef roundup #undef roundup #endif #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) typedef uint64_t physaddr_t; #define PADDR_NOT_AVAILABLE (0x1ULL) #define KCORE_USE_VADDR (-1ULL) typedef unsigned long long int ulonglong; struct number_option { ulong num; ulonglong ll_num; ulong retflags; }; /* * program_context flags */ #define LIVE_SYSTEM (0x1ULL) #define TTY (0x2ULL) #define RUNTIME (0x4ULL) #define IN_FOREACH (0x8ULL) #define MCLXCD (0x10ULL) #define CMDLINE_IFILE (0x20ULL) #define MFD_RDWR (0x40ULL) #define KVMDUMP (0x80ULL) #define SILENT (0x100ULL) #define SADUMP (0x200ULL) #define HASH (0x400ULL) #define SCROLL (0x800ULL) #define NO_CONSOLE (0x1000ULL) #define RUNTIME_IFILE (0x2000ULL) #define DROP_CORE (0x4000ULL) #define LKCD (0x8000ULL) #define GDB_INIT (0x10000ULL) #define IN_GDB (0x20000ULL) #define RCLOCAL_IFILE (0x40000ULL) #define RCHOME_IFILE (0x80000ULL) #define VMWARE_VMSS (0x100000ULL) #define READLINE (0x200000ULL) #define _SIGINT_ (0x400000ULL) #define IN_RESTART (0x800000ULL) #define KERNEL_DEBUG_QUERY (0x1000000ULL) #define DEVMEM (0x2000000ULL) #define REM_LIVE_SYSTEM (0x4000000ULL) #define NAMELIST_LOCAL (0x8000000ULL) #define LIVE_RAMDUMP (0x10000000ULL) #define NAMELIST_SAVED (0x20000000ULL) #define 
DUMPFILE_SAVED (0x40000000ULL) #define UNLINK_NAMELIST (0x80000000ULL) #define NAMELIST_UNLINKED (0x100000000ULL) #define REM_MCLXCD (0x200000000ULL) #define REM_LKCD (0x400000000ULL) #define NAMELIST_NO_GZIP (0x800000000ULL) #define UNLINK_MODULES (0x1000000000ULL) #define S390D (0x2000000000ULL) #define REM_S390D (0x4000000000ULL) #define SYSRQ (0x8000000000ULL) #define KDUMP (0x10000000000ULL) #define NETDUMP (0x20000000000ULL) #define REM_NETDUMP (0x40000000000ULL) #define SYSMAP (0x80000000000ULL) #define SYSMAP_ARG (0x100000000000ULL) #define MEMMOD (0x200000000000ULL) #define MODPRELOAD (0x400000000000ULL) #define DISKDUMP (0x800000000000ULL) #define DATADEBUG (0x1000000000000ULL) #define FINDKERNEL (0x2000000000000ULL) #define VERSION_QUERY (0x4000000000000ULL) #define READNOW (0x8000000000000ULL) #define NOCRASHRC (0x10000000000000ULL) #define INIT_IFILE (0x20000000000000ULL) #define XENDUMP (0x40000000000000ULL) #define XEN_HYPER (0x80000000000000ULL) #define XEN_CORE (0x100000000000000ULL) #define PLEASE_WAIT (0x200000000000000ULL) #define IFILE_ERROR (0x400000000000000ULL) #define KERNTYPES (0x800000000000000ULL) #define MINIMAL_MODE (0x1000000000000000ULL) #define CRASHBUILTIN (0x2000000000000000ULL) #define PRELOAD_EXTENSIONS \ (0x4000000000000000ULL) #define PROC_KCORE (0x8000000000000000ULL) #define ACTIVE() (pc->flags & LIVE_SYSTEM) #define LOCAL_ACTIVE() ((pc->flags & (LIVE_SYSTEM|LIVE_RAMDUMP)) == LIVE_SYSTEM) #define DUMPFILE() (!(pc->flags & LIVE_SYSTEM)) #define LIVE() (pc->flags2 & LIVE_DUMP || pc->flags & LIVE_SYSTEM) #define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP|CRASHBUILTIN|KVMDUMP|PROC_KCORE|SADUMP|VMWARE_VMSS|LIVE_RAMDUMP) #define DUMPFILE_TYPES (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP|KVMDUMP|SADUMP|VMWARE_VMSS|LIVE_RAMDUMP) #define REMOTE() (pc->flags2 & REMOTE_DAEMON) #define REMOTE_ACTIVE() (pc->flags & REM_LIVE_SYSTEM) #define REMOTE_DUMPFILE() \ (pc->flags & 
(REM_NETDUMP|REM_MCLXCD|REM_LKCD|REM_S390D)) #define REMOTE_MEMSRC() (REMOTE_ACTIVE() || REMOTE_PAUSED() || REMOTE_DUMPFILE()) #define LKCD_DUMPFILE() (pc->flags & (LKCD|REM_LKCD)) #define NETDUMP_DUMPFILE() (pc->flags & (NETDUMP|REM_NETDUMP)) #define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP) #define KDUMP_DUMPFILE() (pc->flags & KDUMP) #define XENDUMP_DUMPFILE() (pc->flags & XENDUMP) #define XEN_HYPER_MODE() (pc->flags & XEN_HYPER) #define SYSRQ_TASK(X) ((pc->flags & SYSRQ) && is_task_active(X)) #define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE) #define LKCD_KERNTYPES() (pc->flags & KERNTYPES) #define KVMDUMP_DUMPFILE() (pc->flags & KVMDUMP) #define SADUMP_DUMPFILE() (pc->flags & SADUMP) #define VMSS_DUMPFILE() (pc->flags & VMWARE_VMSS) #define QEMU_MEM_DUMP_NO_VMCOREINFO() \ ((pc->flags2 & (QEMU_MEM_DUMP_ELF|QEMU_MEM_DUMP_COMPRESSED)) && !(pc->flags2 & VMCOREINFO)) #define NETDUMP_LOCAL (0x1) /* netdump_data flags */ #define NETDUMP_REMOTE (0x2) #define VMCORE_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL)) #define NETDUMP_ELF32 (0x4) #define NETDUMP_ELF64 (0x8) #define PARTIAL_DUMP (0x10) /* netdump or diskdump */ #define KDUMP_ELF32 (0x20) #define KDUMP_ELF64 (0x40) #define KDUMP_LOCAL (0x80) #define KCORE_LOCAL (0x100) #define KCORE_ELF32 (0x200) #define KCORE_ELF64 (0x400) #define QEMU_MEM_DUMP_KDUMP_BACKUP \ (0x800) #define KVMDUMP_LOCAL (0x1) #define KVMDUMP_VALID() (kvm->flags & (KVMDUMP_LOCAL)) #define DUMPFILE_FORMAT(flags) ((flags) & \ (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64)) #define DISKDUMP_LOCAL (0x1) #define KDUMP_CMPRS_LOCAL (0x2) #define ERROR_EXCLUDED (0x4) #define ZERO_EXCLUDED (0x8) #define DUMPFILE_SPLIT (0x10) #define NO_ELF_NOTES (0x20) #define LZO_SUPPORTED (0x40) #define SNAPPY_SUPPORTED (0x80) #define ZSTD_SUPPORTED (0x100) #define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) #define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL) #define KDUMP_SPLIT() (dd->flags & DUMPFILE_SPLIT) #define XENDUMP_LOCAL 
(0x1) #define XENDUMP_VALID() (xd->flags & XENDUMP_LOCAL) #define SADUMP_LOCAL (0x1) #define SADUMP_DISKSET (0x2) #define SADUMP_MEDIA (0x4) #define SADUMP_ZERO_EXCLUDED (0x8) #define SADUMP_KDUMP_BACKUP (0x10) #define SADUMP_VALID() (sd->flags & SADUMP_LOCAL) #define CRASHDEBUG(x) (pc->debug >= (x)) #define CRASHDEBUG_SUSPEND(X) { pc->debug_save = pc->debug; pc->debug = X; } #define CRASHDEBUG_RESTORE() { pc->debug = pc->debug_save; } #define VERBOSE (0x1) #define ADDRESS_SPECIFIED (0x2) #define FAULT_ON_ERROR (0x1) #define RETURN_ON_ERROR (0x2) #define QUIET (0x4) #define HEX_BIAS (0x8) #define LONG_LONG (0x10) #define RETURN_PARTIAL (0x20) #define NO_DEVMEM_SWITCH (0x40) #define SEEK_ERROR (-1) #define READ_ERROR (-2) #define WRITE_ERROR (-3) #define PAGE_EXCLUDED (-4) #define PAGE_INCOMPLETE (-5) #define RESTART() (longjmp(pc->main_loop_env, 1)) #define RESUME_FOREACH() (longjmp(pc->foreach_loop_env, 1)) #define INFO (1) #define FATAL (2) #define FATAL_RESTART (3) #define WARNING (4) #define NOTE (5) #define CONT (6) #define FATAL_ERROR(x) (((x) == FATAL) || ((x) == FATAL_RESTART)) #define CONSOLE_OFF(x) ((x) = console_off()) #define CONSOLE_ON(x) (console_on(x)) #define RADIX(X) (X) #define NUM_HEX (0x1) #define NUM_DEC (0x2) #define NUM_EXPR (0x4) #define NUM_ANY (NUM_HEX|NUM_DEC|NUM_EXPR) /* * program context redirect flags */ #define FROM_COMMAND_LINE (0x1) #define FROM_INPUT_FILE (0x2) #define REDIRECT_NOT_DONE (0x4) #define REDIRECT_TO_PIPE (0x8) #define REDIRECT_TO_STDPIPE (0x10) #define REDIRECT_TO_FILE (0x20) #define REDIRECT_FAILURE (0x40) #define REDIRECT_SHELL_ESCAPE (0x80) #define REDIRECT_SHELL_COMMAND (0x100) #define REDIRECT_PID_KNOWN (0x200) #define REDIRECT_MULTI_PIPE (0x400) #define PIPE_OPTIONS (FROM_COMMAND_LINE | FROM_INPUT_FILE | REDIRECT_TO_PIPE | \ REDIRECT_TO_STDPIPE | REDIRECT_TO_FILE) #define DEFAULT_REDHAT_DEBUG_LOCATION "/usr/lib/debug/lib/modules" #define MEMORY_DRIVER_MODULE "crash" #define MEMORY_DRIVER_DEVICE "/dev/crash" 
#define MEMORY_DRIVER_DEVICE_MODE (S_IFCHR|S_IRUSR)

/*
 *  structure definitions
 */

/*
 *  Global state for the crash session itself (the global "pc" pointer):
 *  invocation arguments, file descriptors/streams, per-command bookkeeping,
 *  and the session-wide flags/flags2 bitmasks defined inline below.
 */
struct program_context {
	char *program_name;             /* this program's name */
	char *program_path;             /* unadulterated argv[0] */
	char *program_version;          /* this program's version */
	char *gdb_version;              /* embedded gdb version */
	char *prompt;                   /* this program's prompt */
	unsigned long long flags;       /* flags from above */
	char *namelist;                 /* linux namelist */
	char *dumpfile;                 /* dumpfile or /dev/kmem */
	char *live_memsrc;              /* live memory driver */
	char *system_map;               /* get symbol values from System.map */
	char *namelist_debug;           /* namelist containing debug data */
	char *debuginfo_file;           /* separate debuginfo file */
	char *memory_module;            /* alternative to mem.c driver */
	char *memory_device;            /* alternative to /dev/[k]mem device */
	char *machine_type;             /* machine's processor type */
	char *editing_mode;             /* readline vi or emacs */
	char *server;                   /* network daemon */
	char *server_memsrc;            /* memory source on server */
	char *server_namelist;          /* kernel namelist on server */
	int nfd;                        /* linux namelist fd */
	int mfd;                        /* /dev/mem fd */
	int kfd;                        /* /dev/kmem fd */
	int dfd;                        /* dumpfile fd */
	int confd;                      /* console fd */
	int sockfd;                     /* network daemon socket */
	ushort port;                    /* network daemon port */
	int rmfd;                       /* remote server memory source fd */
	int rkfd;                       /* remote server /dev/kmem fd */
	ulong program_pid;              /* program pid */
	ulong server_pid;               /* server pid */
	ulong rcvbufsize;               /* client-side receive buffer size */
	char *home;                     /* user's home directory */
	char command_line[BUFSIZE];     /* possibly parsed input command line */
	char orig_line[BUFSIZE];        /* original input line */
	char *readline;                 /* pointer to last readline() return */
	char my_tty[10];                /* real tty name (shown by ps -ef) */
	ulong debug;                    /* level of debug */
	ulong debug_save;               /* saved level for debug-suspend */
	char *console;                  /* current debug console device */
	char *redhat_debug_loc;         /* location of matching debug objects */
	int pipefd[2];                  /* output pipe file descriptors */
	FILE *nullfp;                   /* bitbucket */
	FILE *stdpipe;                  /* standard pipe for output */
	FILE *pipe;                     /* command line specified pipe */
	FILE *ofile;                    /* command line specified output file */
	FILE *ifile;                    /* command line specified input file */
	FILE *ifile_pipe;               /* output pipe specified from file */
	FILE *ifile_ofile;              /* output file specified from file */
	FILE *symfile;                  /* symbol table data file */
	FILE *symfile2;                 /* alternate access to above */
	FILE *tmpfile;                  /* tmpfile for selective data output */
	FILE *saved_fp;                 /* for printing while parsing tmpfile */
	FILE *tmp_fp;                   /* stored tmpfile pointer */
	char *input_file;               /* input file specified at invocation */
	FILE *tmpfile2;                 /* tmpfile2 does not use save_fp! */
	int eoc_index;                  /* end of redirected command index */
	int scroll_command;             /* default scroll command for output */
#define SCROLL_NONE       0
#define SCROLL_LESS       1
#define SCROLL_MORE       2
#define SCROLL_CRASHPAGER 3
	ulong redirect;                 /* per-cmd origin and output flags */
	pid_t stdpipe_pid;              /* per-cmd standard output pipe's pid */
	pid_t pipe_pid;                 /* per-cmd output pipe's pid */
	pid_t pipe_shell_pid;           /* per-cmd output pipe's shell pid */
	char pipe_command[BUFSIZE];     /* pipe command line */
	struct command_table_entry *cmd_table;  /* linux/xen command table */
	char *curcmd;                   /* currently-executing command */
	char *lastcmd;                  /* previously-executed command */
	ulong cmdgencur;                /* current command generation number */
	ulong curcmd_flags;             /* general purpose per-command flag */
/* curcmd_flags bit values */
#define XEN_MACHINE_ADDR    (0x1)
#define REPEAT              (0x2)
#define IDLE_TASK_SHOWN     (0x4)
#define TASK_SPECIFIED      (0x8)
#define MEMTYPE_UVADDR     (0x10)
#define MEMTYPE_FILEADDR   (0x20)
#define HEADER_PRINTED     (0x40)
#define BAD_INSTRUCTION    (0x80)
#define UD2A_INSTRUCTION  (0x100)
#define IRQ_IN_USE        (0x200)
#define NO_MODIFY         (0x400)
#define IGNORE_ERRORS     (0x800)
#define FROM_RCFILE      (0x1000)
#define MEMTYPE_KVADDR   (0x2000)
#define MOD_SECTIONS     (0x4000)
#define MOD_READNOW      (0x8000)
#define MM_STRUCT_FORCE (0x10000)
#define CPUMASK         (0x20000)
#define PARTIAL_READ_OK (0x40000)
	ulonglong curcmd_private;       /* general purpose per-command info */
	int cur_gdb_cmd;                /* current gdb command */
	int last_gdb_cmd;               /* previously-executed gdb command */
	int sigint_cnt;                 /* number of ignored SIGINTs */
	struct gnu_request *cur_req;    /* current gdb gnu_request */
	struct sigaction sigaction;     /* general usage sigaction. */
	struct sigaction gdb_sigaction; /* gdb's SIGINT sigaction. */
	jmp_buf main_loop_env;          /* longjmp target default */
	jmp_buf foreach_loop_env;       /* longjmp target within foreach */
	struct termios termios_orig;    /* non-raw settings */
	struct termios termios_raw;     /* while gathering command input */
	int ncmds;                      /* number of commands in menu */
	char **cmdlist;                 /* current list of available commands */
	int cmdlistsz;                  /* space available in cmdlist */
	unsigned output_radix;          /* current gdb output_radix */
	void *sbrk;                     /* current sbrk value */
	struct extension_table *curext; /* extension being loaded */
	int (*readmem)(int, void *, int, ulong, physaddr_t);  /* memory access */
	int (*writemem)(int, void *, int, ulong, physaddr_t); /* memory access */
	ulong ifile_in_progress;        /* original xxx_IFILE flags */
	off_t ifile_offset;             /* current offset into input file */
	char *runtime_ifile_cmd;        /* runtime command using input file */
	char *kvmdump_mapfile;          /* storage of physical to file offsets */
	ulonglong flags2;               /* flags overrun */
/* pc->flags2 bit values */
#define FLAT          (0x01ULL)
#define ELF_NOTES     (0x02ULL)
#define GET_OSRELEASE (0x04ULL)
#define REMOTE_DAEMON (0x08ULL)
#define ERASEINFO_DATA (0x10ULL)
#define GDB_CMD_MODE  (0x20ULL)
#define LIVE_DUMP     (0x40ULL)
#define FLAT_FORMAT() (pc->flags2 & FLAT)
#define ELF_NOTES_VALID() (pc->flags2 & ELF_NOTES)
#define RADIX_OVERRIDE (0x80ULL)
#define QEMU_MEM_DUMP_ELF (0x100ULL)
#define GET_LOG (0x200ULL)
#define VMCOREINFO (0x400ULL)
#define ALLOW_FP (0x800ULL)
#define REM_PAUSED_F (0x1000ULL)
#define RAMDUMP (0x2000ULL)
#define REMOTE_PAUSED() (pc->flags2 & REM_PAUSED_F)
#define OFFLINE_HIDE (0x4000ULL)
#define INCOMPLETE_DUMP (0x8000ULL)
#define is_incomplete_dump() (pc->flags2 & INCOMPLETE_DUMP)
#define QEMU_MEM_DUMP_COMPRESSED (0x10000ULL)
#define SNAP (0x20000ULL)
#define EXCLUDED_VMEMMAP (0x40000ULL)
#define is_excluded_vmemmap() (pc->flags2 & EXCLUDED_VMEMMAP)
#define MEMSRC_LOCAL (0x80000ULL)
#define REDZONE (0x100000ULL)
#define VMWARE_VMSS_GUESTDUMP (0x200000ULL)
	char *cleanup;                  /* NOTE(review): purpose not visible here -- see users */
	char *namelist_orig;            /* presumably pre-dereference namelist path -- verify */
	char *namelist_debug_orig;      /* presumably pre-dereference debug namelist -- verify */
	FILE *args_ifile;               /* per-command args input file */
	void (*cmd_cleanup)(void *);    /* per-command cleanup function */
	void *cmd_cleanup_arg;          /* optional cleanup function argument */
	ulong scope;                    /* optional text context address */
	ulong nr_hash_queues;           /* hash queue head count */
	char *(*read_vmcoreinfo)(const char *);
	FILE *error_fp;                 /* error() message direction */
	char *error_path;               /* stderr path information */
};

#define READMEM  pc->readmem

typedef void (*cmd_func_t)(void);

struct command_table_entry {               /* one for each command in menu */
	char *name;
	cmd_func_t func;
	char **help_data;
	ulong flags;
};

/* Parsing state for a command-line argument input ("args") file. */
struct args_input_file {
	int index;
	int args_used;
	int is_gdb_cmd;
	int in_expression;
	int start;
	int resume;
	char *fileptr;
};

#define REFRESH_TASK_TABLE (0x1)           /* command_table_entry flags */
#define HIDDEN_COMMAND     (0x2)
#define CLEANUP            (0x4)           /* for extensions only */
#define MINIMAL            (0x8)

/*
 *  A linked list of extension table structures keeps track of the current
 *  set of shared library extensions.
 */
struct extension_table {
	void *handle;                           /* handle from dlopen() */
	char *filename;                         /* name of shared library */
	struct command_table_entry *command_table;  /* list of commands */
	ulong flags;                            /* registration flags */
	struct extension_table *next, *prev;    /* bookkeeping */
};

#define REGISTERED             (0x1)       /* extension_table flags */
#define DUPLICATE_COMMAND_NAME (0x2)
#define NO_MINIMAL_COMMANDS    (0x4)

/* Mirror of the kernel's struct new_utsname (fixed 65-byte fields). */
struct new_utsname {
	char sysname[65];
	char nodename[65];
	char release[65];
	char version[65];
	char machine[65];
	char domainname[65];
};

/* kt->flags bit values */
#define NO_MODULE_ACCESS (0x1)
#define TVEC_BASES_V1    (0x2)
#define GCC_3_2          (0x4)
#define GCC_3_2_3        (0x8)
#define GCC_2_96        (0x10)
#define RA_SEEK         (0x20)
#define NO_RA_SEEK      (0x40)
#define KALLSYMS_V1     (0x80)
#define NO_KALLSYMS    (0x100)
#define PER_CPU_OFF    (0x200)
#define SMP            (0x400)
#define GCC_3_3_2      (0x800)
#define KMOD_V1       (0x1000)
#define KMOD_V2       (0x2000)
#define KALLSYMS_V2   (0x2000)    /* deliberately shares the KMOD_V2 bit */
#define TVEC_BASES_V2 (0x4000)
#define GCC_3_3_3     (0x8000)
#define USE_OLD_BT   (0x10000)
#define USE_OPT_BT   (0x10000)    /* deliberately shares the USE_OLD_BT bit */
#define ARCH_XEN     (0x20000)
#define NO_IKCONFIG  (0x40000)
#define DWARF_UNWIND (0x80000)
#define NO_DWARF_UNWIND       (0x100000)
#define DWARF_UNWIND_MEMORY   (0x200000)
#define DWARF_UNWIND_EH_FRAME (0x400000)
#define DWARF_UNWIND_CAPABLE  (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME)
#define DWARF_UNWIND_MODULES  (0x800000)
#define BUGVERBOSE_OFF       (0x1000000)
#define RELOC_SET            (0x2000000)
#define RELOC_FORCE          (0x4000000)
#define ARCH_OPENVZ          (0x8000000)
#define ARCH_PVOPS          (0x10000000)
#define PRE_KERNEL_INIT     (0x20000000)
#define ARCH_PVOPS_XEN      (0x40000000)
#define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3)

/* flags2 */
#define RELOC_AUTO           (0x1ULL)
#define KASLR                (0x2ULL)
#define KASLR_CHECK          (0x4ULL)
#define GET_TIMESTAMP        (0x8ULL)
#define TVEC_BASES_V3       (0x10ULL)
#define TIMER_BASES         (0x20ULL)
#define IRQ_DESC_TREE_RADIX (0x40ULL)
#define IRQ_DESC_TREE_XARRAY (0x80ULL)
#define KMOD_PAX           (0x100ULL)
#define KMOD_MEMORY        (0x200ULL)
#define IRQ_DESC_TREE_MAPLE (0x400ULL)

#define XEN()       (kt->flags & ARCH_XEN)
#define OPENVZ()    (kt->flags & ARCH_OPENVZ)
#define PVOPS()     (kt->flags & ARCH_PVOPS)
#define PVOPS_XEN() (kt->flags & ARCH_PVOPS_XEN)
#define PAX_MODULE_SPLIT() (kt->flags2 & KMOD_PAX)
#define MODULE_MEMORY()    (kt->flags2 & KMOD_MEMORY)

/* Xen machine-address/pseudo-physical conversion helpers. */
#define XEN_MACHINE_TO_MFN(m)  ((ulonglong)(m) >> PAGESHIFT())
#define XEN_PFN_TO_PSEUDO(p)   ((ulonglong)(p) << PAGESHIFT())

#define XEN_MFN_NOT_FOUND      (~0UL)
#define XEN_PFNS_PER_PAGE      (PAGESIZE()/sizeof(ulong))
#define XEN_FOREIGN_FRAME      (1UL << (BITS()-1))

#define XEN_MACHADDR_NOT_FOUND (~0ULL)

#define XEN_P2M_PER_PAGE      (PAGESIZE() / sizeof(unsigned long))
#define XEN_P2M_MID_PER_PAGE  (PAGESIZE() / sizeof(unsigned long *))
#define XEN_P2M_TOP_PER_PAGE  (PAGESIZE() / sizeof(unsigned long **))

/*
 *  Per-session data about the target kernel (the global "kt" pointer):
 *  text boundaries, version info, per-cpu data, Xen p2m caches, and
 *  ikconfig/vmcoreinfo bookkeeping.
 */
struct kernel_table {                   /* kernel data */
	ulong flags;
	ulong stext;
	ulong etext;
	ulong stext_init;
	ulong etext_init;
	ulong init_begin;
	ulong init_end;
	ulong end;
	int cpus;
	char *cpus_override;
	void (*display_bh)(void);
	ulong module_list;
	ulong kernel_module;
	int mods_installed;
	struct timespec date;
	char proc_version[BUFSIZE];
	struct new_utsname utsname;
	uint kernel_version[3];
	uint gcc_version[3];
	int runq_siblings;
	int kernel_NR_CPUS;
	long __per_cpu_offset[NR_CPUS];
	long *__rq_idx;
	long *__cpu_idx;
	ulong *cpu_flags;
/* cpu_flags[] bit values */
#define POSSIBLE  (0x1)
#define PRESENT   (0x2)
#define ONLINE    (0x4)
#define NMI       (0x8)
#define POSSIBLE_MAP (POSSIBLE)
#define PRESENT_MAP  (PRESENT)
#define ONLINE_MAP   (ONLINE)
#define ACTIVE_MAP  (0x10)
	int BUG_bytes;
	ulong xen_flags;
/* xen_flags bit values */
#define WRITABLE_PAGE_TABLES  (0x1)
#define SHADOW_PAGE_TABLES    (0x2)
#define CANONICAL_PAGE_TABLES (0x4)
#define XEN_SUSPEND           (0x8)
	char *m2p_page;
	ulong phys_to_machine_mapping;
	ulong p2m_table_size;
#define P2M_MAPPING_CACHE (512)
	struct p2m_mapping_cache {
		ulong mapping;
		ulong pfn;
		ulong start;
		ulong end;
	} p2m_mapping_cache[P2M_MAPPING_CACHE];
/* pfn of the p2m page cached in slot c: stored directly for pvops Xen,
 * otherwise derived from the mapping's offset into the p2m table. */
#define P2M_MAPPING_PAGE_PFN(c) \
	(PVOPS_XEN() ? kt->p2m_mapping_cache[c].pfn : \
	(((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \
	* XEN_PFNS_PER_PAGE))
	ulong last_mapping_read;
	ulong p2m_cache_index;
	ulong p2m_pages_searched;
	ulong p2m_mfn_cache_hits;
	ulong p2m_page_cache_hits;
	ulong relocate;
	char *module_tree;
	struct pvops_xen_info {
		int p2m_top_entries;
		ulong p2m_top;
		ulong p2m_mid_missing;
		ulong p2m_missing;
	} pvops_xen;
	int highest_irq;
#define IKCONFIG_AVAIL  0x1   /* kernel contains ikconfig data */
#define IKCONFIG_LOADED 0x2   /* ikconfig data is currently loaded */
	int ikconfig_flags;
	int ikconfig_ents;
	char *hypervisor;
	struct vmcoreinfo_data {
		ulong log_buf_SYMBOL;
		ulong log_end_SYMBOL;
		ulong log_buf_len_SYMBOL;
		ulong logged_chars_SYMBOL;
		ulong log_first_idx_SYMBOL;
		ulong log_next_idx_SYMBOL;
		long log_SIZE;
		long log_ts_nsec_OFFSET;
		long log_len_OFFSET;
		long log_text_len_OFFSET;
		long log_dict_len_OFFSET;
		ulong phys_base_SYMBOL;
		ulong _stext_SYMBOL;
	} vmcoreinfo;
	ulonglong flags2;
	char *source_tree;
	struct timespec boot_date;
};

/*
 *  Aid for the two versions of the kernel's module list linkage.
 */
#define NEXT_MODULE(next_module, modbuf)                             \
{                                                                    \
	switch (kt->flags & (KMOD_V1|KMOD_V2))                       \
	{                                                            \
	case KMOD_V1:                                                \
		next_module = ULONG(modbuf + OFFSET(module_next));   \
		break;                                               \
	case KMOD_V2:                                                \
		next_module = ULONG(modbuf + OFFSET(module_list));   \
		if (next_module != kt->kernel_module)                \
			next_module -= OFFSET(module_list);          \
		break;                                               \
	}                                                            \
}

/* Pack (major, minor, patch) triples for easy comparison. */
#define THIS_KERNEL_VERSION ((kt->kernel_version[0] << 24) + \
			     (kt->kernel_version[1] << 16) + \
			     (kt->kernel_version[2]))
#define LINUX(x,y,z) (((uint)(x) << 24) + ((uint)(y) << 16) + (uint)(z))

#define THIS_GCC_VERSION    ((kt->gcc_version[0] << 16) + \
			     (kt->gcc_version[1] << 8) + \
			     (kt->gcc_version[2]))
#define GCC(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z))

#define IS_KERNEL_STATIC_TEXT(x) (((ulong)(x) >= kt->stext) && \
				  ((ulong)(x) < kt->etext))

#define TASK_COMM_LEN 16     /* task command name length including NULL */

struct task_context {                     /* context stored for each task */
	ulong task;
	ulong thread_info;
	ulong pid;
	char comm[TASK_COMM_LEN+1];
	int processor;
	ulong ptask;
	ulong mm_struct;
	struct task_context *tc_next;
};

struct tgid_context {               /* tgid and task stored for each task */
	ulong tgid;
	ulong task;
	long rss_cache;
};

/*
 *  Session-wide task bookkeeping (the global "tt" pointer): the cached
 *  task_context array, panic/active task discovery state, and read caches
 *  for task_struct/thread_info/mm_struct.
 */
struct task_table {                      /* kernel/local task table data */
	struct task_context *current;
	struct task_context *context_array;
	void (*refresh_task_table)(void);
	ulong flags;
	ulong task_start;
	ulong task_end;
	void *task_local;
	int max_tasks;
	int nr_threads;
	ulong running_tasks;
	ulong retries;
	ulong panicmsg;
	int panic_processor;
	ulong *idle_threads;
	ulong *panic_threads;
	ulong *active_set;
	ulong *panic_ksp;
	ulong *hardirq_ctx;
	ulong *hardirq_tasks;
	ulong *softirq_ctx;
	ulong *softirq_tasks;
	ulong panic_task;
	ulong this_task;
	int pidhash_len;
	ulong pidhash_addr;
	ulong last_task_read;
	ulong last_thread_info_read;
	ulong last_mm_read;
	char *task_struct;
	char *thread_info;
	char *mm_struct;
	ulong init_pid_ns;
	struct tgid_context *tgid_array;
	struct tgid_context *last_tgid;
	ulong tgid_searches;
	ulong tgid_cache_hits;
	long filepages;
	long anonpages;
	ulong stack_end_magic;
	ulong pf_kthread;
	ulong pid_radix_tree;
	int callbacks;
	struct task_context **context_by_task; /* task_context sorted by task addr */
	ulong pid_xarray;
	long shmempages;
};

/* tt->flags bit values */
#define TASK_INIT_DONE       (0x1)
#define TASK_ARRAY_EXISTS    (0x2)
#define PANIC_TASK_NOT_FOUND (0x4)
#define TASK_REFRESH         (0x8)
#define TASK_REFRESH_OFF    (0x10)
#define PANIC_KSP           (0x20)
#define ACTIVE_SET          (0x40)
#define POPULATE_PANIC      (0x80)
#define PIDHASH            (0x100)
#define PID_HASH           (0x200)
#define THREAD_INFO        (0x400)
#define IRQSTACKS          (0x800)
#define TIMESPEC          (0x1000)
#define NO_TIMESPEC       (0x2000)
#define ACTIVE_ONLY       (0x4000)
#define START_TIME_NSECS  (0x8000)
#define THREAD_INFO_IN_TASK (0x10000)
#define PID_RADIX_TREE    (0x20000)
#define INDEXED_CONTEXTS  (0x40000)
#define PID_XARRAY        (0x80000)

#define TASK_SLUSH (20)

#define NO_PROC_ID 0xFF       /* No processor magic marker (from kernel) */

/*
 *  Global "tt" points to task_table
 */
#define CURRENT_CONTEXT() (tt->current)
#define CURRENT_TASK()    (tt->current->task)
#define CURRENT_PID()     (tt->current->pid)
#define CURRENT_COMM()    (tt->current->comm)
#define RUNNING_TASKS()   (tt->running_tasks)
#define FIRST_CONTEXT()   (tt->context_array)

#define NO_PID  ((ulong)-1)
#define NO_TASK (0)

#define IS_TASK_ADDR(X)  (machdep->is_task_addr(X))
#define GET_STACKBASE(X) (machdep->get_stackbase(X))
#define GET_STACKTOP(X)  (machdep->get_stacktop(X))
#define STACKSIZE()      (machdep->stacksize)
#define LONGS_PER_STACK  (machdep->stacksize/sizeof(ulong))

#define INSTACK(X,BT) \
	(((ulong)(X) >= (BT)->stackbase) && ((ulong)(X) < (BT)->stacktop))

#define ALIGNED_STACK_OFFSET(task) ((ulong)(task) & (STACKSIZE()-1))

#define BITS()   (machdep->bits)
#define BITS32() (machdep->bits == 32)
#define BITS64() (machdep->bits == 64)
#define IS_KVADDR(X)   (machdep->is_kvaddr(X))
#define IS_UVADDR(X,C) (machdep->is_uvaddr(X,C))

/* kill() with signal 0 only checks for process existence. */
#define PID_ALIVE(x) (kill(x, 0) == 0)

/* Layout-compatible stand-in for the kernel's struct list_head. */
struct kernel_list_head {
	struct kernel_list_head *next, *prev;
};

/* A saved stack-pointer/instruction-pointer pair. */
struct stack_hook {
	ulong esp;
	ulong eip;
};

/*
 *  Everything a backtrace pass needs for one task: stack bounds, a local
 *  copy of the stack, and per-invocation option flags.
 */
struct bt_info {
	ulong task;
	ulonglong flags;
	ulong instptr;
	ulong stkptr;
	ulong bptr;
	ulong stackbase;
	ulong stacktop;
	char *stackbuf;
	struct task_context *tc;
	struct stack_hook *hp;
	struct stack_hook *textlist;
	struct reference *ref;
	ulong frameptr;
	char *call_target;
	void *machdep;
	ulong debug;
	ulong eframe_ip;
	ulong radix;
	ulong *cpumask;
	bool need_free;
};

/* Accept either a stack offset or an absolute stack address (OFF). */
#define STACK_OFFSET_TYPE(OFF) \
	(((ulong)(OFF) > STACKSIZE()) ? \
	(ulong)((ulong)(OFF) - (ulong)(bt->stackbase)) : (ulong)(OFF))

#define GET_STACK_ULONG(OFF) \
	*((ulong *)((char *)(&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(OFF))])))

#define GET_STACK_DATA(OFF, LOC, SZ) memcpy((void *)(LOC), \
	(void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ))

struct machine_specific;  /* uniquely defined below each machine's area */
struct xendump_data;
struct xen_kdump_data;

/* One classified kernel virtual address range (see KVADDR_* types). */
struct vaddr_range {
	ulong start;
	ulong end;
	ulong type;
#define KVADDR_UNITY_MAP  (1)
#define KVADDR_VMALLOC    (2)
#define KVADDR_VMEMMAP    (3)
#define KVADDR_START_MAP  (4)
#define KVADDR_MODULES    (5)
#define MAX_KVADDR_RANGES KVADDR_MODULES
};

#define MAX_MACHDEP_ARGS 5  /* for --machdep/-m machine-specific args */

/*
 *  Architecture abstraction layer (the global "machdep" pointer): page
 *  geometry, address classification/translation callbacks, backtrace and
 *  dumpfile hooks, plus single-entry page-table read caches.
 */
struct machdep_table {
	ulong flags;
	ulong kvbase;
	ulong identity_map_base;
	uint pagesize;
	uint pageshift;
	ulonglong pagemask;
	ulong pageoffset;
	ulong stacksize;
	uint hz;
	ulong mhz;
	int bits;
	int nr_irqs;
	uint64_t memsize;
	int (*eframe_search)(struct bt_info *);
	void (*back_trace)(struct bt_info *);
	ulong (*processor_speed)(void);
	int (*uvtop)(struct task_context *, ulong, physaddr_t *, int);
	int (*kvtop)(struct task_context *, ulong, physaddr_t *, int);
	ulong (*get_task_pgd)(ulong);
	void (*dump_irq)(int);
	void (*get_stack_frame)(struct bt_info *, ulong *, ulong *);
	ulong (*get_stackbase)(ulong);
	ulong (*get_stacktop)(ulong);
	int (*translate_pte)(ulong, void *, ulonglong);
	uint64_t (*memory_size)(void);
	ulong (*vmalloc_start)(void);
	int (*is_task_addr)(ulong);
	int (*verify_symbol)(const char *, ulong, char);
	int (*dis_filter)(ulong, char *, unsigned int);
	int (*get_smp_cpus)(void);
	int (*is_kvaddr)(ulong);
	int (*is_uvaddr)(ulong, struct task_context *);
	int (*verify_paddr)(uint64_t);
	void (*cmd_mach)(void);
	void (*init_kernel_pgd)(void);
	struct syment *(*value_to_symbol)(ulong, ulong *);
	struct line_number_hook {
		char *func;
		char **file;
	} *line_number_hooks;
	ulong last_pgd_read;
	ulong last_pud_read;
	ulong last_pmd_read;
	ulong last_ptbl_read;
	char *pgd;
	char *pud;
	char *pmd;
	char *ptbl;
	int ptrs_per_pgd;
	char *cmdline_args[MAX_MACHDEP_ARGS];
	struct machine_specific *machspec;
	ulong section_size_bits;
	ulong max_physmem_bits;
	ulong sections_per_root;
	int (*xendump_p2m_create)(struct xendump_data *);
	ulong (*xendump_panic_task)(struct xendump_data *);
	void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *);
	void (*clear_machdep_cache)(void);
	int (*xen_kdump_p2m_create)(struct xen_kdump_data *);
	int (*in_alternate_stack)(int, ulong);
	void (*dumpfile_init)(int, void *);
	void (*process_elf_notes)(void *, unsigned long);
	int (*get_kvaddr_ranges)(struct vaddr_range *);
	int (*verify_line_number)(ulong, ulong, ulong);
	void (*get_irq_affinity)(int);
	void (*show_interrupts)(int, ulong *);
	int (*is_page_ptr)(ulong, physaddr_t *);
	int (*get_current_task_reg)(int, const char *, int, void *, int);
	int (*is_cpu_prstatus_valid)(int cpu);
};

/*
 *  Processor-common flags; processor-specific flags use the lower bits
 *  as defined in their processor-specific files below. (see KSYMS_START defs).
 */
#define HWRESET         (0x80000000)
#define OMIT_FRAME_PTR  (0x40000000)
#define FRAMESIZE_DEBUG (0x20000000)
#define MACHDEP_BT_TEXT (0x10000000)
#define DEVMEMRD         (0x8000000)
#define INIT             (0x4000000)
#define VM_4_LEVEL       (0x2000000)
#define MCA              (0x1000000)
#define PAE               (0x800000)
#define VMEMMAP           (0x400000)

extern struct machdep_table *machdep;

#ifndef HZ
#define HZ sysconf(_SC_CLK_TCK)
#endif

/* Single-entry caches: avoid re-reading the same page-table page. */
#define IS_LAST_PGD_READ(pgd)   ((ulong)(pgd) == machdep->last_pgd_read)
#define IS_LAST_PMD_READ(pmd)   ((ulong)(pmd) == machdep->last_pmd_read)
#define IS_LAST_PTBL_READ(ptbl) ((ulong)(ptbl) == machdep->last_ptbl_read)
#define IS_LAST_PUD_READ(pud)   ((ulong)(pud) == machdep->last_pud_read)

#define FILL_PGD(PGD, TYPE, SIZE)                                           \
    if (!IS_LAST_PGD_READ(PGD)) {                                           \
            readmem((ulonglong)((ulong)(PGD)), TYPE, machdep->pgd,          \
                    SIZE, "pgd page", FAULT_ON_ERROR);                      \
            machdep->last_pgd_read = (ulong)(PGD);                          \
    }

#define FILL_PUD(PUD, TYPE, SIZE)                                           \
    if (!IS_LAST_PUD_READ(PUD)) {                                           \
            readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud,          \
                    SIZE, "pud page", FAULT_ON_ERROR);                      \
            machdep->last_pud_read = (ulong)(PUD);                          \
    }

#define FILL_PMD(PMD, TYPE, SIZE)                                           \
    if (!IS_LAST_PMD_READ(PMD)) {                                           \
            readmem((ulonglong)(PMD), TYPE, machdep->pmd,                   \
                    SIZE, "pmd page", FAULT_ON_ERROR);                      \
            machdep->last_pmd_read = (ulong)(PMD);                          \
    }

#define FILL_PTBL(PTBL, TYPE, SIZE)                                         \
    if (!IS_LAST_PTBL_READ(PTBL)) {                                         \
            readmem((ulonglong)(PTBL), TYPE, machdep->ptbl,                 \
                    SIZE, "page table", FAULT_ON_ERROR);                    \
            machdep->last_ptbl_read = (ulong)(PTBL);                        \
    }

/* Initialization phases passed to machdep/OS init routines. */
#define SETUP_ENV  (0)
#define PRE_SYMTAB (1)
#define PRE_GDB    (2)
#define POST_GDB   (3)
#define POST_INIT  (4)
#define POST_VM    (5)
#define LOG_ONLY   (6)
#define POST_RELOC (7)

/* "foreach" command keyword identifiers */
#define FOREACH_BT    (1)
#define FOREACH_VM    (2)
#define FOREACH_TASK  (3)
#define FOREACH_SET   (4)
#define FOREACH_FILES (5)
#define FOREACH_NET   (6)
#define FOREACH_TEST  (7)
#define FOREACH_VTOP  (8)
#define FOREACH_SIG   (9)
#define FOREACH_PS   (10)
#define MAX_FOREACH_KEYWORDS (10)

#define MAX_FOREACH_TASKS (50)
#define MAX_FOREACH_PIDS  (50)
#define MAX_FOREACH_COMMS (50) #define MAX_FOREACH_ARGS (50) #define MAX_REGEX_ARGS (10) #define FOREACH_CMD (0x1) #define FOREACH_r_FLAG (0x2) #define FOREACH_s_FLAG (0x4) #define FOREACH_S_FLAG (0x8) #define FOREACH_i_FLAG (0x10) #define FOREACH_e_FLAG (0x20) #define FOREACH_g_FLAG (0x40) #define FOREACH_l_FLAG (0x80) #define FOREACH_p_FLAG (0x100) #define FOREACH_t_FLAG (0x200) #define FOREACH_u_FLAG (0x400) #define FOREACH_m_FLAG (0x800) #define FOREACH_v_FLAG (0x1000) #define FOREACH_KERNEL (0x2000) #define FOREACH_USER (0x4000) #define FOREACH_SPECIFIED (0x8000) #define FOREACH_ACTIVE (0x10000) #define FOREACH_k_FLAG (0x20000) #define FOREACH_c_FLAG (0x40000) #define FOREACH_f_FLAG (0x80000) #define FOREACH_o_FLAG (0x100000) #define FOREACH_T_FLAG (0x200000) #define FOREACH_F_FLAG (0x400000) #define FOREACH_x_FLAG (0x800000) #define FOREACH_d_FLAG (0x1000000) #define FOREACH_STATE (0x2000000) #define FOREACH_a_FLAG (0x4000000) #define FOREACH_G_FLAG (0x8000000) #define FOREACH_F_FLAG2 (0x10000000) #define FOREACH_y_FLAG (0x20000000) #define FOREACH_GLEADER (0x40000000) #define FOREACH_PS_EXCLUSIVE \ (FOREACH_g_FLAG|FOREACH_a_FLAG|FOREACH_t_FLAG|FOREACH_c_FLAG|FOREACH_p_FLAG|FOREACH_l_FLAG|FOREACH_r_FLAG|FOREACH_m_FLAG) struct foreach_data { ulong flags; int keyword_array[MAX_FOREACH_KEYWORDS]; ulong task_array[MAX_FOREACH_TASKS]; char *comm_array[MAX_FOREACH_COMMS]; ulong pid_array[MAX_FOREACH_PIDS]; ulong arg_array[MAX_FOREACH_ARGS]; struct regex_info { char *pattern; regex_t regex; } regex_info[MAX_REGEX_ARGS]; const char *state; char *reference; int keys; int pids; int tasks; int comms; int args; int regexs; int policy; }; struct reference { char *str; ulong cmdflags; ulong hexval; ulong decval; ulong ref1; ulong ref2; void *refp; }; struct offset_table { /* stash of commonly-used offsets */ long list_head_next; /* add new entries to end of table */ long list_head_prev; long task_struct_pid; long task_struct_state; long task_struct_comm; long 
task_struct_mm; long task_struct_tss; long task_struct_thread; long task_struct_active_mm; long task_struct_tss_eip; long task_struct_tss_esp; long task_struct_tss_ksp; long task_struct_processor; long task_struct_p_pptr; long task_struct_parent; long task_struct_has_cpu; long task_struct_cpus_runnable; long task_struct_thread_eip; long task_struct_thread_esp; long task_struct_thread_ksp; long task_struct_next_task; long task_struct_files; long task_struct_fs; long task_struct_pidhash_next; long task_struct_next_run; long task_struct_flags; long task_struct_sig; long task_struct_signal; long task_struct_blocked; long task_struct_sigpending; long task_struct_pending; long task_struct_sigqueue; long task_struct_sighand; long task_struct_start_time; long task_struct_times; long task_struct_utime; long task_struct_stime; long task_struct_cpu; long task_struct_run_list; long task_struct_pgrp; long task_struct_tgid; long task_struct_namespace; long task_struct_pids; long task_struct_last_run; long task_struct_timestamp; long task_struct_thread_info; long task_struct_nsproxy; long task_struct_rlim; long thread_info_task; long thread_info_cpu; long thread_info_previous_esp; long thread_info_flags; long nsproxy_mnt_ns; long mnt_namespace_root; long mnt_namespace_list; long pid_link_pid; long pid_hash_chain; long hlist_node_next; long hlist_node_pprev; long pid_pid_chain; long thread_struct_eip; long thread_struct_esp; long thread_struct_ksp; long thread_struct_fph; long thread_struct_rip; long thread_struct_rsp; long thread_struct_rsp0; long tms_tms_utime; long tms_tms_stime; long signal_struct_count; long signal_struct_action; long signal_struct_shared_pending; long signal_struct_rlim; long k_sigaction_sa; long sigaction_sa_handler; long sigaction_sa_flags; long sigaction_sa_mask; long sigpending_head; long sigpending_list; long sigpending_signal; long signal_queue_next; long signal_queue_info; long sigqueue_next; long sigqueue_list; long sigqueue_info; long 
sighand_struct_action; long siginfo_si_signo; long thread_struct_cr3; long thread_struct_ptbr; long thread_struct_pg_tables; long switch_stack_r26; long switch_stack_b0; long switch_stack_ar_bspstore; long switch_stack_ar_pfs; long switch_stack_ar_rnat; long switch_stack_pr; long cpuinfo_ia64_proc_freq; long cpuinfo_ia64_unimpl_va_mask; long cpuinfo_ia64_unimpl_pa_mask; long device_node_type; long device_node_allnext; long device_node_properties; long property_name; long property_value; long property_next; long machdep_calls_setup_residual; long RESIDUAL_VitalProductData; long VPD_ProcessorHz; long bd_info_bi_intfreq; long hwrpb_struct_cycle_freq; long hwrpb_struct_processor_offset; long hwrpb_struct_processor_size; long percpu_struct_halt_PC; long percpu_struct_halt_ra; long percpu_struct_halt_pv; long mm_struct_mmap; long mm_struct_pgd; long mm_struct_rss; long mm_struct_anon_rss; long mm_struct_file_rss; long mm_struct_total_vm; long mm_struct_start_code; long mm_struct_arg_start; long mm_struct_arg_end; long mm_struct_env_start; long mm_struct_env_end; long vm_area_struct_vm_mm; long vm_area_struct_vm_next; long vm_area_struct_vm_end; long vm_area_struct_vm_start; long vm_area_struct_vm_flags; long vm_area_struct_vm_file; long vm_area_struct_vm_offset; long vm_area_struct_vm_pgoff; long vm_struct_addr; long vm_struct_size; long vm_struct_next; long module_size_of_struct; long module_next; long module_size; long module_name; long module_nsyms; long module_syms; long module_flags; long module_num_syms; long module_list; long module_gpl_syms; long module_num_gpl_syms; long module_module_core; long module_core_size; long module_core_text_size; long module_num_symtab; long module_symtab; long module_strtab; long module_kallsyms_start; long kallsyms_header_sections; long kallsyms_header_section_off; long kallsyms_header_symbols; long kallsyms_header_symbol_off; long kallsyms_header_string_off; long kallsyms_symbol_section_off; long kallsyms_symbol_symbol_addr; long 
kallsyms_symbol_name_off; long kallsyms_section_start; long kallsyms_section_size; long kallsyms_section_name_off; long page_next; long page_prev; long page_next_hash; long page_list; long page_list_next; long page_list_prev; long page_inode; long page_offset; long page_count; long page_flags; long page_mapping; long page_index; long page_buffers; long page_lru; long page_pte; long swap_info_struct_swap_file; long swap_info_struct_swap_vfsmnt; long swap_info_struct_flags; long swap_info_struct_swap_map; long swap_info_struct_swap_device; long swap_info_struct_prio; long swap_info_struct_max; long swap_info_struct_pages; long swap_info_struct_old_block_size; long block_device_bd_inode; long block_device_bd_list; long block_device_bd_disk; long irq_desc_t_status; long irq_desc_t_handler; long irq_desc_t_chip; long irq_desc_t_action; long irq_desc_t_depth; long irqdesc_action; long irqdesc_ctl; long irqdesc_level; long irqaction_handler; long irqaction_flags; long irqaction_mask; long irqaction_name; long irqaction_dev_id; long irqaction_next; long hw_interrupt_type_typename; long hw_interrupt_type_startup; long hw_interrupt_type_shutdown; long hw_interrupt_type_handle; long hw_interrupt_type_enable; long hw_interrupt_type_disable; long hw_interrupt_type_ack; long hw_interrupt_type_end; long hw_interrupt_type_set_affinity; long irq_chip_typename; long irq_chip_startup; long irq_chip_shutdown; long irq_chip_enable; long irq_chip_disable; long irq_chip_ack; long irq_chip_end; long irq_chip_set_affinity; long irq_chip_mask; long irq_chip_mask_ack; long irq_chip_unmask; long irq_chip_eoi; long irq_chip_retrigger; long irq_chip_set_type; long irq_chip_set_wake; long irq_cpustat_t___softirq_active; long irq_cpustat_t___softirq_mask; long fdtable_max_fds; long fdtable_max_fdset; long fdtable_open_fds; long fdtable_fd; long files_struct_fdt; long files_struct_max_fds; long files_struct_max_fdset; long files_struct_open_fds; long files_struct_fd; long 
files_struct_open_fds_init; long file_f_dentry; long file_f_vfsmnt; long file_f_count; long file_f_path; long path_mnt; long path_dentry; long fs_struct_root; long fs_struct_pwd; long fs_struct_rootmnt; long fs_struct_pwdmnt; long dentry_d_inode; long dentry_d_parent; long dentry_d_name; long dentry_d_covers; long dentry_d_iname; long qstr_len; long qstr_name; long inode_i_mode; long inode_i_op; long inode_i_sb; long inode_u; long inode_i_flock; long inode_i_fop; long inode_i_mapping; long address_space_nrpages; long vfsmount_mnt_next; long vfsmount_mnt_devname; long vfsmount_mnt_dirname; long vfsmount_mnt_sb; long vfsmount_mnt_list; long vfsmount_mnt_mountpoint; long vfsmount_mnt_parent; long namespace_root; long namespace_list; long super_block_s_dirty; long super_block_s_type; long super_block_s_files; long file_system_type_name; long nlm_file_f_file; long file_lock_fl_owner; long nlm_host_h_exportent; long svc_client_cl_ident; long kmem_cache_s_c_nextp; long kmem_cache_s_c_name; long kmem_cache_s_c_num; long kmem_cache_s_c_org_size; long kmem_cache_s_c_flags; long kmem_cache_s_c_offset; long kmem_cache_s_c_firstp; long kmem_cache_s_c_gfporder; long kmem_cache_s_c_magic; long kmem_cache_s_num; long kmem_cache_s_next; long kmem_cache_s_name; long kmem_cache_s_objsize; long kmem_cache_s_flags; long kmem_cache_s_gfporder; long kmem_cache_s_slabs; long kmem_cache_s_slabs_full; long kmem_cache_s_slabs_partial; long kmem_cache_s_slabs_free; long kmem_cache_s_cpudata; long kmem_cache_s_c_align; long kmem_cache_s_colour_off; long cpucache_s_avail; long cpucache_s_limit; long kmem_cache_s_array; long array_cache_avail; long array_cache_limit; long kmem_cache_s_lists; long kmem_list3_slabs_partial; long kmem_list3_slabs_full; long kmem_list3_slabs_free; long kmem_list3_free_objects; long kmem_list3_shared; long kmem_slab_s_s_nextp; long kmem_slab_s_s_freep; long kmem_slab_s_s_inuse; long kmem_slab_s_s_mem; long kmem_slab_s_s_index; long kmem_slab_s_s_offset; long 
kmem_slab_s_s_magic; long slab_s_list; long slab_s_s_mem; long slab_s_inuse; long slab_s_free; long slab_list; long slab_s_mem; long slab_inuse; long slab_free; long net_device_next; long net_device_name; long net_device_type; long net_device_addr_len; long net_device_ip_ptr; long net_device_dev_list; long net_dev_base_head; long device_next; long device_name; long device_type; long device_ip_ptr; long device_addr_len; long socket_sk; long sock_daddr; long sock_rcv_saddr; long sock_dport; long sock_sport; long sock_num; long sock_type; long sock_family; long sock_common_skc_family; long sock_sk_type; long inet_sock_inet; long inet_opt_daddr; long inet_opt_rcv_saddr; long inet_opt_dport; long inet_opt_sport; long inet_opt_num; long ipv6_pinfo_rcv_saddr; long ipv6_pinfo_daddr; long timer_list_list; long timer_list_next; long timer_list_entry; long timer_list_expires; long timer_list_function; long timer_vec_root_vec; long timer_vec_vec; long tvec_root_s_vec; long tvec_s_vec; long tvec_t_base_s_tv1; long wait_queue_task; long wait_queue_next; long __wait_queue_task; long __wait_queue_head_task_list; long __wait_queue_task_list; long pglist_data_node_zones; long pglist_data_node_mem_map; long pglist_data_node_start_paddr; long pglist_data_node_start_mapnr; long pglist_data_node_size; long pglist_data_node_id; long pglist_data_node_next; long pglist_data_nr_zones; long pglist_data_node_start_pfn; long pglist_data_pgdat_next; long pglist_data_node_present_pages; long pglist_data_node_spanned_pages; long pglist_data_bdata; long page_cache_bucket_chain; long zone_struct_free_pages; long zone_struct_free_area; long zone_struct_zone_pgdat; long zone_struct_name; long zone_struct_size; long zone_struct_memsize; long zone_struct_zone_start_pfn; long zone_struct_zone_start_paddr; long zone_struct_zone_start_mapnr; long zone_struct_zone_mem_map; long zone_struct_inactive_clean_pages; long zone_struct_inactive_clean_list; long zone_struct_inactive_dirty_pages; long 
zone_struct_active_pages; long zone_struct_pages_min; long zone_struct_pages_low; long zone_struct_pages_high; long zone_free_pages; long zone_free_area; long zone_zone_pgdat; long zone_zone_mem_map; long zone_name; long zone_spanned_pages; long zone_zone_start_pfn; long zone_pages_min; long zone_pages_low; long zone_pages_high; long zone_vm_stat; long neighbour_next; long neighbour_primary_key; long neighbour_ha; long neighbour_dev; long neighbour_nud_state; long neigh_table_hash_buckets; long neigh_table_key_len; long in_device_ifa_list; long in_ifaddr_ifa_next; long in_ifaddr_ifa_address; long pci_dev_global_list; long pci_dev_next; long pci_dev_bus; long pci_dev_devfn; long pci_dev_class; long pci_dev_device; long pci_dev_vendor; long pci_bus_number; long resource_entry_t_from; long resource_entry_t_num; long resource_entry_t_name; long resource_entry_t_next; long resource_name; long resource_start; long resource_end; long resource_sibling; long resource_child; long runqueue_curr; long runqueue_idle; long runqueue_active; long runqueue_expired; long runqueue_arrays; long runqueue_cpu; long cpu_s_idle; long cpu_s_curr; long prio_array_nr_active; long prio_array_queue; long user_regs_struct_ebp; long user_regs_struct_esp; long user_regs_struct_rip; long user_regs_struct_cs; long user_regs_struct_eflags; long user_regs_struct_rsp; long user_regs_struct_ss; long e820map_nr_map; long e820entry_addr; long e820entry_size; long e820entry_type; long char_device_struct_next; long char_device_struct_name; long char_device_struct_fops; long char_device_struct_major; long gendisk_major; long gendisk_disk_name; long gendisk_fops; long blk_major_name_next; long blk_major_name_major; long blk_major_name_name; long radix_tree_root_height; long radix_tree_root_rnode; long x8664_pda_pcurrent; long x8664_pda_data_offset; long x8664_pda_kernelstack; long x8664_pda_irqrsp; long x8664_pda_irqstackptr; long x8664_pda_level4_pgt; long x8664_pda_cpunumber; long x8664_pda_me; long 
tss_struct_ist; long mem_section_section_mem_map; long vcpu_guest_context_user_regs; long cpu_user_regs_eip; long cpu_user_regs_esp; long cpu_user_regs_rip; long cpu_user_regs_rsp; long unwind_table_core; long unwind_table_init; long unwind_table_address; long unwind_table_size; long unwind_table_link; long unwind_table_name; long rq_cfs; long rq_rt; long rq_nr_running; long cfs_rq_rb_leftmost; long cfs_rq_nr_running; long cfs_rq_tasks_timeline; long task_struct_se; long sched_entity_run_node; long rt_rq_active; long kmem_cache_size; long kmem_cache_objsize; long kmem_cache_offset; long kmem_cache_order; long kmem_cache_local_node; long kmem_cache_objects; long kmem_cache_inuse; long kmem_cache_align; long kmem_cache_name; long kmem_cache_list; long kmem_cache_node; long kmem_cache_cpu_slab; long page_inuse; /* long page_offset; use "old" page->offset */ long page_slab; long page_first_page; long page_freelist; long kmem_cache_node_nr_partial; long kmem_cache_node_nr_slabs; long kmem_cache_node_partial; long kmem_cache_node_full; long pid_numbers; long upid_nr; long upid_ns; long upid_pid_chain; long pid_tasks; long kmem_cache_cpu_freelist; long kmem_cache_cpu_page; long kmem_cache_cpu_node; long kmem_cache_flags; long zone_nr_active; long zone_nr_inactive; long zone_all_unreclaimable; long zone_present_pages; long zone_flags; long zone_pages_scanned; long pcpu_info_vcpu; long pcpu_info_idle; long vcpu_struct_rq; long task_struct_sched_info; long sched_info_last_arrival; long page_objects; long kmem_cache_oo; long char_device_struct_cdev; long char_device_struct_baseminor; long cdev_ops; long probe_next; long probe_dev; long probe_data; long kobj_map_probes; long task_struct_prio; long zone_watermark; long module_sect_attrs; long module_sect_attrs_attrs; long module_sect_attrs_nsections; long module_sect_attr_mattr; long module_sect_attr_name; long module_sect_attr_address; long module_attribute_attr; long attribute_owner; long module_sect_attr_attr; long 
module_sections_attrs; long swap_info_struct_inuse_pages; long s390_lowcore_psw_save_area; long mm_struct_rss_stat; long mm_rss_stat_count; long module_module_init; long module_init_text_size; long cpu_context_save_fp; long cpu_context_save_sp; long cpu_context_save_pc; long elf_prstatus_pr_pid; long elf_prstatus_pr_reg; long irq_desc_t_name; long thread_info_cpu_context; long unwind_table_list; long unwind_table_start; long unwind_table_stop; long unwind_table_begin_addr; long unwind_table_end_addr; long unwind_idx_addr; long unwind_idx_insn; long signal_struct_nr_threads; long module_init_size; long module_percpu; long radix_tree_node_slots; long s390_stack_frame_back_chain; long s390_stack_frame_r14; long user_regs_struct_eip; long user_regs_struct_rax; long user_regs_struct_eax; long user_regs_struct_rbx; long user_regs_struct_ebx; long user_regs_struct_rcx; long user_regs_struct_ecx; long user_regs_struct_rdx; long user_regs_struct_edx; long user_regs_struct_rsi; long user_regs_struct_esi; long user_regs_struct_rdi; long user_regs_struct_edi; long user_regs_struct_ds; long user_regs_struct_es; long user_regs_struct_fs; long user_regs_struct_gs; long user_regs_struct_rbp; long user_regs_struct_r8; long user_regs_struct_r9; long user_regs_struct_r10; long user_regs_struct_r11; long user_regs_struct_r12; long user_regs_struct_r13; long user_regs_struct_r14; long user_regs_struct_r15; long sched_entity_cfs_rq; long sched_entity_my_q; long sched_entity_on_rq; long task_struct_on_rq; long cfs_rq_curr; long irq_desc_t_irq_data; long irq_desc_t_kstat_irqs; long irq_desc_t_affinity; long irq_data_chip; long irq_data_affinity; long kernel_stat_irqs; long socket_alloc_vfs_inode; long class_devices; long class_p; long class_private_devices; long device_knode_class; long device_node; long gendisk_dev; long gendisk_kobj; long gendisk_part0; long gendisk_queue; long hd_struct_dev; long klist_k_list; long klist_node_n_klist; long klist_node_n_node; long kobject_entry; long 
kset_list; long request_list_count; long request_queue_in_flight; long request_queue_rq; long subsys_private_klist_devices; long subsystem_kset; long mount_mnt_parent; long mount_mnt_mountpoint; long mount_mnt_list; long mount_mnt_devname; long mount_mnt; long task_struct_exit_state; long timekeeper_xtime; long file_f_op; long file_private_data; long hstate_order; long hugetlbfs_sb_info_hstate; long idr_layer_ary; long idr_layer_layer; long idr_layers; long idr_top; long ipc_id_ary_p; long ipc_ids_entries; long ipc_ids_max_id; long ipc_ids_ipcs_idr; long ipc_ids_in_use; long ipc_namespace_ids; long kern_ipc_perm_deleted; long kern_ipc_perm_key; long kern_ipc_perm_mode; long kern_ipc_perm_uid; long kern_ipc_perm_id; long kern_ipc_perm_seq; long nsproxy_ipc_ns; long shmem_inode_info_swapped; long shmem_inode_info_vfs_inode; long shm_file_data_file; long shmid_kernel_shm_file; long shmid_kernel_shm_nattch; long shmid_kernel_shm_perm; long shmid_kernel_shm_segsz; long shmid_kernel_id; long sem_array_sem_perm; long sem_array_sem_id; long sem_array_sem_nsems; long msg_queue_q_perm; long msg_queue_q_id; long msg_queue_q_cbytes; long msg_queue_q_qnum; long super_block_s_fs_info; long rq_timestamp; long radix_tree_node_height; long rb_root_rb_node; long rb_node_rb_left; long rb_node_rb_right; long rt_prio_array_queue; long task_struct_rt; long sched_rt_entity_run_list; long log_ts_nsec; long log_len; long log_text_len; long log_dict_len; long log_level; long log_flags_level; long timekeeper_xtime_sec; long neigh_table_hash_mask; long sched_rt_entity_my_q; long neigh_table_hash_shift; long neigh_table_nht_ptr; long task_group_parent; long task_group_css; long cgroup_subsys_state_cgroup; long cgroup_dentry; long task_group_rt_rq; long rt_rq_tg; long task_group_cfs_rq; long cfs_rq_tg; long task_group_siblings; long task_group_children; long task_group_cfs_bandwidth; long cfs_rq_throttled; long task_group_rt_bandwidth; long rt_rq_rt_throttled; long rt_rq_highest_prio; long 
rt_rq_rt_nr_running; long vmap_area_va_start; long vmap_area_va_end; long vmap_area_list; long vmap_area_flags; long vmap_area_vm; long hrtimer_cpu_base_clock_base; long hrtimer_clock_base_offset; long hrtimer_clock_base_active; long hrtimer_clock_base_first; long hrtimer_clock_base_get_time; long hrtimer_base_first; long hrtimer_base_pending; long hrtimer_base_get_time; long hrtimer_node; long hrtimer_list; long hrtimer_softexpires; long hrtimer_expires; long hrtimer_function; long timerqueue_head_next; long timerqueue_node_expires; long timerqueue_node_node; long ktime_t_tv64; long ktime_t_sec; long ktime_t_nsec; long module_taints; long module_gpgsig_ok; long module_license_gplok; long tnt_bit; long tnt_true; long tnt_false; long task_struct_thread_context_fp; long task_struct_thread_context_sp; long task_struct_thread_context_pc; long page_slab_page; long trace_print_flags_mask; long trace_print_flags_name; long task_struct_rss_stat; long task_rss_stat_count; long page_s_mem; long page_active; long hstate_nr_huge_pages; long hstate_free_huge_pages; long hstate_name; long cgroup_kn; long kernfs_node_name; long kernfs_node_parent; long kmem_cache_cpu_partial; long kmem_cache_cpu_cache; long nsproxy_net_ns; long atomic_t_counter; long percpu_counter_count; long mm_struct_mm_count; long task_struct_thread_reg29; long task_struct_thread_reg31; long pt_regs_regs; long pt_regs_cp0_badvaddr; long address_space_page_tree; long page_compound_head; long irq_desc_irq_data; long kmem_cache_node_total_objects; long timer_base_vectors; long request_queue_mq_ops; long request_queue_queue_ctx; long blk_mq_ctx_rq_dispatched; long blk_mq_ctx_rq_completed; long task_struct_stack; long tnt_mod; long radix_tree_node_shift; long kmem_cache_red_left_pad; long inactive_task_frame_ret_addr; long sk_buff_head_next; long sk_buff_head_qlen; long sk_buff_next; long sk_buff_len; long sk_buff_data; long nlmsghdr_nlmsg_type; long module_arch; long mod_arch_specific_num_orcs; long 
mod_arch_specific_orc_unwind_ip; long mod_arch_specific_orc_unwind; long task_struct_policy; long kmem_cache_random; long pid_namespace_idr; long idr_idr_rt; long bpf_prog_aux; long bpf_prog_type; long bpf_prog_tag; long bpf_prog_jited_len; long bpf_prog_bpf_func; long bpf_prog_len; long bpf_prog_insnsi; long bpf_prog_pages; long bpf_map_map_type; long bpf_map_map_flags; long bpf_map_pages; long bpf_map_key_size; long bpf_map_value_size; long bpf_map_max_entries; long bpf_map_user; long bpf_map_name; long bpf_prog_aux_used_map_cnt; long bpf_prog_aux_used_maps; long bpf_prog_aux_load_time; long bpf_prog_aux_user; long user_struct_uid; long idr_cur; long kmem_cache_memcg_params; long memcg_cache_params___root_caches_node; long memcg_cache_params_children; long memcg_cache_params_children_node; long task_struct_pid_links; long kernel_symbol_value; long pci_dev_dev; long pci_dev_hdr_type; long pci_dev_pcie_flags_reg; long pci_bus_node; long pci_bus_devices; long pci_bus_dev; long pci_bus_children; long pci_bus_parent; long pci_bus_self; long device_kobj; long kobject_name; long memory_block_dev; long memory_block_start_section_nr; long memory_block_end_section_nr; long memory_block_state; long memory_block_nid; long mem_section_pageblock_flags; long bus_type_p; long device_private_device; long device_private_knode_bus; long xarray_xa_head; long xa_node_slots; long xa_node_shift; long hd_struct_dkstats; long disk_stats_in_flight; long cpu_context_save_r7; long dentry_d_sb; long device_private_knode_class; long timerqueue_head_rb_root; long rb_root_cached_rb_leftmost; long bpf_map_memory; long bpf_map_memory_pages; long bpf_map_memory_user; long bpf_prog_aux_name; long page_private; long swap_info_struct_bdev; long zram_mem_pool; long zram_compressor; long zram_table_entry_flags; long zs_pool_size_class; long size_class_size; long gendisk_private_data; long zram_table_entry; /* unused; but cannot remove */ long module_core_size_rw; long module_core_size_rx; long 
module_init_size_rw; long module_init_size_rx; long module_module_core_rw; long module_module_core_rx; long module_module_init_rw; long module_module_init_rx; long super_block_s_inodes; long inode_i_sb_list; long irq_common_data_affinity; long irq_desc_irq_common_data; long uts_namespace_name; long printk_info_seq; long printk_info_ts_nsec; long printk_info_text_len; long printk_info_level; long printk_info_caller_id; long printk_info_dev_info; long dev_printk_info_subsystem; long dev_printk_info_device; long prb_desc_ring; long prb_text_data_ring; long prb_desc_ring_count_bits; long prb_desc_ring_descs; long prb_desc_ring_infos; long prb_desc_ring_head_id; long prb_desc_ring_tail_id; long prb_desc_state_var; long prb_desc_text_blk_lpos; long prb_data_blk_lpos_begin; long prb_data_blk_lpos_next; long prb_data_ring_size_bits; long prb_data_ring_data; long atomic_long_t_counter; long block_device_bd_device; long block_device_bd_stats; long wait_queue_entry_private; long wait_queue_head_head; long wait_queue_entry_entry; long printk_safe_seq_buf_len; long printk_safe_seq_buf_message_lost; long printk_safe_seq_buf_buffer; long sbitmap_word_depth; long sbitmap_word_word; long sbitmap_word_cleared; long sbitmap_depth; long sbitmap_shift; long sbitmap_map_nr; long sbitmap_map; long sbitmap_queue_sb; long sbitmap_queue_alloc_hint; long sbitmap_queue_wake_batch; long sbitmap_queue_wake_index; long sbitmap_queue_ws; long sbitmap_queue_ws_active; long sbitmap_queue_round_robin; long sbitmap_queue_min_shallow_depth; long sbq_wait_state_wait_cnt; long sbq_wait_state_wait; long sbitmap_alloc_hint; long sbitmap_round_robin; long request_cmd_flags; long request_q; long request_state; long request_queue_queue_hw_ctx; long request_queue_nr_hw_queues; long blk_mq_hw_ctx_tags; long blk_mq_tags_bitmap_tags; long blk_mq_tags_breserved_tags; long blk_mq_tags_nr_reserved_tags; long blk_mq_tags_rqs; long request_queue_hctx_table; long percpu_counter_counters; long slab_slab_list; long 
mm_struct_mm_mt; long maple_tree_ma_root; long maple_tree_ma_flags; long maple_node_parent; long maple_node_ma64; long maple_node_mr64; long maple_node_slot; long maple_arange_64_pivot; long maple_arange_64_slot; long maple_arange_64_gap; long maple_arange_64_meta; long maple_range_64_pivot; long maple_range_64_slot; long maple_metadata_end; long maple_metadata_gap; long sock_sk_common; long sock_common_skc_v6_daddr; long sock_common_skc_v6_rcv_saddr; long inactive_task_frame_bp; long net_device_ip6_ptr; long inet6_dev_addr_list; long inet6_ifaddr_addr; long inet6_ifaddr_if_list; long inet6_ifaddr_if_next; long in6_addr_in6_u; long kset_kobj; long subsys_private_subsys; long vmap_area_purge_list; long module_mem; long module_memory_base; long module_memory_size; long irq_data_irq; long zspage_huge; long zram_comp_algs; long task_struct_thread_reg01; long task_struct_thread_reg03; long mnt_namespace_mounts; long mnt_namespace_nr_mounts; long mount_mnt_node; long log_caller_id; long vmap_node_busy; long rb_list_head; long file_f_inode; long page_page_type; long inactive_task_frame_r15; long inactive_task_frame_r14; long inactive_task_frame_r13; long inactive_task_frame_r12; long inactive_task_frame_flags; long inactive_task_frame_si; long inactive_task_frame_di; long inactive_task_frame_bx; long thread_struct_es; long thread_struct_ds; long thread_struct_fsbase; long thread_struct_gsbase; long thread_struct_fs; long thread_struct_gs; long task_struct_thread_context_x19; long task_struct_thread_context_x20; long task_struct_thread_context_x21; long task_struct_thread_context_x22; long task_struct_thread_context_x23; long task_struct_thread_context_x24; long task_struct_thread_context_x25; long task_struct_thread_context_x26; long task_struct_thread_context_x27; long task_struct_thread_context_x28; long neigh_table_hash_heads; long neighbour_hash; long request_queue_tag_set; long blk_mq_tag_set_flags; long blk_mq_tag_set_shared_tags; long vfsmount_mnt_flags; long 
proc_mounts_cursor; long bpf_ringbuf_map_map; long bpf_ringbuf_map_rb; long bpf_ringbuf_consumer_pos; long bpf_ringbuf_nr_pages; }; struct size_table { /* stash of commonly-used sizes */ long page; long free_area_struct; long zone_struct; long free_area; long zone; long kmem_slab_s; long kmem_cache_s; long kmem_bufctl_t; long slab_s; long slab; long cpucache_s; long array_cache; long swap_info_struct; long mm_struct; long vm_area_struct; long pglist_data; long page_cache_bucket; long pt_regs; long task_struct; long thread_info; long softirq_state; long desc_struct; long umode_t; long dentry; long files_struct; long fdtable; long fs_struct; long file; long inode; long vfsmount; long super_block; long irqdesc; long module; long list_head; long hlist_node; long hlist_head; long irq_cpustat_t; long cpuinfo_x86; long cpuinfo_ia64; long timer_list; long timer_vec_root; long timer_vec; long tvec_root_s; long tvec_s; long tvec_t_base_s; long wait_queue; long __wait_queue; long device; long net_device; long sock; long signal_struct; long sigpending_signal; long signal_queue; long sighand_struct; long sigqueue; long k_sigaction; long resource_entry_t; long resource; long runqueue; long irq_desc_t; long task_union; long thread_union; long prio_array; long user_regs_struct; long switch_stack; long vm_area_struct_vm_flags; long e820map; long e820entry; long cpu_s; long pgd_t; long kallsyms_header; long kallsyms_symbol; long kallsyms_section; long irq_ctx; long block_device; long blk_major_name; long gendisk; long address_space; long char_device_struct; long inet_sock; long in6_addr; long socket; long spinlock_t; long radix_tree_root; long radix_tree_node; long x8664_pda; long ppc64_paca; long gate_struct; long tss_struct; long task_struct_start_time; long cputime_t; long mem_section; long pid_link; long unwind_table; long rlimit; long kmem_cache; long kmem_cache_node; long upid; long kmem_cache_cpu; long cfs_rq; long pcpu_info; long vcpu_struct; long cdev; long probe; long 
kobj_map; long page_flags; long module_sect_attr; long task_struct_utime; long task_struct_stime; long cpu_context_save; long elf_prstatus; long note_buf; long unwind_idx; long softirq_action; long irq_data; long s390_stack_frame; long percpu_data; long sched_entity; long kernel_stat; long subsystem; long class_private; long rq_in_flight; long class_private_devices; long mount; long hstate; long ipc_ids; long shmid_kernel; long sem_array; long msg_queue; long log; long log_level; long rt_rq; long task_group; long vmap_area; long hrtimer_clock_base; long hrtimer_base; long tnt; long trace_print_flags; long task_struct_flags; long timer_base; long taint_flag; long nlmsghdr; long nlmsghdr_nlmsg_type; long sk_buff_head_qlen; long sk_buff_len; long orc_entry; long task_struct_policy; long pid; long bpf_prog; long bpf_prog_aux; long bpf_map; long bpf_insn; long xarray; long xa_node; long zram_table_entry; long irq_common_data; long printk_info; long printk_ringbuffer; long prb_desc; long wait_queue_entry; long task_struct_state; long printk_safe_seq_buf_buffer; long sbitmap_word; long sbitmap; long sbitmap_queue; long sbq_wait_state; long blk_mq_tags; long percpu_counter; long maple_tree; long maple_node; long module_memory; long fred_frame; long vmap_node; long cpumask_t; long task_struct_exit_state; long bpf_ringbuf_map; }; struct array_table { int kmem_cache_s_name; int kmem_cache_s_c_name; int kmem_cache_s_array; int kmem_cache_s_cpudata; int irq_desc; int irq_action; int log_buf; int timer_vec_vec; int timer_vec_root_vec; int tvec_s_vec; int tvec_root_s_vec; int page_hash_table; int net_device_name; int neigh_table_hash_buckets; int neighbour_ha; int swap_info; int pglist_data_node_zones; int zone_struct_free_area; int zone_free_area; int free_area; int free_area_DIMENSION; int prio_array_queue; int height_to_maxindex; int pid_hash; int kmem_cache_node; int kmem_cache_cpu_slab; int rt_prio_array_queue; int height_to_maxnodes; int task_struct_rlim; int 
signal_struct_rlim; int vm_numa_stat; int pid_numbers; }; /* * The following set of macros use gdb to determine structure, union, * or member sizes/offsets. They should be used only during initialization * of the offset_table or size_table, or with data structures whose names * or members are only known/specified during runtime. */ #define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1)) #define ANON_MEMBER_OFFSET_REQUEST ((struct datatype_member *)(-2)) #define MEMBER_TYPE_REQUEST ((struct datatype_member *)(-3)) #define STRUCT_SIZE_REQUEST ((struct datatype_member *)(-4)) #define MEMBER_TYPE_NAME_REQUEST ((struct datatype_member *)(-5)) #define ANON_MEMBER_SIZE_REQUEST ((struct datatype_member *)(-6)) #define STRUCT_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST) #define UNION_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST) #define STRUCT_EXISTS(X) (datatype_info((X), NULL, STRUCT_SIZE_REQUEST) >= 0) #define DATATYPE_SIZE(X) datatype_info((X)->name, NULL, (X)) #define MEMBER_OFFSET(X,Y) datatype_info((X), (Y), NULL) #define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) #define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST) #define MEMBER_TYPE(X,Y) datatype_info((X), (Y), MEMBER_TYPE_REQUEST) #define MEMBER_TYPE_NAME(X,Y) ((char *)datatype_info((X), (Y), MEMBER_TYPE_NAME_REQUEST)) #define ANON_MEMBER_OFFSET(X,Y) datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST) #define ANON_MEMBER_SIZE(X,Y) datatype_info((X), (Y), ANON_MEMBER_SIZE_REQUEST) /* * The following set of macros can only be used with pre-intialized fields * in the offset table, size table or array_table. */ #define OFFSET(X) (OFFSET_verify(offset_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X)) #define MODULE_OFFSET(X,Y) (PAX_MODULE_SPLIT() ? 
OFFSET(Y) : OFFSET(X)) #define MODULE_OFFSET2(X,T) MODULE_OFFSET(X, X##_##T) #define SIZE(X) (SIZE_verify(size_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X)) #define INVALID_OFFSET (-1) #define INVALID_MEMBER(X) (offset_table.X == INVALID_OFFSET) #define INVALID_SIZE(X) (size_table.X == -1) #define VALID_SIZE(X) (size_table.X >= 0) #define VALID_STRUCT(X) (size_table.X >= 0) #define VALID_MEMBER(X) (offset_table.X >= 0) #define ARRAY_LENGTH(X) (array_table.X) #define ASSIGN_OFFSET(X) (offset_table.X) #define ASSIGN_SIZE(X) (size_table.X) #define OFFSET_OPTION(X,Y) (OFFSET_option(offset_table.X, offset_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y)) #define SIZE_OPTION(X,Y) (SIZE_option(size_table.X, size_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y)) #define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) #define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) #define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E)) #define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E)) #define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) #define ANON_MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = ANON_MEMBER_OFFSET(Y, Z)) /* * For use with non-debug kernels. */ struct builtin_debug_table { char *release; char *machine_type; struct offset_table *offset_table; struct size_table *size_table; struct array_table *array_table; }; /* * Facilitators for pulling correctly-sized data out of a buffer at a * known address. 
*/ #ifdef NEED_ALIGNED_MEM_ACCESS #define DEF_LOADER(TYPE) \ static inline TYPE \ load_##TYPE (char *addr) \ { \ TYPE ret; \ size_t i = sizeof(TYPE); \ while (i--) \ ((char *)&ret)[i] = addr[i]; \ return ret; \ } DEF_LOADER(int); DEF_LOADER(uint); DEF_LOADER(long); DEF_LOADER(ulong); DEF_LOADER(ulonglong); DEF_LOADER(ushort); DEF_LOADER(short); typedef void *pointer_t; DEF_LOADER(pointer_t); DEF_LOADER(bool); #define LOADER(TYPE) load_##TYPE #define INT(ADDR) LOADER(int) ((char *)(ADDR)) #define UINT(ADDR) LOADER(uint) ((char *)(ADDR)) #define LONG(ADDR) LOADER(long) ((char *)(ADDR)) #define ULONG(ADDR) LOADER(ulong) ((char *)(ADDR)) #define ULONGLONG(ADDR) LOADER(ulonglong) ((char *)(ADDR)) #define ULONG_PTR(ADDR) ((ulong *) (LOADER(pointer_t) ((char *)(ADDR)))) #define USHORT(ADDR) LOADER(ushort) ((char *)(ADDR)) #define SHORT(ADDR) LOADER(short) ((char *)(ADDR)) #define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) #define VOID_PTR(ADDR) ((void *) (LOADER(pointer_t) ((char *)(ADDR)))) #define BOOL(ADDR) LOADER(bool) ((char *)(ADDR)) #else #define INT(ADDR) *((int *)((char *)(ADDR))) #define UINT(ADDR) *((uint *)((char *)(ADDR))) #define LONG(ADDR) *((long *)((char *)(ADDR))) #define ULONG(ADDR) *((ulong *)((char *)(ADDR))) #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) #define USHORT(ADDR) *((ushort *)((char *)(ADDR))) #define SHORT(ADDR) *((short *)((char *)(ADDR))) #define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) #define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) #define BOOL(ADDR) *((bool *)((char *)(ADDR))) #endif /* NEED_ALIGNED_MEM_ACCESS */ struct node_table { int node_id; ulong pgdat; ulong mem_map; ulong size; ulong present; ulonglong start_paddr; ulong start_mapnr; }; struct meminfo; struct slab_data; #define VMA_CACHE (20) struct vm_table { /* kernel VM-related data */ ulong flags; ulong kernel_pgd[NR_CPUS]; ulong high_memory; ulong vmalloc_start; ulong mem_map; long 
total_pages; ulong totalram_pages; ulong totalhigh_pages; ulong num_physpages; ulong max_mapnr; ulong kmem_max_c_num; ulong kmem_max_limit; ulong kmem_max_cpus; ulong kmem_cache_count; ulong kmem_cache_len_nodes; ulong PG_reserved; ulong PG_slab; ulong PG_head_tail_mask; int kmem_cache_namelen; ulong page_hash_table; int page_hash_table_len; int paddr_prlen; int numnodes; int nr_zones; int nr_free_areas; struct node_table *node_table; void (*dump_free_pages)(struct meminfo *); void (*dump_kmem_cache)(struct meminfo *); struct slab_data *slab_data; uint nr_swapfiles; ulong last_swap_read; char *swap_info_struct; char *vma_cache; ulong cached_vma[VMA_CACHE]; ulong cached_vma_hits[VMA_CACHE]; int vma_cache_index; ulong vma_cache_fills; void *mem_sec; char *mem_section; int ZONE_HIGHMEM; ulong *node_online_map; int node_online_map_len; int nr_vm_stat_items; char **vm_stat_items; int cpu_slab_type; int nr_vm_event_items; char **vm_event_items; int nr_bad_slab_caches; ulong *bad_slab_caches; int nr_pageflags; struct pageflags_data { ulong mask; char *name; } *pageflags_data; ulong max_mem_section_nr; ulong zero_paddr; ulong huge_zero_paddr; uint page_type_base; }; #define NODES (0x1) #define ZONES (0x2) #define PERCPU_KMALLOC_V1 (0x4) #define COMMON_VADDR (0x8) #define KMEM_CACHE_INIT (0x10) #define V_MEM_MAP (0x20) #define PERCPU_KMALLOC_V2 (0x40) #define KMEM_CACHE_UNAVAIL (0x80) #define FLATMEM (0x100) #define DISCONTIGMEM (0x200) #define SPARSEMEM (0x400) #define SPARSEMEM_EX (0x800) #define PERCPU_KMALLOC_V2_NODES (0x1000) #define KMEM_CACHE_DELAY (0x2000) #define NODES_ONLINE (0x4000) #define VM_STAT (0x8000) #define KMALLOC_SLUB (0x10000) #define CONFIG_NUMA (0x20000) #define VM_EVENT (0x40000) #define PGCNT_ADJ (0x80000) #define VM_INIT (0x100000) #define SWAPINFO_V1 (0x200000) #define SWAPINFO_V2 (0x400000) #define NODELISTS_IS_PTR (0x800000) #define KMALLOC_COMMON (0x1000000) #define USE_VMAP_AREA (0x2000000) #define PAGEFLAGS (0x4000000) #define 
SLAB_OVERLOAD_PAGE (0x8000000) #define SLAB_CPU_CACHE (0x10000000) #define SLAB_ROOT_CACHES (0x20000000) #define USE_VMAP_NODES (0x40000000) /* * The SLAB_PAGEFLAGS flag is introduced to detect the change of * PG_slab's type from a page flag to a page type. */ #define SLAB_PAGEFLAGS (0x80000000) #define IS_FLATMEM() (vt->flags & FLATMEM) #define IS_DISCONTIGMEM() (vt->flags & DISCONTIGMEM) #define IS_SPARSEMEM() (vt->flags & SPARSEMEM) #define IS_SPARSEMEM_EX() (vt->flags & SPARSEMEM_EX) #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) #define PADDR_PRLEN (vt->paddr_prlen) struct datatype_member { /* minimal definition of a structure/union */ char *name; /* and possibly a member within it */ char *member; ulong type; long size; long member_offset; long member_size; int member_typecode; ulong flags; const char *tagname; /* tagname and value for enums */ long value; ulong vaddr; }; #define union_name struct_name struct list_data { /* generic structure used by do_list() to walk */ ulong flags; /* through linked lists in the kernel */ ulong start; long member_offset; long list_head_offset; ulong end; ulong searchfor; char **structname; int structname_args; char *header; ulong *list_ptr; int (*callback_func)(void *, void *); void *callback_data; long struct_list_offset; }; #define LIST_OFFSET_ENTERED (VERBOSE << 1) #define LIST_START_ENTERED (VERBOSE << 2) #define LIST_HEAD_FORMAT (VERBOSE << 3) #define LIST_HEAD_POINTER (VERBOSE << 4) #define RETURN_ON_DUPLICATE (VERBOSE << 5) #define RETURN_ON_LIST_ERROR (VERBOSE << 6) #define LIST_STRUCT_RADIX_10 (VERBOSE << 7) #define LIST_STRUCT_RADIX_16 (VERBOSE << 8) #define LIST_HEAD_REVERSE (VERBOSE << 9) #define LIST_ALLOCATE (VERBOSE << 10) #define LIST_CALLBACK (VERBOSE << 11) #define CALLBACK_RETURN (VERBOSE << 12) #define LIST_PARSE_MEMBER (VERBOSE << 13) #define LIST_READ_MEMBER (VERBOSE << 14) #define LIST_BRENT_ALGO (VERBOSE << 15) #define LIST_HEAD_OFFSET_ENTERED (VERBOSE << 16) struct tree_data { ulong flags; 
ulong start; long node_member_offset; char **structname; int structname_args; int count; }; #define TREE_ROOT_OFFSET_ENTERED (VERBOSE << 1) #define TREE_NODE_OFFSET_ENTERED (VERBOSE << 2) #define TREE_NODE_POINTER (VERBOSE << 3) #define TREE_POSITION_DISPLAY (VERBOSE << 4) #define TREE_STRUCT_RADIX_10 (VERBOSE << 5) #define TREE_STRUCT_RADIX_16 (VERBOSE << 6) #define TREE_PARSE_MEMBER (VERBOSE << 7) #define TREE_READ_MEMBER (VERBOSE << 8) #define TREE_LINEAR_ORDER (VERBOSE << 9) #define TREE_STRUCT_VERBOSE (VERBOSE << 10) #define ALIAS_RUNTIME (1) #define ALIAS_RCLOCAL (2) #define ALIAS_RCHOME (3) #define ALIAS_BUILTIN (4) struct alias_data { /* command alias storage */ struct alias_data *next; char *alias; int argcnt; int size; int origin; char *args[MAXARGS]; char argbuf[1]; }; struct rb_node { unsigned long rb_parent_color; #define RB_RED 0 #define RB_BLACK 1 struct rb_node *rb_right; struct rb_node *rb_left; }; struct rb_root { struct rb_node *rb_node; }; #define NUMBER_STACKFRAMES 4 #define SAVE_RETURN_ADDRESS(retaddr) \ { \ int i; \ int saved_stacks; \ \ saved_stacks = backtrace((void **)retaddr, NUMBER_STACKFRAMES); \ \ /* explicitely zero out the invalid addresses */ \ for (i = saved_stacks; i < NUMBER_STACKFRAMES; i++) \ retaddr[i] = 0; \ } #endif /* !GDB_COMMON */ #define SYMBOL_NAME_USED (0x1) #define MODULE_SYMBOL (0x2) #define IS_MODULE_SYMBOL(SYM) ((SYM)->flags & MODULE_SYMBOL) struct syment { ulong value; char *name; struct syment *val_hash_next; struct syment *name_hash_next; char type; unsigned char cnt; unsigned char flags; unsigned char pad2; }; #define NAMESPACE_INIT (1) #define NAMESPACE_REUSE (2) #define NAMESPACE_FREE (3) #define NAMESPACE_INSTALL (4) #define NAMESPACE_COMPLETE (5) struct symbol_namespace { char *address; size_t size; long index; long cnt; }; struct downsized { char *name; struct downsized *next; }; #define SYMVAL_HASH (512) #define SYMVAL_HASH_INDEX(vaddr) \ (((vaddr) >> machdep->pageshift) % SYMVAL_HASH) #define 
SYMNAME_HASH (512) #define PATCH_KERNEL_SYMBOLS_START ((char *)(1)) #define PATCH_KERNEL_SYMBOLS_STOP ((char *)(2)) #ifndef GDB_COMMON struct symbol_table_data { ulong flags; #ifdef GDB_5_3 struct _bfd *bfd; #else struct bfd *bfd; #endif struct sec *sections; struct syment *symtable; struct syment *symend; long symcnt; ulong syment_size; struct symval_hash_chain { struct syment *val_hash_head; struct syment *val_hash_last; } symval_hash[SYMVAL_HASH]; double val_hash_searches; double val_hash_iterations; struct syment *symname_hash[SYMNAME_HASH]; struct symbol_namespace kernel_namespace; struct syment *ext_module_symtable; struct syment *ext_module_symend; long ext_module_symcnt; struct symbol_namespace ext_module_namespace; int mods_installed; struct load_module *current; struct load_module *load_modules; off_t dwarf_eh_frame_file_offset; ulong dwarf_eh_frame_size; ulong first_ksymbol; ulong __per_cpu_start; ulong __per_cpu_end; off_t dwarf_debug_frame_file_offset; ulong dwarf_debug_frame_size; ulong first_section_start; ulong last_section_end; ulong _stext_vmlinux; struct downsized downsized; ulong divide_error_vmlinux; ulong idt_table_vmlinux; ulong saved_command_line_vmlinux; ulong pti_init_vmlinux; ulong kaiser_init_vmlinux; int kernel_symbol_type; ulong linux_banner_vmlinux; struct syment *mod_symname_hash[SYMNAME_HASH]; }; /* flags for st */ #define KERNEL_SYMS (0x1) #define MODULE_SYMS (0x2) #define LOAD_MODULE_SYMS (0x4) #define INSMOD_BUILTIN (0x8) #define GDB_SYMS_PATCHED (0x10) #define GDB_PATCHED() (st->flags & GDB_SYMS_PATCHED) #define NO_SEC_LOAD (0x20) #define NO_SEC_CONTENTS (0x40) #define FORCE_DEBUGINFO (0x80) #define CRC_MATCHES (0x100) #define ADD_SYMBOL_FILE (0x200) #define USE_OLD_ADD_SYM (0x400) #define PERCPU_SYMS (0x800) #define MODSECT_UNKNOWN (0x1000) #define MODSECT_V1 (0x2000) #define MODSECT_V2 (0x4000) #define MODSECT_V3 (0x8000) #define MODSECT_VMASK (MODSECT_V1|MODSECT_V2|MODSECT_V3) #define NO_STRIP (0x10000) #define 
NO_LINE_NUMBERS() ((st->flags & GDB_SYMS_PATCHED) && !(kt->flags2 & KASLR)) #endif /* !GDB_COMMON */ #define ALL_MODULES (0) #define MAX_MOD_NAMELIST (256) #define MAX_MOD_NAME (64) #define MAX_MOD_SEC_NAME (64) #define MOD_EXT_SYMS (0x1) #define MOD_LOAD_SYMS (0x2) #define MOD_REMOTE (0x4) #define MOD_KALLSYMS (0x8) #define MOD_INITRD (0x10) #define MOD_NOPATCH (0x20) #define MOD_INIT (0x40) #define MOD_DO_READNOW (0x80) #define SEC_FOUND (0x10000) struct mod_section_data { #if defined(GDB_5_3) || defined(GDB_6_0) struct sec *section; #else struct bfd_section *section; #endif char name[MAX_MOD_SEC_NAME]; ulong offset; ulong size; int priority; int flags; ulong addr; }; /* Emulate enum mod_mem_type in include/linux/module.h */ #define MOD_TEXT (0) #define MOD_DATA (1) #define MOD_RODATA (2) #define MOD_RO_AFTER_INIT (3) #define MOD_INIT_TEXT (4) #define MOD_INIT_DATA (5) #define MOD_INIT_RODATA (6) #define MOD_MEM_NUM_TYPES (7) #define MOD_INVALID (-1) struct module_memory { ulong base; uint size; }; struct load_module { ulong mod_base; ulong module_struct; long mod_size; char mod_namelist[MAX_MOD_NAMELIST]; char mod_name[MAX_MOD_NAME]; ulong mod_flags; struct syment *mod_symtable; struct syment *mod_symend; long mod_ext_symcnt; struct syment *mod_ext_symtable; struct syment *mod_ext_symend; long mod_load_symcnt; struct syment *mod_load_symtable; struct syment *mod_load_symend; long mod_symalloc; struct symbol_namespace mod_load_namespace; ulong mod_size_of_struct; ulong mod_text_start; ulong mod_etext_guess; ulong mod_rodata_start; ulong mod_data_start; ulong mod_bss_start; int mod_sections; struct mod_section_data *mod_section_data; ulong mod_init_text_size; ulong mod_init_module_ptr; ulong mod_init_size; struct syment *mod_init_symtable; struct syment *mod_init_symend; ulong mod_percpu; ulong mod_percpu_size; struct objfile *loaded_objfile; /* For 6.4 module_memory */ struct module_memory mem[MOD_MEM_NUM_TYPES]; struct syment **symtable; struct syment **symend; 
struct syment *ext_symtable[MOD_MEM_NUM_TYPES]; struct syment *ext_symend[MOD_MEM_NUM_TYPES]; struct syment *load_symtable[MOD_MEM_NUM_TYPES]; struct syment *load_symend[MOD_MEM_NUM_TYPES]; }; #define IN_MODULE(A,L) (in_module_range(A, L, MOD_TEXT, MOD_RO_AFTER_INIT) != MOD_INVALID) #define IN_MODULE_INIT(A,L) (in_module_range(A, L, MOD_INIT_TEXT, MOD_INIT_RODATA) != MOD_INVALID) #define IN_MODULE_TEXT(A,L) (in_module_range(A, L, MOD_TEXT, MOD_TEXT) == MOD_TEXT || \ in_module_range(A, L, MOD_INIT_TEXT, MOD_INIT_TEXT) == MOD_INIT_TEXT) #define IN_MODULE_PERCPU(A,L) \ (((ulong)(A) >= (L)->mod_percpu) && ((ulong)(A) < ((L)->mod_percpu+(L)->mod_percpu_size))) #define MODULE_PERCPU_SYMS_LOADED(L) ((L)->mod_percpu && (L)->mod_percpu_size) #define for_each_mod_mem_type(type) \ for ((type) = MOD_TEXT; (type) < MOD_MEM_NUM_TYPES; (type)++) #ifndef GDB_COMMON #define KVADDR (0x1) #define UVADDR (0x2) #define PHYSADDR (0x4) #define XENMACHADDR (0x8) #define FILEADDR (0x10) #define AMBIGUOUS (~0) #define USE_USER_PGD (UVADDR << 2) #define VERIFY_ADDR (0x8) /* vm_area_dump() flags -- must follow */ #define PRINT_INODES (0x10) /* KVADDR, UVADDR, and PHYSADDR */ #define PRINT_MM_STRUCT (0x20) #define PRINT_VMA_STRUCTS (0x40) #define PRINT_SINGLE_VMA (0x80) #define PRINT_RADIX_10 (0x100) #define PRINT_RADIX_16 (0x200) #define PRINT_NRPAGES (0x400) #define MIN_PAGE_SIZE (4096) #define PTOB(X) ((ulonglong)(X) << machdep->pageshift) #define BTOP(X) ((ulonglong)(X) >> machdep->pageshift) #define PAGESIZE() (machdep->pagesize) #define PAGESHIFT() (machdep->pageshift) #define PAGEOFFSET(X) (((ulong)(X)) & machdep->pageoffset) #define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PHYSPAGEBASE(X) (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask) #define IS_ZEROPAGE(paddr) ((paddr) == vt->zero_paddr || \ (paddr) == vt->huge_zero_paddr) /* * Sparse memory stuff * These must follow the definitions in the kernel mmzone.h */ #define SECTION_SIZE_BITS() 
(machdep->section_size_bits) #define MAX_PHYSMEM_BITS() (machdep->max_physmem_bits) #define SECTIONS_SHIFT() (MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS()) #define PA_SECTION_SHIFT() (SECTION_SIZE_BITS()) #define PFN_SECTION_SHIFT() (SECTION_SIZE_BITS() - PAGESHIFT()) #define NR_MEM_SECTIONS() (1UL << SECTIONS_SHIFT()) #define PAGES_PER_SECTION() (1UL << PFN_SECTION_SHIFT()) #define PAGE_SECTION_MASK() (~(PAGES_PER_SECTION()-1)) #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT()) #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT()) #define SECTIONS_PER_ROOT() (machdep->sections_per_root) /* CONFIG_SPARSEMEM_EXTREME */ #define _SECTIONS_PER_ROOT_EXTREME() (PAGESIZE() / SIZE(mem_section)) /* !CONFIG_SPARSEMEM_EXTREME */ #define _SECTIONS_PER_ROOT() (1) #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT()) #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define NR_SECTION_ROOTS() (DIV_ROUND_UP(NR_MEM_SECTIONS(), SECTIONS_PER_ROOT())) #define SECTION_ROOT_MASK() (SECTIONS_PER_ROOT() - 1) struct QEMUCPUSegment { uint32_t selector; uint32_t limit; uint32_t flags; uint32_t pad; uint64_t base; }; typedef struct QEMUCPUSegment QEMUCPUSegment; struct QEMUCPUState { uint32_t version; uint32_t size; uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp; uint64_t r8, r9, r10, r11, r12, r13, r14, r15; uint64_t rip, rflags; QEMUCPUSegment cs, ds, es, fs, gs, ss; QEMUCPUSegment ldt, tr, gdt, idt; uint64_t cr[5]; }; typedef struct QEMUCPUState QEMUCPUState; /* * Machine specific stuff */ #ifdef ARM #define _32BIT_ #define MACHINE_TYPE "ARM" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) \ ((unsigned long)(X)-(machdep->machspec->phys_base)+(machdep->kvbase)) #define VTOP(X) \ ((unsigned long)(X)-(machdep->kvbase)+(machdep->machspec->phys_base)) #define IS_VMALLOC_ADDR(X) arm_is_vmalloc_addr((ulong)(X)) #define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define 
MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define PGDIR_SHIFT (21) #define PTRS_PER_PTE (512) #define PTRS_PER_PGD (2048) #define PGD_OFFSET(vaddr) ((vaddr) >> PGDIR_SHIFT) #define PTE_OFFSET(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) #define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 0 : 2) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS 32 /*add for LPAE*/ typedef unsigned long long u64; typedef signed int s32; typedef u64 pgd_t; typedef u64 pmd_t; typedef u64 pte_t; #define PMDSIZE() (PAGESIZE()) #define LPAE_PGDIR_SHIFT (30) #define LPAE_PMDIR_SHIFT (21) #define LPAE_PGD_OFFSET(vaddr) ((vaddr) >> LPAE_PGDIR_SHIFT) #define LPAE_PMD_OFFSET(vaddr) (((vaddr) >> LPAE_PMDIR_SHIFT) & \ ((1<<(LPAE_PGDIR_SHIFT-LPAE_PMDIR_SHIFT))-1)) #define _SECTION_SIZE_BITS_LPAE 28 #define _MAX_PHYSMEM_BITS_LPAE 36 /* * #define PTRS_PER_PTE 512 * #define PTRS_PER_PMD 512 * #define PTRS_PER_PGD 4 * */ #define LPAE_PGDIR_SIZE() 32 #define LPAE_PGDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PGDIR_SIZE() - 1)) #define LPAE_PMDIR_SIZE() 4096 #define LPAE_PMDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PMDIR_SIZE() - 1)) #define LPAE_PTEDIR_SIZE() 4096 #define LPAE_PTEDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PTEDIR_SIZE() - 1)) /*section size for LPAE is 2MiB*/ #define LPAE_SECTION_PAGE_MASK (~((MEGABYTES(2))-1)) #define _PHYSICAL_MASK_LPAE ((1ULL << _MAX_PHYSMEM_BITS_LPAE) - 1) #define PAGE_BASE_MASK ((u64)((s32)machdep->pagemask & _PHYSICAL_MASK_LPAE)) #define LPAE_PAGEBASE(X) 
(((ulonglong)(X)) & PAGE_BASE_MASK) #define LPAE_VTOP(X) \ ((unsigned long long)(unsigned long)(X) - \ (machdep->kvbase) + (machdep->machspec->phys_base)) #define IS_LAST_PGD_READ_LPAE(pgd) ((pgd) == \ machdep->machspec->last_pgd_read_lpae) #define IS_LAST_PMD_READ_LPAE(pmd) ((pmd) == \ machdep->machspec->last_pmd_read_lpae) #define IS_LAST_PTBL_READ_LPAE(ptbl) ((ptbl) == \ machdep->machspec->last_ptbl_read_lpae) #define FILL_PGD_LPAE(PGD, TYPE, SIZE) \ if (!IS_LAST_PGD_READ_LPAE(PGD)) { \ readmem((ulonglong)(PGD), TYPE, machdep->pgd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pgd_read_lpae \ = (ulonglong)(PGD); \ } #define FILL_PMD_LPAE(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ_LPAE(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pmd_read_lpae \ = (ulonglong)(PMD); \ } #define FILL_PTBL_LPAE(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ_LPAE(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->machspec->last_ptbl_read_lpae \ = (ulonglong)(PTBL); \ } #endif /* ARM */ #ifndef EM_AARCH64 #define EM_AARCH64 183 #endif #ifdef ARM64 #define _64BIT_ #define MACHINE_TYPE "ARM64" #define USERSPACE_TOP (machdep->machspec->userspace_top) #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define PTOV(X) arm64_PTOV((ulong)(X)) #define VTOP(X) arm64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) arm64_IS_VMALLOC_ADDR((ulong)(X)) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) /* * 48-bit physical address supported. 
*/
#define PHYS_MASK_SHIFT (48)
#define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1)
typedef signed int s32;
/*
 * 3-levels / 4K pages
 * 39-bit VA
 *
 * In each geometry below, PTRS_PER_PGD is derived from the VA-bit split:
 * (1 << (VA_BITS - PGDIR_SHIFT)) top-level entries; lower levels use the
 * full page worth of 8-byte entries (512 for 4K pages).
 */
#define PTRS_PER_PGD_L3_4K ((1UL) << (39 - 30))
#define PTRS_PER_PMD_L3_4K (512)
#define PTRS_PER_PTE_L3_4K (512)
#define PGDIR_SHIFT_L3_4K (30)
#define PGDIR_SIZE_L3_4K ((1UL) << PGDIR_SHIFT_L3_4K)
#define PGDIR_MASK_L3_4K (~(PGDIR_SIZE_L3_4K-1))
#define PMD_SHIFT_L3_4K (21)
#define PMD_SIZE_L3_4K (1UL << PMD_SHIFT_L3_4K)
#define PMD_MASK_L3_4K (~(PMD_SIZE_L3_4K-1))
/*
 * 4-levels / 4K pages
 * 48-bit VA
 */
#define PTRS_PER_PGD_L4_4K ((1UL) << (48 - 39))
#define PTRS_PER_PUD_L4_4K (512)
#define PTRS_PER_PMD_L4_4K (512)
#define PTRS_PER_PTE_L4_4K (512)
#define PGDIR_SHIFT_L4_4K (39)
#define PGDIR_SIZE_L4_4K ((1UL) << PGDIR_SHIFT_L4_4K)
#define PGDIR_MASK_L4_4K (~(PGDIR_SIZE_L4_4K-1))
#define PUD_SHIFT_L4_4K (30)
#define PUD_SIZE_L4_4K ((1UL) << PUD_SHIFT_L4_4K)
#define PUD_MASK_L4_4K (~(PUD_SIZE_L4_4K-1))
#define PMD_SHIFT_L4_4K (21)
#define PMD_SIZE_L4_4K (1UL << PMD_SHIFT_L4_4K)
#define PMD_MASK_L4_4K (~(PMD_SIZE_L4_4K-1))
/* byte size of a 48-bit-VA top-level table: 512 entries * 8 bytes */
#define PGDIR_SIZE_48VA (1UL << ((48 - 39) + 3))
#define PGDIR_MASK_48VA (~(PGDIR_SIZE_48VA - 1))
#define PGDIR_OFFSET_48VA(X) (((ulong)(X)) & (PGDIR_SIZE_48VA - 1))
/*
 * 2-levels / 16K pages
 * 36-bit VA
 */
#define PTRS_PER_PGD_L2_16K ((1UL) << (36 - 25))
#define PTRS_PER_PTE_L2_16K (2048)
#define PGDIR_SHIFT_L2_16K (25)
#define PGDIR_SIZE_L2_16K ((1UL) << PGDIR_SHIFT_L2_16K)
#define PGDIR_MASK_L2_16K (~(PGDIR_SIZE_L2_16K-1))
/* offset within the (runtime-sized) pgd table; entry size is 8 bytes */
#define PGDIR_OFFSET_L2_16K(X) (((ulong)(X)) & ((machdep->ptrs_per_pgd * 8) - 1))
/*
 * 3-levels / 16K pages
 * 47-bit VA
 */
#define PTRS_PER_PGD_L3_16K ((1UL) << (47 - 36))
#define PTRS_PER_PMD_L3_16K (2048)
#define PTRS_PER_PTE_L3_16K (2048)
#define PGDIR_SHIFT_L3_16K (36)
#define PGDIR_SIZE_L3_16K ((1UL) << PGDIR_SHIFT_L3_16K)
#define PGDIR_MASK_L3_16K (~(PGDIR_SIZE_L3_16K-1))
#define PMD_SHIFT_L3_16K (25)
#define PMD_SIZE_L3_16K (1UL << PMD_SHIFT_L3_16K)
#define PMD_MASK_L3_16K
(~(PMD_SIZE_L3_16K-1)) #define PGDIR_OFFSET_L3_16K(X) (((ulong)(X)) & ((machdep->ptrs_per_pgd * 8) - 1)) /* * 4-levels / 16K pages * 48-bit VA */ #define PTRS_PER_PGD_L4_16K ((1UL) << (48 - 47)) #define PTRS_PER_PUD_L4_16K (2048) #define PTRS_PER_PMD_L4_16K (2048) #define PTRS_PER_PTE_L4_16K (2048) #define PGDIR_SHIFT_L4_16K (47) #define PGDIR_SIZE_L4_16K ((1UL) << PGDIR_SHIFT_L4_16K) #define PGDIR_MASK_L4_16K (~(PGDIR_SIZE_L4_16K-1)) #define PUD_SHIFT_L4_16K (36) #define PUD_SIZE_L4_16K ((1UL) << PUD_SHIFT_L4_16K) #define PUD_MASK_L4_16K (~(PUD_SIZE_L4_16K-1)) #define PMD_SHIFT_L4_16K (25) #define PMD_SIZE_L4_16K (1UL << PMD_SHIFT_L4_16K) #define PMD_MASK_L4_16K (~(PMD_SIZE_L4_16K-1)) #define PGDIR_OFFSET_L4_16K(X) (((ulong)(X)) & ((machdep->ptrs_per_pgd * 8) - 1)) /* * 3-levels / 64K pages * 48-bit, 52-bit VA */ #define PTRS_PER_PGD_L3_64K_48 ((1UL) << (48 - 42)) #define PTRS_PER_PGD_L3_64K_52 ((1UL) << (52 - 42)) #define PTRS_PER_PMD_L3_64K (8192) #define PTRS_PER_PTE_L3_64K (8192) #define PGDIR_SHIFT_L3_64K (42) #define PGDIR_SIZE_L3_64K ((1UL) << PGDIR_SHIFT_L3_64K) #define PGDIR_MASK_L3_64K (~(PGDIR_SIZE_L3_64K-1)) #define PMD_SHIFT_L3_64K (29) #define PMD_SIZE_L3_64K (1UL << PMD_SHIFT_L3_64K) #define PMD_MASK_L3_64K (~(PMD_SIZE_L3_64K-1)) #define PGDIR_OFFSET_L3_64K(X) (((ulong)(X)) & ((machdep->ptrs_per_pgd * 8) - 1)) /* * 2-levels / 64K pages * 42-bit VA */ #define PTRS_PER_PGD_L2_64K ((1UL) << (42 - 29)) #define PTRS_PER_PTE_L2_64K (8192) #define PGDIR_SHIFT_L2_64K (29) #define PGDIR_SIZE_L2_64K ((1UL) << PGDIR_SHIFT_L2_64K) #define PGDIR_MASK_L2_64K (~(PGDIR_SIZE_L2_64K-1)) /* * Software defined PTE bits definition. * (arch/arm64/include/asm/pgtable.h) */ #define PTE_VALID (1UL << 0) #define PTE_DIRTY (1UL << 55) #define PTE_SPECIAL (1UL << 56) /* * Level 3 descriptor (PTE). 
* (arch/arm64/include/asm/pgtable-hwdef.h) */ #define PTE_TYPE_MASK (3UL << 0) #define PTE_TYPE_FAULT (0UL << 0) #define PTE_TYPE_PAGE (3UL << 0) #define PTE_USER (1UL << 6) /* AP[1] */ #define PTE_RDONLY (1UL << 7) /* AP[2] */ #define PTE_SHARED (3UL << 8) /* SH[1:0], inner shareable */ #define PTE_AF (1UL << 10) /* Access Flag */ #define PTE_NG (1UL << 11) /* nG */ #define PTE_PXN (1UL << 53) /* Privileged XN */ #define PTE_UXN (1UL << 54) /* User XN */ #define __swp_type(x) arm64_swp_type(x) #define __swp_offset(x) arm64_swp_offset(x) #define SWP_TYPE(x) __swp_type(x) #define SWP_OFFSET(x) __swp_offset(x) #define KSYMS_START (0x1) #define PHYS_OFFSET (0x2) #define VM_L2_64K (0x4) #define VM_L3_64K (0x8) #define VM_L3_4K (0x10) #define KDUMP_ENABLED (0x20) #define IRQ_STACKS (0x40) #define NEW_VMEMMAP (0x80) #define VM_L4_4K (0x100) #define UNW_4_14 (0x200) #define FLIPPED_VM (0x400) #define HAS_PHYSVIRT_OFFSET (0x800) #define OVERFLOW_STACKS (0x1000) #define ARM64_MTE (0x2000) #define VM_L3_16K (0x4000) #define VM_L2_16K (0x8000) #define VM_L4_16K (0x10000) /* * Get kimage_voffset from /dev/crash */ #define DEV_CRASH_ARCH_DATA _IOR('c', 1, unsigned long) /* * sources: Documentation/arm64/memory.txt * arch/arm64/include/asm/memory.h * arch/arm64/include/asm/pgtable.h */ #define ARM64_VA_START ((0xffffffffffffffffUL) \ << machdep->machspec->VA_BITS) #define _VA_START(va) ((0xffffffffffffffffUL) - \ ((1UL) << ((va) - 1)) + 1) #define TEXT_OFFSET_MASK (~((MEGABYTES(2UL))-1)) #define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \ << (machdep->machspec->VA_BITS - 1)) /* kernels >= v5.4 the kernel VA space is flipped */ #define ARM64_FLIP_PAGE_OFFSET (-(1UL) << machdep->machspec->VA_BITS) #define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS) #define ARM64_USERSPACE_TOP_ACTUAL ((1UL) << machdep->machspec->VA_BITS_ACTUAL) /* only used for v4.6 or later */ #define ARM64_MODULES_VSIZE MEGABYTES(128) #define ARM64_KASAN_SHADOW_SIZE (1UL << 
(machdep->machspec->VA_BITS - 3)) /* * The following 3 definitions are the original values, but are obsolete * for 3.17 and later kernels because they are now build-time calculations. * They all depend on the kernel's new VMEMMAP_SIZE value, which is dependent * upon the size of struct page. Accordingly, arm64_calc_virtual_memory_ranges() * determines their values at POST_GDB time. */ #define ARM64_VMALLOC_END (ARM64_PAGE_OFFSET - 0x400000000UL - KILOBYTES(64) - 1) #define ARM64_VMEMMAP_VADDR ((ARM64_VMALLOC_END+1) + KILOBYTES(64)) #define ARM64_VMEMMAP_END (ARM64_VMEMMAP_VADDR + GIGABYTES(8UL) - 1) #define ARM64_STACK_SIZE (16384) #define ARM64_IRQ_STACK_SIZE ARM64_STACK_SIZE #define ARM64_OVERFLOW_STACK_SIZE (4096) #define _SECTION_SIZE_BITS 30 #define _SECTION_SIZE_BITS_5_12 27 #define _SECTION_SIZE_BITS_5_12_64K 29 #define _MAX_PHYSMEM_BITS 40 #define _MAX_PHYSMEM_BITS_3_17 48 #define _MAX_PHYSMEM_BITS_52 52 typedef unsigned long long __u64; typedef unsigned long long u64; struct arm64_user_pt_regs { __u64 regs[31]; __u64 sp; __u64 pc; __u64 pstate; }; struct arm64_pt_regs { union { struct arm64_user_pt_regs user_regs; struct { u64 regs[31]; u64 sp; u64 pc; u64 pstate; }; }; u64 orig_x0; u64 syscallno; }; /* AArch32 CPSR bits */ #define PSR_MODE32_BIT 0x00000010 #define TIF_SIGPENDING (0) #define display_idt_table() \ error(FATAL, "-d option is not applicable to ARM64 architecture\n") struct machine_specific { ulong flags; ulong userspace_top; ulong page_offset; ulong vmalloc_start_addr; ulong vmalloc_end; ulong vmemmap_vaddr; ulong vmemmap_end; ulong modules_vaddr; ulong modules_end; ulong phys_offset; ulong __exception_text_start; ulong __exception_text_end; struct arm64_pt_regs *panic_task_regs; ulong PTE_PROT_NONE; ulong PTE_FILE; ulong VA_BITS; ulong __SWP_TYPE_BITS; ulong __SWP_TYPE_SHIFT; ulong __SWP_TYPE_MASK; ulong __SWP_OFFSET_BITS; ulong __SWP_OFFSET_SHIFT; ulong __SWP_OFFSET_MASK; ulong crash_kexec_start; ulong crash_kexec_end; ulong 
crash_save_cpu_start; ulong crash_save_cpu_end; ulong kernel_flags; ulong irq_stack_size; ulong *irq_stacks; char *irq_stackbuf; ulong __irqentry_text_start; ulong __irqentry_text_end; ulong overflow_stack_size; ulong *overflow_stacks; char *overflow_stackbuf; /* for exception vector code */ ulong exp_entry1_start; ulong exp_entry1_end; ulong exp_entry2_start; ulong exp_entry2_end; /* only needed for v4.6 or later kernel */ ulong kimage_voffset; ulong kimage_text; ulong kimage_end; ulong user_eframe_offset; /* for v4.14 or later */ ulong kern_eframe_offset; ulong machine_kexec_start; ulong machine_kexec_end; ulong VA_BITS_ACTUAL; ulong CONFIG_ARM64_VA_BITS; ulong VA_START; ulong CONFIG_ARM64_KERNELPACMASK; ulong physvirt_offset; ulong struct_page_size; ulong vmemmap; }; struct arm64_stackframe { unsigned long x19; unsigned long x20; unsigned long x21; unsigned long x22; unsigned long x23; unsigned long x24; unsigned long x25; unsigned long x26; unsigned long x27; unsigned long x28; unsigned long fp; unsigned long sp; unsigned long pc; }; #endif /* ARM64 */ #ifdef MIPS #define _32BIT_ #define MACHINE_TYPE "MIPS" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) ((unsigned long)(X) + 0x80000000lu) #define VTOP(X) ((unsigned long)(X) & 0x1ffffffflu) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) 
SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 1 : 2) #define _SECTION_SIZE_BITS 26 #define _MAX_PHYSMEM_BITS 32 #endif /* MIPS */ #ifdef MIPS64 #define _64BIT_ #define MACHINE_TYPE "MIPS64" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define IS_CKPHYS(X) (((X) >= 0xffffffff80000000lu) && \ ((X) < 0xffffffffc0000000lu)) #define IS_XKPHYS(X) (((X) >= 0x8000000000000000lu) && \ ((X) < 0xc000000000000000lu)) #define PTOV(X) ((ulong)(X) + 0x9800000000000000lu) #define VTOP(X) (IS_CKPHYS(X) ? ((ulong)(X) & 0x000000001ffffffflu) \ : ((ulong)(X) & 0x0000fffffffffffflu)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start && !IS_CKPHYS(X)) #define DEFAULT_MODULES_VADDR 0xffffffffc0000000lu #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define __SWP_TYPE_SHIFT 16 #define __SWP_TYPE_BITS 8 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 
1 : 2) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS 48 #endif /* MIPS64 */ #ifndef EM_RISCV #define EM_RISCV 243 #endif #ifdef RISCV64 #define _64BIT_ #define MACHINE_TYPE "RISCV64" typedef struct { ulong pgd; } pgd_t; typedef struct { ulong p4d; } p4d_t; typedef struct { ulong pud; } pud_t; typedef struct { ulong pmd; } pmd_t; typedef struct { ulong pte; } pte_t; typedef signed int s32; /* arch/riscv/include/asm/pgtable-64.h */ #define PGD_SHIFT_L3 (30) #define PGD_SHIFT_L4 (39) #define PGD_SHIFT_L5 (48) #define P4D_SHIFT (39) #define PUD_SHIFT (30) #define PMD_SHIFT (21) #define PTRS_PER_PGD (512) #define PTRS_PER_P4D (512) #define PTRS_PER_PUD (512) #define PTRS_PER_PMD (512) #define PTRS_PER_PTE (512) /* * Mask for bit 0~53(PROT and PPN) of PTE * 63 6261 60 54 53 10 9 8 7 6 5 4 3 2 1 0 * N PBMT Reserved P P N RSW D A G U X W R V */ #define PTE_PFN_PROT_MASK 0x3FFFFFFFFFFFFF /* * 3-levels / 4K pages * * sv39 * PGD | PMD | PTE | OFFSET | * 9 | 9 | 9 | 12 | */ #define pgd_index_l3_4k(addr) (((addr) >> PGD_SHIFT_L3) & (PTRS_PER_PGD - 1)) #define pmd_index_l3_4k(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pte_index_l3_4k(addr) (((addr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) /* * 4-levels / 4K pages * * sv48 * PGD | PUD | PMD | PTE | OFFSET | * 9 | 9 | 9 | 9 | 12 | */ #define pgd_index_l4_4k(addr) (((addr) >> PGD_SHIFT_L4) & (PTRS_PER_PGD - 1)) #define pud_index_l4_4k(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pmd_index_l4_4k(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pte_index_l4_4k(addr) (((addr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) /* * 5-levels / 4K pages * * sv57 * PGD | P4D | PUD | PMD | PTE | OFFSET | * 9 | 9 | 9 | 9 | 9 | 12 | */ #define pgd_index_l5_4k(addr) (((addr) >> PGD_SHIFT_L5) & (PTRS_PER_PGD - 1)) #define p4d_index_l5_4k(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) #define pud_index_l5_4k(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pmd_index_l5_4k(addr) (((addr) >> 
PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pte_index_l5_4k(addr) (((addr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) /* machdep->flags */ #define KSYMS_START (0x1) #define VM_L3_4K (0x2) #define VM_L3_2M (0x4) #define VM_L3_1G (0x8) #define VM_L4_4K (0x10) #define VM_L4_2M (0x20) #define VM_L4_1G (0x40) #define VM_L5_4K (0x80) #define VM_L5_2M (0x100) #define VM_L5_1G (0x200) #define IRQ_STACKS (0x400) #define OVERFLOW_STACKS (0x800) #define RISCV64_OVERFLOW_STACK_SIZE (1 << 12) #define VM_FLAGS (VM_L3_4K | VM_L3_2M | VM_L3_1G | \ VM_L4_4K | VM_L4_2M | VM_L4_1G | \ VM_L5_4K | VM_L5_2M | VM_L5_1G) /* * Direct memory mapping */ #define PTOV(X) \ (((unsigned long)(X)+(machdep->kvbase)) - machdep->machspec->phys_base) #define VTOP(X) ({ \ ulong _X = X; \ (THIS_KERNEL_VERSION >= LINUX(5,13,0) && \ (_X) >= machdep->machspec->kernel_link_addr) ? \ ((unsigned long)(_X)-(machdep->machspec->va_kernel_pa_offset)): \ (((unsigned long)(_X)-(machdep->kvbase)) + \ machdep->machspec->phys_base); \ }) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) /* * Stack size order */ #define THREAD_SIZE_ORDER 2 #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define IS_VMALLOC_ADDR(X) riscv64_IS_VMALLOC_ADDR((ulong)(X)) /* from arch/riscv/include/asm/pgtable.h */ #define __SWP_TYPE_SHIFT 6 #define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define 
__swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 1 : 2) /* from arch/riscv/include/asm/sparsemem.h */ #define _SECTION_SIZE_BITS 27 #define _MAX_PHYSMEM_BITS 56 /* 56-bit physical address supported */ #define PHYS_MASK_SHIFT _MAX_PHYSMEM_BITS #define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1) #define IS_LAST_P4D_READ(p4d) ((ulong)(p4d) == machdep->machspec->last_p4d_read) #define FILL_P4D(P4D, TYPE, SIZE) \ if (!IS_LAST_P4D_READ(P4D)) { \ readmem((ulonglong)((ulong)(P4D)), TYPE, machdep->machspec->p4d, \ SIZE, "p4d page", FAULT_ON_ERROR); \ machdep->machspec->last_p4d_read = (ulong)(P4D); \ } #endif /* RISCV64 */ /* fix compilation errors due to elf.h version. */ #ifndef EM_LOONGARCH #define EM_LOONGARCH 258 #endif #ifdef LOONGARCH64 #define _64BIT_ #define MACHINE_TYPE "LOONGARCH64" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define IS_XKPRANGE(X) (((X) >= 0x8000000000000000lu) && \ ((X) < 0xc000000000000000lu)) #define PTOV(X) ((ulong)(X) + 0x9000000000000000lu) #define VTOP(X) ((ulong)(X) & 0x0000fffffffffffflu) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define DEFAULT_MODULES_VADDR 0xffff800000000000lu #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define __SWP_TYPE_SHIFT 16 #define __SWP_TYPE_BITS 8 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (1) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS 48 #endif /* 
LOONGARCH64 */ #ifdef X86 #define _32BIT_ #define MACHINE_TYPE "X86" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define KVBASE_MASK (0x1ffffff) #define PGDIR_SHIFT_2LEVEL (22) #define PTRS_PER_PTE_2LEVEL (1024) #define PTRS_PER_PGD_2LEVEL (1024) #define PGDIR_SHIFT_3LEVEL (30) #define PTRS_PER_PTE_3LEVEL (512) #define PTRS_PER_PGD_3LEVEL (4) #define PMD_SHIFT (21) /* only used by PAE translators */ #define PTRS_PER_PMD (512) /* only used by PAE translators */ #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_4M 0x080 /* 4 MB page, Pentium+, if present.. */ #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ #define _PAGE_PROTNONE (machdep->machspec->page_protnone) #define _PAGE_NX (0x8000000000000000ULL) #define NONPAE_PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define NX_BIT_MASK (0x7fffffffffffffffULL) #define PAE_PAGEBASE(X) (((unsigned long long)(X)) & ((unsigned long long)machdep->pagemask) & NX_BIT_MASK) #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type_PAE(entry) (((entry) >> 32) & 0x1f) #define __swp_type_nonPAE(entry) (((entry) >> 1) & 0x1f) #define __swp_offset_PAE(entry) (((entry) >> 32) >> 5) #define __swp_offset_nonPAE(entry) ((entry) >> 8) #define __swp_type(entry) (machdep->flags & PAE ? \ __swp_type_PAE(entry) : __swp_type_nonPAE(entry)) #define __swp_offset(entry) (machdep->flags & PAE ? 
\ __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry)) #define TIF_SIGPENDING (2) // CONFIG_X86_PAE #define _SECTION_SIZE_BITS_PAE_ORIG 30 #define _SECTION_SIZE_BITS_PAE_2_6_26 29 #define _MAX_PHYSMEM_BITS_PAE 36 // !CONFIG_X86_PAE #define _SECTION_SIZE_BITS 26 #define _MAX_PHYSMEM_BITS 32 #define IS_LAST_PMD_READ_PAE(pmd) ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE) #define IS_LAST_PTBL_READ_PAE(ptbl) ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE) #define FILL_PMD_PAE(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ_PAE(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD); \ } #define FILL_PTBL_PAE(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ_PAE(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); \ } #endif /* X86 */ #ifdef X86_64 #define _64BIT_ #define MACHINE_TYPE "X86_64" #define USERSPACE_TOP (machdep->machspec->userspace_top) #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define __START_KERNEL_map 0xffffffff80000000UL #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define USERSPACE_TOP_ORIG 0x0000008000000000 #define PAGE_OFFSET_ORIG 0x0000010000000000 #define VMALLOC_START_ADDR_ORIG 0xffffff0000000000 #define VMALLOC_END_ORIG 0xffffff7fffffffff #define MODULES_VADDR_ORIG 0xffffffffa0000000 #define MODULES_END_ORIG 0xffffffffafffffff #define USERSPACE_TOP_2_6_11 0x0000800000000000 #define PAGE_OFFSET_2_6_11 0xffff810000000000 #define VMALLOC_START_ADDR_2_6_11 0xffffc20000000000 #define VMALLOC_END_2_6_11 
0xffffe1ffffffffff #define MODULES_VADDR_2_6_11 0xffffffff88000000 #define MODULES_END_2_6_11 0xfffffffffff00000 #define VMEMMAP_VADDR_2_6_24 0xffffe20000000000 #define VMEMMAP_END_2_6_24 0xffffe2ffffffffff #define MODULES_VADDR_2_6_26 0xffffffffa0000000 #define PAGE_OFFSET_2_6_27 0xffff880000000000 #define MODULES_END_2_6_27 0xffffffffff000000 #define USERSPACE_TOP_XEN 0x0000800000000000 #define PAGE_OFFSET_XEN 0xffff880000000000 #define VMALLOC_START_ADDR_XEN 0xffffc20000000000 #define VMALLOC_END_XEN 0xffffe1ffffffffff #define MODULES_VADDR_XEN 0xffffffff88000000 #define MODULES_END_XEN 0xfffffffffff00000 #define USERSPACE_TOP_XEN_RHEL4 0x0000008000000000 #define PAGE_OFFSET_XEN_RHEL4 0xffffff8000000000 #define VMALLOC_START_ADDR_XEN_RHEL4 0xffffff0000000000 #define VMALLOC_END_XEN_RHEL4 0xffffff7fffffffff #define MODULES_VADDR_XEN_RHEL4 0xffffffffa0000000 #define MODULES_END_XEN_RHEL4 0xffffffffafffffff #define VMALLOC_START_ADDR_2_6_31 0xffffc90000000000 #define VMALLOC_END_2_6_31 0xffffe8ffffffffff #define VMEMMAP_VADDR_2_6_31 0xffffea0000000000 #define VMEMMAP_END_2_6_31 0xffffeaffffffffff #define MODULES_VADDR_2_6_31 0xffffffffa0000000 #define MODULES_END_2_6_31 0xffffffffff000000 #define USERSPACE_TOP_5LEVEL 0x0100000000000000 #define PAGE_OFFSET_5LEVEL 0xff10000000000000 #define VMALLOC_START_ADDR_5LEVEL 0xffa0000000000000 #define VMALLOC_END_5LEVEL 0xffd1ffffffffffff #define MODULES_VADDR_5LEVEL 0xffffffffa0000000 #define MODULES_END_5LEVEL 0xffffffffff5fffff #define VMEMMAP_VADDR_5LEVEL 0xffd4000000000000 #define VMEMMAP_END_5LEVEL 0xffd5ffffffffffff #define PAGE_OFFSET_4LEVEL_4_20 0xffff888000000000 #define PAGE_OFFSET_5LEVEL_4_20 0xff11000000000000 #define VSYSCALL_START 0xffffffffff600000 #define VSYSCALL_END 0xffffffffff601000 #define CPU_ENTRY_AREA_START 0xfffffe0000000000 #define CPU_ENTRY_AREA_END 0xfffffe7fffffffff #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) x86_64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) 
x86_64_IS_VMALLOC_ADDR((ulong)(X)) /* * the default page table level for x86_64: * 4 level page tables */ #define PGDIR_SHIFT 39 #define PTRS_PER_PGD 512 #define PUD_SHIFT 30 #define PTRS_PER_PUD 512 #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 #define PTRS_PER_PTE 512 /* 5 level page */ #define PGDIR_SHIFT_5LEVEL 48 #define PTRS_PER_PGD_5LEVEL 512 #define P4D_SHIFT 39 #define PTRS_PER_P4D 512 #define __PGDIR_SHIFT (machdep->machspec->pgdir_shift) #define __PTRS_PER_PGD (machdep->machspec->ptrs_per_pgd) #define pgd_index(address) (((address) >> __PGDIR_SHIFT) & (__PTRS_PER_PGD-1)) #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define FILL_TOP_PGD() \ if (!(pc->flags & RUNTIME) || ACTIVE()) { \ FILL_PGD(vt->kernel_pgd[0], KVADDR, PAGESIZE()); \ } #define FILL_TOP_PGD_HYPER() \ unsigned long idle_pg_table = symbol_exists("idle_pg_table_4") ? \ symbol_value("idle_pg_table_4") : \ symbol_value("idle_pg_table"); \ FILL_PGD(idle_pg_table, KVADDR, PAGESIZE()); #define IS_LAST_P4D_READ(p4d) ((ulong)(p4d) == machdep->machspec->last_p4d_read) #define FILL_P4D(P4D, TYPE, SIZE) \ if (!IS_LAST_P4D_READ(P4D)) { \ readmem((ulonglong)((ulong)(P4D)), TYPE, machdep->machspec->p4d, \ SIZE, "p4d page", FAULT_ON_ERROR); \ machdep->machspec->last_p4d_read = (ulong)(P4D); \ } #define MAX_POSSIBLE_PHYSMEM_BITS 52 /* * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so * for safety, use the 2.6 values to generate it. 
*/ #define __PHYSICAL_MASK_SHIFT_XEN 52 #define __PHYSICAL_MASK_SHIFT_2_6 46 #define __PHYSICAL_MASK_SHIFT_5LEVEL 52 #define __PHYSICAL_MASK_SHIFT (machdep->machspec->physical_mask_shift) #define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) #define __VIRTUAL_MASK_SHIFT 48 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK ) #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 2MB page */ #define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry */ #define _PAGE_PROTNONE (machdep->machspec->page_protnone) #define _PAGE_NX (1UL<<_PAGE_BIT_NX) #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) x86_64_swp_type(entry) #define __swp_offset(entry) x86_64_swp_offset(entry) #define TIF_SIGPENDING (2) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define _CPU_PDA_READ2(CPU, BUFFER) \ ((readmem(symbol_value("_cpu_pda"), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (readmem(cpu_pda_addr + ((CPU) * sizeof(void *)), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (cpu_pda_addr) && \ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ "cpu_pda entry", RETURN_ON_ERROR))) #define _CPU_PDA_READ(CPU, BUFFER) \ ((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") + \ ((CPU) * sizeof(unsigned long)))))) && \ (readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ "cpu_pda entry", RETURN_ON_ERROR))) 
#define CPU_PDA_READ(CPU, BUFFER) \ (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ ((CPU) * SIZE(x8664_pda))))) && \ readmem(symbol_value("cpu_pda") + ((CPU) * SIZE(x8664_pda)), \ KVADDR, (BUFFER), SIZE(x8664_pda), "cpu_pda entry", \ RETURN_ON_ERROR)) #define VALID_LEVEL4_PGT_ADDR(X) \ (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) #define _SECTION_SIZE_BITS 27 #define _MAX_PHYSMEM_BITS 40 #define _MAX_PHYSMEM_BITS_2_6_26 44 #define _MAX_PHYSMEM_BITS_2_6_31 46 #define _MAX_PHYSMEM_BITS_5LEVEL 52 #endif /* X86_64 */ #ifdef ALPHA #define _64BIT_ #define MACHINE_TYPE "ALPHA" #define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define KSEG_BASE_48_BIT (0xffff800000000000) #define KSEG_BASE (0xfffffc0000000000) #define _PFN_MASK (0xFFFFFFFF00000000) #define VMALLOC_START (0xFFFFFE0000000000) #define MIN_SYMBOL_VALUE (KSEG_BASE_48_BIT) #define PGDIR_SHIFT (PAGESHIFT() + 2*(PAGESHIFT()-3)) #define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT()-3)) #define PTRS_PER_PAGE (1024) #define PTRS_PER_PGD (1UL << (PAGESHIFT()-3)) /* * OSF/1 PAL-code-imposed page table bits */ #define _PAGE_VALID 0x0001 #define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ #define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ #define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ #define _PAGE_ASM 0x0010 #define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ #define _PAGE_URE 0x0200 /* xxx */ #define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ #define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ /* .. and these are ours ... 
*/ #define _PAGE_DIRTY 0x20000 #define _PAGE_ACCESSED 0x40000 #define SWP_TYPE(entry) (((entry) >> 32) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 40) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #endif /* ALPHA */ #ifdef PPC #define _32BIT_ #define MACHINE_TYPE "PPC" #define PAGEBASE(X) ((X) & machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) /* Holds the platform specific info for page translation */ struct machine_specific { char *platform; /* page address translation bits */ int pte_size; int pte_rpn_shift; /* page flags */ ulong _page_present; ulong _page_user; ulong _page_rw; ulong _page_guarded; ulong _page_coherent; ulong _page_no_cache; ulong _page_writethru; ulong _page_dirty; ulong _page_accessed; ulong _page_hwwrite; ulong _page_shared; ulong _page_k_rw; /* platform special vtop */ int (*vtop_special)(ulong vaddr, physaddr_t *paddr, int verbose); void *mmu_special; }; /* machdep flags for ppc32 specific */ #define IS_PAE() (machdep->flags & PAE) #define IS_BOOKE() (machdep->flags & CPU_BOOKE) /* Page translation bits */ #define PPC_PLATFORM (machdep->machspec->platform) #define PTE_SIZE (machdep->machspec->pte_size) #define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) #define PAGE_SHIFT (12) #define PTE_T_LOG2 (ffs(PTE_SIZE) - 1) #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) #define PTRS_PER_PTE (1 << PTE_SHIFT) /* special vtop */ #define VTOP_SPECIAL (machdep->machspec->vtop_special) #define MMU_SPECIAL (machdep->machspec->mmu_special) /* PFN shifts */ #define BOOKE3E_PTE_RPN_SHIFT (24) /* PAGE flags */ #define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ #define _PAGE_USER 
(machdep->machspec->_page_user) /* matches one of the PP bits */ #define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ #define _PAGE_GUARDED (machdep->machspec->_page_guarded) #define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) #define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ #define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ #define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ #define _PAGE_HWWRITE (machdep->machspec->_page_hwwrite) /* software: _PAGE_RW & _PAGE_DIRTY */ #define _PAGE_SHARED (machdep->machspec->_page_shared) #define _PAGE_K_RW (machdep->machspec->_page_k_rw) /* privilege only write access allowed */ /* Default values for PAGE flags */ #define DEFAULT_PAGE_PRESENT 0x001 #define DEFAULT_PAGE_USER 0x002 #define DEFAULT_PAGE_RW 0x004 #define DEFAULT_PAGE_GUARDED 0x008 #define DEFAULT_PAGE_COHERENT 0x010 #define DEFAULT_PAGE_NO_CACHE 0x020 #define DEFAULT_PAGE_WRITETHRU 0x040 #define DEFAULT_PAGE_DIRTY 0x080 #define DEFAULT_PAGE_ACCESSED 0x100 #define DEFAULT_PAGE_HWWRITE 0x200 #define DEFAULT_PAGE_SHARED 0 /* PPC44x PAGE flags: Values from kernel asm/pte-44x.h */ #define PPC44x_PAGE_PRESENT 0x001 #define PPC44x_PAGE_RW 0x002 #define PPC44x_PAGE_ACCESSED 0x008 #define PPC44x_PAGE_DIRTY 0x010 #define PPC44x_PAGE_USER 0x040 #define PPC44x_PAGE_GUARDED 0x100 #define PPC44x_PAGE_COHERENT 0x200 #define PPC44x_PAGE_NO_CACHE 0x400 #define PPC44x_PAGE_WRITETHRU 0x800 #define PPC44x_PAGE_HWWRITE 0 #define PPC44x_PAGE_SHARED 0 /* BOOK3E */ #define BOOK3E_PAGE_PRESENT 0x000001 #define BOOK3E_PAGE_BAP_SR 0x000004 #define BOOK3E_PAGE_BAP_UR 0x000008 /* User Readable */ #define BOOK3E_PAGE_BAP_SW 0x000010 #define BOOK3E_PAGE_BAP_UW 0x000020 /* User Writable */ #define BOOK3E_PAGE_DIRTY 0x001000 #define 
BOOK3E_PAGE_ACCESSED 0x040000 #define BOOK3E_PAGE_GUARDED 0x100000 #define BOOK3E_PAGE_COHERENT 0x200000 #define BOOK3E_PAGE_NO_CACHE 0x400000 #define BOOK3E_PAGE_WRITETHRU 0x800000 #define BOOK3E_PAGE_HWWRITE 0 #define BOOK3E_PAGE_SHARED 0 #define BOOK3E_PAGE_USER (BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_BAP_UR) #define BOOK3E_PAGE_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_UW) #define BOOK3E_PAGE_KERNEL_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_DIRTY) /* FSL BOOKE */ #define FSL_BOOKE_PAGE_PRESENT 0x00001 #define FSL_BOOKE_PAGE_USER 0x00002 #define FSL_BOOKE_PAGE_RW 0x00004 #define FSL_BOOKE_PAGE_DIRTY 0x00008 #define FSL_BOOKE_PAGE_ACCESSED 0x00020 #define FSL_BOOKE_PAGE_GUARDED 0x00080 #define FSL_BOOKE_PAGE_COHERENT 0x00100 #define FSL_BOOKE_PAGE_NO_CACHE 0x00200 #define FSL_BOOKE_PAGE_WRITETHRU 0x00400 #define FSL_BOOKE_PAGE_HWWRITE 0 #define FSL_BOOKE_PAGE_SHARED 0 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 
1 : 2) #define _SECTION_SIZE_BITS 24 #define _MAX_PHYSMEM_BITS 44 #define STACK_FRAME_OVERHEAD 16 #define STACK_FRAME_LR_SAVE (sizeof(ulong)) #define STACK_FRAME_MARKER (2 * sizeof(ulong)) #define STACK_FRAME_REGS_MARKER 0x72656773 #define PPC_STACK_SIZE 8192 #endif /* PPC */ #ifdef IA64 #define _64BIT_ #define MACHINE_TYPE "IA64" #define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define REGION_SHIFT (61) #define VADDR_REGION(X) ((ulong)(X) >> REGION_SHIFT) #define KERNEL_CACHED_REGION (7) #define KERNEL_UNCACHED_REGION (6) #define KERNEL_VMALLOC_REGION (5) #define USER_STACK_REGION (4) #define USER_DATA_REGION (3) #define USER_TEXT_REGION (2) #define USER_SHMEM_REGION (1) #define USER_IA32_EMUL_REGION (0) #define KERNEL_VMALLOC_BASE ((ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT) #define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) #define KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) #define _SECTION_SIZE_BITS 30 #define _MAX_PHYSMEM_BITS 50 /* * As of 2.6, these are no longer straight forward. */ #define PTOV(X) ia64_PTOV((ulong)(X)) #define VTOP(X) ia64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) ia64_IS_VMALLOC_ADDR((ulong)(X)) #define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) #define __IA64_UL(x) ((unsigned long)(x)) #define IA64_MAX_PHYS_BITS (50) /* max # of phys address bits (architected) */ /* * How many pointers will a page table level hold expressed in shift */ #define PTRS_PER_PTD_SHIFT (PAGESHIFT()-3) /* * Definitions for fourth level: */ #define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT)) /* * Definitions for third level: * * PMD_SHIFT determines the size of the area a third-level page table * can map. 
*/ #define PMD_SHIFT (PAGESHIFT() + (PTRS_PER_PTD_SHIFT)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT)) /* * PUD_SHIFT determines the size of the area a second-level page table * can map */ #define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT)) /* * Definitions for first level: * * PGDIR_SHIFT determines what a first-level page table entry can map. */ #define PGDIR_SHIFT_4L (PUD_SHIFT + (PTRS_PER_PTD_SHIFT)) #define PGDIR_SHIFT_3L (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) /* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */ #define PGDIR_SHIFT PGDIR_SHIFT_4L #define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT #define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) #define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ #define FIRST_USER_ADDRESS 0 /* * First, define the various bits in a PTE. Note that the PTE format * matches the VHPT short format, the first doubleword of the VHPT long format, and the first doubleword of the TLB insertion format.
*/ #define _PAGE_P (1 << 0) /* page present bit */ #define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */ #define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */ #define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */ #define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */ #define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */ #define _PAGE_MA_MASK (0x7 << 2) #define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */ #define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */ #define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */ #define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */ #define _PAGE_PL_MASK (3 << 7) #define _PAGE_AR_R (0 << 9) /* read only */ #define _PAGE_AR_RX (1 << 9) /* read & execute */ #define _PAGE_AR_RW (2 << 9) /* read & write */ #define _PAGE_AR_RWX (3 << 9) /* read, write & execute */ #define _PAGE_AR_R_RW (4 << 9) /* read / read & write */ #define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */ #define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */ #define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */ #define _PAGE_AR_MASK (7 << 9) #define _PAGE_AR_SHIFT 9 #define _PAGE_A (1 << 5) /* page accessed bit */ #define _PAGE_D (1 << 6) /* page dirty bit */ #define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ #define _PAGE_PROTNONE (__IA64_UL(1) << 63) #define _PFN_MASK _PAGE_PPN_MASK #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D) #define _PAGE_SIZE_4K 12 #define _PAGE_SIZE_8K 13 #define _PAGE_SIZE_16K 14 #define _PAGE_SIZE_64K 16 #define _PAGE_SIZE_256K 18 #define _PAGE_SIZE_1M 20 #define _PAGE_SIZE_4M 22 #define _PAGE_SIZE_16M 24 #define _PAGE_SIZE_64M 26 #define _PAGE_SIZE_256M 28 #define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB #define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB #define __DIRTY_BITS _PAGE_ED | 
__DIRTY_BITS_NO_ED #define EFI_PAGE_SHIFT (12) /* * NOTE: #include'ing creates too many compiler problems, so * this stuff is hardwired here; it's probably etched in stone somewhere. */ struct efi_memory_desc_t { uint32_t type; uint32_t pad; uint64_t phys_addr; uint64_t virt_addr; uint64_t num_pages; uint64_t attribute; } desc; /* Memory types: */ #define EFI_RESERVED_TYPE 0 #define EFI_LOADER_CODE 1 #define EFI_LOADER_DATA 2 #define EFI_BOOT_SERVICES_CODE 3 #define EFI_BOOT_SERVICES_DATA 4 #define EFI_RUNTIME_SERVICES_CODE 5 #define EFI_RUNTIME_SERVICES_DATA 6 #define EFI_CONVENTIONAL_MEMORY 7 #define EFI_UNUSABLE_MEMORY 8 #define EFI_ACPI_RECLAIM_MEMORY 9 #define EFI_ACPI_MEMORY_NVS 10 #define EFI_MEMORY_MAPPED_IO 11 #define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 #define EFI_PAL_CODE 13 #define EFI_MAX_MEMORY_TYPE 14 /* Attribute values: */ #define EFI_MEMORY_UC 0x0000000000000001 /* uncached */ #define EFI_MEMORY_WC 0x0000000000000002 /* write-coalescing */ #define EFI_MEMORY_WT 0x0000000000000004 /* write-through */ #define EFI_MEMORY_WB 0x0000000000000008 /* write-back */ #define EFI_MEMORY_WP 0x0000000000001000 /* write-protect */ #define EFI_MEMORY_RP 0x0000000000002000 /* read-protect */ #define EFI_MEMORY_XP 0x0000000000004000 /* execute-protect */ #define EFI_MEMORY_RUNTIME 0x8000000000000000 /* range requires runtime mapping */ #define SWP_TYPE(entry) (((entry) >> 1) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 9) #define __swp_type(entry) ((entry >> 2) & 0x7f) #define __swp_offset(entry) ((entry << 1) >> 10) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 0 : 1) #define KERNEL_TR_PAGE_SIZE (1 << _PAGE_SIZE_64M) #define KERNEL_TR_PAGE_MASK (~(KERNEL_TR_PAGE_SIZE - 1)) #define UNKNOWN_PHYS_START ((ulong)(-1)) #define DEFAULT_PHYS_START (KERNEL_TR_PAGE_SIZE * 1) #define IA64_GET_STACK_ULONG(OFF) \ ((INSTACK(OFF,bt)) ? 
(GET_STACK_ULONG(OFF)) : get_init_stack_ulong((unsigned long)OFF)) #endif /* IA64 */ #ifdef PPC64 #define _64BIT_ #define MACHINE_TYPE "PPC64" #define PPC64_64K_PAGE_SIZE 65536 #define PPC64_STACK_SIZE 16384 #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->identity_map_base)) #define VTOP(X) ((unsigned long)(X)-(machdep->identity_map_base)) #define BOOK3E_VMBASE 0x8000000000000000 #define IS_VMALLOC_ADDR(X) machdep->machspec->is_vmaddr(X) #define KERNELBASE machdep->pageoffset #define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) #define PMD_SHIFT (machdep->pageshift + (machdep->pageshift - 3)) #define PGD_MASK (~((1UL << PGDIR_SHIFT) - 1)) #define PMD_MASK (~((1UL << PMD_SHIFT) - 1)) /* shift to put page number into pte */ #define PTE_RPN_SHIFT_DEFAULT 16 #define PMD_TO_PTEPAGE_SHIFT 2 /* Used for 2.6 or later */ #define PTE_INDEX_SIZE 9 #define PMD_INDEX_SIZE 10 #define PGD_INDEX_SIZE 10 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) #define PGD_OFFSET_24(vaddr) ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff) #define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) /* 4-level page table support */ /* 4K pagesize */ #define PTE_INDEX_SIZE_L4_4K 9 #define PMD_INDEX_SIZE_L4_4K 7 #define PUD_INDEX_SIZE_L4_4K 7 #define PGD_INDEX_SIZE_L4_4K 9 #define PUD_INDEX_SIZE_L4_4K_3_7 9 #define PTE_INDEX_SIZE_RADIX_4K 9 #define PMD_INDEX_SIZE_RADIX_4K 9 #define PUD_INDEX_SIZE_RADIX_4K 9 #define PGD_INDEX_SIZE_RADIX_4K 13 #define PTE_RPN_SHIFT_L4_4K 17 #define PTE_RPN_SHIFT_L4_4K_4_5 18 #define PGD_MASKED_BITS_4K 0 #define PUD_MASKED_BITS_4K 0 #define PMD_MASKED_BITS_4K 0 /* 64K pagesize */ #define PTE_INDEX_SIZE_L4_64K 12 #define PMD_INDEX_SIZE_L4_64K 12 #define PUD_INDEX_SIZE_L4_64K 0 #define PGD_INDEX_SIZE_L4_64K 4 #define 
PTE_INDEX_SIZE_L4_64K_3_10 8 #define PMD_INDEX_SIZE_L4_64K_3_10 10 #define PGD_INDEX_SIZE_L4_64K_3_10 12 #define PMD_INDEX_SIZE_L4_64K_4_6 5 #define PUD_INDEX_SIZE_L4_64K_4_6 5 #define PMD_INDEX_SIZE_L4_64K_4_12 10 #define PUD_INDEX_SIZE_L4_64K_4_12 7 #define PGD_INDEX_SIZE_L4_64K_4_12 8 #define PUD_INDEX_SIZE_L4_64K_4_17 10 #define PTE_INDEX_SIZE_RADIX_64K 5 #define PMD_INDEX_SIZE_RADIX_64K 9 #define PUD_INDEX_SIZE_RADIX_64K 9 #define PGD_INDEX_SIZE_RADIX_64K 13 #define PTE_RPN_SHIFT_L4_64K_V1 32 #define PTE_RPN_SHIFT_L4_64K_V2 30 #define PTE_RPN_SHIFT_L4_BOOK3E_64K 28 #define PTE_RPN_SHIFT_L4_BOOK3E_4K 24 #define PGD_MASKED_BITS_64K 0 #define PUD_MASKED_BITS_64K 0x1ff #define PMD_MASKED_BITS_64K 0x1ff #define PMD_MASKED_BITS_64K_3_11 0xfff #define PMD_MASKED_BITS_BOOK3E_64K_4_5 0x7ff #define PGD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL #define PUD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL #define PMD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL #define PTE_RPN_MASK_DEFAULT 0xffffffffffffffffUL #define PAGE_PA_MAX_L4_4_6 (THIS_KERNEL_VERSION >= LINUX(4,11,0) ? 53 : 57) #define PTE_RPN_MASK_L4_4_6 \ (((1UL << PAGE_PA_MAX_L4_4_6) - 1) & ~((1UL << PAGESHIFT()) - 1)) #define PTE_RPN_SHIFT_L4_4_6 PAGESHIFT() #define PGD_MASKED_BITS_4_7 0xc0000000000000ffUL #define PUD_MASKED_BITS_4_7 0xc0000000000000ffUL #define PMD_MASKED_BITS_4_7 0xc0000000000000ffUL #define PD_HUGE 0x8000000000000000 #define HUGE_PTE_MASK 0x03 #define HUGEPD_SHIFT_MASK 0x3f #define HUGEPD_ADDR_MASK (0x0fffffffffffffffUL & ~HUGEPD_SHIFT_MASK) #define PGD_MASK_L4 \ (THIS_KERNEL_VERSION >= LINUX(3,10,0) ? 
(machdep->ptrs_per_pgd - 1) : 0x1ff) #define PGD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l4_shift)) & PGD_MASK_L4) #define PUD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1)) #define PMD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1)) #define _PAGE_PTE (machdep->machspec->_page_pte) /* distinguishes PTEs from pointers */ #define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ #define _PAGE_USER (machdep->machspec->_page_user) /* matches one of the PP bits */ #define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ #define _PAGE_GUARDED (machdep->machspec->_page_guarded) #define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) #define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ #define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ #define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ #define PTE_RPN_MASK (machdep->machspec->pte_rpn_mask) #define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(2,6,23) ? 
1 : 2) #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define MSR_PR_LG 14 /* Problem State / Privilege Level */ /* Used to find the user or kernel-mode frame*/ #define STACK_FRAME_OVERHEAD 112 #define EXCP_FRAME_MARKER 0x7265677368657265 #define _SECTION_SIZE_BITS 24 #define _MAX_PHYSMEM_BITS 44 #define _MAX_PHYSMEM_BITS_3_7 46 #define _MAX_PHYSMEM_BITS_4_19 47 #define _MAX_PHYSMEM_BITS_4_20 51 #endif /* PPC64 */ #ifdef S390 #define _32BIT_ #define MACHINE_TYPE "S390" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define PTRS_PER_PTE 1024 #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 512 #define SEGMENT_TABLE_SIZE ((sizeof(ulong)*4) * PTRS_PER_PGD) #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \ (((entry) >> 7) & 0x1)) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(3,16,0) ? 1 : 2) #define _SECTION_SIZE_BITS 25 #define _MAX_PHYSMEM_BITS 31 #endif /* S390 */ #ifdef S390X #define _64BIT_ #define MACHINE_TYPE "S390X" #define PTOV(X) s390x_PTOV((ulong)(X)) #define VTOP(X) s390x_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) s390x_IS_VMALLOC_ADDR(X) #define PTRS_PER_PTE 512 #define PTRS_PER_PMD 1024 #define PTRS_PER_PGD 2048 #define SEGMENT_TABLE_SIZE ((sizeof(ulong)*2) * PTRS_PER_PMD) #define SWP_TYPE(entry) (((entry) >> 2) & 0x1f) #define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \ (((entry) >> 7) & 0x1)) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (THIS_KERNEL_VERSION >= LINUX(3,16,0) ? 
1 : 2) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS_OLD 42 #define _MAX_PHYSMEM_BITS_NEW 46 #endif /* S390X */ #ifdef SPARC64 #define _64BIT_ #define MACHINE_TYPE "SPARC64" #define PTOV(X) \ ((unsigned long)(X) + machdep->machspec->page_offset) #define VTOP(X) \ ((unsigned long)(X) - machdep->machspec->page_offset) #define PAGE_OFFSET (machdep->machspec->page_offset) extern int sparc64_IS_VMALLOC_ADDR(ulong vaddr); #define IS_VMALLOC_ADDR(X) sparc64_IS_VMALLOC_ADDR((ulong)(X)) #define PAGE_SHIFT (13) #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE - 1)) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define THREAD_SIZE (2 * PAGE_SIZE) /* S3 Core * Core 48-bit physical address supported. * Bit 47 distinguishes memory or I/O. When set to "1" it is I/O. */ #define PHYS_MASK_SHIFT (47) #define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1) typedef signed int s32; /* * These next two defines are convenience defines for normal page table. */ #define PTES_PER_PAGE (1UL << (PAGE_SHIFT - 3)) #define PTES_PER_PAGE_MASK (PTES_PER_PAGE - 1) /* 4-level page table */ #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE - 1)) #define PMD_BITS (PAGE_SHIFT - 3) #define PUD_SHIFT (PMD_SHIFT + PMD_BITS) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE - 1)) #define PUD_BITS (PAGE_SHIFT - 3) #define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PGDIR_BITS (PAGE_SHIFT - 3) #define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3)) #define PTRS_PER_PMD (1UL << PMD_BITS) #define PTRS_PER_PUD (1UL << PUD_BITS) #define PTRS_PER_PGD (1UL << PGDIR_BITS) #define HPAGE_SHIFT (23) /* Down one huge page */ #define SPARC64_USERSPACE_TOP (-(1UL << HPAGE_SHIFT)) #define PAGE_PMD_HUGE (0x0100000000000000UL) /* These are for SUN4V.
*/ #define _PAGE_VALID (0x8000000000000000UL) #define _PAGE_NFO_4V (0x4000000000000000UL) #define _PAGE_MODIFIED_4V (0x2000000000000000UL) #define _PAGE_ACCESSED_4V (0x1000000000000000UL) #define _PAGE_READ_4V (0x0800000000000000UL) #define _PAGE_WRITE_4V (0x0400000000000000UL) #define _PAGE_PADDR_4V (0x00FFFFFFFFFFE000UL) #define _PAGE_PFN_MASK (_PAGE_PADDR_4V) #define _PAGE_P_4V (0x0000000000000100UL) #define _PAGE_EXEC_4V (0x0000000000000080UL) #define _PAGE_W_4V (0x0000000000000040UL) #define _PAGE_PRESENT_4V (0x0000000000000010UL) #define _PAGE_SZALL_4V (0x0000000000000007UL) /* There are other page sizes. Some supported. */ #define _PAGE_SZ4MB_4V (0x0000000000000003UL) #define _PAGE_SZ512K_4V (0x0000000000000002UL) #define _PAGE_SZ64K_4V (0x0000000000000001UL) #define _PAGE_SZ8K_4V (0x0000000000000000UL) #define SPARC64_MODULES_VADDR (0x0000000010000000UL) #define SPARC64_MODULES_END (0x00000000f0000000UL) #define SPARC64_VMALLOC_START (0x0000000100000000UL) #define SPARC64_STACK_SIZE 0x4000 /* sparsemem */ #define _SECTION_SIZE_BITS 30 #define _MAX_PHYSMEM_BITS 53 #define STACK_BIAS 2047 struct machine_specific { ulong page_offset; ulong vmalloc_end; }; #define TIF_SIGPENDING (2) #define SWP_OFFSET(E) ((E) >> (PAGE_SHIFT + 8UL)) #define SWP_TYPE(E) (((E) >> PAGE_SHIFT) & 0xffUL) #define __swp_type(E) SWP_TYPE(E) #define __swp_offset(E) SWP_OFFSET(E) #endif /* SPARC64 */ #ifdef PLATFORM #define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #endif /* PLATFORM */ #define KILOBYTES(x) ((x) * (1024)) #define MEGABYTES(x) ((x) * (1048576)) #define GIGABYTES(x) ((x) * (1073741824)) #define TB_SHIFT (40) #define TERABYTES(x) ((x) * (1UL << TB_SHIFT)) #define MEGABYTE_MASK (MEGABYTES(1)-1) #define SIZEOF_64BIT (8) #define SIZEOF_32BIT (4) #define SIZEOF_16BIT (2) #define SIZEOF_8BIT (1) #ifdef ARM #define 
MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef X86 #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef ALPHA #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (11) #endif #ifdef PPC #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef IA64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef S390 #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef S390X #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef X86_64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (10) #endif #ifdef PPC64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef ARM64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (10) #endif #ifdef MIPS #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef MIPS64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef SPARC64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef RISCV64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef LOONGARCH64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #define BADADDR ((ulong)(-1)) #define BADVAL ((ulong)(-1)) #define UNUSED (-1) #define UNINITIALIZED (BADVAL) #define BITS_PER_BYTE (8) #define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) #define NUM_TO_BIT(x) (1UL<<((x)%BITS_PER_LONG)) #define NUM_IN_BITMAP(bitmap, x) (bitmap[(x)/BITS_PER_LONG] & NUM_TO_BIT(x)) #define SET_BIT(bitmap, x) (bitmap[(x)/BITS_PER_LONG] |= NUM_TO_BIT(x)) static inline unsigned int __const_hweight8(unsigned long w) { return (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))); } #define __const_hweight16(w) (__const_hweight8(w) + __const_hweight8((w) >> 8)) #define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16)) #define 
__const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32)) #define hweight32(w) __const_hweight32(w) #define hweight64(w) __const_hweight64(w) /* * precision lengths for fprintf */ #define VADDR_PRLEN (sizeof(char *) == 8 ? 16 : 8) #define LONG_LONG_PRLEN (16) #define LONG_PRLEN (sizeof(long) == 8 ? 16 : 8) #define INT_PRLEN (sizeof(int) == 8 ? 16 : 8) #define CHAR_PRLEN (2) #define SHORT_PRLEN (4) #define MINSPACE (-100) #define SYNOPSIS (0x1) #define COMPLETE_HELP (0x2) #define PIPE_TO_SCROLL (0x4) #define MUST_HELP (0x8) #define LEFT_JUSTIFY (1) #define RIGHT_JUSTIFY (2) #define CENTER (0x1) #define LJUST (0x2) #define RJUST (0x4) #define LONG_DEC (0x8) #define LONG_HEX (0x10) #define INT_DEC (0x20) #define INT_HEX (0x40) #define LONGLONG_HEX (0x80) #define ZERO_FILL (0x100) #define SLONG_DEC (0x200) #define INIT_TIME (1) #define RUN_TIME (2) /* * IRQ line status. * For kernels up to and including 2.6.17 */ #define IRQ_INPROGRESS_2_6_17 1 /* IRQ handler active - do not enter! */ #define IRQ_DISABLED_2_6_17 2 /* IRQ disabled - do not enter! 
*/ #define IRQ_PENDING_2_6_17 4 /* IRQ pending - replay on enable */ #define IRQ_REPLAY_2_6_17 8 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT_2_6_17 16 /* IRQ is being autodetected */ #define IRQ_WAITING_2_6_17 32 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL_2_6_17 64 /* IRQ level triggered */ #define IRQ_MASKED_2_6_17 128 /* IRQ masked - shouldn't be seen again */ /* * For kernel 2.6.21 and later */ #define IRQ_TYPE_NONE_2_6_21 0x00000000 /* Default, unspecified type */ #define IRQ_TYPE_EDGE_RISING_2_6_21 0x00000001 /* Edge rising type */ #define IRQ_TYPE_EDGE_FALLING_2_6_21 0x00000002 /* Edge falling type */ #define IRQ_TYPE_EDGE_BOTH_2_6_21 (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) #define IRQ_TYPE_LEVEL_HIGH_2_6_21 0x00000004 /* Level high type */ #define IRQ_TYPE_LEVEL_LOW_2_6_21 0x00000008 /* Level low type */ #define IRQ_TYPE_SENSE_MASK_2_6_21 0x0000000f /* Mask of the above */ #define IRQ_TYPE_PROBE_2_6_21 0x00000010 /* Probing in progress */ #define IRQ_INPROGRESS_2_6_21 0x00000100 /* IRQ handler active - do not enter! */ #define IRQ_DISABLED_2_6_21 0x00000200 /* IRQ disabled - do not enter! 
*/ #define IRQ_PENDING_2_6_21 0x00000400 /* IRQ pending - replay on enable */ #define IRQ_REPLAY_2_6_21 0x00000800 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT_2_6_21 0x00001000 /* IRQ is being autodetected */ #define IRQ_WAITING_2_6_21 0x00002000 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL_2_6_21 0x00004000 /* IRQ level triggered */ #define IRQ_MASKED_2_6_21 0x00008000 /* IRQ masked - shouldn't be seen again */ #define IRQ_PER_CPU_2_6_21 0x00010000 /* IRQ is per CPU */ #define IRQ_NOPROBE_2_6_21 0x00020000 /* IRQ is not valid for probing */ #define IRQ_NOREQUEST_2_6_21 0x00040000 /* IRQ cannot be requested */ #define IRQ_NOAUTOEN_2_6_21 0x00080000 /* IRQ will not be enabled on request irq */ #define IRQ_WAKEUP_2_6_21 0x00100000 /* IRQ triggers system wakeup */ #define IRQ_MOVE_PENDING_2_6_21 0x00200000 /* need to re-target IRQ destination */ #define IRQ_NO_BALANCING_2_6_21 0x00400000 /* IRQ is excluded from balancing */ #define IRQ_SPURIOUS_DISABLED_2_6_21 0x00800000 /* IRQ was disabled by the spurious trap */ #define IRQ_MOVE_PCNTXT_2_6_21 0x01000000 /* IRQ migration from process context */ #define IRQ_AFFINITY_SET_2_6_21 0x02000000 /* IRQ affinity was set from userspace*/ /* * Select proper IRQ value depending on kernel version */ #define IRQ_TYPE_NONE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_NONE_2_6_21 : 0) #define IRQ_TYPE_EDGE_RISING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_RISING_2_6_21 : 0) #define IRQ_TYPE_EDGE_FALLING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_FALLING_2_6_21 : 0) #define IRQ_TYPE_EDGE_BOTH \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_BOTH_2_6_21 : 0) #define IRQ_TYPE_LEVEL_HIGH \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_HIGH_2_6_21 : 0) #define IRQ_TYPE_LEVEL_LOW \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_LOW_2_6_21 : 0) #define IRQ_TYPE_SENSE_MASK \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
IRQ_TYPE_SENSE_MASK_2_6_21 : 0) #define IRQ_TYPE_PROBE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_PROBE_2_6_21 : 0) #define IRQ_INPROGRESS \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_INPROGRESS_2_6_21 : IRQ_INPROGRESS_2_6_17) #define IRQ_DISABLED \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_DISABLED_2_6_21 : IRQ_DISABLED_2_6_17) #define IRQ_PENDING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PENDING_2_6_21 : IRQ_PENDING_2_6_17) #define IRQ_REPLAY \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_REPLAY_2_6_21 : IRQ_REPLAY_2_6_17) #define IRQ_AUTODETECT \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_AUTODETECT_2_6_21 : IRQ_AUTODETECT_2_6_17) #define IRQ_WAITING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAITING_2_6_21 : IRQ_WAITING_2_6_17) #define IRQ_LEVEL \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_LEVEL_2_6_21 : IRQ_LEVEL_2_6_17) #define IRQ_MASKED \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MASKED_2_6_21 : IRQ_MASKED_2_6_17) #define IRQ_PER_CPU \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PER_CPU_2_6_21 : 0) #define IRQ_NOPROBE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOPROBE_2_6_21 : 0) #define IRQ_NOREQUEST \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOREQUEST_2_6_21 : 0) #define IRQ_NOAUTOEN \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOAUTOEN_2_6_21 : 0) #define IRQ_WAKEUP \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAKEUP_2_6_21 : 0) #define IRQ_MOVE_PENDING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PENDING_2_6_21 : 0) #define IRQ_NO_BALANCING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NO_BALANCING_2_6_21 : 0) #define IRQ_SPURIOUS_DISABLED \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_SPURIOUS_DISABLED_2_6_21 : 0) #define IRQ_MOVE_PCNTXT \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PCNTXT_2_6_21 : 0) #define IRQ_AFFINITY_SET \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
IRQ_AFFINITY_SET_2_6_21 : 0) #ifdef ARM #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef X86 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef X86_64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef ALPHA #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x40000000 #endif #ifdef PPC #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef PPC64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000u #endif #ifdef IA64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef S390 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef S390X #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #define ACTION_FLAGS (SA_INTERRUPT|SA_PROBE|SA_SAMPLE_RANDOM|SA_SHIRQ) #endif /* !GDB_COMMON */ typedef enum drill_ops_s { EOP_MEMBER_SIZES, EOP_MEMBER_NAME, EOP_POINTER, EOP_TYPEDEF, EOP_INT, EOP_VALUE, EOP_ARRAY, EOP_UNION, EOP_ENUM, EOP_ENUMVAL, EOP_STRUCT, EOP_FUNCTION, EOP_DONE, EOP_OOPS } drill_ops_t; /* * Common request structure for BFD or GDB data or commands. 
*/ struct gnu_request { int command; char *buf; FILE *fp; ulong addr; ulong addr2; ulong count; ulong flags; char *name; ulong length; int typecode; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) char *typename; #else char *type_name; #endif char *target_typename; ulong target_length; int target_typecode; int is_typedef; char *member; long member_offset; long member_length; int member_typecode; long value; const char *tagname; ulong pc; ulong sp; ulong ra; int curframe; ulong frame; ulong prevsp; ulong prevpc; ulong lastsp; ulong task; ulong debug; struct stack_hook *hookp; ulong lowest; ulong highest; void (*callback) (struct gnu_request *req, void *data); void *callback_data; struct load_module *lm; char *member_main_type_name; char *member_main_type_tag_name; char *member_target_type_name; char *member_target_type_tag_name; char *type_tag_name; /* callback function for 3rd party symbol and type (EPPIC for now) */ void *priv; int (*tcb)(drill_ops_t, struct gnu_request *, const void *, const void *, const void *, const void *); }; /* * GNU commands */ #define GNU_DATATYPE_INIT (1) #define GNU_DISASSEMBLE (2) #define GNU_GET_LINE_NUMBER (3) #define GNU_PASS_THROUGH (4) #define GNU_GET_DATATYPE (5) #define GNU_COMMAND_EXISTS (6) #define GNU_STACK_TRACE (7) #define GNU_ALPHA_FRAME_OFFSET (8) #define GNU_FUNCTION_NUMARGS (9) #define GNU_RESOLVE_TEXT_ADDR (10) #define GNU_ADD_SYMBOL_FILE (11) #define GNU_DELETE_SYMBOL_FILE (12) #define GNU_VERSION (13) #define GNU_PATCH_SYMBOL_VALUES (14) #define GNU_GET_SYMBOL_TYPE (15) #define GNU_USER_PRINT_OPTION (16) #define GNU_SET_CRASH_BLOCK (17) #define GNU_GET_FUNCTION_RANGE (18) #define GNU_ITERATE_DATATYPES (19) #define GNU_LOOKUP_STRUCT_CONTENTS (20) #define GNU_DEBUG_COMMAND (100) /* * GNU flags */ #define GNU_PRINT_LINE_NUMBERS (0x1) #define GNU_FUNCTION_ONLY (0x2) #define GNU_PRINT_ENUMERATORS (0x4) #define GNU_RETURN_ON_ERROR (0x8) #define GNU_COMMAND_FAILED (0x10) #define 
GNU_FROM_TTY_OFF (0x20) #define GNU_NO_READMEM (0x40) #define GNU_VAR_LENGTH_TYPECODE (0x80) #undef TRUE #undef FALSE #define TRUE (1) #define FALSE (0) #ifdef GDB_COMMON /* * function prototypes required by modified gdb source files. */ extern "C" int console(const char *, ...); extern "C" int gdb_CRASHDEBUG(ulong); int gdb_readmem_callback(ulong, void *, int, int); void patch_load_module(struct objfile *objfile, struct minimal_symbol *msymbol); extern "C" int patch_kernel_symbol(struct gnu_request *); struct syment *symbol_search(char *); int gdb_line_number_callback(ulong, ulong, ulong); int gdb_print_callback(ulong); char *gdb_lookup_module_symbol(ulong, ulong *); extern "C" int same_file(char *, char *); #endif #ifndef GDB_COMMON /* * WARNING: the following type codes are type_code enums from gdb/gdbtypes.h */ enum type_code { TYPE_CODE_UNDEF, /* Not used; catches errors */ TYPE_CODE_PTR, /* Pointer type */ TYPE_CODE_ARRAY, /* Array type with lower & upper bounds. */ TYPE_CODE_STRUCT, /* C struct or Pascal record */ TYPE_CODE_UNION, /* C union or Pascal variant part */ TYPE_CODE_ENUM, /* Enumeration type */ #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6) || defined(GDB_10_2) || defined(GDB_16_2) #if defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6) || defined(GDB_10_2) || defined(GDB_16_2) TYPE_CODE_FLAGS, /* Bit flags type */ #endif TYPE_CODE_FUNC, /* Function type */ TYPE_CODE_INT, /* Integer type */ /* Floating type. This is *NOT* a complex type. Beware, there are parts of GDB which bogusly assume that TYPE_CODE_FLT can mean complex. */ TYPE_CODE_FLT, /* Void type. The length field specifies the length (probably always one) which is used in pointer arithmetic involving pointers to this type, but actually dereferencing such a pointer is invalid; a void type has no length and no actual representation in memory or registers. A pointer to a void type is a generic pointer. 
*/ TYPE_CODE_VOID, TYPE_CODE_SET, /* Pascal sets */ TYPE_CODE_RANGE, /* Range (integers within spec'd bounds) */ /* * NOTE: the remainder of the type codes are not list or used here... */ TYPE_CODE_BOOL = 20, #endif }; /* * include/linux/sched.h */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define SCHED_NORMAL 0 #define SCHED_FIFO 1 #define SCHED_RR 2 #define SCHED_BATCH 3 #define SCHED_ISO 4 #define SCHED_IDLE 5 #define SCHED_DEADLINE 6 extern long _ZOMBIE_; #define IS_ZOMBIE(task) (task_state(task) & _ZOMBIE_) #define IS_EXITING(task) (task_flags(task) & PF_EXITING) /* * ps command options. */ #define PS_BY_PID (0x1) #define PS_BY_TASK (0x2) #define PS_BY_CMD (0x4) #define PS_SHOW_ALL (0x8) #define PS_PPID_LIST (0x10) #define PS_CHILD_LIST (0x20) #define PS_KERNEL (0x40) #define PS_USER (0x80) #define PS_TIMES (0x100) #define PS_KSTACKP (0x200) #define PS_LAST_RUN (0x400) #define PS_ARGV_ENVP (0x800) #define PS_TGID_LIST (0x1000) #define PS_RLIMIT (0x2000) #define PS_GROUP (0x4000) #define PS_BY_REGEX (0x8000) #define PS_NO_HEADER (0x10000) #define PS_MSECS (0x20000) #define PS_SUMMARY (0x40000) #define PS_POLICY (0x80000) #define PS_ACTIVE (0x100000) #define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT|PS_MSECS|PS_SUMMARY|PS_ACTIVE) #define MAX_PS_ARGS (100) /* maximum command-line specific requests */ struct psinfo { int argc; ulong pid[MAX_PS_ARGS]; int type[MAX_PS_ARGS]; ulong task[MAX_PS_ARGS]; char comm[MAX_PS_ARGS][TASK_COMM_LEN+1]; struct regex_data { char *pattern; regex_t regex; } regex_data[MAX_PS_ARGS]; int regexs; ulong *cpus; int policy; }; #define IS_A_NUMBER(X) (decimal(X, 0) || hexadecimal(X, 0)) #define AMBIGUOUS_NUMBER(X) (decimal(X, 0) && hexadecimal(X, 0)) #define is_mclx_compressed_dump(X) (va_server_init((X), 0, 0, 0) == 0) struct task_mem_usage { ulong rss; ulong total_vm; double pct_physmem; ulong mm_struct_addr; ulong 
pgd_addr; }; /* * Global data (global_data.c) */ extern FILE *fp; extern struct program_context program_context, *pc; extern struct task_table task_table, *tt; extern struct kernel_table kernel_table, *kt; extern struct command_table_entry linux_command_table[]; extern char *args[MAXARGS]; extern int argcnt; extern int argerrs; extern struct offset_table offset_table; extern struct size_table size_table; extern struct array_table array_table; extern struct vm_table vm_table, *vt; extern struct machdep_table *machdep; extern struct symbol_table_data symbol_table_data, *st; extern struct extension_table *extension_table; /* * Generated in build_data.c */ extern char *build_command; extern char *build_data; extern char *build_target; extern char *build_version; extern char *compiler_version; /* * command prototypes */ void cmd_quit(void); /* main.c */ void cmd_mach(void); /* main.c */ void cmd_help(void); /* help.c */ void cmd_test(void); /* test.c */ void cmd_ascii(void); /* tools.c */ void cmd_sbitmapq(void); /* sbitmap.c */ void cmd_bpf(void); /* bfp.c */ void cmd_set(void); /* tools.c */ void cmd_eval(void); /* tools.c */ void cmd_list(void); /* tools.c */ void cmd_tree(void); /* tools.c */ void cmd_template(void); /* tools.c */ void cmd_alias(void); /* cmdline.c */ void cmd_repeat(void); /* cmdline.c */ void cmd_rd(void); /* memory.c */ void cmd_wr(void); /* memory.c */ void cmd_ptov(void); /* memory.c */ void cmd_vtop(void); /* memory.c */ void cmd_vm(void); /* memory.c */ void cmd_ptob(void); /* memory.c */ void cmd_btop(void); /* memory.c */ void cmd_kmem(void); /* memory.c */ void cmd_search(void); /* memory.c */ void cmd_swap(void); /* memory.c */ void cmd_pte(void); /* memory.c */ void cmd_ps(void); /* task.c */ void cmd_task(void); /* task.c */ void cmd_foreach(void); /* task.c */ void cmd_runq(void); /* task.c */ void cmd_sig(void); /* task.c */ void cmd_bt(void); /* kernel.c */ void cmd_dis(void); /* kernel.c */ void cmd_mod(void); /* kernel.c */ void 
cmd_log(void); /* kernel.c */ void cmd_sys(void); /* kernel.c */ void cmd_irq(void); /* kernel.c */ void cmd_timer(void); /* kernel.c */ void cmd_waitq(void); /* kernel.c */ void cmd_sym(void); /* symbols.c */ void cmd_struct(void); /* symbols.c */ void cmd_union(void); /* symbols.c */ void cmd_pointer(void); /* symbols.c */ void cmd_whatis(void); /* symbols.c */ void cmd_p(void); /* symbols.c */ void cmd_mount(void); /* filesys.c */ void cmd_files(void); /* filesys.c */ void cmd_fuser(void); /* filesys.c */ void cmd_dev(void); /* dev.c */ void cmd_gdb(void); /* gdb_interface.c */ void cmd_net(void); /* net.c */ void cmd_extend(void); /* extensions.c */ #if defined(S390) || defined(S390X) void cmd_s390dbf(void); #endif void cmd_map(void); /* kvmdump.c */ void cmd_ipcs(void); /* ipcs.c */ void cmd_rustfilt(void); /* symbols.c */ /* * main.c */ void main_loop(void); void exec_command(void); struct command_table_entry *get_command_table_entry(char *); void program_usage(int); #define LONG_FORM (1) #define SHORT_FORM (0) void dump_program_context(void); void dump_build_data(void); #ifdef ARM #define machdep_init(X) arm_init(X) #endif #ifdef ARM64 #define machdep_init(X) arm64_init(X) #endif #ifdef X86 #define machdep_init(X) x86_init(X) #endif #ifdef ALPHA #define machdep_init(X) alpha_init(X) #endif #ifdef PPC #define machdep_init(X) ppc_init(X) #endif #ifdef IA64 #define machdep_init(X) ia64_init(X) #endif #ifdef S390 #define machdep_init(X) s390_init(X) #endif #ifdef S390X #define machdep_init(X) s390x_init(X) #endif #ifdef X86_64 #define machdep_init(X) x86_64_init(X) #endif #ifdef PPC64 #define machdep_init(X) ppc64_init(X) #endif #ifdef MIPS #define machdep_init(X) mips_init(X) #endif #ifdef MIPS64 #define machdep_init(X) mips64_init(X) #endif #ifdef RISCV64 #define machdep_init(X) riscv64_init(X) #endif #ifdef SPARC64 #define machdep_init(X) sparc64_init(X) #endif #ifdef LOONGARCH64 #define machdep_init(X) loongarch64_init(X) #endif int clean_exit(int); int 
untrusted_file(FILE *, char *); char *readmem_function_name(void); char *writemem_function_name(void); char *no_vmcoreinfo(const char *); /* * cmdline.c */ void restart(int); void alias_init(char *); struct alias_data *is_alias(char *); void deallocate_alias(char *); void cmdline_init(void); void set_command_prompt(char *); void exec_input_file(void); void process_command_line(void); void dump_history(void); void resolve_rc_cmd(char *, int); void dump_alias_data(void); int output_open(void); #define output_closed() (!output_open()) void close_output(void); int interruptible(void); int received_SIGINT(void); void debug_redirect(char *); int CRASHPAGER_valid(void); char *setup_scroll_command(void); int minimal_functions(char *); int is_args_input_file(struct command_table_entry *, struct args_input_file *); void exec_args_input_file(struct command_table_entry *, struct args_input_file *); /* * tools.c */ FILE *set_error(char *); int __error(int, char *, ...); #define error __error /* avoid conflict with gdb error() */ int console(const char *, ...); void create_console_device(char *); int console_off(void); int console_on(int); int console_verbatim(char *); int whitespace(int); int ascii(int); int ascii_string(char *); int printable_string(char *); char *clean_line(char *); char *strip_line_end(char *); char *strip_linefeeds(char *); char *strip_beginning_whitespace(char *); char *strip_ending_whitespace(char *); char *strip_ending_char(char *, char); char *strip_beginning_char(char *, char); char *strip_comma(char *); char *strip_hex(char *); char *upper_case(const char *, char *); char *first_nonspace(char *); char *first_space(char *); char *replace_string(char *, char *, char); void string_insert(char *, char *); char *strstr_rightmost(char *, char *); char *null_first_space(char *); int parse_line(char *, char **); void print_verbatim(FILE *, char *); char *fixup_percent(char *); int can_eval(char *); ulong eval(char *, int, int *); ulonglong evall(char *, int, 
int *); int eval_common(char *, int, int *, struct number_option *); ulong htol(char *, int, int *); ulong dtol(char *, int, int *); unsigned int dtoi(char *, int, int *); ulong stol(char *, int, int *); ulonglong stoll(char *, int, int *); ulonglong htoll(char *, int, int *); ulonglong dtoll(char *, int, int *); int decimal(char *, int); int hexadecimal(char *, int); int hexadecimal_only(char *, int); ulong convert(char *, int, int *, ulong); void pad_line(FILE *, int, char); #define INDENT(x) pad_line(fp, x, ' ') char *mkstring(char *, int, ulong, const char *); #define MKSTR(X) ((const char *)(X)) int count_leading_spaces(char *); int count_chars(char *, char); long count_buffer_chars(char *, char, long); char *space(int); char *concat_args(char *, int, int); char *shift_string_left(char *, int); char *shift_string_right(char *, int); int bracketed(char *, char *, int); void backspace(int); int do_list(struct list_data *); int do_list_no_hash(struct list_data *); struct radix_tree_ops { void (*entry)(ulong node, ulong slot, const char *path, ulong index, void *private); uint radix; void *private; }; int do_radix_tree_traverse(ulong ptr, int is_root, struct radix_tree_ops *ops); struct xarray_ops { void (*entry)(ulong node, ulong slot, const char *path, ulong index, void *private); uint radix; void *private; }; int do_xarray_traverse(ulong ptr, int is_root, struct xarray_ops *ops); int do_rdtree(struct tree_data *); int do_rbtree(struct tree_data *); int do_xatree(struct tree_data *); int retrieve_list(ulong *, int); long power(long, int); long long ll_power(long long, long long); void hq_init(void); int hq_open(void); int hq_close(void); int hq_enter(ulong); int hq_entry_exists(ulong); int hq_is_open(void); int hq_is_inuse(void); long get_embedded(void); void dump_embedded(char *); char *ordinal(ulong, char *); char *first_nonspace(char *); void dump_hash_table(int); void dump_shared_bufs(void); void drop_core(char *); int extract_hex(char *, ulong *, char, 
ulong); int count_bits_int(int); int count_bits_long(ulong); int highest_bit_long(ulong); int lowest_bit_long(ulong); void buf_init(void); void sym_buf_init(void); void free_all_bufs(void); char *getbuf(long); void freebuf(char *); char *resizebuf(char *, long, long); char *strdupbuf(char *); #define GETBUF(X) getbuf((long)(X)) #define FREEBUF(X) freebuf((char *)(X)) #define RESIZEBUF(X,Y,Z) (X) = (typeof(X))resizebuf((char *)(X), (long)(Y), (long)(Z)); #define STRDUPBUF(X) strdupbuf((char *)(X)) void sigsetup(int, void *, struct sigaction *, struct sigaction *); #define SIGACTION(s, h, a, o) sigsetup(s, h, a, o) char *convert_time(ulonglong, char *); char *ctime_tz(time_t *); void stall(ulong); char *pages_to_size(ulong, char *); int clean_arg(void); int empty_list(ulong); int machine_type(char *); int machine_type_mismatch(char *, char *, char *, ulong); void command_not_supported(void); void option_not_supported(int); void please_wait(char *); void please_wait_done(void); int pathcmp(char *, char *); int calculate(char *, ulong *, ulonglong *, ulong); int endian_mismatch(char *, char, ulong); uint16_t swap16(uint16_t, int); uint32_t swap32(uint32_t, int); uint64_t swap64(uint64_t, int); ulong *get_cpumask_buf(void); int make_cpumask(char *, ulong *, int, int *); size_t strlcpy(char *, const char *, size_t) __attribute__ ((__weak__)); struct rb_node *rb_first(struct rb_root *); struct rb_node *rb_parent(struct rb_node *, struct rb_node *); struct rb_node *rb_right(struct rb_node *, struct rb_node *); struct rb_node *rb_left(struct rb_node *, struct rb_node *); struct rb_node *rb_next(struct rb_node *); struct rb_node *rb_last(struct rb_root *); long percpu_counter_sum_positive(ulong fbc); ulong get_subsys_private(char *, char *); /* * symbols.c */ void symtab_init(void); char *check_specified_kernel_debug_file(void); void no_debugging_data(int); void get_text_init_space(void); int is_kernel_text(ulong); int is_kernel_data(ulong); int is_init_data(ulong value); 
int is_kernel_text_offset(ulong); int is_symbol_text(struct syment *); int is_rodata(ulong, struct syment **); int get_text_function_range(ulong, ulong *, ulong *); void datatype_init(void); struct syment *symbol_search(char *); struct syment *value_search(ulong, ulong *); struct syment *value_search_base_kernel(ulong, ulong *); struct syment *value_search_module(ulong, ulong *); struct syment *symbol_search_next(char *, struct syment *); ulong highest_bss_symbol(void); int in_ksymbol_range(ulong); int module_symbol(ulong, struct syment **, struct load_module **, char *, ulong); #define IS_MODULE_VADDR(X) \ (module_symbol((ulong)(X), NULL, NULL, NULL, *gdb_output_radix)) char *closest_symbol(ulong); ulong closest_symbol_value(ulong); #define SAME_FUNCTION(X,Y) (closest_symbol_value(X) == closest_symbol_value(Y)) void show_symbol(struct syment *, ulong, ulong); #define SHOW_LINENUM (0x1) #define SHOW_SECTION (0x2) #define SHOW_HEX_OFFS (0x4) #define SHOW_DEC_OFFS (0x8) #define SHOW_RADIX() (*gdb_output_radix == 16 ? 
SHOW_HEX_OFFS : SHOW_DEC_OFFS) #define SHOW_MODULE (0x10) int symbol_name_count(char *); int symbol_query(char *, char *, struct syment **); struct syment *next_symbol(char *, struct syment *); struct syment *prev_symbol(char *, struct syment *); void get_symbol_data(char *, long, void *); int try_get_symbol_data(char *, long, void *); char *value_to_symstr(ulong, char *, ulong); char *value_symbol(ulong); ulong symbol_value(char *); ulong symbol_value_module(char *, char *); struct syment *per_cpu_symbol_search(char *); int symbol_exists(char *s); int kernel_symbol_exists(char *s); struct syment *kernel_symbol_search(char *); ulong symbol_value_from_proc_kallsyms(char *); int get_syment_array(char *, struct syment **, int); void set_temporary_radix(unsigned int, unsigned int *); void restore_current_radix(unsigned int); void dump_struct(char *, ulong, unsigned); void dump_struct_member(char *, ulong, unsigned); void dump_union(char *, ulong, unsigned); void store_module_symbols_v1(ulong, int); void store_module_symbols_v2(ulong, int); void store_module_symbols_6_4(ulong, int); int is_datatype_command(void); int is_typedef(char *); int arg_to_datatype(char *, struct datatype_member *, ulong); void dump_symbol_table(void); void dump_struct_table(ulong); void dump_offset_table(char *, ulong); int is_elf_file(char *); int is_kernel(char *); int is_shared_object(char *); int file_elf_version(char *); int is_system_map(char *); int is_compressed_kernel(char *, char **); int select_namelist(char *); int get_array_length(char *, int *, long); int get_array_length_alt(char *, char *, int *, long); int builtin_array_length(char *, int, int *); char *get_line_number(ulong, char *, int); char *get_build_directory(char *); int datatype_exists(char *); int get_function_numargs(ulong); int is_module_name(char *, ulong *, struct load_module **); int is_module_address(ulong, char *); ulong lowest_module_address(void); ulong highest_module_address(void); int 
load_module_symbols(char *, char *, ulong); void delete_load_module(ulong); ulong gdb_load_module_callback(ulong, char *); char *load_module_filter(char *, int); #define LM_P_FILTER (1) #define LM_DIS_FILTER (2) long datatype_info(char *, char *, struct datatype_member *); int get_symbol_type(char *, char *, struct gnu_request *); int get_symbol_length(char *); void dump_numargs_cache(void); int patch_kernel_symbol(struct gnu_request *); struct syment *generic_machdep_value_to_symbol(ulong, ulong *); long OFFSET_verify(long, char *, char *, int, char *); long SIZE_verify(long, char *, char *, int, char *); long OFFSET_option(long, long, char *, char *, int, char *, char *); long SIZE_option(long, long, char *, char *, int, char *, char *); void dump_trace(void **); int enumerator_value(char *, long *); int dump_enumerator_list(char *); struct load_module *init_module_function(ulong); struct struct_member_data { char *structure; char *member; long type; long unsigned_type; long length; long offset; long bitpos; long bitsize; }; int fill_struct_member_data(struct struct_member_data *); void parse_for_member_extended(struct datatype_member *, ulong); void add_to_downsized(char *); int is_downsized(char *); int is_string(char *, char *); struct syment *symbol_complete_match(const char *, struct syment *); /* * memory.c */ void mem_init(void); void vm_init(void); int readmem(ulonglong, int, void *, long, char *, ulong); int writemem(ulonglong, int, void *, long, char *, ulong); int generic_verify_paddr(uint64_t); int read_dev_mem(int, void *, int, ulong, physaddr_t); int read_memory_device(int, void *, int, ulong, physaddr_t); int read_mclx_dumpfile(int, void *, int, ulong, physaddr_t); int read_lkcd_dumpfile(int, void *, int, ulong, physaddr_t); int read_daemon(int, void *, int, ulong, physaddr_t); int write_dev_mem(int, void *, int, ulong, physaddr_t); int write_memory_device(int, void *, int, ulong, physaddr_t); int write_mclx_dumpfile(int, void *, int, ulong, 
physaddr_t); int write_lkcd_dumpfile(int, void *, int, ulong, physaddr_t); int write_daemon(int, void *, int, ulong, physaddr_t); int kvtop(struct task_context *, ulong, physaddr_t *, int); int uvtop(struct task_context *, ulong, physaddr_t *, int); void do_vtop(ulong, struct task_context *, ulong); void raw_stack_dump(ulong, ulong); void raw_data_dump(ulong, long, int); int accessible(ulong); ulong vm_area_dump(ulong, ulong, ulong, struct reference *); #define IN_TASK_VMA(TASK,VA) (vm_area_dump((TASK), UVADDR|VERIFY_ADDR, (VA), 0)) char *fill_vma_cache(ulong); void clear_vma_cache(void); void dump_vma_cache(ulong); int generic_is_page_ptr(ulong, physaddr_t *); int is_page_ptr(ulong, physaddr_t *); void dump_vm_table(int); int read_string(ulong, char *, int); void get_task_mem_usage(ulong, struct task_mem_usage *); char *get_memory_size(char *); uint64_t generic_memory_size(void); char *swap_location(ulonglong, char *); void clear_swap_info_cache(void); uint memory_page_size(void); void force_page_size(char *); ulong first_vmalloc_address(void); ulong last_vmalloc_address(void); int in_vmlist_segment(ulong); int phys_to_page(physaddr_t, ulong *); int generic_get_kvaddr_ranges(struct vaddr_range *); int l1_cache_size(void); int dumpfile_memory(int); #define DUMPFILE_MEM_USED (1) #define DUMPFILE_FREE_MEM (2) #define DUMPFILE_MEM_DUMP (3) #define DUMPFILE_ENVIRONMENT (4) uint64_t total_node_memory(void); int generic_is_kvaddr(ulong); int generic_is_uvaddr(ulong, struct task_context *); void fill_stackbuf(struct bt_info *); void alter_stackbuf(struct bt_info *); int vaddr_type(ulong, struct task_context *); char *format_stack_entry(struct bt_info *bt, char *, ulong, ulong); int in_user_stack(ulong, ulong); int dump_inode_page(ulong); ulong valid_section_nr(ulong); void display_memory_from_file_offset(ulonglong, long, void *); void swap_info_init(void); /* * filesys.c */ void fd_init(void); void vfs_init(void); int is_a_tty(char *); int file_exists(char *, struct stat 
*); int file_readable(char *); int is_directory(char *); char *search_directory_tree(char *, char *, int); void open_tmpfile(void); void close_tmpfile(void); void open_tmpfile2(void); void set_tmpfile2(FILE *); void close_tmpfile2(void); void open_files_dump(ulong, int, struct reference *); void get_pathname(ulong, char *, int, int, ulong); ulong *get_mount_list(int *, struct task_context *); char *vfsmount_devname(ulong, char *, int); ulong file_to_dentry(ulong); ulong file_to_vfsmnt(ulong); int get_proc_version(void); int file_checksum(char *, long *); void dump_filesys_table(int); char *fill_file_cache(ulong); void clear_file_cache(void); char *fill_dentry_cache(ulong); void clear_dentry_cache(void); char *fill_inode_cache(ulong); void clear_inode_cache(void); int monitor_memory(long *, long *, long *, long *); int is_readable(char *); struct list_pair { ulong index; void *value; }; #define radix_tree_pair list_pair ulong do_radix_tree(ulong, int, struct list_pair *); #define RADIX_TREE_COUNT (1) #define RADIX_TREE_SEARCH (2) #define RADIX_TREE_DUMP (3) #define RADIX_TREE_GATHER (4) #define RADIX_TREE_DUMP_CB (5) /* * from: "include/linux/radix-tree.h" */ #define RADIX_TREE_ENTRY_MASK 3UL #define RADIX_TREE_EXCEPTIONAL_ENTRY 2 ulong do_xarray(ulong, int, struct list_pair *); #define XARRAY_COUNT (1) #define XARRAY_SEARCH (2) #define XARRAY_DUMP (3) #define XARRAY_GATHER (4) #define XARRAY_DUMP_CB (5) #define XARRAY_TAG_MASK (3UL) #define XARRAY_TAG_INTERNAL (2UL) int file_dump(ulong, ulong, ulong, int, int); #define DUMP_FULL_NAME 0x1 #define DUMP_INODE_ONLY 0x2 #define DUMP_DENTRY_ONLY 0x4 #define DUMP_EMPTY_FILE 0x8 #define DUMP_FILE_NRPAGES 0x10 int same_file(char *, char *); int cleanup_memory_driver(void); void maple_init(void); int do_mptree(struct tree_data *); ulong do_maple_tree(ulong, int, struct list_pair *); #define MAPLE_TREE_COUNT (1) #define MAPLE_TREE_SEARCH (2) #define MAPLE_TREE_DUMP (3) #define MAPLE_TREE_GATHER (4) #define MAPLE_TREE_DUMP_CB 
(5) /* * help.c */ #define HELP_COLUMNS 5 #define START_OF_HELP_DATA(X) "START_OF_HELP_DATA" X #define END_OF_HELP_DATA "END_OF_HELP_DATA" void help_init(void); void cmd_usage(char *, int); void display_version(void); void display_help_screen(char *); #ifdef ARM #define dump_machdep_table(X) arm_dump_machdep_table(X) #endif #ifdef ARM64 #define dump_machdep_table(X) arm64_dump_machdep_table(X) #endif #ifdef X86 #define dump_machdep_table(X) x86_dump_machdep_table(X) #endif #ifdef ALPHA #define dump_machdep_table(X) alpha_dump_machdep_table(X) #endif #ifdef PPC #define dump_machdep_table(X) ppc_dump_machdep_table(X) #endif #ifdef IA64 #define dump_machdep_table(X) ia64_dump_machdep_table(X) #endif #ifdef S390 #define dump_machdep_table(X) s390_dump_machdep_table(X) #endif #ifdef S390X #define dump_machdep_table(X) s390x_dump_machdep_table(X) #endif #ifdef X86_64 #define dump_machdep_table(X) x86_64_dump_machdep_table(X) #endif #ifdef PPC64 #define dump_machdep_table(X) ppc64_dump_machdep_table(X) #endif #ifdef MIPS #define dump_machdep_table(X) mips_dump_machdep_table(X) #endif #ifdef MIPS64 #define dump_machdep_table(X) mips64_dump_machdep_table(X) #endif #ifdef SPARC64 #define dump_machdep_table(X) sparc64_dump_machdep_table(X) #endif #ifdef RISCV64 #define dump_machdep_table(X) riscv64_dump_machdep_table(X) #endif #ifdef LOONGARCH64 #define dump_machdep_table(X) loongarch64_dump_machdep_table(X) #endif extern char *help_pointer[]; extern char *help_alias[]; extern char *help_ascii[]; extern char *help_bpf[]; extern char *help_bt[]; extern char *help_btop[]; extern char *help_dev[]; extern char *help_dis[]; extern char *help_eval[]; extern char *help_exit[]; extern char *help_extend[]; extern char *help_files[]; extern char *help_foreach[]; extern char *help_fuser[]; extern char *help_gdb[]; extern char *help_help[]; extern char *help_irq[]; extern char *help_kmem[]; extern char *help__list[]; extern char *help_tree[]; extern char *help_log[]; extern char 
*help_mach[]; extern char *help_mod[]; extern char *help_mount[]; extern char *help_net[]; extern char *help_p[]; extern char *help_ps[]; extern char *help_pte[]; extern char *help_ptob[]; extern char *help_ptov[]; extern char *help_quit[]; extern char *help_rd[]; extern char *help_repeat[]; extern char *help_runq[]; extern char *help_ipcs[]; extern char *help_sbitmapq[]; extern char *help_search[]; extern char *help_set[]; extern char *help_sig[]; extern char *help_struct[]; extern char *help_swap[]; extern char *help_sym[]; extern char *help_sys[]; extern char *help_task[]; extern char *help_timer[]; extern char *help_union[]; extern char *help_vm[]; extern char *help_vtop[]; extern char *help_waitq[]; extern char *help_whatis[]; extern char *help_wr[]; #if defined(S390) || defined(S390X) extern char *help_s390dbf[]; #endif extern char *help_map[]; extern char *help_rustfilt[]; /* * task.c */ void task_init(void); int set_context(ulong, ulong, uint); void show_context(struct task_context *); ulong pid_to_task(ulong); ulong task_to_pid(ulong); int task_exists(ulong); int is_kernel_thread(ulong); int is_idle_thread(ulong); void get_idle_threads(ulong *, int); char *task_state_string(ulong, char *, int); ulong task_flags(ulong); ulong task_state(ulong); ulong task_mm(ulong, int); ulong task_tgid(ulong); ulonglong task_last_run(ulong); ulong vaddr_in_task_struct(ulong); int comm_exists(char *); struct task_context *task_to_context(ulong); struct task_context *pid_to_context(ulong); struct task_context *tgid_to_context(ulong); ulong stkptr_to_task(ulong); ulong task_to_thread_info(ulong); ulong task_to_stackbase(ulong); int str_to_context(char *, ulong *, struct task_context **); #define STR_PID (0x1) #define STR_TASK (0x2) #define STR_INVALID (0x4) char *get_panicmsg(char *); char *task_cpu(int, char *, int); void print_task_header(FILE *, struct task_context *, int); ulong get_active_task(int); int is_task_active(ulong); int is_panic_thread(ulong); int 
get_panic_ksp(struct bt_info *, ulong *); void foreach(struct foreach_data *); int pid_exists(ulong); #define TASKS_PER_PID(x) pid_exists(x) char *fill_task_struct(ulong); #define IS_LAST_TASK_READ(task) ((ulong)(task) == tt->last_task_read) char *fill_thread_info(ulong); #define IS_LAST_THREAD_INFO_READ(ti) ((ulong)(ti) == tt->last_thread_info_read) char *fill_mm_struct(ulong); #define IS_LAST_MM_READ(mm) ((ulong)(mm) == tt->last_mm_read) void do_task(ulong, ulong, struct reference *, unsigned int); void clear_task_cache(void); int get_active_set(void); void clear_active_set(void); void do_sig(ulong, ulong, struct reference *); void modify_signame(int, char *, char *); ulong generic_get_stackbase(ulong); ulong generic_get_stacktop(ulong); void dump_task_table(int); void sort_context_array(void); void sort_tgid_array(void); int sort_by_tgid(const void *, const void *); int in_irq_ctx(ulonglong, int, ulong); void check_stack_overflow(void); /* * extensions.c */ void register_extension(struct command_table_entry *); void dump_extension_table(int); void load_extension(char *); void unload_extension(char *); void preload_extensions(void); /* Hooks for sial */ unsigned long get_curtask(void); char *crash_global_cmd(void); struct command_table_entry *crash_cmd_table(void); /* * kernel.c */ void kernel_init(void); void module_init(void); void verify_version(void); void verify_spinlock(void); void non_matching_kernel(void); struct load_module *modref_to_load_module(char *); int load_module_symbols_helper(char *); void unlink_module(struct load_module *); int check_specified_module_tree(char *, char *); int is_system_call(char *, ulong); void get_dumpfile_regs(struct bt_info*, ulong*, ulong*); void generic_dump_irq(int); void generic_get_irq_affinity(int); void generic_show_interrupts(int, ulong *); int generic_dis_filter(ulong, char *, unsigned int); int kernel_BUG_encoding_bytes(void); void display_sys_stats(void); char *get_uptime(char *, ulonglong *); void 
clone_bt_info(struct bt_info *, struct bt_info *, struct task_context *); void dump_kernel_table(int); void dump_bt_info(struct bt_info *, char *where); void dump_log(int); void parse_kernel_version(char *); #define LOG_LEVEL(v) ((v) & 0x07) #define SHOW_LOG_LEVEL (0x1) #define SHOW_LOG_DICT (0x2) #define SHOW_LOG_TEXT (0x4) #define SHOW_LOG_AUDIT (0x8) #define SHOW_LOG_CTIME (0x10) #define SHOW_LOG_SAFE (0x20) #define SHOW_LOG_CALLER (0x40) #define SHOW_LOG_RUST (0x80) void set_cpu(int); void clear_machdep_cache(void); struct stack_hook *gather_text_list(struct bt_info *); int get_cpus_online(void); int get_cpus_active(void); int get_cpus_present(void); int get_cpus_possible(void); int check_offline_cpu(int); int hide_offline_cpu(int); int get_highest_cpu_online(void); int get_highest_cpu_present(void); int get_cpus_to_display(void); void get_log_from_vmcoreinfo(char *file); int in_cpu_map(int, int); void paravirt_init(void); void print_stack_text_syms(struct bt_info *, ulong, ulong); void back_trace(struct bt_info *); int in_alternate_stack(int, ulong); ulong cpu_map_addr(const char *type); #define BT_RAW (0x1ULL) #define BT_SYMBOLIC_ARGS (0x2ULL) #define BT_FULL (0x4ULL) #define BT_TEXT_SYMBOLS (0x8ULL) #define BT_TEXT_SYMBOLS_PRINT (0x10ULL) #define BT_TEXT_SYMBOLS_NOPRINT (0x20ULL) #define BT_USE_GDB (0x40ULL) #define BT_EXCEPTION_FRAME (0x80ULL) #define BT_LINE_NUMBERS (0x100ULL) #define BT_USER_EFRAME (0x200ULL) #define BT_INCOMPLETE_USER_EFRAME (BT_USER_EFRAME) #define BT_SAVE_LASTSP (0x400ULL) #define BT_FROM_EXCEPTION (0x800ULL) #define BT_FROM_CALLFRAME (0x1000ULL) #define BT_EFRAME_SEARCH (0x2000ULL) #define BT_SPECULATE (0x4000ULL) #define BT_FRAMESIZE_DISABLE (BT_SPECULATE) #define BT_RESCHEDULE (0x8000ULL) #define BT_SCHEDULE (BT_RESCHEDULE) #define BT_RET_FROM_SMP_FORK (0x10000ULL) #define BT_STRACE (0x20000ULL) #define BT_KDUMP_ADJUST (BT_STRACE) #define BT_KSTACKP (0x40000ULL) #define BT_LOOP_TRAP (0x80000ULL) #define BT_BUMP_FRAME_LEVEL 
(0x100000ULL) #define BT_EFRAME_COUNT (0x200000ULL) #define BT_CPU_IDLE (0x400000ULL) #define BT_WRAP_TRAP (0x800000ULL) #define BT_KERNEL_THREAD (0x1000000ULL) #define BT_ERROR_MASK (BT_LOOP_TRAP|BT_WRAP_TRAP|BT_KERNEL_THREAD|BT_CPU_IDLE) #define BT_UNWIND_ERROR (0x2000000ULL) #define BT_OLD_BACK_TRACE (0x4000000ULL) #define BT_OPT_BACK_TRACE (0x4000000ULL) #define BT_FRAMESIZE_DEBUG (0x8000000ULL) #define BT_CONTEXT_SWITCH (0x10000000ULL) #define BT_HARDIRQ (0x20000000ULL) #define BT_SOFTIRQ (0x40000000ULL) #define BT_CHECK_CALLER (0x80000000ULL) #define BT_NO_CHECK_CALLER (0x100000000ULL) #define BT_EXCEPTION_STACK (0x200000000ULL) #define BT_IRQSTACK (0x400000000ULL) #define BT_DUMPFILE_SEARCH (0x800000000ULL) #define BT_EFRAME_SEARCH2 (0x1000000000ULL) #define BT_START (0x2000000000ULL) #define BT_TEXT_SYMBOLS_ALL (0x4000000000ULL) #define BT_XEN_STOP_THIS_CPU (0x8000000000ULL) #define BT_THREAD_GROUP (0x10000000000ULL) #define BT_SAVE_EFRAME_IP (0x20000000000ULL) #define BT_FULL_SYM_SLAB (0x40000000000ULL) #define BT_KDUMP_ELF_REGS (0x80000000000ULL) #define BT_USER_SPACE (0x100000000000ULL) #define BT_KERNEL_SPACE (0x200000000000ULL) #define BT_FULL_SYM_SLAB2 (0x400000000000ULL) #define BT_EFRAME_TARGET (0x800000000000ULL) #define BT_CPUMASK (0x1000000000000ULL) #define BT_SHOW_ALL_REGS (0x2000000000000ULL) #define BT_REGS_NOT_FOUND (0x4000000000000ULL) #define BT_OVERFLOW_STACK (0x8000000000000ULL) #define BT_SKIP_IDLE (0x10000000000000ULL) #define BT_NO_PRINT_REGS (0x20000000000000ULL) #define BT_SYMBOL_OFFSET (BT_SYMBOLIC_ARGS) #define BT_REF_HEXVAL (0x1) #define BT_REF_SYMBOL (0x2) #define BT_REF_FOUND (0x4) #define BT_REFERENCE_CHECK(X) ((X)->ref) #define BT_REFERENCE_FOUND(X) ((X)->ref && ((X)->ref->cmdflags & BT_REF_FOUND)) #define NO_MODULES() \ (!kt->module_list || (kt->module_list == kt->kernel_module)) #define USER_EFRAME_ADDR(task) \ ((ulong)task + UNION_SIZE("task_union") - SIZE(pt_regs)) struct remote_file { char *filename; char *local; int fd; 
int flags; int type; long csum; off_t size; }; #define REMOTE_VERBOSE (O_RDWR << 1) #define REMOTE_COPY_DONE (REMOTE_VERBOSE << 1) #define TYPE_ELF (REMOTE_VERBOSE << 2) #define TYPE_DEVMEM (REMOTE_VERBOSE << 3) #define TYPE_MCLXCD (REMOTE_VERBOSE << 4) #define TYPE_LKCD (REMOTE_VERBOSE << 5) #define TYPE_S390D (REMOTE_VERBOSE << 6) #define TYPE_NETDUMP (REMOTE_VERBOSE << 7) ulonglong xen_m2p(ulonglong); void read_in_kernel_config(int); #define IKCFG_INIT (0) #define IKCFG_READ (1) #define IKCFG_SETUP (2) #define IKCFG_FREE (3) int get_kernel_config(char *, char **); enum { IKCONFIG_N, IKCONFIG_Y, IKCONFIG_M, IKCONFIG_STR, }; #define MAGIC_START "IKCFG_ST" #define MAGIC_END "IKCFG_ED" #define MAGIC_SIZE (sizeof(MAGIC_START) - 1) /* * dev.c */ void dev_init(void); void dump_dev_table(void); void devdump_extract(void *, ulonglong, char *, FILE *); void devdump_info(void *, ulonglong, FILE *); /* * ipcs.c */ void ipcs_init(void); ulong idr_find(ulong, int); /* * sbitmap.c */ /* sbitmap helpers */ struct sbitmap_context { unsigned depth; unsigned shift; unsigned map_nr; ulong map_addr; ulong alloc_hint; bool round_robin; }; typedef bool (*sbitmap_for_each_fn)(unsigned int idx, void *p); void sbitmap_for_each_set(const struct sbitmap_context *sc, sbitmap_for_each_fn fn, void *data); void sbitmap_context_load(ulong addr, struct sbitmap_context *sc); /* sbitmap_queue helpers */ typedef bool (*sbitmapq_for_each_fn)(unsigned int idx, ulong addr, void *p); struct sbitmapq_ops { /* array params associated with the bitmap */ ulong addr; ulong size; /* callback params */ sbitmapq_for_each_fn fn; void *p; }; void sbitmapq_init(void); void sbitmapq_for_each_set(ulong addr, struct sbitmapq_ops *ops); #ifdef ARM void arm_init(int); void arm_dump_machdep_table(ulong); int arm_is_vmalloc_addr(ulong); void arm_dump_backtrace_entry(struct bt_info *, int, ulong, ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to ARM architecture\n") struct arm_pt_regs { 
ulong uregs[18]; }; #define ARM_cpsr uregs[16] #define ARM_pc uregs[15] #define ARM_lr uregs[14] #define ARM_sp uregs[13] #define ARM_ip uregs[12] #define ARM_fp uregs[11] #define ARM_r10 uregs[10] #define ARM_r9 uregs[9] #define ARM_r8 uregs[8] #define ARM_r7 uregs[7] #define ARM_r6 uregs[6] #define ARM_r5 uregs[5] #define ARM_r4 uregs[4] #define ARM_r3 uregs[3] #define ARM_r2 uregs[2] #define ARM_r1 uregs[1] #define ARM_r0 uregs[0] #define ARM_ORIG_r0 uregs[17] #define KSYMS_START (0x1) #define PHYS_BASE (0x2) #define PGTABLE_V2 (0x4) #define IDMAP_PGD (0x8) #define KVBASE_MASK (0x1ffffff) struct machine_specific { ulong phys_base; ulong vmalloc_start_addr; ulong modules_vaddr; ulong modules_end; ulong kernel_text_start; ulong kernel_text_end; ulong exception_text_start; ulong exception_text_end; ulonglong last_pgd_read_lpae; ulonglong last_pmd_read_lpae; ulonglong last_ptbl_read_lpae; struct arm_pt_regs *crash_task_regs; int unwind_index_prel31; }; int init_unwind_tables(void); void unwind_backtrace(struct bt_info *); #endif /* ARM */ /* * arm64.c */ #ifdef ARM64 void arm64_init(int); void arm64_dump_machdep_table(ulong); ulong arm64_VTOP(ulong); ulong arm64_PTOV(ulong); int arm64_IS_VMALLOC_ADDR(ulong); ulong arm64_swp_type(ulong); ulong arm64_swp_offset(ulong); #endif /* * alpha.c */ #ifdef ALPHA void alpha_init(int); void alpha_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to alpha architecture\n") #define HWRESET_TASK(X) ((machdep->flags & HWRESET) && is_task_active(X) && \ (task_to_context(X)->processor == 0)) #endif /* * x86.c */ #ifdef X86 void x86_init(int); void x86_dump_machdep_table(ulong); void x86_display_idt_table(void); #define display_idt_table() x86_display_idt_table() #define KSYMS_START (0x1) void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); char *x86_function_called_by(ulong); struct syment *x86_jmp_error_code(ulong); struct syment *x86_text_lock_jmp(ulong, ulong *); struct 
machine_specific { ulong *idt_table; ulong entry_tramp_start; ulong entry_tramp_end; physaddr_t entry_tramp_start_phys; ulonglong last_pmd_read_PAE; ulonglong last_ptbl_read_PAE; ulong page_protnone; int max_numnodes; ulong *remap_start_vaddr; ulong *remap_end_vaddr; ulong *remap_start_pfn; }; struct syment *x86_is_entry_tramp_address(ulong, ulong *); #endif /* * x86_64.c */ #ifdef X86_64 void x86_64_init(int); void x86_64_dump_machdep_table(ulong); ulong x86_64_PTOV(ulong); ulong x86_64_VTOP(ulong); int x86_64_IS_VMALLOC_ADDR(ulong); ulong x86_64_swp_type(ulong); ulong x86_64_swp_offset(ulong); void x86_64_display_idt_table(void); #define display_idt_table() x86_64_display_idt_table() long x86_64_exception_frame(ulong, ulong, char *, struct bt_info *, FILE *); #define EFRAME_INIT (0) struct x86_64_pt_regs_offsets { long r15; long r14; long r13; long r12; long rbp; long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ long r11; long r10; long r9; long r8; long rax; long rcx; long rdx; long rsi; long rdi; long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ long rip; long cs; long eflags; long rsp; long ss; }; #define MAX_EXCEPTION_STACKS 7 #define NMI_STACK (machdep->machspec->stkinfo.NMI_stack_index) struct x86_64_stkinfo { ulong ebase[NR_CPUS][MAX_EXCEPTION_STACKS]; int esize[MAX_EXCEPTION_STACKS]; char available[NR_CPUS][MAX_EXCEPTION_STACKS]; ulong ibase[NR_CPUS]; int isize; int NMI_stack_index; char *exception_stacks[MAX_EXCEPTION_STACKS]; }; typedef struct __attribute__((__packed__)) { signed short sp_offset; signed short bp_offset; unsigned int sp_reg:4; unsigned int bp_reg:4; unsigned int type:2; unsigned int signal:1; unsigned int end:1; } kernel_orc_entry; typedef struct __attribute__((__packed__)) { signed short sp_offset; signed short bp_offset; unsigned int sp_reg:4; unsigned int bp_reg:4; unsigned int type:3; unsigned int signal:1; } kernel_orc_entry_6_4; typedef struct orc_entry { signed short 
sp_offset; signed short bp_offset; unsigned int sp_reg; unsigned int bp_reg; unsigned int type; unsigned int signal; unsigned int end; } orc_entry; struct ORC_data { int module_ORC; uint lookup_num_blocks; ulong __start_orc_unwind_ip; ulong __stop_orc_unwind_ip; ulong __start_orc_unwind; ulong __stop_orc_unwind; ulong orc_lookup; ulong ip_entry; ulong orc_entry; orc_entry orc_entry_data; int has_signal; int has_end; }; #define ORC_TYPE_CALL ((machdep->flags & ORC_6_4) ? 2 : 0) /* The below entries are not used and must be updated if we use them. */ #define ORC_TYPE_REGS 1 #define ORC_TYPE_REGS_IRET 2 #define UNWIND_HINT_TYPE_SAVE 3 #define UNWIND_HINT_TYPE_RESTORE 4 #define ORC_REG_UNDEFINED 0 #define ORC_REG_PREV_SP 1 #define ORC_REG_DX 2 #define ORC_REG_DI 3 #define ORC_REG_BP 4 #define ORC_REG_SP 5 #define ORC_REG_R10 6 #define ORC_REG_R13 7 #define ORC_REG_BP_INDIRECT 8 #define ORC_REG_SP_INDIRECT 9 #define ORC_REG_MAX 15 struct machine_specific { ulong userspace_top; ulong page_offset; ulong vmalloc_start_addr; ulong vmalloc_end; ulong vmemmap_vaddr; ulong vmemmap_end; ulong modules_vaddr; ulong modules_end; ulong phys_base; char *pml4; char *upml; ulong last_upml_read; ulong last_pml4_read; char *irqstack; ulong irq_eframe_link; struct x86_64_pt_regs_offsets pto; struct x86_64_stkinfo stkinfo; ulong *current; ulong *crash_nmi_rsp; ulong vsyscall_page; ulong thread_return; ulong page_protnone; ulong GART_start; ulong GART_end; ulong kernel_image_size; ulong physical_mask_shift; ulong pgdir_shift; char *p4d; ulong last_p4d_read; struct ORC_data orc; ulong irq_stack_gap; ulong kpti_entry_stack; ulong kpti_entry_stack_size; ulong ptrs_per_pgd; ulong cpu_entry_area_start; ulong cpu_entry_area_end; ulong page_offset_force; char **exception_functions; ulong sme_mask; }; #define KSYMS_START (0x1) #define PT_REGS_INIT (0x2) #define VM_ORIG (0x4) #define VM_2_6_11 (0x8) #define VM_XEN (0x10) #define NO_TSS (0x20) #define SCHED_TEXT (0x40) #define PHYS_BASE (0x80) 
#define VM_XEN_RHEL4 (0x100)
#define FRAMEPOINTER (0x200)
#define GART_REGION (0x400)
#define NESTED_NMI (0x800)
#define RANDOMIZED (0x1000)
#define VM_5LEVEL (0x2000)
#define ORC (0x4000)
#define KPTI (0x8000)
#define L1TF (0x10000)
#define ORC_6_4 (0x20000)
/* VM_FLAGS: mask of the mutually-exclusive x86_64 virtual-memory layout flags above */
#define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4|VM_5LEVEL)
#define _2MB_PAGE_MASK (~((MEGABYTES(2))-1))
#define _1GB_PAGE_MASK (~((GIGABYTES(1))-1))
#endif

#if defined(X86) || defined(X86_64)
/*
 * unwind_x86_32_64.c
 */
void init_unwind_table(void);
int dwarf_backtrace(struct bt_info *, int, ulong);
void dwarf_debug(struct bt_info *);
int dwarf_print_stack_entry(struct bt_info *, int);
#endif

/*
 * ppc64.c
 */

/*
 * This structure was copied from kernel source
 * in include/asm-ppc/ptrace.h
 */
struct ppc64_pt_regs {
	long gpr[32];
	long nip;
	long msr;
	long orig_gpr3;		/* Used for restarting system calls */
	long ctr;
	long link;
	long xer;
	long ccr;
	long mq;		/* 601 only (not used at present) */
				/* Used on APUS to hold IPL value. */
	long trap;		/* Reason for being here */
	long dar;		/* Fault registers */
	long dsisr;
	long result;		/* Result of a system call */
};

struct ppc64_elf_siginfo {
	int si_signo;
	int si_code;
	int si_errno;
};

/* NOTE(review): presumably mirrors the kernel's ELF prstatus note layout
 * for ppc64 dumps — confirm against the kernel's elf_prstatus definition. */
struct ppc64_elf_prstatus {
	struct ppc64_elf_siginfo pr_info;
	short pr_cursig;
	unsigned long pr_sigpend;
	unsigned long pr_sighold;
	pid_t pr_pid;
	pid_t pr_ppid;
	pid_t pr_pgrp;
	pid_t pr_sid;
	struct timeval pr_utime;
	struct timeval pr_stime;
	struct timeval pr_cutime;
	struct timeval pr_cstime;
	struct ppc64_pt_regs pr_reg;
	int pr_fpvalid;
};

#ifdef PPC64

enum emergency_stack_type {
	NONE_STACK = 0,
	EMERGENCY_STACK,
	NMI_EMERGENCY_STACK,
	MC_EMERGENCY_STACK
};

struct ppc64_opal {
	uint64_t base;
	uint64_t entry;
	uint64_t size;
};

struct ppc64_vmemmap {
	unsigned long phys;
	unsigned long virt;
};

/*
 * Used to store the HW interrupt stack. It is only for 2.4.
 */
struct machine_specific {
	ulong *hwintrstack;
	char *hwstackbuf;
	uint hwstacksize;

	/* Emergency stacks */
	ulong *emergency_sp;
	ulong *nmi_emergency_sp;
	ulong *mc_emergency_sp;

	/* page-table geometry (index sizes/shifts per level) */
	uint l4_index_size;
	uint l3_index_size;
	uint l2_index_size;
	uint l1_index_size;
	uint ptrs_per_l4;
	uint ptrs_per_l3;
	uint ptrs_per_l2;
	uint ptrs_per_l1;
	uint l4_shift;
	uint l3_shift;
	uint l2_shift;
	uint l1_shift;
	uint pte_rpn_shift;
	ulong pte_rpn_mask;
	ulong pgd_masked_bits;
	ulong pud_masked_bits;
	ulong pmd_masked_bits;
	int vmemmap_cnt;
	int vmemmap_psize;
	ulong vmemmap_base;
	struct ppc64_vmemmap *vmemmap_list;
	/* PTE flag bit values, filled in at runtime (kernel-version dependent) */
	ulong _page_pte;
	ulong _page_present;
	ulong _page_user;
	ulong _page_rw;
	ulong _page_guarded;
	ulong _page_coherent;
	ulong _page_no_cache;
	ulong _page_writethru;
	ulong _page_dirty;
	ulong _page_accessed;
	int (*is_kvaddr)(ulong);
	int (*is_vmaddr)(ulong);
	struct ppc64_opal opal;
};

void ppc64_init(int);
void ppc64_dump_machdep_table(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to PowerPC architecture\n")
#define KSYMS_START (0x1)
#define VM_ORIG (0x2)
#define VMEMMAP_AWARE (0x4)
#define BOOK3E (0x8)
#define PHYS_ENTRY_L4 (0x10)
#define SWAP_ENTRY_L4 (0x20)
/*
 * The flag bit for radix MMU in cpu_spec.mmu_features
 * in the kernel is also 0x40.
 */
#define RADIX_MMU (0x40)
#define OPAL_FW (0x80)

#define REGION_SHIFT (60UL)
#define REGION_ID(addr) (((unsigned long)(addr)) >> REGION_SHIFT)
#define VMEMMAP_REGION_ID (0xfUL)
#endif

/*
 * ppc.c
 */
#ifdef PPC
void ppc_init(int);
void ppc_dump_machdep_table(ulong);
void ppc_relocate_nt_prstatus_percpu(void **, uint *);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to PowerPC architecture\n")
#define KSYMS_START (0x1)
/* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */
#define CPU_BOOKE (0x00008000)
#else
/* no-op stub when not built for PPC */
#define ppc_relocate_nt_prstatus_percpu(X,Y) do {} while (0)
#endif

/*
 * lkcd_fix_mem.c
 */
struct _dump_header_asm_s;
struct _dump_header_s;
ulong get_lkcd_switch_stack(ulong);
int fix_addr_v8(struct _dump_header_asm_s *);
int lkcd_dump_init_v8_arch(struct _dump_header_s *dh);
int fix_addr_v7(int);
int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp);
int lkcd_get_kernel_start_v8(ulong *addr);

/*
 * lkcd_v8.c
 */
int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp);

/*
 * ia64.c
 */
#ifdef IA64
void ia64_init(int);
void ia64_dump_machdep_table(ulong);
void ia64_dump_line_number(ulong);
ulong ia64_get_switch_stack(ulong);
void ia64_exception_frame(ulong, struct bt_info *bt);
ulong ia64_PTOV(ulong);
ulong ia64_VTOP(ulong);
int ia64_IS_VMALLOC_ADDR(ulong);
#define display_idt_table() \
	error(FATAL, "-d option TBD on ia64 architecture\n");
int ia64_in_init_stack(ulong addr);
int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt);
physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo);

#define OLD_UNWIND (0x1)   /* CONFIG_IA64_NEW_UNWIND not turned on */
#define NEW_UNWIND (0x2)   /* CONFIG_IA64_NEW_UNWIND turned on */
#define NEW_UNW_V1 (0x4)
#define NEW_UNW_V2 (0x8)
#define NEW_UNW_V3 (0x10)
#define UNW_OUT_OF_SYNC (0x20)  /* shared data structures out of sync */
#define UNW_READ (0x40)         /* kernel unw has been read successfully */
#define MEM_LIMIT (0x80)
#define UNW_PTREGS (0x100)
#define UNW_R0 (0x200)

#undef IA64_RBS_OFFSET
#undef IA64_STK_OFFSET
#define IA64_RBS_OFFSET ((SIZE(task_struct) + 15) & ~15)
#define IA64_STK_OFFSET (STACKSIZE())

struct machine_specific {
	ulong cpu_data_address;
	ulong unimpl_va_mask;
	ulong unimpl_pa_mask;
	long unw_tables_offset;
	long unw_kernel_table_offset;
	long unw_pt_regs_offsets;
	int script_index;
	struct unw_script *script_cache;
	ulong script_cache_fills;
	ulong script_cache_hits;
	void *unw;
	ulong mem_limit;
	ulong kernel_region;
	ulong kernel_start;
	ulong phys_start;
	ulong vmalloc_start;
	char *ia64_memmap;
	uint64_t efi_memmap_size;
	uint64_t efi_memdesc_size;
	/* unwinder entry points, selected at init time (see unwind.c v1/v2/v3 below) */
	void (*unwind_init)(void);
	void (*unwind)(struct bt_info *);
	void (*dump_unwind_stats)(void);
	int (*unwind_debug)(ulong);
	int ia64_init_stack_size;
};

/*
 * unwind.c
 */
void unwind_init_v1(void);
void unwind_v1(struct bt_info *);
void dump_unwind_stats_v1(void);
int unwind_debug_v1(ulong);
void unwind_init_v2(void);
void unwind_v2(struct bt_info *);
void dump_unwind_stats_v2(void);
int unwind_debug_v2(ulong);
void unwind_init_v3(void);
void unwind_v3(struct bt_info *);
void dump_unwind_stats_v3(void);
int unwind_debug_v3(ulong);
#endif /* IA64 */

/*
 * s390.c
 */
#ifdef S390
void s390_init(int);
void s390_dump_machdep_table(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to S390 architecture\n")
#define KSYMS_START (0x1)
#endif

/*
 * s390_dump.c
 */
int is_s390_dump(char *);
FILE* s390_dump_init(char *);
int read_s390_dumpfile(int, void *, int, ulong, physaddr_t);
int write_s390_dumpfile(int, void *, int, ulong, physaddr_t);
uint s390_page_size(void);
int s390_memory_used(void);
int s390_free_memory(void);
int s390_memory_dump(FILE *);
ulong get_s390_panic_task(void);
void get_s390_panicmsg(char *);

/*
 * s390x.c
 */
#ifdef S390X
struct machine_specific {
	/* address translation hooks, installed at init time */
	ulong (*virt_to_phys)(ulong vaddr);
	ulong (*phys_to_virt)(ulong paddr);
	int (*is_vmalloc_addr)(ulong vaddr);
	ulong __kaslr_offset_phys;
	ulong amode31_start;
	ulong amode31_end;
};
void s390x_init(int);
ulong s390x_PTOV(ulong);
ulong s390x_VTOP(ulong);
int s390x_IS_VMALLOC_ADDR(ulong);
void s390x_dump_machdep_table(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to S390X architecture\n")
#define KSYMS_START (0x1)
#endif

/*
 * mips.c
 */
void mips_display_regs_from_elf_notes(int, FILE *);

#ifdef MIPS
void mips_init(int);
void mips_dump_machdep_table(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to MIPS architecture\n")

struct mips_regset {
	ulong regs[45];
};

struct mips_pt_regs_main {
	ulong regs[32];
	ulong cp0_status;
	ulong hi;
	ulong lo;
};

struct mips_pt_regs_cp0 {
	ulong cp0_badvaddr;
	ulong cp0_cause;
	ulong cp0_epc;
};

#define KSYMS_START (0x1)
#define PHYS_BASE (0x2)

#define KVBASE_MASK (0x1ffffff)

struct machine_specific {
	ulong phys_base;
	ulong vmalloc_start_addr;
	ulong modules_vaddr;
	ulong modules_end;

	/* PTE flag bit values, resolved at runtime and exposed
	 * through the _PAGE_* accessor macros below */
	ulong _page_present;
	ulong _page_read;
	ulong _page_write;
	ulong _page_accessed;
	ulong _page_modified;
	ulong _page_global;
	ulong _page_valid;
	ulong _page_no_read;
	ulong _page_no_exec;
	ulong _page_dirty;

	ulong _pfn_shift;

#define _PAGE_PRESENT (machdep->machspec->_page_present)
#define _PAGE_READ (machdep->machspec->_page_read)
#define _PAGE_WRITE (machdep->machspec->_page_write)
#define _PAGE_ACCESSED (machdep->machspec->_page_accessed)
#define _PAGE_MODIFIED (machdep->machspec->_page_modified)
#define _PAGE_GLOBAL (machdep->machspec->_page_global)
#define _PAGE_VALID (machdep->machspec->_page_valid)
#define _PAGE_NO_READ (machdep->machspec->_page_no_read)
#define _PAGE_NO_EXEC (machdep->machspec->_page_no_exec)
#define _PAGE_DIRTY (machdep->machspec->_page_dirty)
#define _PFN_SHIFT (machdep->machspec->_pfn_shift)

	struct mips_regset *crash_task_regs;
};
#endif /* MIPS */

/*
 * mips64.c
 */
void mips64_display_regs_from_elf_notes(int, FILE *);

#ifdef MIPS64
void mips64_init(int);
void mips64_dump_machdep_table(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to MIPS64 architecture\n")

/* from arch/mips/include/asm/ptrace.h */
struct mips64_register {
	ulong regs[45];
};

struct mips64_pt_regs_main {
	ulong regs[32];
	ulong cp0_status;
	ulong hi;
	ulong lo;
};

struct mips64_pt_regs_cp0 {
	ulong cp0_badvaddr;
	ulong cp0_cause;
	ulong cp0_epc;
};

struct mips64_unwind_frame {
	unsigned long sp;
	unsigned long pc;
	unsigned long ra;
};

#define KSYMS_START (0x1)

struct machine_specific {
	ulong phys_base;
	ulong vmalloc_start_addr;
	ulong modules_vaddr;
	ulong modules_end;

	/* PTE flag bit values, resolved at runtime (see _PAGE_* macros below) */
	ulong _page_present;
	ulong _page_read;
	ulong _page_write;
	ulong _page_accessed;
	ulong _page_modified;
	ulong _page_huge;
	ulong _page_special;
	ulong _page_protnone;
	ulong _page_global;
	ulong _page_valid;
	ulong _page_no_read;
	ulong _page_no_exec;
	ulong _page_dirty;

	ulong _pfn_shift;

	struct mips64_register *crash_task_regs;
};

/* from arch/mips/include/asm/pgtable-bits.h */
#define _PAGE_PRESENT (machdep->machspec->_page_present)
#define _PAGE_READ (machdep->machspec->_page_read)
#define _PAGE_WRITE (machdep->machspec->_page_write)
#define _PAGE_ACCESSED (machdep->machspec->_page_accessed)
#define _PAGE_MODIFIED (machdep->machspec->_page_modified)
#define _PAGE_HUGE (machdep->machspec->_page_huge)
#define _PAGE_SPECIAL (machdep->machspec->_page_special)
#define _PAGE_PROTNONE (machdep->machspec->_page_protnone)
#define _PAGE_GLOBAL (machdep->machspec->_page_global)
#define _PAGE_VALID (machdep->machspec->_page_valid)
#define _PAGE_NO_READ (machdep->machspec->_page_no_read)
#define _PAGE_NO_EXEC (machdep->machspec->_page_no_exec)
#define _PAGE_DIRTY (machdep->machspec->_page_dirty)
#define _PFN_SHIFT (machdep->machspec->_pfn_shift)
#endif /* MIPS64 */

/*
 * riscv64.c
 */
void riscv64_display_regs_from_elf_notes(int, FILE *);

#ifdef RISCV64
void riscv64_init(int);
void riscv64_dump_machdep_table(ulong);
int riscv64_IS_VMALLOC_ADDR(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to RISCV64 architecture\n")

/*
 * regs[0,31] : struct user_regs_struct
 *		from arch/riscv/include/uapi/asm/ptrace.h
 * regs[0,35] : struct pt_regs
 *		from arch/riscv/include/asm/ptrace.h
 */
struct riscv64_register {
	ulong regs[36];
};

struct riscv64_unwind_frame {
	ulong fp;
	ulong sp;
	ulong pc;
};

struct machine_specific {
	ulong phys_base;
	ulong page_offset;
	ulong vmalloc_start_addr;
	ulong vmalloc_end;
	ulong vmemmap_vaddr;
	ulong vmemmap_end;
	ulong modules_vaddr;
	ulong modules_end;
	ulong kernel_link_addr;
	ulong va_kernel_pa_offset;

	/* PTE flag bit values, resolved at runtime (see _PAGE_* macros below) */
	ulong _page_present;
	ulong _page_read;
	ulong _page_write;
	ulong _page_exec;
	ulong _page_user;
	ulong _page_global;
	ulong _page_accessed;
	ulong _page_dirty;
	ulong _page_soft;

	ulong _pfn_shift;
	ulong va_bits;
	char *p4d;
	ulong last_p4d_read;
	ulong struct_page_size;
	struct riscv64_register *crash_task_regs;
	ulong irq_stack_size;
	ulong *irq_stacks;
	ulong overflow_stack_size;
	ulong *overflow_stacks;
};

/* from arch/riscv/include/asm/pgtable-bits.h */
#define _PAGE_PRESENT (machdep->machspec->_page_present)
#define _PAGE_READ (machdep->machspec->_page_read)
#define _PAGE_WRITE (machdep->machspec->_page_write)
#define _PAGE_EXEC (machdep->machspec->_page_exec)
#define _PAGE_USER (machdep->machspec->_page_user)
#define _PAGE_GLOBAL (machdep->machspec->_page_global)
#define _PAGE_ACCESSED (machdep->machspec->_page_accessed)
#define _PAGE_DIRTY (machdep->machspec->_page_dirty)
#define _PAGE_SOFT (machdep->machspec->_page_soft)
#define _PAGE_SEC (machdep->machspec->_page_sec)
#define _PAGE_SHARE (machdep->machspec->_page_share)
#define _PAGE_BUF (machdep->machspec->_page_buf)
#define _PAGE_CACHE (machdep->machspec->_page_cache)
#define _PAGE_SO (machdep->machspec->_page_so)
#define _PAGE_SPECIAL _PAGE_SOFT
#define _PAGE_TABLE _PAGE_PRESENT
#define _PAGE_PROT_NONE _PAGE_READ
#define _PAGE_PFN_SHIFT 10

/* from 'struct pt_regs' definitions of RISC-V arch */
#define RISCV64_REGS_EPC 0
#define RISCV64_REGS_RA 1
#define RISCV64_REGS_SP 2
#define RISCV64_REGS_FP 8
#define RISCV64_REGS_STATUS 32
#define RISCV64_REGS_CAUSE 34
#endif /* RISCV64 */

/*
 * sparc64.c
 */
#ifdef SPARC64
void sparc64_init(int);
void sparc64_dump_machdep_table(ulong);
int sparc64_vmalloc_addr(ulong);
#define display_idt_table() \
	error(FATAL, "The -d option is not applicable to sparc64.\n")
#endif

/*
 * loongarch64.c
 */
void loongarch64_display_regs_from_elf_notes(int, FILE *);

#ifdef LOONGARCH64
void loongarch64_init(int);
void loongarch64_dump_machdep_table(ulong);
#define display_idt_table() \
	error(FATAL, "-d option is not applicable to LOONGARCH64 architecture\n")

#define KSYMS_START (0x1)

struct machine_specific {
	ulong phys_base;
	ulong vmalloc_start_addr;
	ulong modules_vaddr;
	ulong modules_end;

	struct loongarch64_pt_regs *crash_task_regs;
};

/*
 * Basic page table format:
 *
 *   63 62 61      PALEN-1              12  10  9  8 7 6 5 4  3  2 1 0
 *  +----+--+--+------+--------------------+----+--+--+-+-+-+---+---+-+-+
 *  |RPLV|NX|NR|      |   PA[PALEN-1:12]   |    |SP|PN|W|P|G|MAT|PLV|D|V|
 *  +----+--+--+------+--------------------+----+--+--+-+-+-+---+---+-+-+
 *
 *
 * Huge page table format:
 *
 *   63 62 61      PALEN-1           12     10  9  8 7 6 5 4  3  2 1 0
 *  +----+--+--+------+-----------------+--+----+--+--+-+-+-+---+---+-+-+
 *  |RPLV|NX|NR|      |  PA[PALEN-1:12] | G|    |SP|PN|W|P|H|MAT|PLV|D|V|
 *  +----+--+--+------+-----------------+--+----+--+--+-+-+-+---+---+-+-+
 *
 */

/* from arch/loongarch/include/asm/pgtable-bits.h */
/* Page table bits */
#define _PAGE_VALID_SHIFT 0
#define _PAGE_ACCESSED_SHIFT 0  /* Reuse Valid for Accessed */
#define _PAGE_DIRTY_SHIFT 1
#define _PAGE_PLV_SHIFT 2  /* 2~3, two bits */
#define _CACHE_SHIFT 4  /* 4~5, two bits */
#define _PAGE_GLOBAL_SHIFT 6
#define _PAGE_HUGE_SHIFT 6  /* HUGE is a PMD bit */
#define _PAGE_PRESENT_SHIFT 7
#define _PAGE_WRITE_SHIFT 8
#define _PAGE_MODIFIED_SHIFT 9
#define _PAGE_PROTNONE_SHIFT 10
#define _PAGE_SPECIAL_SHIFT 11
#define _PAGE_HGLOBAL_SHIFT 12  /* HGlobal is a PMD bit */
#define _PAGE_PFN_SHIFT 12
#define _PAGE_SWP_EXCLUSIVE_SHIFT 23
#define _PAGE_PFN_END_SHIFT 48
#define _PAGE_PRESENT_INVALID_SHIFT 60
#define _PAGE_NO_READ_SHIFT 61
#define _PAGE_NO_EXEC_SHIFT 62
#define _PAGE_RPLV_SHIFT 63

#ifndef _ULCAST_
#define _ULCAST_ (unsigned long)
#endif

/* Used by software */
#define _PAGE_PRESENT (_ULCAST_(1) << _PAGE_PRESENT_SHIFT)
#define _PAGE_PRESENT_INVALID (_ULCAST_(1) << _PAGE_PRESENT_INVALID_SHIFT)
#define _PAGE_WRITE (_ULCAST_(1) << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED (_ULCAST_(1) << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED (_ULCAST_(1) << _PAGE_MODIFIED_SHIFT)
#define _PAGE_PROTNONE (_ULCAST_(1) << _PAGE_PROTNONE_SHIFT)
#define _PAGE_SPECIAL (_ULCAST_(1) << _PAGE_SPECIAL_SHIFT)

/* We borrow bit 23 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE (_ULCAST_(1) << _PAGE_SWP_EXCLUSIVE_SHIFT)

/* Used by TLB hardware (placed in EntryLo*) */
#define _PAGE_VALID (_ULCAST_(1) << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY (_ULCAST_(1) << _PAGE_DIRTY_SHIFT)
#define _PAGE_PLV (_ULCAST_(3) << _PAGE_PLV_SHIFT)
#define _PAGE_GLOBAL (_ULCAST_(1) << _PAGE_GLOBAL_SHIFT)
#define _PAGE_HUGE (_ULCAST_(1) << _PAGE_HUGE_SHIFT)
#define _PAGE_HGLOBAL (_ULCAST_(1) << _PAGE_HGLOBAL_SHIFT)
#define _PAGE_NO_READ (_ULCAST_(1) << _PAGE_NO_READ_SHIFT)
#define _PAGE_NO_EXEC (_ULCAST_(1) << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_RPLV (_ULCAST_(1) << _PAGE_RPLV_SHIFT)
#define _CACHE_MASK (_ULCAST_(3) << _CACHE_SHIFT)
#define _PFN_SHIFT (PAGESHIFT() - 12 + _PAGE_PFN_SHIFT)

#define _PAGE_USER (PLV_USER << _PAGE_PLV_SHIFT)
#define _PAGE_KERN (PLV_KERN << _PAGE_PLV_SHIFT)

#define _PFN_MASK (~((_ULCAST_(1) << (_PFN_SHIFT)) - 1) & \
	((_ULCAST_(1) << (_PAGE_PFN_END_SHIFT)) - 1))
#endif /* LOONGARCH64 */

/*
 * netdump.c
 */
int is_netdump(char *, ulong);
uint netdump_page_size(void);
int read_netdump(int, void *, int, ulong, physaddr_t);
int write_netdump(int, void *, int, ulong, physaddr_t);
int netdump_free_memory(void);
int netdump_memory_used(void);
int netdump_init(char *, FILE *);
ulong get_netdump_panic_task(void);
ulong get_netdump_switch_stack(ulong);
FILE *set_netdump_fp(FILE *);
int netdump_memory_dump(FILE *);
void get_netdump_regs(struct bt_info *, ulong *, ulong *);
int is_partial_netdump(void);
void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *);
void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *);
void dump_registers_for_elf_dumpfiles(void);
struct vmcore_data;
struct vmcore_data *get_kdump_vmcore_data(void);
int read_kdump(int, void *, int, ulong, physaddr_t);
int write_kdump(int, void *, int, ulong, physaddr_t);
int is_kdump(char *, ulong);
int kdump_init(char *, FILE *);
ulong get_kdump_panic_task(void);
uint kdump_page_size(void);
int kdump_free_memory(void);
int kdump_memory_used(void);
int kdump_memory_dump(FILE *);
void get_kdump_regs(struct bt_info *, ulong *, ulong *);
void xen_kdump_p2m_mfn(char *);
int is_sadump_xen(void);
void set_xen_phys_start(char *);
ulong xen_phys_start(void);
int xen_major_version(void);
int xen_minor_version(void);
int get_netdump_arch(void);
int exist_regs_in_elf_notes(struct task_context *);
void *get_regs_from_elf_notes(struct task_context *);
void map_cpus_to_prstatus(void);
int kdump_phys_base(ulong *);
int kdump_set_phys_base(ulong);
int arm_kdump_phys_base(ulong *);
int arm_kdump_phys_end(ulong *);
int is_proc_kcore(char *, ulong);
int proc_kcore_init(FILE *, int);
int read_proc_kcore(int, void *, int, ulong, physaddr_t);
int write_proc_kcore(int, void *, int, ulong, physaddr_t);
int kcore_memory_dump(FILE *);
void dump_registers_for_qemu_mem_dump(void);
void kdump_backup_region_init(void);
void display_regs_from_elf_notes(int, FILE *);
void display_ELF_note(int, int, void *, FILE *);
void *netdump_get_prstatus_percpu(int);
int kdump_kaslr_check(void);
void display_vmcoredd_note(void *ptr, FILE *ofp);
int kdump_get_nr_cpus(void);
QEMUCPUState *kdump_get_qemucpustate(int);
void kdump_device_dump_info(FILE *);
void kdump_device_dump_extract(int, char *, FILE *);
/* ELF-note type tags used when displaying per-cpu notes */
#define PRSTATUS_NOTE (1)
#define QEMU_NOTE (2)
/*
 * ramdump.c
 */
int is_ramdump(char *pattern);
char *ramdump_to_elf(void);
void ramdump_elf_output_file(char *opt);
void ramdump_cleanup(void);
int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr);
void show_ramdump_files(void);
void dump_ramdump_data(void);
int is_ramdump_image(void);

/*
 * diskdump.c
 */
int is_diskdump(char *);
uint diskdump_page_size(void);
int read_diskdump(int, void *, int, ulong, physaddr_t);
int write_diskdump(int, void *, int, ulong, physaddr_t);
int diskdump_free_memory(void);
int diskdump_memory_used(void);
int diskdump_init(char *, FILE *);
ulong get_diskdump_panic_task(void);
ulong get_diskdump_switch_stack(ulong);
int diskdump_memory_dump(FILE *);
FILE *set_diskdump_fp(FILE *);
void get_diskdump_regs(struct bt_info *, ulong *, ulong *);
int diskdump_phys_base(unsigned long *);
int diskdump_set_phys_base(unsigned long);
extern ulong *diskdump_flags;
int is_partial_diskdump(void);
int get_dump_level(void);
int dumpfile_is_split(void);
void show_split_dumpfiles(void);
void x86_process_elf_notes(void *, unsigned long);
void *diskdump_get_prstatus_percpu(int);
int diskdump_is_cpu_prstatus_valid(int cpu);
int have_crash_notes(int cpu);
void map_cpus_to_prstatus_kdump_cmprs(void);
void diskdump_display_regs(int, FILE *);
void process_elf32_notes(void *, ulong);
void process_elf64_notes(void *, ulong);
void dump_registers_for_compressed_kdump(void);
int diskdump_kaslr_check(void);
int diskdump_get_nr_cpus(void);
QEMUCPUState *diskdump_get_qemucpustate(int);
void diskdump_device_dump_info(FILE *);
void diskdump_device_dump_extract(int, char *, FILE *);
ulong readswap(ulonglong pte_val, char *buf, ulong len, ulonglong vaddr);

/*support for zram*/
ulong try_zram_decompress(ulonglong pte_val, unsigned char *buf, ulong len, ulonglong vaddr);
/* zsmalloc object-handle decoding: an object handle packs a pfn and an
 * in-page object index; the macros below split those fields apart */
#define OBJ_TAG_BITS 1
#ifndef MAX_POSSIBLE_PHYSMEM_BITS
#define MAX_POSSIBLE_PHYSMEM_BITS (MAX_PHYSMEM_BITS())
#endif
#define _PFN_BITS (MAX_POSSIBLE_PHYSMEM_BITS - PAGESHIFT())
#define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
#define OBJ_INDEX_MASK ((1 << OBJ_INDEX_BITS) - 1)
#define ZS_HANDLE_SIZE (sizeof(unsigned long))
#define ZSPAGE_MAGIC 0x58

#define SWAP_ADDRESS_SPACE_SHIFT 14
#define SECTOR_SHIFT 9
#define SECTORS_PER_PAGE_SHIFT (PAGESHIFT() - SECTOR_SHIFT)
#define SECTORS_PER_PAGE (1 << SECTORS_PER_PAGE_SHIFT)

/* NOTE(review): presumably mirrors the kernel's mm/zsmalloc.c 'struct zspage';
 * v0 vs v5_17 appear to be the pre-/post-5.17 bitfield layouts — confirm
 * against the target kernel versions before relying on the field widths. */
struct zspage {
	union {
		unsigned int flag_bits;
		struct {
			unsigned int fullness : 2;
			unsigned int class : 9;
			unsigned int isolated : 3;
			unsigned int magic : 8;
		} v0;
		struct {
			unsigned int huge : 1;
			unsigned int fullness : 2;
			unsigned int class : 9;
			unsigned int isolated : 3;
			unsigned int magic : 8;
		} v5_17;
	};
	unsigned int inuse;
	unsigned int freeobj;
};

/*
 * makedumpfile.c
 */
void check_flattened_format(char *file);
int is_flattened_format(char *file);
int read_flattened_format(int fd, off_t offset, void *buf, size_t size);
void dump_flat_header(FILE *);

/*
 * xendump.c
 */
int is_xendump(char *);
int read_xendump(int, void *, int, ulong, physaddr_t);
int write_xendump(int, void *, int, ulong, physaddr_t);
uint xendump_page_size(void);
int xendump_free_memory(void);
int xendump_memory_used(void);
int xendump_init(char *, FILE *);
int xendump_memory_dump(FILE *);
ulong get_xendump_panic_task(void);
void get_xendump_regs(struct bt_info *, ulong *, ulong *);
char *xc_core_mfn_to_page(ulong, char *);
int xc_core_mfn_to_page_index(ulong);
void xendump_panic_hook(char *);
int read_xendump_hyper(int, void *, int, ulong, physaddr_t);
struct xendump_data *get_xendump_data(void);

/*
 * kvmdump.c
 */
int is_kvmdump(char *);
int is_kvmdump_mapfile(char *);
int kvmdump_init(char *, FILE *);
int read_kvmdump(int, void *, int, ulong, physaddr_t);
int write_kvmdump(int, void *, int, ulong, physaddr_t);
int kvmdump_free_memory(void);
int kvmdump_memory_used(void);
int kvmdump_memory_dump(FILE *);
void get_kvmdump_regs(struct bt_info *, ulong *, ulong *);
ulong get_kvmdump_panic_task(void);
int kvmdump_phys_base(unsigned long *);
void kvmdump_display_regs(int, FILE *); void set_kvmhost_type(char *); void set_kvm_iohole(char *); struct kvm_register_set { union { uint32_t cs; uint32_t ss; uint32_t ds; uint32_t es; uint32_t fs; uint32_t gs; uint64_t ip; uint64_t flags; uint64_t regs[16]; } x86; }; int get_kvm_register_set(int, struct kvm_register_set *); /* * sadump.c */ int is_sadump(char *); uint sadump_page_size(void); int read_sadump(int, void *, int, ulong, physaddr_t); int write_sadump(int, void *, int, ulong, physaddr_t); int sadump_init(char *, FILE *); int sadump_is_diskset(void); ulong get_sadump_panic_task(void); ulong get_sadump_switch_stack(ulong); int sadump_memory_used(void); int sadump_free_memory(void); int sadump_memory_dump(FILE *); FILE *set_sadump_fp(FILE *); void get_sadump_regs(struct bt_info *bt, ulong *ipp, ulong *spp); void sadump_display_regs(int, FILE *); int sadump_phys_base(ulong *); int sadump_set_phys_base(ulong); void sadump_show_diskset(void); int sadump_is_zero_excluded(void); void sadump_set_zero_excluded(void); void sadump_unset_zero_excluded(void); struct sadump_data; struct sadump_data *get_sadump_data(void); int sadump_calc_kaslr_offset(ulong *); int sadump_get_nr_cpus(void); int sadump_get_cr3_cr4_idtr(int, ulong *, ulong *, ulong *); /* * qemu.c */ int qemu_init(char *); /* * qemu-load.c */ int is_qemu_vm_file(char *); void dump_qemu_header(FILE *); /* * net.c */ void net_init(void); void dump_net_table(void); void dump_sockets_workhorse(ulong, ulong, struct reference *); /* * remote.c */ int is_remote_daemon(char *); physaddr_t get_remote_phys_base(physaddr_t, physaddr_t); physaddr_t remote_vtop(int, physaddr_t); int get_remote_regs(struct bt_info *, ulong *, ulong *); physaddr_t get_remote_cr3(int); void remote_fd_init(void); int get_remote_file(struct remote_file *); uint remote_page_size(void); int find_remote_module_objfile(struct load_module *lm, char *, char *); int remote_free_memory(void); int remote_memory_dump(int); int 
remote_memory_used(void); void remote_exit(void); int remote_execute(void); void remote_clear_pipeline(void); int remote_memory_read(int, char *, int, physaddr_t, int); /* * vmware_vmss.c */ int is_vmware_vmss(char *filename); int vmware_vmss_init(char *filename, FILE *ofp); uint vmware_vmss_page_size(void); int read_vmware_vmss(int, void *, int, ulong, physaddr_t); int write_vmware_vmss(int, void *, int, ulong, physaddr_t); void vmware_vmss_display_regs(int, FILE *); void get_vmware_vmss_regs(struct bt_info *, ulong *, ulong *); int vmware_vmss_memory_dump(FILE *); void dump_registers_for_vmss_dump(void); int vmware_vmss_valid_regs(struct bt_info *); int vmware_vmss_get_nr_cpus(void); int vmware_vmss_get_cr3_cr4_idtr(int, ulong *, ulong *, ulong *); int vmware_vmss_phys_base(ulong *phys_base); int vmware_vmss_set_phys_base(ulong); int vmware_vmss_get_cpu_reg(int, int, const char *, int, void *); /* * vmware_guestdump.c */ int is_vmware_guestdump(char *filename); int vmware_guestdump_init(char *filename, FILE *ofp); int vmware_guestdump_memory_dump(FILE *); /* * kaslr_helper.c */ int calc_kaslr_offset(ulong *, ulong *); /* * printk.c */ void dump_lockless_record_log(int); /* caller_id default and max character sizes based on pid field size */ #define PID_CHARS_MAX 16 /* Max Number of PID characters */ #define PID_CHARS_DEFAULT 8 /* Default number of PID characters */ /* * gnu_binutils.c */ /* NO LONGER IN USE */ /* * test.c */ void cmd_template(void); void foreach_test(ulong, ulong); /* * va_server.c */ int mclx_page_size(void); int vas_memory_used(void); int vas_memory_dump(FILE *); int vas_free_memory(char *); void set_vas_debug(ulong); size_t vas_write(void *, size_t); int va_server_init(char *, ulong *, ulong *, ulong *); size_t vas_read(void *, size_t); int vas_lseek(ulong, int); /* * lkcd_x86_trace.c */ int lkcd_x86_back_trace(struct bt_info *, int, FILE *); /* * lkcd_common.c */ int lkcd_dump_init(FILE *, int, char *); ulong get_lkcd_panic_task(void); void 
get_lkcd_panicmsg(char *); int is_lkcd_compressed_dump(char *); void dump_lkcd_environment(ulong); int lkcd_lseek(physaddr_t); long lkcd_read(void *, long); void set_lkcd_debug(ulong); FILE *set_lkcd_fp(FILE *); uint lkcd_page_size(void); int lkcd_memory_used(void); int lkcd_memory_dump(FILE *); int lkcd_free_memory(void); void lkcd_print(char *, ...); void set_remote_lkcd_panic_data(ulong, char *); void set_lkcd_nohash(void); int lkcd_load_dump_page_header(void *, ulong); void lkcd_dumpfile_complaint(uint32_t, uint32_t, int); int set_mb_benchmark(ulong); ulonglong fix_lkcd_address(ulonglong); int lkcd_get_kernel_start(ulong *addr); int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp); /* * lkcd_v1.c */ int lkcd_dump_init_v1(FILE *, int); void dump_dump_page_v1(char *, void *); void dump_lkcd_environment_v1(ulong); uint32_t get_dp_size_v1(void); uint32_t get_dp_flags_v1(void); uint64_t get_dp_address_v1(void); /* * lkcd_v2_v3.c */ int lkcd_dump_init_v2_v3(FILE *, int); void dump_dump_page_v2_v3(char *, void *); void dump_lkcd_environment_v2_v3(ulong); uint32_t get_dp_size_v2_v3(void); uint32_t get_dp_flags_v2_v3(void); uint64_t get_dp_address_v2_v3(void); /* * lkcd_v5.c */ int lkcd_dump_init_v5(FILE *, int); void dump_dump_page_v5(char *, void *); void dump_lkcd_environment_v5(ulong); uint32_t get_dp_size_v5(void); uint32_t get_dp_flags_v5(void); uint64_t get_dp_address_v5(void); /* * lkcd_v7.c */ int lkcd_dump_init_v7(FILE *, int, char *); void dump_dump_page_v7(char *, void *); void dump_lkcd_environment_v7(ulong); uint32_t get_dp_size_v7(void); uint32_t get_dp_flags_v7(void); uint64_t get_dp_address_v7(void); /* * lkcd_v8.c */ int lkcd_dump_init_v8(FILE *, int, char *); void dump_dump_page_v8(char *, void *); void dump_lkcd_environment_v8(ulong); uint32_t get_dp_size_v8(void); uint32_t get_dp_flags_v8(void); uint64_t get_dp_address_v8(void); #ifdef LKCD_COMMON /* * Until they differ across versions, these remain usable in the common * routines 
in lkcd_common.c */ #define LKCD_DUMP_MAGIC_NUMBER (0xa8190173618f23edULL) #define LKCD_DUMP_MAGIC_LIVE (0xa8190173618f23cdULL) #define LKCD_DUMP_V1 (0x1) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V2 (0x2) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V3 (0x3) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V5 (0x5) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V6 (0x6) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V10 (0xa) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) #define LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ #define LKCD_DUMP_COMPRESSED (0x2) /* DUMP_[DH_]COMPRESSED */ #define LKCD_DUMP_END (0x4) /* DUMP_[DH_]END */ #define LKCD_DUMP_COMPRESS_NONE (0x0) /* DUMP_COMPRESS_NONE */ #define LKCD_DUMP_COMPRESS_RLE (0x1) /* DUMP_COMPRESS_RLE */ #define LKCD_DUMP_COMPRESS_GZIP (0x2) /* DUMP_COMPRESS_GZIP */ #define LKCD_DUMP_MCLX_V0 (0x80000000) /* MCLX mod of LKCD */ #define LKCD_DUMP_MCLX_V1 (0x40000000) /* Extra page header data */ #define LKCD_OFFSET_TO_FIRST_PAGE (65536) #define MCLX_PAGE_HEADERS (4096) #define MCLX_V1_PAGE_HEADER_CACHE ((sizeof(uint64_t)) * MCLX_PAGE_HEADERS) /* * lkcd_load_dump_page_header() return values */ #define LKCD_DUMPFILE_OK (0) #define LKCD_DUMPFILE_EOF (1) #define LKCD_DUMPFILE_END (2) /* * Common handling of LKCD dump environment */ #define LKCD_CACHED_PAGES (16) #define LKCD_PAGE_HASH (32) #define LKCD_DUMP_HEADER_ONLY (1) /* arguments to lkcd_dump_environment */ #define LKCD_DUMP_PAGE_ONLY (2) #define LKCD_VALID (0x1) /* flags */ #define LKCD_REMOTE (0x2) #define LKCD_NOHASH (0x4) #define LKCD_MCLX (0x8) #define LKCD_BAD_DUMP (0x10) struct page_hash_entry { uint32_t pg_flags; uint64_t pg_addr; off_t pg_hdr_offset; struct page_hash_entry *next; }; struct page_desc { off_t offset; /* lseek offset in dump file */ }; struct physmem_zone { uint64_t start; 
struct page_desc *pages; }; struct fix_addrs { ulong task; ulong saddr; ulong sw; }; struct lkcd_environment { int fd; /* dumpfile file descriptor */ ulong flags; /* flags from above */ ulong debug; /* shadow of pc->debug */ FILE *fp; /* abstracted fp for fprintf */ void *dump_header; /* header stash, v1 or v2 */ void *dump_header_asm; /* architecture specific header for v2 */ void *dump_header_asm_smp; /* architecture specific header for v7 & v8 */ void *dump_page; /* current page header holder */ uint32_t version; /* version number of this dump */ uint32_t page_size; /* size of a Linux memory page */ int page_shift; /* byte address to page */ int bits; /* processor bitsize */ ulong panic_task; /* panic task address */ char *panic_string; /* pointer to stashed panic string */ uint32_t compression; /* compression type */ uint32_t (*get_dp_size)(void); /* returns current page's dp_size */ uint32_t (*get_dp_flags)(void); /* returns current page's dp_size */ uint64_t (*get_dp_address)(void); /* returns current page's dp_address*/ size_t page_header_size; /* size of version's page header */ unsigned long curpos; /* offset into current page */ uint64_t curpaddr; /* current page's physical address */ off_t curhdroffs; /* current page's header offset */ char *curbufptr; /* pointer to uncompressed page buffer */ uint64_t kvbase; /* physical-to-LKCD page address format*/ char *page_cache_buf; /* base of cached buffer pages */ char *compressed_page; /* copy of compressed page data */ int evict_index; /* next page to evict */ ulong evictions; /* total evictions done */ struct page_cache_hdr { /* header for each cached page */ uint32_t pg_flags; uint64_t pg_addr; char *pg_bufptr; ulong pg_hit_count; } page_cache_hdr[LKCD_CACHED_PAGES]; struct page_hash_entry *page_hash; ulong total_pages; ulong benchmark_pages; ulong benchmarks_done; off_t *mb_hdr_offsets; ulong total_reads; ulong cached_reads; ulong hashed_reads; ulong hashed; ulong compressed; ulong raw; /* lkcd_v7 additions 
*/ char *dumpfile_index; /* array of offsets for each page */ int ifd; /* index file for dump (LKCD V7+) */ long memory_pages; /* Mamimum index of dump pages */ off_t page_offset_max; /* Offset of page with greatest offset seen so far */ long page_index_max; /* Index of page with greatest offset seen so far */ off_t *page_offsets; /* Pointer to huge array with seek offsets */ /* NB: There are no holes in the array */ struct physmem_zone *zones; /* Array of physical memory zones */ int num_zones; /* Number of zones initialized */ int max_zones; /* Size of the zones array */ long zoned_offsets; /* Number of stored page offsets */ uint64_t zone_mask; int zone_shift; int fix_addr_num; /* Number of active stacks to switch to saved values */ struct fix_addrs *fix_addr; /* Array of active stacks to switch to saved values */ }; #define ZONE_ALLOC 128 #define ZONE_SIZE (MEGABYTES(512)) #define MEGABYTE_ALIGNED(vaddr) (!((uint64_t)(vaddr) & MEGABYTE_MASK)) #define LKCD_PAGE_HASH_INDEX(paddr) \ (((paddr) >> lkcd->page_shift) % LKCD_PAGE_HASH) #define LKCD_PAGES_PER_MEGABYTE() (MEGABYTES(1) / lkcd->page_size) #define LKCD_PAGE_MEGABYTE(page) ((page) / LKCD_PAGES_PER_MEGABYTE()) #define LKCD_BENCHMARKS_DONE() (lkcd->benchmarks_done >= lkcd->benchmark_pages) #define LKCD_VALID_PAGE(flags) ((flags) & LKCD_VALID) extern struct lkcd_environment *lkcd; #define LKCD_DEBUG(x) (lkcd->debug >= (x)) #undef BITS #undef BITS32 #undef BITS64 #define BITS() (lkcd->bits) #define BITS32() (lkcd->bits == 32) #define BITS64() (lkcd->bits == 64) #endif /* LKCD_COMMON */ /* * gdb_interface.c */ void gdb_main_loop(int, char **); void display_gdb_banner(void); void get_gdb_version(void); void gdb_session_init(void); void gdb_interface(struct gnu_request *); int gdb_pass_through(char *, FILE *, ulong); int gdb_readmem_callback(ulong, void *, int, int); int gdb_line_number_callback(ulong, ulong, ulong); int gdb_print_callback(ulong); char *gdb_lookup_module_symbol(ulong, ulong *); void 
gdb_error_hook(void); void restore_gdb_sanity(void); int is_gdb_command(int, ulong); char *gdb_command_string(int, char *, int); void dump_gnu_request(struct gnu_request *, int); int gdb_CRASHDEBUG(ulong); void dump_gdb_data(void); void update_gdb_hooks(void); void gdb_readnow_warning(void); int gdb_set_crash_scope(ulong, char *); extern int *gdb_output_format; extern unsigned int *gdb_print_max; extern unsigned char *gdb_prettyprint_structs; extern unsigned char *gdb_prettyprint_arrays; extern unsigned int *gdb_repeat_count_threshold; extern unsigned char *gdb_stop_print_at_null; extern unsigned int *gdb_output_radix; int is_kvaddr(ulong); /* * gdb/top.c */ extern void execute_command (char *, int); #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) extern void (*command_loop_hook)(void); extern void (*error_hook)(void); #else extern void (*deprecated_command_loop_hook)(void); /* * gdb/exceptions.c */ extern void (*error_hook)(void); #endif /* * gdb/symtab.c */ extern void gdb_command_funnel(struct gnu_request *); /* * gdb/symfile.c */ #if defined(GDB_6_0) || defined(GDB_6_1) struct objfile; extern void (*target_new_objfile_hook)(struct objfile *); #endif /* * gdb/valprint.c */ extern unsigned output_radix; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) extern int output_format; extern int prettyprint_structs; extern int prettyprint_arrays; extern int repeat_count_threshold; extern unsigned int print_max; extern int stop_print_at_null; #endif #ifdef GDB_7_6 /* * gdb/cleanups.c */ struct cleanup; extern struct cleanup *all_cleanups(void); extern void do_cleanups(struct cleanup *); #else /* * gdb/utils.c */ extern void do_cleanups(void *); #endif /* * gdb/version.c */ extern char *version; /* * gdb/disasm.c */ #ifdef GDB_5_3 extern int gdb_disassemble_from_exec; #endif /* * readline/readline.c */ #ifdef GDB_5_3 extern char *readline(char *); #else extern char *readline(const char *); #endif extern int rl_editing_mode; /* * readline/history.c 
*/ extern int history_offset; /* * external gdb routines */ extern int gdb_main_entry(int, char **); #ifdef GDB_5_3 extern unsigned long calc_crc32(unsigned long, unsigned char *, size_t); #else extern unsigned long gnu_debuglink_crc32 (unsigned long, unsigned char *, size_t); #endif extern int have_partial_symbols(void *); extern int have_full_symbols(void *); #if defined(X86) || defined(X86_64) || defined(IA64) #define XEN_HYPERVISOR_ARCH #endif #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define REG_SEQ(TYPE, MEMBER) \ (offsetof(struct TYPE, MEMBER) / FIELD_SIZEOF(struct TYPE, MEMBER)) /* * Register numbers must be in sync with gdb/features/i386/64bit-core.c * to make crash_target->fetch_registers() ---> machdep->get_current_task_reg() * working properly. */ enum x86_64_regnum { RAX_REGNUM, RBX_REGNUM, RCX_REGNUM, RDX_REGNUM, RSI_REGNUM, RDI_REGNUM, RBP_REGNUM, RSP_REGNUM, R8_REGNUM, R9_REGNUM, R10_REGNUM, R11_REGNUM, R12_REGNUM, R13_REGNUM, R14_REGNUM, R15_REGNUM, RIP_REGNUM, EFLAGS_REGNUM, CS_REGNUM, SS_REGNUM, DS_REGNUM, ES_REGNUM, FS_REGNUM, GS_REGNUM, ST0_REGNUM, ST1_REGNUM, ST2_REGNUM, ST3_REGNUM, ST4_REGNUM, ST5_REGNUM, ST6_REGNUM, ST7_REGNUM, FCTRL_REGNUM, FSTAT_REGNUM, FTAG_REGNUM, FISEG_REGNUM, FIOFF_REGNUM, FOSEG_REGNUM, FOOFF_REGNUM, FOP_REGNUM, FS_BASE_REGNUM = 152, GS_BASE_REGNUM, ORIG_RAX_REGNUM, LAST_REGNUM }; enum arm64_regnum { X0_REGNUM, X1_REGNUM, X2_REGNUM, X3_REGNUM, X4_REGNUM, X5_REGNUM, X6_REGNUM, X7_REGNUM, X8_REGNUM, X9_REGNUM, X10_REGNUM, X11_REGNUM, X12_REGNUM, X13_REGNUM, X14_REGNUM, X15_REGNUM, X16_REGNUM, X17_REGNUM, X18_REGNUM, X19_REGNUM, X20_REGNUM, X21_REGNUM, X22_REGNUM, X23_REGNUM, X24_REGNUM, X25_REGNUM, X26_REGNUM, X27_REGNUM, X28_REGNUM, X29_REGNUM, X30_REGNUM, SP_REGNUM, PC_REGNUM, }; /* * Register numbers to make crash_target->fetch_registers() * ---> machdep->get_current_task_reg() work properly. 
* * These register numbers and names are given according to output of * `rs6000_register_name`, because that is what was being used by * crash_target::fetch_registers in case of PPC64 */ enum ppc64_regnum { PPC64_R0_REGNUM = 0, PPC64_R1_REGNUM, PPC64_R2_REGNUM, PPC64_R3_REGNUM, PPC64_R4_REGNUM, PPC64_R5_REGNUM, PPC64_R6_REGNUM, PPC64_R7_REGNUM, PPC64_R8_REGNUM, PPC64_R9_REGNUM, PPC64_R10_REGNUM, PPC64_R11_REGNUM, PPC64_R12_REGNUM, PPC64_R13_REGNUM, PPC64_R14_REGNUM, PPC64_R15_REGNUM, PPC64_R16_REGNUM, PPC64_R17_REGNUM, PPC64_R18_REGNUM, PPC64_R19_REGNUM, PPC64_R20_REGNUM, PPC64_R21_REGNUM, PPC64_R22_REGNUM, PPC64_R23_REGNUM, PPC64_R24_REGNUM, PPC64_R25_REGNUM, PPC64_R26_REGNUM, PPC64_R27_REGNUM, PPC64_R28_REGNUM, PPC64_R29_REGNUM, PPC64_R30_REGNUM, PPC64_R31_REGNUM, PPC64_F0_REGNUM = 32, PPC64_F1_REGNUM, PPC64_F2_REGNUM, PPC64_F3_REGNUM, PPC64_F4_REGNUM, PPC64_F5_REGNUM, PPC64_F6_REGNUM, PPC64_F7_REGNUM, PPC64_F8_REGNUM, PPC64_F9_REGNUM, PPC64_F10_REGNUM, PPC64_F11_REGNUM, PPC64_F12_REGNUM, PPC64_F13_REGNUM, PPC64_F14_REGNUM, PPC64_F15_REGNUM, PPC64_F16_REGNUM, PPC64_F17_REGNUM, PPC64_F18_REGNUM, PPC64_F19_REGNUM, PPC64_F20_REGNUM, PPC64_F21_REGNUM, PPC64_F22_REGNUM, PPC64_F23_REGNUM, PPC64_F24_REGNUM, PPC64_F25_REGNUM, PPC64_F26_REGNUM, PPC64_F27_REGNUM, PPC64_F28_REGNUM, PPC64_F29_REGNUM, PPC64_F30_REGNUM, PPC64_F31_REGNUM, PPC64_PC_REGNUM = 64, PPC64_MSR_REGNUM = 65, PPC64_CR_REGNUM = 66, PPC64_LR_REGNUM = 67, PPC64_CTR_REGNUM = 68, PPC64_XER_REGNUM = 69, PPC64_FPSCR_REGNUM = 70, PPC64_VR0_REGNUM = 106, PPC64_VR1_REGNUM, PPC64_VR2_REGNUM, PPC64_VR3_REGNUM, PPC64_VR4_REGNUM, PPC64_VR5_REGNUM, PPC64_VR6_REGNUM, PPC64_VR7_REGNUM, PPC64_VR8_REGNUM, PPC64_VR9_REGNUM, PPC64_VR10_REGNUM, PPC64_VR11_REGNUM, PPC64_VR12_REGNUM, PPC64_VR13_REGNUM, PPC64_VR14_REGNUM, PPC64_VR15_REGNUM, PPC64_VR16_REGNUM, PPC64_VR17_REGNUM, PPC64_VR18_REGNUM, PPC64_VR19_REGNUM, PPC64_VR20_REGNUM, PPC64_VR21_REGNUM, PPC64_VR22_REGNUM, PPC64_VR23_REGNUM, PPC64_VR24_REGNUM, PPC64_VR25_REGNUM, 
PPC64_VR26_REGNUM, PPC64_VR27_REGNUM, PPC64_VR28_REGNUM, PPC64_VR29_REGNUM, PPC64_VR30_REGNUM, PPC64_VR31_REGNUM, PPC64_VSCR_REGNUM = 138, PPC64_VRSAVE_REGNU = 139 }; /* crash_target.c */ extern int gdb_change_thread_context (void); extern int gdb_add_substack (int); #endif /* !GDB_COMMON */ crash-utility-crash-9cd43f5/s390x.c0000664000372000037200000017124015107550337016433 0ustar juerghjuergh/* s390.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2009-2014 David Anderson * Copyright (C) 2002-2006, 2009-2014 Red Hat, Inc. All rights reserved. * Copyright (C) 2005, 2006, 2010-2013 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef S390X #include #include "defs.h" #include "netdump.h" #define S390X_WORD_SIZE 8 #define S390X_PAGE_BASE_MASK (~((1ULL<<12)-1)) /* Flags used in entries of page dirs and page tables. 
*/ #define S390X_PTE_FLAG_BITS 0xfffULL /* Page table entry flag bits */ #define S390X_PAGE_PRESENT 0x001ULL /* set: loaded in physical memory * clear: not loaded in physical mem */ #define S390X_PAGE_RO 0x200ULL /* HW read-only */ #define S390X_PAGE_INVALID 0x400ULL /* HW invalid */ #define S390X_PAGE_INVALID_MASK 0x601ULL /* for linux 2.6 */ #define S390X_PAGE_INVALID_NONE 0x401ULL /* for linux 2.6 */ /* bits 52, 55 must contain zeroes in a pte */ #define S390X_PTE_INVALID_MASK 0x900ULL #define S390X_PTE_INVALID(x) ((x) & S390X_PTE_INVALID_MASK) #define INT_STACK_SIZE STACKSIZE() // can be 8192 or 16384 #define KERNEL_STACK_SIZE STACKSIZE() // can be 8192 or 16384 #define LOWCORE_SIZE 8192 #define VX_SA_SIZE (32 * 16) #define S390X_PSW_MASK_PSTATE 0x0001000000000000UL #define S390X_LC_VMCORE_INFO 0xe0c #define S390X_LC_OS_INFO 0xe18 /* * Flags for Region and Segment table entries. */ #define S390X_RTE_FLAG_BITS_FC0 0xfffULL #define S390X_RTE_FLAG_BITS_FC1 0x7fffffffULL #define S390X_RTE_TL 0x3ULL #define S390X_RTE_TL_10 0x2ULL #define S390X_RTE_TL_01 0x1ULL #define S390X_RTE_TT 0xcULL #define S390X_RTE_TT_10 0x8ULL #define S390X_RTE_TT_01 0x4ULL #define S390X_RTE_CR 0x10ULL #define S390X_RTE_I 0x20ULL #define S390X_RTE_TF 0xc0ULL #define S390X_RTE_TF_10 0x80ULL #define S390X_RTE_TF_01 0x40ULL #define S390X_RTE_P 0x200ULL #define S390X_RTE_FC 0x400ULL #define S390X_RTE_F 0x800ULL #define S390X_RTE_ACC 0xf000ULL #define S390X_RTE_ACC_1000 0x8000ULL #define S390X_RTE_ACC_0100 0x4000ULL #define S390X_RTE_ACC_0010 0x2000ULL #define S390X_RTE_ACC_0001 0x1000ULL #define S390X_RTE_AV 0x10000ULL #define S390X_STE_FLAG_BITS_FC0 0x7ffULL #define S390X_STE_FLAG_BITS_FC1 0xfffffULL #define S390X_STE_TT 0xcULL #define S390X_STE_TT_10 0x8ULL #define S390X_STE_TT_01 0x4ULL #define S390X_STE_CS 0x10ULL #define S390X_STE_I 0x20ULL #define S390X_STE_P 0x200ULL #define S390X_STE_FC 0x400ULL #define S390X_STE_F 0x800ULL #define S390X_STE_ACC 0xf000ULL #define S390X_STE_ACC_1000 
0x8000ULL #define S390X_STE_ACC_0100 0x4000ULL #define S390X_STE_ACC_0010 0x2000ULL #define S390X_STE_ACC_0001 0x1000ULL #define S390X_STE_AV 0x10000ULL /* * S390x prstatus ELF Note */ struct s390x_nt_prstatus { uint8_t pad1[32]; uint32_t pr_pid; uint8_t pad2[76]; uint64_t psw[2]; uint64_t gprs[16]; uint32_t acrs[16]; uint64_t orig_gpr2; uint32_t pr_fpvalid; uint8_t pad3[4]; } __attribute__ ((packed)); /* * S390x floating point register ELF Note */ #ifndef NT_FPREGSET #define NT_FPREGSET 0x2 #endif struct s390x_nt_fpregset { uint32_t fpc; uint32_t pad; uint64_t fprs[16]; } __attribute__ ((packed)); struct s390x_vxrs { uint64_t low; uint64_t high; } __attribute__ ((packed)); /* * s390x CPU info */ struct s390x_cpu { uint64_t gprs[16]; uint64_t ctrs[16]; uint32_t acrs[16]; uint64_t fprs[16]; uint32_t fpc; uint64_t psw[2]; uint32_t prefix; uint64_t timer; uint64_t todcmp; uint32_t todpreg; uint64_t vxrs_low[16]; struct s390x_vxrs vxrs_high[16]; }; /* * declarations of static functions */ static void s390x_print_lowcore(char*, struct bt_info*,int); static int s390x_kvtop(struct task_context *, ulong, physaddr_t *, int); static int s390x_uvtop(struct task_context *, ulong, physaddr_t *, int); static int s390x_vtop(unsigned long, ulong, physaddr_t*, int); static ulong s390x_vmalloc_start(void); static int s390x_is_task_addr(ulong); static int s390x_verify_symbol(const char *, ulong, char type); static ulong s390x_get_task_pgd(ulong); static int s390x_translate_pte(ulong, void *, ulonglong); static ulong s390x_processor_speed(void); static int s390x_eframe_search(struct bt_info *); static void s390x_back_trace_cmd(struct bt_info *); static void s390x_get_stack_frame(struct bt_info *, ulong *, ulong *); static int s390x_dis_filter(ulong, char *, unsigned int); static void s390x_cmd_mach(void); static int s390x_get_smp_cpus(void); static void s390x_display_machine_stats(void); static void s390x_dump_line_number(ulong); static struct line_number_hook 
s390x_line_number_hooks[]; static int s390x_is_uvaddr(ulong, struct task_context *); static int s390x_get_kvaddr_ranges(struct vaddr_range *); static int set_s390x_max_physmem_bits(void); static ulong s390x_generic_VTOP(ulong vaddr); static ulong s390x_generic_PTOV(ulong paddr); static int s390x_generic_IS_VMALLOC_ADDR(ulong vaddr); static ulong s390x_vr_VTOP(ulong vaddr); static ulong s390x_vr_PTOV(ulong paddr); static int s390x_vr_IS_VMALLOC_ADDR(ulong vaddr); static int s390x_vr_is_kvaddr(ulong); struct machine_specific s390x_machine_specific = { .virt_to_phys = s390x_generic_VTOP, .phys_to_virt = s390x_generic_PTOV, .is_vmalloc_addr = s390x_generic_IS_VMALLOC_ADDR, }; /* * struct lowcore name (old: "_lowcore", new: "lowcore") */ static char *lc_struct; /* * Read a unsigned long value from address */ static unsigned long readmem_ul(unsigned long addr) { unsigned long rc; readmem(addr, KVADDR, &rc, sizeof(rc), "readmem_ul", FAULT_ON_ERROR); return rc; } /* * Print hex data */ static void print_hex_buf(void *buf, int len, int cols, char *tag) { int j, first = 1; for (j = 0; j < len; j += 8) { if (j % (cols * 8) == 0) { if (first) first = 0; else fprintf(fp, "\n"); fprintf(fp, "%s", tag); } fprintf(fp, "%#018lx ", *((unsigned long *)(buf + j))); } if (len) fprintf(fp, "\n"); } /* * Initialize member offsets */ static void s390x_offsets_init(void) { if (STRUCT_EXISTS("lowcore")) lc_struct = "lowcore"; else lc_struct = "_lowcore"; if (MEMBER_EXISTS(lc_struct, "st_status_fixed_logout")) MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "st_status_fixed_logout"); else MEMBER_OFFSET_INIT(s390_lowcore_psw_save_area, lc_struct, "psw_save_area"); if (!STRUCT_EXISTS("stack_frame")) { ASSIGN_OFFSET(s390_stack_frame_back_chain) = 0; ASSIGN_OFFSET(s390_stack_frame_r14) = 112; ASSIGN_SIZE(s390_stack_frame) = 160; } else { ASSIGN_OFFSET(s390_stack_frame_back_chain) = MEMBER_OFFSET("stack_frame", "back_chain"); ASSIGN_OFFSET(s390_stack_frame_r14) = 
MEMBER_OFFSET("stack_frame", "gprs") + 8 * 8; ASSIGN_SIZE(s390_stack_frame) = STRUCT_SIZE("stack_frame"); } } /* * MAX_PHYSMEM_BITS is 42 on older kernels, and 46 on newer kernels. */ static int set_s390x_max_physmem_bits(void) { int array_len, dimension; char *string; if ((string = pc->read_vmcoreinfo("NUMBER(MAX_PHYSMEM_BITS)"))) { machdep->max_physmem_bits = atol(string); free(string); return TRUE; } machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_OLD; if (!kernel_symbol_exists("mem_section")) return TRUE; /* * The mem_section was changed to be a pointer in 4.15, so it's * guaranteed to be a newer kernel. */ if (get_symbol_type("mem_section", NULL, NULL) == TYPE_CODE_PTR) { machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_NEW; return TRUE; } if (!(array_len = get_array_length("mem_section", &dimension, 0))) return FALSE; /* * !CONFIG_SPARSEMEM_EXTREME */ if (dimension) { machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_OLD; if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT())) return TRUE; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_NEW; if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT())) return TRUE; return FALSE; } /* * CONFIG_SPARSEMEM_EXTREME */ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_OLD; if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) return TRUE; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_NEW; if (array_len == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) return TRUE; return FALSE; } static struct s390x_cpu *s390x_cpu_vec; static int s390x_cpu_cnt; /* * Return s390x CPU data for backtrace */ static struct s390x_cpu *s390x_cpu_get(struct bt_info *bt) { unsigned int cpu = bt->tc->processor; unsigned long lowcore_ptr, prefix; unsigned int i; lowcore_ptr = symbol_value("lowcore_ptr"); readmem(lowcore_ptr + cpu * sizeof(long), KVADDR, &prefix, sizeof(long), "lowcore_ptr", FAULT_ON_ERROR); for (i = 0; i < s390x_cpu_cnt; i++) { if (s390x_cpu_vec[i].prefix == VTOP(prefix)) return &s390x_cpu_vec[i]; } error(FATAL, 
"cannot determine CPU for task: %lx\n", bt->task); return NULL; } /* * ELF core dump fuctions for storing CPU data */ static void s390x_elf_nt_prstatus_add(struct s390x_cpu *cpu, struct s390x_nt_prstatus *prstatus) { memcpy(&cpu->psw, &prstatus->psw, sizeof(cpu->psw)); memcpy(&cpu->gprs, &prstatus->gprs, sizeof(cpu->gprs)); memcpy(&cpu->acrs, &prstatus->acrs, sizeof(cpu->acrs)); } static void s390x_elf_nt_fpregset_add(struct s390x_cpu *cpu, struct s390x_nt_fpregset *fpregset) { memcpy(&cpu->fpc, &fpregset->fpc, sizeof(cpu->fpc)); memcpy(&cpu->fprs, &fpregset->fprs, sizeof(cpu->fprs)); } static void s390x_elf_nt_timer_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->timer, desc, sizeof(cpu->timer)); } static void s390x_elf_nt_todcmp_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->todcmp, desc, sizeof(cpu->todcmp)); } static void s390x_elf_nt_todpreg_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->todpreg, desc, sizeof(cpu->todpreg)); } static void s390x_elf_nt_ctrs_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->ctrs, desc, sizeof(cpu->ctrs)); } static void s390x_elf_nt_prefix_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->prefix, desc, sizeof(cpu->prefix)); } static void s390x_elf_nt_vxrs_low_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->vxrs_low, desc, sizeof(cpu->vxrs_low)); } static void s390x_elf_nt_vxrs_high_add(struct s390x_cpu *cpu, void *desc) { memcpy(&cpu->vxrs_high, desc, sizeof(cpu->vxrs_high)); } static void *get_elf_note_desc(Elf64_Nhdr *note) { void *ptr = note; return ptr + roundup(sizeof(*note) + note->n_namesz, 4); } static void s390x_elf_note_add(int elf_cpu_nr, void *note_ptr) { Elf64_Nhdr *note = note_ptr; struct s390x_cpu *cpu; void *desc; desc = get_elf_note_desc(note); if (elf_cpu_nr != s390x_cpu_cnt) { s390x_cpu_cnt++; s390x_cpu_vec = realloc(s390x_cpu_vec, s390x_cpu_cnt * sizeof(*s390x_cpu_vec)); if (!s390x_cpu_vec) error(FATAL, "cannot malloc cpu space."); } cpu = &s390x_cpu_vec[s390x_cpu_cnt - 1]; 
switch (note->n_type) { case NT_PRSTATUS: s390x_elf_nt_prstatus_add(cpu, desc); break; case NT_FPREGSET: s390x_elf_nt_fpregset_add(cpu, desc); break; case NT_S390_TIMER: s390x_elf_nt_timer_add(cpu, desc); break; case NT_S390_TODCMP: s390x_elf_nt_todcmp_add(cpu, desc); break; case NT_S390_TODPREG: s390x_elf_nt_todpreg_add(cpu, desc); break; case NT_S390_CTRS: s390x_elf_nt_ctrs_add(cpu, desc); break; case NT_S390_PREFIX: s390x_elf_nt_prefix_add(cpu, desc); break; case NT_S390_VXRS_LOW: s390x_elf_nt_vxrs_low_add(cpu, desc); break; case NT_S390_VXRS_HIGH: s390x_elf_nt_vxrs_high_add(cpu, desc); break; } } static void s390x_process_elf_notes(void *note_ptr, unsigned long size_note) { Elf64_Nhdr *note = NULL; size_t tot, len; static int num_prstatus_notes = 0; for (tot = 0; tot < size_note; tot += len) { note = note_ptr + tot; if (note->n_type == NT_PRSTATUS) num_prstatus_notes++; machdep->dumpfile_init(num_prstatus_notes, note); len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); } } static void s390x_check_live(void) { unsigned long long live_magic; readmem(0, KVADDR, &live_magic, sizeof(live_magic), "live_magic", RETURN_ON_ERROR | QUIET); if (live_magic == 0x4c49564544554d50ULL) pc->flags2 |= LIVE_DUMP; } static char * vmcoreinfo_read_string_s390x(const char *vmcoreinfo, const char *key) { char *value_string = NULL; size_t value_length; char keybuf[128]; char *p1, *p2; sprintf(keybuf, "%s=", key); if ((p1 = strstr(vmcoreinfo, keybuf))) { p2 = p1 + strlen(keybuf); p1 = strstr(p2, "\n"); value_length = p1-p2; value_string = calloc(value_length + 1, sizeof(char)); strncpy(value_string, p2, value_length); value_string[value_length] = NULLCHAR; } return value_string; } /* * Check the value in well-known lowcore location and process it as either * an explicit KASLR offset (early dump case) or as vmcoreinfo pointer to * read the relocated _stext symbol value (important for s390 and lkcd dump * formats). 
*/ static void s390x_check_kaslr(void) { char *_stext_string, *vmcoreinfo; Elf64_Nhdr note; char str[128]; ulong addr; /* Read the value from well-known lowcore location*/ if (!readmem(S390X_LC_VMCORE_INFO, PHYSADDR, &addr, sizeof(addr), "s390x vmcoreinfo ptr", QUIET|RETURN_ON_ERROR)) return; if (addr == 0) return; /* Check for explicit kaslr offset flag */ if (addr & 0x1UL) { /* Drop the last bit to get an offset value */ addr &= ~(0x1UL); /* Make sure the offset is aligned by 0x1000 */ if (addr && !(addr & 0xfff)) { kt->relocate = addr * (-1); kt->flags |= RELOC_SET; kt->flags2 |= KASLR; } return; } /* Use the addr value as vmcoreinfo pointer */ if (!readmem(addr, PHYSADDR, ¬e, sizeof(note), "Elf64_Nhdr vmcoreinfo", QUIET|RETURN_ON_ERROR)) return; memset(str, 0, sizeof(str)); if (!readmem(addr + sizeof(note), PHYSADDR, str, note.n_namesz, "VMCOREINFO", QUIET|RETURN_ON_ERROR)) return; if (memcmp(str, "VMCOREINFO", sizeof("VMCOREINFO")) != 0) return; if ((vmcoreinfo = malloc(note.n_descsz + 1)) == NULL) { error(INFO, "s390x_check_kaslr: cannot malloc vmcoreinfo buffer\n"); return; } addr = addr + sizeof(note) + note.n_namesz + 1; if (!readmem(addr, PHYSADDR, vmcoreinfo, note.n_descsz, "s390x vmcoreinfo", QUIET|RETURN_ON_ERROR)) { free(vmcoreinfo); return; } vmcoreinfo[note.n_descsz] = NULLCHAR; /* * Read relocated _stext symbol value and store it in the kernel_table * for further processing within derive_kaslr_offset(). 
*/ if ((_stext_string = vmcoreinfo_read_string_s390x(vmcoreinfo, "SYMBOL(_stext)"))) { kt->vmcoreinfo._stext_SYMBOL = htol(_stext_string, RETURN_ON_ERROR, NULL); free(_stext_string); } free(vmcoreinfo); } #define OS_INFO_VERSION_MAJOR 1 #define OS_INFO_VERSION_MINOR 1 #define OS_INFO_VMCOREINFO 0 #define OS_INFO_REIPL_BLOCK 1 #define OS_INFO_FLAGS_ENTRY 2 #define OS_INFO_RESERVED 3 #define OS_INFO_IDENTITY_BASE 4 #define OS_INFO_KASLR_OFFSET 5 #define OS_INFO_KASLR_OFF_PHYS 6 #define OS_INFO_VMEMMAP 7 #define OS_INFO_AMODE31_START 8 #define OS_INFO_AMODE31_END 9 struct os_info_entry { union { __u64 addr; __u64 val; }; __u64 size; __u32 csum; } __attribute__((packed)); struct os_info { __u64 magic; __u32 csum; __u16 version_major; __u16 version_minor; __u64 crashkernel_addr; __u64 crashkernel_size; struct os_info_entry entry[10]; __u8 reserved[3864]; } __attribute__((packed)); struct vm_info { __u64 __identity_base; __u64 __kaslr_offset; __u64 __kaslr_offset_phys; __u64 amode31_start; __u64 amode31_end; }; static bool vmcoreinfo_read_u64(const char *key, __u64 *val) { char *string; string = pc->read_vmcoreinfo(key); if (string) { *val = strtoul(string, NULL, 16); free(string); return true; } return false; } static bool vmcoreinfo_read_vm_info(struct vm_info *_vm_info) { struct vm_info vm_info; if (!vmcoreinfo_read_u64("IDENTITYBASE", &vm_info.__identity_base) || !vmcoreinfo_read_u64("KERNELOFFSET", &vm_info.__kaslr_offset) || !vmcoreinfo_read_u64("KERNELOFFPHYS", &vm_info.__kaslr_offset_phys) || !vmcoreinfo_read_u64("SAMODE31", &vm_info.amode31_start) || !vmcoreinfo_read_u64("EAMODE31", &vm_info.amode31_end)) return false; *_vm_info = vm_info; return true; } static bool os_info_read_vm_info(struct vm_info *vm_info) { struct os_info os_info; ulong addr; if (!readmem(S390X_LC_OS_INFO, PHYSADDR, &addr, sizeof(addr), "s390x os_info ptr", QUIET|RETURN_ON_ERROR)) return false; if (addr == 0) return true; if (!readmem(addr, PHYSADDR, &os_info, offsetof(struct os_info, 
reserved), "s390x os_info header", QUIET|RETURN_ON_ERROR)) return false; vm_info->__identity_base = os_info.entry[OS_INFO_IDENTITY_BASE].val; vm_info->__kaslr_offset = os_info.entry[OS_INFO_KASLR_OFFSET].val; vm_info->__kaslr_offset_phys = os_info.entry[OS_INFO_KASLR_OFF_PHYS].val; vm_info->amode31_start = os_info.entry[OS_INFO_AMODE31_START].val; vm_info->amode31_end = os_info.entry[OS_INFO_AMODE31_END].val; return true; } static bool vm_info_empty(struct vm_info *vm_info) { return !vm_info->__kaslr_offset; } static bool s390x_init_vm(void) { struct vm_info vm_info; if (pc->flags & PROC_KCORE) { if (!vmcoreinfo_read_vm_info(&vm_info)) return true; } else { if (!os_info_read_vm_info(&vm_info)) return false; } if (vm_info_empty(&vm_info)) return true; machdep->identity_map_base = vm_info.__identity_base; machdep->kvbase = vm_info.__kaslr_offset; machdep->machspec->__kaslr_offset_phys = vm_info.__kaslr_offset_phys; machdep->machspec->amode31_start = vm_info.amode31_start; machdep->machspec->amode31_end = vm_info.amode31_end; machdep->is_kvaddr = s390x_vr_is_kvaddr; machdep->machspec->virt_to_phys = s390x_vr_VTOP; machdep->machspec->phys_to_virt = s390x_vr_PTOV; machdep->machspec->is_vmalloc_addr = s390x_vr_IS_VMALLOC_ADDR; return true; } static ulong s390x_generic_VTOP(ulong vaddr) { return vaddr - machdep->kvbase; } static ulong s390x_generic_PTOV(ulong paddr) { return paddr + machdep->kvbase; } static int s390x_generic_IS_VMALLOC_ADDR(ulong vaddr) { return vt->vmalloc_start && vaddr >= vt->vmalloc_start; } static ulong s390x_vr_VTOP(ulong vaddr) { if (vaddr < LOWCORE_SIZE) return vaddr; if ((vaddr < machdep->machspec->amode31_end) && (vaddr >= machdep->machspec->amode31_start)) return vaddr; if (vaddr < machdep->kvbase) return vaddr - machdep->identity_map_base; return vaddr - machdep->kvbase + machdep->machspec->__kaslr_offset_phys; } static ulong s390x_vr_PTOV(ulong paddr) { return paddr + machdep->identity_map_base; } static int s390x_vr_IS_VMALLOC_ADDR(ulong 
vaddr) { return (vt->vmalloc_start && vaddr >= vt->vmalloc_start && vaddr < machdep->kvbase); } ulong s390x_VTOP(ulong vaddr) { return machdep->machspec->virt_to_phys(vaddr); } ulong s390x_PTOV(ulong paddr) { return machdep->machspec->phys_to_virt(paddr); } int s390x_IS_VMALLOC_ADDR(ulong vaddr) { return machdep->machspec->is_vmalloc_addr(vaddr); } /* * Do all necessary machine-specific setup here. This is called several * times during initialization. */ void s390x_init(int when) { switch (when) { case SETUP_ENV: machdep->dumpfile_init = s390x_elf_note_add; machdep->process_elf_notes = s390x_process_elf_notes; break; case PRE_SYMTAB: machdep->machspec = &s390x_machine_specific; machdep->verify_symbol = s390x_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); // machdep->stacksize = KERNEL_STACK_SIZE; if ((machdep->pgd = (char *)malloc(SEGMENT_TABLE_SIZE)) == NULL) error(FATAL, "cannot malloc pgd space."); machdep->pmd = machdep->pgd; if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->get_kvaddr_ranges = s390x_get_kvaddr_ranges; machdep->ptrs_per_pgd = PTRS_PER_PGD; if (DUMPFILE() && !(kt->flags & RELOC_SET)) s390x_check_kaslr(); break; case PRE_GDB: machdep->kvbase = 0; machdep->identity_map_base = 0; machdep->is_kvaddr = generic_is_kvaddr; if (!s390x_init_vm()) error(FATAL, "cannot initialize VM parameters."); machdep->is_uvaddr = s390x_is_uvaddr; machdep->eframe_search = s390x_eframe_search; machdep->back_trace = s390x_back_trace_cmd; machdep->processor_speed = s390x_processor_speed; machdep->uvtop = s390x_uvtop; machdep->kvtop = s390x_kvtop; machdep->get_task_pgd = 
s390x_get_task_pgd; machdep->get_stack_frame = s390x_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = s390x_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = s390x_is_task_addr; machdep->dis_filter = s390x_dis_filter; machdep->cmd_mach = s390x_cmd_mach; machdep->get_smp_cpus = s390x_get_smp_cpus; machdep->line_number_hooks = s390x_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; vt->flags |= COMMON_VADDR; s390x_check_live(); break; case POST_GDB: if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else machdep->nr_irqs = 0; machdep->vmalloc_start = s390x_vmalloc_start; machdep->dump_irq = generic_dump_irq; if (!machdep->hz) machdep->hz = HZ; machdep->section_size_bits = _SECTION_SIZE_BITS; if (!set_s390x_max_physmem_bits()) error(WARNING, "cannot determine MAX_PHYSMEM_BITS\n"); s390x_offsets_init(); break; case POST_INIT: break; } } /* * Dump machine dependent information */ void s390x_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", (unsigned long long)machdep->memsize, (unsigned long long)machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: s390x_eframe_search()\n"); fprintf(fp, " back_trace: s390x_back_trace_cmd()\n"); fprintf(fp, " processor_speed: s390x_processor_speed()\n"); fprintf(fp, " uvtop: s390x_uvtop()\n"); fprintf(fp, " kvtop: s390x_kvtop()\n"); fprintf(fp, " get_task_pgd: s390x_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: s390x_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: s390x_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: s390x_vmalloc_start()\n"); fprintf(fp, " is_task_addr: s390x_is_task_addr()\n"); fprintf(fp, " verify_symbol: s390x_verify_symbol()\n"); fprintf(fp, " dis_filter: s390x_dis_filter()\n"); fprintf(fp, " cmd_mach: s390x_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: s390x_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: %s()\n", machdep->is_kvaddr == s390x_vr_is_kvaddr ? 
"s390x_vr_is_kvaddr" : "generic_is_kvaddr"); fprintf(fp, " is_uvaddr: s390x_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " get_kvaddr_ranges: s390x_get_kvaddr_ranges()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " dumpfile_init: s390x_elf_note_add()\n"); fprintf(fp, " process_elf_notes: s390x_process_elf_notes()\n"); fprintf(fp, " line_number_hooks: s390x_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } static int s390x_vr_is_kvaddr(ulong vaddr) { return (vaddr < LOWCORE_SIZE) || (vaddr >= machdep->identity_map_base); } /* * Check if address is in context's address space */ static int s390x_is_uvaddr(ulong vaddr, struct task_context *tc) { return IN_TASK_VMA(tc->task, vaddr); } /* * Translates a user virtual address to its physical address */ static int s390x_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long pgd_base; readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd_base,sizeof(long), "pgd_base",FAULT_ON_ERROR); return s390x_vtop(pgd_base, vaddr, paddr, verbose); } /* * Translates a kernel virtual address to its physical address */ static int s390x_kvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long pgd_base; if (!IS_KVADDR(vaddr)){ *paddr = 0; return FALSE; } if (!verbose && 
!IS_VMALLOC_ADDR(vaddr)) { *paddr = VTOP(vaddr); return TRUE; } pgd_base = (unsigned long)vt->kernel_pgd[0]; return s390x_vtop(pgd_base, vaddr, paddr, verbose); } /* * Check if page is mapped */ static inline int s390x_pte_present(unsigned long x){ if(THIS_KERNEL_VERSION >= LINUX(2,6,0)){ return !((x) & S390X_PAGE_INVALID) || ((x) & S390X_PAGE_INVALID_MASK) == S390X_PAGE_INVALID_NONE; } else { return ((x) & S390X_PAGE_PRESENT); } } /* * page table traversal functions */ /* Print flags of Segment-Table entry with format control = 1 */ static void print_segment_entry_fc1(ulong val) { fprintf(fp, "AV=%u; ACC=%u%u%u%u; F=%u; FC=%u; P=%u; I=%u; CS=%u; TT=%u%u\n", !!(val & S390X_STE_AV), !!(val & S390X_STE_ACC_1000), !!(val & S390X_STE_ACC_0100), !!(val & S390X_STE_ACC_0010), !!(val & S390X_STE_ACC_0001), !!(val & S390X_STE_F), !!(val & S390X_STE_FC), !!(val & S390X_STE_P), !!(val & S390X_STE_I), !!(val & S390X_STE_CS), !!(val & S390X_STE_TT_10), !!(val & S390X_STE_TT_01)); } /* Print flags of Segment-Table entry with format control = 0 */ static void print_segment_entry_fc0(ulong val) { fprintf(fp, "FC=%u; P=%u; I=%u; CS=%u; TT=%u%u\n", !!(val & S390X_STE_FC), !!(val & S390X_STE_P), !!(val & S390X_STE_I), !!(val & S390X_STE_CS), !!(val & S390X_STE_TT_10), !!(val & S390X_STE_TT_01)); } /* Print flags of Region-Third-Table entry with format control = 1 */ static void print_region_third_entry_fc1(ulong val) { fprintf(fp, "AV=%u; ACC=%u%u%u%u; F=%u; FC=%u; P=%u; I=%u; CR=%u; TT=%u%u\n", !!(val & S390X_RTE_AV), !!(val & S390X_RTE_ACC_1000), !!(val & S390X_RTE_ACC_0100), !!(val & S390X_RTE_ACC_0010), !!(val & S390X_RTE_ACC_0001), !!(val & S390X_RTE_F), !!(val & S390X_RTE_FC), !!(val & S390X_RTE_P), !!(val & S390X_RTE_I), !!(val & S390X_RTE_CR), !!(val & S390X_RTE_TT_10), !!(val & S390X_RTE_TT_01)); } /* Print flags of Region-Third-Table entry with format control = 0 */ static void print_region_third_entry_fc0(ulong val) { fprintf(fp, "FC=%u; P=%u; TF=%u%u; I=%u; CR=%u; 
TT=%u%u; TL=%u%u\n", !!(val & S390X_RTE_FC), !!(val & S390X_RTE_P), !!(val & S390X_RTE_TF_10), !!(val & S390X_RTE_TF_01), !!(val & S390X_RTE_I), !!(val & S390X_RTE_CR), !!(val & S390X_RTE_TT_10), !!(val & S390X_RTE_TT_01), !!(val & S390X_RTE_TL_10), !!(val & S390X_RTE_TL_01)); } /* Print flags of Region-First/Second-Table entry */ static void print_region_first_second_entry(ulong val) { fprintf(fp, "P=%u; TF=%u%u; I=%u; TT=%u%u; TL=%u%u\n", !!(val & S390X_RTE_P), !!(val & S390X_RTE_TF_10), !!(val & S390X_RTE_TF_01), !!(val & S390X_RTE_I), !!(val & S390X_RTE_TT_10), !!(val & S390X_RTE_TT_01), !!(val & S390X_RTE_TL_10), !!(val & S390X_RTE_TL_01)); } /* Print the binary flags for Region or Segment table entry */ static void s390x_print_te_binary_flags(ulong val, int level) { fprintf(fp, " flags in binary : "); switch (level) { case 0: if (val & S390X_STE_FC) print_segment_entry_fc1(val); else print_segment_entry_fc0(val); break; case 1: if (val & S390X_RTE_FC) print_region_third_entry_fc1(val); else print_region_third_entry_fc0(val); break; case 2: case 3: print_region_first_second_entry(val); break; } } /* Region or segment table traversal function */ static ulong _kl_rsg_table_deref_s390x(ulong vaddr, ulong table, int len, int level, int verbose) { const char *name_vec[] = {"STE", "RTTE", "RSTE", "RFTE"}; ulong offset, entry, flags, addr; int flags_prt_len; offset = ((vaddr >> (11*level + 20)) & 0x7ffULL) * 8; if (offset >= (len + 1)*4096) /* Offset is over the table limit. 
*/ return 0; addr = table + offset; readmem(addr, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); if (verbose) { flags_prt_len = 3; if (entry & S390X_RTE_FC) if (level) { flags = entry & S390X_RTE_FLAG_BITS_FC1; flags_prt_len = 8; } else { flags = entry & S390X_STE_FLAG_BITS_FC1; flags_prt_len = 5; } else if (level) flags = entry & S390X_RTE_FLAG_BITS_FC0; else flags = entry & S390X_STE_FLAG_BITS_FC0; fprintf(fp, "%5s: %016lx => %016lx (flags = %0*lx)\n", name_vec[level], addr, entry, flags_prt_len, flags); s390x_print_te_binary_flags(entry, level); } /* * Check if the segment table entry could be read and doesn't have * any of the reserved bits set. */ if ((entry & S390X_RTE_TT) != (level << 2)) return 0; /* Check if the region table entry has the invalid bit set. */ if (entry & S390X_RTE_I) return 0; /* Region table entry is valid and well formed. */ return entry; } /* Check for swap entry */ static int swap_entry(ulong entry) { if (THIS_KERNEL_VERSION < LINUX(2,6,19)) { if ((entry & 0x601ULL) == 0x600ULL) return 1; } if (THIS_KERNEL_VERSION < LINUX(3,12,0)) { if ((entry & 0x403ULL) == 0x403ULL) return 1; } else { if ((entry & 0x603ULL) == 0x402ULL) return 1; } return 0; } /* Page table traversal function */ static ulong _kl_pg_table_deref_s390x(ulong vaddr, ulong table, int verbose) { ulong offset, entry, addr; offset = ((vaddr >> 12) & 0xffULL) * 8; addr = table + offset; readmem(addr, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); if (verbose) { fprintf(fp, "%5s: %016lx => %016lx (flags = %03llx)\n", "PTE", addr, entry, entry & S390X_PTE_FLAG_BITS); fprintf(fp, " flags in binary : I=%u; P=%u\n", !!(entry & S390X_PAGE_INVALID), !!(entry & S390X_PAGE_RO)); fprintf(fp, "%5s: %016llx\n", "PAGE", entry & ~S390X_PTE_FLAG_BITS); } /* * Return zero if the page table entry has the reserved (0x800) or * the invalid (0x400) bit set and it is not a swap entry. 
*/ if ((entry & 0xc00ULL) && !swap_entry(entry)) return 0; /* Page table entry is valid and well formed. */ return entry; } /* lookup virtual address in page tables */ int s390x_vtop(ulong table, ulong vaddr, physaddr_t *phys_addr, int verbose) { ulong entry, paddr; int level, len; if (verbose) fprintf(fp, "PAGE DIRECTORY: %016lx\n", table); *phys_addr = 0; /* * Walk the region and segment tables. * We assume that the table length field in the asce is set to the * maximum value of 3 (which translates to a region first, region * second, region third or segment table with 2048 entries) and that * the addressing mode is 64 bit. */ len = 3; /* Read the first entry to find the number of page table levels. */ readmem(table, KVADDR, &entry, sizeof(entry), "entry", FAULT_ON_ERROR); level = (entry & 0xcULL) >> 2; if ((level < 3) && (vaddr >> (31 + 11*level)) != 0ULL) { /* Address too big for the number of page table levels. */ return FALSE; } while (level >= 0) { entry = _kl_rsg_table_deref_s390x(vaddr, table, len, level, verbose); if (!entry) return FALSE; table = PTOV(entry & ~0xfffULL); /* Check if this a 2GB page */ if ((entry & 0x400ULL) && (level == 1)) { /* Add the 2GB frame offset & return the final value. */ table &= ~0x7fffffffULL; *phys_addr = VTOP(table + (vaddr & 0x7fffffffULL)); return TRUE; } len = entry & 0x3ULL; level--; } /* Check if this is a large page. */ if (entry & 0x400ULL) { /* Add the 1MB page offset and return the final value. */ table &= ~0xfffffULL; *phys_addr = VTOP(table + (vaddr & 0xfffffULL)); return TRUE; } /* Get the page table entry */ entry = _kl_pg_table_deref_s390x(vaddr, PTOV(entry & ~0x7ffULL), verbose); if (!entry) return FALSE; /* For swap entries we have to return FALSE and phys_addr = PTE */ if (swap_entry(entry)) { *phys_addr = entry; return FALSE; } /* Isolate the page origin from the page table entry. */ paddr = entry & ~0xfffULL; /* Add the page offset and return the final value. 
*/ *phys_addr = paddr + (vaddr & 0xfffULL); return TRUE; } /* * Determine where vmalloc'd memory starts. */ static ulong s390x_vmalloc_start(void) { unsigned long highmem_addr,high_memory; highmem_addr=symbol_value("high_memory"); readmem(highmem_addr, KVADDR, &high_memory,sizeof(long), "highmem",FAULT_ON_ERROR); return high_memory; } /* * Check if address can be a valid task_struct */ static int s390x_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * return MHz - unfortunately it is not possible to get this on linux * for zSeries */ static ulong s390x_processor_speed(void) { return 0; } /* * Accept or reject a symbol from the kernel namelist. */ static int s390x_verify_symbol(const char *name, ulong value, char type) { int i; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "startup") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled.")) return FALSE; /* reject L2^B symbols */ if (strstr(name, "L2\002") == name) return FALSE; if (STREQ(name, ".rodata")) return TRUE; /* throw away all symbols containing a '.' */ for(i = 0; i < strlen(name);i++){ if(name[i] == '.') return FALSE; } return TRUE; } /* * Get the relevant page directory pointer from a task structure. */ static ulong s390x_get_task_pgd(ulong task) { return (error(FATAL, "s390x_get_task_pgd: TBD\n")); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. 
*/ static int s390x_translate_pte(ulong pte, void *physaddr, ulonglong unused) { char *arglist[MAXARGS]; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; int c,len1,len2,len3; if(S390X_PTE_INVALID(pte)){ fprintf(fp,"PTE is invalid\n"); return FALSE; } if(physaddr) *((ulong *)physaddr) = pte & S390X_PAGE_BASE_MASK; if(!s390x_pte_present(pte)){ swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|LJUST, "PTE"), mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); sprintf(ptebuf, "%lx", pte); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return FALSE; } fprintf(fp,"PTE PHYSICAL FLAGS\n"); fprintf(fp,"%08lx %08llx",pte, pte & S390X_PAGE_BASE_MASK); fprintf(fp," ("); if(pte & S390X_PAGE_INVALID) fprintf(fp,"INVALID "); if(pte & S390X_PAGE_RO) fprintf(fp,"PROTECTION"); fprintf(fp,")"); return TRUE; } /* * Look for likely exception frames in a stack. 
*/ static int s390x_eframe_search(struct bt_info *bt) { ulong esp; if (bt->flags & BT_EFRAME_SEARCH2) { return (error(FATAL, "Option '-E' is not implemented for this architecture\n")); } else { /* For 'bt -e' print only interrupt frames and related pt_regs */ s390x_get_stack_frame(bt, NULL, &esp); bt->stkptr = esp; s390x_back_trace_cmd(bt); return 0; } } #ifdef DEPRECATED /* * returns cpu number of task */ static int s390x_cpu_of_task(unsigned long task) { unsigned int cpu; if(VALID_MEMBER(task_struct_processor)){ /* linux 2.4 */ readmem(task + OFFSET(task_struct_processor),KVADDR, &cpu, sizeof(cpu), "task_struct_processor", FAULT_ON_ERROR); } else { /* linux 2.6 */ char thread_info[8192]; unsigned long thread_info_addr; readmem(task + OFFSET(task_struct_thread_info),KVADDR, &thread_info_addr, sizeof(thread_info_addr), "thread info addr", FAULT_ON_ERROR); readmem(thread_info_addr,KVADDR,thread_info,sizeof(thread_info), "thread info", FAULT_ON_ERROR); cpu = *((int*) &thread_info[OFFSET(thread_info_cpu)]); } return cpu; } #endif /* * returns true, if task of bt currently is executed by a cpu */ static int s390x_has_cpu(struct bt_info *bt) { int cpu = bt->tc->processor; if (is_task_active(bt->task) && (kt->cpu_flags[cpu] & ONLINE_MAP)) return TRUE; else return FALSE; } /* * read lowcore for cpu */ static void s390x_get_lowcore(struct bt_info *bt, char* lowcore) { unsigned long lowcore_array,lowcore_ptr; struct s390x_cpu *s390x_cpu; int cpu = bt->tc->processor; lowcore_array = symbol_value("lowcore_ptr"); readmem(lowcore_array + cpu * S390X_WORD_SIZE,KVADDR, &lowcore_ptr, sizeof(long), "lowcore_ptr", FAULT_ON_ERROR); readmem(lowcore_ptr, KVADDR, lowcore, LOWCORE_SIZE, "lowcore", FAULT_ON_ERROR); if (!s390x_cpu_vec) return; /* Copy register information to defined places in lowcore */ s390x_cpu = s390x_cpu_get(bt); memcpy(lowcore + 4864, &s390x_cpu->psw, sizeof(s390x_cpu->psw)); memcpy(lowcore + 4736, &s390x_cpu->gprs, sizeof(s390x_cpu->gprs)); memcpy(lowcore + 4928, 
&s390x_cpu->acrs, sizeof(s390x_cpu->acrs)); memcpy(lowcore + 4892, &s390x_cpu->fpc, sizeof(s390x_cpu->fpc)); memcpy(lowcore + 4608, &s390x_cpu->fprs, sizeof(s390x_cpu->fprs)); memcpy(lowcore + 4888, &s390x_cpu->prefix, sizeof(s390x_cpu->prefix)); memcpy(lowcore + 4992, &s390x_cpu->ctrs, sizeof(s390x_cpu->ctrs)); memcpy(lowcore + 4900, &s390x_cpu->todpreg, sizeof(s390x_cpu->todpreg)); memcpy(lowcore + 4904, &s390x_cpu->timer, sizeof(s390x_cpu->timer)); memcpy(lowcore + 4912, &s390x_cpu->todcmp, sizeof(s390x_cpu->todcmp)); } /* * Copy VX registers out of s390x cpu */ static void vx_copy(void *buf, struct s390x_cpu *s390x_cpu) { char *_buf = buf; int i; for (i = 0; i < 16; i++) { memcpy(&_buf[i * 16], &s390x_cpu->fprs[i], 8); memcpy(&_buf[i * 16 + 8], &s390x_cpu->vxrs_low[i], 8); } memcpy(&_buf[16 * 16], &s390x_cpu->vxrs_high[0], 16 * 16); } /* * Check if VX registers are available */ static int has_vx_regs(char *lowcore) { unsigned long addr = *((uint64_t *)(lowcore + 0x11b0)); if (addr == 0 || addr % 1024) return 0; return 1; } /* * Print vector registers for cpu */ static void s390x_print_vx_sa(struct bt_info *bt, char *lc) { char vx_sa[VX_SA_SIZE]; uint64_t addr; if (!(bt->flags & BT_SHOW_ALL_REGS)) return; if (!has_vx_regs(lc)) return; if (!s390x_cpu_vec) { /* Pointer to save area */ addr = *((uint64_t *)(lc + 0x11b0)); readmem(addr, KVADDR, vx_sa, sizeof(vx_sa), "vx_sa", FAULT_ON_ERROR); } else { /* Get data from s390x cpu */ vx_copy(vx_sa, s390x_cpu_get(bt)); } fprintf(fp, " -vector registers:\n"); print_hex_buf(vx_sa, sizeof(vx_sa), 2, " "); } /* * Get stack address for interrupt stack using the pcpu array */ static unsigned long get_int_stack_pcpu(char *stack_name, int cpu) { unsigned long addr; addr = symbol_value("pcpu_devices") + cpu * STRUCT_SIZE("pcpu") + MEMBER_OFFSET("pcpu", stack_name); return readmem_ul(addr) + INT_STACK_SIZE; } /* * Get stack address for interrupt stack using the lowcore */ static unsigned long get_int_stack_lc(char *stack_name, 
char *lc) { if (!MEMBER_EXISTS(lc_struct, stack_name)) return 0; return roundup(ULONG(lc + MEMBER_OFFSET(lc_struct, stack_name)), PAGESIZE()); } /* * Read interrupt stack (either "async_stack" or "panic_stack"); */ static void get_int_stack(char *stack_name, int cpu, char *lc, unsigned long *start, unsigned long *end) { unsigned long stack_addr; *start = *end = 0; if (strcmp(stack_name, "restart_stack") == 0) { stack_addr = symbol_value("restart_stack"); stack_addr = readmem_ul(stack_addr); } else { if (symbol_exists("pcpu_devices") && MEMBER_EXISTS("pcpu", stack_name)) stack_addr = get_int_stack_pcpu(stack_name, cpu); else stack_addr = get_int_stack_lc(stack_name, lc); } if (stack_addr == 0) return; *start = stack_addr - INT_STACK_SIZE; *end = stack_addr; } /* * Print hex data */ static void print_hex(unsigned long addr, int len, int cols) { int j, first = 1; for (j = 0; j < len; j += 8) { if (j % (cols * 8) == 0) { if (!first) fprintf(fp, "\n"); else first = 0; fprintf(fp, " %016lx: ", addr + j); } fprintf(fp, " %016lx", readmem_ul(addr + j)); } if (len) fprintf(fp, "\n"); } /* * Print hexdump of stack frame data */ static void print_frame_data(unsigned long sp, unsigned long high) { unsigned long next_sp, len = high - sp; next_sp = readmem_ul(sp + MEMBER_OFFSET("stack_frame", "back_chain")); if (next_sp == 0) len = MIN(len, SIZE(s390_stack_frame) + STRUCT_SIZE("pt_regs")); else len = MIN(len, next_sp - sp); print_hex(sp, len, 2); } /* * Do reference check and set flags */ static int bt_reference_check(struct bt_info *bt, unsigned long addr) { if (!BT_REFERENCE_CHECK(bt)) return 0; if (bt->ref->cmdflags & BT_REF_HEXVAL) { if (addr == bt->ref->hexval) bt->ref->cmdflags |= BT_REF_FOUND; } else { if (STREQ(closest_symbol(addr), bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } return 1; } /* * Print stack frame */ static void print_frame(struct bt_info *bt, int cnt, unsigned long sp, unsigned long r14) { struct load_module *lm; char *sym; ulong offset; struct 
syment *symp; char *name_plus_offset; char buf[BUFSIZE]; if (bt_reference_check(bt, r14)) return; fprintf(fp, "%s#%d [%08lx] ", cnt < 10 ? " " : "", cnt, sp); sym = closest_symbol(r14); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { symp = value_search(r14, &offset); if (symp && offset) name_plus_offset = value_to_symstr(r14, buf, bt->radix); } fprintf(fp, "%s at %lx", name_plus_offset ? name_plus_offset : sym, r14); if (module_symbol(r14, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_LINE_NUMBERS) s390x_dump_line_number(r14); } /* * Print pt_regs structure */ static void print_ptregs(struct bt_info *bt, unsigned long sp) { unsigned long addr, psw_flags, psw_addr, offs; struct load_module *lm; char *sym; int i; addr = sp + MEMBER_OFFSET("pt_regs", "psw"); psw_flags = readmem_ul(addr); psw_addr = readmem_ul(addr + sizeof(long)); if (bt_reference_check(bt, psw_addr)) return; fprintf(fp, " PSW: %016lx %016lx ", psw_flags, psw_addr); if (psw_flags & S390X_PSW_MASK_PSTATE) { fprintf(fp, "(user space)\n"); } else { sym = closest_symbol(psw_addr); offs = psw_addr - closest_symbol_value(psw_addr); if (module_symbol(psw_addr, NULL, &lm, NULL, 0)) fprintf(fp, "(%s+%ld [%s])\n", sym, offs, lm->mod_name); else fprintf(fp, "(%s+%ld)\n", sym, offs); } addr = sp + MEMBER_OFFSET("pt_regs", "gprs"); for (i = 0; i < 16; i++) { if (i != 0 && i % 4 == 0) fprintf(fp, "\n"); if (i % 4 == 0) { if (i == 0) fprintf(fp, " GPRS: "); else fprintf(fp, " "); } fprintf(fp, "%016lx ", readmem_ul(addr + i * sizeof(long))); } fprintf(fp, "\n"); } /* * Print back trace for one stack */ static unsigned long show_trace(struct bt_info *bt, int cnt, unsigned long sp, unsigned long low, unsigned long high) { unsigned long reg, iframe_addr; unsigned long psw_addr ATTRIBUTE_UNUSED; while (1) { if (sp < low || sp > high - SIZE(s390_stack_frame)) return sp; reg = readmem_ul(sp + OFFSET(s390_stack_frame_r14)); if (!(bt->flags & BT_EFRAME_SEARCH)) 
{ if (!s390x_has_cpu(bt)) print_frame(bt, cnt++, sp, reg); if (bt->flags & BT_FULL) print_frame_data(sp, high); } /* Follow the backchain. */ while (1) { low = sp; sp = readmem_ul(sp + OFFSET(s390_stack_frame_back_chain)); if (!sp) { sp = low; break; } if (sp <= low || sp > high - SIZE(s390_stack_frame)) return sp; reg = readmem_ul(sp + OFFSET(s390_stack_frame_r14)); if (!(bt->flags & BT_EFRAME_SEARCH)) { print_frame(bt, cnt++, sp, reg); if (bt->flags & BT_FULL) print_frame_data(sp, high); } } /* Zero backchain detected, check for interrupt frame. */ iframe_addr = sp; sp += SIZE(s390_stack_frame); if (sp <= low || sp > high - STRUCT_SIZE("pt_regs")) return sp; /* Check for user PSW */ reg = readmem_ul(sp + MEMBER_OFFSET("pt_regs", "psw")); if (reg & S390X_PSW_MASK_PSTATE) { if (bt->flags & BT_EFRAME_SEARCH) fprintf(fp, " USER-MODE INTERRUPT FRAME at %lx\n", iframe_addr); else fprintf(fp, " USER-MODE INTERRUPT FRAME;"); fprintf(fp, " pt_regs at %lx:\n", sp); print_ptregs(bt, sp); return sp; } /* Get new backchain from r15 */ reg = readmem_ul(sp + MEMBER_OFFSET("pt_regs", "gprs") + 15 * sizeof(long)); /* Get address of interrupted function */ psw_addr = readmem_ul(sp + MEMBER_OFFSET("pt_regs", "psw") + sizeof(long)); /* Check for loop (kernel_thread_starter) of second zero bc */ if (low == reg || reg == 0) return reg; if (bt->flags & BT_EFRAME_SEARCH) fprintf(fp, " KERNEL-MODE INTERRUPT FRAME at %lx\n", iframe_addr); else fprintf(fp, " KERNEL-MODE INTERRUPT FRAME;"); fprintf(fp, " pt_regs at %lx:\n", sp); print_ptregs(bt, sp); low = sp; sp = reg; cnt = 0; } } /* * Unroll a kernel stack */ static void s390x_back_trace_cmd(struct bt_info *bt) { unsigned long low, high, sp = bt->stkptr; int cpu = bt->tc->processor, cnt = 0; char lowcore[LOWCORE_SIZE]; unsigned long psw_flags; if (bt->hp && bt->hp->eip) { error(WARNING, "instruction pointer argument ignored on this architecture!\n"); } if (is_task_active(bt->task) && !(kt->cpu_flags[cpu] & ONLINE_MAP)) { fprintf(fp, " 
CPU offline\n"); return; } /* * Print lowcore and print interrupt stacks when task has cpu */ if (s390x_has_cpu(bt)) { s390x_get_lowcore(bt, lowcore); psw_flags = ULONG(lowcore + OFFSET(s390_lowcore_psw_save_area)); if (psw_flags & S390X_PSW_MASK_PSTATE) { fprintf(fp,"Task runs in userspace\n"); s390x_print_lowcore(lowcore,bt,0); s390x_print_vx_sa(bt, lowcore); return; } s390x_print_lowcore(lowcore,bt,1); s390x_print_vx_sa(bt, lowcore); fprintf(fp,"\n"); if (symbol_exists("restart_stack")) { get_int_stack("restart_stack", cpu, lowcore, &low, &high); sp = show_trace(bt, cnt, sp, low, high); } if (MEMBER_EXISTS("lowcore", "nodat_stack")) get_int_stack("nodat_stack", cpu, lowcore, &low, &high); else get_int_stack("panic_stack", cpu, lowcore, &low, &high); sp = show_trace(bt, cnt, sp, low, high); get_int_stack("async_stack", cpu, lowcore, &low, &high); sp = show_trace(bt, cnt, sp, low, high); } /* * Print task stack */ if (THIS_KERNEL_VERSION >= LINUX(2, 6, 0)) { low = task_to_stackbase(bt->task); } else { low = bt->task; } high = low + KERNEL_STACK_SIZE; sp = show_trace(bt, cnt, sp, low, high); } /* * print lowcore info (psw and all registers) */ static void s390x_print_lowcore(char* lc, struct bt_info *bt,int show_symbols) { char* ptr; unsigned long tmp[4]; ptr = lc + OFFSET(s390_lowcore_psw_save_area); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390X_WORD_SIZE); if(BT_REFERENCE_CHECK(bt)){ if(bt->ref->cmdflags & BT_REF_HEXVAL){ if(tmp[1] == bt->ref->hexval) bt->ref->cmdflags |= BT_REF_FOUND; } else { if(STREQ(closest_symbol(tmp[1]),bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } return; } fprintf(fp," LOWCORE INFO:\n"); fprintf(fp," -psw : %#018lx %#018lx\n", tmp[0], tmp[1]); if(show_symbols){ fprintf(fp," -function : %s at %lx\n", closest_symbol(tmp[1]), tmp[1]); if (bt->flags & BT_LINE_NUMBERS) s390x_dump_line_number(tmp[1]); } ptr = lc + MEMBER_OFFSET(lc_struct, "prefixreg_save_area"); tmp[0] = UINT(ptr); fprintf(fp," -prefix : %#010lx\n", tmp[0]); ptr = lc + 
MEMBER_OFFSET(lc_struct, "cpu_timer_save_area"); tmp[0]=ULONG(ptr); fprintf(fp," -cpu timer: %#018lx\n", tmp[0]); ptr = lc + MEMBER_OFFSET(lc_struct, "clock_comp_save_area"); /* * Shift clock comparator by 8 because we got bit positions 0-55 * in byte 1 to 8. The first byte is always zero. */ tmp[0]=ULONG(ptr) << 8; fprintf(fp," -clock cmp: %#018lx\n", tmp[0]); fprintf(fp," -general registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "gpregs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 8 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 10* S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 11* S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 12* S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 13* S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 14* S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 15* S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); fprintf(fp," -access registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "access_regs_save_area"); tmp[0]=UINT(ptr); tmp[1]=UINT(ptr + 4); tmp[2]=UINT(ptr + 2 * 4); tmp[3]=UINT(ptr + 3 * 4); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=UINT(ptr + 4 * 4); tmp[1]=UINT(ptr + 5 * 4); tmp[2]=UINT(ptr + 6 * 4); tmp[3]=UINT(ptr + 7 * 4); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=UINT(ptr + 8 * 4); tmp[1]=UINT(ptr + 9 * 4); 
tmp[2]=UINT(ptr + 10 * 4); tmp[3]=UINT(ptr + 11 * 4); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); tmp[0]=UINT(ptr + 12 * 4); tmp[1]=UINT(ptr + 13 * 4); tmp[2]=UINT(ptr + 14 * 4); tmp[3]=UINT(ptr + 15 * 4); fprintf(fp," %#010lx %#010lx %#010lx %#010lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); fprintf(fp," -control registers:\n"); ptr = lc + MEMBER_OFFSET(lc_struct, "cregs_save_area"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 8 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 10 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 11 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 12 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 13 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 14 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 15 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); ptr = lc + MEMBER_OFFSET(lc_struct, "floating_pt_save_area"); fprintf(fp," -floating point registers:\n"); tmp[0]=ULONG(ptr); tmp[1]=ULONG(ptr + S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 2 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 3 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 4 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 5 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 6 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 7 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", 
tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 8 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 9 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 10 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 11 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); tmp[0]=ULONG(ptr + 12 * S390X_WORD_SIZE); tmp[1]=ULONG(ptr + 13 * S390X_WORD_SIZE); tmp[2]=ULONG(ptr + 14 * S390X_WORD_SIZE); tmp[3]=ULONG(ptr + 15 * S390X_WORD_SIZE); fprintf(fp," %#018lx %#018lx\n", tmp[0],tmp[1]); fprintf(fp," %#018lx %#018lx\n", tmp[2],tmp[3]); } /* * Get a stack frame combination of pc and ra from the most relevent spot. */ static void s390x_get_stack_frame(struct bt_info *bt, ulong *eip, ulong *esp) { unsigned long ksp, r14; int r14_offset; char lowcore[LOWCORE_SIZE]; if(s390x_has_cpu(bt)) s390x_get_lowcore(bt, lowcore); /* get the stack pointer */ if(esp){ if (!LIVE() && s390x_has_cpu(bt)) { ksp = ULONG(lowcore + MEMBER_OFFSET(lc_struct, "gpregs_save_area") + (15 * S390X_WORD_SIZE)); } else { readmem(bt->task + OFFSET(task_struct_thread_ksp), KVADDR, &ksp, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } *esp = ksp; } else { /* for 'bt -S' */ ksp=bt->hp->esp; } /* get the instruction address */ if(!eip) return; if(s390x_has_cpu(bt) && esp){ *eip = ULONG(lowcore + OFFSET(s390_lowcore_psw_save_area) + S390X_WORD_SIZE); } else { if(!STRUCT_EXISTS("stack_frame")){ r14_offset = 112; } else { r14_offset = MEMBER_OFFSET("stack_frame","gprs") + 8 * S390X_WORD_SIZE; } readmem(ksp + r14_offset,KVADDR,&r14,sizeof(void*),"eip", FAULT_ON_ERROR); *eip=r14; } } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int s390x_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * so 
this routine both fixes the references as well as imposing the current * output radix on the translations. */ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x") || STRNEQ(p1, ",0x"))) p1--; if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x") || STRNEQ(p1, ",0x"))) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int s390x_get_smp_cpus(void) { return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Machine dependent command. */ void s390x_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != EOF) { switch(c) { case 'c': fprintf(fp,"'-c' option is not implemented on this architecture\n"); return; case 'm': fprintf(fp,"'-m' option is not implemented on this architecture\n"); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); s390x_display_machine_stats(); } /* * "mach" command output. 
*/ static void s390x_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } static const char *hook_files[] = { "arch/s390x/kernel/entry.S", "arch/s390x/kernel/head.S" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook s390x_line_number_hooks[] = { {"startup",HEAD_S}, {"_stext",HEAD_S}, {"_pstart",HEAD_S}, {"system_call",ENTRY_S}, {"sysc_do_svc",ENTRY_S}, {"sysc_do_restart",ENTRY_S}, {"sysc_return",ENTRY_S}, {"sysc_sigpending",ENTRY_S}, {"sysc_restart",ENTRY_S}, {"sysc_singlestep",ENTRY_S}, {"sysc_tracesys",ENTRY_S}, {"ret_from_fork",ENTRY_S}, {"pgm_check_handler",ENTRY_S}, {"io_int_handler",ENTRY_S}, {"io_return",ENTRY_S}, {"ext_int_handler",ENTRY_S}, {"mcck_int_handler",ENTRY_S}, {"mcck_return",ENTRY_S}, {"restart_int_handler",ENTRY_S}, {NULL, NULL} /* list must be NULL-terminated */ }; static void s390x_dump_line_number(ulong callpc) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) { fprintf(fp, GDB_PATCHED() ? 
"" : " (cannot determine file and line number)\n"); } else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } static int s390x_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; physaddr_t phys1, phys2; ulong pp1, pp2; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->kvbase; vrp[cnt++].end = vt->high_memory; vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = first_vmalloc_address(); vrp[cnt++].end = last_vmalloc_address(); phys1 = (physaddr_t)(0); phys2 = (physaddr_t)VTOP(vt->high_memory - PAGESIZE()); if (phys_to_page(phys1, &pp1) && phys_to_page(phys2, &pp2) && (pp1 >= vrp[cnt-1].end)) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = pp1; vrp[cnt++].end = pp2; } return cnt; } #endif /* S390X */ crash-utility-crash-9cd43f5/global_data.c0000664000372000037200000001334015107550337017772 0ustar juerghjuergh/* global_data.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2010, 2012-2013, 2018 David Anderson * Copyright (C) 2002-2006, 2010, 2012-2013, 2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" /* * Data output FILE pointer. The contents of fp are changed on the fly * depending upon whether the output is going to stdout, redirected to a * user-designated pipe or file, or to the "standard" scrolling pipe. * Regardless of where it ends up, fprintf(fp, ...) is used throughout * instead of printf(). 
*/ FILE *fp; /* * The state of the program is kept in the program_context structure. * Given that it's consulted so often, "pc" is globally available to * quickly access the structure contents. */ struct program_context program_context = { 0 }; struct program_context *pc = &program_context; /* * The same thing goes for accesses to the frequently-accessed task_table, * kernel_table, vm_table, symbol_table_data and machdep_table, making the * "tt", "kt", "vt", "st" and "machdep" pointers globally available. */ struct task_table task_table = { 0 }; struct task_table *tt = &task_table; struct kernel_table kernel_table = { 0 }; struct kernel_table *kt = &kernel_table; struct vm_table vm_table = { 0 }; struct vm_table *vt = &vm_table; struct symbol_table_data symbol_table_data = { 0 }; struct symbol_table_data *st = &symbol_table_data; struct machdep_table machdep_table = { 0 }; struct machdep_table *machdep = &machdep_table; /* * Command functions are entered with the args[] array and argcnt value * pre-set for issuance to getopt(). */ char *args[MAXARGS]; /* argument array */ int argcnt; /* argument count */ int argerrs; /* argument error counter */ /* * To add a new command, declare it in defs.h and enter it in this table. 
*/ struct command_table_entry linux_command_table[] = { {"*", cmd_pointer, help_pointer, 0}, {"alias", cmd_alias, help_alias, 0}, {"ascii", cmd_ascii, help_ascii, 0}, {"bpf", cmd_bpf, help_bpf, 0}, {"bt", cmd_bt, help_bt, REFRESH_TASK_TABLE}, {"btop", cmd_btop, help_btop, 0}, {"dev", cmd_dev, help_dev, 0}, {"dis", cmd_dis, help_dis, MINIMAL}, {"eval", cmd_eval, help_eval, MINIMAL}, {"exit", cmd_quit, help_exit, MINIMAL}, {"extend", cmd_extend, help_extend, MINIMAL}, {"files", cmd_files, help_files, REFRESH_TASK_TABLE}, {"foreach", cmd_foreach, help_foreach, REFRESH_TASK_TABLE}, {"fuser", cmd_fuser, help_fuser, REFRESH_TASK_TABLE}, {"gdb", cmd_gdb, help_gdb, REFRESH_TASK_TABLE}, {"help", cmd_help, help_help, MINIMAL}, {"ipcs", cmd_ipcs, help_ipcs, REFRESH_TASK_TABLE}, {"irq", cmd_irq, help_irq, 0}, {"kmem", cmd_kmem, help_kmem, 0}, {"list", cmd_list, help__list, REFRESH_TASK_TABLE}, {"log", cmd_log, help_log, MINIMAL}, {"mach", cmd_mach, help_mach, 0}, {"map", cmd_map, help_map, HIDDEN_COMMAND}, {"mod", cmd_mod, help_mod, 0}, {"mount", cmd_mount, help_mount, REFRESH_TASK_TABLE}, {"net", cmd_net, help_net, REFRESH_TASK_TABLE}, {"p", cmd_p, help_p, 0}, {"ps", cmd_ps, help_ps, REFRESH_TASK_TABLE}, {"pte", cmd_pte, help_pte, 0}, {"ptob", cmd_ptob, help_ptob, 0}, {"ptov", cmd_ptov, help_ptov, 0}, {"q", cmd_quit, help_quit, MINIMAL}, {"tree", cmd_tree, help_tree, REFRESH_TASK_TABLE}, {"rd", cmd_rd, help_rd, MINIMAL}, {"repeat", cmd_repeat, help_repeat, 0}, {"runq", cmd_runq, help_runq, REFRESH_TASK_TABLE}, {"sbitmapq", cmd_sbitmapq, help_sbitmapq, 0}, {"search", cmd_search, help_search, 0}, {"set", cmd_set, help_set, REFRESH_TASK_TABLE | MINIMAL}, {"sig", cmd_sig, help_sig, REFRESH_TASK_TABLE}, {"struct", cmd_struct, help_struct, 0}, {"swap", cmd_swap, help_swap, 0}, {"sym", cmd_sym, help_sym, MINIMAL}, {"sys", cmd_sys, help_sys, REFRESH_TASK_TABLE}, {"task", cmd_task, help_task, REFRESH_TASK_TABLE}, {"test", cmd_test, NULL, HIDDEN_COMMAND}, {"timer", cmd_timer, 
help_timer, 0}, {"union", cmd_union, help_union, 0}, {"vm", cmd_vm, help_vm, REFRESH_TASK_TABLE}, {"vtop", cmd_vtop, help_vtop, REFRESH_TASK_TABLE}, {"waitq", cmd_waitq, help_waitq, REFRESH_TASK_TABLE}, {"whatis", cmd_whatis, help_whatis, 0}, {"wr", cmd_wr, help_wr, 0}, #if defined(S390) || defined(S390X) {"s390dbf", cmd_s390dbf, help_s390dbf, 0}, #endif {"rustfilt", cmd_rustfilt, help_rustfilt, MINIMAL}, {(char *)NULL} }; struct extension_table *extension_table = NULL; /* * The offset_table and size_table structure contents are referenced * through several OFFSET- and SIZE-related macros. The array_table * is a shortcut used by get_array_length(). */ struct offset_table offset_table = { 0 }; struct size_table size_table = { 0 }; struct array_table array_table = { 0 }; crash-utility-crash-9cd43f5/kaslr_helper.c0000664000372000037200000004610015107550337020214 0ustar juerghjuergh/* * kaslr_helper - helper for kaslr offset calculation * * Copyright (c) 2011 FUJITSU LIMITED * Copyright (c) 2018 Red Hat Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Authors: HATAYAMA Daisuke * INDOH Takao * Sergio Lopez */ #include "defs.h" #include #include #ifdef X86_64 /* * Get address of vector0 interrupt handler (Devide Error) from Interrupt * Descriptor Table. 
*/ static ulong get_vec0_addr(ulong idtr) { struct gate_struct64 { uint16_t offset_low; uint16_t segment; uint32_t ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; uint16_t offset_middle; uint32_t offset_high; uint32_t zero1; } __attribute__((packed)) gate; readmem(idtr, PHYSADDR, &gate, sizeof(gate), "idt_table", FAULT_ON_ERROR); return ((ulong)gate.offset_high << 32) + ((ulong)gate.offset_middle << 16) + gate.offset_low; } /* * Parse a string of [size[KMG] ]offset[KMG] * Import from Linux kernel(lib/cmdline.c) */ static ulong memparse(char *ptr, char **retptr) { char *endptr; unsigned long long ret = strtoull(ptr, &endptr, 0); switch (*endptr) { case 'E': case 'e': ret <<= 10; case 'P': case 'p': ret <<= 10; case 'T': case 't': ret <<= 10; case 'G': case 'g': ret <<= 10; case 'M': case 'm': ret <<= 10; case 'K': case 'k': ret <<= 10; endptr++; default: break; } if (retptr) *retptr = endptr; return ret; } /* * Find "elfcorehdr=" in the boot parameter of kernel and return the address * of elfcorehdr. */ static ulong get_elfcorehdr(ulong kaslr_offset) { char cmdline[BUFSIZE], *ptr; ulong cmdline_vaddr; ulong cmdline_paddr; ulong buf_vaddr, buf_paddr; char *end; ulong elfcorehdr_addr = 0, elfcorehdr_size = 0; int verbose = CRASHDEBUG(1)? 
1: 0; cmdline_vaddr = st->saved_command_line_vmlinux + kaslr_offset; if (!kvtop(NULL, cmdline_vaddr, &cmdline_paddr, verbose)) return 0; if (CRASHDEBUG(1)) { fprintf(fp, "cmdline vaddr=%lx\n", cmdline_vaddr); fprintf(fp, "cmdline paddr=%lx\n", cmdline_paddr); } if (!readmem(cmdline_paddr, PHYSADDR, &buf_vaddr, sizeof(ulong), "saved_command_line", RETURN_ON_ERROR)) return 0; if (!kvtop(NULL, buf_vaddr, &buf_paddr, verbose)) return 0; if (CRASHDEBUG(1)) { fprintf(fp, "cmdline buffer vaddr=%lx\n", buf_vaddr); fprintf(fp, "cmdline buffer paddr=%lx\n", buf_paddr); } memset(cmdline, 0, BUFSIZE); if (!readmem(buf_paddr, PHYSADDR, cmdline, BUFSIZE, "saved_command_line", RETURN_ON_ERROR)) return 0; ptr = strstr(cmdline, "elfcorehdr="); if (!ptr) return 0; if (CRASHDEBUG(1)) fprintf(fp, "2nd kernel detected\n"); ptr += strlen("elfcorehdr="); elfcorehdr_addr = memparse(ptr, &end); if (*end == '@') { elfcorehdr_size = elfcorehdr_addr; elfcorehdr_addr = memparse(end + 1, &end); } if (CRASHDEBUG(1)) { fprintf(fp, "elfcorehdr_addr=%lx\n", elfcorehdr_addr); fprintf(fp, "elfcorehdr_size=%lx\n", elfcorehdr_size); } return elfcorehdr_addr; } /* * Get vmcoreinfo from elfcorehdr. 
* Some codes are imported from Linux kernel(fs/proc/vmcore.c) */ static int get_vmcoreinfo(ulong elfcorehdr, ulong *addr, int *len) { unsigned char e_ident[EI_NIDENT]; Elf64_Ehdr ehdr; Elf64_Phdr phdr; Elf64_Nhdr nhdr; ulong ptr; ulong nhdr_offset = 0; int i; if (!readmem(elfcorehdr, PHYSADDR, e_ident, EI_NIDENT, "EI_NIDENT", RETURN_ON_ERROR)) return FALSE; if (e_ident[EI_CLASS] != ELFCLASS64) { error(INFO, "Only ELFCLASS64 is supportd\n"); return FALSE; } if (!readmem(elfcorehdr, PHYSADDR, &ehdr, sizeof(ehdr), "Elf64_Ehdr", RETURN_ON_ERROR)) return FALSE; /* Sanity Check */ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || (ehdr.e_type != ET_CORE) || ehdr.e_ident[EI_CLASS] != ELFCLASS64 || ehdr.e_ident[EI_VERSION] != EV_CURRENT || ehdr.e_version != EV_CURRENT || ehdr.e_ehsize != sizeof(Elf64_Ehdr) || ehdr.e_phentsize != sizeof(Elf64_Phdr) || ehdr.e_phnum == 0) { error(INFO, "Invalid elf header\n"); return FALSE; } ptr = elfcorehdr + ehdr.e_phoff; for (i = 0; i < ehdr.e_phnum; i++) { ulong offset; char name[16]; if (!readmem(ptr, PHYSADDR, &phdr, sizeof(phdr), "Elf64_Phdr", RETURN_ON_ERROR)) return FALSE; ptr += sizeof(phdr); if (phdr.p_type != PT_NOTE) continue; offset = phdr.p_offset; if (!readmem(offset, PHYSADDR, &nhdr, sizeof(nhdr), "Elf64_Nhdr", RETURN_ON_ERROR)) return FALSE; offset += DIV_ROUND_UP(sizeof(Elf64_Nhdr), sizeof(Elf64_Word))* sizeof(Elf64_Word); memset(name, 0, sizeof(name)); if (!readmem(offset, PHYSADDR, name, sizeof(name), "Elf64_Nhdr name", RETURN_ON_ERROR)) return FALSE; if(!strcmp(name, "VMCOREINFO")) { nhdr_offset = offset; break; } } if (!nhdr_offset) return FALSE; *addr = nhdr_offset + DIV_ROUND_UP(nhdr.n_namesz, sizeof(Elf64_Word))* sizeof(Elf64_Word); *len = nhdr.n_descsz; if (CRASHDEBUG(1)) { fprintf(fp, "vmcoreinfo addr=%lx\n", *addr); fprintf(fp, "vmcoreinfo len=%d\n", *len); } return TRUE; } static int qemu_get_nr_cpus(void) { if (DISKDUMP_DUMPFILE()) return diskdump_get_nr_cpus(); else if (KDUMP_DUMPFILE()) return 
kdump_get_nr_cpus(); return 0; } static int qemu_get_cr3_cr4_idtr(int cpu, ulong *cr3, ulong *cr4, ulong *idtr) { QEMUCPUState *cpustat; if (DISKDUMP_DUMPFILE()) cpustat = diskdump_get_qemucpustate(cpu); else if (KDUMP_DUMPFILE()) cpustat = kdump_get_qemucpustate(cpu); else return FALSE; if (!cpustat) return FALSE; *cr3 = cpustat->cr[3]; *cr4 = cpustat->cr[4]; *idtr = cpustat->idt.base; return TRUE; } /* * Check if current kaslr_offset/phys_base is for 1st kernel or 2nd kernel. * If we are in 2nd kernel, get kaslr_offset/phys_base from vmcoreinfo. * * 1. Get command line and try to retrieve "elfcorehdr=" boot parameter * 2. If "elfcorehdr=" is not found in command line, we are in 1st kernel. * There is nothing to do. * 3. If "elfcorehdr=" is found, we are in 2nd kernel. Find vmcoreinfo * using "elfcorehdr=" and retrieve kaslr_offset/phys_base from vmcoreinfo. */ static int get_kaslr_offset_from_vmcoreinfo(ulong orig_kaslr_offset, ulong *kaslr_offset, ulong *phys_base) { ulong elfcorehdr_addr = 0; ulong vmcoreinfo_addr; int vmcoreinfo_len; char *buf, *pos; int ret = FALSE; /* Find "elfcorehdr=" in the kernel boot parameter */ elfcorehdr_addr = get_elfcorehdr(orig_kaslr_offset); if (!elfcorehdr_addr) return FALSE; /* Get vmcoreinfo from the address of "elfcorehdr=" */ if (!get_vmcoreinfo(elfcorehdr_addr, &vmcoreinfo_addr, &vmcoreinfo_len)) return FALSE; if (!vmcoreinfo_len) return FALSE; if (CRASHDEBUG(1)) fprintf(fp, "Find vmcoreinfo in kdump memory\n"); buf = GETBUF(vmcoreinfo_len); if (!readmem(vmcoreinfo_addr, PHYSADDR, buf, vmcoreinfo_len, "vmcoreinfo", RETURN_ON_ERROR)) goto quit; /* Get phys_base form vmcoreinfo */ pos = strstr(buf, "NUMBER(phys_base)="); if (!pos) goto quit; *phys_base = strtoull(pos + strlen("NUMBER(phys_base)="), NULL, 0); /* Get kaslr_offset form vmcoreinfo */ pos = strstr(buf, "KERNELOFFSET="); if (!pos) goto quit; *kaslr_offset = strtoull(pos + strlen("KERNELOFFSET="), NULL, 16); ret = TRUE; quit: FREEBUF(buf); return ret; } static int 
get_nr_cpus(void) { if (SADUMP_DUMPFILE()) return sadump_get_nr_cpus(); else if (QEMU_MEM_DUMP_NO_VMCOREINFO()) return qemu_get_nr_cpus(); else if (VMSS_DUMPFILE()) return vmware_vmss_get_nr_cpus(); return 0; } static int get_cr3_cr4_idtr(int cpu, ulong *cr3, ulong *cr4, ulong *idtr) { if (SADUMP_DUMPFILE()) return sadump_get_cr3_cr4_idtr(cpu, cr3, cr4, idtr); else if (QEMU_MEM_DUMP_NO_VMCOREINFO()) return qemu_get_cr3_cr4_idtr(cpu, cr3, cr4, idtr); else if (VMSS_DUMPFILE()) return vmware_vmss_get_cr3_cr4_idtr(cpu, cr3, cr4, idtr); return FALSE; } #define BANNER "Linux version" static int verify_kaslr_offset(ulong kaslr_offset) { char buf[sizeof(BANNER)]; ulong linux_banner_paddr; if (!kvtop(NULL, st->linux_banner_vmlinux + kaslr_offset, &linux_banner_paddr, CRASHDEBUG(1))) return FALSE; if (!readmem(linux_banner_paddr, PHYSADDR, buf, sizeof(buf), "linux_banner", RETURN_ON_ERROR)) return FALSE; if (!STRNEQ(buf, BANNER)) return FALSE; return TRUE; } /* * Find virtual (VA) and physical (PA) addresses of kernel start * * va: * Actual address of the kernel start (_stext) placed * randomly by kaslr feature. To be more accurate, * VA = _stext(from vmlinux) + kaslr_offset * * pa: * Physical address where the kerenel is placed. * * In nokaslr case, VA = _stext (from vmlinux) * In kaslr case, virtual address of the kernel placement goes * in this range: ffffffff80000000..ffffffff9fffffff, or * __START_KERNEL_map..+512MB * * https://www.kernel.org/doc/Documentation/x86/x86_64/mm.txt * * Randomized VA will be the first valid page starting from * ffffffff80000000 (__START_KERNEL_map). Page tree entry of * this page will contain the PA of the kernel start. 
*/ static int find_kernel_start(uint64_t pgd, ulong *va, ulong *pa) { int pgd_idx, p4d_idx, pud_idx, pmd_idx, pte_idx; uint64_t pgd_pte = 0, pud_pte, pmd_pte, pte; pgd_idx = pgd_index(__START_KERNEL_map); if (machdep->flags & VM_5LEVEL) p4d_idx = p4d_index(__START_KERNEL_map); pud_idx = pud_index(__START_KERNEL_map); pmd_idx = pmd_index(__START_KERNEL_map); pte_idx = pte_index(__START_KERNEL_map); /* If the VM is in 5-level page table */ if (machdep->flags & VM_5LEVEL) *va = ~((1UL << 57) - 1); else *va = ~__VIRTUAL_MASK; FILL_PGD(pgd & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE()); for (; pgd_idx < PTRS_PER_PGD; pgd_idx++) { pgd_pte = ULONG(machdep->pgd + pgd_idx * sizeof(uint64_t)); if (pgd_pte & _PAGE_PRESENT) break; p4d_idx = pud_idx = pmd_idx = pte_idx = 0; } if (pgd_idx == PTRS_PER_PGD) return FALSE; *va |= (ulong)pgd_idx << __PGDIR_SHIFT; if (machdep->flags & VM_5LEVEL) { FILL_P4D(pgd_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE()); for (; p4d_idx < PTRS_PER_P4D; p4d_idx++) { /* reuse pgd_pte */ pgd_pte = ULONG(machdep->machspec->p4d + p4d_idx * sizeof(uint64_t)); if (pgd_pte & _PAGE_PRESENT) break; pud_idx = pmd_idx = pte_idx = 0; } if (p4d_idx == PTRS_PER_P4D) return FALSE; *va |= (ulong)p4d_idx << P4D_SHIFT; } FILL_PUD(pgd_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE()); for (; pud_idx < PTRS_PER_PUD; pud_idx++) { pud_pte = ULONG(machdep->pud + pud_idx * sizeof(uint64_t)); if (pud_pte & _PAGE_PRESENT) break; pmd_idx = pte_idx = 0; } if (pud_idx == PTRS_PER_PUD) return FALSE; *va |= (ulong)pud_idx << PUD_SHIFT; if (pud_pte & _PAGE_PSE) { /* 1GB page */ *pa = pud_pte & PHYSICAL_PAGE_MASK; return TRUE; } FILL_PMD(pud_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE()); for (; pmd_idx < PTRS_PER_PMD; pmd_idx++) { pmd_pte = ULONG(machdep->pmd + pmd_idx * sizeof(uint64_t)); if (pmd_pte & _PAGE_PRESENT) break; pte_idx = 0; } if (pmd_idx == PTRS_PER_PMD) return FALSE; *va |= pmd_idx << PMD_SHIFT; if (pmd_pte & _PAGE_PSE) { /* 2MB page */ *pa = pmd_pte & 
PHYSICAL_PAGE_MASK; return TRUE; } FILL_PTBL(pmd_pte & PHYSICAL_PAGE_MASK, PHYSADDR, PAGESIZE()); for (; pte_idx < PTRS_PER_PTE; pte_idx++) { pte = ULONG(machdep->ptbl + pte_idx * sizeof(uint64_t)); if (pte & _PAGE_PRESENT) break; } if (pte_idx == PTRS_PER_PTE) return FALSE; *va |= pte_idx << PAGE_SHIFT; *pa = pmd_pte & PHYSICAL_PAGE_MASK; return TRUE; } /* * Page Tables based method to calculate kaslr_offset and phys_base. * It uses VA and PA of kernel start. * * kaslr offset and phys_base are calculated as follows: * * kaslr_offset = VA - st->_stext_vmlinux * phys_base = PA - (VA - __START_KERNEL_map) */ static int calc_kaslr_offset_from_page_tables(uint64_t pgd, ulong *kaslr_offset, ulong *phys_base) { ulong va, pa; if (!st->_stext_vmlinux || st->_stext_vmlinux == UNINITIALIZED) { fprintf(fp, "%s: st->_stext_vmlinux must be initialized\n", __FUNCTION__); return FALSE; } if (!find_kernel_start(pgd, &va, &pa)) return FALSE; if (CRASHDEBUG(1)) { fprintf(fp, "calc_kaslr_offset: _stext(vmlinux): %lx\n", st->_stext_vmlinux); fprintf(fp, "calc_kaslr_offset: kernel start VA: %lx\n", va); fprintf(fp, "calc_kaslr_offset: kernel start PA: %lx\n", pa); } *kaslr_offset = va - st->_stext_vmlinux; *phys_base = pa - (va - __START_KERNEL_map); return TRUE; } /* * IDT based method to calculate kaslr_offset and phys_base * * kaslr offset and phys_base are calculated as follows: * * kaslr_offset: * 1) Get IDTR and CR3 value from the dump header. * 2) Get a virtual address of IDT from IDTR value * --- (A) * 3) Translate (A) to physical address using CR3, the upper 52 bits * of which points a top of page table. * --- (B) * 4) Get an address of vector0 (Devide Error) interrupt handler from * IDT, which are pointed by (B). 
*    --- (C)
 * 5) Get an address of symbol "divide_error" from vmlinux
 *    --- (D)
 *
 * Now we have two addresses:
 *   (C)-> Actual address of "divide_error"
 *   (D)-> Original address of "divide_error" in the vmlinux
 *
 * kaslr_offset can be calculated by the difference between these two
 * values.
 *
 * phys_base:
 * 1) Get IDT virtual address from vmlinux
 *    --- (E)
 *
 * So phys_base can be calculated using the relationship of directly
 * mapped addresses.
 *
 * phys_base =
 *   Physical address(B) -
 *   (Virtual address(E) + kaslr_offset - __START_KERNEL_map)
 *
 * Note that the address (A) cannot be used instead of (E) because (A) is
 * not a direct map address, it's a fixed map address.
 *
 * NOTE: This solution works in most every case, but does not work in the
 * following case.  If the dump is captured on early stage of kernel boot,
 * IDTR points to the early IDT table(early_idts) instead of normal
 * IDT(idt_table).  Need enhancement.
 */
static int
calc_kaslr_offset_from_idt(uint64_t idtr, uint64_t pgd, ulong *kaslr_offset, ulong *phys_base)
{
	uint64_t idtr_paddr;
	ulong divide_error_vmcore;
	/* verbose page-table translation only under debug */
	int verbose = CRASHDEBUG(1)? 1: 0;

	if (!idtr)
		return FALSE;

	/* Convert virtual address of IDT table to physical address */
	if (!kvtop(NULL, idtr, &idtr_paddr, verbose))
		return FALSE;

	/* Now we can calculate kaslr_offset and phys_base */
	divide_error_vmcore = get_vec0_addr(idtr_paddr);
	*kaslr_offset = divide_error_vmcore - st->divide_error_vmlinux;
	*phys_base = idtr_paddr -
		(st->idt_table_vmlinux + *kaslr_offset - __START_KERNEL_map);

	if (verbose) {
		fprintf(fp, "calc_kaslr_offset: idtr=%lx\n", idtr);
		fprintf(fp, "calc_kaslr_offset: pgd=%lx\n", pgd);
		fprintf(fp, "calc_kaslr_offset: idtr(phys)=%lx\n", idtr_paddr);
		fprintf(fp, "calc_kaslr_offset: divide_error(vmlinux): %lx\n",
			st->divide_error_vmlinux);
		fprintf(fp, "calc_kaslr_offset: divide_error(vmcore): %lx\n",
			divide_error_vmcore);
	}

	return TRUE;
}

/*
 * Calculate kaslr_offset and phys_base
 *
 * kaslr_offset:
 *   The difference between original address in System.map or vmlinux and
 *   actual address placed randomly by kaslr feature.  To be more accurate,
 *   kaslr_offset = actual address - original address
 *
 * phys_base:
 *   Physical address where the kernel is placed.  In other words, it's a
 *   physical address of __START_KERNEL_map.  This is also decided randomly
 *   by kaslr.
 *
 * It walks through all available CPUs registers to calculate the offset/base.
 *
 * Also, it considers the case where the dump is captured while kdump is
 * working, i.e. IDTR points to the IDT table of the 2nd kernel, not the
 * 1st kernel.  In that case, get kaslr_offset and phys_base as follows.
 *
 * 1) Get kaslr_offset and phys_base using the above solution.
 * 2) Get kernel boot parameter from "saved_command_line"
 * 3) If "elfcorehdr=" is not included in boot parameter, we are in the
 *    first kernel, nothing to do any more.
 * 4) If "elfcorehdr=" is included in boot parameter, we are in the 2nd
 *    kernel.  Retrieve vmcoreinfo from address of "elfcorehdr=" and
 *    get kaslr_offset and phys_base from vmcoreinfo.
*/ #define PTI_USER_PGTABLE_BIT PAGE_SHIFT #define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) #define CR3_PCID_MASK 0xFFFull #define CR4_LA57 (1 << 12) int calc_kaslr_offset(ulong *ko, ulong *pb) { uint64_t cr3 = 0, cr4 = 0, idtr = 0, pgd = 0; ulong kaslr_offset, phys_base; ulong kaslr_offset_kdump, phys_base_kdump; int cpu, nr_cpus; if (!machine_type("X86_64")) return FALSE; nr_cpus = get_nr_cpus(); for (cpu = 0; cpu < nr_cpus; cpu++) { if (!get_cr3_cr4_idtr(cpu, &cr3, &cr4, &idtr)) continue; if (!cr3) continue; if (st->pti_init_vmlinux || st->kaiser_init_vmlinux) pgd = cr3 & ~(CR3_PCID_MASK|PTI_USER_PGTABLE_MASK); else pgd = cr3 & ~CR3_PCID_MASK; /* * Set up for kvtop. * * calc_kaslr_offset() is called before machdep_init(PRE_GDB), so some * variables are not initialized yet. Set up them here to call kvtop(). * * TODO: XEN is not supported */ vt->kernel_pgd[0] = pgd; machdep->last_pgd_read = vt->kernel_pgd[0]; if (cr4 & CR4_LA57) { machdep->flags |= VM_5LEVEL; machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_5LEVEL; machdep->machspec->pgdir_shift = PGDIR_SHIFT_5LEVEL; machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD_5LEVEL; if ((machdep->machspec->p4d = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc p4d space."); machdep->machspec->last_p4d_read = 0; } else { machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6; machdep->machspec->pgdir_shift = PGDIR_SHIFT; machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD; } if (!readmem(pgd, PHYSADDR, machdep->pgd, PAGESIZE(), "pgd", RETURN_ON_ERROR)) continue; if (!calc_kaslr_offset_from_page_tables(pgd, &kaslr_offset, &phys_base)) { if (!calc_kaslr_offset_from_idt(idtr, pgd, &kaslr_offset, &phys_base)) continue; } if (verify_kaslr_offset(kaslr_offset)) goto found; } vt->kernel_pgd[0] = 0; machdep->last_pgd_read = 0; return FALSE; found: /* * Check if current kaslr_offset/phys_base is for 1st kernel or 2nd * kernel. 
If we are in 2nd kernel, get kaslr_offset/phys_base * from vmcoreinfo */ if (get_kaslr_offset_from_vmcoreinfo(kaslr_offset, &kaslr_offset_kdump, &phys_base_kdump)) { kaslr_offset = kaslr_offset_kdump; phys_base = phys_base_kdump; } else if (CRASHDEBUG(1)) { fprintf(fp, "kaslr_helper: failed to determine which kernel was running at crash,\n"); fprintf(fp, "kaslr_helper: asssuming the kdump 1st kernel.\n"); } if (CRASHDEBUG(1)) { fprintf(fp, "calc_kaslr_offset: kaslr_offset=%lx\n", kaslr_offset); fprintf(fp, "calc_kaslr_offset: phys_base=%lx\n", phys_base); } *ko = kaslr_offset; *pb = phys_base; vt->kernel_pgd[0] = 0; machdep->last_pgd_read = 0; return TRUE; } #else int calc_kaslr_offset(ulong *kaslr_offset, ulong *phys_page) { return FALSE; } #endif /* X86_64 */ crash-utility-crash-9cd43f5/ppc64.c0000664000372000037200000034257715107550337016516 0ustar juerghjuergh/* ppc64.c -- core analysis suite * * Copyright (C) 2004-2015,2018 David Anderson * Copyright (C) 2004-2015,2018 Red Hat, Inc. All rights reserved. * Copyright (C) 2004, 2006 Haren Myneni, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifdef PPC64 #include "defs.h" #include #include static int ppc64_kvtop(struct task_context *, ulong, physaddr_t *, int); static int ppc64_uvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ppc64_vmalloc_start(void); static int ppc64_vmemmap_to_phys(ulong, physaddr_t *, int); static int ppc64_is_task_addr(ulong); static int ppc64_verify_symbol(const char *, ulong, char); static ulong ppc64_get_task_pgd(ulong); static int ppc64_translate_pte(ulong, void *, ulonglong); static ulong ppc64_processor_speed(void); static int ppc64_eframe_search(struct bt_info *); static void ppc64_back_trace_cmd(struct bt_info *); static void ppc64_back_trace(struct gnu_request *, struct bt_info *); static void get_ppc64_frame(struct bt_info *, ulong *, ulong *); static void ppc64_print_stack_entry(int,struct gnu_request *, ulong, ulong, struct bt_info *); static void ppc64_dump_irq(int); static ulong ppc64_get_sp(ulong); static void ppc64_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ppc64_dis_filter(ulong, char *, unsigned int); static void ppc64_cmd_mach(void); static int ppc64_get_smp_cpus(void); static void ppc64_display_machine_stats(void); static void ppc64_dump_line_number(ulong); static struct line_number_hook ppc64_line_number_hooks[]; static ulong ppc64_get_stackbase(ulong); static ulong ppc64_get_stacktop(ulong); void ppc64_compiler_warning_stub(void); static ulong ppc64_in_irqstack(ulong); static enum emergency_stack_type ppc64_in_emergency_stack(int cpu, ulong addr, bool verbose); static void ppc64_set_bt_emergency_stack(enum emergency_stack_type type, struct bt_info *bt); static char * ppc64_check_eframe(struct ppc64_pt_regs *); static void ppc64_print_eframe(char *, struct ppc64_pt_regs *, struct bt_info *); static int ppc64_get_current_task_reg(int regno, const char *name, int size, void *value, int); static void parse_cmdline_args(void); static int ppc64_paca_percpu_offset_init(int); static void ppc64_init_cpu_info(void); static 
int ppc64_get_cpu_map(void);
static void ppc64_clear_machdep_cache(void);
static void ppc64_init_paca_info(void);
static void ppc64_vmemmap_init(void);
static int ppc64_get_kvaddr_ranges(struct vaddr_range *);
static uint get_ptetype(ulong pte);
static int is_hugepage(ulong pte);
static int is_hugepd(ulong pte);
static ulong hugepage_dir(ulong pte);
static ulong pgd_page_vaddr_l4(ulong pgd);
static ulong pud_page_vaddr_l4(ulong pud);
static ulong pmd_page_vaddr_l4(ulong pmd);
static int is_opal_context(ulong sp, ulong nip);
void opalmsg(void);

/* pt_regs plus a validity bitmap for registers recovered from stacks. */
struct user_regs_bitmap_struct {
	struct ppc64_pt_regs ur;
	ulong bitmap[32];
};

#define MAX_EXCEPTION_STACKS 7
ulong extra_stacks_idx = 0;
struct user_regs_bitmap_struct *extra_stacks_regs[MAX_EXCEPTION_STACKS] = {0};

/*
 * TRUE when either the stack pointer or the instruction pointer falls
 * inside the OPAL firmware region (only meaningful when OPAL_FW is set).
 */
static int is_opal_context(ulong sp, ulong nip)
{
	uint64_t opal_start, opal_end;

	if (!(machdep->flags & OPAL_FW))
		return FALSE;

	opal_start = machdep->machspec->opal.base;
	opal_end   = opal_start + machdep->machspec->opal.size;

	if (((sp >= opal_start) && (sp < opal_end)) ||
	    ((nip >= opal_start) && (nip < opal_end)))
		return TRUE;

	return FALSE;
}

/* Is @pte a leaf (huge page) entry?  Version/MMU dependent. */
static inline int is_hugepage(ulong pte)
{
	if ((machdep->flags & BOOK3E) ||
	    (THIS_KERNEL_VERSION < LINUX(3,10,0))) {
		/*
		 * hugepage support via hugepd for book3e and
		 * also kernel v3.9 & below.
		 */
		return 0;

	} else if (THIS_KERNEL_VERSION >= LINUX(4,5,0)) {
		/*
		 * leaf pte for huge page, if _PAGE_PTE is set.
		 */
		return !!(pte & _PAGE_PTE);

	} else { /* BOOK3S, kernel v3.10 - v4.4 */

		/*
		 * leaf pte for huge page, bottom two bits != 00
		 */
		return ((pte & HUGE_PTE_MASK) != 0x0);
	}
}

/* Is @pte a pointer to a huge page directory (hugepd)? */
static inline int is_hugepd(ulong pte)
{
	if ((machdep->flags & BOOK3E) ||
	    (THIS_KERNEL_VERSION < LINUX(3,10,0)))
		return ((pte & PD_HUGE) == 0x0);

	else if (THIS_KERNEL_VERSION >= LINUX(4,5,0)) {
		/*
		 * hugepd pointer, if _PAGE_PTE is not set and
		 * hugepd shift mask is set.
		 */
		return (!(pte & _PAGE_PTE) &&
			((pte & HUGEPD_SHIFT_MASK) != 0));

	} else { /* BOOK3S, kernel v3.10 - v4.4 */

		/*
		 * hugepd pointer, bottom two bits == 00 and next 4 bits
		 * indicate size of table
		 */
		return (((pte & HUGE_PTE_MASK) == 0x0) &&
			((pte & HUGEPD_SHIFT_MASK) != 0));
	}
}

/* Classify a page-table entry: 0: regular entry; 1: huge pte; 2: huge pd */
static inline uint get_ptetype(ulong pte)
{
	uint pte_type = 0; /* 0: regular entry; 1: huge pte; 2: huge pd */

	if (is_hugepage(pte))
		pte_type = 1;
	else if (!(machdep->flags & RADIX_MMU) &&
	    (PAGESIZE() != PPC64_64K_PAGE_SIZE) && is_hugepd(pte))
		pte_type = 2;

	return pte_type;
}

/* Convert a hugepd entry into the virtual address of the directory. */
static inline ulong hugepage_dir(ulong pte)
{
	if ((machdep->flags & BOOK3E) ||
	    (THIS_KERNEL_VERSION < LINUX(3,10,0)))
		return (ulong)((pte & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
	else if (machdep->flags & PHYS_ENTRY_L4)
		return PTOV(pte & ~HUGEPD_ADDR_MASK);
	else /* BOOK3S, kernel v3.10 - v4.4 */
		return (ulong)(pte & ~HUGEPD_SHIFT_MASK);
}

/* Virtual address of the next-level table referenced by a pgd entry. */
static inline ulong pgd_page_vaddr_l4(ulong pgd)
{
	ulong pgd_val;

	pgd_val = (pgd & ~machdep->machspec->pgd_masked_bits);
	if (machdep->flags & PHYS_ENTRY_L4) {
		/*
		 * physical address is stored starting from kernel v4.6
		 */
		pgd_val = PTOV(pgd_val);
	}

	return pgd_val;
}

/* Virtual address of the next-level table referenced by a pud entry. */
static inline ulong pud_page_vaddr_l4(ulong pud)
{
	ulong pud_val;

	pud_val = (pud & ~machdep->machspec->pud_masked_bits);
	if (machdep->flags & PHYS_ENTRY_L4) {
		/*
		 * physical address is stored starting from kernel v4.6
		 */
		pud_val = PTOV(pud_val);
	}

	return pud_val;
}

/* Virtual address of the next-level table referenced by a pmd entry. */
static inline ulong pmd_page_vaddr_l4(ulong pmd)
{
	ulong pmd_val;

	pmd_val = (pmd & ~machdep->machspec->pmd_masked_bits);
	if (machdep->flags & PHYS_ENTRY_L4) {
		/*
		 * physical address is stored starting from kernel v4.6
		 */
		pmd_val = PTOV(pmd_val);
	}

	return pmd_val;
}

static int book3e_is_kvaddr(ulong addr)
{
	return (addr >= BOOK3E_VMBASE);
}

static int book3e_is_vmaddr(ulong addr)
{
	return (addr >= BOOK3E_VMBASE) && (addr < machdep->identity_map_base);
}

static int ppc64_is_vmaddr(ulong addr)
{
	return (vt->vmalloc_start && addr >= vt->vmalloc_start);
}

#define is_RHEL8() (strstr(kt->proc_version, ".el8."))

/*
 * Determine machdep->max_physmem_bits, preferring an exact value from
 * vmcoreinfo and falling back to kernel-version heuristics.
 */
static int set_ppc64_max_physmem_bits(void)
{
	int dimension;
	char *string;

	/* vmcoreinfo, if available, gives the authoritative value. */
	if ((string = pc->read_vmcoreinfo("NUMBER(MAX_PHYSMEM_BITS)"))) {
		machdep->max_physmem_bits = atol(string);
		free(string);
		return 0;
	}

	get_array_length("mem_section", &dimension, 0);
	if ((machdep->flags & VMEMMAP) &&
	    (THIS_KERNEL_VERSION >= LINUX(4,20,0)) &&
	    !dimension && (machdep->pagesize == 65536)) {
		/*
		 * SPARSEMEM_VMEMMAP & SPARSEMEM_EXTREME configurations with
		 * 64K pagesize and v4.20 kernel or later.
		 */
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_4_20;
	} else if ((machdep->flags & VMEMMAP) &&
	    ((THIS_KERNEL_VERSION >= LINUX(4,19,0)) || is_RHEL8())) {
		/* SPARSEMEM_VMEMMAP & v4.19 kernel or later, or RHEL8 */
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_4_19;
	} else if (THIS_KERNEL_VERSION >= LINUX(3,7,0))
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_3_7;
	else
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;

	return 0;
}

/* Default (BOOK3S hash MMU) machine-specific data and page flag bits. */
struct machine_specific ppc64_machine_specific = {
	.hwintrstack = NULL,
	.hwstackbuf = 0,
	.hwstacksize = 0,
	.pte_rpn_shift = PTE_RPN_SHIFT_DEFAULT,
	._page_pte = 0x0UL,
	._page_present = 0x1UL,
	._page_user = 0x2UL,
	._page_rw = 0x4UL,
	._page_guarded = 0x8UL,
	._page_coherent = 0x10UL,
	._page_no_cache = 0x20UL,
	._page_writethru = 0x40UL,
	._page_dirty = 0x80UL,
	._page_accessed = 0x100UL,
	.is_kvaddr = generic_is_kvaddr,
	.is_vmaddr = ppc64_is_vmaddr,
};

/* BOOK3E variant: different page flag bit positions and vmaddr checks. */
struct machine_specific book3e_machine_specific = {
	.hwintrstack = NULL,
	.hwstackbuf = 0,
	.hwstacksize = 0,
	.pte_rpn_shift = PTE_RPN_SHIFT_L4_BOOK3E_64K,
	._page_pte = 0x0UL,
	._page_present = 0x1UL,
	._page_user = 0xCUL,
	._page_rw = 0x30UL,
	._page_guarded = 0x100000UL,
	._page_coherent = 0x200000UL,
	._page_no_cache = 0x400000UL,
	._page_writethru = 0x800000UL,
	._page_dirty = 0x1000UL,
	._page_accessed = 0x40000UL,
	.is_kvaddr = book3e_is_kvaddr,
	.is_vmaddr = book3e_is_vmaddr,
};

/**
 * No additional checks are required on PPC64, for checking if PRSTATUS notes
 * is valid
 */
static int
ppc64_is_cpu_prstatus_valid(int cpu) { return TRUE; } #define SKIBOOT_BASE 0x30000000 /* * Do all necessary machine-specific setup here. This is called several * times during initialization. */ void ppc64_init(int when) { struct machine_specific *ms; #if defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the PPC64 architecture\n"); #endif switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf64_notes; break; case PRE_SYMTAB: machdep->machspec = &ppc64_machine_specific; machdep->verify_symbol = ppc64_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->stacksize = PPC64_STACK_SIZE; machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->flags |= MACHDEP_BT_TEXT; if (machdep->cmdline_args[0]) parse_cmdline_args(); machdep->clear_machdep_cache = ppc64_clear_machdep_cache; break; case PRE_GDB: /* * Recently there were changes made to kexec tools * to support 64K page size. With those changes * vmcore file obtained from a kernel which supports * 64K page size cannot be analyzed using crash on a * machine running with kernel supporting 4K page size * * The following modifications are required in crash * tool to be in sync with kexec tools. * * Look if the following symbol exists. If yes then * the dump was taken with a kernel supporting 64k * page size. So change the page size accordingly. * * Also moved the following code block from * PRE_SYMTAB case here. 
*/ if (symbol_exists("interrupt_base_book3e")) { machdep->machspec = &book3e_machine_specific; machdep->flags |= BOOK3E; machdep->kvbase = BOOK3E_VMBASE; } else machdep->kvbase = symbol_value("_stext"); if (symbol_exists("__hash_page_64K")) machdep->pagesize = PPC64_64K_PAGE_SIZE; else machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->identity_map_base = symbol_value("_stext"); machdep->is_kvaddr = machdep->machspec->is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ppc64_eframe_search; machdep->back_trace = ppc64_back_trace_cmd; machdep->processor_speed = ppc64_processor_speed; machdep->uvtop = ppc64_uvtop; machdep->kvtop = ppc64_kvtop; machdep->get_task_pgd = ppc64_get_task_pgd; machdep->get_stack_frame = ppc64_get_stack_frame; machdep->get_stackbase = ppc64_get_stackbase; machdep->get_stacktop = ppc64_get_stacktop; machdep->translate_pte = ppc64_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = ppc64_is_task_addr; machdep->dis_filter = ppc64_dis_filter; machdep->cmd_mach = ppc64_cmd_mach; machdep->get_smp_cpus = ppc64_get_smp_cpus; machdep->line_number_hooks = ppc64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->get_kvaddr_ranges = ppc64_get_kvaddr_ranges; machdep->init_kernel_pgd = NULL; machdep->is_cpu_prstatus_valid = ppc64_is_cpu_prstatus_valid; if (symbol_exists("vmemmap_populate")) { if (symbol_exists("vmemmap")) { 
readmem(symbol_value("vmemmap"), KVADDR, &machdep->machspec->vmemmap_base, sizeof(void *), "vmemmap", QUIET|FAULT_ON_ERROR); } else machdep->machspec->vmemmap_base = VMEMMAP_REGION_ID << REGION_SHIFT; machdep->flags |= VMEMMAP; } machdep->get_irq_affinity = generic_get_irq_affinity; machdep->show_interrupts = generic_show_interrupts; break; case POST_GDB: ms = machdep->machspec; if (!(machdep->flags & BOOK3E)) { /* * To determine if the kernel was running on OPAL based platform, * use struct opal, which is populated with relevant values. */ if (symbol_exists("opal")) { get_symbol_data("opal", sizeof(struct ppc64_opal), &(ms->opal)); if (ms->opal.base == SKIBOOT_BASE) machdep->flags |= OPAL_FW; } /* * On Power ISA 3.0 based server processors, a kernel can * run with radix MMU or standard MMU. Set the flag, * if it is radix MMU. */ if (symbol_exists("cur_cpu_spec") && MEMBER_EXISTS("cpu_spec", "mmu_features")) { ulong cur_cpu_spec; uint mmu_features, offset; get_symbol_data("cur_cpu_spec", sizeof(void *), &cur_cpu_spec); offset = MEMBER_OFFSET("cpu_spec", "mmu_features"); readmem(cur_cpu_spec + offset, KVADDR, &mmu_features, sizeof(uint), "cpu mmu features", FAULT_ON_ERROR); machdep->flags |= (mmu_features & RADIX_MMU); } /* * Starting with v3.14 we no longer use _PAGE_COHERENT * bit as it is always set on hash64 and on platforms * that cannot always set it, _PAGE_NO_CACHE and * _PAGE_WRITETHRU can be used to infer it. */ if (THIS_KERNEL_VERSION >= LINUX(3,14,0)) ms->_page_coherent = 0x0UL; /* * In kernel v4.5, _PAGE_PTE bit is introduced to * distinguish PTEs from pointers. */ if (THIS_KERNEL_VERSION >= LINUX(4,5,0)) { ms->_page_pte = 0x1UL; ms->_page_present = 0x2UL; ms->_page_user = 0x4UL; ms->_page_rw = 0x8UL; ms->_page_guarded = 0x10UL; } /* * Starting with kernel v4.6, to accommodate both * radix and hash MMU modes in a single kernel, * _PAGE_PTE & _PAGE_PRESENT page flags are changed. * Also, page table entries store physical addresses. 
*/ if (THIS_KERNEL_VERSION >= LINUX(4,6,0)) { ms->_page_pte = 0x1UL << 62; ms->_page_present = 0x1UL << 63; machdep->flags |= PHYS_ENTRY_L4; } if (THIS_KERNEL_VERSION >= LINUX(4,7,0)) { /* * Starting with kernel v4.7 page table entries * are always big endian on BOOK3S. Set this * flag if kernel is not big endian. */ if (__BYTE_ORDER == __LITTLE_ENDIAN) machdep->flags |= SWAP_ENTRY_L4; } } if (!(machdep->flags & (VM_ORIG|VM_4_LEVEL))) { if (THIS_KERNEL_VERSION >= LINUX(2,6,14)) { machdep->flags |= VM_4_LEVEL; } else { machdep->flags |= VM_ORIG; } } if (machdep->flags & VM_ORIG) { /* pre-2.6.14 layout */ free(machdep->pud); machdep->pud = NULL; machdep->ptrs_per_pgd = PTRS_PER_PGD; } else { /* 2.6.14 layout */ if (machdep->pagesize == 65536) { /* 64K pagesize */ if (machdep->flags & RADIX_MMU) { ms->l1_index_size = PTE_INDEX_SIZE_RADIX_64K; ms->l2_index_size = PMD_INDEX_SIZE_RADIX_64K; ms->l3_index_size = PUD_INDEX_SIZE_RADIX_64K; ms->l4_index_size = PGD_INDEX_SIZE_RADIX_64K; } else if (!(machdep->flags & BOOK3E) && (THIS_KERNEL_VERSION >= LINUX(4,6,0))) { ms->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10; if (THIS_KERNEL_VERSION >= LINUX(4,12,0)) { ms->l2_index_size = PMD_INDEX_SIZE_L4_64K_4_12; if (THIS_KERNEL_VERSION >= LINUX(4,17,0)) ms->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_17; else ms->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_12; ms->l4_index_size = PGD_INDEX_SIZE_L4_64K_4_12; } else { ms->l2_index_size = PMD_INDEX_SIZE_L4_64K_4_6; ms->l3_index_size = PUD_INDEX_SIZE_L4_64K_4_6; ms->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10; } } else if (THIS_KERNEL_VERSION >= LINUX(3,10,0)) { ms->l1_index_size = PTE_INDEX_SIZE_L4_64K_3_10; ms->l2_index_size = PMD_INDEX_SIZE_L4_64K_3_10; ms->l3_index_size = PUD_INDEX_SIZE_L4_64K; ms->l4_index_size = PGD_INDEX_SIZE_L4_64K_3_10; } else { ms->l1_index_size = PTE_INDEX_SIZE_L4_64K; ms->l2_index_size = PMD_INDEX_SIZE_L4_64K; ms->l3_index_size = PUD_INDEX_SIZE_L4_64K; ms->l4_index_size = PGD_INDEX_SIZE_L4_64K; } if (!(machdep->flags & 
BOOK3E)) ms->pte_rpn_shift = symbol_exists("demote_segment_4k") ? PTE_RPN_SHIFT_L4_64K_V2 : PTE_RPN_SHIFT_L4_64K_V1; if (!(machdep->flags & BOOK3E) && (THIS_KERNEL_VERSION >= LINUX(4,6,0))) { ms->pgd_masked_bits = PGD_MASKED_BITS_64K_4_6; ms->pud_masked_bits = PUD_MASKED_BITS_64K_4_6; ms->pmd_masked_bits = PMD_MASKED_BITS_64K_4_6; } else { ms->pgd_masked_bits = PGD_MASKED_BITS_64K; ms->pud_masked_bits = PUD_MASKED_BITS_64K; if ((machdep->flags & BOOK3E) && (THIS_KERNEL_VERSION >= LINUX(4,5,0))) ms->pmd_masked_bits = PMD_MASKED_BITS_BOOK3E_64K_4_5; else if (THIS_KERNEL_VERSION >= LINUX(3,11,0)) ms->pmd_masked_bits = PMD_MASKED_BITS_64K_3_11; else ms->pmd_masked_bits = PMD_MASKED_BITS_64K; } } else { /* 4K pagesize */ if (machdep->flags & RADIX_MMU) { ms->l1_index_size = PTE_INDEX_SIZE_RADIX_4K; ms->l2_index_size = PMD_INDEX_SIZE_RADIX_4K; ms->l3_index_size = PUD_INDEX_SIZE_RADIX_4K; ms->l4_index_size = PGD_INDEX_SIZE_RADIX_4K; } else { ms->l1_index_size = PTE_INDEX_SIZE_L4_4K; ms->l2_index_size = PMD_INDEX_SIZE_L4_4K; if (THIS_KERNEL_VERSION >= LINUX(3,7,0)) ms->l3_index_size = PUD_INDEX_SIZE_L4_4K_3_7; else ms->l3_index_size = PUD_INDEX_SIZE_L4_4K; ms->l4_index_size = PGD_INDEX_SIZE_L4_4K; if (machdep->flags & BOOK3E) ms->pte_rpn_shift = PTE_RPN_SHIFT_L4_BOOK3E_4K; else ms->pte_rpn_shift = THIS_KERNEL_VERSION >= LINUX(4,5,0) ? 
PTE_RPN_SHIFT_L4_4K_4_5 : PTE_RPN_SHIFT_L4_4K; } ms->pgd_masked_bits = PGD_MASKED_BITS_4K; ms->pud_masked_bits = PUD_MASKED_BITS_4K; ms->pmd_masked_bits = PMD_MASKED_BITS_4K; } ms->pte_rpn_mask = PTE_RPN_MASK_DEFAULT; if (!(machdep->flags & BOOK3E)) { if (THIS_KERNEL_VERSION >= LINUX(4,6,0)) { ms->pte_rpn_mask = PTE_RPN_MASK_L4_4_6; ms->pte_rpn_shift = PTE_RPN_SHIFT_L4_4_6; } if (THIS_KERNEL_VERSION >= LINUX(4,7,0)) { ms->pgd_masked_bits = PGD_MASKED_BITS_4_7; ms->pud_masked_bits = PUD_MASKED_BITS_4_7; ms->pmd_masked_bits = PMD_MASKED_BITS_4_7; } } /* Compute ptrs per each level */ ms->l1_shift = machdep->pageshift; ms->ptrs_per_l1 = (1 << ms->l1_index_size); ms->ptrs_per_l2 = (1 << ms->l2_index_size); ms->ptrs_per_l3 = (1 << ms->l3_index_size); ms->ptrs_per_l4 = (1 << ms->l4_index_size); machdep->ptrs_per_pgd = ms->ptrs_per_l4; /* Compute shifts */ ms->l2_shift = ms->l1_shift + ms->l1_index_size; ms->l3_shift = ms->l2_shift + ms->l2_index_size; ms->l4_shift = ms->l3_shift + ms->l3_index_size; } if (machdep->flags & VMEMMAP) ppc64_vmemmap_init(); machdep->section_size_bits = _SECTION_SIZE_BITS; set_ppc64_max_physmem_bits(); ppc64_init_cpu_info(); machdep->vmalloc_start = ppc64_vmalloc_start; MEMBER_OFFSET_INIT(thread_struct_pg_tables, "thread_struct", "pg_tables"); STRUCT_SIZE_INIT(irqdesc, "irqdesc"); STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t"); if (INVALID_SIZE(irqdesc) && INVALID_SIZE(irq_desc_t)) STRUCT_SIZE_INIT(irq_desc_t, "irq_desc"); /* as of 2.3.x PPC uses the generic irq handlers */ if (VALID_SIZE(irq_desc_t)) machdep->dump_irq = generic_dump_irq; else { machdep->dump_irq = ppc64_dump_irq; MEMBER_OFFSET_INIT(irqdesc_action, "irqdesc", "action"); MEMBER_OFFSET_INIT(irqdesc_ctl, "irqdesc", "ctl"); MEMBER_OFFSET_INIT(irqdesc_level, "irqdesc", "level"); } MEMBER_OFFSET_INIT(device_node_type, "device_node", "type"); MEMBER_OFFSET_INIT(device_node_allnext, "device_node", "allnext"); MEMBER_OFFSET_INIT(device_node_properties, "device_node", "properties"); 
MEMBER_OFFSET_INIT(property_name, "property", "name"); MEMBER_OFFSET_INIT(property_value, "property", "value"); MEMBER_OFFSET_INIT(property_next, "property", "next"); MEMBER_OFFSET_INIT(machdep_calls_setup_residual, "machdep_calls", "setup_residual"); MEMBER_OFFSET_INIT(RESIDUAL_VitalProductData, "RESIDUAL", "VitalProductData"); MEMBER_OFFSET_INIT(VPD_ProcessorHz, "VPD", "ProcessorHz"); MEMBER_OFFSET_INIT(bd_info_bi_intfreq, "bd_info", "bi_intfreq"); if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else machdep->nr_irqs = 0; if (symbol_exists("paca") && MEMBER_EXISTS("paca_struct", "xHrdIntStack")) { ulong paca_sym, offset; uint cpu, paca_size = STRUCT_SIZE("paca_struct"); /* * Get the HW Interrupt stack base and top values. * Note that, this stack will be used to store frames * when the CPU received IPI (only for 2.4 kernel). * Hence it is needed to retrieve IPI symbols * (Ex: smp_message_recv, xics_ipi_action, and etc) * and to get the top SP in the process's stack. */ offset = MEMBER_OFFSET("paca_struct", "xHrdIntStack"); paca_sym = symbol_value("paca"); if (!(ms->hwintrstack = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc hwintrstack space."); for (cpu = 0; cpu < kt->cpus; cpu++) { readmem(paca_sym + (paca_size * cpu) + offset, KVADDR, &ms->hwintrstack[cpu], sizeof(ulong), "PPC64 HW_intr_stack", FAULT_ON_ERROR); } ms->hwstacksize = 8 * machdep->pagesize; if ((ms->hwstackbuf = (char *)malloc(ms->hwstacksize)) == NULL) error(FATAL, "cannot malloc hwirqstack buffer space."); } machdep->get_current_task_reg = ppc64_get_current_task_reg; ppc64_init_paca_info(); if (!machdep->hz) { machdep->hz = HZ; if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) machdep->hz = 1000; } /* * IRQ stacks are introduced in 2.6 and also configurable. 
*/ if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && symbol_exists("hardirq_ctx")) ASSIGN_SIZE(irq_ctx) = STACKSIZE(); break; case POST_INIT: break; case LOG_ONLY: machdep->identity_map_base = kt->vmcoreinfo._stext_SYMBOL; break; } } #ifndef KSYMS_START #define KSYMS_START 1 #endif static ulong ppc64_task_to_stackbase(ulong task) { ulong stackbase; if (tt->flags & THREAD_INFO_IN_TASK) { readmem(task + OFFSET(task_struct_stack), KVADDR, &stackbase, sizeof(void *), "task_struct.stack", FAULT_ON_ERROR); return stackbase; } else if (tt->flags & THREAD_INFO) return task_to_thread_info(task); else return task; } static ulong ppc64_get_stackbase(ulong task) { return ppc64_task_to_stackbase(task); } static ulong ppc64_get_stacktop(ulong task) { return ppc64_task_to_stackbase(task) + STACKSIZE(); } void ppc64_dump_machdep_table(ulong arg) { struct machine_specific *ms = machdep->machspec; int i, c, others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & MACHDEP_BT_TEXT) fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); if (machdep->flags & VM_ORIG) fprintf(fp, "%sVM_ORIG", others++ ? "|" : ""); if (machdep->flags & VM_4_LEVEL) fprintf(fp, "%sVM_4_LEVEL", others++ ? "|" : ""); if (machdep->flags & VMEMMAP) fprintf(fp, "%sVMEMMAP", others++ ? "|" : ""); if (machdep->flags & VMEMMAP_AWARE) fprintf(fp, "%sVMEMMAP_AWARE", others++ ? "|" : ""); if (machdep->flags & BOOK3E) fprintf(fp, "%sBOOK3E", others++ ? "|" : ""); if (machdep->flags & PHYS_ENTRY_L4) fprintf(fp, "%sPHYS_ENTRY_L4", others++ ? "|" : ""); if (machdep->flags & SWAP_ENTRY_L4) fprintf(fp, "%sSWAP_ENTRY_L4", others++ ? "|" : ""); if (machdep->flags & RADIX_MMU) fprintf(fp, "%sRADIX_MMU", others++ ? "|" : ""); if (machdep->flags & OPAL_FW) fprintf(fp, "%sOPAL_FW", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ppc64_eframe_search()\n"); fprintf(fp, " back_trace: ppc64_back_trace_cmd()\n"); fprintf(fp, " processor_speed: ppc64_processor_speed()\n"); fprintf(fp, " uvtop: ppc64_uvtop()\n"); fprintf(fp, " kvtop: ppc64_kvtop()\n"); fprintf(fp, " get_task_pgd: ppc64_get_task_pgd()\n"); fprintf(fp, " dump_irq: ppc64_dump_irq()\n"); fprintf(fp, " get_stack_frame: ppc64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: ppc64_get_stackbase()\n"); fprintf(fp, " get_stacktop: ppc64_get_stacktop()\n"); fprintf(fp, " translate_pte: ppc64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ppc64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ppc64_is_task_addr()\n"); fprintf(fp, " verify_symbol: ppc64_verify_symbol()\n"); fprintf(fp, " dis_filter: ppc64_dis_filter()\n"); fprintf(fp, " cmd_mach: ppc64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ppc64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: %s\n", machdep->is_kvaddr == book3e_is_kvaddr ? 
"book3e_is_kvaddr()" : "generic_is_kvaddr()"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " get_kvaddr_ranges: ppc64_get_kvaddr_ranges()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " xendump_p2m_create: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: NULL\n"); fprintf(fp, " line_number_hooks: ppc64_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, "clear_machdep_cache: ppc64_clear_machdep_cache()\n"); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " machspec: %lx\n", (ulong)ms); fprintf(fp, " is_kvaddr: %s\n", ms->is_kvaddr == book3e_is_kvaddr ? "book3e_is_kvaddr()" : "generic_is_kvaddr()"); fprintf(fp, " is_vmaddr: %s\n", ms->is_vmaddr == book3e_is_vmaddr ? "book3e_is_vmaddr()" : "ppc64_is_vmaddr()"); if (ms->hwintrstack) { fprintf(fp, " hwintrstack[%d]: ", NR_CPUS); for (c = 0; c < NR_CPUS; c++) { fprintf(fp, "%s%016lx ", ((c % 4) == 0) ? 
"\n " : "", ms->hwintrstack[c]); } } else fprintf(fp, " hwintrstack: (unused)"); fprintf(fp, "\n"); fprintf(fp, " hwstackbuf: %lx\n", (ulong)ms->hwstackbuf); fprintf(fp, " hwstacksize: %d\n", ms->hwstacksize); fprintf(fp, " l4_index_size: %d\n", ms->l4_index_size); fprintf(fp, " l3_index_size: %d\n", ms->l3_index_size); fprintf(fp, " l2_index_size: %d\n", ms->l2_index_size); fprintf(fp, " l1_index_size: %d\n", ms->l1_index_size); fprintf(fp, " ptrs_per_l4: %d\n", ms->ptrs_per_l4); fprintf(fp, " ptrs_per_l3: %d\n", ms->ptrs_per_l3); fprintf(fp, " ptrs_per_l2: %d\n", ms->ptrs_per_l2); fprintf(fp, " ptrs_per_l1: %d\n", ms->ptrs_per_l1); fprintf(fp, " l4_shift: %d\n", ms->l4_shift); fprintf(fp, " l3_shift: %d\n", ms->l3_shift); fprintf(fp, " l2_shift: %d\n", ms->l2_shift); fprintf(fp, " l1_shift: %d\n", ms->l1_shift); fprintf(fp, " pte_rpn_mask: %lx\n", ms->pte_rpn_mask); fprintf(fp, " pte_rpn_shift: %d\n", ms->pte_rpn_shift); fprintf(fp, " pgd_masked_bits: %lx\n", ms->pgd_masked_bits); fprintf(fp, " pud_masked_bits: %lx\n", ms->pud_masked_bits); fprintf(fp, " pmd_masked_bits: %lx\n", ms->pmd_masked_bits); fprintf(fp, " vmemmap_base: "); if (ms->vmemmap_base) fprintf(fp, "%lx\n", ms->vmemmap_base); else fprintf(fp, "(unused)\n"); if (ms->vmemmap_cnt) { fprintf(fp, " vmemmap_cnt: %d\n", ms->vmemmap_cnt); fprintf(fp, " vmemmap_psize: %d\n", ms->vmemmap_psize); for (i = 0; i < ms->vmemmap_cnt; i++) { fprintf(fp, " vmemmap_list[%d]: virt: %lx phys: %lx\n", i, ms->vmemmap_list[i].virt, ms->vmemmap_list[i].phys); } } else { fprintf(fp, " vmemmap_cnt: (unused)\n"); fprintf(fp, " vmemmap_page_size: (unused)\n"); fprintf(fp, " vmemmap_list[]: (unused)\n"); } } /* * Virtual to physical memory translation. This function will be called * by both ppc64_kvtop and ppc64_uvtop. 
*/
/*
 * Walk the (pre-4-level) ppc64 page tables for vaddr starting at pgd.
 * On success stores the physical address through paddr and returns TRUE;
 * returns FALSE for an unmapped/non-present translation.  When verbose
 * is set, each level of the walk is printed to fp.
 */
static int
ppc64_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose)
{
	ulong *page_dir;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte, pmd_pte;
	ulong pte;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/* 2.4 kernels use a different PGD index calculation */
	if (THIS_KERNEL_VERSION < LINUX(2,6,0))
		page_dir = (ulong *)((uint *)pgd + PGD_OFFSET_24(vaddr));
	else
		page_dir = (ulong *)((uint *)pgd + PGD_OFFSET(vaddr));

	/* cache the page holding the pgd, then read the 32-bit entry */
	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = UINT(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);

	if (!pgd_pte)
		return FALSE;

	/* pgd entry holds a PFN; shift up to the pmd page's address */
	pgd_pte <<= PAGESHIFT();
	page_middle = (ulong *)((uint *)pgd_pte + PMD_OFFSET(vaddr));

	FILL_PMD(PTOV(PAGEBASE(pgd_pte)), KVADDR, PAGESIZE());
	pmd_pte = UINT(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);

	if (!(pmd_pte))
		return FALSE;

	if (THIS_KERNEL_VERSION < LINUX(2,6,0))
		pmd_pte <<= PAGESHIFT();
	else
		pmd_pte = ((pmd_pte << PAGESHIFT()) >> PMD_TO_PTEPAGE_SHIFT);

	page_table = (ulong *)pmd_pte + (BTOP(vaddr) & (PTRS_PER_PTE - 1));

	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, (ulong)page_table);

	FILL_PTBL(PTOV(PAGEBASE(pmd_pte)), KVADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));

	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte);

	if (!(pte & _PAGE_PRESENT)) {
		/* non-present but non-zero pte: show its swap decoding */
		if (pte && verbose) {
			fprintf(fp, "\n");
			ppc64_translate_pte(pte, 0, PTE_RPN_SHIFT_DEFAULT);
		}
		return FALSE;
	}

	*paddr = PAGEBASE(PTOB(pte >> PTE_RPN_SHIFT_DEFAULT)) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
		ppc64_translate_pte(pte, 0, PTE_RPN_SHIFT_DEFAULT);
	}

	return TRUE;
}

/*
 * Virtual to physical memory translation. This function will be called
 * by both ppc64_kvtop and ppc64_uvtop.
*/
/*
 * Walk the 4-level ppc64 page tables for vaddr starting at level4 (the
 * top-level directory).  Handles huge ptes/huge page directories at any
 * level by jumping to "out" with pdshift set to that level's shift.
 * Stores the physical address through paddr and returns TRUE on success;
 * FALSE otherwise.  The entries are byte-swapped iff SWAP_ENTRY_L4 is set.
 */
static int
ppc64_vtop_level4(ulong vaddr, ulong *level4, physaddr_t *paddr, int verbose)
{
	ulong *pgdir;
	ulong *page_upper;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte, pud_pte, pmd_pte;
	ulong pte;
	uint pdshift;
	uint hugepage_type = 0; /* 0: regular entry; 1: huge pte; 2: huge pd */
	uint swap = !!(machdep->flags & SWAP_ENTRY_L4);

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)level4);

	pgdir = (ulong *)((ulong *)level4 + PGD_OFFSET_L4(vaddr));
	FILL_PGD(PAGEBASE(level4), KVADDR, PAGESIZE());
	pgd_pte = swap64(ULONG(machdep->pgd + PAGEOFFSET(pgdir)), swap);

	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgdir, pgd_pte);

	if (!pgd_pte)
		return FALSE;

	hugepage_type = get_ptetype(pgd_pte);
	if (hugepage_type) {
		pte = pgd_pte;
		pdshift = machdep->machspec->l4_shift;
		goto out;
	}

	/* Sometimes we don't have level3 pagetable entries */
	if (machdep->machspec->l3_index_size != 0) {
		pgd_pte = pgd_page_vaddr_l4(pgd_pte);
		page_upper = (ulong *)((ulong *)pgd_pte + PUD_OFFSET_L4(vaddr));
		FILL_PUD(PAGEBASE(pgd_pte), KVADDR, PAGESIZE());
		pud_pte = swap64(ULONG(machdep->pud + PAGEOFFSET(page_upper)), swap);

		if (verbose)
			fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte);

		if (!pud_pte)
			return FALSE;

		hugepage_type = get_ptetype(pud_pte);
		if (hugepage_type) {
			pte = pud_pte;
			pdshift = machdep->machspec->l3_shift;
			goto out;
		}
	} else {
		/* no PUD level: the pgd entry points at the pmd directory */
		pud_pte = pgd_pte;
	}

	pud_pte = pud_page_vaddr_l4(pud_pte);
	page_middle = (ulong *)((ulong *)pud_pte + PMD_OFFSET_L4(vaddr));
	FILL_PMD(PAGEBASE(pud_pte), KVADDR, PAGESIZE());
	pmd_pte = swap64(ULONG(machdep->pmd + PAGEOFFSET(page_middle)), swap);

	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);

	if (!(pmd_pte))
		return FALSE;

	hugepage_type = get_ptetype(pmd_pte);
	if (hugepage_type) {
		pte = pmd_pte;
		pdshift = machdep->machspec->l2_shift;
		goto out;
	}

	pmd_pte = pmd_page_vaddr_l4(pmd_pte);
	page_table = (ulong *)(pmd_pte) +
		(BTOP(vaddr) & (machdep->machspec->ptrs_per_l1 - 1));
	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n",(ulong)page_middle, (ulong)page_table);

	FILL_PTBL(PAGEBASE(pmd_pte), KVADDR, PAGESIZE());
	pte = swap64(ULONG(machdep->ptbl + PAGEOFFSET(page_table)), swap);

	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte);

	if (!(pte & _PAGE_PRESENT)) {
		if (pte && verbose) {
			fprintf(fp, "\n");
			ppc64_translate_pte(pte, 0, machdep->machspec->pte_rpn_shift);
		}
		return FALSE;
	}

out:
	if (hugepage_type) {
		if (hugepage_type == 2) {
			/* TODO: Calculate the offset within the huge page
			 * directory for this huge page to get corresponding
			 * physical address. In the current form, it may
			 * return the physical address of the first huge page
			 * in this directory for all the huge pages
			 * in this huge page directory.
			 */
			ulong hugepd = hugepage_dir(pte);

			readmem(hugepd, KVADDR, &pte, sizeof(pte),
				"hugepd_entry", RETURN_ON_ERROR);

			if (verbose)
				fprintf(fp, " HUGE PD: %lx => %lx\n", hugepd, pte);

			if (!pte)
				return FALSE;
		}

		/* offset within the huge page uses that level's shift */
		*paddr = PAGEBASE(PTOB((pte & PTE_RPN_MASK) >> PTE_RPN_SHIFT)) +
			(vaddr & ((1UL << pdshift) - 1));
	} else {
		*paddr = PAGEBASE(PTOB((pte & PTE_RPN_MASK) >> PTE_RPN_SHIFT)) +
			PAGEOFFSET(vaddr);
	}

	if (verbose) {
		if (hugepage_type)
			fprintf(fp, " HUGE PAGE: %lx\n\n", PAGEBASE(*paddr));
		else
			fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
		ppc64_translate_pte(pte, 0, machdep->machspec->pte_rpn_shift);
	}

	return TRUE;
}

/*
 * Translates a user virtual address to its physical address. cmd_vtop()
 * sets the verbose flag so that the pte translation gets displayed; all
 * other callers quietly accept the translation.
 *
 * This routine can also take mapped kernel virtual addresses if the -u flag
 * was passed to cmd_vtop().  If so, it makes the translation using the
 * kernel-memory PGD entry instead of swapper_pg_dir.
*/
static int
ppc64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong mm, active_mm;
	ulong *pgd;

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	/*
	 * Kernel threads have no mm of their own; use either the
	 * thread_struct pg_tables (older kernels) or the borrowed
	 * active_mm's pgd.
	 */
	if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) {
		if (VALID_MEMBER(thread_struct_pg_tables))
			pgd = (ulong *)machdep->get_task_pgd(tc->task);
		else {
			if (INVALID_MEMBER(task_struct_active_mm))
				error(FATAL, "no pg_tables or active_mm?\n");

			readmem(tc->task + OFFSET(task_struct_active_mm),
				KVADDR, &active_mm, sizeof(void *),
				"task active_mm contents", FAULT_ON_ERROR);

			if (!active_mm)
				error(FATAL,
				     "no active_mm for this kernel thread\n");

			readmem(active_mm + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long),
				"mm_struct pgd", FAULT_ON_ERROR);
		}
	} else {
		/*
		 * task_mm(..., TRUE) fills tt->mm_struct with the task's
		 * mm_struct; pull the pgd from the cached copy, or read it
		 * from the dump if the mm couldn't be cached.
		 */
		if ((mm = task_mm(tc->task, TRUE)))
			pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	if (machdep->flags & VM_4_LEVEL)
		return ppc64_vtop_level4(vaddr, pgd, paddr, verbose);
	else
		return ppc64_vtop(vaddr, pgd, paddr, verbose);
}

/*
 * Translates a kernel virtual address to its physical address. cmd_vtop()
 * sets the verbose flag so that the pte translation gets displayed; all
 * other callers quietly accept the translation.
*/ static int ppc64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if ((machdep->flags & VMEMMAP) && (kvaddr >= machdep->machspec->vmemmap_base)) return ppc64_vmemmap_to_phys(kvaddr, paddr, verbose); if (!vt->vmalloc_start) { *paddr = VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); if (!verbose) return TRUE; } if (machdep->flags & VM_4_LEVEL) return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); else return ppc64_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); } static void ppc64_init_paca_info(void) { struct machine_specific *ms = machdep->machspec; ulong *paca_ptr; int i; if (!(paca_ptr = (ulong *)calloc(kt->cpus, sizeof(ulong)))) error(FATAL, "cannot malloc paca pointers space.\n"); /* Get paca pointers for all CPUs. */ if (symbol_exists("paca_ptrs")) { ulong paca_loc; readmem(symbol_value("paca_ptrs"), KVADDR, &paca_loc, sizeof(void *), "paca double pointer", RETURN_ON_ERROR); readmem(paca_loc, KVADDR, paca_ptr, sizeof(void *) * kt->cpus, "paca pointers", RETURN_ON_ERROR); } else if (symbol_exists("paca") && (get_symbol_type("paca", NULL, NULL) == TYPE_CODE_PTR)) { readmem(symbol_value("paca"), KVADDR, paca_ptr, sizeof(void *) * kt->cpus, "paca pointers", RETURN_ON_ERROR); } else { free(paca_ptr); return; } /* Initialize emergency stacks info. 
*/ if (MEMBER_EXISTS("paca_struct", "emergency_sp")) { ulong offset = MEMBER_OFFSET("paca_struct", "emergency_sp"); if (!(ms->emergency_sp = (ulong *)calloc(kt->cpus, sizeof(ulong)))) error(FATAL, "cannot malloc emergency stack space.\n"); for (i = 0; i < kt->cpus; i++) readmem(paca_ptr[i] + offset, KVADDR, &ms->emergency_sp[i], sizeof(void *), "paca->emergency_sp", RETURN_ON_ERROR); } if (MEMBER_EXISTS("paca_struct", "nmi_emergency_sp")) { ulong offset = MEMBER_OFFSET("paca_struct", "nmi_emergency_sp"); if (!(ms->nmi_emergency_sp = (ulong *)calloc(kt->cpus, sizeof(ulong)))) error(FATAL, "cannot malloc NMI emergency stack space.\n"); for (i = 0; i < kt->cpus; i++) readmem(paca_ptr[i] + offset, KVADDR, &ms->nmi_emergency_sp[i], sizeof(void *), "paca->nmi_emergency_sp", RETURN_ON_ERROR); } if (MEMBER_EXISTS("paca_struct", "mc_emergency_sp")) { ulong offset = MEMBER_OFFSET("paca_struct", "mc_emergency_sp"); if (!(ms->mc_emergency_sp = (ulong *)calloc(kt->cpus, sizeof(ulong)))) error(FATAL, "cannot malloc machine check emergency stack space.\n"); for (i = 0; i < kt->cpus; i++) readmem(paca_ptr[i] + offset, KVADDR, &ms->mc_emergency_sp[i], sizeof(void *), "paca->mc_emergency_sp", RETURN_ON_ERROR); } free(paca_ptr); } /* * Verify that the kernel has made the vmemmap list available, * and if so, stash the relevant data required to make vtop * translations. */ static void ppc64_vmemmap_init(void) { int i, psize, shift, cnt; struct list_data list_data, *ld; long backing_size, virt_addr_offset, phys_offset, list_offset; ulong *vmemmap_list; char *vmemmap_buf; struct machine_specific *ms = machdep->machspec; ld = &list_data; BZERO(ld, sizeof(struct list_data)); /* * vmemmap_list is missing or set to 0 in the kernel would imply * vmemmap region is mapped in the kernel pagetable. So, read vmemmap_list * anyway and use the translation method accordingly. 
*/ if (kernel_symbol_exists("vmemmap_list")) readmem(symbol_value("vmemmap_list"), KVADDR, &ld->start, sizeof(void *), "vmemmap_list", RETURN_ON_ERROR|QUIET); if (!ld->start) { /* * vmemmap_list is set to 0 or missing. Do kernel pagetable walk * for vmemmap address translation. */ ms->vmemmap_list = NULL; ms->vmemmap_cnt = 0; machdep->flags |= VMEMMAP_AWARE; return; } if (!(kernel_symbol_exists("mmu_psize_defs")) || !(kernel_symbol_exists("mmu_vmemmap_psize")) || !STRUCT_EXISTS("vmemmap_backing") || !STRUCT_EXISTS("mmu_psize_def") || !MEMBER_EXISTS("mmu_psize_def", "shift") || !MEMBER_EXISTS("vmemmap_backing", "phys") || !MEMBER_EXISTS("vmemmap_backing", "virt_addr") || !MEMBER_EXISTS("vmemmap_backing", "list")) return; backing_size = STRUCT_SIZE("vmemmap_backing"); virt_addr_offset = MEMBER_OFFSET("vmemmap_backing", "virt_addr"); phys_offset = MEMBER_OFFSET("vmemmap_backing", "phys"); list_offset = MEMBER_OFFSET("vmemmap_backing", "list"); if (!readmem(symbol_value("mmu_vmemmap_psize"), KVADDR, &psize, sizeof(int), "mmu_vmemmap_psize", RETURN_ON_ERROR)) return; if (!readmem(symbol_value("mmu_psize_defs") + (STRUCT_SIZE("mmu_psize_def") * psize) + MEMBER_OFFSET("mmu_psize_def", "shift"), KVADDR, &shift, sizeof(int), "mmu_psize_def shift", RETURN_ON_ERROR)) return; ms->vmemmap_psize = 1 << shift; ld->end = symbol_value("vmemmap_list"); ld->list_head_offset = list_offset; hq_open(); cnt = do_list(ld); vmemmap_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(vmemmap_list, cnt); hq_close(); if ((ms->vmemmap_list = (struct ppc64_vmemmap *)malloc(cnt * sizeof(struct ppc64_vmemmap))) == NULL) error(FATAL, "cannot malloc vmemmap list space"); vmemmap_buf = GETBUF(backing_size); for (i = 0; i < cnt; i++) { if (!readmem(vmemmap_list[i], KVADDR, vmemmap_buf, backing_size, "vmemmap_backing", RETURN_ON_ERROR)) { free(ms->vmemmap_list); goto out; } ms->vmemmap_list[i].phys = ULONG(vmemmap_buf + phys_offset); ms->vmemmap_list[i].virt = ULONG(vmemmap_buf + 
virt_addr_offset); if (ms->vmemmap_list[i].virt < ms->vmemmap_base) ms->vmemmap_base = ms->vmemmap_list[i].virt; } ms->vmemmap_cnt = cnt; machdep->flags |= VMEMMAP_AWARE; if (CRASHDEBUG(1)) fprintf(fp, "ppc64_vmemmap_init: vmemmap base: %lx\n", ms->vmemmap_base); out: FREEBUF(vmemmap_buf); FREEBUF(vmemmap_list); } /* * If the vmemmap address translation information is stored in the kernel, * make the translation. */ static int ppc64_vmemmap_to_phys(ulong kvaddr, physaddr_t *paddr, int verbose) { int i; ulong offset; struct machine_specific *ms = machdep->machspec; if (!(machdep->flags & VMEMMAP_AWARE)) { /* * During runtime, just fail the command. */ if (vt->flags & VM_INIT) error(FATAL, "cannot translate vmemmap address: %lx\n", kvaddr); /* * During vm_init() initialization, print a warning message. */ error(WARNING, "cannot translate vmemmap kernel virtual addresses:\n" " commands requiring page structure contents" " will fail\n\n"); return FALSE; } /** * When vmemmap_list is not populated, kernel does the mapping in init_mm * page table, so do a pagetable walk in kernel page table */ if (!ms->vmemmap_list) return ppc64_vtop_level4(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); for (i = 0; i < ms->vmemmap_cnt; i++) { if ((kvaddr >= ms->vmemmap_list[i].virt) && (kvaddr < (ms->vmemmap_list[i].virt + ms->vmemmap_psize))) { offset = kvaddr - ms->vmemmap_list[i].virt; *paddr = ms->vmemmap_list[i].phys + offset; return TRUE; } } return FALSE; } /* * Determine where vmalloc'd memory starts. 
*/
static ulong
ppc64_vmalloc_start(void)
{
	return (first_vmalloc_address());
}

/*
 * Decide whether the given address plausibly refers to a task_struct.
 */
static int
ppc64_is_task_addr(ulong task)
{
	int i;

	if (tt->flags & THREAD_INFO)
		return IS_KVADDR(task);
	else if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0))
		return TRUE;

	/* fall back: accept any per-cpu idle thread address */
	for (i = 0; i < kt->cpus; i++)
		if (task == tt->idle_threads[i])
			return TRUE;

	return FALSE;
}

/*
 * Determine (and cache in machdep->mhz) the processor speed, trying in
 * order: ppc_proc_freq, the Open Firmware device tree ("allnodes"), and
 * finally the PReP residual data / bd_info for machines without OF.
 */
static ulong
ppc64_processor_speed(void)
{
	ulong res, value, ppc_md, md_setup_res;
	ulong prep_setup_res;
	ulong node, type, name, properties;
	char str_buf[32];
	uint len;
	ulong mhz = 0;

	if (machdep->mhz)
		return(machdep->mhz);

	if (symbol_exists("ppc_proc_freq")) {
		get_symbol_data("ppc_proc_freq", sizeof(ulong), &mhz);
		mhz /= 1000000;
		return (machdep->mhz = mhz);
	}

	if(symbol_exists("allnodes")) {
		get_symbol_data("allnodes", sizeof(void *), &node);
		/* walk the device-node list looking for a "cpu" node */
		while(node) {
			readmem(node+OFFSET(device_node_type),
				KVADDR, &type, sizeof(ulong), "node type",
				FAULT_ON_ERROR);
			if(type != 0) {
				len = read_string(type, str_buf,
					sizeof(str_buf));
				if(len && (strcasecmp(str_buf, "cpu") == 0))
					break;
			}
			readmem(node+OFFSET(device_node_allnext),
				KVADDR, &node, sizeof(ulong),
				"node allnext", FAULT_ON_ERROR);
		}

		/* now, if we found a CPU node, get the speed property */
		if(node) {
			readmem(node+OFFSET(device_node_properties),
				KVADDR, &properties, sizeof(ulong),
				"node properties", FAULT_ON_ERROR);

			while(properties) {
				readmem(properties+OFFSET(property_name),
					KVADDR, &name, sizeof(ulong),
					"property name", FAULT_ON_ERROR);

				len = read_string(name, str_buf,
					sizeof(str_buf));

				if (len && (strcasecmp(str_buf,
				    "clock-frequency") == 0)) {
					/* found the right cpu property */

					readmem(properties+
						OFFSET(property_value),
						KVADDR, &value, sizeof(ulong),
						"clock freqency pointer",
						FAULT_ON_ERROR);
					/* 32-bit frequency value */
					readmem(value, KVADDR, &mhz,
						sizeof(int),
						"clock frequency value",
						FAULT_ON_ERROR);
					mhz /= 1000000;
					break;
				} else if(len && (strcasecmp(str_buf,
					"ibm,extended-clock-frequency") == 0)){
					/* found the right cpu property */

					readmem(properties+
						OFFSET(property_value),
						KVADDR, &value, sizeof(ulong),
						"clock freqency pointer",
						FAULT_ON_ERROR);
					/* 64-bit frequency value */
					readmem(value, KVADDR, &mhz,
						sizeof(ulong),
						"clock frequency value",
						FAULT_ON_ERROR);
					mhz /= 1000000;
					break;
				}
				/* keep looking */

				readmem(properties+
					OFFSET(property_next),
					KVADDR, &properties, sizeof(ulong),
					"property next", FAULT_ON_ERROR);
			}
			if(!properties) {
				/* didn't find the cpu speed for some reason */
				return (machdep->mhz = 0);
			}
		}
	}

	/* for machines w/o OF */
	/* untested, but in theory this should work on prep machines */
	if (symbol_exists("res") && !mhz) {
		get_symbol_data("res", sizeof(void *), &res);

		if (symbol_exists("prep_setup_residual")) {
			get_symbol_data("prep_setup_residual",
				sizeof(void *), &prep_setup_res);
			get_symbol_data("ppc_md", sizeof(void *), &ppc_md);
			readmem(ppc_md +
				OFFSET(machdep_calls_setup_residual),
				KVADDR, &md_setup_res, sizeof(ulong),
				"ppc_md setup_residual", FAULT_ON_ERROR);

			if(prep_setup_res == md_setup_res) {
				/* PREP machine */
				readmem(res+
					OFFSET(RESIDUAL_VitalProductData)+
					OFFSET(VPD_ProcessorHz),
					KVADDR, &mhz, sizeof(ulong),
					"res VitalProductData",
					FAULT_ON_ERROR);
				/* value may be in Hz; scale down if so */
				mhz = (mhz > 1024) ? mhz >> 20 : mhz;
			}
		}

		if(!mhz) {
			/* everything else seems to do this the same way... */
			readmem(res +
				OFFSET(bd_info_bi_intfreq),
				KVADDR, &mhz, sizeof(ulong),
				"bd_info bi_intfreq", FAULT_ON_ERROR);
			mhz /= 1000000;
		}
	}
	/* else...well, we don't have OF, or a residual structure, so
	 * just print unknown MHz
	 */

	return (machdep->mhz = (ulong)mhz);
}

/*
 * Accept or reject a symbol from the kernel namelist.
 */
static int
ppc64_verify_symbol(const char *name, ulong value, char type)
{
	if (CRASHDEBUG(8) && name && strlen(name))
		fprintf(fp, "%08lx %s\n", value, name);

	/* symbols preceding _start/_stext are rejected */
	if (STREQ(name, "_start") || STREQ(name, "_stext"))
		machdep->flags |= KSYMS_START;

	return (name && strlen(name) && (machdep->flags & KSYMS_START) &&
		!STREQ(name, "Letext") && !STRNEQ(name, "__func__."));
}

/*
 * Get the relevant page directory pointer from a task structure.
*/
static ulong
ppc64_get_task_pgd(ulong task)
{
	long offset;
	ulong pg_tables;

	/* thread member name differs between kernel versions */
	offset = VALID_MEMBER(task_struct_thread) ?
		OFFSET(task_struct_thread) : OFFSET(task_struct_tss);

	if (INVALID_MEMBER(thread_struct_pg_tables))
		error(FATAL,
		   "pg_tables does not exist in this kernel's thread_struct\n");
	offset += OFFSET(thread_struct_pg_tables);

	readmem(task + offset, KVADDR, &pg_tables, sizeof(ulong),
		"task thread pg_tables", FAULT_ON_ERROR);

	return(pg_tables);
}

/*
 * Translate a PTE, returning TRUE if the page is present.
 * If a physaddr pointer is passed in, don't print anything.
 */
static int
ppc64_translate_pte(ulong pte, void *physaddr, ulonglong pte_rpn_shift)
{
	int c, len1, len2, len3, others, page_present;
	char buf[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char *arglist[MAXARGS];
	ulong paddr;

	/* the "pte" command always uses the live machspec shift */
	if (STREQ(pc->curcmd, "pte"))
		pte_rpn_shift = machdep->machspec->pte_rpn_shift;
	paddr = PTOB(pte >> pte_rpn_shift);
	page_present = !!(pte & _PAGE_PRESENT);

	if (physaddr) {
		*((ulong *)physaddr) = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));

	/* non-present, non-zero pte: display as a swap entry */
	if (!page_present && pte) {
		swap_location(pte, buf);
		if ((c = parse_line(buf, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		fprintf(fp, "%s ", mkstring(buf2, len1, CENTER|LJUST, "PTE"));

		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s\n",
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));

		return page_present;
	}

	fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE"));

	sprintf(physbuf, "%lx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER|RJUST, NULL),
		mkstring(physbuf, len2, CENTER|RJUST, NULL));
	fprintf(fp, "(");
	others = 0;

	/* decode each known flag bit into a readable name */
	if (pte) {
		if (pte & _PAGE_PTE)
			fprintf(fp, "%sPTE", others++ ? "|" : "");
		if (pte & _PAGE_PRESENT)
			fprintf(fp, "%sPRESENT", others++ ? "|" : "");
		if (pte & _PAGE_USER)
			fprintf(fp, "%sUSER", others++ ? "|" : "");
		if (pte & _PAGE_RW)
			fprintf(fp, "%sRW", others++ ? "|" : "");
		if (pte & _PAGE_GUARDED)
			fprintf(fp, "%sGUARDED", others++ ? "|" : "");
		if (pte & _PAGE_COHERENT)
			fprintf(fp, "%sCOHERENT", others++ ? "|" : "");
		if (pte & _PAGE_NO_CACHE)
			fprintf(fp, "%sNO_CACHE", others++ ? "|" : "");
		if (pte & _PAGE_WRITETHRU)
			fprintf(fp, "%sWRITETHRU", others++ ? "|" : "");
		if (pte & _PAGE_DIRTY)
			fprintf(fp, "%sDIRTY", others++ ? "|" : "");
		if (pte & _PAGE_ACCESSED)
			fprintf(fp, "%sACCESSED", others++ ? "|" : "");
	} else
		fprintf(fp, "no mapping");

	fprintf(fp, ")\n");

	return page_present;
}

/*
 * The user specified SP could be in HW interrupt stack for tasks running on
 * other CPUs. Hence, get the SP which is in process's stack.
 */
static ulong
ppc64_check_sp_in_HWintrstack(ulong sp, struct bt_info *bt)
{
	/*
	 * Since the separate HW Interrupt stack is involved to store
	 * IPI frames, printing all stack symbols or searching for exception
	 * frames for running tasks on other CPUS is tricky. The simple
	 * solution is - ignore HW intr stack and search in the process stack.
	 * Anyway the user will be interested only frames that are
	 * involved before receiving CALL_FUNCTION_IPI.
	 * So, if the SP is not within the stack, read the top value
	 * from the HW Interrupt stack which is the SP points to top
	 * frame in the process's stack.
	 *
	 * Note: HW Interrupt stack is used only in 2.4 kernel.
	 */
	if (machdep->machspec->hwintrstack &&
	    is_task_active(bt->task) && (bt->task != tt->panic_task)) {
		ulong newsp;

		readmem(machdep->machspec->hwintrstack[bt->tc->processor],
			KVADDR, &newsp, sizeof(ulong),
			"stack pointer", FAULT_ON_ERROR);
		if (INSTACK(newsp, bt))
			sp = newsp;
	}

	return sp;
}

/*
 * Look for likely exception frames in a stack.
 */
static int
ppc64_eframe_search(struct bt_info *bt_in)
{
	ulong addr;
	struct bt_info bt_local, *bt;
	ulong *stack, *first, *last;
	ulong irqstack;
	char *mode;
	ulong eframe_addr;
	int c, cnt;
	struct ppc64_pt_regs *regs;

	bt = bt_in;

	/*
	 * BT_EFRAME_SEARCH2 ("bt -E"): scan every per-cpu hard and soft
	 * IRQ stack by recursing with the flag cleared and hp->esp set
	 * to each IRQ stack in turn.
	 */
	if (bt->flags & BT_EFRAME_SEARCH2) {
		if (!(tt->flags & IRQSTACKS)) {
			error(INFO, "This kernel does not have IRQ stacks\n");
			return 0;
		}

		BCOPY(bt_in, &bt_local, sizeof(struct bt_info));
		bt = &bt_local;
		bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2;

		for (c = 0; c < NR_CPUS; c++) {
			if (tt->hardirq_ctx[c]) {
				if ((bt->flags & BT_CPUMASK) &&
				    !(NUM_IN_BITMAP(bt->cpumask, c)))
					continue;
				bt->hp->esp = tt->hardirq_ctx[c];
				fprintf(fp, "CPU %d HARD IRQ STACK:\n", c);
				if ((cnt = ppc64_eframe_search(bt)))
					fprintf(fp, "\n");
				else
					fprintf(fp, "(none found)\n\n");
			}
		}

		for (c = 0; c < NR_CPUS; c++) {
			if (tt->softirq_ctx[c]) {
				if ((bt->flags & BT_CPUMASK) &&
				    !(NUM_IN_BITMAP(bt->cpumask, c)))
					continue;
				bt->hp->esp = tt->softirq_ctx[c];
				fprintf(fp, "CPU %d SOFT IRQ STACK:\n", c);
				if ((cnt = ppc64_eframe_search(bt)))
					fprintf(fp, "\n");
				else
					fprintf(fp, "(none found)\n\n");
			}
		}

		return 0;
	}

	/* pick the starting scan address from the user-supplied SP, if any */
	if (bt->hp && bt->hp->esp) {
		BCOPY(bt_in, &bt_local, sizeof(struct bt_info));
		bt = &bt_local;
		addr = bt->hp->esp;
		if ((irqstack = ppc64_in_irqstack(addr))) {
			bt->stackbase = irqstack;
			bt->stacktop = irqstack + STACKSIZE();
			alter_stackbuf(bt);
			addr = bt->stackbase +
				roundup(SIZE(thread_info), sizeof(ulong));
		} else if (!INSTACK(addr, bt)) {
			enum emergency_stack_type estype;

			if ((estype = ppc64_in_emergency_stack(bt->tc->processor, addr, false)))
				ppc64_set_bt_emergency_stack(estype, bt);

			/*
			 * If the user specified SP is in HW interrupt stack
			 * (only for tasks running on other CPUs and in 2.4
			 * kernel), get the top SP points to process's stack.
			 */
			addr = ppc64_check_sp_in_HWintrstack(addr, bt);
			if (!INSTACK(addr, bt))
				error(FATAL,
				"unrecognized stack address for this task: %lx\n", addr);
		}
	} else if (tt->flags & THREAD_INFO)
		addr = bt->stackbase +
			roundup(SIZE(thread_info), sizeof(ulong));
	else
		addr = bt->stackbase +
			roundup(SIZE(task_struct), sizeof(ulong));

	if (!INSTACK(addr, bt))
		return(0);

	stack = (ulong *)bt->stackbuf;
	first = stack + ((addr - bt->stackbase) / sizeof(ulong));
	last = stack + (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) /
		sizeof(ulong));

	for ( ; first <= last; first++) {
		char *efrm_str = NULL;
		eframe_addr = bt->stackbase + sizeof(ulong) * (first - stack);

		if (THIS_KERNEL_VERSION < LINUX(2,6,0)) {
			/* 2.4: treat every word as a candidate pt_regs */
			regs = (struct ppc64_pt_regs *)first;
			if (!IS_KVADDR(regs->gpr[1]) || !IS_KVADDR(regs->nip)
				|| !is_kernel_text(regs->nip))
				if (!IS_UVADDR(regs->gpr[1], bt->tc) ||
					!IS_UVADDR(regs->nip, bt->tc))
					continue;
		} else {
			/*
			 * In 2.6 or later, 0x7265677368657265 is saved in the
			 * stack (sp + 96) for the exception frame. Also,
			 * pt_regs will be saved at sp + 112.
			 * Hence, once we know the location of exception marker
			 * in the stack, pt_regs is saved at
			 * - 96 + 112. ==> first + 16.
			 */
			if (*first == EXCP_FRAME_MARKER) {
				ulong *sp;
				/*
				 * SP points to - 96/8;
				 */
				sp = (ulong *)(first - 12);
				if (!IS_KVADDR(*sp))
					if (!IS_UVADDR(*sp, bt->tc))
						continue;

				first = (ulong *)((char *)first + 16);
				regs = (struct ppc64_pt_regs *)first;
			} else
				continue;
		}

		if ((efrm_str = ppc64_check_eframe(regs)) != NULL) {
			/* MSR PR bit distinguishes user vs kernel frames */
			if ((((regs)->msr) >> MSR_PR_LG) & 0x1)
				mode = "USER-MODE";
			else
				mode = "KERNEL-MODE";
			fprintf(fp, "%s %s EXCEPTION FRAME AT %lx:\n",
				bt->flags & BT_EFRAME_SEARCH ? "\n" : "",
				mode, eframe_addr);
			ppc64_print_eframe(efrm_str, regs, bt);
		}
	}

	return 0;
}

/*
 * If addr falls within a per-cpu hard or soft IRQ stack, return that
 * stack's base address; otherwise return 0.
 */
static ulong
ppc64_in_irqstack(ulong addr)
{
	int c;

	if (!(tt->flags & IRQSTACKS))
		return 0;

	for (c = 0; c < NR_CPUS; c++) {
		if (tt->hardirq_ctx[c]) {
			if ((addr >= tt->hardirq_ctx[c]) &&
			    (addr < (tt->hardirq_ctx[c] + SIZE(irq_ctx))))
				return(tt->hardirq_ctx[c]);
		}
		if (tt->softirq_ctx[c]) {
			if ((addr >= tt->softirq_ctx[c]) &&
			    (addr < (tt->softirq_ctx[c] + SIZE(irq_ctx))))
				return(tt->softirq_ctx[c]);
		}
	}

	return 0;
}

/*
 * Check if the CPU is running in any of its emergency stacks.
 * Returns
 *  NONE_STACK : if input is invalid or addr is not within any emergency stack.
 *  EMERGENCY_STACK : if the addr is within emergency stack.
 *  NMI_EMERGENCY_STACK : if the addr is within NMI emergency stack.
 *  MC_EMERGENCY_STACK : if the addr is within machine check emergency stack.
 */
static enum emergency_stack_type
ppc64_in_emergency_stack(int cpu, ulong addr, bool verbose)
{
	struct machine_specific *ms = machdep->machspec;
	ulong base, top;

	if (cpu < 0 || cpu >= kt->cpus)
		return NONE_STACK;

	/* each emergency_sp value is the TOP of its stack */
	if (ms->emergency_sp && IS_KVADDR(ms->emergency_sp[cpu])) {
		top = ms->emergency_sp[cpu];
		base = top - STACKSIZE();
		if (addr >= base && addr < top) {
			if (verbose)
				fprintf(fp, "------\n");
			return EMERGENCY_STACK;
		}
	}

	if (ms->nmi_emergency_sp && IS_KVADDR(ms->nmi_emergency_sp[cpu])) {
		top = ms->nmi_emergency_sp[cpu];
		base = top - STACKSIZE();
		if (addr >= base && addr < top) {
			if (verbose)
				fprintf(fp, "------\n");
			return NMI_EMERGENCY_STACK;
		}
	}

	if (ms->mc_emergency_sp && IS_KVADDR(ms->mc_emergency_sp[cpu])) {
		top = ms->mc_emergency_sp[cpu];
		base = top - STACKSIZE();
		if (addr >= base && addr < top) {
			if (verbose)
				fprintf(fp, "------\n");
			return MC_EMERGENCY_STACK;
		}
	}

	return NONE_STACK;
}

/*
 * Re-point bt's stack window at the selected emergency stack for the
 * task's CPU, refreshing the cached stack buffer.
 */
static void
ppc64_set_bt_emergency_stack(enum emergency_stack_type type, struct bt_info *bt)
{
	struct machine_specific *ms = machdep->machspec;
	ulong top;

	switch (type) {
	case EMERGENCY_STACK:
		top = ms->emergency_sp[bt->tc->processor];
		break;
	case NMI_EMERGENCY_STACK:
		top = ms->nmi_emergency_sp[bt->tc->processor];
		break;
	case MC_EMERGENCY_STACK:
		top = ms->mc_emergency_sp[bt->tc->processor];
		break;
	default:
		top = 0;
		break;
	}

	if (top) {
		bt->stackbase = top - STACKSIZE();
		bt->stacktop = top;
		alter_stackbuf(bt);
	}
}

/*
 * Unroll a kernel stack.
 */
static void
ppc64_back_trace_cmd(struct bt_info *bt)
{
	char buf[BUFSIZE];
	struct gnu_request *req;
	extern void print_stack_text_syms(struct bt_info *, ulong, ulong);

	extra_stacks_idx = 0;
	bt->flags |= BT_EXCEPTION_FRAME;

	if (CRASHDEBUG(1) || bt->debug)
		fprintf(fp, " => PC: %lx (%s) FP: %lx \n",
			bt->instptr, value_to_symstr(bt->instptr, buf, 0),
			bt->stkptr);

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_STACK_TRACE;
	req->flags = GNU_RETURN_ON_ERROR;
	req->buf = GETBUF(BUFSIZE);
	req->debug = bt->debug;
	req->task = bt->task;

	req->pc = bt->instptr;
	req->sp = bt->stkptr;

	/*
	 * For active tasks, register a regs bitmap (nip + gpr[1]) with gdb
	 * as a substack, unless bt->machdep already supplied the same
	 * nip/sp pair.
	 */
	if (is_task_active(bt->task)) {
		if (!extra_stacks_regs[extra_stacks_idx]) {
			extra_stacks_regs[extra_stacks_idx] =
				(struct user_regs_bitmap_struct *)
				malloc(sizeof(struct user_regs_bitmap_struct));
		}
		memset(extra_stacks_regs[extra_stacks_idx], 0,
			sizeof(struct user_regs_bitmap_struct));
		extra_stacks_regs[extra_stacks_idx]->ur.nip = req->pc;
		extra_stacks_regs[extra_stacks_idx]->ur.gpr[1] = req->sp;
		SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap,
			REG_SEQ(ppc64_pt_regs, nip));
		SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap,
			REG_SEQ(ppc64_pt_regs, gpr[0]) + 1);
		if (!bt->machdep ||
		    (extra_stacks_regs[extra_stacks_idx]->ur.gpr[1] !=
		     ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.gpr[1] &&
		     extra_stacks_regs[extra_stacks_idx]->ur.nip !=
		     ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.nip)) {
			gdb_add_substack (extra_stacks_idx++);
		}
	}

	if (bt->flags &
	    (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) {
		if (!INSTACK(req->sp, bt))
			/*
			 * If the user specified SP is in HW interrupt stack
			 * (only for tasks running on other CPUs and in 2.4
			 * kernel), get the top SP points to process's stack.
			 */
			req->sp = ppc64_check_sp_in_HWintrstack(req->sp, bt);
		print_stack_text_syms(bt, req->sp, req->pc);
	} else {
		if (bt->flags & BT_USE_GDB) {
			strcpy(req->buf, "backtrace");
			gdb_interface(req);
		} else
			ppc64_back_trace(req, bt);
	}

	FREEBUF(req->buf);
	FREEBUF(req);
}

/*
 * Unroll the kernel stack using a minimal amount of gdb services.
 *
 * (Ref: 64-bit PowerPC ELF ABI Spplement; Ian Lance Taylor, Zembu Labs).
 * A PPC64 stack frame looks like this:
 *
 * High Address
 *  .-> Back Chain (etc...)
 *  |   FP reg save area
 *  |   GP reg save area
 *  |   Local var space
 *  |   Parameter save area   (SP+48)
 *  |   TOC save area         (SP+40)
 *  |   link editor doubleword (SP+32)
 *  |   compiler doubleword   (SP+24)
 *  |   LR save               (SP+16)
 *  |   CR save               (SP+8)
 *  `-  Back Chain        <-- sp (SP+0)
 *
 * Note that the LR (ret addr) may not be saved in the current frame if
 * no functions have been called from the current function.
 */
/* HACK: put an initial lr in this var for find_trace().  It will be
 * cleared during the trace.
*/ static void ppc64_back_trace(struct gnu_request *req, struct bt_info *bt) { enum emergency_stack_type estype; ulong newpc = 0, newsp, marker; int c = bt->tc->processor; ulong nmi_sp = 0; int eframe_found; int frame = 0; ulong lr = 0; /* hack...need to pass in initial lr reg */ if (!INSTACK(req->sp, bt)) { ulong irqstack; struct machine_specific *ms = machdep->machspec; if ((irqstack = ppc64_in_irqstack(req->sp))) { bt->stackbase = irqstack; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); } else if ((estype = ppc64_in_emergency_stack(c, req->sp, true))) { if (estype == NMI_EMERGENCY_STACK) nmi_sp = req->sp; ppc64_set_bt_emergency_stack(estype, bt); } else if (ms->hwintrstack) { bt->stacktop = ms->hwintrstack[bt->tc->processor] + sizeof(ulong); bt->stackbase = ms->hwintrstack[bt->tc->processor] - ms->hwstacksize + STACK_FRAME_OVERHEAD; bt->stackbuf = ms->hwstackbuf; alter_stackbuf(bt); } else { fprintf(fp, "cannot find the stack info.\n"); return; } } while (INSTACK(req->sp, bt)) { newsp = *(ulong *)&bt->stackbuf[req->sp - bt->stackbase]; if ((req->name = closest_symbol(req->pc)) == NULL) { if (CRASHDEBUG(1)) { error(FATAL, "ppc64_back_trace hit unknown symbol (%lx).\n", req->pc); } } bt->flags |= BT_SAVE_LASTSP; ppc64_print_stack_entry(frame, req, newsp, lr, bt); bt->flags &= ~(ulonglong)BT_SAVE_LASTSP; lr = 0; if (IS_KVADDR(newsp)) { /* * In 2.4, HW interrupt stack will be used to save * smp_call_functions symbols. i.e, when the dumping * CPU is issued IPI call to freeze other CPUS, */ if (INSTACK(newsp, bt) && (newsp + 16 > bt->stacktop)) newsp = *(ulong *)&bt->stackbuf[newsp - bt->stackbase]; if (!INSTACK(newsp, bt)) { if ((estype = ppc64_in_emergency_stack(c, newsp, true))) { if (!nmi_sp && estype == NMI_EMERGENCY_STACK) nmi_sp = newsp; ppc64_set_bt_emergency_stack(estype, bt); } else { /* * Switch HW interrupt stack or emergency stack * to process's stack. 
*/ bt->stackbase = GET_STACKBASE(bt->task); bt->stacktop = GET_STACKTOP(bt->task); alter_stackbuf(bt); } } if (IS_KVADDR(newsp) && INSTACK(newsp, bt)) newpc = *(ulong *)&bt->stackbuf[newsp + 16 - bt->stackbase]; } if (BT_REFERENCE_FOUND(bt)) return; eframe_found = FALSE; /* * Is this frame an execption one? * In 2.6, 0x7265677368657265 is saved and used * to determine the execption frame. */ if (THIS_KERNEL_VERSION < LINUX(2,6,0)) { if (frame && (newsp - req->sp - STACK_FRAME_OVERHEAD) >= sizeof(struct ppc64_pt_regs)) eframe_found = TRUE; else if (STREQ(req->name, ".ret_from_except")) eframe_found = TRUE; } else if ((newsp - req->sp - STACK_FRAME_OVERHEAD) >= sizeof(struct ppc64_pt_regs)) { readmem(req->sp+0x60, KVADDR, &marker, sizeof(ulong), "stack frame", FAULT_ON_ERROR); if (marker == EXCP_FRAME_MARKER) eframe_found = TRUE; } if (eframe_found) { char *efrm_str = NULL; struct ppc64_pt_regs regs; readmem(req->sp+STACK_FRAME_OVERHEAD, KVADDR, ®s, sizeof(struct ppc64_pt_regs), "exception frame", FAULT_ON_ERROR); efrm_str = ppc64_check_eframe(®s); if (efrm_str) { ppc64_print_eframe(efrm_str, ®s, bt); lr = regs.link; newpc = regs.nip; newsp = regs.gpr[1]; } } /* * NMI stack may not be re-entrant. In so, an SP in the NMI stack * is likely to point back to an SP within the NMI stack, in case * of a nested NMI. */ if (nmi_sp && nmi_sp == newsp) { fprintf(fp, "------\n"); break; } /* * Some Linux 3.7 kernel threads have been seen to have * their end-of-trace stack linkage pointer pointing * back to itself (instead of NULL), which would cause * an infinite loop at the .ret_from_kernel_thread frame. 
*/ if (req->sp == newsp) break; req->pc = newpc; req->sp = newsp; frame++; } } static void ppc64_display_full_frame(struct bt_info *bt, ulong nextsp, FILE *ofp) { int i, u_idx; ulong *nip; ulong words, addr; char buf[BUFSIZE]; if (!INSTACK(nextsp, bt)) nextsp = bt->stacktop; words = (nextsp - bt->frameptr) / sizeof(ulong); addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); nip = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(ofp, "%s ", format_stack_entry(bt, buf, *nip, 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); } /* * print one entry of a stack trace */ static void ppc64_print_stack_entry(int frame, struct gnu_request *req, ulong newsp, ulong lr, struct bt_info *bt) { struct load_module *lm; char *lrname = NULL; ulong offset; struct syment *sp; char *name_plus_offset; char buf[BUFSIZE]; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(req->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if (bt->ref->hexval == req->pc) bt->ref->cmdflags |= BT_REF_FOUND; break; } } else { name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(req->pc, &offset); if (sp && offset) name_plus_offset = value_to_symstr(req->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%lx] %s at %lx", frame < 10 ? " " : "", frame, req->sp, name_plus_offset ? name_plus_offset : req->name, req->pc); if (module_symbol(req->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); if (req->ra) { /* * Previous frame is an exception one. If the func * symbol for the current frame is same as with * the previous frame's LR value, print "(unreliable)". 
		 */
		lrname = closest_symbol(req->ra);
		req->ra = 0;
		if (!lrname) {
			if (CRASHDEBUG(1))
				/* NOTE(review): req->ra was zeroed just above,
				 * so this message always reports 0 — looks like
				 * the pre-clear value was intended; confirm. */
				error(FATAL,
				"ppc64_back_trace hit unknown symbol (%lx).\n",
					req->ra);
			return;
		}
	}
	if (lr) {
		/*
		 * Link register value for an exception frame.
		 */
		if ((lrname = closest_symbol(lr)) == NULL) {
			if (CRASHDEBUG(1))
				error(FATAL,
				"ppc64_back_trace hit unknown symbol (%lx).\n",
					lr);
			return;
		}
		req->ra = lr;
	}
	/* Mark the frame unreliable when the symbol matches the saved LR
	 * value or the PC is not in kernel text. */
	if (!req->name || STREQ(req->name, lrname) ||
		!is_kernel_text(req->pc))
		fprintf(fp, " (unreliable)");
	fprintf(fp, "\n");
	}
	if (bt->flags & BT_SAVE_LASTSP)
		req->lastsp = req->sp;
	bt->frameptr = req->sp;
	if (bt->flags & BT_FULL)
		if (IS_KVADDR(newsp))
			ppc64_display_full_frame(bt, newsp, fp);
	if (bt->flags & BT_LINE_NUMBERS)
		ppc64_dump_line_number(req->pc);
}

/*
 * Check whether the frame is exception one!
 * Maps the trap vector saved in the pt_regs to a human-readable exception
 * name, or returns NULL when the trap value matches no known exception
 * (i.e. no exception frame exists here).
 */
static char *
ppc64_check_eframe(struct ppc64_pt_regs *regs)
{
	/* Mask off the low four bits: they carry flags, not the vector. */
	switch(regs->trap & ~0xF) {
	case 0x100:
		return("System Reset");
	case 0x200:
		return("Machine Check");
	case 0x300:
		return("Data Access");
	case 0x380:
		return("Data SLB Access");
	case 0x400:
		return("Instruction Access");
	case 0x480:
		return("Instruction SLB Access");
	case 0x500:
		return("Hardware Interrupt");
	case 0x600:
		return("Alignment");
	case 0x700:
		return("Program Check");
	case 0x800:
		return("FPU Unavailable");
	case 0x900:
		return("Decrementer");
	case 0x980:
		return("Hypervisor Decrementer");
	case 0xa00:
		return("Doorbell");
	case 0xb00:
		return("reserved");
	case 0xc00:
		return("System Call");
	case 0xd00:
		return("Single Step");
	case 0xe00:
		return("fp assist");
	case 0xe40:
		return("Emulation Assist");
	case 0xe60:
		return("HMI");
	case 0xe80:
		return("Hypervisor Doorbell");
	case 0xf00:
		return("Performance Monitor");
	case 0xf20:
		return("Altivec Unavailable");
	case 0x1300:
		return("Instruction Breakpoint");
	case 0x1500:
		return("Denormalisation");
	case 0x1700:
		return("Altivec Assist");
	}

	/* No exception frame exists */
	return NULL;
}

/*
 * Dump the register set from a ppc64_pt_regs structure.
 */
static void
ppc64_print_regs(struct ppc64_pt_regs *regs)
{
	int i;

	/* print out the gprs...
	 */
	for (i=0; i<32; i++) {
		if (i && !(i % 3))
			fprintf(fp, "\n");

		fprintf(fp, " R%d:%s %016lx ", i,
			((i < 10) ? " " : ""), regs->gpr[i]);
		/*
		 * In 2.6, some stack frame contains only partial regs set.
		 * For the partial set, only 14 regs will be saved and trap
		 * field will contain 1 in the least significant bit.
		 */
		if ((i == 13) && (regs->trap & 1))
			break;
	}
	fprintf(fp, "\n");

	/* print out the rest of the registers */
	fprintf(fp, " NIP: %016lx ", regs->nip);
	fprintf(fp, " MSR: %016lx ", regs->msr);
	fprintf(fp, "OR3: %016lx\n", regs->orig_gpr3);
	fprintf(fp, " CTR: %016lx ", regs->ctr);
	fprintf(fp, " LR: %016lx ", regs->link);
	fprintf(fp, "XER: %016lx\n", regs->xer);
	fprintf(fp, " CCR: %016lx ", regs->ccr);
	fprintf(fp, " MQ: %016lx ", regs->mq);
	fprintf(fp, "DAR: %016lx\n", regs->dar);
	fprintf(fp, " DSISR: %016lx ", regs->dsisr);
	fprintf(fp, " Syscall Result: %016lx\n", regs->result);
}

/*
 * Print the symbolic NIP (and, when print_lr is set, LR) values for an
 * exception frame.  Nothing is printed for a value that does not resolve
 * to a symbol.
 */
static void
ppc64_print_nip_lr(struct ppc64_pt_regs *regs, int print_lr)
{
	char buf[BUFSIZE];
	char *sym_buf;

	sym_buf = value_to_symstr(regs->nip, buf, 0);
	if (sym_buf[0] != NULLCHAR)
		fprintf(fp, " [NIP : %s]\n", sym_buf);

	if (print_lr) {
		sym_buf = value_to_symstr(regs->link, buf, 0);
		if (sym_buf[0] != NULLCHAR)
			fprintf(fp, " [LR : %s]\n", sym_buf);
	}
}

/*
 * Print the exception frame information
 */
static void
ppc64_print_eframe(char *efrm_str, struct ppc64_pt_regs *regs,
			struct bt_info *bt)
{
	if (BT_REFERENCE_CHECK(bt))
		return;
	fprintf(fp, " %s [%lx] exception frame:\n", efrm_str, regs->trap);
	ppc64_print_regs(regs);
	ppc64_print_nip_lr(regs, 1);
	/*
	 * For a kernel-mode frame (MSR_PR clear), register the full regs
	 * set as an extra stack for gdb — unless this is an eframe search
	 * pass.  The malloc'd slot is reused across calls and never freed.
	 */
	if (!((regs->msr >> MSR_PR_LG) & 0x1) &&
			!(bt->flags & BT_EFRAME_SEARCH)) {
		if (!extra_stacks_regs[extra_stacks_idx]) {
			extra_stacks_regs[extra_stacks_idx] =
				(struct user_regs_bitmap_struct *)
				malloc(sizeof(struct user_regs_bitmap_struct));
		}
		memset(extra_stacks_regs[extra_stacks_idx], 0,
			sizeof(struct user_regs_bitmap_struct));
		memcpy(&extra_stacks_regs[extra_stacks_idx]->ur, regs,
			sizeof(struct ppc64_pt_regs));
		for (int i = 0; i < sizeof(struct
ppc64_pt_regs)/sizeof(ulong); i++) SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, i); if (!bt->machdep || (extra_stacks_regs[extra_stacks_idx]->ur.gpr[1] != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.gpr[1] && extra_stacks_regs[extra_stacks_idx]->ur.nip != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.nip)) { gdb_add_substack (extra_stacks_idx++); } } } static int ppc64_get_current_task_reg(int regno, const char *name, int size, void *value, int sid) { struct bt_info bt_info, bt_setup; struct task_context *tc; struct user_regs_bitmap_struct *ur_bitmap; ulong ip, sp; bool ret = FALSE; /* Currently only handling registers available in ppc64_pt_regs: * * 0-31: r0-r31 * 64: pc/nip * 65: msr * * 67: lr * 68: ctr */ switch (regno) { case PPC64_R0_REGNUM ... PPC64_R31_REGNUM: case PPC64_PC_REGNUM: case PPC64_MSR_REGNUM: case PPC64_LR_REGNUM: case PPC64_CTR_REGNUM: break; default: // return false if we can't get that register if (CRASHDEBUG(1)) error(WARNING, "unsupported register, regno=%d\n", regno); return FALSE; } tc = CURRENT_CONTEXT(); if (!tc) return FALSE; if (sid && sid <= extra_stacks_idx) { ur_bitmap = extra_stacks_regs[sid - 1]; goto get_sub; } BZERO(&bt_setup, sizeof(struct bt_info)); clone_bt_info(&bt_setup, &bt_info, tc); if (bt_info.stackbase == 0) return FALSE; fill_stackbuf(&bt_info); // reusing the get_dumpfile_regs function to get pt regs structure get_dumpfile_regs(&bt_info, &sp, &ip); if (bt_info.stackbuf) FREEBUF(bt_info.stackbuf); ur_bitmap = (struct user_regs_bitmap_struct *)bt_info.machdep; if (!ur_bitmap) { error(WARNING, "pt_regs not available for cpu %d\n", tc->processor); return FALSE; } if (!bt_info.need_free) { goto get_all; } get_sub: switch (regno) { case PPC64_R0_REGNUM ... 
PPC64_R31_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, gpr[0]) + regno - PPC64_R0_REGNUM)) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; case PPC64_PC_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, nip))) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; case PPC64_MSR_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, msr))) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; case PPC64_LR_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, link))) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; case PPC64_CTR_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, ctr))) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; } get_all: switch (regno) { case PPC64_R0_REGNUM ... PPC64_R31_REGNUM: if (size != sizeof(ur_bitmap->ur.gpr[regno])) break; memcpy(value, &ur_bitmap->ur.gpr[regno], size); ret = TRUE; break; case PPC64_PC_REGNUM: if (size != sizeof(ur_bitmap->ur.nip)) break; memcpy(value, &ur_bitmap->ur.nip, size); ret = TRUE; break; case PPC64_MSR_REGNUM: if (size != sizeof(ur_bitmap->ur.msr)) break; memcpy(value, &ur_bitmap->ur.msr, size); ret = TRUE; break; case PPC64_LR_REGNUM: if (size != sizeof(ur_bitmap->ur.link)) break; memcpy(value, &ur_bitmap->ur.link, size); ret = TRUE; break; case PPC64_CTR_REGNUM: if (size != sizeof(ur_bitmap->ur.ctr)) break; memcpy(value, &ur_bitmap->ur.ctr, size); ret = TRUE; break; } if (!sid && bt_info.need_free) { FREEBUF(ur_bitmap); bt_info.need_free = FALSE; } return ret; } /* * For vmcore typically saved with KDump or FADump, get SP and IP values * from the saved ptregs. */ static int ppc64_vmcore_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp) { struct ppc64_pt_regs *pt_regs; unsigned long unip; /* * TRUE: task is running in a different context (userspace, OPAL..) * FALSE: task is probably running in kernel space. 
*/ int out_of_context = FALSE; pt_regs = (struct ppc64_pt_regs *)bt_in->machdep; if (!pt_regs || !pt_regs->gpr[1]) { if (bt_in->hp) { if (bt_in->hp->esp) { *ksp = bt_in->hp->esp; if (!bt_in->hp->eip) { if (IS_KVADDR(*ksp)) { readmem(*ksp+16, KVADDR, &unip, sizeof(ulong), "Regs NIP value", FAULT_ON_ERROR); *nip = unip; } } else *nip = bt_in->hp->eip; } return TRUE; } /* * Not collected regs. May be the corresponding CPU not * responded to an IPI in case of KDump OR f/w has not * not provided the register info in case of FADump. */ fprintf(fp, "%0lx: GPR1 register value (SP) was not saved\n", bt_in->task); return FALSE; } *ksp = pt_regs->gpr[1]; if (IS_KVADDR(*ksp)) { readmem(*ksp+16, KVADDR, &unip, sizeof(ulong), "Regs NIP value", FAULT_ON_ERROR); *nip = unip; } else { *nip = pt_regs->nip; if (IN_TASK_VMA(bt_in->task, *ksp)) { fprintf(fp, "%0lx: Task is running in user space\n", bt_in->task); out_of_context = TRUE; } else if (is_opal_context(*ksp, *nip)) { fprintf(fp, "%0lx: Task is running in OPAL (firmware) context\n", bt_in->task); out_of_context = TRUE; } else fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n", bt_in->task, *ksp); } if (bt_in->flags && ((BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT))) return TRUE; /* * Print the collected regs for the active task */ ppc64_print_regs(pt_regs); if (out_of_context) return TRUE; if (!IS_KVADDR(*ksp)) return FALSE; ppc64_print_nip_lr(pt_regs, (unip != pt_regs->link) ? 1 : 0); return TRUE; } /* * Get the starting point for the active cpus in a diskdump/netdump. 
*/ static int ppc64_get_dumpfile_stack_frame(struct bt_info *bt_in, ulong *nip, ulong *ksp) { int i, ret, panic_task; char *sym; ulong *up; struct bt_info bt_local, *bt; struct machine_specific *ms; ulong ur_nip = 0; ulong ur_ksp = 0; int check_hardirq, check_softirq; int check_intrstack = TRUE; struct ppc64_pt_regs *pt_regs; struct syment *sp; bt = &bt_local; BCOPY(bt_in, bt, sizeof(struct bt_info)); ms = machdep->machspec; ur_nip = ur_ksp = 0; panic_task = tt->panic_task == bt->task ? TRUE : FALSE; check_hardirq = check_softirq = tt->flags & IRQSTACKS ? TRUE : FALSE; if (panic_task && bt->machdep) { pt_regs = (struct ppc64_pt_regs *)bt->machdep; ur_nip = pt_regs->nip; ur_ksp = pt_regs->gpr[1]; if (!(bt->flags & BT_NO_PRINT_REGS)) { /* Print the collected regs for panic task. */ ppc64_print_regs(pt_regs); ppc64_print_nip_lr(pt_regs, 1); } } else if ((pc->flags & KDUMP) || ((pc->flags & DISKDUMP) && (*diskdump_flags & KDUMP_CMPRS_LOCAL))) { /* * For the KDump or FADump vmcore, use SP and IP values * that are saved in ptregs. */ ret = ppc64_vmcore_stack_frame(bt_in, nip, ksp); if (ret) return TRUE; } if (bt->task != tt->panic_task) { char cpu_frozen = FALSE; /* * Determine whether the CPU responded to an IPI. * We captured the GPR1 register value in the * platform_freeze_cpu() function. */ if ((sp = symbol_search("dump_header")) && !is_symbol_text(sp)) { /* Diskdump */ ulong task_addr; /* * The dump_header struct is specified in the module. 
*/ ulong offset = roundup(STRUCT_SIZE("timespec") + STRUCT_SIZE("new_utsname") + 52, 8); offset += sizeof(ulong) * bt->tc->processor; readmem(symbol_value("dump_header") + offset, KVADDR, &task_addr, sizeof(ulong), "Task Address", FAULT_ON_ERROR); if (task_addr) cpu_frozen = TRUE; } if (!cpu_frozen && symbol_exists("cpus_frozen")) { /* Netdump */ readmem(symbol_value("cpus_frozen") + sizeof(char) * bt->tc->processor, KVADDR, &cpu_frozen, sizeof(char), "CPU Frozen Value", FAULT_ON_ERROR); } ur_ksp = ppc64_get_sp(bt->task); if (IS_KVADDR(ur_ksp)) { /* * Since we could not capture the NIP value, we do not * know the top symbol name. Hence, move the SP to next * frame. */ if (cpu_frozen) readmem(ur_ksp, KVADDR, &ur_ksp, sizeof(ulong), "Stack Pointer", FAULT_ON_ERROR); else if (symbol_exists("platform_freeze_cpu")) fprintf(fp, "%0lx: GPR1 register value (SP) was not saved\n", bt->task); if (IS_KVADDR(ur_ksp)) /* * Get the LR value stored in the stack frame. */ readmem(ur_ksp+16, KVADDR, &ur_nip, sizeof(ulong), "Regs NIP value", FAULT_ON_ERROR); *ksp = ur_ksp; *nip = ur_nip; } else { *ksp = ur_ksp; fprintf(fp, "Could not find SP for task %0lx\n", bt->task); } } /* * Check the process stack first. We are scanning stack for only * panic task. Even though we have dumping CPU's regs, we will be * looking for specific symbols to display trace from actual dump * functions. If these symbols are not exists, consider the regs * stored in the ELF header. 
*/ retry: for (i = 0, up = (ulong *)bt->stackbuf; i < (bt->stacktop - bt->stackbase)/sizeof(ulong); i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, ".netconsole_netdump") || STREQ(sym, ".netpoll_start_netdump") || STREQ(sym, ".start_disk_dump") || STREQ(sym, "crash_kexec") || STREQ(sym, "crash_fadump") || STREQ(sym, "crash_ipi_callback") || STREQ(sym, ".crash_kexec") || STREQ(sym, ".crash_fadump") || STREQ(sym, ".crash_ipi_callback") || STREQ(sym, ".disk_dump")) { *nip = *up; *ksp = bt->stackbase + ((char *)(up) - 16 - bt->stackbuf); /* * Check whether this symbol relates to a * backtrace or not */ ur_ksp = *(ulong *)&bt->stackbuf[(*ksp) - bt->stackbase]; if (!INSTACK(ur_ksp, bt)) continue; return TRUE; } } bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ); if (check_hardirq && (tt->hardirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->hardirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_HARDIRQ; check_hardirq = FALSE; goto retry; } if (check_softirq && (tt->softirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->softirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_SOFTIRQ; check_softirq = FALSE; goto retry; } if (check_intrstack && ms->hwintrstack) { bt->stacktop = ms->hwintrstack[bt->tc->processor] + sizeof(ulong); bt->stackbase = ms->hwintrstack[bt->tc->processor] - ms->hwstacksize + STACK_FRAME_OVERHEAD; bt->stackbuf = ms->hwstackbuf; alter_stackbuf(bt); check_intrstack = FALSE; goto retry; } /* * We didn't find what we were looking for, so just use what was * passed in the ELF header. */ if (ur_nip && ur_ksp) { *nip = ur_nip; *ksp = ur_ksp; return TRUE; } console("ppc64_get_dumpfile_stack_frame: cannot find SP for panic task\n"); return FALSE; } /* * Get a stack frame combination of pc and ra from the most relevent spot. 
/*
 * Fill in *pcp/*spp with the NIP and SP for this task.  For an active
 * task in a dumpfile the values come from the saved dump state; otherwise
 * they are read from the sleeping task's stack and a minimal register
 * bitmap (nip + gpr[1] only) is attached to bt->machdep for later use.
 */
static void
ppc64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	ulong ksp, nip;
	struct user_regs_bitmap_struct *ur_bitmap;

	nip = ksp = 0;

	if (DUMPFILE() && is_task_active(bt->task)) {
		ppc64_get_dumpfile_stack_frame(bt, &nip, &ksp);
		bt->need_free = FALSE;
	} else {
		get_ppc64_frame(bt, &nip, &ksp);
		/* Mark nip and gpr[1] (the SP) as the only valid registers. */
		ur_bitmap = (struct user_regs_bitmap_struct *)GETBUF(sizeof(*ur_bitmap));
		memset(ur_bitmap, 0, sizeof(*ur_bitmap));
		ur_bitmap->ur.nip = nip;
		ur_bitmap->ur.gpr[1] = ksp;
		SET_BIT(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, nip));
		SET_BIT(ur_bitmap->bitmap, REG_SEQ(ppc64_pt_regs, gpr[0]) + 1);
		bt->machdep = ur_bitmap;
		bt->need_free = TRUE;
	}

	if (pcp)
		*pcp = nip;
	if (spp)
		*spp = ksp;
}

/*
 * Read the saved kernel stack pointer (ksp) from the task's thread
 * struct, handling both the thread_info and the older tss layouts.
 */
static ulong
ppc64_get_sp(ulong task)
{
	ulong sp;

	if (tt->flags & THREAD_INFO)
		readmem(task + OFFSET(task_struct_thread_ksp), KVADDR,
			&sp, sizeof(void *),
			"thread_struct ksp", FAULT_ON_ERROR);
	else {
		ulong offset;

		offset = OFFSET_OPTION(task_struct_thread_ksp,
			task_struct_tss_ksp);

		readmem(task + offset, KVADDR, &sp, sizeof(void *),
			"task_struct ksp", FAULT_ON_ERROR);
	}
	return sp;
}

/*
 * get the SP and PC values for idle tasks.
 */
static void
get_ppc64_frame(struct bt_info *bt, ulong *getpc, ulong *getsp)
{
	ulong ip;
	ulong sp;
	ulong *stack;
	ulong task;
	char *closest;
	struct ppc64_pt_regs regs;

	ip = 0;
	task = bt->task;
	stack = (ulong *)bt->stackbuf;

	sp = ppc64_get_sp(task);
	if (!INSTACK(sp, bt))
		goto out;
	readmem(sp+STACK_FRAME_OVERHEAD, KVADDR, &regs,
		sizeof(struct ppc64_pt_regs),
		"PPC64 pt_regs", FAULT_ON_ERROR);
	ip = regs.nip;
	closest = closest_symbol(ip);
	if (STREQ(closest, ".__switch_to") || STREQ(closest, "__switch_to")) {
		/* NOTE: _switch_to() calls _switch() which
		 * is asm.  _switch leaves pc == lr.
		 * Working through this frame is tricky,
		 * and this mess isn't going to help if we
		 * actually dumped here.  Most likely the
		 * analyzer is trying to backtrace a task.
		 * Need to skip 2 frames.
*/ sp = stack[(sp - bt->stackbase)/sizeof(ulong)]; if (!INSTACK(sp, bt)) goto out; sp = stack[(sp - bt->stackbase)/sizeof(ulong)]; if (!INSTACK(sp+16, bt)) goto out; ip = stack[(sp + 16 - bt->stackbase)/sizeof(ulong)]; } out: *getsp = sp; *getpc = ip; } /* * Do the work for cmd_irq(). */ static void ppc64_dump_irq(int irq) { ulong irq_desc_addr, addr; int level, others; ulong action, ctl, value; char typename[32]; irq_desc_addr = symbol_value("irq_desc") + (SIZE(irqdesc) * irq); readmem(irq_desc_addr + OFFSET(irqdesc_level), KVADDR, &level, sizeof(int), "irq_desc entry", FAULT_ON_ERROR); readmem(irq_desc_addr + OFFSET(irqdesc_action), KVADDR, &action, sizeof(long), "irq_desc entry", FAULT_ON_ERROR); readmem(irq_desc_addr + OFFSET(irqdesc_ctl), KVADDR, &ctl, sizeof(long), "irq_desc entry", FAULT_ON_ERROR); fprintf(fp, " IRQ: %d\n", irq); fprintf(fp, " STATUS: 0\n"); fprintf(fp, "HANDLER: "); if (value_symbol(ctl)) { fprintf(fp, "%lx ", ctl); pad_line(fp, VADDR_PRLEN == 8 ? VADDR_PRLEN+2 : VADDR_PRLEN-6, ' '); fprintf(fp, "<%s>\n", value_symbol(ctl)); } else fprintf(fp, "%lx\n", ctl); if(ctl) { /* typename */ readmem(ctl + OFFSET(hw_interrupt_type_typename), KVADDR, &addr, sizeof(ulong), "typename pointer", FAULT_ON_ERROR); fprintf(fp, " typename: %08lx ", addr); if (read_string(addr, typename, 32)) fprintf(fp, "\"%s\"\n", typename); else fprintf(fp, "\n"); /* startup...I think this is always 0 */ readmem(ctl + OFFSET(hw_interrupt_type_startup), KVADDR, &addr, sizeof(ulong), "interrupt startup", FAULT_ON_ERROR); fprintf(fp, " startup: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); /* shutdown...I think this is always 0 */ readmem(ctl + OFFSET(hw_interrupt_type_shutdown), KVADDR, &addr, sizeof(ulong), "interrupt shutdown", FAULT_ON_ERROR); fprintf(fp, " shutdown: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); if 
(VALID_MEMBER(hw_interrupt_type_handle)) { /* handle */ readmem(ctl + OFFSET(hw_interrupt_type_handle), KVADDR, &addr, sizeof(ulong), "interrupt handle", FAULT_ON_ERROR); fprintf(fp, " handle: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); } /* enable/disable */ readmem(ctl + OFFSET(hw_interrupt_type_enable), KVADDR, &addr, sizeof(ulong), "interrupt enable", FAULT_ON_ERROR); fprintf(fp, " enable: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); readmem(ctl + OFFSET(hw_interrupt_type_disable), KVADDR, &addr, sizeof(ulong), "interrupt disable", FAULT_ON_ERROR); fprintf(fp, " disable: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "0\n"); } /* next, the action... and its submembers */ if(!action) fprintf(fp, " ACTION: (none)\n"); while(action) { fprintf(fp, " ACTION: %08lx\n", action); /* handler */ readmem(action + OFFSET(irqaction_handler), KVADDR, &addr, sizeof(ulong), "action handler", FAULT_ON_ERROR); fprintf(fp, " handler: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "0\n"); /* flags */ readmem(action + OFFSET(irqaction_flags), KVADDR, &value, sizeof(ulong), "action flags", FAULT_ON_ERROR); fprintf(fp, " flags: %lx ", value); if (value) { others = 0; fprintf(fp, "("); if (value & SA_INTERRUPT) fprintf(fp, "%sSA_INTERRUPT", others++ ? "|" : ""); if (value & SA_PROBE) fprintf(fp, "%sSA_PROBE", others++ ? "|" : ""); if (value & SA_SAMPLE_RANDOM) fprintf(fp, "%sSA_SAMPLE_RANDOM", others++ ? "|" : ""); if (value & SA_SHIRQ) fprintf(fp, "%sSA_SHIRQ", others++ ? 
"|" : ""); fprintf(fp, ")"); if (value & ~ACTION_FLAGS) { fprintf(fp, " (bits %lx not translated)", value & ~ACTION_FLAGS); } } fprintf(fp, "\n"); /* mask */ readmem(action + OFFSET(irqaction_mask), KVADDR, &value, sizeof(ulong), "action mask", FAULT_ON_ERROR); fprintf(fp, " mask: %lx\n", value); /* name */ readmem(action + OFFSET(irqaction_name), KVADDR, &addr, sizeof(ulong), "action name", FAULT_ON_ERROR); fprintf(fp, " name: %08lx ", addr); if (read_string(addr, typename, 32)) fprintf(fp, "\"%s\"\n", typename); else fprintf(fp, "\n"); /* dev_id */ readmem(action + OFFSET(irqaction_dev_id), KVADDR, &value, sizeof(ulong), "action dev_id", FAULT_ON_ERROR); fprintf(fp, " dev_id: %08lx\n", value); /* next */ readmem(action + OFFSET(irqaction_next), KVADDR, &value, sizeof(ulong), "action next", FAULT_ON_ERROR); fprintf(fp, " next: %lx\n", value); /* keep going if there are chained interrupts */ action = value; } fprintf(fp, " DEPTH: %x\n\n", level); } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int ppc64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on ppc64) so this routine both fixes the * references as well as imposing the current output radix on the translations. 
*/ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !(STRNEQ(p1, " 0x") || STRNEQ(p1, ",0x"))) p1--; if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, ",0x"))) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int ppc64_get_smp_cpus(void) { return get_cpus_online(); } /* * Definitions derived from OPAL. These need to track corresponding values in * https://github.com/open-power/skiboot/blob/master/include/mem-map.h */ #define SKIBOOT_CONSOLE_DUMP_START 0x31000000 #define SKIBOOT_CONSOLE_DUMP_SIZE 0x100000 #define ASCII_UNLIMITED ((ulong)(-1) >> 1) void opalmsg(void) { struct memloc { uint8_t u8; uint16_t u16; uint32_t u32; uint64_t u64; uint64_t limit64; }; int i, a; size_t typesz; void *location; char readtype[20]; struct memloc mem; int displayed, per_line; int lost; ulong error_handle; long count = SKIBOOT_CONSOLE_DUMP_SIZE; ulonglong addr = SKIBOOT_CONSOLE_DUMP_START; if (!(machdep->flags & OPAL_FW)) error(FATAL, "dump was not captured on OPAL based system"); if (CRASHDEBUG(4)) fprintf(fp, "\n", addr, count, "PHYSADDR"); BZERO(&mem, sizeof(struct memloc)); lost = typesz = per_line = 0; location = NULL; /* ASCII */ typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "ascii"); per_line = 256; displayed = 0; error_handle = FAULT_ON_ERROR; for (i = a = 0; i < count; i++) { if (!readmem(addr, PHYSADDR, location, typesz, readtype, error_handle)) { addr += typesz; lost += 1; continue; } if (isprint(mem.u8)) { if ((a 
% per_line) == 0) { if (displayed && i) fprintf(fp, "\n"); } fprintf(fp, "%c", mem.u8); displayed++; a++; } else { if (count == ASCII_UNLIMITED) return; a = 0; } addr += typesz; } if (lost != count) fprintf(fp, "\n"); } static void ppc64_print_emergency_stack_info(void) { struct machine_specific *ms = machdep->machspec; char buf[32]; int i; fprintf(fp, " EMERGENCY STACK: "); if (ms->emergency_sp) { fprintf(fp, "\n"); for (i = 0; i < kt->cpus; i++) { sprintf(buf, "CPU %d", i); fprintf(fp, "%19s: %lx\n", buf, ms->emergency_sp[i]); } } else fprintf(fp, "(unused)\n"); fprintf(fp, "NMI EMERGENCY STACK: "); if (ms->nmi_emergency_sp) { fprintf(fp, "\n"); for (i = 0; i < kt->cpus; i++) { sprintf(buf, "CPU %d", i); fprintf(fp, "%19s: %lx\n", buf, ms->nmi_emergency_sp[i]); } } else fprintf(fp, "(unused)\n"); fprintf(fp, " MC EMERGENCY STACK: "); if (ms->mc_emergency_sp) { fprintf(fp, "\n"); for (i = 0; i < kt->cpus; i++) { sprintf(buf, "CPU %d", i); fprintf(fp, "%19s: %lx\n", buf, ms->mc_emergency_sp[i]); } } else fprintf(fp, "(unused)\n"); fprintf(fp, "\n"); } /* * Machine dependent command. */ void ppc64_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cmo")) != EOF) { switch(c) { case 'c': case 'm': fprintf(fp, "PPC64: '-%c' option is not supported\n", c); break; case 'o': return opalmsg(); default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); ppc64_display_machine_stats(); } /* * "mach" command output. */ static void ppc64_display_machine_stats(void) { int c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " MMU: %s\n", machdep->flags & RADIX_MMU ? 
"RADIX" : "HASH"); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (tt->flags & IRQSTACKS) { fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " HARD IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->hardirq_ctx[c]) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]); } fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " SOFT IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->softirq_ctx) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]); } } ppc64_print_emergency_stack_info(); } static const char *hook_files[] = { "arch/ppc64/kernel/entry.S", "arch/ppc64/kernel/head.S", "arch/ppc64/kernel/semaphore.c" }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) #define SEMAPHORE_C ((char **)&hook_files[2]) static struct line_number_hook ppc64_line_number_hooks[] = { {"DoSyscall", ENTRY_S}, {"_switch", ENTRY_S}, {"ret_from_syscall_1", ENTRY_S}, {"ret_from_syscall_2", ENTRY_S}, {"ret_from_fork", ENTRY_S}, {"ret_from_except", ENTRY_S}, {"do_signal_ret", ENTRY_S}, {"ret_to_user_hook", ENTRY_S}, {"enter_rtas", ENTRY_S}, {"restore", ENTRY_S}, {"do_bottom_half_ret", ENTRY_S}, {"ret_to_user_hook", ENTRY_S}, {"_stext", HEAD_S}, {"_start", HEAD_S}, {"__start", HEAD_S}, {"__secondary_hold", HEAD_S}, {"DataAccessCont", HEAD_S}, {"DataAccess", HEAD_S}, {"i0x300", HEAD_S}, {"DataSegmentCont", HEAD_S}, {"InstructionAccessCont", HEAD_S}, {"InstructionAccess", HEAD_S}, {"i0x400", HEAD_S}, {"InstructionSegmentCont", HEAD_S}, {"HardwareInterrupt", HEAD_S}, {"do_IRQ_intercept", HEAD_S}, {"i0x600", HEAD_S}, {"ProgramCheck", HEAD_S}, {"i0x700", HEAD_S}, {"FPUnavailable", HEAD_S}, {"i0x800", HEAD_S}, 
{"Decrementer", HEAD_S}, {"timer_interrupt_intercept", HEAD_S}, {"SystemCall", HEAD_S}, {"trap_0f_cont", HEAD_S}, {"Trap_0f", HEAD_S}, {"InstructionTLBMiss", HEAD_S}, {"InstructionAddressInvalid", HEAD_S}, {"DataLoadTLBMiss", HEAD_S}, {"DataAddressInvalid", HEAD_S}, {"DataStoreTLBMiss", HEAD_S}, {"AltiVecUnavailable", HEAD_S}, {"DataAccess", HEAD_S}, {"InstructionAccess", HEAD_S}, {"DataSegment", HEAD_S}, {"InstructionSegment", HEAD_S}, {"transfer_to_handler", HEAD_S}, {"stack_ovf", HEAD_S}, {"load_up_fpu", HEAD_S}, {"KernelFP", HEAD_S}, {"load_up_altivec", HEAD_S}, {"KernelAltiVec", HEAD_S}, {"giveup_altivec", HEAD_S}, {"giveup_fpu", HEAD_S}, {"relocate_kernel", HEAD_S}, {"copy_and_flush", HEAD_S}, {"fix_mem_constants", HEAD_S}, {"apus_interrupt_entry", HEAD_S}, {"__secondary_start_gemini", HEAD_S}, {"__secondary_start_psurge", HEAD_S}, {"__secondary_start_psurge2", HEAD_S}, {"__secondary_start_psurge3", HEAD_S}, {"__secondary_start_psurge99", HEAD_S}, {"__secondary_start", HEAD_S}, {"setup_common_caches", HEAD_S}, {"setup_604_hid0", HEAD_S}, {"setup_750_7400_hid0", HEAD_S}, {"load_up_mmu", HEAD_S}, {"start_here", HEAD_S}, {"clear_bats", HEAD_S}, {"flush_tlbs", HEAD_S}, {"mmu_off", HEAD_S}, {"initial_bats", HEAD_S}, {"setup_disp_bat", HEAD_S}, {"m8260_gorom", HEAD_S}, {"sdata", HEAD_S}, {"empty_zero_page", HEAD_S}, {"swapper_pg_dir", HEAD_S}, {"cmd_line", HEAD_S}, {"intercept_table", HEAD_S}, {"set_context", HEAD_S}, {NULL, NULL} /* list must be NULL-terminated */ }; static void ppc64_dump_line_number(ulong callpc) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? 
"" : " (cannot determine file and line number)\n"); else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } void ppc64_compiler_warning_stub(void) { struct line_number_hook *lhp; lhp = &ppc64_line_number_hooks[0]; lhp++; ppc64_back_trace(NULL, NULL); ppc64_dump_line_number(0); } /* * Force the VM address-range selection via: * * --machdep vm=orig * --machdep vm=2.6.14 */ void parse_cmdline_args(void) { int index, i, c; char *p; char buf[BUFSIZE]; char *arglist[MAXARGS]; int lines = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %s\n\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { if (STRNEQ(arglist[i], "vm=")) { p = arglist[i] + strlen("vm="); if (strlen(p)) { if (STREQ(p, "orig")) { machdep->flags |= VM_ORIG; continue; } else if (STREQ(p, "2.6.14")) { machdep->flags |= VM_4_LEVEL; continue; } } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); lines++; } switch (machdep->flags & (VM_ORIG|VM_4_LEVEL)) { case VM_ORIG: error(NOTE, "using original PPC64 VM address ranges\n"); lines++; break; case VM_4_LEVEL: error(NOTE, "using 4-level pagetable PPC64 VM address ranges\n"); lines++; break; case (VM_ORIG|VM_4_LEVEL): error(WARNING, "cannot set both vm=orig and vm=2.6.14\n"); lines++; machdep->flags &= ~(VM_ORIG|VM_4_LEVEL); break; } if (lines) fprintf(fp, "\n"); } } /* * Initialize the per cpu data_offset values from paca structure. 
*/ static int ppc64_paca_percpu_offset_init(int map) { int i, cpus, nr_paca; char *cpu_paca_buf; ulong data_offset; ulong paca; if (!symbol_exists("paca")) error(FATAL, "PPC64: Could not find 'paca' symbol\n"); /* * In v2.6.34 ppc64, the upstream commit 1426d5a3 (powerpc: Dynamically * allocate pacas) now dynamically allocates the paca and have * changed data type of 'paca' symbol from array to pointer. With this * change in place crash utility fails to read vmcore generated for * upstream kernel. * Add a check for paca variable data type before accessing. */ if (get_symbol_type("paca", NULL, NULL) == TYPE_CODE_PTR) readmem(symbol_value("paca"), KVADDR, &paca, sizeof(ulong), "paca", FAULT_ON_ERROR); else paca = symbol_value("paca"); if (!MEMBER_EXISTS("paca_struct", "data_offset")) return kt->cpus; STRUCT_SIZE_INIT(ppc64_paca, "paca_struct"); data_offset = MEMBER_OFFSET("paca_struct", "data_offset"); cpu_paca_buf = GETBUF(SIZE(ppc64_paca)); if (!(nr_paca = get_array_length("paca", NULL, 0))) nr_paca = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS); if (nr_paca > NR_CPUS) { error(WARNING, "PPC64: Number of paca entries (%d) greater than NR_CPUS (%d)\n", nr_paca, NR_CPUS); error(FATAL, "Recompile crash with larger NR_CPUS\n"); } for (i = cpus = 0; i < nr_paca; i++) { /* * CPU present or online or can exist in the system(possible)? 
*/ if (!in_cpu_map(map, i)) continue; readmem(paca + (i * SIZE(ppc64_paca)), KVADDR, cpu_paca_buf, SIZE(ppc64_paca), "paca entry", FAULT_ON_ERROR); kt->__per_cpu_offset[i] = ULONG(cpu_paca_buf + data_offset); kt->flags |= PER_CPU_OFF; cpus++; } return cpus; } static int ppc64_get_cpu_map(void) { int map; if (cpu_map_addr("possible")) map = POSSIBLE_MAP; else if (cpu_map_addr("present")) map = PRESENT_MAP; else if (cpu_map_addr("online")) map = ONLINE_MAP; else if (cpu_map_addr("active")) map = ACTIVE_MAP; else { map = 0; error(FATAL, "PPC64: cannot find 'cpu_possible_map', " "'cpu_present_map', 'cpu_online_map' or 'cpu_active_map' symbols\n"); } return map; } /* * Updating any smp-related items that were possibly bypassed * or improperly initialized in kernel_init(). */ static void ppc64_init_cpu_info(void) { int i, map, cpus, nr_cpus; map = ppc64_get_cpu_map(); /* * starting from v2.6.36 we can not rely on paca structure to get * per cpu data_offset. The upstream commit fc53b420 overwrites * the paca pointer variable to point to static paca that contains * valid data_offset only for crashing cpu. * * But the kernel v2.6.36 ppc64 introduces __per_cpu_offset symbol * which was removed post v2.6.15 ppc64 and now we get the per cpu * data_offset from __per_cpu_offset symbol during kernel_init() * call. Hence for backward (pre-2.6.36) compatibility, call * ppc64_paca_percpu_offset_init() only if symbol __per_cpu_offset * does not exist. */ if (!symbol_exists("__per_cpu_offset")) cpus = ppc64_paca_percpu_offset_init(map); else { if (!(nr_cpus = get_array_length("__per_cpu_offset", NULL, 0))) nr_cpus = (kt->kernel_NR_CPUS ? 
	kt->kernel_NR_CPUS : NR_CPUS);
		/* Count only the cpus present in the selected cpu map. */
		for (i = cpus = 0; i < nr_cpus; i++) {
			if (!in_cpu_map(map, i))
				continue;
			cpus++;
		}
	}

	/* Reconcile kt->cpus with the count derived from the cpu map. */
	switch (map)
	{
	case POSSIBLE_MAP:
		/* possible >= online: only raise kt->cpus up to the
		 * highest cpu actually online, never to the possible count. */
		if (cpus > kt->cpus) {
			i = get_highest_cpu_online() + 1;
			if (i > kt->cpus)
				kt->cpus = i;
		}
		break;
	case ONLINE_MAP:
	case PRESENT_MAP:
		kt->cpus = cpus;
		break;
	}

	if (kt->cpus > 1)
		kt->flags |= SMP;
}

/*
 * Drop the cached pgd page when the kernel pgd in use has changed,
 * forcing the next translation to re-read it.
 */
void
ppc64_clear_machdep_cache(void)
{
	if (machdep->last_pgd_read != vt->kernel_pgd[0])
		machdep->last_pgd_read = 0;
}

/*
 * Fill vrp[] with the kernel virtual address ranges: the unity map,
 * the vmalloc region, and (when the kernel uses a virtual memmap)
 * the vmemmap range.  Returns the number of entries written.
 */
static int
ppc64_get_kvaddr_ranges(struct vaddr_range *vrp)
{
	int cnt;
	physaddr_t phys1, phys2;
	ulong pp1, pp2;

	cnt = 0;

	/* Direct (unity) mapping: kvbase up to high_memory. */
	vrp[cnt].type = KVADDR_UNITY_MAP;
	vrp[cnt].start = machdep->kvbase;
	vrp[cnt++].end = vt->high_memory;

	vrp[cnt].type = KVADDR_VMALLOC;
	vrp[cnt].start = first_vmalloc_address();
	vrp[cnt++].end = last_vmalloc_address();

	if (machdep->flags & VMEMMAP) {
		/* Bound the vmemmap range by the page structs of the first
		 * and last directly-mapped physical pages. */
		phys1 = (physaddr_t)(0);
		phys2 = (physaddr_t)VTOP((vt->high_memory - PAGESIZE()));
		if (phys_to_page(phys1, &pp1) &&
		    phys_to_page(phys2, &pp2)) {
			vrp[cnt].type = KVADDR_VMEMMAP;
			vrp[cnt].start = pp1;
			vrp[cnt++].end = pp2;
		}
	}

	return cnt;
}
#endif /* PPC64 */
crash-utility-crash-9cd43f5/vmware_vmss.c0000664000372000037200000007126215107550337020121 0ustar juerghjuergh/*
 * vmware_vmss.c
 *
 * Copyright (c) 2015, 2020 VMware, Inc.
 * Copyright (c) 2018 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
* * Authors: Dyno Hongjun Fu * Sergio Lopez * Alexey Makhalov */ #include "defs.h" #include "vmware_vmss.h" #define LOGPRX "vmw: " vmssdata vmss = { 0 }; int is_vmware_vmss(char *filename) { struct cptdumpheader hdr; FILE *fp; if ((fp = fopen(filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': [Error %d] %s\n", filename, errno, strerror(errno)); return FALSE; } if (fread(&hdr, sizeof(cptdumpheader), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "cptdumpheader", filename, errno, strerror(errno)); fclose(fp); return FALSE; } fclose(fp); if (hdr.id != CPTDUMP_OLD_MAGIC_NUMBER && hdr.id != CPTDUMP_MAGIC_NUMBER && hdr.id != CPTDUMP_PARTIAL_MAGIC_NUMBER && hdr.id != CPTDUMP_RESTORED_MAGIC_NUMBER && hdr.id != CPTDUMP_NORESTORE_MAGIC_NUMBER) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Unrecognized .vmss file (magic %x).\n", hdr.id); return FALSE; } return TRUE; } int vmware_vmss_init(char *filename, FILE *ofp) { cptdumpheader hdr; cptgroupdesc *grps = NULL; unsigned grpsize; unsigned i; FILE *fp = NULL; int result = TRUE; if (!machine_type("X86") && !machine_type("X86_64")) { error(INFO, LOGPRX"Invalid or unsupported host architecture for .vmss file: %s\n", MACHINE_TYPE); result = FALSE; goto exit; } if ((fp = fopen(filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': %s [Error %d] %s\n", filename, errno, strerror(errno)); result = FALSE; goto exit; } if (fread(&hdr, sizeof(cptdumpheader), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "cptdumpheader", filename, errno, strerror(errno)); result = FALSE; goto exit; } DEBUG_PARSE_PRINT((ofp, LOGPRX"Header: id=%x version=%d numgroups=%d\n", hdr.id, hdr.version, hdr.numgroups)); vmss.cpt64bit = (hdr.id != CPTDUMP_OLD_MAGIC_NUMBER); DEBUG_PARSE_PRINT((ofp, LOGPRX"Checkpoint is %d-bit\n", vmss.cpt64bit ? 
64 : 32)); if (!vmss.cpt64bit) { error(INFO, LOGPRX"Not implemented for 32-bit VMSS file!\n"); result = FALSE; goto exit; } grpsize = hdr.numgroups * sizeof (cptgroupdesc); grps = (cptgroupdesc *) malloc(grpsize * sizeof(cptgroupdesc)); if (grps == NULL) { error(INFO, LOGPRX"Failed to allocate memory! [Error %d] %s\n", errno, strerror(errno)); result = FALSE; goto exit; } if (fread(grps, sizeof(cptgroupdesc), grpsize, fp) != grpsize) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "cptgroupdesc", filename, errno, strerror(errno)); result = FALSE; goto exit; } for (i = 0; i < hdr.numgroups; i++) { if (fseek(fp, grps[i].position, SEEK_SET) == -1) { error(INFO, LOGPRX"Bad offset of VMSS Group['%s'] in '%s' at %#llx.\n", grps[i].name, filename, (ulonglong)grps[i].position); continue; } DEBUG_PARSE_PRINT((ofp, LOGPRX"Group: %-20s offset=%#llx size=0x%#llx.\n", grps[i].name, (ulonglong)grps[i].position, (ulonglong)grps[i].size)); if (strcmp(grps[i].name, "memory") != 0 && (strcmp(grps[i].name, "cpu") != 0 || !machine_type("X86_64"))) { continue; } for (;;) { uint16_t tag; char name[TAG_NAMELEN_MASK + 1]; unsigned nameLen; unsigned nindx; int idx[3]; unsigned j; int nextgroup = FALSE; if (fread(&tag, sizeof(tag), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read tag.\n"); break; } if (tag == NULL_TAG) break; nameLen = TAG_NAMELEN(tag); if (fread(name, nameLen, 1, fp) != 1) { error(INFO, LOGPRX"Cannot read tag name.\n"); break; } name[nameLen] = 0; DEBUG_PARSE_PRINT((ofp, LOGPRX"\t Item %20s", name)); nindx = TAG_NINDX(tag); if (nindx > 3) { error(INFO, LOGPRX"Too many indexes %d (> 3).\n", nindx); break; } idx[0] = idx[1] = idx[2] = NO_INDEX; for (j= 0; j < nindx; j++) { if (fread(&idx[j], sizeof(idx[0]), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read index.\n"); nextgroup = TRUE; break; } DEBUG_PARSE_PRINT((ofp, "[%d]", idx[j])); } if (nextgroup) { DEBUG_PARSE_PRINT((ofp, "\n")); break; } if (IS_BLOCK_TAG(tag)) { uint64_t nbytes; uint64_t blockpos; 
uint64_t nbytesinmem; int compressed = IS_BLOCK_COMPRESSED_TAG(tag); uint16_t padsize; if (fread(&nbytes, sizeof(nbytes), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read block size.\n"); break; } if (fread(&nbytesinmem, sizeof(nbytesinmem), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read block memory size.\n"); break; } if (fread(&padsize, sizeof(padsize), 1, fp) != 1) { error(INFO, LOGPRX"Cannot read block padding size.\n"); break; } if ((blockpos = ftell(fp)) == -1) { error(INFO, LOGPRX"Cannot determine location within VMSS file.\n"); break; } blockpos += padsize; if (strcmp(name, "Memory") == 0) { /* The things that we really care about...*/ vmss.memoffset = blockpos; vmss.memsize = nbytesinmem; vmss.separate_vmem = FALSE; DEBUG_PARSE_PRINT((ofp, "\t=> %sBLOCK: position=%#llx size=%#llx memsize=%#llx\n", compressed ? "COMPRESSED " : "", (ulonglong)blockpos, (ulonglong)nbytes, (ulonglong)nbytesinmem)); if (compressed) { error(INFO, LOGPRX"Cannot handle compressed memory dump yet!\n"); result = FALSE; goto exit; } if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) { error(INFO, LOGPRX"Cannot seek past block at %#llx.\n", (ulonglong)(blockpos + nbytes)); break; } } else if (strcmp(name, "gpregs") == 0 && nbytes == VMW_GPREGS_SIZE && idx[0] < vmss.num_vcpus) { int cpu = idx[0]; if (fread(vmss.regs64[cpu], VMW_GPREGS_SIZE, 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", name, filename, errno, strerror(errno)); break; } DEBUG_PARSE_PRINT((ofp, "\n")); vmss.vcpu_regs[cpu] |= REGS_PRESENT_GPREGS; } else if (strcmp(name, "CR64") == 0 && nbytes == VMW_CR64_SIZE && idx[0] < vmss.num_vcpus) { int cpu = idx[0]; DEBUG_PARSE_PRINT((ofp, "\t=> ")); if (fread(&vmss.regs64[cpu]->cr[0], VMW_CR64_SIZE, 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", name, filename, errno, strerror(errno)); break; } for (j = 0; j < VMW_CR64_SIZE / 8; j++) DEBUG_PARSE_PRINT((ofp, "%s%016llX", j ? 
" " : "", (ulonglong)vmss.regs64[cpu]->cr[j])); DEBUG_PARSE_PRINT((ofp, "\n")); vmss.vcpu_regs[cpu] |= REGS_PRESENT_CRS; } else if (strcmp(name, "IDTR") == 0 && nbytes == VMW_IDTR_SIZE && idx[0] < vmss.num_vcpus) { int cpu = idx[0]; uint64_t idtr; if (fseek(fp, blockpos + 2, SEEK_SET) == -1) { error(INFO, LOGPRX"Cannot seek past block at %#llx.\n", (ulonglong)(blockpos + 2)); break; } if (fread(&idtr, sizeof(idtr), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", name, filename, errno, strerror(errno)); break; } DEBUG_PARSE_PRINT((ofp, "\n")); vmss.regs64[cpu]->idtr = idtr; vmss.vcpu_regs[cpu] |= REGS_PRESENT_IDTR; } else { if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) { error(INFO, LOGPRX"Cannot seek past block at %#llx.\n", (ulonglong)(blockpos + nbytes)); break; } DEBUG_PARSE_PRINT((ofp, "\n")); } } else { union { uint8_t val[TAG_VALSIZE_MASK]; uint32_t val32; uint64_t val64; } u; unsigned k; unsigned valsize = TAG_VALSIZE(tag); uint64_t blockpos = ftell(fp); DEBUG_PARSE_PRINT((ofp, "\t=> position=%#llx size=%#x: ", (ulonglong)blockpos, valsize)); if (fread(u.val, sizeof(u.val[0]), valsize, fp) != valsize) { error(INFO, LOGPRX"Cannot read item.\n"); break; } for (k = 0; k < valsize; k++) { /* Assume Little Endian */ DEBUG_PARSE_PRINT((ofp, "%02X", u.val[valsize - k - 1])); } if (strcmp(grps[i].name, "memory") == 0) { if (strcmp(name, "regionsCount") == 0) { vmss.regionscount = u.val32; } if (strcmp(name, "regionPageNum") == 0) { vmss.regions[idx[0]].startpagenum = u.val32; } if (strcmp(name, "regionPPN") == 0) { vmss.regions[idx[0]].startppn = u.val32; } if (strcmp(name, "regionSize") == 0) { vmss.regions[idx[0]].size = u.val32; } if (strcmp(name, "align_mask") == 0) { vmss.alignmask = u.val32; } } else if (strcmp(grps[i].name, "cpu") == 0) { if (strcmp(name, "cpu:numVCPUs") == 0) { if (vmss.regs64 != NULL) { error(INFO, LOGPRX"Duplicated cpu:numVCPUs entry.\n"); break; } vmss.num_vcpus = u.val32; vmss.regs64 = 
malloc(vmss.num_vcpus * sizeof(void *)); vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t)); for (k = 0; k < vmss.num_vcpus; k++) { vmss.regs64[k] = malloc(sizeof(vmssregs64)); memset(vmss.regs64[k], 0, sizeof(vmssregs64)); vmss.vcpu_regs[k] = 0; } } else if (strcmp(name, "rax") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rax = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RAX; } else if (strcmp(name, "rcx") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rcx = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RCX; } else if (strcmp(name, "rdx") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rdx = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RDX; } else if (strcmp(name, "rbx") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rbx = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RBX; } else if (strcmp(name, "rbp") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rbp = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RBP; } else if (strcmp(name, "rsp") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rsp = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RSP; } else if (strcmp(name, "rsi") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rsi = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RSI; } else if (strcmp(name, "rdi") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rdi = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RDI; } else if (strcmp(name, "r8") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r8 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R8; } else if (strcmp(name, "r9") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r9 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R9; } else if (strcmp(name, "r10") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r10 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R10; } else if (strcmp(name, "r11") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r11 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R11; } else if (strcmp(name, "r12") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r12 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R12; } else if (strcmp(name, "r13") == 0) { int 
cpu = idx[0]; vmss.regs64[cpu]->r13 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R13; } else if (strcmp(name, "r14") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r14 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R14; } else if (strcmp(name, "r15") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->r15 = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_R15; } else if (strcmp(name, "CR64") == 0) { int cpu = idx[0]; switch (idx[1]) { case 0: vmss.regs64[cpu]->cr[0] = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_CR0; break; case 1: vmss.regs64[cpu]->cr[1] = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_CR1; break; case 2: vmss.regs64[cpu]->cr[2] = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_CR2; break; case 3: vmss.regs64[cpu]->cr[3] = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_CR3; break; case 4: vmss.regs64[cpu]->cr[4] = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_CR4; break; } } else if (strcmp(name, "IDTR") == 0) { int cpu = idx[0]; if (idx[1] == 1) vmss.regs64[cpu]->idtr = u.val32; else if (idx[1] == 2) { vmss.regs64[cpu]->idtr |= (uint64_t) u.val32 << 32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_IDTR; } } else if (strcmp(name, "rip") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rip = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RIP; } else if (strcmp(name, "eflags") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rflags |= u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RFLAGS; } else if (strcmp(name, "EFLAGS") == 0) { int cpu = idx[0]; vmss.regs64[cpu]->rflags |= u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_RFLAGS; } else if (strcmp(name, "S.base64") == 0) { int cpu = idx[0]; int seg_index = idx[1]; switch (seg_index) { case SEG_FS: vmss.regs64[cpu]->fs_base = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS_BASE; break; case SEG_GS: vmss.regs64[cpu]->gs_base = u.val64; vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS_BASE; break; } } else if (strcmp(name, "S") == 0) { int cpu = idx[0]; int seg_index = idx[1]; switch (seg_index) { case SEG_ES: vmss.regs64[cpu]->es = u.val32; 
vmss.vcpu_regs[cpu] |= REGS_PRESENT_ES; break; case SEG_CS: vmss.regs64[cpu]->cs = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_CS; break; case SEG_SS: vmss.regs64[cpu]->ss = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_SS; break; case SEG_DS: vmss.regs64[cpu]->ds = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_DS; break; case SEG_FS: vmss.regs64[cpu]->fs = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_FS; break; case SEG_GS: vmss.regs64[cpu]->gs = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_GS; break; case SEG_LDTR: vmss.regs64[cpu]->ldtr = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_LDTR; break; case SEG_TR: vmss.regs64[cpu]->tr = u.val32; vmss.vcpu_regs[cpu] |= REGS_PRESENT_TR; break; default: error(INFO, "Unknown VMSS Segment [%d][%d]\n", cpu, seg_index); } } } DEBUG_PARSE_PRINT((ofp, "\n")); } } } if (vmss.memsize == 0) { char *vmem_filename, *p; if (!(pc->flags & SILENT)) fprintf(ofp, LOGPRX"Memory dump is not part of this vmss file.\n"); fclose(fp); fp = NULL; if (!(pc->flags & SILENT)) fprintf(ofp, LOGPRX"Try to locate the companion vmem file ...\n"); /* check the companion vmem file */ vmem_filename = strdup(filename); p = vmem_filename + strlen(vmem_filename) - 4; if (strcmp(p, "vmss") != 0 && strcmp(p, "vmsn") != 0) { free(vmem_filename); result = FALSE; goto exit; } strcpy(p, "vmem"); if ((fp = fopen(vmem_filename, "r")) == NULL) { error(INFO, LOGPRX"%s: %s\n", vmem_filename, strerror(errno)); free(vmem_filename); result = FALSE; goto exit; } fseek(fp, 0L, SEEK_END); vmss.memsize = ftell(fp); fseek(fp, 0L, SEEK_SET); vmss.separate_vmem = TRUE; vmss.filename = filename; if (!(pc->flags & SILENT)) fprintf(ofp, LOGPRX"vmem file: %s\n\n", vmem_filename); free(vmem_filename); } vmss.dfp = fp; exit: if (grps) free(grps); if (!result && fp) fclose(fp); return result; } uint vmware_vmss_page_size(void) { return VMW_PAGE_SIZE; } int read_vmware_vmss(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { uint64_t pos = paddr; if (vmss.regionscount > 0) { 
/* Memory is divided into regions and there are holes between them. */ uint32_t ppn = (uint32_t) (pos >> VMW_PAGE_SHIFT); int i; for (i = 0; i < vmss.regionscount; i++) { uint32_t hole; if (ppn < vmss.regions[i].startppn) break; /* skip holes. */ hole = vmss.regions[i].startppn - vmss.regions[i].startpagenum; pos -= (uint64_t)hole << VMW_PAGE_SHIFT; } } if (pos + cnt > vmss.memsize) { error(INFO, LOGPRX"Read beyond the end of file! paddr=%#lx cnt=%d\n", paddr, cnt); } pos += vmss.memoffset; if (fseek(vmss.dfp, pos, SEEK_SET) != 0) return SEEK_ERROR; if (fread(bufptr, 1, cnt, vmss.dfp) != cnt) return READ_ERROR; return cnt; } int write_vmware_vmss(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return SEEK_ERROR; } void vmware_vmss_display_regs(int cpu, FILE *ofp) { if (cpu >= vmss.num_vcpus) return; if (machine_type("X86_64")) { fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n", (ulonglong)vmss.regs64[cpu]->rip, (ulonglong)vmss.regs64[cpu]->rsp, (ulonglong)vmss.regs64[cpu]->rflags, (ulonglong)vmss.regs64[cpu]->rax, (ulonglong)vmss.regs64[cpu]->rbx, (ulonglong)vmss.regs64[cpu]->rcx, (ulonglong)vmss.regs64[cpu]->rdx, (ulonglong)vmss.regs64[cpu]->rsi, (ulonglong)vmss.regs64[cpu]->rdi, (ulonglong)vmss.regs64[cpu]->rbp, (ulonglong)vmss.regs64[cpu]->r8, (ulonglong)vmss.regs64[cpu]->r9, (ulonglong)vmss.regs64[cpu]->r10, (ulonglong)vmss.regs64[cpu]->r11, (ulonglong)vmss.regs64[cpu]->r12, (ulonglong)vmss.regs64[cpu]->r13, (ulonglong)vmss.regs64[cpu]->r14, (ulonglong)vmss.regs64[cpu]->r15 ); } } void get_vmware_vmss_regs(struct bt_info *bt, ulong *ipp, ulong *spp) { ulong ip, sp; ip = sp = 0; if (bt->tc->processor >= vmss.num_vcpus || vmss.regs64 == NULL || vmss.vcpu_regs[bt->tc->processor] != REGS_PRESENT_ALL) { 
machdep->get_stack_frame(bt, ipp, spp); return; } if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, ipp, spp); return; } bt->flags |= BT_DUMPFILE_SEARCH; if (machine_type("X86_64")) machdep->get_stack_frame(bt, ipp, spp); else if (machine_type("X86")) get_netdump_regs_x86(bt, ipp, spp); if (bt->flags & BT_DUMPFILE_SEARCH) return; ip = (ulong)vmss.regs64[bt->tc->processor]->rip; sp = (ulong)vmss.regs64[bt->tc->processor]->rsp; if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { *ipp = ip; *spp = sp; bt->flags |= BT_KERNEL_SPACE; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) bt->flags |= BT_USER_SPACE; } int vmware_vmss_memory_dump(FILE *ofp) { cptdumpheader hdr; cptgroupdesc *grps = NULL; unsigned grpsize; unsigned i; int result = TRUE; FILE *fp = vmss.dfp; if (vmss.separate_vmem) { if ((fp = fopen(vmss.filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': %s [Error %d] %s\n", vmss.filename, errno, strerror(errno)); return FALSE; } } if (fseek(fp, 0, SEEK_SET) != 0) { fprintf(ofp, "Error seeking to position 0.\n"); fclose(fp); return FALSE; } if (fread(&hdr, sizeof(cptdumpheader), 1, fp) != 1) { fprintf(ofp, "Failed to read vmss file: [Error %d] %s\n", errno, strerror(errno)); fclose(fp); return FALSE; } fprintf(ofp, "vmware_vmss:\n"); fprintf(ofp, " Header: id=%x version=%d numgroups=%d\n", hdr.id, hdr.version, hdr.numgroups); vmss.cpt64bit = (hdr.id != CPTDUMP_OLD_MAGIC_NUMBER); fprintf(ofp, " Checkpoint is %d-bit\n", vmss.cpt64bit ? 64 : 32); grpsize = hdr.numgroups * sizeof (cptgroupdesc); grps = (cptgroupdesc *) malloc(grpsize * sizeof(cptgroupdesc)); if (grps == NULL) { fprintf(ofp, "Failed to allocate memory! 
[Error %d] %s\n", errno, strerror(errno)); fclose(fp); return FALSE; } if (fread(grps, sizeof(cptgroupdesc), grpsize, fp) != grpsize) { fprintf(ofp, "Failed to read vmss file: [Error %d] %s\n", errno, strerror(errno)); result = FALSE; goto exit; } for (i = 0; i < hdr.numgroups; i++) { if (fseek(fp, grps[i].position, SEEK_SET) == -1) { fprintf(ofp, "Bad offset of VMSS Group['%s'] in vmss file at %#llx.\n", grps[i].name, (ulonglong)grps[i].position); continue; } fprintf(ofp, "\nGroup: %s offset=%#llx size=0x%#llx\n", grps[i].name, (ulonglong)grps[i].position, (ulonglong)grps[i].size); for (;;) { uint16_t tag; char name[TAG_NAMELEN_MASK + 1]; unsigned nameLen; unsigned nindx; int idx[3]; unsigned j; int nextgroup = FALSE; if (fread(&tag, sizeof(tag), 1, fp) != 1) { fprintf(ofp, "Cannot read tag.\n"); break; } if (tag == NULL_TAG) break; nameLen = TAG_NAMELEN(tag); if (fread(name, nameLen, 1, fp) != 1) { fprintf(ofp, "Cannot read tag name.\n"); break; } name[nameLen] = 0; fprintf(ofp, " Item %20s", name); nindx = TAG_NINDX(tag); if (nindx > 3) { fprintf(ofp, "Too many indexes %d (> 3).\n", nindx); break; } idx[0] = idx[1] = idx[2] = NO_INDEX; for (j= 0; j < 3; j++) { if (j < nindx) { if (fread(&idx[j], sizeof(idx[0]), 1, fp) != 1) { fprintf(ofp, "Cannot read index.\n"); nextgroup = TRUE; break; } fprintf(ofp, "[%d]", idx[j]); } else fprintf(ofp, " "); } if (nextgroup) break; if (IS_BLOCK_TAG(tag)) { uint64_t nbytes; uint64_t blockpos; uint64_t nbytesinmem; int compressed = IS_BLOCK_COMPRESSED_TAG(tag); uint16_t padsize; unsigned k, l; char byte; if (fread(&nbytes, sizeof(nbytes), 1, fp) != 1) { fprintf(ofp, "Cannot read block size.\n"); break; } if (fread(&nbytesinmem, sizeof(nbytesinmem), 1, fp) != 1) { fprintf(ofp, "Cannot read block memory size.\n"); break; } if (fread(&padsize, sizeof(padsize), 1, fp) != 1) { fprintf(ofp, "Cannot read block padding size.\n"); break; } if ((blockpos = ftell(fp)) == -1) { fprintf(ofp, "Cannot determine location within VMSS file.\n"); 
break; } blockpos += padsize; fprintf(ofp, " => %sBLOCK: position=%#llx size=%#llx memsize=%#llx\n", compressed ? "COMPRESSED " : "", (ulonglong)blockpos, (ulonglong)nbytes, (ulonglong)nbytesinmem); if (nbytes && nbytes <= MAX_BLOCK_DUMP && !compressed) { fprintf(ofp, "Hex dump: \n"); l = 0; for (k = 0; k < nbytes; k++) { if (fread(&byte, 1, 1, fp) != 1) { fprintf(ofp, "Cannot read byte.\n"); result = FALSE; goto exit; } fprintf(ofp, " %02hhX", byte); if (l++ == 15) { fprintf(ofp, "\n"); l = 0; } } if (l) fprintf(ofp, "\n\n"); else fprintf(ofp, "\n"); } else { if (fseek(fp, blockpos + nbytes, SEEK_SET) == -1) { fprintf(ofp, "Cannot seek past block at %#llx.\n", (ulonglong)(blockpos + nbytes)); result = FALSE; goto exit; } } } else { union { uint8_t val[TAG_VALSIZE_MASK]; uint32_t val32; uint64_t val64; } u; unsigned k; unsigned valsize = TAG_VALSIZE(tag); uint64_t blockpos = ftell(fp); fprintf(ofp, " => position=%#llx size=%#x: ", (ulonglong)blockpos, valsize); if (fread(u.val, sizeof(u.val[0]), valsize, fp) != valsize) { fprintf(ofp, "Cannot read item.\n"); break; } for (k = 0; k < valsize; k++) { /* Assume Little Endian */ fprintf(ofp, "%02X", u.val[valsize - k - 1]); } fprintf(ofp, "\n"); } } } exit: if (vmss.separate_vmem) fclose(fp); if (grps) free(grps); return result; } void dump_registers_for_vmss_dump(void) { int i; vmssregs64 *regs; if (!machine_type("X86_64")) { fprintf(fp, "-r option not supported on this dumpfile type\n"); return; } for (i = 0; i < vmss.num_vcpus; i++) { regs = vmss.regs64[i]; if (i) fprintf(fp, "\n"); fprintf(fp, "CPU %d:\n", i); if (vmss.vcpu_regs[i] != REGS_PRESENT_ALL) { fprintf(fp, "Missing registers for this CPU: 0x%lx\n", vmss.vcpu_regs[i]); continue; } fprintf(fp, " RAX: %016llx RBX: %016llx RCX: %016llx\n", (ulonglong)regs->rax, (ulonglong)regs->rbx, (ulonglong)regs->rcx); fprintf(fp, " RDX: %016llx RSI: %016llx RDI: %016llx\n", (ulonglong)regs->rdx, (ulonglong)regs->rsi, (ulonglong)regs->rdi); fprintf(fp, " RSP: %016llx RBP: 
%016llx R8: %016llx\n", (ulonglong)regs->rsp, (ulonglong)regs->rbp, (ulonglong)regs->r8); fprintf(fp, " R9: %016llx R10: %016llx R11: %016llx\n", (ulonglong)regs->r9, (ulonglong)regs->r10, (ulonglong)regs->r11); fprintf(fp, " R12: %016llx R13: %016llx R14: %016llx\n", (ulonglong)regs->r12, (ulonglong)regs->r13, (ulonglong)regs->r14); fprintf(fp, " R15: %016llx RIP: %016llx RFLAGS: %08llx\n", (ulonglong)regs->r15, (ulonglong)regs->rip, (ulonglong)regs->rflags); fprintf(fp, " IDT: base: %016llx\n", (ulonglong)regs->idtr); fprintf(fp, " CR0: %016llx CR1: %016llx CR2: %016llx\n", (ulonglong)regs->cr[0], (ulonglong)regs->cr[1], (ulonglong)regs->cr[2]); fprintf(fp, " CR3: %016llx CR4: %016llx\n", (ulonglong)regs->cr[3], (ulonglong)regs->cr[4]); } } int vmware_vmss_valid_regs(struct bt_info *bt) { if (vmss.vcpu_regs[bt->tc->processor] == REGS_PRESENT_ALL) return TRUE; return FALSE; } int vmware_vmss_get_nr_cpus(void) { return vmss.num_vcpus; } int vmware_vmss_get_cr3_cr4_idtr(int cpu, ulong *cr3, ulong *cr4, ulong *idtr) { if (cpu >= vmss.num_vcpus || vmss.vcpu_regs[cpu] != REGS_PRESENT_ALL) return FALSE; *cr3 = vmss.regs64[cpu]->cr[3]; *cr4 = vmss.regs64[cpu]->cr[4]; *idtr = vmss.regs64[cpu]->idtr; return TRUE; } int vmware_vmss_get_cpu_reg(int cpu, int regno, const char *name, int size, void *value) { if (cpu >= vmss.num_vcpus) return FALSE; #define CASE_32(R,r) \ case R##_REGNUM: \ if (size != 4) \ return FALSE; \ if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \ return FALSE; \ memcpy(value, &vmss.regs64[cpu]->r, size); \ break #define CASE_64(R,r) \ case R##_REGNUM: \ if (size != 8) \ return FALSE; \ if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_##R)) \ return FALSE; \ memcpy(value, &vmss.regs64[cpu]->r, size); \ break switch (regno) { CASE_64 (RAX, rax); CASE_64 (RBX, rbx); CASE_64 (RCX, rcx); CASE_64 (RDX, rdx); CASE_64 (RSI, rsi); CASE_64 (RDI, rdi); CASE_64 (RBP, rbp); CASE_64 (RSP, rsp); CASE_64 (R8, r8); CASE_64 (R9, r9); CASE_64 (R10, r10); CASE_64 (R11, r11); 
CASE_64 (R12, r12); CASE_64 (R13, r13); CASE_64 (R14, r14); CASE_64 (R15, r15); CASE_64 (RIP, rip); CASE_32 (ES, es); CASE_32 (CS, cs); CASE_32 (SS, ss); CASE_32 (DS, ds); CASE_32 (FS, fs); CASE_32 (GS, gs); CASE_64 (FS_BASE, fs_base); CASE_64 (GS_BASE, gs_base); case EFLAGS_REGNUM: if (!(vmss.vcpu_regs[cpu] & REGS_PRESENT_RFLAGS)) return FALSE; memcpy(value, &vmss.regs64[cpu]->rflags, size); break; default: return FALSE; } return TRUE; } int vmware_vmss_phys_base(ulong *phys_base) { *phys_base = vmss.phys_base; return TRUE; } int vmware_vmss_set_phys_base(ulong phys_base) { vmss.phys_base = phys_base; return TRUE; } crash-utility-crash-9cd43f5/s390dbf.c0000664000372000037200000010640215107550337016715 0ustar juerghjuergh/* * s390 debug feature command for crash * * Copyright (C) IBM Corp. 2006 * Author(s): Michael Holzheu * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #if defined(S390) || defined(S390X) #include "defs.h" #include #include /* * Compat layer to integrate lcrash commands into crash * Maps lcrash API to crash functions */ #define KL_NBPW sizeof(long) #define KL_ERRORFP stderr #define MAX_ARGS 128 #define MAX_CMDLINE 256 #define C_FALSE 0x00000001 /* Command takes no arguments */ #define C_TRUE 0x00000002 /* Command requires arguments */ #define C_ALL 0x00000004 /* All elements */ #define C_PERM 0x00000008 /* Allocate perminant blocks */ #define C_TEMP 0x00000000 /* For completeness */ #define C_FULL 0x00000010 /* Full output */ #define C_LIST 0x00000020 /* List items */ #define C_NEXT 0x00000040 /* Follow links */ #define C_WRITE 0x00000080 /* Write output to file */ #define C_NO_OPCHECK 0x00000100 /* Don't reject bad cmd line options */ #define C_ITER 0x00000200 /* set iteration threshold */ #define C_LFLG_SHFT 12 #define KL_ARCH_S390 0 #define KL_ARCH_S390X 1 #ifdef __s390x__ #define KL_ARCH KL_ARCH_S390X #define FMTPTR "l" #define KL_PTRSZ 8 #else #define KL_ARCH KL_ARCH_S390 #define FMTPTR "ll" #define KL_PTRSZ 4 #endif /* Start TOD time of kernel in usecs for relative time stamps */ static uint64_t tod_clock_base_us; typedef unsigned long uaddr_t; typedef unsigned long kaddr_t; typedef struct _syment { char *s_name; kaddr_t s_addr; } syment_t; typedef struct option_s { struct option_s *op_next; char op_char; char *op_arg; } option_t; typedef struct command_s { int flags; char cmdstr[MAX_CMDLINE]; char *command; char *cmdline; option_t *options; int nargs; char *args[MAX_ARGS]; char *pipe_cmd; FILE *ofp; FILE *efp; } command_t; static inline syment_t* kl_lkup_symaddr(kaddr_t addr) { static syment_t sym; struct syment *crash_sym; crash_sym = value_search(addr, &sym.s_addr); if (!crash_sym) return NULL; sym.s_name = crash_sym->name; return &sym; } static inline syment_t* kl_lkup_symname(char* name) { static syment_t sym; sym.s_addr = symbol_value(name); sym.s_name = NULL; if(!sym.s_addr) return NULL; else 
return &sym; } static inline void GET_BLOCK(kaddr_t addr, int size, void* ptr) { readmem(addr, KVADDR,ptr,size,"GET_BLOCK",FAULT_ON_ERROR); } static inline kaddr_t KL_VREAD_PTR(kaddr_t addr) { unsigned long ptr; readmem(addr, KVADDR,&ptr,sizeof(ptr),"GET_BLOCK",FAULT_ON_ERROR); return (kaddr_t)ptr; } static inline uint32_t KL_GET_UINT32(void* ptr) { return *((uint32_t*)ptr); } static inline uint64_t KL_GET_UINT64(void* ptr) { return *((uint64_t*)ptr); } static inline kaddr_t KL_GET_PTR(void* ptr) { return *((kaddr_t*)ptr); } static inline void* K_PTR(void* addr, char* struct_name, char* member_name) { return addr+MEMBER_OFFSET(struct_name,member_name); } static inline unsigned long KL_ULONG(void* ptr, char* struct_name, char* member_name) { return ULONG(ptr+MEMBER_OFFSET(struct_name,member_name)); } static inline uint32_t KL_VREAD_UINT32(kaddr_t addr) { uint32_t rc; readmem(addr, KVADDR,&rc,sizeof(rc),"KL_VREAD_UINT32",FAULT_ON_ERROR); return rc; } static inline uint32_t KL_INT(void* ptr, char* struct_name, char* member_name) { return UINT(ptr+MEMBER_OFFSET(struct_name,member_name)); } static inline int set_cmd_flags(command_t *cmd, int flags, char *extraops) { return 0; } #define USEC_PER_SEC 1000000L /* Time of day clock value for 1970/01/01 */ #define TOD_UNIX_EPOCH (0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096)) /* Time of day clock value for 1970/01/01 in usecs */ #define TOD_UNIX_EPOCH_US (TOD_UNIX_EPOCH >> 12) static inline void kl_s390tod_to_timeval(uint64_t todval, struct timeval *xtime) { uint64_t todval_us; /* Convert TOD to usec (51th bit of TOD is us) */ todval_us = todval >> 12; /* Add base if we have relative time stamps */ todval_us += tod_clock_base_us; /* Subtract EPOCH that we get time in usec since 1970 */ todval_us -= TOD_UNIX_EPOCH_US; xtime->tv_sec = todval_us / USEC_PER_SEC; xtime->tv_usec = todval_us % USEC_PER_SEC; } static inline int kl_struct_len(char* struct_name) { return STRUCT_SIZE(struct_name); } static inline kaddr_t 
kl_funcaddr(kaddr_t addr) { struct syment *crash_sym; crash_sym = value_search(addr, &addr); if (!crash_sym) return -1; else return crash_sym->value; } #define CMD_USAGE(cmd, s) \ fprintf(cmd->ofp, "Usage: %s %s\n", cmd->command, s); \ fprintf(cmd->ofp, "Enter \"help %s\" for details.\n",cmd->command); /* * s390 debug feature implementation */ #ifdef DBF_DYNAMIC_VIEWS /* views defined in shared libs */ #include #endif /* Local flags */ #define LOAD_FLAG (1 << C_LFLG_SHFT) #define VIEWS_FLAG (2 << C_LFLG_SHFT) #define SAVE_DBF_FLAG (4 << C_LFLG_SHFT) #ifndef MIN #define MIN(a,b) (((a)<(b))?(a):(b)) #endif /* Stuff which has to match with include/asm-s390/debug.h */ #define DBF_VERSION_V1 1 #define DBF_VERSION_V2 2 #define DBF_VERSION_V3 3 #define PAGE_SIZE 4096 #define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ #define DEBUG_MAX_PROCF_LEN 64 /* max length for a proc file name */ #define DEBUG_SPRINTF_MAX_ARGS 10 /* define debug-structures for lcrash */ #define DEBUG_DATA(entry) (char*)(entry + 1) typedef struct debug_view_s debug_view_t; /* * struct to hold contents of struct __debug_entry from dump * for DBF_VERSION_V1 and DBF_VERSION_V2 */ typedef struct debug_entry_v1_s { union { struct { unsigned long long clock:52; unsigned long long exception:1; unsigned long long level:3; unsigned long long cpuid:8; } fields; unsigned long long stck; } id; kaddr_t caller; /* changed from void* to kaddr_t */ } __attribute__((packed)) debug_entry_v1_t; /* for DBF_VERSION_V3 */ typedef struct debug_entry_v3_s { unsigned long long clock:60; unsigned long long exception:1; unsigned long long level:3; kaddr_t caller; /* changed from void* to kaddr_t */ unsigned short cpuid; } __attribute__((packed)) debug_entry_v3_t; static unsigned int dbf_version; /* struct is used to manage contents of structs debug_info from dump * in lcrash */ typedef struct debug_info_s { struct debug_info_s *next; struct debug_info_s *prev; kaddr_t next_dbi; /* store next ptr of struct in dump 
*/ kaddr_t prev_dbi; /* store prev ptr of struct in dump */ int level; int nr_areas; int page_order; int buf_size; int entry_size; void **areas; /* contents of debug areas from dump */ int active_area; int *active_entry; /* change to uint32_t ? */ debug_view_t *views[DEBUG_MAX_VIEWS]; char name[DEBUG_MAX_PROCF_LEN]; kaddr_t addr; int pages_per_area_v2; void ***areas_v2; } debug_info_t; /* functions to generate dbf output */ typedef int (debug_header_proc_t) (debug_info_t* id, debug_view_t* view, int area, void* entry, char* out_buf); typedef int (debug_format_proc_t) (debug_info_t* id, debug_view_t* view, char* out_buf, const char* in_buf); typedef int (debug_prolog_proc_t) (debug_info_t* id, debug_view_t* view, char* out_buf); struct debug_view_s { char name[DEBUG_MAX_PROCF_LEN]; debug_prolog_proc_t* prolog_proc; debug_header_proc_t* header_proc; debug_format_proc_t* format_proc; void* private_data; }; #define LCRASH_DB_VIEWS 1000 static debug_info_t *debug_area_first = NULL; static debug_info_t *debug_area_last = NULL; static debug_view_t *debug_views[LCRASH_DB_VIEWS]; static int initialized = 0; static iconv_t ebcdic_ascii_conv = 0; void s390dbf_usage(command_t * cmd); static int add_lcrash_debug_view(debug_view_t *); static int dbe_size = 0; static void EBCASC(char *inout, size_t len) { iconv(ebcdic_ascii_conv, &inout, &len, &inout, &len); } /* * prints header for debug entry */ static int dflt_header_fn(debug_info_t * id, debug_view_t *view, int area, void *entry, char *out_buf) { struct timeval time_val = { 0, 0 }; int rc = 0; unsigned long long time; unsigned short level = 0, cpuid = 0; char *except_str = "-"; kaddr_t caller = 0; char *caller_name; int name_width = 26; int offset; char caller_buf[30]; syment_t *caller_sym; switch (dbf_version) { case DBF_VERSION_V1: case DBF_VERSION_V2: level = ((debug_entry_v1_t *) entry)->id.fields.level; cpuid = ((debug_entry_v1_t *) entry)->id.fields.cpuid; time = ((debug_entry_v1_t *) entry)->id.stck; if 
(((debug_entry_v1_t *) entry)->id.fields.exception) except_str = "*"; caller = ((debug_entry_v1_t *) entry)->caller; kl_s390tod_to_timeval(time, &time_val); break; case DBF_VERSION_V3: level = ((debug_entry_v3_t *) entry)->level; cpuid = ((debug_entry_v3_t *) entry)->cpuid; time = ((debug_entry_v3_t *) entry)->clock; if (((debug_entry_v3_t *) entry)->exception) except_str = "*"; caller = ((debug_entry_v3_t *) entry)->caller; time_val.tv_sec = time / USEC_PER_SEC; time_val.tv_usec = time % USEC_PER_SEC; break; } if (KL_ARCH == KL_ARCH_S390) caller &= 0x7fffffff; caller_sym = kl_lkup_symaddr(caller); if (caller_sym) { caller_name = caller_sym->s_name; offset = caller - kl_funcaddr(caller); } else { sprintf(caller_buf, "%llx", (unsigned long long)caller); caller_name = caller_buf; offset = 0; } rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %04i <%-*s+%04i> ", area, time_val.tv_sec, time_val.tv_usec, level, except_str, cpuid, name_width, caller_name, offset); return rc; } /* * prints debug data in hex/ascii format */ static int hex_ascii_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { int i, rc = 0; if (out_buf == NULL || in_buf == NULL) { rc = id->buf_size * 4 + 3; goto out; } for (i = 0; i < id->buf_size; i++) { rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]); } rc += sprintf(out_buf + rc, "| "); for (i = 0; i < id->buf_size; i++) { unsigned char c = in_buf[i]; if (isascii(c) && isprint(c)) rc += sprintf(out_buf + rc, "%c", c); else rc += sprintf(out_buf + rc, "."); } rc += sprintf(out_buf + rc, "\n"); out: return rc; } /* * prints debug data in sprintf format */ static int sprintf_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { #define _BUFSIZE 1024 char buf[_BUFSIZE]; int i, k, rc = 0, num_longs = 0, num_strings = 0; int num_used_args ATTRIBUTE_UNUSED; /* use kaddr_t to store long values of 32bit and 64bit archs here */ kaddr_t inbuf_cpy[DEBUG_SPRINTF_MAX_ARGS]; /* 
store ptrs to strings to be deallocated at end of this function */ uaddr_t to_dealloc[DEBUG_SPRINTF_MAX_ARGS]; kaddr_t addr; memset(buf, 0, sizeof(buf)); memset(inbuf_cpy, 0, sizeof(inbuf_cpy)); memset(to_dealloc, 0, sizeof(to_dealloc)); if (out_buf == NULL || in_buf == NULL) { rc = id->buf_size * 4 + 3; goto out; } /* get the format string into buf */ addr = KL_GET_PTR((void*)in_buf); GET_BLOCK(addr, _BUFSIZE, buf); k = 0; for (i = 0; buf[i] && (buf[i] != '\n'); i++) { if (buf[i] != '%') continue; if (k == DEBUG_SPRINTF_MAX_ARGS) { fprintf(KL_ERRORFP, "\nToo much parameters in sprinf view (%i)\n" ,k + 1); fprintf(KL_ERRORFP, "Format String: %s)\n", buf); break; } /* for sprintf we have only unsigned long values ... */ if (buf[i+1] != 's'){ /* we use KL_GET_PTR here to read ulong value */ addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); inbuf_cpy[k] = addr; } else { /* ... or ptrs to strings in debug areas */ inbuf_cpy[k] = (uaddr_t) malloc(_BUFSIZE); to_dealloc[num_strings++] = inbuf_cpy[k]; addr = KL_GET_PTR((void*) in_buf + ((k + 1)* KL_NBPW)); GET_BLOCK(addr, _BUFSIZE, (void*)(uaddr_t)(inbuf_cpy[k])); } k++; } /* count of longs fit into one entry */ num_longs = id->buf_size / KL_NBPW; /* sizeof(long); */ if(num_longs < 1) /* bufsize of entry too small */ goto out; if(num_longs == 1) { /* no args, just print the format string */ rc = sprintf(out_buf + rc, "%s", buf); goto out; } /* number of arguments used for sprintf (without the format string) */ num_used_args = MIN(DEBUG_SPRINTF_MAX_ARGS, (num_longs - 1)); rc = sprintf(out_buf + rc, buf, (uaddr_t)(inbuf_cpy[0]), (uaddr_t)(inbuf_cpy[1]), (uaddr_t)(inbuf_cpy[2]), (uaddr_t)(inbuf_cpy[3]), (uaddr_t)(inbuf_cpy[4]), (uaddr_t)(inbuf_cpy[5]), (uaddr_t)(inbuf_cpy[6]), (uaddr_t)(inbuf_cpy[7]), (uaddr_t)(inbuf_cpy[8]), (uaddr_t)(inbuf_cpy[9])); out: while (num_strings--){ free((char*)(to_dealloc[num_strings])); } return rc; } /*********************************** * functions for debug-views 
***********************************/ /* * prints out actual debug level */ static int prolog_level_fn(debug_info_t * id, debug_view_t *view, char *out_buf) { int rc = 0; if (out_buf == NULL) { rc = 2; goto out; } rc = sprintf(out_buf, "%i\n", id->level); out: return rc; } /* * prints out actual pages_per_area */ static int prolog_pages_fn(debug_info_t * id, debug_view_t *view, char *out_buf) { int rc = 0; if (out_buf == NULL) { rc = 2; goto out; } rc = sprintf(out_buf, "%i\n", id->pages_per_area_v2); out: return rc; } /* * prints out prolog */ static int prolog_fn(debug_info_t * id, debug_view_t *view, char *out_buf) { int rc = 0; rc = sprintf(out_buf, "AREA TIME LEVEL EXCEPTION CP CALLING FUNCTION" " + OFFSET DATA\n===============================" "===========================================\n"); return rc; } /* * prints debug data in hex format */ static int hex_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { int i, rc = 0; for (i = 0; i < id->buf_size; i++) { rc += sprintf(out_buf + rc, "%02x ", ((unsigned char *) in_buf)[i]); } rc += sprintf(out_buf + rc, "\n"); return rc; } /* * prints debug data in ascii format */ static int ascii_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { int i, rc = 0; if (out_buf == NULL || in_buf == NULL) { rc = id->buf_size + 1; goto out; } for (i = 0; i < id->buf_size; i++) { unsigned char c = in_buf[i]; if (!isprint(c)) rc += sprintf(out_buf + rc, "."); else rc += sprintf(out_buf + rc, "%c", c); } rc += sprintf(out_buf + rc, "\n"); out: return rc; } /* * prints debug data in ebcdic format */ static int ebcdic_format_fn(debug_info_t * id, debug_view_t *view, char *out_buf, const char *in_buf) { int i, rc = 0; if (out_buf == NULL || in_buf == NULL) { rc = id->buf_size + 1; goto out; } for (i = 0; i < id->buf_size; i++) { char c = in_buf[i]; EBCASC(&c, 1); if (!isprint(c)) rc += sprintf(out_buf + rc, "."); else rc += sprintf(out_buf + rc, "%c", c); } rc += 
sprintf(out_buf + rc, "\n"); out: return rc; } debug_view_t ascii_view = { "ascii", &prolog_fn, &dflt_header_fn, &ascii_format_fn, }; debug_view_t ebcdic_view = { "ebcdic", &prolog_fn, &dflt_header_fn, &ebcdic_format_fn, }; debug_view_t hex_view = { "hex", &prolog_fn, &dflt_header_fn, &hex_format_fn, }; debug_view_t level_view = { "level", &prolog_level_fn, NULL, NULL, }; debug_view_t pages_view = { "pages", &prolog_pages_fn, NULL, NULL, }; debug_view_t hex_ascii_view = { "hex_ascii", &prolog_fn, &dflt_header_fn, &hex_ascii_format_fn, }; debug_view_t sprintf_view = { "sprintf", &prolog_fn, &dflt_header_fn, &sprintf_format_fn, }; static debug_entry_v1_t * debug_find_oldest_entry(debug_entry_v1_t *entries, int num, int entry_size) { debug_entry_v1_t *result, *current; int i; uint64_t clock1, clock2; result = entries; current = entries; for (i=0; i < num; i++) { if (current->id.stck == 0) break; clock1 = current->id.fields.clock; clock2 = result->id.fields.clock; clock1 = KL_GET_UINT64(&clock1); clock2 = KL_GET_UINT64(&clock2); if (clock1 < clock2) result = current; current = (debug_entry_v1_t *) ((char *) current + entry_size); } return result; } /* * debug_format_output: * - calls prolog, header and format functions of view to format output */ static int debug_format_output_v1(debug_info_t * debug_area, debug_view_t *view, FILE * ofp) { int i, j, len; int nr_of_entries; debug_entry_v1_t *act_entry, *last_entry; char *act_entry_data; char buf[2048]; size_t items ATTRIBUTE_UNUSED; /* print prolog */ if (view->prolog_proc) { len = view->prolog_proc(debug_area, view, buf); items = fwrite(buf,len, 1, ofp); memset(buf, 0, 2048); } /* print debug records */ if (!(view->format_proc) && !(view->header_proc)) goto out; if(debug_area->entry_size <= 0){ fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size); goto out; } nr_of_entries = (PAGE_SIZE << debug_area->page_order) / debug_area->entry_size; for (i = 0; i < debug_area->nr_areas; i++) { act_entry = 
debug_find_oldest_entry(debug_area->areas[i], nr_of_entries, debug_area->entry_size); last_entry = (debug_entry_v1_t *) ((char *) debug_area->areas[i] + (PAGE_SIZE << debug_area->page_order) - debug_area->entry_size); for (j = 0; j < nr_of_entries; j++) { act_entry_data = (char*)act_entry + dbe_size; if (act_entry->id.stck == 0) break; /* empty entry */ if (view->header_proc) { len = view->header_proc(debug_area, view, i, act_entry, buf); items = fwrite(buf,len, 1, ofp); memset(buf, 0, 2048); } if (view->format_proc) { len = view->format_proc(debug_area, view, buf, act_entry_data); items = fwrite(buf,len, 1, ofp); memset(buf, 0, 2048); } act_entry = (debug_entry_v1_t *) (((char *) act_entry) + debug_area->entry_size); if (act_entry > last_entry) act_entry = debug_area->areas[i]; } } out: return 1; } /* * debug_format_output_v2: * - calls prolog, header and format functions of view to format output */ static int debug_format_output_v2(debug_info_t * debug_area, debug_view_t *view, FILE * ofp) { int i, j, k, len; void *act_entry; char *act_entry_data; char buf[2048]; size_t items ATTRIBUTE_UNUSED; /* print prolog */ if (view->prolog_proc) { len = view->prolog_proc(debug_area, view, buf); items = fwrite(buf,len, 1, ofp); memset(buf, 0, 2048); } /* print debug records */ if (!(view->format_proc) && !(view->header_proc)) goto out; if(debug_area->entry_size <= 0){ fprintf(ofp, "Invalid entry_size: %i\n",debug_area->entry_size); goto out; } for (i = 0; i < debug_area->nr_areas; i++) { int nr_entries_per_page = PAGE_SIZE/debug_area->entry_size; for (j = 0; j < debug_area->pages_per_area_v2; j++) { act_entry = debug_area->areas_v2[i][j]; for (k = 0; k < nr_entries_per_page; k++) { act_entry_data = (char*)act_entry + dbe_size; if (dbf_version == DBF_VERSION_V3 && ((debug_entry_v3_t *) act_entry)->clock == 0) break; /* empty entry */ else if (dbf_version < DBF_VERSION_V3 && ((debug_entry_v1_t *) act_entry)->id.stck == 0) break; /* empty entry */ if (view->header_proc) { len = 
view->header_proc(debug_area, view, i, act_entry, buf); items = fwrite(buf,len, 1, ofp); memset(buf, 0, 2048); } if (view->format_proc) { len = view->format_proc(debug_area, view, buf, act_entry_data); items = fwrite(buf,len, 1, ofp); memset(buf, 0, 2048); } act_entry = ((char *) act_entry) + debug_area->entry_size; } } } out: return 1; } static debug_info_t * find_debug_area(const char *area_name) { debug_info_t* act_debug_info = debug_area_first; while(act_debug_info != NULL){ if (strcmp(act_debug_info->name, area_name) == 0) return act_debug_info; act_debug_info = act_debug_info->next; } return NULL; } static void tod_clock_base_init(void) { if (kernel_symbol_exists("tod_clock_base")) { /* * Kernels >= 4.14 that contain 6e2ef5e4f6cc5734 ("s390/time: * add support for the TOD clock epoch extension") */ get_symbol_data("tod_clock_base", sizeof(tod_clock_base_us), &tod_clock_base_us); /* Bit for usecs is at position 59 - therefore shift 4 */ tod_clock_base_us >>= 4; } else if (kernel_symbol_exists("sched_clock_base_cc") && !kernel_symbol_exists("tod_to_timeval")) { /* * Kernels >= 4.11 that contain ea417aa8a38bc7db ("s390/debug: * make debug event time stamps relative to the boot TOD clock") */ get_symbol_data("sched_clock_base_cc", sizeof(tod_clock_base_us), &tod_clock_base_us); /* Bit for usecs is at position 51 - therefore shift 12 */ tod_clock_base_us >>= 12; } else { /* All older kernels use absolute time stamps */ tod_clock_base_us = 0; } } static void dbf_init(void) { if (!initialized) { tod_clock_base_init(); if(dbf_version >= DBF_VERSION_V2) add_lcrash_debug_view(&pages_view); add_lcrash_debug_view(&ascii_view); add_lcrash_debug_view(&level_view); add_lcrash_debug_view(&ebcdic_view); add_lcrash_debug_view(&hex_view); add_lcrash_debug_view(&hex_ascii_view); add_lcrash_debug_view(&sprintf_view); ebcdic_ascii_conv = iconv_open("ISO-8859-1", "EBCDIC-US"); initialized = 1; } } static debug_view_t* get_debug_view(kaddr_t addr) { void* k_debug_view; int 
k_debug_view_size; debug_view_t* rc; rc = (debug_view_t*)malloc(sizeof(debug_view_t)); memset(rc, 0, sizeof(debug_view_t)); k_debug_view_size = kl_struct_len("debug_view"); k_debug_view = malloc(k_debug_view_size); GET_BLOCK(addr, k_debug_view_size, k_debug_view); strncpy(rc->name,K_PTR(k_debug_view,"debug_view","name"), DEBUG_MAX_PROCF_LEN); free(k_debug_view); return rc; } static void free_debug_view(debug_view_t* view) { if(view) free(view); } static void debug_get_areas_v1(debug_info_t* db_info, void* k_dbi) { kaddr_t mem_pos; kaddr_t dbe_addr; int area_size, i; /* get areas */ /* place to hold ptrs to debug areas in lcrash */ area_size = PAGE_SIZE << db_info->page_order; db_info->areas = (void**)malloc(db_info->nr_areas * sizeof(void *)); memset(db_info->areas, 0, db_info->nr_areas * sizeof(void *)); mem_pos = KL_ULONG(k_dbi,"debug_info","areas"); for (i = 0; i < db_info->nr_areas; i++) { dbe_addr = KL_VREAD_PTR(mem_pos); db_info->areas[i] = (debug_entry_v1_t *) malloc(area_size); /* read raw data for debug area */ GET_BLOCK(dbe_addr, area_size, db_info->areas[i]); mem_pos += KL_NBPW; } } static void debug_get_areas_v2(debug_info_t* db_info, void* k_dbi) { kaddr_t area_ptr; kaddr_t page_array_ptr; kaddr_t page_ptr; int i,j; db_info->areas_v2=(void***)malloc(db_info->nr_areas * sizeof(void **)); area_ptr = KL_ULONG(k_dbi,"debug_info","areas"); for (i = 0; i < db_info->nr_areas; i++) { db_info->areas_v2[i] = (void**)malloc(db_info->pages_per_area_v2 * sizeof(void*)); page_array_ptr = KL_VREAD_PTR(area_ptr); for(j=0; j < db_info->pages_per_area_v2; j++) { page_ptr = KL_VREAD_PTR(page_array_ptr); db_info->areas_v2[i][j] = (void*)malloc(PAGE_SIZE); /* read raw data for debug area */ GET_BLOCK(page_ptr, PAGE_SIZE, db_info->areas_v2[i][j]); page_array_ptr += KL_NBPW; } area_ptr += KL_NBPW; } } static debug_info_t* get_debug_info(kaddr_t addr,int get_areas) { void *k_dbi; kaddr_t mem_pos; kaddr_t view_addr; debug_info_t* db_info; int i; int dbi_size; /* get sizes of 
kernel structures */ if(!(dbi_size = kl_struct_len("debug_info"))){ fprintf (KL_ERRORFP, "Could not determine sizeof(struct debug_info)\n"); return(NULL); } if(!(dbe_size = kl_struct_len("__debug_entry"))){ fprintf(KL_ERRORFP, "Could not determine sizeof(struct __debug_entry)\n"); return(NULL); } /* get kernel debug_info structure */ k_dbi = malloc(dbi_size); GET_BLOCK(addr, dbi_size, k_dbi); db_info = (debug_info_t*)malloc(sizeof(debug_info_t)); memset(db_info, 0, sizeof(debug_info_t)); /* copy members */ db_info->level = KL_INT(k_dbi,"debug_info","level"); db_info->nr_areas = KL_INT(k_dbi,"debug_info","nr_areas"); db_info->pages_per_area_v2= KL_INT(k_dbi,"debug_info","pages_per_area"); db_info->page_order = KL_INT(k_dbi,"debug_info","page_order"); db_info->buf_size = KL_INT(k_dbi,"debug_info","buf_size"); db_info->entry_size = KL_INT(k_dbi,"debug_info","entry_size"); db_info->next_dbi = KL_ULONG(k_dbi,"debug_info","next"); db_info->prev_dbi = KL_ULONG(k_dbi,"debug_info","prev"); db_info->addr = addr; strncpy(db_info->name,K_PTR(k_dbi,"debug_info","name"), DEBUG_MAX_PROCF_LEN); if(get_areas){ if(dbf_version == DBF_VERSION_V1) debug_get_areas_v1(db_info,k_dbi); else debug_get_areas_v2(db_info,k_dbi); } else { db_info->areas = NULL; } /* get views */ mem_pos = (uaddr_t) K_PTR(k_dbi,"debug_info","views"); memset(&db_info->views, 0, DEBUG_MAX_VIEWS * sizeof(void*)); for (i = 0; i < DEBUG_MAX_VIEWS; i++) { view_addr = KL_GET_PTR((void*)(uaddr_t)mem_pos); if(view_addr == 0){ break; } else { db_info->views[i] = get_debug_view(view_addr); } mem_pos += KL_NBPW; } free(k_dbi); return db_info; } static void free_debug_info_v1(debug_info_t * db_info) { int i; if(db_info->areas){ for (i = 0; i < db_info->nr_areas; i++) { free(db_info->areas[i]); } } for (i = 0; i < DEBUG_MAX_VIEWS; i++) { free_debug_view(db_info->views[i]); } free(db_info->areas); free(db_info); } static void free_debug_info_v2(debug_info_t * db_info) { int i,j; if(db_info->areas) { for (i = 0; i < 
db_info->nr_areas; i++) { for(j = 0; j < db_info->pages_per_area_v2; j++) { free(db_info->areas_v2[i][j]); } free(db_info->areas[i]); } free(db_info->areas); db_info->areas = NULL; } for (i = 0; i < DEBUG_MAX_VIEWS; i++) { free_debug_view(db_info->views[i]); } free(db_info); } static void debug_write_output(debug_info_t *db_info, debug_view_t *db_view, FILE * fp) { if (dbf_version == DBF_VERSION_V1) { debug_format_output_v1(db_info, db_view, fp); free_debug_info_v1(db_info); } else { debug_format_output_v2(db_info, db_view, fp); free_debug_info_v2(db_info); } } static int get_debug_areas(void) { kaddr_t act_debug_area; syment_t *debug_sym; debug_info_t *act_debug_area_cpy; if(!(debug_sym = kl_lkup_symname("debug_area_first"))){ printf("Did not find debug_areas"); return -1; } act_debug_area = KL_VREAD_PTR(debug_sym->s_addr); while(act_debug_area != 0){ act_debug_area_cpy = get_debug_info(act_debug_area,0); act_debug_area = act_debug_area_cpy->next_dbi; if(debug_area_first == NULL){ debug_area_first = act_debug_area_cpy; } else { debug_area_last->next = act_debug_area_cpy; } debug_area_last = act_debug_area_cpy; } return 0; } static void free_debug_areas(void) { debug_info_t* next; debug_info_t* act_debug_info = debug_area_first; while(act_debug_info != NULL){ next = act_debug_info->next; if(dbf_version == DBF_VERSION_V1) free_debug_info_v1(act_debug_info); else free_debug_info_v2(act_debug_info); act_debug_info = next; } debug_area_first = NULL; debug_area_last = NULL; } static debug_view_t * find_lcrash_debug_view(const char *name) { int i; for (i = 0; (i < LCRASH_DB_VIEWS) && (debug_views[i] != NULL); i++) { if (strcmp(debug_views[i]->name, name) == 0) return debug_views[i]; } return NULL; } static void print_lcrash_debug_views(FILE * ofp) { int i; fprintf(ofp, "REGISTERED VIEWS\n"); fprintf(ofp, "=====================\n"); for (i = 0; i < LCRASH_DB_VIEWS; i++) { if (debug_views[i] == NULL) { return; } fprintf(ofp, " - %s\n", debug_views[i]->name); } } static int 
add_lcrash_debug_view(debug_view_t *view) { int i; for (i = 0; i < LCRASH_DB_VIEWS; i++) { if (debug_views[i] == NULL) { debug_views[i] = view; return 0; } if (strcmp(debug_views[i]->name, view->name) == 0) return -1; } return -1; } static int list_one_view(char *area_name, char *view_name, command_t * cmd) { debug_info_t *db_info; debug_view_t *db_view; if ((db_info = find_debug_area(area_name)) == NULL) { fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); return -1; } db_info = get_debug_info(db_info->addr,1); if ((db_view = find_lcrash_debug_view(view_name)) == NULL) { fprintf(cmd->efp, "View '%s' not registered!\n", view_name); return -1; } debug_write_output(db_info, db_view, cmd->ofp); return 0; } static int list_areas(FILE * ofp) { debug_info_t* act_debug_info = debug_area_first; fprintf(ofp, "Debug Logs:\n"); fprintf(ofp, "==================\n"); while(act_debug_info != NULL){ fprintf(ofp, " - %s\n", act_debug_info->name); act_debug_info = act_debug_info->next; } return 0; } static int list_one_area(const char *area_name, command_t * cmd) { debug_info_t *db_info; int i; if ((db_info = find_debug_area(area_name)) == NULL) { fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); return -1; } fprintf(cmd->ofp, "INSTALLED VIEWS FOR '%s':\n", area_name); fprintf(cmd->ofp, "================================================" "==============================\n"); for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (db_info->views[i] != NULL) { fprintf(cmd->ofp, " - %s ", db_info->views[i]->name); if (find_lcrash_debug_view(db_info->views[i]->name)) fprintf(cmd->ofp, "(available)\n"); else fprintf(cmd->ofp, "(not available)\n"); } } fprintf(cmd->ofp, "=================================================" "=============================\n"); return 0; } #ifdef DBF_DYNAMIC_VIEWS static int load_debug_view(const char *path, command_t * cmd) { void *library; const char *error; debug_view_t *(*view_init_func) (void); library = dlopen(path, RTLD_LAZY); if (library == 
NULL) { fprintf(cmd->efp, "Could not open %s: %s\n", path, dlerror()); return (1); } dlerror(); view_init_func = dlsym(library, "debug_view_init"); error = dlerror(); if (error) { fprintf(stderr, "could not find debug_view_init(): %s\n", error); exit(1); } add_lcrash_debug_view((*view_init_func) ()); fprintf(cmd->ofp, "view %s loaded\n", path); fflush(stdout); return 0; } #endif static int save_one_view(const char *dbf_dir_name, const char *area_name, const char *view_name, command_t *cmd) { char path_view[PATH_MAX]; debug_info_t *db_info; debug_view_t *db_view; FILE *view_fh; db_info = find_debug_area(area_name); if (db_info == NULL) { fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); return -1; } db_info = get_debug_info(db_info->addr, 1); db_view = find_lcrash_debug_view(view_name); if (db_view == NULL) { fprintf(cmd->efp, "View '%s' not registered!\n", view_name); return -1; } sprintf(path_view, "%s/%s/%s", dbf_dir_name, area_name, view_name); view_fh = fopen(path_view, "w"); if (view_fh == NULL) { fprintf(cmd->efp, "Could not create file: %s (%s)\n", path_view, strerror(errno)); return -1; } debug_write_output(db_info, db_view, view_fh); fclose(view_fh); return 0; } static int save_one_area(const char *dbf_dir_name, const char *area_name, command_t *cmd) { char dir_name_area[PATH_MAX]; debug_info_t *db_info; int i; db_info = find_debug_area(area_name); if (db_info == NULL) { fprintf(cmd->efp, "Debug log '%s' not found!\n", area_name); return -1; } sprintf(dir_name_area, "%s/%s", dbf_dir_name, area_name); if (mkdir(dir_name_area, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) != 0) { fprintf(cmd->efp, "Could not create directory: %s (%s)\n", dir_name_area, strerror(errno)); return -1; } for (i = 0; i < DEBUG_MAX_VIEWS; i++) { if (db_info->views[i] == NULL) continue; if (!find_lcrash_debug_view(db_info->views[i]->name)) continue; save_one_view(dbf_dir_name, area_name, db_info->views[i]->name, cmd); } return 0; } static void save_dbf(const char *dbf_dir_name, 
command_t *cmd) { debug_info_t *act_debug_info = debug_area_first; FILE *ofp = cmd->ofp; fprintf(ofp, "Saving s390dbf to directory \"%s\"\n", dbf_dir_name); if (mkdir(dbf_dir_name, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH) != 0) { fprintf(cmd->efp, "Could not create directory: %s (%s)\n", dbf_dir_name, strerror(errno)); return; } while (act_debug_info != NULL) { save_one_area(dbf_dir_name, act_debug_info->name, cmd); act_debug_info = act_debug_info->next; } } /* * s390dbf_cmd() -- Run the 's390dbf' command. */ static int s390dbf_cmd(command_t * cmd) { syment_t *dbf_version_sym; int rc = 0; /* check version */ if(!(dbf_version_sym = kl_lkup_symname("debug_feature_version"))){ fprintf(KL_ERRORFP, "Could not determine debug_feature_version\n"); return -1; } dbf_version = KL_VREAD_UINT32(dbf_version_sym->s_addr); if ((dbf_version != DBF_VERSION_V1) && (dbf_version != DBF_VERSION_V2) && (dbf_version != DBF_VERSION_V3)) { fprintf(cmd->efp, "lcrash does not support the" " debug feature version of the dump kernel:\n"); fprintf(cmd->efp, "DUMP: %i SUPPORTED: %i, %i and %i\n", dbf_version, DBF_VERSION_V1, DBF_VERSION_V2, DBF_VERSION_V3); return -1; } dbf_init(); if (cmd->flags & C_ALL) { return (0); } #ifdef DBF_DYNAMIC_VIEWS if (cmd->flags & LOAD_FLAG) { printf("loading: %s\n", cmd->args[0]); return (load_debug_view(cmd->args[0], cmd)); } #endif if (cmd->flags & VIEWS_FLAG) { print_lcrash_debug_views(cmd->ofp); return (0); } if (cmd->nargs > 2) { s390dbf_usage(cmd); return (1); } if(get_debug_areas() == -1) return -1; if (cmd->flags & SAVE_DBF_FLAG) { if (cmd->nargs != 2) { fprintf(cmd->efp, "Specify directory name for -s\n"); return 1; } save_dbf(cmd->args[1], cmd); return 0; } switch (cmd->nargs) { case 0: rc = list_areas(cmd->ofp); break; case 1: rc = list_one_area(cmd->args[0], cmd); break; case 2: rc = list_one_view(cmd->args[0], cmd->args[1], cmd); break; } free_debug_areas(); return rc; } #define _S390DBF_USAGE " [-v] [-s dirname] [debug log] [debug view]" /* * 
s390dbf_usage() -- Print the usage string for the 's390dbf' command. */ void s390dbf_usage(command_t * cmd) { CMD_USAGE(cmd, _S390DBF_USAGE); } /* * s390 debug feature command for crash */ char *help_s390dbf[] = { "s390dbf", "s390dbf prints out debug feature logs", "[-v] [-s dirname] [debug log] [debug view]" "", "Display Debug logs:", " + If called without parameters, all active debug logs are listed.", " + If called with the name of a debug log, all debug-views for which", " the debug-log has registered are listed. It is possible thatsome", " of the debug views are not available to 'crash'.", " + If called with the name of a debug-log and an available viewname,", " the specified view is printed.", " + If called with '-s dirname', the s390dbf is saved to the specified", " directory", " + If called with '-v', all debug views which are available to", " 'crash' are listed", NULL }; void cmd_s390dbf() { int i,c; command_t cmd = { .ofp = fp, .efp = stderr, .cmdstr = "s390dbf", .command = "s390dbf", }; cmd.nargs=argcnt - 1; for (i=1; i < argcnt; i++) cmd.args[i-1] = args[i]; while ((c = getopt(argcnt, args, "vs")) != EOF) { switch(c) { case 'v': cmd.flags |= VIEWS_FLAG; break; case 's': cmd.flags |= SAVE_DBF_FLAG; break; default: s390dbf_usage(&cmd); return; } } s390dbf_cmd(&cmd); } #endif crash-utility-crash-9cd43f5/symbols.c0000664000372000037200000152646315107550337017251 0ustar juerghjuergh/* symbols.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2020 David Anderson * Copyright (C) 2002-2020 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #if defined(GDB_7_6) || defined(GDB_10_2) || defined(GDB_16_2) #define __CONFIG_H__ 1 #include "config.h" #endif #include "bfd.h" #include "demangle.h" static void store_symbols(bfd *, int, void *, long, unsigned int); static void store_sysmap_symbols(void); static ulong relocate(ulong, char *, int); static int relocate_force(ulong, char *); static void kaslr_init(void); static void strip_module_symbol_end(char *s); static int compare_syms(const void *, const void *); static int compare_mods(const void *, const void *); static int compare_prios(const void *v1, const void *v2); static int compare_size_name(const void *, const void *); struct type_request; static void append_struct_symbol (struct gnu_request *, void *); static void request_types(ulong, ulong, char *); static asection *get_kernel_section(char *); static char * get_section(ulong vaddr, char *buf); static void symbol_dump(ulong, char *); static void check_for_dups(struct load_module *); static struct syment *kallsyms_module_symbol(struct load_module *, symbol_info *); static int kallsyms_module_function_size(struct syment *, struct load_module *, ulong *); static void store_load_module_symbols \ (bfd *, int, void *, long, uint, ulong, char *); static int load_module_index(struct syment *); static void section_header_info(bfd *, asection *, void *); static void store_section_data(struct load_module *, bfd *, asection *); static void calculate_load_order_v1(struct load_module *, bfd *); static void calculate_load_order_v2(struct load_module *, bfd *, int, void *, long, unsigned int); static void calculate_load_order_6_4(struct load_module *, bfd *, int, void *, long, unsigned int); static void check_insmod_builtin(struct load_module *, 
int, ulong *); static int is_insmod_builtin(struct load_module *, struct syment *); struct load_module; static int add_symbol_file(struct load_module *); static int add_symbol_file_kallsyms(struct load_module *, struct gnu_request *); static void find_mod_etext(struct load_module *); static long rodata_search(ulong *, ulong); static int ascii_long(ulong word); static int is_bfd_format(char *); static int is_binary_stripped(char *); static int namespace_ctl(int, struct symbol_namespace *, void *, void *); static void symval_hash_init(void); static struct syment *symval_hash_search(ulong); static void symname_hash_init(void); static void symname_hash_install(struct syment *); static struct syment *symname_hash_search(struct syment *[], char *); static void gnu_qsort(bfd *, void *, long, unsigned int, asymbol *, asymbol *); static int check_gnu_debuglink(bfd *); static int separate_debug_file_exists(const char *, unsigned long, int *); static int store_module_kallsyms_v1(struct load_module *, int, int, char *); static int store_module_kallsyms_v2(struct load_module *, int, int, char *); static void datatype_error(void **, char *, char *, char *, int); static char *get_thisfile(void); struct elf_common; static void Elf32_Sym_to_common(Elf32_Sym *, struct elf_common *); static void Elf64_Sym_to_common(Elf64_Sym *, struct elf_common *); static void cmd_datatype_common(ulong); static void do_datatype_addr(struct datatype_member *, ulong, int, ulong, char **, int); static void process_gdb_output(char *, unsigned, const char *, int); static char *expr_type_name(const char *); static int display_per_cpu_info(struct syment *, int, char *); static struct load_module *get_module_percpu_sym_owner(struct syment *); static int is_percpu_symbol(struct syment *); static void dump_percpu_symbols(struct load_module *); static void print_struct_with_dereference(ulong, struct datatype_member *, ulong); static int dereference_pointer(ulong, struct datatype_member *, ulong); #define 
KERNEL_SECTIONS (void *)(1) #define MODULE_SECTIONS (void *)(2) #define VERIFY_SECTIONS (void *)(3) #define EV_DWARFEXTRACT 101010101 #define PARSE_FOR_DATA (1) #define PARSE_FOR_DECLARATION (2) static void parse_for_member(struct datatype_member *, ulong); static int show_member_offset(FILE *, struct datatype_member *, char *); struct struct_elem; static void free_structure(struct struct_elem *); static unsigned char is_right_brace(const char *); static struct struct_elem *find_node(struct struct_elem *, char *); static void dump_node(struct struct_elem *, char *, unsigned char, unsigned char); static int module_mem_type(ulong, struct load_module *); static ulong module_mem_end(ulong, struct load_module *); static int in_module_range(ulong, struct load_module *, int, int); static struct syment *value_search_module_6_4(ulong, ulong *); static struct syment *next_symbol_by_symname(char *); static struct syment *prev_symbol_by_symname(char *); static struct syment *next_module_symbol_by_value(ulong); static struct syment *prev_module_symbol_by_value(ulong); static struct syment *next_module_symbol_by_syment(struct syment *); static struct syment *prev_module_symbol_by_syment(struct syment *); struct module_tag { char *start; char *end; char *start_str; char *end_str; }; #define MODULE_TAG(type, suffix) ("_MODULE_" #type "_" #suffix "_") #define MODULE_STR(type, suffix) ( "MODULE " #type " " #suffix) #define MODULE_TAGS(type) { \ .start = MODULE_TAG(type, START), \ .end = MODULE_TAG(type, END), \ .start_str = MODULE_STR(type, START), \ .end_str = MODULE_STR(type, END) \ } static const struct module_tag module_tag[] = { MODULE_TAGS(TEXT), MODULE_TAGS(DATA), MODULE_TAGS(RODATA), MODULE_TAGS(RO_AFTER_INIT), MODULE_TAGS(INIT_TEXT), MODULE_TAGS(INIT_DATA), MODULE_TAGS(INIT_RODATA), }; /* * structure/union printing stuff */ #define UINT8 (0x1) #define INT8 (0x2) #define UINT16 (0x4) #define INT16 (0x8) #define UINT32 (0x10) #define INT32 (0x20) #define UINT64 (0x40) #define 
INT64 (0x80) #define POINTER (0x100) #define FUNCTION (0x200) #define UNION_REQUEST (0x400) #define STRUCT_REQUEST (0x800) #define ARRAY (0x1000) #define ENUM (0x2000) #define TYPEDEF (0x4000) #define STRUCT_VERBOSE (0x8000) #define SHOW_OFFSET (0x10000) #define IN_UNION (0x20000) #define IN_STRUCT (0x40000) #define DATATYPE_QUERY (0x80000) #define ANON_MEMBER_QUERY (0x100000) #define SHOW_RAW_DATA (0x200000) #define DEREF_POINTERS (0x400000) #define INTEGER_TYPE (UINT8|INT8|UINT16|INT16|UINT32|INT32|UINT64|INT64) #define INITIAL_INDENT (4) #define INDENT_INCR (2) static void whatis_datatype(char *, ulong, FILE *); static void whatis_variable(struct syment *); static void print_struct(char *, ulong); static void print_union(char *, ulong); static void dump_datatype_member(FILE *, struct datatype_member *); static void dump_datatype_flags(ulong, FILE *); static long anon_member_offset(char *, char *); static long anon_member_size(char *, char *); static int gdb_whatis(char *); static void do_datatype_declaration(struct datatype_member *, ulong); static int member_to_datatype(char *, struct datatype_member *, ulong); #define DEBUGINFO_ERROR_MESSAGE1 \ "the use of a System.map file requires that the accompanying namelist\nargument is a kernel file built with the -g CFLAG. The namelist argument\nsupplied in this case is a debuginfo file, which must be accompanied by the\nkernel file from which it was derived.\n" #define DEBUGINFO_ERROR_MESSAGE2 \ "The namelist argument supplied in this case is a debuginfo file,\nwhich must be accompanied by the kernel file from which it was derived.\n" /* * This routine scours the namelist for kernel text and data symbols, * sorts, and stores, them in a static table for quick reference. 
*/ void symtab_init(void) { char **matching; long symcount; void *minisyms; unsigned int size; asymbol *sort_x; asymbol *sort_y; if ((st->bfd = bfd_openr(pc->namelist, NULL)) == NULL) error(FATAL, "cannot open object file: %s\n", pc->namelist); if (!bfd_check_format_matches(st->bfd, bfd_object, &matching)) error(FATAL, "cannot determine object file format: %s\n", pc->namelist); /* * Check whether the namelist is a kerntypes file built by * dwarfextract, which places a magic number in e_version. */ if (file_elf_version(pc->namelist) == EV_DWARFEXTRACT) pc->flags |= KERNTYPES; if (pc->flags & SYSMAP) { bfd_map_over_sections(st->bfd, section_header_info, VERIFY_SECTIONS); if ((st->flags & (NO_SEC_LOAD|NO_SEC_CONTENTS)) == (NO_SEC_LOAD|NO_SEC_CONTENTS)) { error(INFO, "%s: no text and data contents\n", pc->namelist); error(FATAL, pc->flags & SYSMAP_ARG ? DEBUGINFO_ERROR_MESSAGE1 : DEBUGINFO_ERROR_MESSAGE2); } store_sysmap_symbols(); return; } else if (LKCD_KERNTYPES()) error(FATAL, "%s: use of kerntypes requires a system map\n", pc->namelist); /* * Pull a bait-and-switch on st->bfd if we've got a separate * .gnu_debuglink file that matches the CRC. Not done for kerntypes. */ if (!(LKCD_KERNTYPES()) && !(bfd_get_file_flags(st->bfd) & HAS_SYMS)) { if (!check_gnu_debuglink(st->bfd)) no_debugging_data(FATAL); } /* * Gather references to the kernel sections. 
*/ if ((st->sections = (struct sec *) malloc(st->bfd->section_count * sizeof(struct sec *))) == NULL) error(FATAL, "symbol table section array malloc: %s\n", strerror(errno)); BZERO(st->sections, st->bfd->section_count * sizeof(struct sec *)); st->first_section_start = st->last_section_end = 0; bfd_map_over_sections(st->bfd, section_header_info, KERNEL_SECTIONS); if ((st->flags & (NO_SEC_LOAD|NO_SEC_CONTENTS)) == (NO_SEC_LOAD|NO_SEC_CONTENTS)) { if (!pc->namelist_debug && !pc->debuginfo_file) { error(INFO, "%s: no text and data contents\n", pc->namelist); error(FATAL, DEBUGINFO_ERROR_MESSAGE2); } } symcount = bfd_read_minisymbols(st->bfd, FALSE, &minisyms, &size); if (symcount <= 0) no_debugging_data(FATAL); sort_x = bfd_make_empty_symbol(st->bfd); sort_y = bfd_make_empty_symbol(st->bfd); if (sort_x == NULL || sort_y == NULL) error(FATAL, "bfd_make_empty_symbol() failed\n"); kaslr_init(); gnu_qsort(st->bfd, minisyms, symcount, size, sort_x, sort_y); store_symbols(st->bfd, FALSE, minisyms, symcount, size); free(minisyms); symname_hash_init(); symval_hash_init(); } /* * Adapted from gdb's get_debug_link_info() * * Look in: current directory * basename-of-namelist/.debug directory * /usr/lib/debug/boot (since we know it's a Red Hat kernel) */ static int check_gnu_debuglink(bfd *bfd) { int i, exists, found; asection *sect; bfd_size_type debuglink_size; char *contents; int crc_offset; unsigned long crc32; char *dirname; char *namelist_debug; char **matching; sect = bfd_get_section_by_name(bfd, ".gnu_debuglink"); if (!sect) { error(INFO, "%s: no .gnu_debuglink section\n", pc->namelist); return FALSE; } debuglink_size = bfd_section_size(sect); contents = GETBUF(debuglink_size); bfd_get_section_contents(bfd, sect, contents, (file_ptr)0, (bfd_size_type)debuglink_size); crc_offset = strlen (contents) + 1; crc_offset = (crc_offset + 3) & ~3; crc32 = bfd_get_32(bfd, (bfd_byte *)(contents + crc_offset)); if (CRASHDEBUG(1)) error(NOTE, "gnu_debuglink file: %s\ncrc32: %lx\n", 
contents, crc32); if ((pc->debuginfo_file = (char *) malloc(((strlen(pc->namelist) + strlen("/.debug/") + + strlen(".debug") + strlen(" /usr/lib/debug/boot/ "))*10) + strlen(pc->namelist_debug ? pc->namelist_debug : " "))) == NULL) error(FATAL, "debuginfo file name malloc: %s\n", strerror(errno)); dirname = GETBUF(strlen(pc->namelist)+1); strcpy(dirname, pc->namelist); for (i = strlen(dirname)-1; i >= 0; i--) { if (dirname[i] == '/') break; } dirname[i+1] = NULLCHAR; if (!strlen(dirname)) sprintf(dirname, "."); namelist_debug = NULL; if (pc->namelist_debug) { sprintf(pc->debuginfo_file, "%s", pc->namelist_debug); if (separate_debug_file_exists(pc->debuginfo_file, crc32, &exists)) { if (CRASHDEBUG(1)) fprintf(fp, "%s: CRC matches\n", pc->debuginfo_file); st->flags |= CRC_MATCHES; goto reset_bfd; } else { if ((st->flags & FORCE_DEBUGINFO) && exists) { error(WARNING, "%s:\n CRC value does not match\n\n", pc->debuginfo_file); goto reset_bfd; } else error(INFO, "%s:\n CRC value does not match\n\n", pc->debuginfo_file); namelist_debug = pc->namelist_debug; pc->namelist_debug = NULL; } } found = 0; sprintf(pc->debuginfo_file, "%s/%s", dirname, contents); if (separate_debug_file_exists(pc->debuginfo_file, crc32, &exists)) { if (CRASHDEBUG(1)) fprintf(fp, "%s: CRC matches\n", pc->debuginfo_file); st->flags |= CRC_MATCHES; goto reset_bfd; } else { if (CRASHDEBUG(1)) fprintf(fp, "%s: %s\n", pc->debuginfo_file, exists ? "CRC does not match" : "not readable/found"); if (exists) { error(INFO, "%s: CRC does not match\n\n", pc->debuginfo_file); found++; } } sprintf(pc->debuginfo_file, "%s/.debug/%s", dirname, contents); if (separate_debug_file_exists(pc->debuginfo_file, crc32, &exists)) { if (CRASHDEBUG(1)) fprintf(fp, "%s: CRC matches\n", pc->debuginfo_file); st->flags |= CRC_MATCHES; goto reset_bfd; } else { if (CRASHDEBUG(1)) fprintf(fp, "%s: %s\n", pc->debuginfo_file, exists ? 
"CRC does not match" : "not readable/found"); if (exists) { error(INFO, "%s: CRC does not match\n\n", pc->debuginfo_file); found++; } } sprintf(pc->debuginfo_file, "/usr/lib/debug/boot/%s", contents); if (separate_debug_file_exists(pc->debuginfo_file, crc32, &exists)) { if (CRASHDEBUG(1)) fprintf(fp, "%s: CRC matches\n", pc->debuginfo_file); st->flags |= CRC_MATCHES; goto reset_bfd; } else { if (CRASHDEBUG(1)) fprintf(fp, "%s: %s\n", pc->debuginfo_file, exists ? "CRC does not match" : "not readable/found"); if (exists) { error(INFO, "%s: CRC does not match\n\n", pc->debuginfo_file); found++; } } if (!found && namelist_debug) { error(INFO, "%s:\n use of -f option may suffice, or may fail miserably\n", namelist_debug); } if (!found && !namelist_debug) { no_debugging_data(INFO); error(INFO, "%s: debuginfo file not found\n", contents); error(FATAL, "either install the appropriate kernel debuginfo package, or\n copy %s to this machine", contents); } return FALSE; reset_bfd: if ((st->bfd = bfd_openr(pc->debuginfo_file, NULL)) == NULL) error(FATAL, "cannot open object file: %s\n", pc->debuginfo_file); if (!bfd_check_format_matches(st->bfd, bfd_object, &matching)) error(FATAL, "cannot determine object file format: %s\n", pc->debuginfo_file); FREEBUF(contents); FREEBUF(dirname); return TRUE; } /* * Based upon gdb's separate_debug_file_exists(). 
*/ static int separate_debug_file_exists(const char *name, unsigned long crc, int *exists) { unsigned long file_crc = 0; int fd; char buffer[8*1024]; size_t count; fd = open(name, O_RDONLY); if (fd < 0) { *exists = FALSE; return 0; } *exists = TRUE; while ((count = read(fd, buffer, sizeof(buffer))) > 0) #ifdef GDB_5_3 file_crc = calc_crc32(file_crc, buffer, count); #else #if defined(GDB_7_6) || defined(GDB_10_2) || defined(GDB_16_2) file_crc = bfd_calc_gnu_debuglink_crc32(file_crc, (unsigned char *)buffer, count); #else file_crc = gnu_debuglink_crc32(file_crc, (unsigned char *)buffer, count); #endif #endif close (fd); return crc == file_crc; } /* * Callback for gdb to use a specified vmlinux.debug file. */ char * check_specified_kernel_debug_file() { if (pc->flags & GDB_INIT) return NULL; return (pc->namelist_debug ? pc->namelist_debug : NULL); } /* * Common bailout/warning routine when running against non-debug kernels. * * INFO: used when this routine should return. * FATAL: kills function if runtime, or kills program if during init. * WARNING: called by gdb_session_init() only, in an attempt to at least * get by with built-in debug data; if not possible the program * is killed. */ void no_debugging_data(int error_type) { switch (error_type) { case INFO: error(INFO, "%s: no debugging data available\n", pc->namelist); break; case FATAL: error(FATAL, "%s%s: no debugging data available\n", pc->flags & RUNTIME ? "" : "\n", pc->namelist); clean_exit(1); case WARNING: error(FATAL, "\n%s: no debugging data available\n", pc->namelist); clean_exit(1); } } /* * Get the address space formerly used as init-time text. While there * get the boundaries of the kernel .rodata section so that it won't * be confused with text. * * This is done indirectly by the call-back to section_header_info(). 
*/ void get_text_init_space(void) { asection *section = NULL; if (pc->flags & SYSMAP) return; if (machine_type("ARM")) section = get_kernel_section(".init"); if (!section && !(section = get_kernel_section(".text.init"))) section = get_kernel_section(".init.text"); if (!section) { error(WARNING, "cannot determine text init space\n"); return; } kt->stext_init = (ulong)bfd_section_vma(section); kt->etext_init = kt->stext_init + (ulong)bfd_section_size(section); if (kt->relocate) { kt->stext_init -= kt->relocate; kt->etext_init -= kt->relocate; } } /* * Strip gcc-generated cloned text symbol name endings. */ static char * strip_symbol_end(const char *name, char *buf) { int i; char *p; char *strip[] = { ".isra.", ".part.", ".llvm.", NULL }; if (st->flags & NO_STRIP) return (char *)name; for (i = 0; strip[i]; i++) { if ((p = strstr(name, strip[i]))) { if (buf) { strcpy(buf, name); buf[p-name] = NULLCHAR; return buf; } else { *p = NULLCHAR; return (char *)name; } } } return (char *)name; } /* * Gather the relevant information from the dumpfile or live system * and determine whether to derive the KASLR offset. * * Setting st->_stext_vmlinux to UNINITIALIZED will trigger the * search for "_stext" from the vmlinux file during the initial * symbol sort operation. * * Setting RELOC_AUTO will ensure that derive_kaslr_offset() is * called after the sorting operation has captured the vmlinux * file's "_stext" symbol value -- which it will compare to the * relocated "_stext" value found in either a dumpfile's vmcoreinfo * or in /proc/kallsyms on a live system. * * Setting KASLR_CHECK will trigger a search for "module_load_offset" * or "kaslr_get_random_long" during the initial symbol sort operation, and * if found, will set (RELOC_AUTO|KASLR). On live systems, the search * is done here by checking /proc/kallsyms. 
static void
kaslr_init(void)
{
	char *string;

	/*
	 * KASLR handling applies only to these architectures, and an
	 * explicit --reloc argument (RELOC_SET) overrides auto-detection.
	 */
	if ((!machine_type("X86_64") && !machine_type("ARM64") &&
	     !machine_type("X86") && !machine_type("S390X") &&
	     !machine_type("RISCV64") && !machine_type("LOONGARCH64")) ||
	    (kt->flags & RELOC_SET))
		return;

	/* Capture the relocated "_stext" value from the vmcoreinfo data. */
	if (!kt->vmcoreinfo._stext_SYMBOL &&
	    (string = pc->read_vmcoreinfo("SYMBOL(_stext)"))) {
		kt->vmcoreinfo._stext_SYMBOL =
			htol(string, RETURN_ON_ERROR, NULL);
		free(string);
	}

	/*
	 *  --kaslr=auto
	 */
	if ((kt->flags2 & (RELOC_AUTO|KASLR)) == (RELOC_AUTO|KASLR))
		st->_stext_vmlinux = UNINITIALIZED;

	/*
	 * On a live system, the presence of either symbol in
	 * /proc/kallsyms indicates a KASLR-capable kernel.
	 */
	if (ACTIVE() &&   /* Linux 3.15 */
	    ((symbol_value_from_proc_kallsyms("kaslr_get_random_long") !=
	    BADVAL) ||
	    (symbol_value_from_proc_kallsyms("module_load_offset") !=
	    BADVAL))) {
		kt->flags2 |= (RELOC_AUTO|KASLR);
		st->_stext_vmlinux = UNINITIALIZED;
	}

	/* s390x kernels always get the auto-relocation treatment. */
	if (machine_type("S390X")) {
		kt->flags2 |= (RELOC_AUTO|KASLR);
		st->_stext_vmlinux = UNINITIALIZED;
	}

	if (QEMU_MEM_DUMP_NO_VMCOREINFO()) {
		/* No vmcoreinfo: ask the dump-format handlers directly. */
		if (KDUMP_DUMPFILE() && kdump_kaslr_check()) {
			kt->flags2 |= KASLR_CHECK;
		} else if (DISKDUMP_DUMPFILE() && diskdump_kaslr_check()) {
			kt->flags2 |= KASLR_CHECK;
		}
	} else if (KDUMP_DUMPFILE() || DISKDUMP_DUMPFILE()) {
		 /* Linux 3.14 */
		if ((string = pc->read_vmcoreinfo("KERNELOFFSET"))) {
			free(string);
			kt->flags2 |= KASLR_CHECK;
			st->_stext_vmlinux = UNINITIALIZED;
		}
	}

	if (SADUMP_DUMPFILE() || QEMU_MEM_DUMP_NO_VMCOREINFO() ||
	    VMSS_DUMPFILE()) {
		/* Need for kaslr_offset and phys_base */
		kt->flags2 |= KASLR_CHECK;
		st->_stext_vmlinux = UNINITIALIZED;
	}
}

/*
 *  Derives the kernel aslr offset by comparing the _stext symbol from
 *  the vmcoreinfo in the dump file to the _stext symbol in the vmlinux
 *  file.
 */
static void
derive_kaslr_offset(bfd *abfd, int dynamic, bfd_byte *start, bfd_byte *end,
		    unsigned int size, asymbol *store)
{
	/* NOTE(review): this local shadows the file-scope relocate() function */
	unsigned long relocate;
	ulong _stext_relocated;

	if (SADUMP_DUMPFILE() || QEMU_MEM_DUMP_NO_VMCOREINFO() ||
	    VMSS_DUMPFILE()) {
		ulong kaslr_offset = 0;
		ulong phys_base = 0;

		/*
		 * These dump formats carry no vmcoreinfo, so both the
		 * KASLR offset and the physical base are derived by
		 * scanning the dump itself.
		 */
		calc_kaslr_offset(&kaslr_offset, &phys_base);

		if (kaslr_offset) {
			kt->relocate = kaslr_offset * -1;
			kt->flags |= RELOC_SET;
		}

		if (phys_base) {
			/* hand the physical base to the matching dump handler */
			if (SADUMP_DUMPFILE())
				sadump_set_phys_base(phys_base);
			else if (KDUMP_DUMPFILE())
				kdump_set_phys_base(phys_base);
			else if (DISKDUMP_DUMPFILE())
				diskdump_set_phys_base(phys_base);
			else if (VMSS_DUMPFILE())
				vmware_vmss_set_phys_base(phys_base);
		}

		return;
	}

	/* Get the runtime "_stext": live from /proc/kallsyms, else vmcoreinfo. */
	if (ACTIVE()) {
		_stext_relocated = symbol_value_from_proc_kallsyms("_stext");
		if (_stext_relocated == BADVAL)
			return;
	} else {
		_stext_relocated = kt->vmcoreinfo._stext_SYMBOL;
		if (_stext_relocated == 0)
			return;
	}

	/*
	 * To avoid mistaking a mismatched kernel version with
	 * a kaslr offset, we make sure that the offset is
	 * aligned by 0x1000, as it always will be for kaslr.
	 */
	if (st->_stext_vmlinux && (st->_stext_vmlinux != UNINITIALIZED)) {
		relocate = st->_stext_vmlinux - _stext_relocated;
		if (relocate && !(relocate & 0xfff)) {
			kt->relocate = relocate;
			kt->flags |= RELOC_SET;
		}
	}

	if (CRASHDEBUG(1) && (kt->flags & RELOC_SET)) {
		fprintf(fp, "KASLR:\n");
		fprintf(fp, " _stext from %s: %lx\n",
			basename(pc->namelist), st->_stext_vmlinux);
		fprintf(fp, " _stext from %s: %lx\n",
			ACTIVE() ? "/proc/kallsyms" : "vmcoreinfo",
			_stext_relocated);
		fprintf(fp, " relocate: %lx (%ldMB)\n",
			kt->relocate * -1, (kt->relocate * -1) >> 20);
	}
}

/*
 *  Store the symbols gathered by symtab_init().  The symbols are stored
 *  in increasing numerical order.
 */
*/ static void store_symbols(bfd *abfd, int dynamic, void *minisyms, long symcount, unsigned int size) { asymbol *store; asymbol *sym; bfd_byte *from, *fromend; symbol_info syminfo; struct syment *sp; char buf[BUFSIZE]; char *name; int first; if ((store = bfd_make_empty_symbol(abfd)) == NULL) error(FATAL, "bfd_make_empty_symbol() failed\n"); if ((st->symtable = (struct syment *) calloc(symcount, sizeof(struct syment))) == NULL) error(FATAL, "symbol table syment space malloc: %s\n", strerror(errno)); if (!namespace_ctl(NAMESPACE_INIT, &st->kernel_namespace, (void *)symcount, NULL)) error(FATAL, "symbol table namespace malloc: %s\n", strerror(errno)); st->syment_size = symcount * sizeof(struct syment); st->symcnt = 0; sp = st->symtable; first = 0; from = (bfd_byte *) minisyms; fromend = from + symcount * size; if (machine_type("X86")) { if (kt->flags2 & KASLR) { if ((kt->flags2 & RELOC_AUTO) && !(kt->flags & RELOC_SET)) derive_kaslr_offset(abfd, dynamic, from, fromend, size, store); } else if (!(kt->flags & RELOC_SET)) kt->flags |= RELOC_FORCE; } else if (machine_type("X86_64") || machine_type("ARM64") || machine_type("S390X") || machine_type("RISCV64") || machine_type("LOONGARCH64")) { if ((kt->flags2 & RELOC_AUTO) && !(kt->flags & RELOC_SET)) derive_kaslr_offset(abfd, dynamic, from, fromend, size, store); } else kt->flags &= ~RELOC_SET; for (; from < fromend; from += size) { if ((sym = bfd_minisymbol_to_symbol(abfd, dynamic, from, store)) == NULL) error(FATAL, "bfd_minisymbol_to_symbol() failed\n"); bfd_get_symbol_info(abfd, sym, &syminfo); name = strip_symbol_end(syminfo.name, buf); if (machdep->verify_symbol(name, syminfo.value, syminfo.type)) { if (kt->flags & (RELOC_SET|RELOC_FORCE)) sp->value = relocate(syminfo.value, (char *)syminfo.name, !(first++)); else sp->value = syminfo.value; sp->type = syminfo.type; namespace_ctl(NAMESPACE_INSTALL, &st->kernel_namespace, sp, name); sp++; st->symcnt++; } } st->symend = &st->symtable[st->symcnt]; st->flags |= 
KERNEL_SYMS; namespace_ctl(NAMESPACE_COMPLETE, &st->kernel_namespace, st->symtable, st->symend); } /* * Store the symbols from the designated System.map. The symbols are stored * in increasing numerical order. */ static void store_sysmap_symbols(void) { int c, first; long symcount; char buf[BUFSIZE]; char name[BUFSIZE]; FILE *map; char *mapitems[MAXARGS]; struct syment *sp, syment; if ((map = fopen(pc->system_map, "r")) == NULL) error(FATAL, "cannot open %s\n", pc->system_map); symcount = 0; while (fgets(buf, BUFSIZE, map)) symcount++; if ((st->symtable = (struct syment *) calloc(symcount, sizeof(struct syment))) == NULL) error(FATAL, "symbol table syment space malloc: %s\n", strerror(errno)); if (!namespace_ctl(NAMESPACE_INIT, &st->kernel_namespace, (void *)symcount, NULL)) error(FATAL, "symbol table namespace malloc: %s\n", strerror(errno)); if (!machine_type("X86") && !machine_type("X86_64") && !machine_type("ARM64") && !machine_type("S390X") && !machine_type("LOONGARCH64")) kt->flags &= ~RELOC_SET; first = 0; st->syment_size = symcount * sizeof(struct syment); st->symcnt = 0; sp = st->symtable; rewind(map); while (fgets(buf, BUFSIZE, map)) { if ((c = parse_line(buf, mapitems)) != 3) continue; syment.value = htol(mapitems[0], FAULT_ON_ERROR, NULL); syment.type = mapitems[1][0]; syment.name = mapitems[2]; strcpy(name, syment.name); strip_symbol_end(name, NULL); if (machdep->verify_symbol(name, syment.value, syment.type)) { if (kt->flags & RELOC_SET) sp->value = relocate(syment.value, syment.name, !(first++)); else sp->value = syment.value; sp->type = syment.type; namespace_ctl(NAMESPACE_INSTALL, &st->kernel_namespace, sp, name); sp++; st->symcnt++; } } fclose(map); st->symend = &st->symtable[st->symcnt]; st->flags |= KERNEL_SYMS; namespace_ctl(NAMESPACE_COMPLETE, &st->kernel_namespace, st->symtable, st->symend); symname_hash_init(); symval_hash_init(); } /* * Handle x86/arm64 kernels configured such that the vmlinux symbols * are not as loaded into the kernel 
(not unity-mapped). */ static ulong relocate(ulong symval, char *symname, int first_symbol) { if (XEN_HYPER_MODE()) { kt->flags &= ~(RELOC_SET|RELOC_FORCE); return symval; } switch (kt->flags & (RELOC_SET|RELOC_FORCE)) { case RELOC_SET: break; case RELOC_FORCE: if (first_symbol && !relocate_force(symval, symname)) kt->flags &= ~RELOC_FORCE; break; } if (machine_type("X86_64")) { /* * There are some symbols which are outside of any section * either because they are offsets or because they are absolute * addresses. These should not be relocated. */ if (symval >= st->first_section_start && symval <= st->last_section_end) { return symval - kt->relocate; } else { return symval; } } else return symval - kt->relocate; } /* * If no --reloc argument was passed, try to figure it out * by comparing the first vmlinux kernel symbol with the * first /proc/kallsyms symbol. (should be "_text") * * Live system only (at least for now). */ static int relocate_force(ulong symval, char *symname) { int count, found; FILE *kp; char buf[BUFSIZE]; char *kallsyms[MAXARGS]; ulong kallsym; if (!ACTIVE() || !file_exists("/proc/kallsyms", NULL)) { if (CRASHDEBUG(1)) fprintf(fp, "cannot determine relocation value: %s\n", !ACTIVE() ? "not a live system" : "/proc/kallsyms does not exist"); return FALSE; } if ((kp = fopen("/proc/kallsyms", "r")) == NULL) { if (CRASHDEBUG(1)) fprintf(fp, "cannot open /proc/kallsyms to determine relocation\n"); return FALSE; } if (CRASHDEBUG(1)) fprintf(fp, "relocate from: %s\n" " %s @ %lx\n" "relocate to: /proc/kallsyms\n", pc->namelist, symname, symval); found = FALSE; count = kallsym = 0; while (!found && fgets(buf, BUFSIZE, kp) && (parse_line(buf, kallsyms) == 3) && hexadecimal(kallsyms[0], 0)) { if (STREQ(kallsyms[2], symname)) { kallsym = htol(kallsyms[0], RETURN_ON_ERROR, NULL); found = TRUE; } count++; if (CRASHDEBUG(1)) fprintf(fp, " %s @ %s %s\n", kallsyms[2], kallsyms[0], STREQ(kallsyms[2], symname) ? 
"(match!)" : ""); } fclose(kp); /* * If the symbols match and have different values, * force the relocation. */ if (found) { if (symval != kallsym) { kt->relocate = symval - kallsym; return TRUE; } } if (CRASHDEBUG(1)) fprintf(fp, "cannot determine relocation value from" " %d symbols in /proc/kallsyms\n", count); return FALSE; } /* * Get a symbol value from /proc/kallsyms. */ ulong symbol_value_from_proc_kallsyms(char *symname) { FILE *kp; char buf[BUFSIZE]; char *kallsyms[MAXARGS]; ulong kallsym; int found; if (!file_exists("/proc/kallsyms", NULL)) { if (CRASHDEBUG(1)) error(INFO, "cannot determine value of %s: " "/proc/kallsyms does not exist\n\n", symname); return BADVAL; } if ((kp = fopen("/proc/kallsyms", "r")) == NULL) { if (CRASHDEBUG(1)) error(INFO, "cannot determine value of %s: " "cannot open /proc/kallsyms\n\n", symname); return BADVAL; } found = FALSE; while (!found && fgets(buf, BUFSIZE, kp) && (parse_line(buf, kallsyms) == 3)) { if (hexadecimal(kallsyms[0], 0) && STREQ(kallsyms[2], symname)) { kallsym = htol(kallsyms[0], RETURN_ON_ERROR, NULL); found = TRUE; break; } } fclose(kp); return(found ? kallsym : BADVAL); } /* * Install all static kernel symbol values into the symval_hash. 
*/
static void
symval_hash_init(void)
{
	int index;
	struct syment *sp, *sph;

	/* append each static symbol to the tail of its value-hash chain */
	for (sp = st->symtable; sp < st->symend; sp++) {
		index = SYMVAL_HASH_INDEX(sp->value);

		if (st->symval_hash[index].val_hash_head == NULL) {
			st->symval_hash[index].val_hash_head = sp;
			st->symval_hash[index].val_hash_last = sp;
			continue;
		}

		sph = st->symval_hash[index].val_hash_head;
		while (sph->val_hash_next)
			sph = sph->val_hash_next;

		sph->val_hash_next = sp;
	}
}

/*
 * Static kernel symbol value search.
 *
 * Returns the syment whose value matches exactly, or the closest
 * lower-valued entry in the same hash chain (NULL if none).  The
 * per-bucket "val_hash_last" entry caches the last hit so a nearby
 * subsequent search can resume mid-chain.
 */
static struct syment *
symval_hash_search(ulong value)
{
	int index;
	struct syment *sp, *splo;

	index = SYMVAL_HASH_INDEX(value);

	if (!st->symval_hash[index].val_hash_head)
		return NULL;

	st->val_hash_searches += 1;
	st->val_hash_iterations += 1;

	/* resume from the cached entry when it does not overshoot the target */
	if (st->symval_hash[index].val_hash_last->value <= value)
		sp = st->symval_hash[index].val_hash_last;
	else
		sp = st->symval_hash[index].val_hash_head;

	for (splo = NULL; sp; sp = sp->val_hash_next) {
		if (sp->value == value) {
			st->symval_hash[index].val_hash_last = sp;
			return sp;
		}

		if (sp->value > value)
			break;

		st->val_hash_iterations += 1;
		splo = sp;
	}

	/* no exact match: cache and return the closest lower entry */
	if (splo)
		st->symval_hash[index].val_hash_last = splo;

	return splo;
}

/*
 * Store all kernel static symbols into the symname_hash.
 */
static void
symname_hash_init(void)
{
	struct syment *sp;

	for (sp = st->symtable; sp < st->symend; sp++)
		symname_hash_install(sp);

	/* cache the static per-cpu range boundaries when present */
	if ((sp = symbol_search("__per_cpu_start")))
		st->__per_cpu_start = sp->value;
	if ((sp = symbol_search("__per_cpu_end")))
		st->__per_cpu_end = sp->value;
}

/*
 * Hash a symbol name into a SYMNAME_HASH bucket index, mixing the
 * first, middle and last characters of the name.
 */
static unsigned int
symname_hash_index(char *name)
{
	unsigned int len, value;
	unsigned char *array = (unsigned char *)name;

	len = strlen(name);
	if (!len)
		error(FATAL, "The length of the symbol name is zero!\n");

	value = array[len - 1] * array[len / 2];
	return (array[0] ^ value) % SYMNAME_HASH;
}

/*
 * Install a single static kernel symbol into the symname_hash.
 */
static void
symname_hash_install(struct syment *spn)
{
	struct syment *sp;
	unsigned int index;

	index = symname_hash_index(spn->name);
	spn->cnt = 1;

	if ((sp = st->symname_hash[index]) == NULL)
		st->symname_hash[index] = spn;
	else {
		/* append to the chain tail; bump duplicate-name counts
		   on both the existing and the new entry */
		while (sp) {
			if (STREQ(sp->name, spn->name)) {
				sp->cnt++;
				spn->cnt++;
			}
			if (sp->name_hash_next)
				sp = sp->name_hash_next;
			else {
				sp->name_hash_next = spn;
				break;
			}
		}
	}
}

/*
 * Install a single kernel module symbol into the mod_symname_hash.
 * Each hash chain is kept sorted by ascending symbol value.
 */
static void
mod_symname_hash_install(struct syment *spn)
{
	struct syment *sp;
	unsigned int index;

	if (!spn)
		return;

	index = symname_hash_index(spn->name);
	sp = st->mod_symname_hash[index];

	if (!sp || (spn->value < sp->value)) {
		st->mod_symname_hash[index] = spn;
		spn->name_hash_next = sp;
		return;
	}

	for (; sp; sp = sp->name_hash_next) {
		if (spn == sp)
			return;	/* already installed */
		if (!sp->name_hash_next ||
		    spn->value < sp->name_hash_next->value) {
			spn->name_hash_next = sp->name_hash_next;
			sp->name_hash_next = spn;
			return;
		}
	}
}

/*
 * Remove a single kernel module symbol from the mod_symname_hash.
 */
static void
mod_symname_hash_remove(struct syment *spn)
{
	struct syment *sp;
	unsigned int index;

	if (!spn)
		return;

	index = symname_hash_index(spn->name);

	if (st->mod_symname_hash[index] == spn) {
		st->mod_symname_hash[index] = spn->name_hash_next;
		return;
	}

	for (sp = st->mod_symname_hash[index]; sp; sp = sp->name_hash_next) {
		if (sp->name_hash_next == spn) {
			sp->name_hash_next = spn->name_hash_next;
			return;
		}
	}
}

/* Install an inclusive syment range into the mod_symname_hash. */
static void
mod_symtable_hash_install_range(struct syment *from, struct syment *to)
{
	struct syment *sp;

	for (sp = from; sp <= to; sp++)
		mod_symname_hash_install(sp);
}

/* Remove an inclusive syment range from the mod_symname_hash. */
static void
mod_symtable_hash_remove_range(struct syment *from, struct syment *to)
{
	struct syment *sp;

	for (sp = from; sp <= to; sp++)
		mod_symname_hash_remove(sp);
}

/*
 * Kernel symbol name search in the given symname hash table.
 * (Works for both the static-kernel and module name hashes.)
 */
static struct syment *
symname_hash_search(struct syment *table[], char *name)
{
	struct syment *sp;

	sp = table[symname_hash_index(name)];

	while (sp) {
		if (STREQ(sp->name, name))
			return sp;
		sp = sp->name_hash_next;
	}

	return NULL;
}

/*
 * Output for sym -[lL] command.
 */

#define MODULE_PSEUDO_SYMBOL(sp) (STRNEQ((sp)->name, "_MODULE_"))

#define MODULE_START(sp) (STRNEQ((sp)->name, "_MODULE_START_"))
#define MODULE_END(sp) (STRNEQ((sp)->name, "_MODULE_END_"))
#define MODULE_INIT_START(sp) (STRNEQ((sp)->name, "_MODULE_INIT_START_"))
#define MODULE_INIT_END(sp) (STRNEQ((sp)->name, "_MODULE_INIT_END_"))
#define MODULE_SECTION_START(sp) (STRNEQ((sp)->name, "_MODULE_SECTION_START"))
#define MODULE_SECTION_END(sp) (STRNEQ((sp)->name, "_MODULE_SECTION_END"))
#define MODULE_MEM_START(sp,t) (STRNEQ((sp)->name, module_tag[t].start))
#define MODULE_MEM_END(sp,t) (STRNEQ((sp)->name, module_tag[t].end))

/* For 6.4 and later */
static void
module_symbol_dump(char *module)
{
	int i, t;
	struct syment *sp, *sp_end;
	struct load_module *lm;
	const char *p1, *p2;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		/* NULL module means "dump all modules" */
		if (module && !STREQ(module, lm->mod_name))
			continue;

		if (received_SIGINT() || output_closed())
			return;

		/*
		 * module percpu symbols are within the .data..percpu section,
		 * not in any module memory regions.
		 */
		if (MODULE_PERCPU_SYMS_LOADED(lm)) {
			p1 = "MODULE PERCPU START";
			p2 = lm->mod_name;
			fprintf(fp, "%lx %s: %s\n", lm->mod_percpu, p1, p2);

			dump_percpu_symbols(lm);

			p1 = "MODULE PERCPU END";
			fprintf(fp, "%lx %s: %s\n",
				lm->mod_percpu + lm->mod_percpu_size, p1, p2);
		}

		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;

			sp = lm->symtable[t];
			sp_end = lm->symend[t];

			for ( ; sp <= sp_end; sp++) {
				/* translate pseudo symbols into readable
				   region/section markers */
				if (MODULE_PSEUDO_SYMBOL(sp)) {
					if (MODULE_MEM_START(sp, t)) {
						p1 = module_tag[t].start_str;
						p2 = sp->name + strlen(module_tag[t].start);
					} else if (MODULE_MEM_END(sp, t)) {
						p1 = module_tag[t].end_str;
						p2 = sp->name + strlen(module_tag[t].end);
					} else if (MODULE_SECTION_START(sp)) {
						p1 = sp->name + strlen("_MODULE_SECTION_START ");
						p2 = "section start";
					} else if (MODULE_SECTION_END(sp)) {
						p1 = sp->name + strlen("_MODULE_SECTION_END ");
						p2 = "section end";
					} else {
						p1 = "unknown tag";
						p2 = sp->name;
					}
					fprintf(fp, "%lx %s: %s\n", sp->value, p1, p2);
				} else
					show_symbol(sp, 0, SHOW_RADIX());
			}
		}
	}
}

/*
 * Dump kernel and/or module symbols for the sym -[lL] command.
 * Pre-6.4 module layout path; 6.4+ is handled by module_symbol_dump().
 */
static void
symbol_dump(ulong flags, char *module)
{
	int i, start, percpu_syms;
	struct syment *sp, *sp_end;
	struct load_module *lm;
	char *p1, *p2;;

#define TBD 1
#define DISPLAYED 2

	if (flags & KERNEL_SYMS) {
		for (sp = st->symtable; sp < st->symend; sp++) {
			show_symbol(sp, 0, SHOW_RADIX());
			if (received_SIGINT() || output_closed())
				return;
		}
	}

	if (!(flags & MODULE_SYMS))
		return;

	if (MODULE_MEMORY()) {
		module_symbol_dump(module);
		return;
	}

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (module && !STREQ(module, lm->mod_name))
			continue;

		if (received_SIGINT() || output_closed())
			return;

		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;

		/* percpu_syms tracks whether this module's percpu symbols
		   still need to be (TBD) or have been (DISPLAYED) dumped */
		percpu_syms = 0;
		for (start = FALSE; sp <= sp_end; sp++) {
			if (IN_MODULE_PERCPU(sp->value, lm)) {
				if (percpu_syms == DISPLAYED)
					continue;
				if (!start) {
					percpu_syms = TBD;
					continue;
				}
				dump_percpu_symbols(lm);
				percpu_syms = DISPLAYED;
			}

			if (MODULE_PSEUDO_SYMBOL(sp)) {
				if (MODULE_SECTION_START(sp)) {
					p1 = sp->name + strlen("_MODULE_SECTION_START ");
					p2 = "section start";
				} else if (MODULE_SECTION_END(sp)) {
					p1 = sp->name + strlen("_MODULE_SECTION_END ");
					p2 = "section end";
				} else if (MODULE_START(sp)) {
					p1 = "MODULE START";
					p2 = sp->name+strlen("_MODULE_START_");
					start = TRUE;
				} else {
					p1 = "MODULE END";
					p2 = sp->name+strlen("_MODULE_END_");
					if (MODULE_PERCPU_SYMS_LOADED(lm) &&
					    !percpu_syms) {
						dump_percpu_symbols(lm);
						percpu_syms = DISPLAYED;
					}
				}
				fprintf(fp, "%lx %s: %s\n", sp->value, p1, p2);

				if (percpu_syms == TBD) {
					dump_percpu_symbols(lm);
					percpu_syms = DISPLAYED;
				}
			} else
				show_symbol(sp, 0, SHOW_RADIX());
		}

		if (lm->mod_init_symtable) {
			sp = lm->mod_init_symtable;
			sp_end = lm->mod_init_symend;

			for ( ; sp <= sp_end; sp++) {
				if (MODULE_PSEUDO_SYMBOL(sp)) {
					if (MODULE_INIT_START(sp)) {
						p1 = "MODULE INIT START";
						p2 = sp->name+strlen("_MODULE_INIT_START_");
					} else {
						p1 = "MODULE INIT END";
						p2 = sp->name+strlen("_MODULE_INIT_END_");
					}
					fprintf(fp, "%lx %s: %s\n", sp->value, p1, p2);
				} else
					show_symbol(sp, 0, SHOW_RADIX());
			}
		}
	}

#undef TBD
#undef DISPLAYED
}

/*
 * Dump the symbols of a module that fall within its percpu region.
 */
static void
dump_percpu_symbols(struct load_module *lm)
{
	struct syment *sp, *sp_end;

	if (MODULE_PERCPU_SYMS_LOADED(lm)) {
		if (MODULE_MEMORY()) {
			/* The lm should have mod_load_symtable. */
			sp = lm->mod_load_symtable;
			sp_end = lm->mod_load_symend;
		} else {
			sp = lm->mod_symtable;
			sp_end = lm->mod_symend;
		}
		for ( ; sp <= sp_end; sp++) {
			if (IN_MODULE_PERCPU(sp->value, lm))
				show_symbol(sp, 0, SHOW_RADIX());
		}
	}
}

/*
 * Get a pointer to the desired asection.
 */
static asection *
get_kernel_section(char *name)
{
	int i;
	asection **sec;

	sec = (asection **)st->sections;

	for (i = 0; i < st->bfd->section_count; i++, sec++) {
		if (STREQ(name, (*sec)->name))
			return(*sec);
	}

	return NULL;
}

/*
 * Walk through the current set of symbols and check for duplicates.
*/
static void
check_for_dups(struct load_module *lm)
{
	struct syment *sp, *sp_end;

	if (MODULE_MEMORY()) {
		sp = lm->mod_load_symtable;
		sp_end = lm->mod_load_symend;
	} else {
		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;
	}

	for ( ; sp <= sp_end; sp++) {
		if (symbol_name_count(sp->name) > 1)
			error(NOTE, "%s: duplicate symbol name: %s\n",
				lm->mod_name, sp->name);
	}
}

/*
 * Store the externally declared symbols for all modules in the system.
 * allowing for dynamic loading of symbols from individual mod object files
 * during runtime.
 */
struct module_symbol {
	unsigned long value;
	const char *name;
};

void
store_module_symbols_v1(ulong total, int mods_installed)
{
	int i, m;
	ulong mod, mod_next, mod_name;
	uint nsyms;
	ulong syms, size_of_struct;
	long strbuflen, size;
	int mcnt, lm_mcnt;
	struct module_symbol *modsym;
	struct load_module *lm;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE*2];
	char name[BUFSIZE];
	char rodata[BUFSIZE*2];
	char *strbuf, *modbuf, *modsymbuf;
	struct syment *sp;
	ulong first, last;

	st->mods_installed = mods_installed;

	if (!st->mods_installed) {
		st->flags &= ~MODULE_SYMS;
		return;
	}

	/*
	 * If we've been here before, free up everything and start over.
	 */
	if (st->flags & MODULE_SYMS) {
		error(FATAL,
		    "re-initialization of module symbols not implemented yet!\n");
	}

	if ((st->ext_module_symtable = (struct syment *)
	    calloc(total, sizeof(struct syment))) == NULL)
		error(FATAL, "module syment space malloc: %s\n",
			strerror(errno));

	if (!namespace_ctl(NAMESPACE_INIT, &st->ext_module_namespace,
	    (void *)total, NULL))
		error(FATAL, "module namespace malloc: %s\n",
			strerror(errno));

	if ((st->load_modules = (struct load_module *)calloc
	    (st->mods_installed, sizeof(struct load_module))) == NULL)
		error(FATAL, "load_module array malloc: %s\n",
			strerror(errno));

	modbuf = GETBUF(SIZE(module));
	modsymbuf = NULL;
	m = mcnt = mod_next = 0;

	/* walk the guest kernel's module list */
	for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) {
		readmem(mod, KVADDR, modbuf, SIZE(module),
			"module buffer", FAULT_ON_ERROR);

		nsyms = UINT(modbuf + OFFSET(module_nsyms));
		syms = ULONG(modbuf + OFFSET(module_syms));
		size = LONG(modbuf + OFFSET(module_size));
		mod_name = ULONG(modbuf + OFFSET(module_name));
		size_of_struct = ULONG(modbuf + OFFSET(module_size_of_struct));

		if (!read_string(mod_name, name, BUFSIZE-1))
			sprintf(name, "(unknown module)");
		sprintf(rodata, "__insmod_%s_S.rodata", name);

		lm = &st->load_modules[m++];
		BZERO(lm, sizeof(struct load_module));
		lm->mod_base = lm->module_struct = mod;
		lm->mod_size = size;
		lm->mod_size_of_struct = size_of_struct;
		if (strlen(name) < MAX_MOD_NAME)
			strcpy(lm->mod_name, name);
		else {
			error(INFO,
			    "module name greater than MAX_MOD_NAME: %s\n",
				name);
			BCOPY(name, lm->mod_name, MAX_MOD_NAME-1);
		}

		lm->mod_flags = MOD_EXT_SYMS;
		lm->mod_ext_symcnt = mcnt;
		lm->mod_etext_guess = 0;

		/* install the _MODULE_START_<name> pseudo symbol */
		st->ext_module_symtable[mcnt].value = mod;
		st->ext_module_symtable[mcnt].type = 'm';
		st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
		sprintf(buf2, "%s%s", "_MODULE_START_", name);
		namespace_ctl(NAMESPACE_INSTALL, &st->ext_module_namespace,
			&st->ext_module_symtable[mcnt], buf2);
		lm_mcnt = mcnt;
		mcnt++;

		if (nsyms) {
			modsymbuf = GETBUF(sizeof(struct module_symbol)*nsyms);
			readmem((ulong)syms, KVADDR, modsymbuf,
				nsyms * sizeof(struct module_symbol),
				"module symbols", FAULT_ON_ERROR);
		}

		/* find the lowest/highest name-string addresses so one
		   block read can cover all of the symbol names */
		for (i = first = last = 0; i < nsyms; i++) {
			modsym = (struct module_symbol *)
			    (modsymbuf + (i * sizeof(struct module_symbol)));
			if (!first || first > (ulong)modsym->name)
				first = (ulong)modsym->name;
			if ((ulong)modsym->name > last)
				last = (ulong)modsym->name;
		}

		if (last > first) {
			strbuflen = (last-first) + BUFSIZE;
			/* clamp the read so it stays inside the module */
			if ((first + strbuflen) >=
			    (lm->mod_base + lm->mod_size)) {
				strbuflen = (lm->mod_base + lm->mod_size) -
					first;
			}
			strbuf = GETBUF(strbuflen);

			if (!readmem(first, KVADDR, strbuf, strbuflen,
			    "module symbol strings", RETURN_ON_ERROR)) {
				FREEBUF(strbuf);
				strbuf = NULL;
			}
		} else
			strbuf = NULL;

		for (i = 0; i < nsyms; i++) {
			modsym = (struct module_symbol *)
			    (modsymbuf + (i * sizeof(struct module_symbol)));

			BZERO(buf1, BUFSIZE);

			/* fall back to a per-symbol read when the block
			   read of the string area failed */
			if (strbuf)
				strcpy(buf1,
					&strbuf[(ulong)modsym->name - first]);
			else
				read_string((ulong)modsym->name, buf1,
					BUFSIZE-1);

			if (strlen(buf1)) {
				st->ext_module_symtable[mcnt].value =
					modsym->value;
				st->ext_module_symtable[mcnt].type = '?';
				st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
				strip_module_symbol_end(buf1);
				strip_symbol_end(buf1, NULL);
				namespace_ctl(NAMESPACE_INSTALL,
					&st->ext_module_namespace,
					&st->ext_module_symtable[mcnt], buf1);

				if (strstr(buf1, rodata))
					lm->mod_etext_guess = modsym->value;

				sprintf(buf2, "__insmod_%s_O/", lm->mod_name);
				if (strstr(buf1, buf2) &&
				    !strstr(buf1, "modules"))
					lm->mod_flags |= MOD_INITRD;

				mcnt++;
			}
		}

		if (modsymbuf) {
			FREEBUF(modsymbuf);
			modsymbuf = NULL;
		}

		if (strbuf)
			FREEBUF(strbuf);

		/*
		 * If the module was compiled with kallsyms, add them in.
		 */
		switch (kt->flags & (KALLSYMS_V1|KALLSYMS_V2))
		{
		case KALLSYMS_V1:
			mcnt += store_module_kallsyms_v1(lm, lm_mcnt, mcnt,
				modbuf);
			break;
		case KALLSYMS_V2:	/* impossible, I hope... */
			mcnt += store_module_kallsyms_v2(lm, lm_mcnt, mcnt,
				modbuf);
			break;
		}

		/* install the _MODULE_END_<name> pseudo symbol */
		st->ext_module_symtable[mcnt].value = mod + size;
		st->ext_module_symtable[mcnt].type = 'm';
		st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
		sprintf(buf2, "%s%s", "_MODULE_END_", name);
		namespace_ctl(NAMESPACE_INSTALL, &st->ext_module_namespace,
			&st->ext_module_symtable[mcnt], buf2);
		mcnt++;

		lm->mod_ext_symcnt = mcnt - lm->mod_ext_symcnt;

		if (!lm->mod_etext_guess)
			find_mod_etext(lm);

		NEXT_MODULE(mod_next, modbuf);
	}

	FREEBUF(modbuf);

	st->ext_module_symcnt = mcnt;
	st->ext_module_symend = &st->ext_module_symtable[mcnt];

	namespace_ctl(NAMESPACE_COMPLETE, &st->ext_module_namespace,
		st->ext_module_symtable, st->ext_module_symend);

	qsort(st->ext_module_symtable, mcnt, sizeof(struct syment),
		compare_syms);

	qsort(st->load_modules, m, sizeof(struct load_module), compare_mods);

	/* resolve each module's symtable range from its pseudo symbols */
	for (m = 0; m < st->mods_installed; m++) {
		lm = &st->load_modules[m];
		sprintf(buf1, "_MODULE_START_%s", lm->mod_name);
		sprintf(buf2, "_MODULE_END_%s", lm->mod_name);

		for (sp = st->ext_module_symtable;
		     sp < st->ext_module_symend; sp++) {
			if (STREQ(sp->name, buf1)) {
				lm->mod_ext_symtable = sp;
				lm->mod_symtable = sp;
			}
			if (STREQ(sp->name, buf2)) {
				lm->mod_ext_symend = sp;
				lm->mod_symend = sp;
			}
		}
		mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
	}

	st->flags |= MODULE_SYMS;

	if (symbol_query("__insmod_", NULL, NULL))
		st->flags |= INSMOD_BUILTIN;
}

union kernel_symbol {
	struct kernel_symbol_v1 {
		unsigned long value;
		const char *name;
	} v1;
	/* kernel 4.19 introduced relative symbol positioning */
	struct kernel_symbol_v2 {
		int value_offset;
		int name_offset;
	} v2;
	/* kernel 5.4 introduced symbol namespaces */
	struct kernel_symbol_v3 {
		int value_offset;
		int name_offset;
		int namespace_offset;
	} v3;
	struct kernel_symbol_v4 {
		unsigned long value;
		const char *name;
		const char *namespace;
	} v4;
};

/*
 * Determine which kernel_symbol layout the target kernel uses (sets
 * st->kernel_symbol_type to 1..4) and return that layout's size.
 */
static size_t
kernel_symbol_type_init(void)
{
	if (MEMBER_EXISTS("kernel_symbol", "value") &&
	    MEMBER_EXISTS("kernel_symbol", "name")) {
		if (MEMBER_EXISTS("kernel_symbol", "namespace")) {
			st->kernel_symbol_type = 4;
			return (sizeof(struct kernel_symbol_v4));
		} else {
			st->kernel_symbol_type = 1;
			return (sizeof(struct kernel_symbol_v1));
		}
	}
	if (MEMBER_EXISTS("kernel_symbol", "value_offset") &&
	    MEMBER_EXISTS("kernel_symbol", "name_offset")) {
		if (MEMBER_EXISTS("kernel_symbol", "namespace_offset")) {
			st->kernel_symbol_type = 3;
			return (sizeof(struct kernel_symbol_v3));
		} else {
			st->kernel_symbol_type = 2;
			return (sizeof(struct kernel_symbol_v2));
		}
	}
	error(FATAL, "kernel_symbol data structure has changed\n");
	return 0;
}

/*
 * Return the target-space address of the i-th exported symbol's name.
 * For the relative layouts (types 2/3) the stored offset is relative
 * to the name_offset member's own address within the syms array.
 */
static ulong
modsym_name(ulong syms, union kernel_symbol *modsym, int i)
{
	switch (st->kernel_symbol_type)
	{
	case 1:
		return (ulong)modsym->v1.name;
	case 2:
		return (syms + i * sizeof(struct kernel_symbol_v2) +
			offsetof(struct kernel_symbol_v2, name_offset) +
			modsym->v2.name_offset);
	case 3:
		return (syms + i * sizeof(struct kernel_symbol_v3) +
			offsetof(struct kernel_symbol_v3, name_offset) +
			modsym->v3.name_offset);
	case 4:
		return (ulong)modsym->v4.name;
	}

	return 0;
}

/*
 * Return the i-th exported symbol's value, resolving relative layouts
 * the same way as modsym_name().
 */
static ulong
modsym_value(ulong syms, union kernel_symbol *modsym, int i)
{
	switch (st->kernel_symbol_type)
	{
	case 1:
		return (ulong)modsym->v1.value;
	case 2:
		return (syms + i * sizeof(struct kernel_symbol_v2) +
			offsetof(struct kernel_symbol_v2, value_offset) +
			modsym->v2.value_offset);
	case 3:
		return (syms + i * sizeof(struct kernel_symbol_v3) +
			offsetof(struct kernel_symbol_v3, value_offset) +
			modsym->v3.value_offset);
	case 4:
		return (ulong)modsym->v4.value;
	}

	return 0;
}

/*
 * Linux 6.4 introduced module.mem memory layout
 */
void
store_module_symbols_6_4(ulong total, int mods_installed)
{
	int i, m, t;
	ulong mod, mod_next;
	char *mod_name;
	uint nsyms, ngplsyms;
	ulong syms, gpl_syms;
	ulong nksyms;
	long strbuflen;
	ulong size;
	int mcnt, lm_mcnt;
	union kernel_symbol *modsym;
	size_t kernel_symbol_size;
	struct load_module *lm;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *strbuf = NULL, *modbuf, *modsymbuf;
	struct syment *sp;
	ulong first, last;

	st->mods_installed = mods_installed;

	if (!st->mods_installed) {
		st->flags &= ~MODULE_SYMS;
		return;
	}

	/*
	 * If we've been here before, free up everything and start over.
	 */
	if (st->flags & MODULE_SYMS)
		error(FATAL,
		    "re-initialization of module symbols not implemented yet!\n");

	kernel_symbol_size = kernel_symbol_type_init();

	if ((st->ext_module_symtable = (struct syment *)
	    calloc(total, sizeof(struct syment))) == NULL)
		error(FATAL, "module syment space malloc (%ld symbols): %s\n",
			total, strerror(errno));

	if (!namespace_ctl(NAMESPACE_INIT, &st->ext_module_namespace,
	    (void *)total, NULL))
		error(FATAL, "module namespace malloc: %s\n",
			strerror(errno));

	if ((st->load_modules = (struct load_module *)calloc
	    (st->mods_installed, sizeof(struct load_module))) == NULL)
		error(FATAL, "load_module array malloc: %s\n", strerror(errno));

	modbuf = GETBUF(SIZE(module));
	modsymbuf = NULL;
	m = mcnt = mod_next = 0;

	for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) {
		readmem(mod, KVADDR, modbuf, SIZE(module),
			"module buffer", FAULT_ON_ERROR);

		syms = ULONG(modbuf + OFFSET(module_syms));
		gpl_syms = ULONG(modbuf + OFFSET(module_gpl_syms));
		nsyms = UINT(modbuf + OFFSET(module_num_syms));
		ngplsyms = UINT(modbuf + OFFSET(module_num_gpl_syms));
		nksyms = UINT(modbuf + OFFSET(module_num_symtab));

		mod_name = modbuf + OFFSET(module_name);

		lm = &st->load_modules[m++];
		BZERO(lm, sizeof(struct load_module));

		/* gather the per-type module.mem[] regions */
		size = 0;
		for_each_mod_mem_type(t) {
			lm->mem[t].base = ULONG(modbuf + OFFSET(module_mem) +
				SIZE(module_memory) * t + OFFSET(module_memory_base));
			lm->mem[t].size = UINT(modbuf + OFFSET(module_mem) +
				SIZE(module_memory) * t + OFFSET(module_memory_size));
			if (t < MOD_INIT_TEXT)
				size += lm->mem[t].size;
		}
		lm->mod_base = lm->mem[MOD_TEXT].base;
		/* module core size, init not included */
		lm->mod_size = size;
		lm->module_struct = mod;

		if (strlen(mod_name) < MAX_MOD_NAME)
			strcpy(lm->mod_name, mod_name);
		else {
			error(INFO,
			    "module name greater than MAX_MOD_NAME: %s\n",
				mod_name);
			strncpy(lm->mod_name, mod_name, MAX_MOD_NAME-1);
		}
		if (CRASHDEBUG(3))
			fprintf(fp,
			    "%lx (%lx): %s syms: %d gplsyms: %d ksyms: %ld\n",
				mod, lm->mod_base, lm->mod_name, nsyms,
				ngplsyms, nksyms);
		lm->mod_flags = MOD_EXT_SYMS;
		lm->mod_ext_symcnt = mcnt;
		lm->mod_text_start = lm->mod_base;
		lm->mod_init_module_ptr = lm->mem[MOD_INIT_TEXT].base;
		lm->mod_init_size = lm->mem[MOD_INIT_TEXT].size;
		lm->mod_init_text_size = lm->mem[MOD_INIT_TEXT].size;

		if (VALID_MEMBER(module_percpu))
			lm->mod_percpu = ULONG(modbuf + OFFSET(module_percpu));

		/* install a region-start pseudo symbol per populated type */
		lm_mcnt = mcnt;
		for_each_mod_mem_type(t) {
			if (!lm->mem[t].size)
				continue;

			st->ext_module_symtable[mcnt].value = lm->mem[t].base;
			st->ext_module_symtable[mcnt].type = 'm';
			st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
			sprintf(buf2, "%s%s", module_tag[t].start, mod_name);
			namespace_ctl(NAMESPACE_INSTALL,
				&st->ext_module_namespace,
				&st->ext_module_symtable[mcnt], buf2);
			lm_mcnt = mcnt;
			mcnt++;

			if (t >= MOD_INIT_TEXT)
				lm->mod_flags |= MOD_INIT;
		}

		/* sanity check: ignore a syms pointer outside the module */
		if (nsyms && !IN_MODULE(syms, lm)) {
			error(WARNING,
			    "[%s] module.syms outside of module "
			    "address space (%lx)\n\n",
				lm->mod_name, syms);
			nsyms = 0;
		}

		if (nsyms) {
			modsymbuf = GETBUF(kernel_symbol_size*nsyms);
			readmem((ulong)syms, KVADDR, modsymbuf,
				nsyms * kernel_symbol_size,
				"module symbols", FAULT_ON_ERROR);
		}

		/* find the name-string address range for one block read */
		for (i = first = last = 0; i < nsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));
			if (!first || first > modsym_name(syms, modsym, i))
				first = modsym_name(syms, modsym, i);
			if (modsym_name(syms, modsym, i) > last)
				last = modsym_name(syms, modsym, i);
		}

		if (last > first) {
			/* The buffer should not go over the block. */
			ulong end = module_mem_end(first, lm);

			strbuflen = (last-first) + BUFSIZE;
			if ((first + strbuflen) >= end) {
				strbuflen = end - first;
			}
			strbuf = GETBUF(strbuflen);

			if (!readmem(first, KVADDR, strbuf, strbuflen,
			    "module symbol strings", RETURN_ON_ERROR)) {
				FREEBUF(strbuf);
				strbuf = NULL;
			}
		} else
			strbuf = NULL;

		for (i = 0; i < nsyms; i++) {
			modsym = (union kernel_symbol *)(modsymbuf +
				(i * kernel_symbol_size));

			BZERO(buf1, BUFSIZE);

			if (strbuf)
				strcpy(buf1,
					&strbuf[modsym_name(syms, modsym, i) -
					first]);
			else
				read_string(modsym_name(syms, modsym, i),
					buf1, BUFSIZE-1);

			if (strlen(buf1)) {
				st->ext_module_symtable[mcnt].value =
					modsym_value(syms, modsym, i);
				st->ext_module_symtable[mcnt].type = '?';
				st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
				strip_module_symbol_end(buf1);
				strip_symbol_end(buf1, NULL);

				namespace_ctl(NAMESPACE_INSTALL,
					&st->ext_module_namespace,
					&st->ext_module_symtable[mcnt], buf1);

				mcnt++;
			}
		}

		if (modsymbuf) {
			FREEBUF(modsymbuf);
			modsymbuf = NULL;
		}
		if (strbuf)
			FREEBUF(strbuf);

		/* repeat the same procedure for the GPL-only exports */
		if (ngplsyms) {
			modsymbuf = GETBUF(kernel_symbol_size * ngplsyms);
			readmem((ulong)gpl_syms, KVADDR, modsymbuf,
				ngplsyms * kernel_symbol_size,
				"module gpl symbols", FAULT_ON_ERROR);
		}

		for (i = first = last = 0; i < ngplsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));
			if (!first || first > modsym_name(gpl_syms, modsym, i))
				first = modsym_name(gpl_syms, modsym, i);
			if (modsym_name(gpl_syms, modsym, i) > last)
				last = modsym_name(gpl_syms, modsym, i);
		}

		if (last > first) {
			ulong end = module_mem_end(first, lm);

			strbuflen = (last-first) + BUFSIZE;
			if ((first + strbuflen) >= end) {
				strbuflen = end - first;
			}
			strbuf = GETBUF(strbuflen);

			if (!readmem(first, KVADDR, strbuf, strbuflen,
			    "module gpl symbol strings", RETURN_ON_ERROR)) {
				FREEBUF(strbuf);
				strbuf = NULL;
			}
		} else
			strbuf = NULL;

		for (i = 0; i < ngplsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));

			BZERO(buf1, BUFSIZE);

			if (strbuf)
				strcpy(buf1,
					&strbuf[modsym_name(gpl_syms, modsym, i) -
					first]);
			else
				read_string(modsym_name(gpl_syms, modsym, i),
					buf1, BUFSIZE-1);

			if (strlen(buf1)) {
				st->ext_module_symtable[mcnt].value =
					modsym_value(gpl_syms, modsym, i);
				st->ext_module_symtable[mcnt].type = '?';
				st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
				strip_module_symbol_end(buf1);
				strip_symbol_end(buf1, NULL);

				namespace_ctl(NAMESPACE_INSTALL,
					&st->ext_module_namespace,
					&st->ext_module_symtable[mcnt], buf1);

				mcnt++;
			}
		}

		if (modsymbuf) {
			FREEBUF(modsymbuf);
			modsymbuf = NULL;
		}
		if (strbuf)
			FREEBUF(strbuf);

		/*
		 * If the module was compiled with kallsyms, add them in.
		 */
		switch (kt->flags & (KALLSYMS_V1|KALLSYMS_V2))
		{
		case KALLSYMS_V1:	/* impossible, I hope... */
			mcnt += store_module_kallsyms_v1(lm, lm_mcnt, mcnt,
				modbuf);
			break;
		case KALLSYMS_V2:
			mcnt += store_module_kallsyms_v2(lm, lm_mcnt, mcnt,
				modbuf);
			break;
		}

		/* region-end pseudo symbols: base + size - 1 keeps the
		   marker inside the region itself */
		for_each_mod_mem_type(t) {
			if (!lm->mem[t].size)
				continue;

			st->ext_module_symtable[mcnt].value =
				lm->mem[t].base + lm->mem[t].size - 1;
			st->ext_module_symtable[mcnt].type = 'm';
			st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
			sprintf(buf2, "%s%s", module_tag[t].end, mod_name);
			namespace_ctl(NAMESPACE_INSTALL,
				&st->ext_module_namespace,
				&st->ext_module_symtable[mcnt], buf2);
			mcnt++;
		}

		lm->mod_ext_symcnt = mcnt - lm->mod_ext_symcnt;

		NEXT_MODULE(mod_next, modbuf);
	}

	FREEBUF(modbuf);

	st->ext_module_symcnt = mcnt;
	st->ext_module_symend = &st->ext_module_symtable[mcnt];

	namespace_ctl(NAMESPACE_COMPLETE, &st->ext_module_namespace,
		st->ext_module_symtable, st->ext_module_symend);

	qsort(st->ext_module_symtable, mcnt, sizeof(struct syment),
		compare_syms);

	/* sort by text base address */
	qsort(st->load_modules, m, sizeof(struct load_module), compare_mods);

	/* resolve each region's symtable range from its pseudo symbols */
	for (m = 0; m < st->mods_installed; m++) {
		lm = &st->load_modules[m];
		for_each_mod_mem_type(t) {
			if (!lm->mem[t].size)
				continue;

			sprintf(buf1, "%s%s", module_tag[t].start, lm->mod_name);
			sprintf(buf2, "%s%s", module_tag[t].end, lm->mod_name);

			for (sp = st->ext_module_symtable;
			     sp < st->ext_module_symend; sp++) {
				if (STREQ(sp->name, buf1)) {
					lm->ext_symtable[t] = sp;
					break;
				}
			}
			for ( ; sp < st->ext_module_symend; sp++) {
				if (STREQ(sp->name, buf2)) {
					lm->ext_symend[t] = sp;
					break;
				}
			}

			if (lm->ext_symtable[t] && lm->ext_symend[t])
				mod_symtable_hash_install_range(lm->ext_symtable[t], lm->ext_symend[t]);
		}
		lm->symtable = lm->ext_symtable;
		lm->symend = lm->ext_symend;
	}

	st->flags |= MODULE_SYMS;

	if (CRASHDEBUG(2)) {
		for (sp = st->ext_module_symtable;
		     sp < st->ext_module_symend; sp++)
			fprintf(fp, "%16lx %s\n", sp->value, sp->name);
	}

	if (mcnt > total)
		error(FATAL,
		    "store_module_symbols_6_4: total: %ld mcnt: %d\n",
			total, mcnt);
}

/*
 * Store module symbols for kernels using the pre-6.4 module layout
 * (module_core/module_init pointers rather than module.mem[] regions).
 */
void
store_module_symbols_v2(ulong total, int mods_installed)
{
	int i, m;
	ulong mod, mod_next;
	char *mod_name;
	uint nsyms, ngplsyms;
	ulong syms, gpl_syms;
	ulong nksyms;
	long strbuflen;
	ulong size;
	int mcnt, lm_mcnt;
	union kernel_symbol *modsym;
	size_t kernel_symbol_size;
	struct load_module *lm;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char *strbuf, *modbuf, *modsymbuf;
	struct syment *sp;
	ulong first, last;

	st->mods_installed = mods_installed;

	if (!st->mods_installed) {
		st->flags &= ~MODULE_SYMS;
		return;
	}

	/*
	 * If we've been here before, free up everything and start over.
	 */
	if (st->flags & MODULE_SYMS) {
		error(FATAL,
		    "re-initialization of module symbols not implemented yet!\n");
	}

	kernel_symbol_size = kernel_symbol_type_init();

	if ((st->ext_module_symtable = (struct syment *)
	    calloc(total, sizeof(struct syment))) == NULL)
		error(FATAL,
		    "v2 module syment space malloc (%ld symbols): %s\n",
			total, strerror(errno));

	if (!namespace_ctl(NAMESPACE_INIT, &st->ext_module_namespace,
	    (void *)total, NULL))
		error(FATAL, "module namespace malloc: %s\n",
			strerror(errno));

	if ((st->load_modules = (struct load_module *)calloc
	    (st->mods_installed, sizeof(struct load_module))) == NULL)
		error(FATAL, "load_module array malloc: %s\n", strerror(errno));

	modbuf = GETBUF(SIZE(module));
	modsymbuf = NULL;
	m = mcnt = mod_next = 0;

	for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) {
		readmem(mod, KVADDR, modbuf, SIZE(module),
			"module buffer", FAULT_ON_ERROR);

		syms = ULONG(modbuf + OFFSET(module_syms));
		gpl_syms = ULONG(modbuf + OFFSET(module_gpl_syms));
		nsyms = UINT(modbuf + OFFSET(module_num_syms));
		ngplsyms = UINT(modbuf + OFFSET(module_num_gpl_syms));

		/* 2.6.27 changed these module fields from long to int */
		if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) {
			nksyms = UINT(modbuf + OFFSET(module_num_symtab));
			size = UINT(modbuf + MODULE_OFFSET2(module_core_size, rx));
		} else {
			nksyms = ULONG(modbuf + OFFSET(module_num_symtab));
			size = ULONG(modbuf + MODULE_OFFSET2(module_core_size, rx));
		}

		mod_name = modbuf + OFFSET(module_name);

		lm = &st->load_modules[m++];
		BZERO(lm, sizeof(struct load_module));
		lm->mod_base = ULONG(modbuf + MODULE_OFFSET2(module_module_core, rx));
		lm->module_struct = mod;
		lm->mod_size = size;
		if (strlen(mod_name) < MAX_MOD_NAME)
			strcpy(lm->mod_name, mod_name);
		else {
			error(INFO,
			    "module name greater than MAX_MOD_NAME: %s\n",
				mod_name);
			strncpy(lm->mod_name, mod_name, MAX_MOD_NAME-1);
		}
		if (CRASHDEBUG(3))
			fprintf(fp,
			    "%lx (%lx): %s syms: %d gplsyms: %d ksyms: %ld\n",
				mod, lm->mod_base, lm->mod_name, nsyms,
				ngplsyms, nksyms);
		lm->mod_flags = MOD_EXT_SYMS;
		lm->mod_ext_symcnt = mcnt;
		lm->mod_init_module_ptr = ULONG(modbuf +
			MODULE_OFFSET2(module_module_init, rx));
		if (VALID_MEMBER(module_percpu))
			lm->mod_percpu = ULONG(modbuf + OFFSET(module_percpu));
		if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) {
			lm->mod_etext_guess = lm->mod_base +
				UINT(modbuf + MODULE_OFFSET(module_core_text_size,
				module_core_size_rx));
			lm->mod_init_size =
				UINT(modbuf + MODULE_OFFSET2(module_init_size, rx));
			lm->mod_init_text_size =
				UINT(modbuf + MODULE_OFFSET(module_init_text_size,
				module_init_size_rx));
		} else {
			lm->mod_etext_guess = lm->mod_base +
				ULONG(modbuf + MODULE_OFFSET(module_core_text_size,
				module_core_size_rx));
			lm->mod_init_size =
				ULONG(modbuf + MODULE_OFFSET2(module_init_size, rx));
			lm->mod_init_text_size =
				ULONG(modbuf + MODULE_OFFSET(module_init_text_size,
				module_init_size_rx));
		}
		lm->mod_text_start = lm->mod_base;

		/* install the _MODULE_START_<name> pseudo symbol */
		st->ext_module_symtable[mcnt].value = lm->mod_base;
		st->ext_module_symtable[mcnt].type = 'm';
		st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
		sprintf(buf2, "%s%s", "_MODULE_START_", mod_name);
		namespace_ctl(NAMESPACE_INSTALL, &st->ext_module_namespace,
			&st->ext_module_symtable[mcnt], buf2);
		lm_mcnt = mcnt;
		mcnt++;

		/* and, when an init region exists, _MODULE_INIT_START_ */
		if (lm->mod_init_size > 0) {
			st->ext_module_symtable[mcnt].value = lm->mod_init_module_ptr;
			st->ext_module_symtable[mcnt].type = 'm';
			st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
			sprintf(buf3, "%s%s", "_MODULE_INIT_START_", mod_name);
			namespace_ctl(NAMESPACE_INSTALL,
				&st->ext_module_namespace,
				&st->ext_module_symtable[mcnt], buf3);
			lm_mcnt = mcnt;
			mcnt++;
			lm->mod_flags |= MOD_INIT;
		}

		if (nsyms && !IN_MODULE(syms, lm)) {
			error(WARNING,
			    "[%s] module.syms outside of module "
			    "address space (%lx)\n\n",
				lm->mod_name, syms);
			nsyms = 0;
		}

		if (nsyms) {
			modsymbuf = GETBUF(kernel_symbol_size*nsyms);
			readmem((ulong)syms, KVADDR, modsymbuf,
				nsyms * kernel_symbol_size,
				"module symbols", FAULT_ON_ERROR);
		}

		/* find the name-string address range for one block read */
		for (i = first = last = 0; i < nsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));
			if (!first || first > modsym_name(syms, modsym, i))
				first = modsym_name(syms, modsym, i);
			if (modsym_name(syms, modsym, i) > last)
				last = modsym_name(syms, modsym, i);
		}

		if (last > first) {
			strbuflen = (last-first) + BUFSIZE;
			/* clamp the read so it stays inside the module */
			if ((first + strbuflen) >=
			    (lm->mod_base + lm->mod_size)) {
				strbuflen = (lm->mod_base + lm->mod_size) -
					first;
			}
			strbuf = GETBUF(strbuflen);

			if (!readmem(first, KVADDR, strbuf, strbuflen,
			    "module symbol strings", RETURN_ON_ERROR)) {
				FREEBUF(strbuf);
				strbuf = NULL;
			}
		} else
			strbuf = NULL;

		for (i = 0; i < nsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));

			BZERO(buf1, BUFSIZE);

			if (strbuf)
				strcpy(buf1,
					&strbuf[modsym_name(syms, modsym, i) -
					first]);
			else
				read_string(modsym_name(syms, modsym, i),
					buf1, BUFSIZE-1);

			if (strlen(buf1)) {
				st->ext_module_symtable[mcnt].value =
					modsym_value(syms, modsym, i);
				st->ext_module_symtable[mcnt].type = '?';
				st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
				strip_module_symbol_end(buf1);
				strip_symbol_end(buf1, NULL);
				namespace_ctl(NAMESPACE_INSTALL,
					&st->ext_module_namespace,
					&st->ext_module_symtable[mcnt], buf1);
				mcnt++;
			}
		}

		if (modsymbuf) {
			FREEBUF(modsymbuf);
			modsymbuf = NULL;
		}
		if (strbuf)
			FREEBUF(strbuf);

		/* repeat the same procedure for the GPL-only exports */
		if (ngplsyms) {
			modsymbuf = GETBUF(kernel_symbol_size * ngplsyms);
			readmem((ulong)gpl_syms, KVADDR, modsymbuf,
				ngplsyms * kernel_symbol_size,
				"module gpl symbols", FAULT_ON_ERROR);
		}

		for (i = first = last = 0; i < ngplsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));
			if (!first || first > modsym_name(gpl_syms, modsym, i))
				first = modsym_name(gpl_syms, modsym, i);
			if (modsym_name(gpl_syms, modsym, i) > last)
				last = modsym_name(gpl_syms, modsym, i);
		}

		if (last > first) {
			strbuflen = (last-first) + BUFSIZE;
			if ((first + strbuflen) >=
			    (lm->mod_base + lm->mod_size)) {
				strbuflen = (lm->mod_base + lm->mod_size) -
					first;
			}
			strbuf = GETBUF(strbuflen);

			if (!readmem(first, KVADDR, strbuf, strbuflen,
			    "module gpl symbol strings", RETURN_ON_ERROR)) {
				FREEBUF(strbuf);
				strbuf = NULL;
			}
		} else
			strbuf = NULL;

		for (i = 0; i < ngplsyms; i++) {
			modsym = (union kernel_symbol *)
			    (modsymbuf + (i * kernel_symbol_size));

			BZERO(buf1, BUFSIZE);

			if (strbuf)
				strcpy(buf1,
					&strbuf[modsym_name(gpl_syms, modsym, i) -
					first]);
			else
				read_string(modsym_name(gpl_syms, modsym, i),
					buf1, BUFSIZE-1);

			if (strlen(buf1)) {
				st->ext_module_symtable[mcnt].value =
					modsym_value(gpl_syms, modsym, i);
				st->ext_module_symtable[mcnt].type = '?';
				st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
				strip_module_symbol_end(buf1);
				strip_symbol_end(buf1, NULL);
				namespace_ctl(NAMESPACE_INSTALL,
					&st->ext_module_namespace,
					&st->ext_module_symtable[mcnt], buf1);
				mcnt++;
			}
		}

		if (modsymbuf) {
			FREEBUF(modsymbuf);
			modsymbuf = NULL;
		}
		if (strbuf)
			FREEBUF(strbuf);

		/*
		 * If the module was compiled with kallsyms, add them in.
		 */
		switch (kt->flags & (KALLSYMS_V1|KALLSYMS_V2))
		{
		case KALLSYMS_V1:	/* impossible, I hope... */
			mcnt += store_module_kallsyms_v1(lm, lm_mcnt, mcnt,
				modbuf);
			break;
		case KALLSYMS_V2:
			mcnt += store_module_kallsyms_v2(lm, lm_mcnt, mcnt,
				modbuf);
			break;
		}

		/* install the _MODULE_END_<name> pseudo symbol */
		st->ext_module_symtable[mcnt].value = lm->mod_base + size;
		st->ext_module_symtable[mcnt].type = 'm';
		st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
		sprintf(buf2, "%s%s", "_MODULE_END_", mod_name);
		namespace_ctl(NAMESPACE_INSTALL, &st->ext_module_namespace,
			&st->ext_module_symtable[mcnt], buf2);
		mcnt++;

		if (lm->mod_init_size > 0) {
			st->ext_module_symtable[mcnt].value =
				lm->mod_init_module_ptr + lm->mod_init_size;
			st->ext_module_symtable[mcnt].type = 'm';
			st->ext_module_symtable[mcnt].flags |= MODULE_SYMBOL;
			sprintf(buf4, "%s%s", "_MODULE_INIT_END_", mod_name);
			namespace_ctl(NAMESPACE_INSTALL,
				&st->ext_module_namespace,
				&st->ext_module_symtable[mcnt], buf4);
			mcnt++;
		}

		lm->mod_ext_symcnt = mcnt - lm->mod_ext_symcnt;

		if (!lm->mod_etext_guess)
			find_mod_etext(lm);

		NEXT_MODULE(mod_next, modbuf);
	}

	FREEBUF(modbuf);

	st->ext_module_symcnt = mcnt;
	st->ext_module_symend = &st->ext_module_symtable[mcnt];

	namespace_ctl(NAMESPACE_COMPLETE, &st->ext_module_namespace,
		st->ext_module_symtable, st->ext_module_symend);

	qsort(st->ext_module_symtable, mcnt, sizeof(struct syment),
		compare_syms);

	qsort(st->load_modules, m, sizeof(struct load_module), compare_mods);

	/* resolve each module's symtable ranges from its pseudo symbols */
	for (m = 0; m < st->mods_installed; m++) {
		lm = &st->load_modules[m];
		sprintf(buf1, "_MODULE_START_%s", lm->mod_name);
		sprintf(buf2, "_MODULE_END_%s", lm->mod_name);
		sprintf(buf3, "_MODULE_INIT_START_%s", lm->mod_name);
		sprintf(buf4, "_MODULE_INIT_END_%s", lm->mod_name);

		for (sp = st->ext_module_symtable;
		     sp < st->ext_module_symend; sp++) {
			if (STREQ(sp->name, buf1)) {
				lm->mod_ext_symtable = sp;
				lm->mod_symtable = sp;
			}
			if (STREQ(sp->name, buf2)) {
				lm->mod_ext_symend = sp;
				lm->mod_symend = sp;
			}
			if (STREQ(sp->name, buf3)) {
				lm->mod_init_symtable = sp;
			}
			if (STREQ(sp->name, buf4)) {
				lm->mod_init_symend = sp;
			}
		}
		mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
		mod_symtable_hash_install_range(lm->mod_init_symtable, lm->mod_init_symend);
	}

	st->flags |= MODULE_SYMS;

	if (symbol_query("__insmod_", NULL, NULL))
		st->flags |= INSMOD_BUILTIN;

	if (mcnt > total)
		error(FATAL,
		    "store_module_symbols_v2: total: %ld mcnt: %d\n",
			total, mcnt);
}

/*
 * Get the module's kallsyms list if it was compiled in.
 */
/*
 *  Read the kallsyms data that pre-2.6 ("v1") kernels embed in a loaded
 *  module: copy the whole module image into a local buffer, walk its
 *  kallsyms_symbol array, derive an 'nm'-style type letter from each
 *  symbol's section name, and install new symbols into the external
 *  module symbol table starting at index "curr".  Symbols already stored
 *  in [start, curr) (the exported ones) only get their type letter
 *  filled in.  Returns the number of NEW symbols added.
 */
static int
store_module_kallsyms_v1(struct load_module *lm, int start, int curr,
	char *modbuf)
{
	int i, j;
	struct syment *sp;
	ulong kallsyms_header;
	char *module_buf;
	char *header_buf;
	uint symbols;
	ulong name_off;
	ulong sec_name_off;
	ulong section_off;
	ulong symptr;
	ulong symbol_addr;
	ulong stringptr;
	ulong sectionptr;
	char *nameptr;
	char *secnameptr;
	ulong secptr;
	char type;
	int mcnt;
	int mcnt_idx;
	int found;
	struct symbol_namespace *ns;

	if (!(kt->flags & KALLSYMS_V1))
		return 0;

	/* Kernel virtual address of the module's kallsyms header. */
	kallsyms_header = ULONG(modbuf + OFFSET(module_kallsyms_start));
	if (!kallsyms_header)
		return 0;

	mcnt = 0;
	mcnt_idx = curr;
	module_buf = GETBUF(ULONG(modbuf + OFFSET(module_size)));
	ns = &st->ext_module_namespace;

	if (!readmem(lm->mod_base, KVADDR, module_buf, lm->mod_size,
	    "module (kallsyms)", RETURN_ON_ERROR|QUIET)) {
		error(WARNING,"cannot access module kallsyms\n");
		FREEBUF(module_buf);
		return 0;
	}

	/* Sanity check: pointer must land inside the copied module image. */
#define IN_MODULE_BUF_V1(x) \
	(((x) >= module_buf) && ((x) < (module_buf + lm->mod_size)))

	/* Translate the kernel header address into our local copy. */
	header_buf = module_buf + (kallsyms_header - lm->mod_base);
	symbols = UINT(header_buf + OFFSET(kallsyms_header_symbols));
//	sections = UINT(header_buf + OFFSET(kallsyms_header_sections));

	if (CRASHDEBUG(7))
		fprintf(fp, "kallsyms: module: %s\n", lm->mod_name);

	/* All offsets in the v1 header are relative to the header itself. */
	symptr = (ulong)(header_buf +
		ULONG(header_buf + OFFSET(kallsyms_header_symbol_off)));
	stringptr = (ulong)(header_buf +
		ULONG(header_buf + OFFSET(kallsyms_header_string_off)));
	sectionptr = (ulong)(header_buf +
		ULONG(header_buf + OFFSET(kallsyms_header_section_off)));

	for (i = 0; i < symbols; i++, symptr += SIZE(kallsyms_symbol)) {
		symbol_addr = ULONG(symptr+OFFSET(kallsyms_symbol_symbol_addr));
		name_off = ULONG(symptr+OFFSET(kallsyms_symbol_name_off));
		section_off = ULONG(symptr+OFFSET(kallsyms_symbol_section_off));
		nameptr = (char *)(stringptr + name_off);
		secptr = (ulong)(sectionptr + section_off);
		sec_name_off = ULONG(secptr+OFFSET(kallsyms_section_name_off));
		secnameptr = (char *)(stringptr + sec_name_off);

		/* Skip entries whose strings fall outside the image copy. */
		if (!IN_MODULE_BUF_V1(nameptr)) {
			if (CRASHDEBUG(7))
				error(INFO,
				    "%s: invalid nameptr: %lx (stringptr: %lx + name_off: %lx)\n",
					lm->mod_name, nameptr,
					stringptr, name_off);
			continue;
		}
		if (!IN_MODULE_BUF_V1(secnameptr)) {
			if (CRASHDEBUG(7))
				error(INFO,
				    "%s: invalid secnameptr: %lx (stringptr: %lx + sec_name_off: %lx)\n",
					lm->mod_name, secnameptr,
					stringptr, sec_name_off);
			continue;
		}

		/*
		 *  Section-name entries name themselves; skip those, and
		 *  map the symbol's section name to a type letter.
		 */
		if (!STREQ(nameptr, secnameptr)) {
			if (STREQ(secnameptr, ".text"))
				type = 't';
			else if (STREQ(secnameptr, ".data"))
				type = 'd';
			else if (STREQ(secnameptr, ".bss"))
				type = 'b';
			else if (STREQ(secnameptr, ".rodata"))
				type = 'd';
			else
				continue;

			strip_module_symbol_end(nameptr);
			strip_symbol_end(nameptr, NULL);

			if (CRASHDEBUG(7))
				fprintf(fp," symbol: %lx \"%s\" section: %s\n",
					symbol_addr, nameptr, secnameptr);

			/* Already stored as an exported symbol? */
			for (found = 0, j = start; j < curr; j++) {
				sp = &st->ext_module_symtable[j];
				if ((sp->value == symbol_addr) &&
				    STREQ(nameptr,
				    &ns->address[(ulong)sp->name])) {
					if (CRASHDEBUG(7))
						fprintf(fp,
						    "current symbol \"%s\" at %lx of type (%c)\n",
						    &ns->address[(ulong)sp->name],
							sp->value, sp->type);
					if (sp->type == '?')
						sp->type = type;
					found++;
					break;
				}
			}

			if (found)
				continue;

			st->ext_module_symtable[mcnt_idx].value = symbol_addr;
			st->ext_module_symtable[mcnt_idx].type = type;
			st->ext_module_symtable[mcnt_idx].flags |= MODULE_SYMBOL;
			namespace_ctl(NAMESPACE_INSTALL,
				&st->ext_module_namespace,
				&st->ext_module_symtable[mcnt_idx++], nameptr);
			mcnt++;
		}
	}

	lm->mod_flags |= MOD_KALLSYMS;
	FREEBUF(module_buf);
	return mcnt;
}

/*
 *  Translate either an Elf32_Sym or Elf64_Sym to an elf_common structure
 *  for more convenient use by store_module_kallsyms_v2.
*/ struct elf_common { ulong st_name; ulong st_value; ulong st_shndx; unsigned char st_info; ulong st_size; }; static void Elf32_Sym_to_common(Elf32_Sym *e32, struct elf_common *ec) { ec->st_name = (ulong)e32->st_name; ec->st_value = (ulong)e32->st_value; ec->st_shndx = (ulong)e32->st_shndx; if ((e32->st_info >= ' ') && (e32->st_info < 0x7f)) ec->st_info = e32->st_info; else if (e32->st_info == 0x02) ec->st_info = 't'; else if (e32->st_info == 0x12) ec->st_info = 'T'; else ec->st_info = '?'; ec->st_size = (ulong)e32->st_size; } static void Elf64_Sym_to_common(Elf64_Sym *e64, struct elf_common *ec) { ec->st_name = (ulong)e64->st_name; ec->st_value = (ulong)e64->st_value; ec->st_shndx = (ulong)e64->st_shndx; if ((e64->st_info >= ' ') && (e64->st_info < 0x7f)) ec->st_info = e64->st_info; else if (e64->st_info == 0x02) ec->st_info = 't'; else if (e64->st_info == 0x12) ec->st_info = 'T'; else ec->st_info = '?'; ec->st_size = (ulong)e64->st_size; } static int store_module_kallsyms_v2(struct load_module *lm, int start, int curr, char *modbuf) { int i, j, found; struct elf_common elf_common, *ec; ulong nksyms, ksymtab, kstrtab; char *module_buf, *ptr, *locsymtab, *locstrtab, *nameptr; struct syment *sp; struct symbol_namespace *ns; int mcnt; int mcnt_idx; char *module_buf_init = NULL; ulong base, base_init, size, size_init; if (!(kt->flags & KALLSYMS_V2)) return 0; mcnt = 0; BZERO(&elf_common, sizeof(struct elf_common)); mcnt_idx = curr; ns = &st->ext_module_namespace; ec = &elf_common; /* kallsyms data looks to be in MOD_DATA region. 
*/ if (MODULE_MEMORY()) { base = lm->mem[MOD_DATA].base; size = lm->mem[MOD_DATA].size; base_init = lm->mem[MOD_INIT_DATA].base; size_init = lm->mem[MOD_INIT_DATA].size; } else { base = lm->mod_base; size = lm->mod_size; base_init = lm->mod_init_module_ptr; size_init = lm->mod_init_size; } module_buf = GETBUF(size); if (!readmem(base, KVADDR, module_buf, size, "module (kallsyms)", RETURN_ON_ERROR|QUIET)) { error(WARNING,"cannot access module kallsyms\n"); FREEBUF(module_buf); return 0; } if (lm->mod_init_size > 0) { module_buf_init = GETBUF(size_init); if (!readmem(base_init, KVADDR, module_buf_init, size_init, "module init (kallsyms)", RETURN_ON_ERROR|QUIET)) { error(WARNING,"cannot access module init kallsyms\n"); FREEBUF(module_buf_init); } } if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) nksyms = UINT(modbuf + OFFSET(module_num_symtab)); else nksyms = ULONG(modbuf + OFFSET(module_num_symtab)); ksymtab = ULONG(modbuf + OFFSET(module_symtab)); if (!IN_MODULE(ksymtab, lm) && !IN_MODULE_INIT(ksymtab, lm)) { error(WARNING, "%s: module.symtab outside of module address space\n", lm->mod_name); FREEBUF(module_buf); if (module_buf_init) FREEBUF(module_buf_init); return 0; } if (IN_MODULE(ksymtab, lm)) locsymtab = module_buf + (ksymtab - base); else locsymtab = module_buf_init + (ksymtab - base_init); kstrtab = ULONG(modbuf + OFFSET(module_strtab)); if (!IN_MODULE(kstrtab, lm) && !IN_MODULE_INIT(kstrtab, lm)) { error(WARNING, "%s: module.strtab outside of module address space\n", lm->mod_name); FREEBUF(module_buf); if (module_buf_init) FREEBUF(module_buf_init); return 0; } if (IN_MODULE(kstrtab, lm)) locstrtab = module_buf + (kstrtab - base); else locstrtab = module_buf_init + (kstrtab - base_init); for (i = 1; i < nksyms; i++) { /* ELF starts real symbols at 1 */ switch (BITS()) { case 32: ptr = locsymtab + (i * sizeof(Elf32_Sym)); Elf32_Sym_to_common((Elf32_Sym *)ptr, ec); break; case 64: ptr = locsymtab + (i * sizeof(Elf64_Sym)); Elf64_Sym_to_common((Elf64_Sym *)ptr, ec); 
break; } if (!IN_MODULE(ec->st_value, lm) && !IN_MODULE_INIT(ec->st_value, lm)) continue; if (ec->st_shndx == SHN_UNDEF) continue; if (!IN_MODULE(kstrtab + ec->st_name, lm) && !IN_MODULE_INIT(kstrtab + ec->st_name, lm)) { if (CRASHDEBUG(3)) { error(WARNING, "%s: bad st_name index: %lx -> %lx\n " " st_value: %lx st_shndx: %ld st_info: %c\n", lm->mod_name, ec->st_name, (kstrtab + ec->st_name), ec->st_value, ec->st_shndx, ec->st_info); } continue; } nameptr = locstrtab + ec->st_name; if (*nameptr == '\0') continue; /* * On ARM/ARM64 we have linker mapping symbols like '$a' * or '$x' for ARM64, and '$d'. * On LoongArch we have linker mapping symbols like '.L' * or 'L0'. * Make sure that these don't end up into our symbol list. */ if ((machine_type("ARM") || machine_type("ARM64") || machine_type("LOONGARCH64")) && !machdep->verify_symbol(nameptr, ec->st_value, ec->st_info)) continue; if (CRASHDEBUG(7)) fprintf(fp, "%s: st_name: %ld st_value: %lx st_shndx: %ld st_info: %c\n", nameptr, ec->st_name, ec->st_value, ec->st_shndx, ec->st_info); strip_symbol_end(nameptr, NULL); for (found = 0, j = start; j < curr; j++) { sp = &st->ext_module_symtable[j]; if ((sp->value == ec->st_value) && STREQ(nameptr, &ns->address[(ulong)sp->name])) { if (CRASHDEBUG(7)) fprintf(fp, "current symbol \"%s\" at %lx of type (%c)\n", &ns->address[(ulong)sp->name], sp->value, sp->type); if (sp->type == '?') sp->type = ec->st_info; found++; break; } } if (found) continue; st->ext_module_symtable[mcnt_idx].value = ec->st_value; st->ext_module_symtable[mcnt_idx].type = ec->st_info; st->ext_module_symtable[mcnt_idx].flags |= MODULE_SYMBOL; namespace_ctl(NAMESPACE_INSTALL, &st->ext_module_namespace, &st->ext_module_symtable[mcnt_idx++], nameptr); mcnt++; } lm->mod_flags |= MOD_KALLSYMS; FREEBUF(module_buf); if (module_buf_init) FREEBUF(module_buf_init); return mcnt; } /* * Strip the kernel clutter tagged on the end of an exported module symbol. 
*/ static void strip_module_symbol_end(char *buf) { char *p1, *lastR; if (!(lastR = strrchr(buf, 'R'))) return; if (((p1 = lastR-1) < buf) || (*p1 != '_')) return; if ((kt->flags & SMP) && STRNEQ(p1, "_Rsmp_")) { *p1 = NULLCHAR; return; } if (!hexadecimal(lastR+1, 0)) return; *p1 = NULLCHAR; } /* * Return the lowest or highest module virtual address. */ ulong lowest_module_address(void) { int i, t; struct load_module *lm; ulong low, lowest; if (!st->mods_installed) return 0; lowest = (ulong)(-1); for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; if (MODULE_MEMORY()) for_each_mod_mem_type(t) { if (!lm->mem[t].size) continue; low = lm->mem[t].base; if (low < lowest) lowest = low; } else { low = lm->mod_base; if (low < lowest) lowest = low; } } return lowest; } ulong highest_module_address(void) { int i, t; struct load_module *lm; ulong high, highest; highest = 0; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; if (MODULE_MEMORY()) { for_each_mod_mem_type(t) { if (!lm->mem[t].size) continue; high = lm->mem[t].base + lm->mem[t].size; if (high > highest) highest = high; } } else { high = lm->mod_base + lm->mod_size; if (high > highest) highest = high; } } return highest; } /* * Look through a string for bogus kernel clutter of an exported * module symbol. In the case of LM_P_FILTER, shift the string left * as appropriate to get rid of the extra stuff. In the case of * LM_DIS_FILTER, translation of the previous address is done first, * and its results are stuffed into the string. In both cases, * this routine is recursive to catch multiple instances. 
 */

/* Lengths of the "_Rsmp_"/"_R" prefixes and the trailing hex id. */
#define SMP_CLUTTER   (strlen("_Rsmp_"))
#define UP_CLUTTER    (strlen("_R"))
#define CLUTTER_IDLEN (8)

/*
 *  Filter kernel version clutter ("_R<crc>"/"_Rsmp_<crc>") out of a
 *  module symbol string in place.  LM_P_FILTER just shifts the string
 *  left; LM_DIS_FILTER also rewrites the address/symbol portions of a
 *  disassembly line using value_search().  Returns the filtered string.
 */
char *
load_module_filter(char *s, int type)
{
	char *arglist[MAXARGS];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int clen, last;
	int prev;
	char *pstart, *p1, *p2, *smp, *pend, *colon;
	ulong vaddr;
	ulong offset;
	struct syment *sp;
	int argc;

	switch (type)
	{
	case LM_P_FILTER:
		if (!(pstart = strstr(s, "_R")))
			return s;

		smp = strstr(s, "_Rsmp_");

		pend = &s[strlen(s)];

		p2 = pstart + (smp ? SMP_CLUTTER : UP_CLUTTER);

		/* The clutter must be followed by an 8-digit hex id. */
		if ((p2 >= pend) || !hexadecimal(p2, CLUTTER_IDLEN))
			return s;

		clen = smp ?
			SMP_CLUTTER+CLUTTER_IDLEN : UP_CLUTTER+CLUTTER_IDLEN;

		if (bracketed(s, pstart, clen)) {  /* hack it out for now */
			pstart--;
			shift_string_left(pstart, clen+2);
			if (*pstart == ',')
				shift_string_left(pstart-1, 1);
		} else
			shift_string_left(pstart, clen);

		return (load_module_filter(s, type));  /* catch multiples */

	case LM_DIS_FILTER:
		strip_beginning_whitespace(s);
		strcpy(buf1, s);
		argc = parse_line(buf1, arglist);

		if (argc < 2)
			return s;

		/*
		 *  Fix up the first half of the disassembly expression,
		 *  that is, the address and symbol to the left of the
		 *  colon.
		 */
		colon = NULL;

		if (hexadecimal(arglist[0], VADDR_PRLEN+2) &&
		    bracketed(arglist[1], &arglist[1][1], 0) &&
		    (colon = strstr(s, ":"))) {
			strcpy(buf2, colon+2);

			vaddr = htol(arglist[0], FAULT_ON_ERROR, NULL);
			if ((sp = value_search(vaddr, &offset))) {
				if (offset)
					sprintf(s, "%s <%s+%ld>:\t%s",
					    arglist[0], sp->name, offset, buf2);
				else
					sprintf(s, "%s <%s>:\t%s",
					    arglist[0], sp->name, buf2);
			}
		}

		/*
		 *  Now work on the second part -- if it exists.
		 *  Find a virtual address followed by a bracketed symbol
		 *  at the end of the line.
		 */
		if (colon) {
			/* Re-parse: the first-half rewrite changed s. */
			strcpy(buf1, s);
			argc = parse_line(buf1, arglist);
			colon = strstr(s, ":");
		}

		last = argc-1;
		prev = argc-2;

		if (bracketed(arglist[last], &arglist[last][1], 0) &&
		    hexadecimal(arglist[prev], VADDR_PRLEN+2)) {
			vaddr = htol(arglist[prev], FAULT_ON_ERROR, NULL);
			p1 = strstr(s, arglist[last]);

			/* Only rewrite targets to the right of the colon. */
			if ((sp = value_search(vaddr, &offset)) &&
			    !(colon && (p1 < colon))) {
				if (offset)
					sprintf(p1, "<%s+%ld>\n",
						sp->name, offset);
				else
					sprintf(p1, "<%s>\n", sp->name);
			}
		}

		/* Trim a dangling ":\t" left at the end of the line. */
		pend = &s[strlen(s)-3];
		if (STREQ(pend, ":\t\n"))
			LASTCHAR(s) = NULLCHAR;

		return s;

	default:
		return NULL;  /* can't get here */
	}
}

/*
 *  Handle the various commands for controlling symbol string space:
 *
 *  NAMESPACE_INIT: Allocates an estimated size for the string space.
 *  NAMESPACE_REUSE: Resets appropriate fields to allow a previously
 *                   allocated module string buffer to be reused.
 *  NAMESPACE_FREE: Frees (module) string space.
 *  NAMESPACE_INSTALL: Copies a symbol name string into the next available
 *                     buffer space.  If the string cannot be squeezed in,
 *                     the whole string space is reallocated, which may
 *                     change its starting address.  For that reason, the
 *                     buffer index is temporarily stored in the sp->name
 *                     field, which NAMESPACE_COMPLETE later transforms into
 *                     the proper address when the buffer is set.
 *  NAMESPACE_COMPLETE: Reallocs a completed string buffer to the exact
 *                      size that is required, and then calculates and stores
 *                      the proper addresses into the name fields of the
 *                      passed-in syment array.
 */

/* Initial per-symbol size estimate used by NAMESPACE_INIT. */
#define AVERAGE_SYMBOL_SIZE (16)

/*
 *  If "symbol" contains a mangled Rust symbol (v0 "_R" or legacy "_ZN"),
 *  write its demangled form into "out", preserving any prefix preceding
 *  the mangled portion.  Returns 1 on success, 0 otherwise.
 *
 *  NOTE(review): the memcpy of the demangled text is bounded only by the
 *  initial "out_size < len" check against the *mangled* length; a
 *  demangled name longer than out_size would overflow -- confirm that
 *  rust_demangle() output always fits in the caller's BUFSIZE buffer.
 */
static size_t
rust_demangle_symbol(const char *symbol, char *out, size_t out_size)
{
	int i;
	size_t loc = 0;
	size_t len = strlen(symbol);
	char *buf = NULL;

	/*
	 * Rust symbols always start with _R (v0) or _ZN (legacy)
	 */
	const char *mangled_rust[] = {
		"_R",
		"_ZN",
		NULL
	};

	if (!out || out_size < len)
		return 0;

	/* Locate the mangled part; copy any leading prefix verbatim. */
	for (i = 0; mangled_rust[i]; i++) {
		size_t sz = strlen(mangled_rust[i]);
		char *p = memmem(symbol, len, mangled_rust[i], sz);

		if (p) {
			loc = p - symbol;
			if (loc)
				memcpy(out, symbol, loc);
			break;
		}
	}

	buf = rust_demangle(symbol + loc, DMGL_RUST);
	if (buf) {
		memcpy(out + loc, buf, strlen(buf));
		free(buf);
		return 1;
	} else if (loc != 0)
		memset(out, 0, loc);  /* undo the prefix copy on failure */

	return 0;
}

/*
 *  Control a symbol_namespace string space; the commands are described
 *  in the block comment above.  nsarg1/nsarg2 are command-specific:
 *  an entry count for INIT, the syment and name for INSTALL, and the
 *  syment range for COMPLETE.  Returns TRUE on success.
 */
static int
namespace_ctl(int cmd, struct symbol_namespace *ns, void *nsarg1, void *nsarg2)
{
	char *addr;
	struct syment *sp, *sp_end;
	char *name;
	long cnt;
	int len;
	char demangled[BUFSIZE] = {0};

	switch (cmd)
	{
	case NAMESPACE_INIT:
		cnt = (long)nsarg1;
		if ((addr = calloc(cnt, AVERAGE_SYMBOL_SIZE)) == NULL)
			return FALSE;
		ns->address = addr;
		ns->index = 0;
		ns->cnt = 0;
		ns->size = cnt * AVERAGE_SYMBOL_SIZE;
		return TRUE;

	case NAMESPACE_REUSE:
		ns->index = 0;
		ns->cnt = 0;
		return TRUE;

	case NAMESPACE_FREE:
		if (!ns->address)
			error(FATAL,
			    "attempt to free unallocated module namespace\n");
		free(ns->address);
		ns->address = 0;
		ns->index = 0;
		ns->size = 0;
		ns->cnt = 0;
		return TRUE;

	case NAMESPACE_INSTALL:
		sp = (struct syment *)nsarg1;
		name = (char *)nsarg2;
		len = strlen(name)+1;
		/* Rust symbols are stored in demangled form. */
		if (rust_demangle_symbol(name, demangled, sizeof(demangled))) {
			len = strlen(demangled) + 1;
			name = demangled;
		}
		if ((ns->index + len) >= ns->size) {
			/*
			 *  NOTE(review): one doubling assumes len fits in
			 *  the grown buffer; names are BUFSIZE-bounded so
			 *  this looks safe in practice -- confirm.
			 */
			if (!(addr = realloc(ns->address, ns->size*2)))
				error(FATAL,
				    "symbol name space malloc: %s\n",
					strerror(errno));
			ns->address = addr;
			ns->size *= 2;
		}
		/* Temporarily stash the index; COMPLETE rewrites it. */
		sp->name = (char *)ns->index;
		BCOPY(name, &ns->address[ns->index], len);
		ns->index += len;
		ns->cnt++;
		return TRUE;

	case NAMESPACE_COMPLETE:
		sp = (struct syment *)nsarg1;
		sp_end = (struct syment *)nsarg2;
		/* Shrink the buffer to the exact size used. */
		if (ns->index < (ns->size-1)) {
			if ((addr = realloc(ns->address, ns->index+1))) {
				ns->address = addr;
				ns->size = ns->index+1;
			}
		}
		/* Turn each stored index into a real string address. */
		for ( ; sp < sp_end; sp++)
			sp->name = ns->address + (long)sp->name;
		return TRUE;

	default:
		return FALSE;  /* can't get here */
	}
}

/*
 *  These comparison functions must return an integer less than,
 *  equal to, or greater than zero if the first argument is
 *  considered to be respectively less than, equal to, or
 *  greater than the second.  If two members compare as equal,
 *  their order in the sorted array is undefined.
 */

static int
compare_syms(const void *v1, const void *v2)
{
	struct syment *s1, *s2;
	char sn1[BUFSIZE], sn2[BUFSIZE];

	s1 = (struct syment *)v1;
	s2 = (struct syment *)v2;

	if (s1->value == s2->value) {
		/* "__insmod" entries sort ahead of everything else. */
		if (STRNEQ(s1->name, "__insmod"))
			return -1;
		if (STRNEQ(s2->name, "__insmod"))
			return 1;
		if (MODULE_MEM_START(s2, MOD_TEXT) ||
		    STRNEQ(s2->name, "_MODULE_START_"))
			return 1;

		/* Get pseudo section name. */
		if (MODULE_SECTION_START(s1))
			sscanf(s1->name,
			    "_MODULE_SECTION_START [%s]", sn1);
		else if (MODULE_SECTION_END(s1))
			sscanf(s1->name,
			    "_MODULE_SECTION_END [%s]", sn1);

		if (MODULE_SECTION_START(s2))
			sscanf(s2->name,
			    "_MODULE_SECTION_START [%s]", sn2);
		else if (MODULE_SECTION_END(s2))
			sscanf(s2->name,
			    "_MODULE_SECTION_END [%s]", sn2);

		/*
		 *  Sort pseudo symbols with their section in mind.
		 *  Equal values must be ordered like these examples:
		 *
		 *  - exp1
		 *  c9046000 MODULE START: sctp
		 *  c9046000 [.data]: section start
		 *  c9046000 (D) sctp_timer_events
		 *
		 *  - exp2
		 *  c9046c68 [.bss]: section end
		 *  c9046c68 MODULE END: sctp
		 *
		 *  - exp3
		 *  c90e9b44 [.text]: section end
		 *  c90e9b44 [.exit.text]: section start
		 *  c90e9b44 (T) cleanup_module
		 *  c90e9b44 (t) sctp_exit
		 *  c90e9c81 [.exit.text]: section end
		 */
		if (MODULE_SECTION_END(s1)) {
			if (!MODULE_PSEUDO_SYMBOL(s2) || MODULE_END(s2))
				return -1;
			else if (MODULE_SECTION_START(s2) &&
			    !STREQ(sn1, sn2))
				return -1;
			return 1;
		}
		if (MODULE_SECTION_END(s2)) {
			if (MODULE_END(s1) || !MODULE_PSEUDO_SYMBOL(s1))
				return 1;
			else if (MODULE_SECTION_START(s1) &&
			    STREQ(sn1, sn2))
				return 1;
			return -1;
		}
		if (MODULE_SECTION_START(s2)) {
			if (MODULE_START(s1))
				return -1;
			return 1;
		}
	}

	return (s1->value < s2->value ? -1 :
		s1->value == s2->value ? 0 : 1);
}

/* Order load_modules by ascending base address. */
static int
compare_mods(const void *v1, const void *v2)
{
	struct load_module *lm1, *lm2;

	lm1 = (struct load_module *)v1;
	lm2 = (struct load_module *)v2;

	return (lm1->mod_base < lm2->mod_base ? -1 :
		lm1->mod_base == lm2->mod_base ? 0 : 1);
}

/*
 *  Check whether a value falls into a text-type (SEC_CODE) section.
 *  If it's a module address, and symbols are not loaded, we're forced
 *  to use our "mod_etext_guess" value.
 */
int
is_kernel_text(ulong value)
{
	int i, s;
	asection **sec, *section;
	struct load_module *lm;
	ulong start, end;
	struct syment *sp;

	start = 0;

	if (pc->flags & SYSMAP) {
		if ((sp = value_search(value, NULL)) && is_symbol_text(sp))
			return TRUE;

		/*
		 *  With only a system map, treat everything from the first
		 *  text symbol up to kt->etext as text.
		 */
		for (sp = st->symtable; sp < st->symend; sp++) {
			if (!is_symbol_text(sp))
				continue;
			if ((value >= sp->value) && (value < kt->etext))
				return TRUE;
			break;
		}
	} else {
		/* Scan the vmlinux bfd sections for SEC_CODE ranges. */
		sec = (asection **)st->sections;
		for (i = 0; i < st->bfd->section_count; i++, sec++) {
			section = *sec;
			if (section->flags & SEC_CODE) {
				start = (ulong)bfd_section_vma(section);
				end = start + (ulong)bfd_section_size(section);
				/* Undo the KASLR relocation offset. */
				if (kt->flags2 & KASLR) {
					start += (kt->relocate * -1);
					end += (kt->relocate * -1);
				}

				if ((value >= start) && (value < end))
					return TRUE;
			}
		}
	}

	if ((sp = value_search(value, NULL)) && is_symbol_text(sp))
		return TRUE;

	if (NO_MODULES() || !(st->flags & MODULE_SYMS))
		return FALSE;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (!IN_MODULE(value, lm) && !IN_MODULE_INIT(value, lm))
			continue;

		if (lm->mod_flags & MOD_LOAD_SYMS) {
			/* Debuginfo is loaded: use real section data. */
			for (s = (lm->mod_sections-1); s >= 0; s--) {
				if (!(lm->mod_section_data[s].flags & SEC_CODE))
					continue;

				if (MODULE_MEMORY())
					start = lm->mod_section_data[s].addr;
				else
					start = lm->mod_base +
						lm->mod_section_data[s].offset;
				end = start + lm->mod_section_data[s].size;

				if ((value >= start) && (value < end))
					return TRUE;
			}
		} else if (MODULE_MEMORY()) {
			if (IN_MODULE_TEXT(value, lm))
				return TRUE;
		} else {
			/* No symbols loaded: fall back to mod_etext_guess. */
			switch (kt->flags & (KMOD_V1|KMOD_V2))
			{
			case KMOD_V1:
				start = lm->mod_base + lm->mod_size_of_struct;
				break;
			case KMOD_V2:
				if (IN_MODULE(value, lm))
					start = lm->mod_base;
				else
					start = lm->mod_init_module_ptr;
				break;
			}
			end = lm->mod_etext_guess;
			if (IN_MODULE_INIT(value, lm) &&
			    end < lm->mod_init_module_ptr + lm->mod_init_size)
				end = lm->mod_init_module_ptr +
					lm->mod_init_size;

			if ((value >= start) && (value < end))
				return TRUE;
		}
	}

	return FALSE;
}

/*
 *  Determine whether an address is offset into a text
function, i.e., not * the starting address of the function. */ int is_kernel_text_offset(ulong value) { struct syment *sp; ulong offset; if (!is_kernel_text(value)) return FALSE; if (!(sp = value_search(value, &offset))) return FALSE; return(offset ? TRUE : FALSE); } int is_symbol_text(struct syment *sp) { if ((sp->type == 'T') || (sp->type == 't')) return TRUE; if ((sp->type == 'W') || (sp->type == 'w')) { if ((sp->value >= kt->stext) && (sp->value < kt->etext)) return TRUE; } return FALSE; } /* * Check whether an address is most likely kernel data. * * TBD: This should be refined to recognize module text/data. */ int is_kernel_data(ulong value) { return(IS_KVADDR(value) && !is_kernel_text(value) && !IS_MODULE_VADDR(value)); } /* * Check whether the closest symbol to a value is rodata. */ int is_rodata(ulong value, struct syment **spp) { struct syment *sp; if (!(sp = value_search(value, NULL))) return FALSE; if ((sp->type == 'r') || (sp->type == 'R')) { if (spp) *spp = sp; return TRUE; } return FALSE; } /* * For a given kernel virtual address, request that gdb return * the address range of the containing function. For module * text addresses, its debuginfo data must be loaded. */ int get_text_function_range(ulong vaddr, ulong *low, ulong *high) { struct syment *sp; struct gnu_request gnu_request, *req = &gnu_request; struct load_module *lm; ulong size; if (!(sp = value_search(vaddr, NULL))) return FALSE; if (module_symbol(vaddr, NULL, &lm, NULL, 0)) { if (kallsyms_module_function_size(sp, lm, &size)) { *low = sp->value; *high = sp->value + size; return TRUE; } } BZERO(req, sizeof(struct gnu_request)); req->command = GNU_GET_FUNCTION_RANGE; req->pc = sp->value; req->name = sp->name; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) return FALSE; if ((vaddr < req->addr) || (vaddr >= req->addr2)) return FALSE; *low = req->addr; *high = req->addr2; return TRUE; } /* * Get the text size of a module function from kallsyms. 
 */
/*
 *  Look up sp's kallsyms ELF entry in the module's in-memory symbol
 *  table and return its st_size, i.e. the function's text size.
 *  Only applicable on KALLSYMS_V2 kernels older than 5.0 (later
 *  kernels no longer keep a useful st_size).  Returns TRUE with
 *  *size filled in on success, FALSE otherwise.
 */
static int
kallsyms_module_function_size(struct syment *sp, struct load_module *lm,
	ulong *size)
{
	int i;
	ulong nksyms, ksymtab, st_size;
	char *ptr, *module_buf, *module_buf_init, *modbuf, *locsymtab;
	struct elf_common elf_common, *ec;

	if (!(lm->mod_flags & MOD_KALLSYMS) || !(kt->flags & KALLSYMS_V2))
		return FALSE;

	if (THIS_KERNEL_VERSION >= LINUX(5,0,0)) /* st_size not useable */
		return FALSE;

	module_buf = GETBUF(lm->mod_size);
	/* Location of the struct module within the copied image. */
	modbuf = module_buf + (lm->module_struct - lm->mod_base);

	if (!readmem(lm->mod_base, KVADDR, module_buf, lm->mod_size,
	    "module (kallsyms)", RETURN_ON_ERROR|QUIET)) {
		FREEBUF(module_buf);
		return FALSE;
	}

	if (lm->mod_init_size > 0) {
		module_buf_init = GETBUF(lm->mod_init_size);

		if (!readmem(lm->mod_init_module_ptr, KVADDR,
		    module_buf_init, lm->mod_init_size,
		    "module init (kallsyms)", RETURN_ON_ERROR|QUIET)) {
			FREEBUF(module_buf_init);
			module_buf_init = NULL;
		}
	} else
		module_buf_init = NULL;

	if (THIS_KERNEL_VERSION >= LINUX(2,6,27))
		nksyms = UINT(modbuf + OFFSET(module_num_symtab));
	else
		nksyms = ULONG(modbuf + OFFSET(module_num_symtab));

	ksymtab = ULONG(modbuf + OFFSET(module_symtab));
	if (!IN_MODULE(ksymtab, lm) && !IN_MODULE_INIT(ksymtab, lm)) {
		FREEBUF(module_buf);
		if (module_buf_init)
			FREEBUF(module_buf_init);
		return FALSE;
	}
	if (IN_MODULE(ksymtab, lm))
		locsymtab = module_buf + (ksymtab - lm->mod_base);
	else
		locsymtab = module_buf_init +
			(ksymtab - lm->mod_init_module_ptr);

	st_size = 0;
	ec = &elf_common;
	BZERO(&elf_common, sizeof(struct elf_common));

	for (i = 1; i < nksyms; i++) { /* ELF starts real symbols at 1 */
		switch (BITS())
		{
		case 32:
			ptr = locsymtab + (i * sizeof(Elf32_Sym));
			Elf32_Sym_to_common((Elf32_Sym *)ptr, ec);
			break;
		case 64:
			ptr = locsymtab + (i * sizeof(Elf64_Sym));
			Elf64_Sym_to_common((Elf64_Sym *)ptr, ec);
			break;
		}

		/* Match by symbol value; take its recorded size. */
		if (sp->value == ec->st_value) {
			if (CRASHDEBUG(1))
				fprintf(fp, "kallsyms_module_function_size: "
				    "st_value: %lx st_size: %ld\n",
					ec->st_value, ec->st_size);
			st_size = ec->st_size;
			break;
		}
	}

	if (module_buf_init)
		FREEBUF(module_buf_init);
	FREEBUF(module_buf);

	if (st_size) {
		*size = st_size;
		return TRUE;
	}

	return FALSE;
}

/*
 *  Print the per-bucket chain lengths of a SYMNAME_HASH-sized table,
 *  six buckets per output line.
 */
static void
dump_symname_hash_table(struct syment *table[])
{
	int i, cnt, tot;
	struct syment *sp;

	fprintf(fp, " ");
	for (i = tot = 0; i < SYMNAME_HASH; i++) {
		fprintf(fp, "[%3d]: ", i);
		if ((sp = table[i]) == NULL)
			fprintf(fp, "%3d ", 0);
		else {
			cnt = 1;
			while (sp->name_hash_next) {
				cnt++;
				sp = sp->name_hash_next;
			}
			fprintf(fp, "%3d ", cnt);
			tot += cnt;
		}
		if (i && (((i+1) % 6) == 0))
			fprintf(fp, "\n ");
	}
	if (SYMNAME_HASH % 6)
		fprintf(fp, "\n");
}

/*
 * "help -s" output
 */
void
dump_symbol_table(void)
{
	int i, s, cnt;
	struct load_module *lm;
	struct syment *sp;
	struct downsized *ds;
	int others;
	asection **sec;

	/* Decode st->flags into its symbolic names. */
	fprintf(fp, " flags: %lx%s(", st->flags,
		count_bits_long(st->flags) > 3 ? "\n " : " ");
	others = 0;
	if (st->flags & KERNEL_SYMS)
		fprintf(fp, "%sKERNEL_SYMS", others++ ? "|" : "");
	if (st->flags & MODULE_SYMS)
		fprintf(fp, "%sMODULE_SYMS", others++ ? "|" : "");
	if (st->flags & LOAD_MODULE_SYMS)
		fprintf(fp, "%sLOAD_MODULE_SYMS", others++ ? "|" : "");
	if (st->flags & INSMOD_BUILTIN)
		fprintf(fp, "%sINSMOD_BUILTIN", others++ ? "|" : "");
	if (st->flags & GDB_SYMS_PATCHED)
		fprintf(fp, "%sGDB_SYMS_PATCHED", others++ ? "|" : "");
	if (st->flags & NO_SEC_LOAD)
		fprintf(fp, "%sNO_SEC_LOAD", others++ ? "|" : "");
	if (st->flags & NO_SEC_CONTENTS)
		fprintf(fp, "%sNO_SEC_CONTENTS", others++ ? "|" : "");
	if (st->flags & FORCE_DEBUGINFO)
		fprintf(fp, "%sFORCE_DEBUGINFO", others++ ? "|" : "");
	if (st->flags & CRC_MATCHES)
		fprintf(fp, "%sCRC_MATCHES", others++ ? "|" : "");
	if (st->flags & ADD_SYMBOL_FILE)
		fprintf(fp, "%sADD_SYMBOL_FILE", others++ ? "|" : "");
	if (st->flags & USE_OLD_ADD_SYM)
		fprintf(fp, "%sUSE_OLD_ADD_SYM", others++ ? "|" : "");
	if (st->flags & PERCPU_SYMS)
		fprintf(fp, "%sPERCPU_SYMS", others++ ? "|" : "");
	if (st->flags & MODSECT_V1)
		fprintf(fp, "%sMODSECT_V1", others++ ?
"|" : ""); if (st->flags & MODSECT_V2) fprintf(fp, "%sMODSECT_V2", others++ ? "|" : ""); if (st->flags & MODSECT_V3) fprintf(fp, "%sMODSECT_V3", others++ ? "|" : ""); if (st->flags & MODSECT_UNKNOWN) fprintf(fp, "%sMODSECT_UNKNOWN", others++ ? "|" : ""); if (st->flags & NO_STRIP) fprintf(fp, "%sNO_STRIP", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " bfd: %lx\n", (ulong)st->bfd); fprintf(fp, " symtable: %lx\n", (ulong)st->symtable); fprintf(fp, " symend: %lx\n", (ulong)st->symend); fprintf(fp, " symcnt: %ld\n", st->symcnt); fprintf(fp, " syment_size: %ld\n", st->syment_size); fprintf(fp, " first_ksymbol: "); if (st->first_ksymbol) { fprintf(fp, "%lx (%s)\n", st->first_ksymbol, st->flags & KERNEL_SYMS ? value_symbol(st->first_ksymbol) : ""); } else fprintf(fp, "(unused)\n"); if (st->__per_cpu_start || st->__per_cpu_end) { fprintf(fp, " __per_cpu_start: %lx\n", st->__per_cpu_start); fprintf(fp, " __per_cpu_end: %lx\n", st->__per_cpu_end); } else { fprintf(fp, " __per_cpu_start: (unused)\n"); fprintf(fp, " __per_cpu_end: (unused)\n"); } fprintf(fp, " first_section_start: %lx\n", st->first_section_start); fprintf(fp, " last_section_end: %lx\n", st->last_section_end); fprintf(fp, " _stext_vmlinux: %lx ", st->_stext_vmlinux); if (st->_stext_vmlinux == UNINITIALIZED) fprintf(fp, "(UNINITIALIZED)\n"); else if (st->_stext_vmlinux == 0) fprintf(fp, "(unused)\n"); else fprintf(fp, "\n"); if (SADUMP_DUMPFILE() || QEMU_MEM_DUMP_NO_VMCOREINFO() || VMSS_DUMPFILE()) { fprintf(fp, "divide_error_vmlinux: %lx\n", st->divide_error_vmlinux); fprintf(fp, " idt_table_vmlinux: %lx\n", st->idt_table_vmlinux); fprintf(fp, "saved_command_line_vmlinux: %lx\n", st->saved_command_line_vmlinux); fprintf(fp, " pti_init_vmlinux: %lx\n", st->pti_init_vmlinux); fprintf(fp, " kaiser_init_vmlinux: %lx\n", st->kaiser_init_vmlinux); } else { fprintf(fp, "divide_error_vmlinux: (unused)\n"); fprintf(fp, " idt_table_vmlinux: (unused)\n"); fprintf(fp, "saved_command_line_vmlinux: (unused)\n"); 
fprintf(fp, " pti_init_vmlinux: (unused)\n"); fprintf(fp, " kaiser_init_vmlinux: (unused)\n"); } if (SADUMP_DUMPFILE()) fprintf(fp, "linux_banner_vmlinux: %lx\n", st->linux_banner_vmlinux); else fprintf(fp, "linux_banner_vmlinux: (unused)\n"); fprintf(fp, " symval_hash[%d]: %lx\n", SYMVAL_HASH, (ulong)&st->symval_hash[0]); if (CRASHDEBUG(1)) { fprintf(fp, " "); for (i = 0; i < SYMVAL_HASH; i++) { fprintf(fp, " [%3d]: ", i); sp = st->symval_hash[i].val_hash_head; if (!sp) { fprintf(fp, " 0 "); } else { cnt = 1; while ((sp = sp->val_hash_next)) cnt++; fprintf(fp, "%3d ", cnt); } if (i && (((i+1)%6)== 0)) fprintf(fp, "\n "); } } fprintf(fp, "%s val_hash_searches: %.0f\n", CRASHDEBUG(1) ? "\n" : "", st->val_hash_searches); fprintf(fp, " val_hash_iterations: %.0f (avg: %.1f)\n", st->val_hash_iterations, st->val_hash_iterations/st->val_hash_searches); fprintf(fp, " symname_hash[%d]: %lx\n", SYMNAME_HASH, (ulong)&st->symname_hash[0]); if (CRASHDEBUG(1)) dump_symname_hash_table(st->symname_hash); fprintf(fp, "mod_symname_hash[%d]: %lx\n", SYMNAME_HASH, (ulong)&st->mod_symname_hash[0]); if (CRASHDEBUG(1)) dump_symname_hash_table(st->mod_symname_hash); fprintf(fp, " symbol_namespace: "); fprintf(fp, "address: %lx ", (ulong)st->kernel_namespace.address); fprintf(fp, "index: %ld ", st->kernel_namespace.index); fprintf(fp, "size: %ld ", (ulong)st->kernel_namespace.size); fprintf(fp, "cnt: %ld\n", st->kernel_namespace.cnt); fprintf(fp, " ext_module_symtable: %lx\n", (ulong)st->ext_module_symtable); fprintf(fp, " ext_module_symend: %lx\n", (ulong)st->ext_module_symend); fprintf(fp, " ext_module_symcnt: %ld\n", (ulong)st->ext_module_symcnt); fprintf(fp, "ext_module_namespace: "); fprintf(fp, "address: %lx ", (ulong)st->ext_module_namespace.address); fprintf(fp, "index: %ld ", st->ext_module_namespace.index); fprintf(fp, "size: %ld ", (ulong)st->ext_module_namespace.size); fprintf(fp, "cnt: %ld\n", st->ext_module_namespace.cnt); fprintf(fp, " mods_installed: %d\n", 
st->mods_installed); fprintf(fp, " current: %lx\n", (ulong)st->current); fprintf(fp, " load_modules: %lx\n", (ulong)st->load_modules); for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; others = 0; fprintf(fp, "\n mod_base: %lx\n", lm->mod_base); fprintf(fp, " module_struct: %lx\n", lm->module_struct); fprintf(fp, " mod_name: %s\n", lm->mod_name); fprintf(fp, " mod_size: %ld\n", lm->mod_size); fprintf(fp, " mod_namelist: %s\n", lm->mod_namelist); fprintf(fp, " mod_flags: %lx (", lm->mod_flags); if (lm->mod_flags & MOD_EXT_SYMS) fprintf(fp, "%sMOD_EXT_SYMS", others++ ? "|" : ""); if (lm->mod_flags & MOD_LOAD_SYMS) fprintf(fp, "%sMOD_LOAD_SYMS", others++ ? "|" : ""); if (lm->mod_flags & MOD_REMOTE) fprintf(fp, "%sMOD_REMOTE", others++ ? "|" : ""); if (lm->mod_flags & MOD_KALLSYMS) fprintf(fp, "%sMOD_KALLSYMS", others++ ? "|" : ""); if (lm->mod_flags & MOD_INITRD) fprintf(fp, "%sMOD_INITRD", others++ ? "|" : ""); if (lm->mod_flags & MOD_NOPATCH) fprintf(fp, "%sMOD_NOPATCH", others++ ? "|" : ""); if (lm->mod_flags & MOD_INIT) fprintf(fp, "%sMOD_INIT", others++ ? "|" : ""); if (lm->mod_flags & MOD_DO_READNOW) fprintf(fp, "%sMOD_DO_READNOW", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " mod_symtable: %lx\n", (ulong)lm->mod_symtable); fprintf(fp, " mod_symend: %lx\n", (ulong)lm->mod_symend); fprintf(fp, " mod_init_symtable: %lx\n", (ulong)lm->mod_init_symtable); fprintf(fp, " mod_init_symend: %lx\n", (ulong)lm->mod_init_symend); fprintf(fp, " mod_ext_symcnt: %ld\n", lm->mod_ext_symcnt); fprintf(fp, " mod_ext_symtable: %lx\n", (ulong)lm->mod_ext_symtable); fprintf(fp, " mod_ext_symend: %lx\n", (ulong)lm->mod_ext_symend); fprintf(fp, " mod_load_symcnt: %ld\n", lm->mod_load_symcnt); fprintf(fp, " mod_load_symtable: %lx\n", (ulong)lm->mod_load_symtable); fprintf(fp, " mod_load_symend: %lx\n", (ulong)lm->mod_load_symend); fprintf(fp, " mod_load_namespace: "); fprintf(fp, "address: %lx ", (ulong)lm->mod_load_namespace.address); fprintf(fp, "index: %ld ", lm->mod_load_namespace.index); fprintf(fp, "size: %ld ", (ulong)lm->mod_load_namespace.size); fprintf(fp, "cnt: %ld\n", lm->mod_load_namespace.cnt); fprintf(fp, " mod_symalloc: %ld\n", lm->mod_symalloc); fprintf(fp, " mod_size_of_struct: %ld (%lx)\n", lm->mod_size_of_struct, lm->mod_size_of_struct); fprintf(fp, " mod_text_start: %lx (%lx)\n", lm->mod_text_start, lm->mod_text_start ? lm->mod_text_start - lm->mod_base : 0); fprintf(fp, " mod_etext_guess: %lx (%lx)\n", lm->mod_etext_guess, lm->mod_etext_guess ? lm->mod_etext_guess - lm->mod_base : 0); fprintf(fp, " mod_rodata_start: %lx (%lx)\n", lm->mod_rodata_start, lm->mod_rodata_start ? lm->mod_rodata_start - lm->mod_base : 0); fprintf(fp, " mod_data_start: %lx (%lx)\n", lm->mod_data_start, lm->mod_data_start ? lm->mod_data_start - lm->mod_base : 0); fprintf(fp, " mod_bss_start: %lx (%lx)\n", lm->mod_bss_start, lm->mod_bss_start ? 
lm->mod_bss_start - lm->mod_base : 0); fprintf(fp, " mod_init_size: %ld\n", lm->mod_init_size); fprintf(fp, " mod_init_text_size: %ld\n", lm->mod_init_text_size); fprintf(fp, " mod_init_module_ptr: %lx\n", lm->mod_init_module_ptr); if (lm->mod_percpu_size) { fprintf(fp, " mod_percpu_size: %lx\n", lm->mod_percpu_size); fprintf(fp, " mod_percpu: %lx - %lx\n", lm->mod_percpu, lm->mod_percpu + lm->mod_percpu_size); } else { if (lm->mod_percpu) { fprintf(fp, " mod_percpu_size: (not loaded)\n"); fprintf(fp, " mod_percpu: %lx - (unknown)\n", lm->mod_percpu); } else { fprintf(fp, " mod_percpu_size: (not used)\n"); fprintf(fp, " mod_percpu: (not used)\n"); } } fprintf(fp, " mod_sections: %d\n", lm->mod_sections); fprintf(fp, " mod_section_data: %lx %s\n", (ulong)lm->mod_section_data, lm->mod_section_data ? "" : "(not allocated)"); if (MODULE_MEMORY()) { int t; for_each_mod_mem_type(t) { fprintf(fp, " mem[%d]: %lx (%x)\n", t, lm->mem[t].base, lm->mem[t].size); } fprintf(fp, " symtable: %lx\n", (ulong)lm->symtable); fprintf(fp, " ext_symtable: %lx\n", (ulong)lm->ext_symtable); for_each_mod_mem_type(t) { fprintf(fp, " ext_symtable[%d]: %lx - %lx\n", t, (ulong)lm->ext_symtable[t], (ulong)lm->ext_symend[t]); } fprintf(fp, " load_symtable: %lx\n", (ulong)lm->load_symtable); for_each_mod_mem_type(t) { fprintf(fp, " load_symtable[%d]: %lx - %lx\n", t, (ulong)lm->load_symtable[t], (ulong)lm->load_symend[t]); } } for (s = 0; s < lm->mod_sections; s++) { fprintf(fp, " %20s prio: %x flags: %08x %s: %-16lx size: %lx\n", lm->mod_section_data[s].name, lm->mod_section_data[s].priority, lm->mod_section_data[s].flags, MODULE_MEMORY() ? "addr" : "offset", MODULE_MEMORY() ? 
lm->mod_section_data[s].addr : lm->mod_section_data[s].offset, lm->mod_section_data[s].size); } fprintf(fp, " loaded_objfile: %lx\n", (ulong)lm->loaded_objfile); if (CRASHDEBUG(1) && lm->mod_load_symtable) { for (sp = lm->mod_load_symtable; sp <= lm->mod_load_symend; sp++) { fprintf(fp, " %lx %s\n", sp->value, sp->name); } } } fprintf(fp, "\n"); fprintf(fp, " dwarf_eh_frame_file_offset: %llx\n", (unsigned long long)st->dwarf_eh_frame_file_offset); fprintf(fp, " dwarf_eh_frame_size: %ld\n", st->dwarf_eh_frame_size); fprintf(fp, "dwarf_debug_frame_file_offset: %llx\n", (unsigned long long)st->dwarf_debug_frame_file_offset); fprintf(fp, " dwarf_debug_frame_size: %ld\n", st->dwarf_debug_frame_size); fprintf(fp, "\n"); sec = (asection **)st->sections; fprintf(fp, " sections: %s\n", sec ? "" : "(not in use)"); for (i = 0; sec && (i < st->bfd->section_count); i++, sec++) { asection *section; section = *sec; fprintf(fp, "%25s vma: %.*lx size: %ld\n", section->name, VADDR_PRLEN, (ulong)bfd_section_vma(section), (ulong)bfd_section_size(section)); } fprintf(fp, "\n downsized: "); if (st->downsized.name) { for (ds = &st->downsized, cnt = 0; ds->name; ds = ds->next) fprintf(fp, "%s%s", cnt++ ? ", " : "", ds->name); fprintf(fp, "\n"); } else fprintf(fp, "(none)\n"); fprintf(fp, " kernel_symbol_type: v%d\n", st->kernel_symbol_type); } /* * Determine whether a file is in ELF format by checking the magic number * in the first EI_NIDENT characters of the file; if those match, check * whether the file is a known BFD format. 
*/
int
is_elf_file(char *s)
{
	int fd;
	char magic[EI_NIDENT];

	if ((fd = open(s, O_RDONLY)) < 0) {
		error(INFO, "%s: %s\n", s, strerror(errno));
		return FALSE;
	}
	if (read(fd, magic, EI_NIDENT) != EI_NIDENT) {
		/* error(INFO, "%s: %s\n", s, strerror(errno)); */
		close(fd);
		return FALSE;
	}
	close(fd);

	/*
	 * EI_CLASS is the first byte after the 4-byte "\177ELF" magic, so
	 * NUL-terminating there lets the magic prefix be compared as a string.
	 */
	magic[EI_CLASS] = NULLCHAR;
	if (!STREQ(magic, ELFMAG))
		return FALSE;

	return(is_bfd_format(s));
}

/*
 * Verify a vmlinux file, issuing a warning for processor and endianness
 * mismatches.
 */
int
is_kernel(char *file)
{
	int fd, swap;
	char eheader[BUFSIZE];
	Elf32_Ehdr *elf32;
	Elf64_Ehdr *elf64;

	if ((fd = open(file, O_RDONLY)) < 0) {
		error(INFO, "%s: %s\n", file, strerror(errno));
		return FALSE;
	}
	if (read(fd, eheader, BUFSIZE) != BUFSIZE) {
		/* error(INFO, "%s: %s\n", file, strerror(errno)); */
		close(fd);
		return FALSE;
	}
	close(fd);

	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
		return FALSE;

	/* Both views alias the same header bytes; only one is valid per file. */
	elf32 = (Elf32_Ehdr *)&eheader[0];
	elf64 = (Elf64_Ehdr *)&eheader[0];

	/* Swap multi-byte header fields when the file's endianness differs
	 * from the host's. */
	swap = (((eheader[EI_DATA] == ELFDATA2LSB) &&
	     (__BYTE_ORDER == __BIG_ENDIAN)) ||
	    ((eheader[EI_DATA] == ELFDATA2MSB) &&
	     (__BYTE_ORDER == __LITTLE_ENDIAN)));

	if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
	    (swap16(elf32->e_type, swap) == ET_EXEC) &&
	    (swap32(elf32->e_version, swap) == EV_CURRENT)) {
		switch (swap16(elf32->e_machine, swap))
		{
		case EM_386:
			if (machine_type_mismatch(file, "X86", NULL, 0)) {
				if (machine_type("X86_64")) {
					/*
					 * Since is_bfd_format() returns TRUE
					 * in this case, just bail out here.
					 */
					return FALSE;
				}
				goto bailout;
			}
			break;

		case EM_S390:
			if (machine_type_mismatch(file, "S390", NULL, 0))
				goto bailout;
			break;

		case EM_ARM:
			if (machine_type_mismatch(file, "ARM", NULL, 0))
				goto bailout;
			break;

		case EM_PPC:
			if (machine_type_mismatch(file, "PPC", NULL, 0))
				goto bailout;
			break;

		case EM_MIPS:
			if (machine_type_mismatch(file, "MIPS", NULL, 0))
				goto bailout;
			break;

		case EM_SPARCV9:
			if (machine_type_mismatch(file, "SPARC64", NULL, 0))
				goto bailout;
			break;

		case EM_LOONGARCH:
			if (machine_type_mismatch(file, "LOONGARCH64", NULL, 0))
				goto bailout;
			break;

		default:
			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
				goto bailout;
		}

		if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0))
			goto bailout;

	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
	    ((swap16(elf64->e_type, swap) == ET_EXEC) ||
	     (swap16(elf64->e_type, swap) == ET_DYN)) &&
	    (swap32(elf64->e_version, swap) == EV_CURRENT)) {
		switch (swap16(elf64->e_machine, swap))
		{
		case EM_IA_64:
			if (machine_type_mismatch(file, "IA64", NULL, 0))
				goto bailout;
			break;

		case EM_PPC64:
			if (machine_type_mismatch(file, "PPC64", NULL, 0))
				goto bailout;
			break;

		case EM_X86_64:
			if (machine_type_mismatch(file, "X86_64", NULL, 0))
				goto bailout;
			break;

		case EM_386:
			if (machine_type_mismatch(file, "X86", NULL, 0))
				goto bailout;
			break;

		case EM_S390:
			if (machine_type_mismatch(file, "S390X", NULL, 0))
				goto bailout;
			break;

		case EM_AARCH64:
			if (machine_type_mismatch(file, "ARM64", NULL, 0))
				goto bailout;
			break;

		case EM_MIPS:
			if (machine_type_mismatch(file, "MIPS64", NULL, 0))
				goto bailout;
			break;

		case EM_RISCV:
			if (machine_type_mismatch(file, "RISCV64", NULL, 0))
				goto bailout;
			break;

		case EM_LOONGARCH:
			if (machine_type_mismatch(file, "LOONGARCH64", NULL, 0))
				goto bailout;
			break;

		default:
			if (machine_type_mismatch(file, "(unknown)", NULL, 0))
				goto bailout;
		}

		if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0))
			goto bailout;
	} else
		return FALSE;

bailout:
	return(is_bfd_format(file));
}

/*
 * Recognize a gzip-, bzip2- or xz-compressed kernel by its magic bytes,
 * uncompress it into a temporary file, and on success hand the temporary
 * file name back to the caller via *tmp (caller owns/frees it).
 */
int
is_compressed_kernel(char *file, char **tmp)
{
	int len, type, fd;
	char *tmpdir, *tempname;
	unsigned char header[BUFSIZE];
	char command[BUFSIZE];
	char message[BUFSIZE];

#define GZIP  (1)
#define BZIP2 (2)
#define XZ    (3)

#define FNAME (1 << 3)

	if ((fd = open(file, O_RDONLY)) < 0)
		return FALSE;

	if (read(fd, header, BUFSIZE) != BUFSIZE) {
		close(fd);
		return FALSE;
	}
	close(fd);

	type = 0;

	/* gzip: 0x1f 0x8b, deflate method; FNAME flag means the original
	 * file name is stored at offset 10 of the member header. */
	if ((header[0] == 0x1f) && (header[1] == 0x8b) && (header[2] == 8)) {
		if (!(header[3] & FNAME)) {
			if (!(st->flags & FORCE_DEBUGINFO)) {
				error(INFO, "%s: "
				    "original filename unknown\n",
					file);
				error(CONT,
				    "Use \"-f %s\" on command line to prevent this message.\n\n",
					file);
			}
		} else if (!STRNEQ((char *)&header[10], "vmlinux") &&
		    !(st->flags & FORCE_DEBUGINFO)) {
			error(INFO, "%s: compressed file name does not "
			    "start with \"vmlinux\"\n", &header[10]);
			error(CONT,
			    "Use \"-f %s\" on command line to override.\n\n",
				file);
			return FALSE;
		}
		type = GZIP;
	}

	/* bzip2: "BZh" magic; no stored name, so check the file's basename. */
	if ((header[0] == 'B') && (header[1] == 'Z') && (header[2] == 'h')) {
		if (!STRNEQ(basename(file), "vmlinux") &&
		    !(st->flags & FORCE_DEBUGINFO)) {
			error(INFO, "%s: compressed file name does not start "
			    "with \"vmlinux\"\n", file);
			error(CONT,
			    "Use \"-f %s\" on command line to override.\n\n",
				file);
			return FALSE;
		}
		type = BZIP2;
	}

	/* xz: 6-byte stream magic. */
	if (!memcmp(header, "\xfd""7zXZ", 6)) {
		if (!STRNEQ(basename(file), "vmlinux") &&
		    !(st->flags & FORCE_DEBUGINFO)) {
			error(INFO, "%s: compressed file name does not start "
			    "with \"vmlinux\"\n", file);
			error(CONT,
			    "Use \"-f %s\" on command line to override.\n\n",
				file);
			return FALSE;
		}
		type = XZ;
	}

	if (!type)
		return FALSE;

	if (!(tmpdir = getenv("TMPDIR")))
		tmpdir = "/var/tmp";

	len = strlen(tmpdir) + strlen(basename(file)) +
		strlen("_XXXXXX") + 2;
	if (!(tempname = (char *)malloc(len)))
		return FALSE;
	sprintf(tempname, "%s/%s_XXXXXX", tmpdir, basename(file));

	fd = mkstemp(tempname);
	if (fd < 0) {
		perror("mkstemp");
		free(tempname);
		return FALSE;
	}
	/* register for cleanup-on-exit while the uncompress runs */
	pc->cleanup = tempname;

	sprintf(message, "uncompressing %s", file);
	please_wait(message);

	switch (type)
	{
	case GZIP:
		sprintf(command, "%s -c %s > %s",
			file_exists("/bin/gunzip", NULL) ?
			"/bin/gunzip" : "/usr/bin/gunzip",
			file, tempname);
		break;
	case BZIP2:
		sprintf(command, "%s -c %s > %s",
			file_exists("/bin/bunzip2", NULL) ?
			"/bin/bunzip2" : "/usr/bin/bunzip2",
			file, tempname);
		break;
	case XZ:
		sprintf(command, "%s -c %s > %s",
			file_exists("/bin/unxz", NULL) ?
			"/bin/unxz" : "/usr/bin/unxz",
			file, tempname);
		break;
	}

	if (system(command) < 0) {
		please_wait_done();
		error(INFO, "%s of %s failed\n",
			type == GZIP ? "gunzip" :
			(type == BZIP2 ? "bunzip2" : "unxz"), file);
		free(tempname);
		return FALSE;
	}
	please_wait_done();

	if (is_bfd_format(tempname) && is_kernel(tempname)) {
		/* ownership of tempname transfers to the caller */
		*tmp = tempname;
		return TRUE;
	}

	unlink(tempname);
	close(fd);
	free(tempname);
	pc->cleanup = NULL;

	return FALSE;
}

/*
 * Determine whether an ELF file is a shared object whose machine type
 * matches (or is loadable alongside) the current machine type.
 */
int
is_shared_object(char *file)
{
	int fd, swap;
	char eheader[BUFSIZE];
	Elf32_Ehdr *elf32;
	Elf64_Ehdr *elf64;

	if (is_directory(file))
		return FALSE;

	if ((fd = open(file, O_RDONLY)) < 0)
		return FALSE;

	if (read(fd, eheader, BUFSIZE) != BUFSIZE) {
		close(fd);
		return FALSE;
	}
	close(fd);

	if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT)
		return FALSE;

	elf32 = (Elf32_Ehdr *)&eheader[0];
	elf64 = (Elf64_Ehdr *)&eheader[0];

	swap = (((eheader[EI_DATA] == ELFDATA2LSB) &&
	     (__BYTE_ORDER == __BIG_ENDIAN)) ||
	    ((eheader[EI_DATA] == ELFDATA2MSB) &&
	     (__BYTE_ORDER == __LITTLE_ENDIAN)));

	if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) &&
	    (swap16(elf32->e_type, swap) == ET_DYN)) {
		switch (swap16(elf32->e_machine, swap))
		{
		case EM_386:
			if (machine_type("X86") || machine_type("ARM") ||
			    machine_type("MIPS"))
				return TRUE;
			break;

		case EM_S390:
			if (machine_type("S390"))
				return TRUE;
			break;

		case EM_ARM:
			if (machine_type("ARM"))
				return TRUE;
			break;

		case EM_MIPS:
			if (machine_type("MIPS"))
				return TRUE;
			break;

		case EM_PPC:
			if (machine_type("PPC"))
				return TRUE;
			break;
		}

		if (CRASHDEBUG(1))
			error(INFO, "%s: machine type mismatch: %d\n",
				file, swap16(elf32->e_machine, swap));

		return FALSE;

	} else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) &&
	    (swap16(elf64->e_type, swap) == ET_DYN)) {
		switch (swap16(elf64->e_machine, swap))
		{
		case EM_IA_64:
			if (machine_type("IA64"))
				return TRUE;
			break;

		case EM_PPC64:
			if (machine_type("PPC64"))
				return TRUE;
			break;

		case EM_X86_64:
			if (machine_type("X86_64") || machine_type("ARM64") ||
			    machine_type("PPC64") || machine_type("RISCV64"))
				return TRUE;
			break;

		case EM_S390:
			if (machine_type("S390X"))
				return TRUE;
			break;

		case EM_AARCH64:
			if (machine_type("ARM64"))
				return TRUE;
			break;

		case EM_SPARCV9:
			if (machine_type("SPARC64"))
				return TRUE;
			break;

		case EM_MIPS:
			if (machine_type("MIPS64"))
				return TRUE;
			break;

		case EM_RISCV:
			if (machine_type("RISCV64"))
				return TRUE;
			break;

		case EM_LOONGARCH:
			if (machine_type("LOONGARCH64"))
				return TRUE;
			break;
		}

		/*
		 * NOTE(review): elf32 is used here in the 64-bit branch;
		 * harmless in practice since e_machine sits at the same
		 * offset in Elf32_Ehdr and Elf64_Ehdr, but elf64 would be
		 * the consistent choice.
		 */
		if (CRASHDEBUG(1))
			error(INFO, "%s: machine type mismatch: %d\n",
				file, swap16(elf32->e_machine, swap));
	}

	return FALSE;
}

/*
 * Given a choice between two namelists, pick the one for gdb to use.
 * For now, just get their stats and check their sizes; the larger
 * one presumably has debug data.
 */
int
select_namelist(char *new)
{
	struct stat stat1, stat2;
	char *namep;

	if (pc->server_namelist) {
		pc->namelist_debug = new;
		return TRUE;
	}

	if (!file_exists(pc->namelist, &stat1) ||
	    !file_exists(new, &stat2)) {
		return FALSE;
	}

	if (stat1.st_size > stat2.st_size) {
		/* existing namelist is larger: keep it as the debug one
		 * and let "new" become the primary namelist */
		pc->namelist_debug = pc->namelist;
		if (pc->namelist_orig) {
			namep = pc->namelist_debug_orig;
			pc->namelist_debug_orig = pc->namelist_orig;
			pc->namelist_orig = namep;
		}
		pc->namelist = new;
	} else if (stat2.st_size > stat1.st_size)
		pc->namelist_debug = new;
	else {
		error(INFO, "cannot distinguish %s and %s\n",
			pc->namelist, new);
		return FALSE;
	}

	return TRUE;
}

/*
 * Make a sweep of a non-dump, non-ELF file to guess whether it's a
 * legitimate System.map file.
 */
int
is_system_map(char *s)
{
	int i, lines, retval;
	char *mapitems[MAXARGS];
	char buf[16384];
	FILE *map;

	/*
	 * First simulate what "file" does by verifying that the first 16K
	 * bytes are ascii data.
	 */
	if ((map = fopen(s, "r")) == NULL) {
		error(INFO, "cannot open %s\n", s);
		return FALSE;
	}

	retval = FALSE;

	if (fread(buf, sizeof(char), 16384, map) != (16384*sizeof(char))) {
		if (CRASHDEBUG(1))
			error(INFO, "%s: cannot read 16K\n", s);
		goto not_system_map;
	}

	for (i = 0; i < 16384; i++) {
		if (!ascii(buf[i]))
			goto not_system_map;
	}

	rewind(map);

	/* Then demand "address type name" triples on the first 100 lines. */
	for (lines = 0; lines < 100; lines++) {
		if (!fgets(buf, BUFSIZE, map))
			goto not_system_map;
		if (parse_line(buf, mapitems) != 3)
			goto not_system_map;
		if ((strlen(mapitems[0]) > MAX_HEXADDR_STRLEN) ||
		    !hexadecimal(mapitems[0], 0) || (strlen(mapitems[1]) > 1))
			goto not_system_map;
	}

	if ((pc->flags & SYSMAP) && !same_file("/boot/System.map", s))
		error(INFO, "overriding /boot/System.map with %s\n", s);

	retval = TRUE;

not_system_map:
	fclose(map);
	return retval;
}

/*
 * Check whether a file is a known BFD format.
 */
static int
is_bfd_format(char *filename)
{
#ifdef GDB_5_3
	struct _bfd *bfd;
#else
	struct bfd *bfd;
#endif
	if ((bfd = bfd_openr(filename, NULL)) == NULL)
		return FALSE;

	if (!bfd_check_format_matches(bfd, bfd_object, NULL)) {
		bfd_close(bfd);
		return FALSE;
	}

	bfd_close(bfd);
	return TRUE;
}

/*
 * Report whether an ELF object file contains no symbols, i.e. has been
 * stripped.
 */
static int
is_binary_stripped(char *filename)
{
#ifdef GDB_5_3
	struct _bfd *bfd;
#else
	struct bfd *bfd;
#endif
	int number_of_symbols;

	if ((bfd = bfd_openr(filename, NULL)) == NULL) {
		error(INFO, "cannot open ELF file: %s\n", filename);
		return FALSE;
	}

	if (!bfd_check_format(bfd, bfd_object)) {
		error(INFO, "invalid ELF file: %s\n", filename);
		bfd_close(bfd);
		return FALSE;
	}

	/* NULL location argument: only the symbol count is wanted */
	number_of_symbols = bfd_canonicalize_symtab(bfd, NULL);

	bfd_close(bfd);
	return (number_of_symbols == 0);
}

/*
 * This command may be used to:
 *
 *  1. Translate a symbol to its value.
 *  2. Translate a value to its symbol.
 *  3. List all stored symbols.
 *  4. Query for symbols containing a string.
 *  5. Show the next and previous symbols.
 */
void
cmd_sym(void)
{
	int c;
	struct syment *sp, *spp, *spn;
	ulong value, show_flags;
	ulong offset;
	int next, prev, multiples, others;
	char *name;
	int errflag;
	char buf[BUFSIZE];

	next = prev = others = 0;
	show_flags = SHOW_LINENUM | SHOW_RADIX();

	while ((c = getopt(argcnt, args, "lLQ:q:npsMm:")) != EOF) {
		switch(c)
		{
		case 'n':
			next++;
			break;

		case 'p':
			prev++;
			break;

		case 'Q':
			fprintf(fp, "%d found ",
				symbol_query(optarg, NULL, &sp));
			if (sp)
				fprintf(fp, "(%s)", sp->name);
			fprintf(fp, "\n");
			others++;
			break;

		case 'q':
			if (!symbol_query(optarg, "", NULL))
				fprintf(fp, "(none found)\n");
			others++;
			break;

		case 'm':
			symbol_dump(MODULE_SYMS, optarg);
			others++;
			break;

		case 'M':
			symbol_dump(MODULE_SYMS, NULL);
			others++;
			break;

		case 'L':  /* obsolete */
		case 'l':
			symbol_dump(KERNEL_SYMS|MODULE_SYMS, NULL);
			others++;
			break;

		case 's':
			show_flags &= ~SHOW_LINENUM;
			show_flags |= SHOW_SECTION;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (args[optind]) {
		do {
			name = NULL;
			multiples = 0;
			sp = NULL;
			show_flags &= ~SHOW_MODULE;

			/* A hex string that is not itself a symbol name is
			 * treated as an address to be resolved. */
			if (clean_arg() && (!symbol_exists(args[optind]) &&
			    hexadecimal(args[optind], 0))) {
				errflag = 0;
				value = htol(args[optind], RETURN_ON_ERROR,
					&errflag);
				if (errflag || !in_ksymbol_range(value)) {
					error(INFO, "invalid address: %s\n",
						args[optind]);
				} else if ((sp = value_search(value, &offset))){
					name = sp->name;
					if (module_symbol(sp->value, NULL, NULL,
					    NULL, 0))
						show_flags |= SHOW_MODULE;
					if (prev &&
					    (spp = prev_symbol(NULL, sp)))
						show_symbol(spp, 0, show_flags);
					show_symbol(sp, offset, show_flags);
				} else if (module_symbol(value, &sp, NULL,
					buf, *gdb_output_radix)) {
					name = buf;
					if (prev && sp &&
					    (spp = prev_symbol(NULL, sp)))
						show_symbol(spp, 0, show_flags);
					fprintf(fp, "%lx (?) %s\n", value, buf);
				} else
					fprintf(fp, "symbol not found: %s\n",
						args[optind]);
			} else {
				/* Otherwise treat the argument as a symbol
				 * name, iterating over duplicates. */
				if ((sp = symbol_search(args[optind]))) {
					multiples = symbol_name_count(sp->name);
do_multiples:
					if (module_symbol(sp->value, NULL, NULL,
					    NULL, 0))
						show_flags |= SHOW_MODULE;
					name = sp->name;
					if (prev &&
					    (spp = prev_symbol(NULL, sp)))
						show_symbol(spp, 0, show_flags);
					show_symbol(sp, 0, show_flags);
				} else {
					fprintf(fp, "symbol not found: %s\n",
						args[optind]);
					fprintf(fp, "possible alternatives:\n");
					if (!symbol_query(args[optind], " ",
					    NULL))
						fprintf(fp,
						    " (none found)\n");
				}
			}

			if (name && next && (spn = next_symbol(NULL, sp)))
				show_symbol(spn, 0, show_flags);

			if (multiples > 1) {
				if ((sp = symbol_search_next(name, sp)))
					goto do_multiples;
			}

			optind++;
		} while(args[optind]);
	} else if (!others)
		cmd_usage(pc->curcmd, SYNOPSIS);
}

/*
 * Demangle a mangled Rust symbol to human readable symbol
 */
void
cmd_rustfilt(void)
{
	int c;

	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (args[optind]) {
		char *buf;

		buf = rust_demangle(args[optind], DMGL_RUST);
		if (buf) {
			fprintf(fp, "%s", buf);
			free(buf);
		} else
			fprintf(fp, "Not a rust symbol: \n%s", args[optind]);
	} else
		cmd_usage(pc->curcmd, SYNOPSIS);
}

/*
 * Common symbol display for cmd_sym().
 */
void
show_symbol(struct syment *sp, ulong offset, ulong show_flags)
{
	char buf[BUFSIZE];
	char *p1;
	ulong radix;
	struct load_module *lm;

	lm = NULL;
	if (CRASHDEBUG(1))
		show_flags |= SHOW_LINENUM;

	switch (show_flags & (SHOW_HEX_OFFS|SHOW_DEC_OFFS))
	{
	case SHOW_DEC_OFFS:
		radix = 10;
		break;

	default:
	case SHOW_HEX_OFFS:
		radix = 16;
		break;
	}

	if (MODULE_START(sp)) {
		/* pseudo-symbol marking a module's start address */
		p1 = sp->name + strlen("_MODULE_START_");
		fprintf(fp, "%lx (%c) (%s module)", sp->value, sp->type, p1);
		if (offset)
			fprintf(fp, (radix == 16) ? "+0x%lx" : "+%ld",
				offset);
		fprintf(fp, "\n");
		return;
	} else if (show_flags & SHOW_MODULE)
		module_symbol(sp->value, NULL, &lm, NULL, 0);

	if (offset)
		fprintf(fp, (radix == 16) ?
			"%lx (%c) %s+0x%lx" : "%lx (%c) %s+%ld",
			sp->value+offset, sp->type, sp->name, offset);
	else
		fprintf(fp, "%lx (%c) %s", sp->value, sp->type, sp->name);

	if (lm)
		fprintf(fp, " [%s]", lm->mod_name);

	if (is_kernel_text(sp->value+offset) &&
	    (show_flags & SHOW_LINENUM))
		fprintf(fp, " %s",
			get_line_number(sp->value+offset, buf, TRUE));

	if (show_flags & SHOW_SECTION)
		fprintf(fp, " [%s]", get_section(sp->value+offset, buf));

	fprintf(fp, "\n");
}

/*
 * Use the gdb_interface to get a line number associated with a
 * text address -- but first check whether the address gets past
 * any machine-dependent line_number_hooks reference.
 */
char *
get_line_number(ulong addr, char *buf, int reserved)
{
	char *p;
	struct gnu_request request, *req;
	struct line_number_hook *lnh;
	struct syment *sp;
	char bldbuf[BUFSIZE], *name;
	struct load_module *lm;

	buf[0] = NULLCHAR;
	lm = NULL;

	if (NO_LINE_NUMBERS() || !is_kernel_text(addr))
		return(buf);

	if (module_symbol(addr, NULL, &lm, NULL, 0)) {
		/* no line numbers without the module's debug symbols */
		if (!(lm->mod_flags & MOD_LOAD_SYMS))
			return(buf);
	}

	if ((lnh = machdep->line_number_hooks)) {
		name = closest_symbol(addr);
		while (lnh->func) {
			if (STREQ(name, lnh->func)) {
				sprintf(buf, "%s/%s",
					get_build_directory(bldbuf) ?
					bldbuf : "..", *(lnh->file));
				break;
			}
			lnh++;
		}
	}

	if (!strlen(buf)) {
		/* fall back to asking embedded gdb */
		req = &request;
		BZERO(req, sizeof(struct gnu_request));
		req->command = GNU_GET_LINE_NUMBER;
		req->addr = addr;
		req->buf = buf;
		if (lm && lm->loaded_objfile)
			req->lm = lm;
		if ((sp = value_search(addr, NULL)))
			req->name = sp->name;
		gdb_interface(req);
	}

	/* collapse any "//" path components to "/" */
	while ((p = strstr(buf, "//")))
		shift_string_left(p+1, 1);

	return(buf);
}

/*
 * Stuff buf with the name of the section containing vaddr, checking
 * loaded modules first, then the kernel's bfd sections.
 */
static char *
get_section(ulong vaddr, char *buf)
{
	int i;
	asection **sec;
	asection *section;
	ulong start, end;
	struct load_module *lm;

	buf[0] = NULLCHAR;

	if (module_symbol(vaddr, NULL, &lm, NULL, *gdb_output_radix)) {
		if (lm->mod_flags & MOD_LOAD_SYMS) {
			for (i = (lm->mod_sections-1); i >= 0; i--) {
				if (MODULE_MEMORY())
					start = lm->mod_section_data[i].addr;
				else
					start = lm->mod_base +
						lm->mod_section_data[i].offset;
				end = start + lm->mod_section_data[i].size;

				if ((vaddr >= start) && (vaddr < end)) {
					strcpy(buf,
					    lm->mod_section_data[i].name);
					break;
				}
			}
		} else
			sprintf(buf, "in %s module", lm->mod_name);
	} else {
		sec = (asection **)st->sections;
		for (i = 0; i < st->bfd->section_count; i++, sec++) {
			section = *sec;
			start = (ulong)bfd_section_vma(section);
			end = start + (ulong)bfd_section_size(section);

			if ((vaddr >= start) && (vaddr < end)) {
				strcpy(buf, bfd_section_name(section));
				break;
			}
		}
	}

	return buf;
}

/*
 * Get the kernel build directory.
 */
char *
get_build_directory(char *buf)
{
	char *p;

	/* derive it from the source path of a well-known text symbol */
	if (symbol_exists("schedule"))
		get_line_number(symbol_value("schedule"), buf, FALSE);
	else if (symbol_exists("do_schedule"))
		get_line_number(symbol_value("do_schedule"), buf, FALSE);
	else
		return NULL;

	if ((p = strstr(buf, "/kernel/")) || (p = strstr(buf, "/./arch/")))
		*p = NULLCHAR;
	else
		return(NULL);

	return buf;
}

/*
 * Search for all symbols containing a string.
 */
int
symbol_query(char *s, char *print_pad, struct syment **spp)
{
	int i, t;
	struct syment *sp, *sp_end;
	struct load_module *lm;
	int cnt, search_init;

	cnt = 0;

	/* kernel symbols first */
	for (sp = st->symtable; sp < st->symend; sp++) {
		if (strstr(sp->name, s)) {
			if (print_pad) {
				if (strlen(print_pad))
					fprintf(fp, "%s", print_pad);
				show_symbol(sp, 0, SHOW_RADIX());
			}
			if (spp)
				*spp = sp;
			cnt++;
		}
	}

	if (!MODULE_MEMORY())
		goto old_module;

	/* kernels with per-type module memory regions (6.4+ layout) */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_flags & MOD_LOAD_SYMS) {
			sp = lm->mod_load_symtable;
			sp_end = lm->mod_load_symend;
			for (; sp < sp_end; sp++) {
				if (MODULE_PSEUDO_SYMBOL(sp))
					continue;
				if (strstr(sp->name, s)) {
					if (print_pad) {
						if (strlen(print_pad))
							fprintf(fp, "%s",
							    print_pad);
						show_symbol(sp, 0,
						    SHOW_RADIX()|SHOW_MODULE);
					}
					if (spp)
						*spp = sp;
					cnt++;
				}
			}
		} else {
			for_each_mod_mem_type(t) {
				if (!lm->symtable[t])
					continue;
				sp = lm->symtable[t];
				sp_end = lm->symend[t];
				for (; sp < sp_end; sp++) {
					if (MODULE_PSEUDO_SYMBOL(sp))
						continue;
					if (strstr(sp->name, s)) {
						if (print_pad) {
							if (strlen(print_pad))
								fprintf(fp,
								    "%s",
								    print_pad);
							show_symbol(sp, 0,
							    SHOW_RADIX()|SHOW_MODULE);
						}
						if (spp)
							*spp = sp;
						cnt++;
					}
				}
			}
		}
	}

	return cnt;

old_module:
	/* pre-6.4 module layout: base symbols, then init symbols */
	search_init = FALSE;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_flags & MOD_INIT)
			search_init = TRUE;
		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;

		for ( ; sp < sp_end; sp++) {
			if (MODULE_START(sp))
				continue;

			if (strstr(sp->name, s)) {
				if (print_pad) {
					if (strlen(print_pad))
						fprintf(fp, "%s", print_pad);
					show_symbol(sp, 0,
					    SHOW_RADIX()|SHOW_MODULE);
				}
				if (spp)
					*spp = sp;
				cnt++;
			}
		}
	}

	if (!search_init)
		return(cnt);

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (!lm->mod_init_symtable)
			continue;
		sp = lm->mod_init_symtable;
		sp_end = lm->mod_init_symend;

		for ( ; sp < sp_end; sp++) {
			if (MODULE_START(sp))
				continue;

			if (strstr(sp->name, s)) {
				if (print_pad) {
					if (strlen(print_pad))
						fprintf(fp, "%s", print_pad);
					show_symbol(sp, 0,
					    SHOW_RADIX()|SHOW_MODULE);
				}
				if (spp)
					*spp = sp;
				cnt++;
			}
		}
	}

	return(cnt);
}

/*
 * Decide whether a hash-chain symbol should be skipped: pseudo module
 * symbols are skipped unless the search string itself names one.
 */
static int
skip_symbols(struct syment *sp, char *s)
{
	int pseudos, skip = FALSE;

	pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_") ||
		strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));
	if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
		skip = TRUE;

	return skip;
}

/*
 * Return the syment of a symbol.
 */
struct syment *
symbol_search(char *s)
{
	struct syment *sp_hashed, *sp;

	sp_hashed = symname_hash_search(st->symname_hash, s);

	/* hash hit gives a starting point; otherwise scan from the top */
	for (sp = sp_hashed ? sp_hashed : st->symtable; sp < st->symend; sp++) {
		if (STREQ(s, sp->name))
			return(sp);
	}

	/* then try the module symbol name hash chains */
	sp = st->mod_symname_hash[symname_hash_index(s)];
	while (sp) {
		if (skip_symbols(sp, s)) {
			sp = sp->name_hash_next;
			continue;
		}

		if (STREQ(sp->name, s))
			return sp;
		sp = sp->name_hash_next;
	}

	return((struct syment *)NULL);
}

/*
 * Count the number of instances of a symbol name.
 */
int
symbol_name_count(char *s)
{
	int i, t;
	struct syment *sp, *sp_end;
	struct load_module *lm;
	int count, pseudos, search_init;

	count = 0;

	for (sp = st->symtable; sp < st->symend; sp++) {
		if (STREQ(s, sp->name)) {
			/* kernel-side duplicates are pre-counted in sp->cnt */
			count = sp->cnt;
			break;
		}
	}

	if (!MODULE_MEMORY())
		goto old_module;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_flags & MOD_LOAD_SYMS) {
			sp = lm->mod_load_symtable;
			sp_end = lm->mod_load_symend;
			for (; sp < sp_end; sp++) {
				if (STREQ(s, sp->name))
					count++;
			}
		} else {
			for_each_mod_mem_type(t) {
				if (!lm->symtable[t])
					continue;
				sp = lm->symtable[t];
				sp_end = lm->symend[t];
				for (; sp < sp_end; sp++) {
					if (STREQ(s, sp->name))
						count++;
				}
			}
		}
	}

	/*
	 * NOTE(review): the post-increment here is dead -- the value of
	 * count before the increment is what gets returned, so this is
	 * equivalent to "return count;".
	 */
	return count++;

old_module:
	pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_"));
	search_init = FALSE;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_flags & MOD_INIT)
			search_init = TRUE;
		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;

		for ( ; sp < sp_end; sp++) {
			if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
				continue;

			if (STREQ(s, sp->name))
				count++;
		}
	}

	if (!search_init)
		return(count);

	pseudos = (strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (!lm->mod_init_symtable)
			continue;
		sp = lm->mod_init_symtable;
		sp_end = lm->mod_init_symend;

		for ( ; sp < sp_end; sp++) {
			if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
				continue;

			if (STREQ(s, sp->name))
				count++;
		}
	}

	return(count);
}

/*
 * Return the syment of the next symbol with the same name as the input symbol.
 */
struct syment *
symbol_search_next(char *s, struct syment *spstart)
{
	int i, t;
	struct syment *sp, *sp_end;
	struct load_module *lm;
	int found_start;
	int pseudos, search_init;

	found_start = FALSE;

	for (sp = st->symtable; sp < st->symend; sp++) {
		if (sp == spstart) {
			found_start = TRUE;
			continue;
		} else if (!found_start)
			continue;

		if (strcmp(s, sp->name) == 0) {
			return(sp);
		}
	}

	if (!MODULE_MEMORY())
		goto old_module;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;
			sp = lm->symtable[t];
			sp_end = lm->symend[t];

			/* skip tables that cannot contain the start syment */
			if (!found_start && (spstart < sp || spstart > sp_end))
				continue;

			for ( ; sp < sp_end; sp++) {
				if (sp == spstart) {
					found_start = TRUE;
					continue;
				} else if (!found_start)
					continue;

				if (STREQ(s, sp->name))
					return sp;
			}
		}
	}

	return NULL;

old_module:
	pseudos = (strstr(s, "_MODULE_START_") || strstr(s, "_MODULE_END_"));
	search_init = FALSE;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_flags & MOD_INIT)
			search_init = TRUE;
		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;

		for ( ; sp < sp_end; sp++) {
			if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
				continue;

			if (sp == spstart) {
				found_start = TRUE;
				continue;
			} else if (!found_start)
				continue;

			if (STREQ(s, sp->name))
				return(sp);
		}
	}

	if (!search_init)
		return((struct syment *)NULL);

	pseudos = (strstr(s, "_MODULE_INIT_START_") || strstr(s, "_MODULE_INIT_END_"));

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (!lm->mod_init_symtable)
			continue;
		sp = lm->mod_init_symtable;
		sp_end = lm->mod_init_symend;

		for ( ; sp < sp_end; sp++) {
			if (!pseudos && MODULE_PSEUDO_SYMBOL(sp))
				continue;

			if (sp == spstart) {
				found_start = TRUE;
				continue;
			} else if (!found_start)
				continue;

			if (STREQ(s, sp->name))
				return(sp);
		}
	}

	return((struct syment *)NULL);
}

/*
 * Determine whether an address falls within the kernel's, or any module's,
 * address space.
 */
int
in_ksymbol_range(ulong value)
{
	int i;

	/* ignore trailing xen_elfnote symbols when picking the top symbol */
	for (i = st->symcnt-1; i >= 0; i--) {
		if (!strstr(st->symtable[i].name, "xen_elfnote"))
			break;
	}

	if ((value >= st->symtable[0].value) &&
	    (value <= st->symtable[i].value)) {
		if ((st->flags & PERCPU_SYMS) && (value < st->first_ksymbol))
			return FALSE;
		else
			return TRUE;
	}

	if (module_symbol(value, NULL, NULL, NULL, *gdb_output_radix))
		return TRUE;

	if (machdep->value_to_symbol(value, NULL))
		return TRUE;

	return FALSE;
}

/*
 * Determine whether an address falls within any module's address space.
 * If syment or load_module pointers are passed, send them back.
 * If a pointer to a name buffer is passed, stuff it with the particulars.
int
module_symbol(ulong value, struct syment **spp, struct load_module **lmp,
	char *name, ulong radix)
{
	int i;
	struct load_module *lm;
	struct syment *sp;
	char buf[BUFSIZE];
	ulong offs, offset;
	ulong base, end;

	if (NO_MODULES() || !(st->flags & MODULE_SYMS))
		return FALSE;

	if (!radix)
		radix = *gdb_output_radix;
	if ((radix != 10) && (radix != 16))
		radix = 16;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		/* establish the [base, end) range the address falls into */
		if (MODULE_MEMORY()) {
			/* 6.4+ layout: per-type module memory regions */
			if (IN_MODULE(value, lm) || IN_MODULE_INIT(value, lm)) {
				int type = module_mem_type(value, lm);
				base = lm->mem[type].base;
				end = base + lm->mem[type].size;
			} else if (IN_MODULE_PERCPU(value, lm)) {
				base = lm->mod_percpu;
				end = lm->mod_percpu + lm->mod_percpu_size;
			} else
				continue;
		} else {
			if (IN_MODULE(value, lm)) {
				base = lm->mod_base;
				end = lm->mod_base + lm->mod_size;
			} else if (IN_MODULE_INIT(value, lm)) {
				base = lm->mod_init_module_ptr;
				end = lm->mod_init_module_ptr + lm->mod_init_size;
			} else if (IN_MODULE_PERCPU(value, lm)) {
				base = lm->mod_percpu;
				end = lm->mod_percpu + lm->mod_percpu_size;
			} else
				continue;
		}

		if ((value >= base) && (value < end)) {
			if (lmp)
				*lmp = lm;

			if (name) {
				offs = value - base;
				if ((sp = value_search(value, &offset))) {
					if (offset)
						sprintf(buf, radix == 16 ?
						    "%s+0x%lx" : "%s+%ld",
						    sp->name, offset);
					else
						sprintf(buf, "%s", sp->name);
					strcpy(name, buf);
					if (spp)
						*spp = sp;
					return TRUE;
				}

				/* no symbol match: describe by module+offset */
				sprintf(name, "(%s module)", lm->mod_name);

				if (offs) {
					sprintf(buf, radix == 16 ?
						"+0x%lx" : "+%ld", offs);
					strcat(name, buf);
				}
			}

			return TRUE;
		}
	}

	return FALSE;
}

/*
 * value-to-syment lookup within modules for kernels with the 6.4+
 * per-type module memory layout.
 */
static struct syment *
value_search_module_6_4(ulong value, ulong *offset)
{
	int i, t;
	struct syment *sp, *sp_end, *spnext, *splast;
	struct load_module *lm;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		if (!IN_MODULE(value, lm) && !IN_MODULE_INIT(value, lm))
			continue;

		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;

			sp = lm->symtable[t];
			sp_end = lm->symend[t];

			if (value < sp->value || value > sp_end->value)
				continue;

			splast = NULL;
			for ( ; sp <= sp_end; sp++) {
				if (machine_type("ARM64") &&
				    IN_MODULE_PERCPU(sp->value, lm) &&
				    !IN_MODULE_PERCPU(value, lm))
					continue;

				if (value == sp->value) {
					if (MODULE_MEM_END(sp, t))
						break;

					/* prefer a real symbol over a
					 * coincident pseudo symbol */
					if (MODULE_PSEUDO_SYMBOL(sp)) {
						spnext = sp + 1;
						if (MODULE_PSEUDO_SYMBOL(spnext))
							continue;
						if (spnext->value == value)
							sp = spnext;
					}

					if (sp->name[0] == '.') {
						spnext = sp+1;
						if (spnext->value == value)
							sp = spnext;
					}

					if (offset)
						*offset = 0;
					return sp;
				}

				if (sp->value > value) {
					/* passed it: the previous real
					 * symbol is the closest match */
					sp = splast ? splast : sp - 1;
					if (offset)
						*offset = value - sp->value;
					return sp;
				}

				if (!MODULE_PSEUDO_SYMBOL(sp)) {
					splast = sp;
				}
			}
		}
	}

	return NULL;
}

struct syment *
value_search_module(ulong value, ulong *offset)
{
	int i;
	struct syment *sp, *sp_end, *spnext, *splast;
	struct load_module *lm;
	int search_init_sections, search_init;

	if (MODULE_MEMORY())
		return value_search_module_6_4(value, offset);

	search_init = FALSE;
	search_init_sections = 0;

	for (i = 0; i < st->mods_installed; i++) {
		if (st->load_modules[i].mod_flags & MOD_INIT)
			search_init_sections++;
	}

retry:
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		if (search_init) {
			if (lm->mod_init_symtable) {
				sp = lm->mod_init_symtable;
				sp_end = lm->mod_init_symend;
			} else
				continue;
		} else {
			sp = lm->mod_symtable;
			sp_end = lm->mod_symend;
		}

		if (sp->value > value)	/* invalid -- between modules */
			break;

		if (sp_end->value < value)	/* not within the module */
			continue;

		/*
		 * splast will contain the last module symbol encountered.
		 * Note: "__insmod_"-type symbols will be set in splast only
		 * when they have unique values.
		 */
		splast = NULL;
		for ( ; sp <= sp_end; sp++) {
			if (machine_type("ARM64") &&
			    IN_MODULE_PERCPU(sp->value, lm) &&
			    !IN_MODULE_PERCPU(value, lm))
				continue;

			if (value == sp->value) {
				if (MODULE_END(sp) || MODULE_INIT_END(sp))
					break;

				if (MODULE_PSEUDO_SYMBOL(sp)) {
					spnext = sp + 1;
					if (MODULE_PSEUDO_SYMBOL(spnext))
						continue;
					if (spnext->value == value)
						sp = spnext;
				}

				if (is_insmod_builtin(lm, sp)) {
					spnext = sp+1;
					if ((spnext < sp_end) &&
					    (value == spnext->value))
						sp = spnext;
				}

				if (sp->name[0] == '.') {
					spnext = sp+1;
					if (spnext->value == value)
						sp = spnext;
				}

				if (offset)
					*offset = 0;
				return((struct syment *)sp);
			}

			if (sp->value > value) {
				sp = splast ? splast : sp - 1;
				if (offset)
					*offset = value - sp->value;
				return(sp);
			}

			if (!MODULE_PSEUDO_SYMBOL(sp)) {
				if (is_insmod_builtin(lm, sp)) {
					if (!splast ||
					    (sp->value > splast->value))
						splast = sp;
				} else
					splast = sp;
			}
		}
	}

	/* one more pass over init sections, if any modules have them */
	if (search_init_sections) {
		if (!search_init) {
			search_init = TRUE;
			goto retry;
		}
	}

	return((struct syment *)NULL);
}

/*
 * Return the syment of the symbol closest to the value, along with
 * the offset from the symbol value if requested.
 */
struct syment *
value_search(ulong value, ulong *offset)
{
	struct syment *sp, *spnext;

	if (!in_ksymbol_range(value))
		return((struct syment *)NULL);

	if ((sp = machdep->value_to_symbol(value, offset)))
		return sp;

	if (IS_VMALLOC_ADDR(value))
		goto check_modules;

	if ((sp = symval_hash_search(value)) == NULL)
		sp = st->symtable;

	for ( ; sp < st->symend; sp++) {
		if (value == sp->value) {
#if !defined(GDB_5_3) && !defined(GDB_6_0) && !defined(GDB_6_1)
			if (STRNEQ(sp->name, ".text.")) {
				spnext = sp+1;
				if (spnext->value == value)
					sp = spnext;
			}
#endif
			if (offset)
				*offset = 0;

			/*
			 * Avoid "SyS" and "compat_SyS" kernel syscall
			 * aliases by returning the real symbol name,
			 * which is the next symbol in the list.
			 */
			if ((STRNEQ(sp->name, "SyS_") ||
			     STRNEQ(sp->name, "compat_SyS_")) &&
			    ((spnext = sp+1) < st->symend) &&
			    (spnext->value == value))
				sp = spnext;

			/*
			 * If any of the special text region starting address
			 * delimiters declared in vmlinux.lds.S match the
			 * first "real" text symbol in the region, return
			 * that (next) one instead.
*/ if (strstr_rightmost(sp->name, "_text_start") && ((spnext = sp+1) < st->symend) && (spnext->value == value)) sp = spnext; return((struct syment *)sp); } if (sp->value > value) { if (offset) *offset = value - ((sp-1)->value); return((struct syment *)(sp-1)); } } check_modules: sp = value_search_module(value, offset); return sp; } ulong highest_bss_symbol(void) { struct syment *sp; ulong highest = 0; for (sp = st->symtable; sp < st->symend; sp++) { if ((sp->type == 'b') || (sp->type == 'B')) { if (sp->value > highest) highest = sp->value; } } return highest; } /* * Search for a value only within the base kernel's symbols, * also avoiding the machdep->value_to_symbol() call, which will * most likely be the prime consumer of this call. */ struct syment * value_search_base_kernel(ulong value, ulong *offset) { struct syment *sp; if (value < st->symtable[0].value) return((struct syment *)NULL); if ((sp = symval_hash_search(value)) == NULL) sp = st->symtable; for ( ; sp < st->symend; sp++) { if (value == sp->value) { if (offset) *offset = 0; return((struct syment *)sp); } if (sp->value > value) { if (offset) *offset = value - ((sp-1)->value); return((struct syment *)(sp-1)); } } /* * If we go off the end, just use the last symbol plus offset. */ sp = st->symend; if (offset) *offset = value - ((sp-1)->value); return((struct syment *)(sp-1)); } /* * Allow platforms to assign symbols to their own special values. */ struct syment * generic_machdep_value_to_symbol(ulong value, ulong *offset) { return NULL; } /* * For a given value, format a string containing the nearest symbol name * plus the offset if appropriate. Display the offset in the specified * radix (10 or 16) -- if it's 0, set it to the current pc->output_radix. 
*/ char * value_to_symstr(ulong value, char *buf, ulong radix) { struct syment *sp; ulong offset; char *p1, locbuf[BUFSIZE]; struct load_module *lm; sp = NULL; offset = 0; buf[0] = NULLCHAR; if (!radix) radix = *gdb_output_radix; if ((radix != 10) && (radix != 16)) radix = 16; if ((sp = value_search(value, &offset))) { if (offset) sprintf(buf, radix == 16 ? "%s+0x%lx" : "%s+%ld", sp->name, offset); else sprintf(buf, "%s", sp->name); } if (module_symbol(value, NULL, NULL, locbuf, *gdb_output_radix)) { if (sp) { if (STRNEQ(locbuf, "_MODULE_START_")) shift_string_left(locbuf, strlen("_MODULE_START_")); if ((p1 = strstr(locbuf, "+"))) *p1 = NULLCHAR; if (offset) { if (is_module_name(locbuf, NULL, &lm) && (value < lm->mod_text_start)) sprintf(buf, radix == 16 ? "(%s module)+0x%lx" : "(%s module)+%ld", locbuf, offset); else sprintf(buf, radix == 16 ? "%s+0x%lx" : "%s+%ld", locbuf, offset); } else { if (is_module_name(locbuf, NULL, &lm) && (value < lm->mod_text_start)) sprintf(buf, "(%s)", locbuf); else sprintf(buf, "%s", locbuf); } } else sprintf(buf, "%s", locbuf); } return(buf); } /* * For a given value, return the closest (lower-in-value) symbol name. */ char * closest_symbol(ulong value) { struct syment *sp; if ((sp = value_search(value, NULL))) return(sp->name); else return(NULL); } /* * Same as above, but return the closest (lower-in-value) symbol value. */ ulong closest_symbol_value(ulong value) { struct syment *sp; if ((sp = value_search(value, NULL))) return(sp->value); else return(0); } /* Only for 6.4 and later */ static struct syment * next_symbol_by_symname(char *symbol) { struct syment *sp; if ((sp = symbol_search(symbol))) { sp++; if (MODULE_PSEUDO_SYMBOL(sp) && strstr(sp->name, "_END")) return next_module_symbol_by_value(sp->value); return sp; } return NULL; } /* val_in should be a pseudo module end symbol. 
*/
/*
 * Only for 6.4 and later: given a value (typically from a pseudo module
 * end symbol), find the module symbol table with the lowest starting
 * value above it and return its first real (non-pseudo) symbol.
 * Retries with the next table if one contains only pseudo symbols.
 */
static struct syment *
next_module_symbol_by_value(ulong val_in)
{
	struct load_module *lm;
	struct syment *sp, *sp_end;
	ulong start, min;
	int i, t;

retry:
	sp = sp_end = NULL;
	min = (ulong)-1;
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		/* Search for the next lowest symtable. */
		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;

			start = lm->symtable[t]->value;
			if (start > val_in && start < min) {
				min = start;
				sp = lm->symtable[t];
				sp_end = lm->symend[t];
			}
		}
	}
	if (!sp)
		return NULL;

	for ( ; sp < sp_end; sp++) {
		if (MODULE_PSEUDO_SYMBOL(sp))
			continue;
		if (sp->value > val_in)
			return sp;
	}

	/* Found a table that has only pseudo symbols. */
	val_in = sp_end->value;
	goto retry;
}

/*
 * Only for 6.4 and later: return the symbol following sp_in within its
 * module symbol table, hopping to the next table when the end (or a
 * pseudo symbol) is reached.
 */
static struct syment *
next_module_symbol_by_syment(struct syment *sp_in)
{
	struct load_module *lm;
	struct syment *sp;
	int i, t;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		for_each_mod_mem_type(t) {
			/* Locate the table containing sp_in. */
			if (!lm->symtable[t])
				continue;

			if (sp_in < lm->symtable[t] || sp_in > lm->symend[t])
				continue;

			if (sp_in == lm->symend[t])
				return next_module_symbol_by_value(sp_in->value);

			sp = sp_in + 1;
			if (MODULE_PSEUDO_SYMBOL(sp))
				return next_module_symbol_by_value(sp->value);

			return sp;
		}
	}
	return NULL;
}

/*
 * For a given symbol, return a pointer to the next (higher) symbol's syment.
 * Either a symbol name or syment pointer may be passed as an argument.
*/
struct syment *
next_symbol(char *symbol, struct syment *sp_in)
{
	int i;
	int found, search_init;
	struct syment *sp, *sp_end;
	struct load_module *lm;
	char buf[BUFSIZE], *p1;

	if (!symbol && !sp_in)
		error(FATAL, "next_symbol: two NULL args!\n");

	if (sp_in) {
		/* By-syment: scan base kernel symbols first. */
		found = FALSE;
		for (sp = st->symtable; sp < st->symend; sp++) {
			if (sp == sp_in)
				found = TRUE;
			else if (found) {
				/* Skip same-valued aliases. */
				if (sp->value > sp_in->value)
					return sp;
			}
		}

		if (MODULE_MEMORY())
			return next_module_symbol_by_syment(sp_in);

		search_init = FALSE;

		/* Then module symbol tables (pre-6.4 layout). */
		for (i = 0; i < st->mods_installed; i++) {
			lm = &st->load_modules[i];
			if (lm->mod_flags & MOD_INIT)
				search_init = TRUE;
			sp = lm->mod_symtable;
			sp_end = lm->mod_symend;

			for ( ; sp < sp_end; sp++) {
				if (MODULE_PSEUDO_SYMBOL(sp))
					continue;
				if (sp == sp_in)
					found = TRUE;
				else if (found) {
					/* Skip insmod-builtin aliases at the
					 * same value. */
					if ((sp->value == sp_in->value) &&
					    is_insmod_builtin(lm, sp))
						continue;
					return sp;
				}
			}
		}

		/* Finally, module init symbol tables if any exist. */
		for (i = 0; search_init && (i < st->mods_installed); i++) {
			lm = &st->load_modules[i];
			if (!lm->mod_init_symtable)
				continue;
			sp = lm->mod_init_symtable;
			sp_end = lm->mod_init_symend;

			for ( ; sp < sp_end; sp++) {
				if (MODULE_PSEUDO_SYMBOL(sp))
					continue;
				if (sp == sp_in)
					found = TRUE;
				else if (found)
					return sp;
			}
		}

		return NULL;
	}

	if (MODULE_MEMORY())
		return next_symbol_by_symname(symbol);

	/*
	 * Deal with a few special cases...
	 */
	if (strstr(symbol, " module)")) {
		/* "(name module)" -> "_MODULE_START_name" pseudo symbol. */
		sprintf(buf, "_MODULE_START_");
		strcat(buf, &symbol[1]);
		p1 = strstr(buf, " module)");
		*p1 = NULLCHAR;
		symbol = buf;
	}

	if (STREQ(symbol, "_end")) {
		/* End of base kernel: next is the first module's table. */
		if (!st->mods_installed)
			return NULL;

		lm = &st->load_modules[0];
		return lm->mod_symtable;
	}

	if ((sp = symbol_search(symbol))) {
		sp++;
		if (MODULE_END(sp)) {
			/* Hop from a module's end marker to the next module. */
			sp--;
			i = load_module_index(sp);
			if ((i+1) == st->mods_installed)
				return NULL;

			lm = &st->load_modules[i+1];
			sp = lm->mod_symtable;
		}
		return sp;
	}

	return NULL;
}

/*
 * Only for 6.4 and later: by-name previous-symbol lookup; module "_START"
 * pseudo symbols hop backwards to the previous module symbol table.
 */
static struct syment *
prev_symbol_by_symname(char *symbol)
{
	struct syment *sp;

	if ((sp = symbol_search(symbol))) {
		if (sp == st->symtable)
			return NULL;

		if (module_symbol(sp->value, NULL, NULL, NULL, 0)) {
			if (MODULE_PSEUDO_SYMBOL(sp) && strstr(sp->name, "_START"))
				return prev_module_symbol_by_value(sp->value);
			else
				sp--;
		} else
			sp--;

		return sp;
	}

	return NULL;
}

/* val_in should be a pseudo module start symbol.
 */
static struct syment *
prev_module_symbol_by_value(ulong val_in)
{
	struct load_module *lm;
	struct syment *sp, *sp_end;
	ulong end, max;
	int i, t;

retry:
	sp = sp_end = NULL;
	max = 0;
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		/* Search for the previous highest table. */
		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;

			end = lm->symend[t]->value;
			if (end < val_in && end > max) {
				max = end;
				sp = lm->symtable[t];
				sp_end = lm->symend[t];
			}
		}
	}
	if (!sp)
		return NULL;

	/* Walk backwards to the last real symbol below val_in. */
	for ( ; sp_end > sp; sp_end--) {
		if (MODULE_PSEUDO_SYMBOL(sp_end))
			continue;
		if (sp_end->value < val_in)
			return sp_end;
	}

	/* Found a table that has only pseudo symbols.
	 */
	val_in = sp->value;
	goto retry;
}

/*
 * Only for 6.4 and later: return the symbol preceding sp_in within its
 * module symbol table, hopping to the previous table at a boundary or
 * pseudo symbol.
 */
static struct syment *
prev_module_symbol_by_syment(struct syment *sp_in)
{
	struct load_module *lm;
	struct syment *sp;
	int i, t;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;

			if (sp_in < lm->symtable[t] || sp_in > lm->symend[t])
				continue;

			if (sp_in == lm->symtable[t])
				return prev_module_symbol_by_value(sp_in->value);

			sp = sp_in - 1;
			if (MODULE_PSEUDO_SYMBOL(sp))
				return prev_module_symbol_by_value(sp->value);

			return sp;
		}
	}
	return NULL;
}

/*
 * For a given symbol, return a pointer to the previous (lower) symbol's syment.
 * Either a symbol name or syment pointer may be passed as an argument.
 */
struct syment *
prev_symbol(char *symbol, struct syment *sp_in)
{
	int i, search_init;
	struct syment *sp, *sp_end, *sp_prev;
	char buf[BUFSIZE], *p1;
	struct load_module *lm;

	if (!symbol && !sp_in)
		error(FATAL, "prev_symbol: two NULL args!\n");

	if (sp_in) {
		/* By-syment: base kernel first, then modules. */
		sp_prev = NULL;
		for (sp = st->symtable; sp < st->symend; sp++) {
			if (sp == sp_in)
				return sp_prev;
			sp_prev = sp;
		}

		if (MODULE_MEMORY())
			return prev_module_symbol_by_syment(sp_in);

		search_init = FALSE;

		for (i = 0; i < st->mods_installed; i++) {
			lm = &st->load_modules[i];
			if (lm->mod_flags & MOD_INIT)
				search_init = TRUE;
			sp = lm->mod_symtable;
			sp_end = lm->mod_symend;

			for ( ; sp < sp_end; sp++) {
				if (MODULE_PSEUDO_SYMBOL(sp))
					continue;
				if (sp == sp_in)
					return sp_prev;
				if (is_insmod_builtin(lm, sp)) {
					/*
					 * NOTE(review): sp_prev can still be
					 * NULL here if the first real symbol
					 * is an insmod builtin -- potential
					 * NULL dereference; confirm module
					 * tables always start otherwise.
					 */
					if (sp->value > sp_prev->value)
						sp_prev = sp;
				} else
					sp_prev = sp;
			}
		}

		for (i = 0; search_init && (i < st->mods_installed); i++) {
			lm = &st->load_modules[i];
			if (!lm->mod_init_symtable)
				continue;
			sp = lm->mod_init_symtable;
			sp_end = lm->mod_init_symend;

			for ( ; sp < sp_end; sp++) {
				if (MODULE_PSEUDO_SYMBOL(sp))
					continue;
				if (sp == sp_in)
					return sp_prev;
				sp_prev = sp;
			}
		}

		return NULL;
	}

	if (MODULE_MEMORY())
		return prev_symbol_by_symname(symbol);

	if (strstr(symbol, " module)")) {
		/* "(name module)" -> "_MODULE_START_name" pseudo symbol. */
		sprintf(buf, "_MODULE_START_");
		strcat(buf, &symbol[1]);
		p1 = strstr(buf, " module)");
		*p1 = NULLCHAR;
		symbol = buf;
	}

	if ((sp = symbol_search(symbol))) {
		if (sp == st->symtable)
			return((struct syment *)NULL);

		if (module_symbol(sp->value, NULL, NULL, NULL, 0)) {
			if (MODULE_START(sp)) {
				/* Hop from a module's start marker to the
				 * previous module (or the kernel's "_end"). */
				i = load_module_index(sp);
				if (i == 0)
					sp = symbol_search("_end");
				else {
					lm = &st->load_modules[i-1];
					sp = lm->mod_symend;
					sp--;
				}
			} else
				sp--;
		} else
			sp--;

		return sp;
	}

	return NULL;
}

/*
 * Read the specified amount of data from the given symbol's value.
 * Fatal error if the symbol cannot be resolved.
 */
void
get_symbol_data(char *symbol, long size, void *local)
{
	struct syment *sp;

	if ((sp = symbol_search(symbol)))
		readmem(sp->value, KVADDR, local,
			size, symbol, FAULT_ON_ERROR);
	else
		error(FATAL, "cannot resolve: \"%s\"\n", symbol);
}

/*
 * Same as above, but allow for failure.
 */
int
try_get_symbol_data(char *symbol, long size, void *local)
{
	struct syment *sp;

	if ((sp = symbol_search(symbol)) &&
	    readmem(sp->value, KVADDR, local,
	    size, symbol, RETURN_ON_ERROR|QUIET))
		return TRUE;

	return FALSE;
}

/*
 * Return the value of a given symbol.  Fatal error on failure.
 */
ulong
symbol_value(char *symbol)
{
	struct syment *sp;

	if (!(sp = symbol_search(symbol)))
		error(FATAL, "cannot resolve \"%s\"\n", symbol);

	return(sp->value);
}

/*
 * Return the value of a symbol from a specific module, searching both its
 * regular and init symbol tables; returns 0 if not found.
 */
ulong
symbol_value_module(char *symbol, char *module)
{
	int i;
	struct syment *sp, *sp_end;
	struct load_module *lm;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		if (!STREQ(module, lm->mod_name))
			continue;

		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;

		for ( ; sp < sp_end; sp++) {
			if (STREQ(symbol, sp->name))
				return(sp->value);
		}

		if (lm->mod_init_symtable) {
			sp = lm->mod_init_symtable;
			sp_end = lm->mod_init_symend;

			for ( ; sp < sp_end; sp++) {
				if (STREQ(symbol, sp->name))
					return(sp->value);
			}
		}
	}

	return 0;
}

/*
 * Return the symbol name of a given value, with no allowance for offsets.
 * Returns NULL on failure to allow for testing of a value.
*/
char *
value_symbol(ulong value)
{
	struct syment *sp;
	ulong offset;

	if ((sp = value_search(value, &offset))) {
		/* Only an exact match counts -- no "symbol+offset". */
		if (offset)
			return NULL;
		else
			return sp->name;
	}

	return NULL;
}

/*
 * Determine whether a symbol exists, in either the base kernel or
 * module symbol name hashes.
 */
int
symbol_exists(char *symbol)
{
	if (symname_hash_search(st->symname_hash, symbol))
		return TRUE;

	if (symname_hash_search(st->mod_symname_hash, symbol))
		return TRUE;

	return(FALSE);
}

/*
 * Determine whether a per-cpu symbol exists.
 * The old-style per-cpu symbol names were pre-pended with
 * "per_cpu__", whereas the new-style ones (as of 2.6.34)
 * are not.  This function allows the symbol argument to
 * use either the old- or new-sytle format, and find either
 * type.
 */
struct syment *
per_cpu_symbol_search(char *symbol)
{
	struct syment *sp;
	char old[BUFSIZE];
	char *new;

	if (STRNEQ(symbol, "per_cpu__")) {
		/* Old-style name given: try it verbatim, then stripped. */
		if ((sp = symbol_search(symbol)))
			return sp;
		new = symbol + strlen("per_cpu__");
		if ((sp = symbol_search(new))) {
			if ((sp->type == 'V') || (is_percpu_symbol(sp)))
				return sp;
			/* 'd' accepted only when no per-cpu region exists. */
			if ((sp->type == 'd') &&
			    (st->__per_cpu_start == st->__per_cpu_end))
				return sp;
		}
	} else {
		/* New-style name given: try it, then the old prefix. */
		if ((sp = symbol_search(symbol))) {
			if ((sp->type == 'V') || (is_percpu_symbol(sp)))
				return sp;
		}

		sprintf(old, "per_cpu__%s", symbol);
		if ((sp = symbol_search(old)))
			return sp;
	}

	if (CRASHDEBUG(1))
		error(INFO, "per_cpu_symbol_search(%s): NULL\n", symbol);

	return NULL;
}

/*
 * Determine whether a static kernel symbol exists.
 */
int
kernel_symbol_exists(char *symbol)
{
	return !!symname_hash_search(st->symname_hash, symbol);
}

/*
 * Similar to above, but return the syment of the kernel symbol.
 */
struct syment *
kernel_symbol_search(char *symbol)
{
	return symname_hash_search(st->symname_hash, symbol);
}

/*
 * Return the number of instances of a symbol name along with pointers to
 * their syment structures.
*/
int
get_syment_array(char *symbol, struct syment **sp_array, int max)
{
	int i, cnt, t;
	struct syment *sp, *sp_end;
	struct load_module *lm;

	cnt = 0;

	/* Base kernel symbols first. */
	for (sp = st->symtable; sp < st->symend; sp++) {
		/* Cheap first-char check before the full string compare. */
		if ((*symbol == *(sp->name)) && STREQ(symbol, sp->name)) {
			if (!sp_array)
				/* No array wanted: return the cached count.
				 * NOTE(review): assumes sp->cnt reflects the
				 * total duplicate count -- confirm against
				 * symbol table construction. */
				return sp->cnt;
			if (max) {
				if (cnt == max) {
					error(INFO,
					    "symbol count overflow (%s)\n",
						symbol);
					return cnt;
				} else
					sp_array[cnt] = sp;
			}
			if (sp->cnt == 1)
				return 1;
			cnt++;
		}
	}

	if (!MODULE_MEMORY())
		goto old_module;

	/* Linux 6.4+ layout: per-memory-type module symbol tables. */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];

		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;

			sp = lm->symtable[t];
			sp_end = lm->symend[t];

			for (; sp < sp_end; sp++) {
				if (STREQ(symbol, sp->name)) {
					if (max && (cnt < max))
						sp_array[cnt] = sp;
					cnt++;
				}
			}
		}
	}
	return cnt;

old_module:
	/* Pre-6.4 layout: regular and init module symbol tables. */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		sp = lm->mod_symtable;
		sp_end = lm->mod_symend;

		for ( ; sp < sp_end; sp++) {
			if (STREQ(symbol, sp->name)) {
				if (max && (cnt < max))
					sp_array[cnt] = sp;
				cnt++;
			}
		}

		if (lm->mod_init_symtable) {
			sp = lm->mod_init_symtable;
			sp_end = lm->mod_init_symend;

			for ( ; sp < sp_end; sp++) {
				if (STREQ(symbol, sp->name)) {
					if (max && (cnt < max))
						sp_array[cnt] = sp;
					cnt++;
				}
			}
		}
	}
	return cnt;
}

/*
 * Perform any datatype-related initializations here.
*/
void
datatype_init(void)
{
	/* Offsets/sizes default to -1 ("unknown"); array counts to 0. */
	BNEG(&offset_table, sizeof(offset_table));
	BNEG(&size_table, sizeof(size_table));
	BZERO(&array_table, sizeof(array_table));
}

/*
 * This function is called through the following macros:
 *
 *   #define STRUCT_SIZE(X)      datatype_info((X), NULL, NULL)
 *   #define UNION_SIZE(X)       datatype_info((X), NULL, NULL)
 *   #define DATATYPE_SIZE(X)    datatype_info((X)->name, NULL, (X))
 *   #define MEMBER_OFFSET(X,Y)  datatype_info((X), (Y), NULL)
 *   #define STRUCT_EXISTS(X)    (datatype_info((X), NULL, NULL) >= 0)
 *   #define MEMBER_EXISTS(X,Y)  (datatype_info((X), (Y), NULL) >= 0)
 *   #define MEMBER_SIZE(X,Y)    datatype_info((X), (Y), MEMBER_SIZE_REQUEST)
 *   #define MEMBER_TYPE(X,Y)    datatype_info((X), (Y), MEMBER_TYPE_REQUEST)
 *   #define MEMBER_TYPE_NAME(X,Y)    datatype_info((X), (Y), MEMBER_TYPE_NAME_REQUEST)
 *   #define ANON_MEMBER_OFFSET(X,Y)  datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST)
 *
 * to determine structure or union sizes, or member offsets.
 * The dm argument doubles as either a result structure or a sentinel
 * request code (MEMBER_SIZE_REQUEST etc.) selecting the return value.
 */
long
datatype_info(char *name, char *member, struct datatype_member *dm)
{
	struct gnu_request request, *req = &request;
	long offset, size, member_size;
	int member_typecode;
	ulong type_found;
	char buf[BUFSIZE];

	/* Anonymous-member requests are handled by gdb expression probes. */
	if (dm == ANON_MEMBER_OFFSET_REQUEST)
		return anon_member_offset(name, member);

	if (dm == ANON_MEMBER_SIZE_REQUEST)
		return anon_member_size(name, member);

	strcpy(buf, name);

	BZERO(req, sizeof(*req));
	req->command = GNU_GET_DATATYPE;
	req->flags |= GNU_RETURN_ON_ERROR;
	req->name = buf;
	req->member = member;
	req->fp = pc->nullfp;

	gdb_interface(req);
	if (req->flags & GNU_COMMAND_FAILED)
		return (dm == MEMBER_TYPE_NAME_REQUEST) ? 0 : -1;

	/* Retry with explicit "struct"/"union" qualifiers if a bare name
	 * did not resolve. */
	if (!req->typecode) {
		sprintf(buf, "struct %s", name);
		gdb_interface(req);
	}

	if (!req->typecode) {
		sprintf(buf, "union %s", name);
		gdb_interface(req);
	}

	member_typecode = TYPE_CODE_UNDEF;
	member_size = 0;
	type_found = 0;

	if (CRASHDEBUG(2)) {
		if (req->typecode) {
			console("name: %s ", req->name);
			if (member)
				console("member: %s ", req->member);
			console("typecode: %d%s ", req->typecode,
				req->is_typedef ? " (TYPEDEF)" : "");
			console("length: %ld ", req->length);
			console("member_offset: %ld\n", req->member_offset);
		} else
			console("%s: unknown\n", name);
	}

	/* Translate the gdb typecode into crash's request/type flags. */
	switch (req->typecode)
	{
	case TYPE_CODE_STRUCT:
		type_found = STRUCT_REQUEST;
		size = req->length;
		if (req->member_offset >= 0) {
			/* gdb reports member offsets in bits. */
			offset = req->member_offset/BITS_PER_BYTE;
			member_size = req->member_length;
			member_typecode = req->member_typecode;
		} else {
			offset = -1;
			member_size = 0;
			member_typecode = TYPE_CODE_UNDEF;
		}
		break;

	case TYPE_CODE_UNION:
		type_found = UNION_REQUEST;
		size = req->length;
		if (req->member_offset >= 0) {
			offset = req->member_offset/BITS_PER_BYTE;
			member_size = req->member_length;
			member_typecode = req->member_typecode;
		} else {
			offset = -1;
			member_size = 0;
			member_typecode = TYPE_CODE_UNDEF;
		}
		break;

	case TYPE_CODE_RANGE:
	case TYPE_CODE_INT:
		size = req->length;
		offset = 0;
		switch (size)
		{
		case SIZEOF_64BIT:
			type_found = INT64;
			break;
		case SIZEOF_32BIT:
			type_found = INT32;
			break;
		case SIZEOF_16BIT:
			type_found = INT16;
			break;
		case SIZEOF_8BIT:
			type_found = INT8;
			break;
		}
		break;

	case TYPE_CODE_PTR:
		size = req->length;
		offset = 0;
		type_found = POINTER;
		break;

	case TYPE_CODE_FUNC:
		size = req->length;
		offset = 0;
		type_found = FUNCTION;
		break;

	case TYPE_CODE_ARRAY:
		size = req->length;
		offset = 0;
		type_found = ARRAY;
		break;

	case TYPE_CODE_ENUM:
		size = req->length;
		offset = 0;
		type_found = ENUM;
		break;

	default:
		type_found = 0;
		size = -1;
		offset = -1;
		break;
	}

	/* Fill in the caller's datatype_member only when dm is a real
	 * structure pointer, not one of the sentinel request codes. */
	if (dm && (dm != MEMBER_SIZE_REQUEST) && (dm != MEMBER_TYPE_REQUEST) &&
	    (dm != STRUCT_SIZE_REQUEST) && (dm != MEMBER_TYPE_NAME_REQUEST)) {
		dm->type = type_found;
		dm->size = size;
		dm->member_size = member_size;
		dm->member_typecode = member_typecode;
		dm->member_offset = offset;
		if (req->is_typedef) {
			dm->flags |= TYPEDEF;
		}
		if (req->tagname) {
			dm->tagname = req->tagname;
			dm->value = req->value;
		}
	}

	if (!type_found)
		return (dm == MEMBER_TYPE_NAME_REQUEST) ? 0 : -1;

	if (dm == MEMBER_SIZE_REQUEST)
		return member_size;
	else if (dm == MEMBER_TYPE_REQUEST)
		return member_typecode;
	else if (dm == MEMBER_TYPE_NAME_REQUEST) {
		/* Returns a char pointer cast to the long return type. */
		if (req->member_main_type_name)
			return (ulong)req->member_main_type_name;
		else if (req->member_main_type_tag_name)
			return (ulong)req->member_main_type_tag_name;
		else if (req->member_target_type_name)
			return (ulong)req->member_target_type_name;
		else if (req->member_target_type_tag_name)
			return (ulong)req->member_target_type_tag_name;
		else
			return 0;
	} else if (dm == STRUCT_SIZE_REQUEST) {
		if ((req->typecode == TYPE_CODE_STRUCT) ||
		    (req->typecode == TYPE_CODE_UNION) ||
		     req->is_typedef)
			return size;
		else
			return -1;
	} else if (member) {
		if ((req->typecode == TYPE_CODE_STRUCT) ||
		    (req->typecode == TYPE_CODE_UNION))
			return offset;
		else
			return -1;
	} else
		return size;
}

/*
 * Determine the offset of a member in an anonymous union
 * in a structure or union.
*/
static long
anon_member_offset(char *name, char *member)
{
	char buf[BUFSIZE];
	ulong value;
	int type;

	value = -1;
	type = STRUCT_REQUEST;
	/* Ask gdb for the address of the member in a NULL-based object;
	 * the printed pointer value IS the offset. */
	sprintf(buf, "printf \"%%p\", &((struct %s *)0x0)->%s", name, member);
	open_tmpfile2();
retry:
	if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
		rewind(pc->tmpfile2);
		if (fgets(buf, BUFSIZE, pc->tmpfile2)) {
			if (hexadecimal(buf, 0))
				value = htol(buf, RETURN_ON_ERROR|QUIET, NULL);
			else if (STRNEQ(buf, "(nil)"))
				/* gdb prints "(nil)" for offset zero. */
				value = 0;
		}
	}
	/* If the "struct" form failed, retry once as a union. */
	if ((value == -1) && (type == STRUCT_REQUEST)) {
		type = UNION_REQUEST;
		sprintf(buf, "printf \"%%p\", &((union %s *)0x0)->%s", name, member);
		rewind(pc->tmpfile2);
		goto retry;
	}
	close_tmpfile2();

	/* -1 (as ulong) means "not found" to the datatype_info() callers. */
	return value;
}

/*
 * Determine the size of a member in an anonymous union
 * in a structure or union.
 */
static long
anon_member_size(char *name, char *member)
{
	char buf[BUFSIZE];
	ulong value;
	int type;

	value = -1;
	type = STRUCT_REQUEST;
	/* Size = &(member + 1) - &member, computed by gdb. */
	sprintf(buf, "printf \"%%ld\", (u64)(&((struct %s*)0)->%s + 1) - (u64)&((struct %s*)0)->%s",
		name, member, name, member);
	open_tmpfile2();
retry:
	if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
		rewind(pc->tmpfile2);
		if (fgets(buf, BUFSIZE, pc->tmpfile2)) {
			if (hexadecimal(buf, 0))
				value = htol(buf, RETURN_ON_ERROR|QUIET, NULL);
			else if (STRNEQ(buf, "(nil)"))
				value = 0;
		}
	}
	/* If the "struct" form failed, retry once as a union. */
	if ((value == -1) && (type == STRUCT_REQUEST)) {
		type = UNION_REQUEST;
		sprintf(buf, "printf \"%%ld\", (u64)(&((union %s*)0)->%s + 1) - (u64)&((union %s*)0)->%s",
			name, member, name, member);
		rewind(pc->tmpfile2);
		goto retry;
	}
	close_tmpfile2();

	return value;
}

/*
 * Get the basic type info for a symbol.  Let the caller pass in the
 * gnu_request structure to have access to the full response; in either
 * case, return the type code.  The member field can be used for structures
 * with no type names, and if there, the member data will be filled in
 * as well.
*/ int get_symbol_type(char *name, char *member, struct gnu_request *caller_req) { struct gnu_request *req; int typecode; if (!caller_req) req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); else { req = caller_req; BZERO(req, sizeof(struct gnu_request)); } req->command = GNU_GET_SYMBOL_TYPE; req->name = name; req->member = member; req->flags = GNU_RETURN_ON_ERROR; req->fp = pc->nullfp; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) typecode = TYPE_CODE_UNDEF; else if (member) { if (req->member_offset >= 0) typecode = req->member_typecode; else typecode = TYPE_CODE_UNDEF; } else typecode = req->typecode; if (!caller_req) FREEBUF(req); return(typecode); } int get_symbol_length(char *symbol) { struct gnu_request *req; int len; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); if (get_symbol_type(symbol, NULL, req) == TYPE_CODE_UNDEF) error(FATAL, "cannot determine length of symbol: %s\n", symbol); len = (int)req->length; FREEBUF(req); return len; } /* * Initialize the caller's restore_radix, and if valid, * temporarily override the current output radix. */ void set_temporary_radix(unsigned int radix, unsigned int *restore_radix) { *restore_radix = *gdb_output_radix; if ((radix == 10) || (radix == 16)) { *gdb_output_radix = radix; \ *gdb_output_format = (*gdb_output_radix == 10) ? 0 : 'x'; } } /* * Restore the output radix to the current/default value saved * by the caller. */ void restore_current_radix(unsigned int restore_radix) { if ((restore_radix == 10) || (restore_radix == 16)) { *gdb_output_radix = restore_radix; *gdb_output_format = (*gdb_output_radix == 10) ? 0 : 'x'; } } /* * Externally available routine to dump a structure at an address. 
*/ void dump_struct(char *s, ulong addr, unsigned radix) { unsigned restore_radix; long len; restore_radix = 0; if ((len = STRUCT_SIZE(s)) < 0) error(FATAL, "invalid structure name: %s\n", s); set_temporary_radix(radix, &restore_radix); print_struct(s, addr); restore_current_radix(restore_radix); } /* * Externally available routine to dump a structure member, given the * base structure address. The input string must be in struct.member format. */ void dump_struct_member(char *s, ulong addr, unsigned radix) { struct datatype_member datatype_member, *dm; unsigned restore_radix; char *buf, *p1; restore_radix = 0; buf = GETBUF(strlen(s)+1); strcpy(buf, s); p1 = strstr(buf, "."); *p1 = NULLCHAR; p1++; dm = &datatype_member; dm->name = buf; dm->member = p1; if (!STRUCT_EXISTS(dm->name)) { FREEBUF(buf); error(FATAL, "invalid structure name: %s\n", dm->name); } set_temporary_radix(radix, &restore_radix); open_tmpfile(); print_struct(dm->name, addr); if (MEMBER_EXISTS(dm->name, dm->member)) parse_for_member(dm, PARSE_FOR_DATA); else parse_for_member_extended(dm, PARSE_FOR_DATA); close_tmpfile(); restore_current_radix(restore_radix); FREEBUF(buf); } /* * Externally available routine to dump a union at an address. */ void dump_union(char *s, ulong addr, unsigned radix) { unsigned restore_radix; long len; restore_radix = 0; if ((len = UNION_SIZE(s)) < 0) error(FATAL, "invalid union name: %s\n", s); set_temporary_radix(radix, &restore_radix); print_union(s, addr); restore_current_radix(restore_radix); } /* * This command displays either a structure definition, or a formatted display * of the contents of a structure at a specified address. If no address is * specified, the structure size and the file in which the structure is defined * are also displayed. A structure member may be appended to the structure * name (in a "struct.member" format) in order to limit the scope of the data * displayed to that particular member. Structure data is shown in hexadecimal * format. 
The raw data in a structure may be dumped with the -r flag. */
void
cmd_struct(void)
{
	cmd_datatype_common(STRUCT_REQUEST);
}

/*
 * This command displays either a union definition, or a formatted display
 * of the contents of a union at a specified address. If no address is
 * specified, the union size and the file in which the union is defined
 * are also displayed. A union member may be appended to the union
 * name (in a "union.member" format) in order to limit the scope of the data
 * displayed to that particular member. Structure data is shown in hexadecimal
 * format. The raw data in a union may be dumped with the -r flag.
 */
void
cmd_union(void)
{
	cmd_datatype_common(UNION_REQUEST);
}

/*
 * After determining what type of data type follows the *, this routine
 * has the identical functionality as cmd_struct() or cmd_union().
 */
void
cmd_pointer(void)
{
	cmd_datatype_common(0);
}

/*
 * Display a struct/union at addr, but try to replace each top-level
 * pointer-member line of the normal display with a dereferenced view of
 * the pointer's target (see dereference_pointer() below).
 */
static void
print_struct_with_dereference(ulong addr, struct datatype_member *dm, ulong flags)
{
	int indent;
	char *p1;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	struct datatype_member datatype_member, *dm1;

	dm1 = &datatype_member;

	/* Capture the normal formatted display in a tmpfile. */
	open_tmpfile();

	if (flags & UNION_REQUEST)
		print_union(dm->name, addr);
	else if (flags & STRUCT_REQUEST)
		print_struct(dm->name, addr);

	rewind(pc->tmpfile);
	while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
		indent = count_leading_spaces(buf1);
		/*
		 * Only top-level members (2-space indent, not a brace line)
		 * are candidates for dereferencing; pass others through.
		 */
		if ((indent != 2) || strstr(buf1, "{") || strstr(buf1, "}")) {
			print_verbatim(pc->saved_fp, buf1);
			continue;
		}

		/* Build "struct.member" from the "  member = ..." line. */
		sprintf(buf2, "%s.", dm->name);
		strcpy(buf3, &buf1[2]);
		p1 = strstr(buf3, " =");
		*p1 = NULLCHAR;
		strcat(buf2, buf3);

		/*
		 * arg_to_datatype() returning 2 means a valid member
		 * reference; if the dereference succeeds, its output has
		 * replaced this line.
		 */
		if ((arg_to_datatype(buf2, dm1, RETURN_ON_ERROR) == 2) &&
		    dereference_pointer(addr, dm1, flags))
			continue;

		print_verbatim(pc->saved_fp, buf1);
	}

	close_tmpfile();
}

/*
 * Given a structure/union member that is a pointer, read the pointer value
 * from addr + member_offset and display what it points to: a symbol, a
 * string (char *), a double-dereferenced value (ptr-to-ptr), a function,
 * or a gdb-formatted dump of the target datatype.  Returns TRUE if a
 * dereferenced display was produced, FALSE otherwise.
 */
static int
dereference_pointer(ulong addr, struct datatype_member *dm, ulong flags)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE*2];
	char *typeptr, *member, *charptr, *voidptr, *p1, *sym;
	int found, ptrptr, funcptr, typedef_is_ptr, use_symbol;
	ulong target, value;

	found = ptrptr = funcptr = typedef_is_ptr = use_symbol = FALSE;
	member = GETBUF(strlen(dm->member)+4);
	typeptr = charptr = voidptr = NULL;

	/*
	 * Scan the "ptype" output of the containing datatype for this
	 * member's declaration, classifying it as pointer, pointer-to-
	 * pointer, function pointer, or a typedef that is itself a pointer.
	 */
	open_tmpfile2();
	whatis_datatype(dm->name, flags, pc->tmpfile2);
	rewind(pc->tmpfile2);
	while (fgets(buf1, BUFSIZE, pc->tmpfile2)) {
		sprintf(member, " *%s;", dm->member);
		if (strstr(buf1, member) && (buf1[4] != ' ')) {
			typeptr = &buf1[4];
			found++;
			break;
		}
		sprintf(member, "**%s;", dm->member);
		if (strstr(buf1, member) && (buf1[4] != ' ')) {
			typeptr = &buf1[4];
			found++;
			ptrptr = TRUE;
			break;
		}
		sprintf(member, "(*%s)(", dm->member);
		if (strstr(buf1, member) && (buf1[4] != ' ')) {
			typeptr = &buf1[4];
			funcptr = TRUE;
			found++;
			break;
		}
		sprintf(member, " %s;", dm->member);
		if (strstr(buf1, member) && (buf1[4] != ' ')) {
			typeptr = &buf1[4];
			typedef_is_ptr = TRUE;
			strcpy(buf2, typeptr);
			p1 = strstr(buf2, " ");
			*p1 = NULLCHAR;
			if (datatype_exists(buf2) == TYPE_CODE_PTR) {
				found++;
				break;
			}
		}
	}
	close_tmpfile2();
	FREEBUF(member);

	if (!found) {
		console("%s.%s: not found!\n", dm->name, dm->member);
		return FALSE;
	}

	/* Terminate the type string and detect char */void* targets. */
	if (funcptr) {
		p1 = strstr(buf1, ";");
		*p1 = NULLCHAR;
	} else if (ptrptr) {
		p1 = strstr(buf1, "**");
		*(p1+2) = NULLCHAR;
		charptr = voidptr = NULL;
	} else if (typedef_is_ptr) {
		p1 = strstr(typeptr, " ");
		*p1 = NULLCHAR;
	} else {
		p1 = strstr(buf1, "*");
		*(p1+1) = NULLCHAR;
		charptr = strstr(&buf1[4], "char *");
		voidptr = strstr(&buf1[4], "void *");
	}

	console("%s.%s typeptr: %s ", dm->name, dm->member, typeptr);
	if (charptr)
		console("[char *]");
	else if (voidptr)
		console("[void *]");
	else if (funcptr)
		console("[func *]");
	else if (typedef_is_ptr)
		console("[typedef is ptr]");
	console("\n");

	/* Read the pointer value itself. */
	if (!readmem(addr + dm->member_offset, KVADDR, &target,
	    sizeof(void *), "target address", RETURN_ON_ERROR|QUIET)) {
		error(INFO, "cannot access %s.%s %lx\n",
			dm->name, dm->member, addr + dm->member_offset);
		return FALSE;
	}

	/* If the target is a known symbol of a printable type, use it. */
	if ((sym = value_symbol(target))) {
		switch (get_symbol_type(sym, NULL, NULL))
		{
		case TYPE_CODE_ARRAY:
		case TYPE_CODE_UNION:
		case TYPE_CODE_STRUCT:
		case TYPE_CODE_INT:
		case TYPE_CODE_PTR:
			use_symbol = TRUE;
			console("use_symbol: %s\n", sym);
			break;
		}
	}

	if (funcptr) {
		fprintf(pc->saved_fp, " %s = 0x%lx\n -> ", typeptr, target);
		if (sym)
			fprintf(pc->saved_fp, "<%s>\n", sym);
		else if (target)
			fprintf(pc->saved_fp, "(unknown)\n");
		else
			fprintf(pc->saved_fp, "NULL\n");
		return TRUE;
	}

	if (charptr) {
		fprintf(pc->saved_fp, " %s%s = 0x%lx\n -> ",
			typeptr, dm->member, target);
		if (sym)
			fprintf(pc->saved_fp, "<%s> ", sym);
		if (!target)
			fprintf(pc->saved_fp, "NULL\n");
		else if (!accessible(target) ||
		    !read_string(target, buf1, BUFSIZE-1))
			fprintf(pc->saved_fp, "(not accessible)\n");
		else
			fprintf(pc->saved_fp, "\"%s\"\n", buf1);
		return TRUE;
	}

	if (voidptr && !use_symbol) {
		fprintf(pc->saved_fp, " %s%s = 0x%lx\n -> ",
			typeptr, dm->member, target);
		if (sym)
			fprintf(pc->saved_fp, "<%s>\n", sym);
		else if (!target)
			fprintf(pc->saved_fp, "NULL\n");
		else if (voidptr)
			fprintf(pc->saved_fp, "(unknown target type)\n");
		return TRUE;
	}

	if (!target || !accessible(target)) {
		fprintf(pc->saved_fp, " %s%s%s = 0x%lx\n -> ", typeptr,
			typedef_is_ptr ? " " : "", dm->member, target);
		if (!target)
			fprintf(pc->saved_fp, "NULL\n");
		else
			fprintf(pc->saved_fp, "(not accessible)\n");
		return TRUE;
	}

	if (ptrptr) {
		fprintf(pc->saved_fp, " %s%s = 0x%lx\n -> ",
			typeptr, dm->member, target);
		if (sym)
			fprintf(pc->saved_fp, "<%s> ", sym);
		if (!target || !readmem(target, KVADDR, &value, sizeof(void *),
		    "target value", RETURN_ON_ERROR|QUIET))
			fprintf(pc->saved_fp, "\n");
		else
			fprintf(pc->saved_fp, "%lx\n", value);
		return TRUE;
	}

	/* Build the gdb print command for the target object. */
	if (use_symbol)
		sprintf(buf2, "p %s\n", sym);
	else
		sprintf(buf2, "p *((%s)(0x%lx))\n", typeptr, target);
	console("gdb command: %s", buf2);

	if (!typedef_is_ptr) {
		p1 = strstr(typeptr, "*");
		*(p1-1) = NULLCHAR;
	}

	if (!datatype_exists(typeptr)) {
		fprintf(pc->saved_fp,
		    " %s %s%s = 0x%lx\n -> (%s: no debuginfo data)\n",
		    typeptr, typedef_is_ptr ? "" : "*",
		    dm->member, target, typeptr);
		return TRUE;
	}

	open_tmpfile2();
	if (!gdb_pass_through(buf2, pc->tmpfile2, GNU_RETURN_ON_ERROR)) {
		console("gdb request failed: %s\n", buf2);
		close_tmpfile2();
		return FALSE;
	}

	fprintf(pc->saved_fp, " %s %s%s = 0x%lx\n -> ", typeptr,
		typedef_is_ptr ? "" : "*", dm->member, target);

	/* Re-emit gdb's output, stripping the "$N = " value prefix. */
	rewind(pc->tmpfile2);
	while (fgets(buf1, BUFSIZE, pc->tmpfile2)) {
		if (buf1[0] == '$') {
			if (sym)
				fprintf(pc->saved_fp, "<%s> ", sym);
			if (typedef_is_ptr || use_symbol) {
				if (strstr(buf1, "(") && strstr(buf1, ")")) {
					fprintf(pc->saved_fp, "\n");
					break;
				}
			}
			p1 = strstr(buf1, "=");
			fprintf(pc->saved_fp, "%s", p1+2);
		} else
			fprintf(pc->saved_fp, " %s", buf1);
	}
	close_tmpfile2();

	return TRUE;
}

/*
 * Common back-end for the "struct", "union" and "*" commands: parse the
 * options and the datatype/address/count arguments, then either dump the
 * declaration or display the data at one or more addresses.
 */
static void
cmd_datatype_common(ulong flags)
{
	int c;
	ulong addr, aflag;
	char *cpuspec;
	ulong *cpus;
	struct syment *sp;
	ulong list_head_offset;
	int count;
	int argc_members;
	int optind_save;
	unsigned int radix, restore_radix;
	struct datatype_member datatype_member, *dm;
	char *separator;
	char *structname, *members;
	char *memberlist[MAXARGS];
	char *typename;

	dm = &datatype_member;
	count = 0xdeadbeef;	/* sentinel: "no count entered" */
	aflag = addr = 0;
	list_head_offset = 0;
	argc_members = 0;
	radix = restore_radix = 0;
	separator = members = NULL;
	cpuspec = NULL;
	cpus = NULL;

	while ((c = getopt(argcnt, args, "pxdhfuc:rvol:")) != EOF) {
		switch (c)
		{
		case 'p':
			flags |= DEREF_POINTERS;
			break;

		case 'd':
			if (radix == 16)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 10;
			break;

		case 'h':
		case 'x':
			if (radix == 10)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 16;
			break;

		case 'c':
			count = atoi(optarg);
			break;

		case 'r':
			flags |= SHOW_RAW_DATA;
			break;

		case 'v':
			flags |= STRUCT_VERBOSE;
			break;

		case 'o':
			flags |= SHOW_OFFSET;
			break;

		case 'l':
			/* -l accepts a numeric offset or a "type.member". */
			if (IS_A_NUMBER(optarg))
				list_head_offset = stol(optarg,
					FAULT_ON_ERROR, NULL);
			else if (arg_to_datatype(optarg,
				dm, RETURN_ON_ERROR) > 1)
				list_head_offset = dm->member_offset;
			else
				error(FATAL, "invalid -l option: %s\n",
					optarg);
			break;

		case 'f':
			if (!pc->dumpfile)
				error(FATAL,
				    "-f option requires a dumpfile\n");
			pc->curcmd_flags |= MEMTYPE_FILEADDR;
			break;

		case 'u':
			pc->curcmd_flags |= MEMTYPE_UVADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	if ((count_chars(args[optind], ',')+1) > MAXARGS)
		error(FATAL, "too many members in comma-separated list!\n");

	if ((LASTCHAR(args[optind]) == ',') ||
	    (LASTCHAR(args[optind]) == '.'))
		error(FATAL, "invalid format: %s\n", args[optind]);

	optind_save = optind;

	/*
	 * Take care of address and count (array).
	 */
	while (args[++optind]) {
		if (aflag && (count != 0xdeadbeef))
			error(FATAL, "too many arguments!\n");

		if (!aflag) {
			/* A ":cpuspec" suffix may follow the address/symbol. */
			cpuspec = strchr(args[optind], ':');
			if (cpuspec)
				*cpuspec++ = NULLCHAR;
		}

		if (clean_arg() && IS_A_NUMBER(args[optind])) {
			if (aflag)
				count = stol(args[optind],
					FAULT_ON_ERROR, NULL);
			else if (cpuspec) {
				if (pc->curcmd_flags & MEMTYPE_FILEADDR)
					error(FATAL, "-f option cannot be used with percpu\n");
				addr = htol(args[optind], FAULT_ON_ERROR, NULL);
				aflag++;
			} else {
				if (pc->curcmd_flags & MEMTYPE_FILEADDR)
					pc->curcmd_private = stoll(args[optind],
						FAULT_ON_ERROR, NULL);
				else if (pc->curcmd_flags & MEMTYPE_UVADDR) {
					addr = htol(args[optind], FAULT_ON_ERROR,
						NULL);
				} else if (!IS_KVADDR(addr = htol(args[optind],
				    FAULT_ON_ERROR, NULL)))
					error(FATAL,
					"invalid kernel virtual address: %s\n",
						args[optind]);
				aflag++;
			}
		} else if ((sp = symbol_search(args[optind]))) {
			if (cpuspec && !is_percpu_symbol(sp)) {
				error(WARNING,
				    "%s is not percpu; cpuspec ignored.\n",
				    sp->name);
				cpuspec = NULL;
			}
			if (cpuspec) {
				/* Warn about percpu pointers: the addresses
				 * shown are of the pointers themselves. */
				if ((typename = expr_type_name(sp->name))) {
					if (LASTCHAR(typename) == '*')
						error(WARNING,
						    "percpu symbol \"%s\" is of type pointer\n",
						    sp->name);
					FREEBUF(typename);
				}
			}
			addr = sp->value;
			aflag++;
		} else {
			fprintf(fp, "symbol not found: %s\n", args[optind]);
			fprintf(fp, "possible alternatives:\n");
			if (!symbol_query(args[optind], " ", NULL))
				fprintf(fp, " (none found)\n");
			goto freebuf;
		}
	}

	if (cpuspec) {
		cpus = get_cpumask_buf();
		if (STREQ(cpuspec, ""))
			/* Bare ":" means the current context's CPU. */
			SET_BIT(cpus, CURRENT_CONTEXT()->processor);
		else
			make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL);
	}

	optind = optind_save;

	if (count == 0xdeadbeef)
		count = 1;
	else if (!aflag)
		error(FATAL, "no kernel virtual address argument entered\n");

	if ((flags & DEREF_POINTERS) && !aflag)
		error(FATAL, "-p option requires address argument\n");

	if (list_head_offset)
		addr -= list_head_offset;

	/*
	 * Handle struct.member[,member] argument format.
	 */
	if (strstr(args[optind], ".")) {
		structname = GETBUF(strlen(args[optind])+1);
		strcpy(structname, args[optind]);
		separator = strstr(structname, ".");

		members = GETBUF(strlen(args[optind])+1);
		strcpy(members, separator+1);
		replace_string(members, ",", ' ');
		argc_members = parse_line(members, memberlist);
	} else
		structname = args[optind];

	if ((arg_to_datatype(structname, dm,
	    DATATYPE_QUERY|ANON_MEMBER_QUERY|RETURN_ON_ERROR) < 1))
		error(FATAL, "invalid data structure reference: %s\n",
			structname);

	/* Reconcile the request type with what the datatype actually is. */
	if (! (flags & (STRUCT_REQUEST|UNION_REQUEST)) ) {
		flags |= dm->type;
		if (!(flags & (UNION_REQUEST|STRUCT_REQUEST)))
			error(FATAL, "invalid argument");
	} else if ( (flags &(STRUCT_REQUEST|UNION_REQUEST)) != dm->type) {
		error(FATAL, "data type mismatch: %s is not a %s\n",
			dm->name, flags & UNION_REQUEST ? "union" : "struct");
	}

	if ((argc_members > 1) && !aflag) {
		error(INFO, flags & SHOW_OFFSET ?
		    "-o option not valid with multiple member format\n" :
		    "multiple member format not supported in this syntax\n");
		*separator = NULLCHAR;
		argc_members = 0;
		flags |= SHOW_OFFSET;
	}

	if ((argc_members > 1) && aflag && (flags & SHOW_OFFSET))
		error(FATAL,
		    "-o option not valid with multiple member format\n");

	set_temporary_radix(radix, &restore_radix);

	/*
	 * No address was passed -- dump the structure/member declaration.
	 */
	if (!aflag) {
		if (argc_members &&
		    !member_to_datatype(memberlist[0], dm,
					ANON_MEMBER_QUERY))
			error(FATAL, "invalid data structure reference: %s.%s\n",
			      dm->name, memberlist[0]);
		do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF));
	} else if (cpus) {
		/* Percpu: repeat the display for each requested CPU. */
		for (c = 0; c < kt->cpus; c++) {
			ulong cpuaddr;

			if (!NUM_IN_BITMAP(cpus, c))
				continue;

			cpuaddr = addr + kt->__per_cpu_offset[c];

			fprintf(fp, "[%d]: ", c);
			if (hide_offline_cpu(c)) {
				fprintf(fp, "[OFFLINE]\n");
				continue;
			}
			fprintf(fp, "%lx\n", cpuaddr);
			do_datatype_addr(dm, cpuaddr , count,
					 flags, memberlist, argc_members);
		}
	} else
		do_datatype_addr(dm, addr, count, flags,
				 memberlist, argc_members);

	restore_current_radix(restore_radix);

freebuf:
	if (argc_members) {
		FREEBUF(structname);
		FREEBUF(members);
	}

	if (cpus)
		FREEBUF(cpus);
}

/*
 * Display one or more instances of a datatype at addr, handling -o/-r/-p
 * display modes and the optional member list.  A negative count displays
 * the array that ends at addr.
 */
static void
do_datatype_addr(struct datatype_member *dm, ulong addr, int count,
		 ulong flags, char **memberlist, int argc_members)
{
	int i, c;
	long len = dm->size;

	if (count < 0) {
		addr -= len * abs(count);
		addr += len;
	}

	if (pc->curcmd_flags & MEMTYPE_FILEADDR)
		addr = 0;  /* unused, but parsed by gdb */

	for (c = 0; c < abs(count); c++, addr += len, pc->curcmd_private += len) {
		if (c)
			fprintf(fp,"\n");

		i = 0;
		do {
			if (argc_members) {
				if (argc_members > 1 && flags & SHOW_RAW_DATA)
					error(FATAL,
					    "only one structure member allowed with -r\n");
				/* This call works fine with fields
				 * of the second, third, ... levels.
				 * There is no need to fix it */
				if (!member_to_datatype(memberlist[i], dm,
							ANON_MEMBER_QUERY))
					error(FATAL,
					    "invalid data structure reference: %s.%s\n",
					    dm->name, memberlist[i]);
			}

			/*
			 * Display member addresses or data
			 */
			if (flags & SHOW_OFFSET) {
				dm->vaddr = addr;
				do_datatype_declaration(dm,
					flags | (dm->flags & TYPEDEF));
			} else if (flags & SHOW_RAW_DATA) {
				if (dm->member) {
					addr += dm->member_offset;
					len = MEMBER_SIZE(dm->name, dm->member);
					if (len < 0)
						len = ANON_MEMBER_SIZE(dm->name,
							dm->member);
					if (len < 0)
						error(FATAL,
						    "invalid data structure reference: %s.%s\n",
						    dm->name, dm->member);
				}
				raw_data_dump(addr, len,
					flags & STRUCT_VERBOSE);
			} else if ((flags & DEREF_POINTERS) && !dm->member) {
				print_struct_with_dereference(addr, dm, flags);
			} else {
				/* Full display, optionally filtered down to
				 * a single member via a tmpfile. */
				if (dm->member)
					open_tmpfile();

				if (flags & UNION_REQUEST)
					print_union(dm->name, addr);
				else if (flags & STRUCT_REQUEST)
					print_struct(dm->name, addr);

				if (dm->member) {
					if (!((flags & DEREF_POINTERS) &&
					    dereference_pointer(addr, dm, flags))) {
						if (count_chars(dm->member, '.') ||
						    count_chars(dm->member, '['))
							parse_for_member_extended(dm,
								PARSE_FOR_DATA);
						else
							parse_for_member(dm,
								PARSE_FOR_DATA);
					}
					close_tmpfile();
				}
			}
		} while (++i < argc_members);
	}
}

/*
 * Determine whether structure.member is a "char *" or embedded char array,
 * i.e., displayable as a string.
 */
int
is_string(char *structure, char *member)
{
	int retval;
	char *t;
	char buf[BUFSIZE];

	retval = FALSE;
	open_tmpfile();
	whatis_datatype(structure, STRUCT_REQUEST, pc->tmpfile);
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (!(t = strstr(buf, "char ")))
			continue;
		t += 5;
		if (*t == '*')
			t++;
		if (t != strstr(t, member))
			continue;
		t += strlen(member);
		if (*t == ';' || *t == '[') {
			retval = TRUE;
			break;
		}
	}

	close_tmpfile();

	return retval;
}

/*
 * Generic function for dumping data structure declarations, with a small
 * fixup for typedefs, sizes and member offsets.
*/ static void do_datatype_declaration(struct datatype_member *dm, ulong flags) { long len; char buf[BUFSIZE]; char *p1, *p2, *multiline; FILE *sfp; if (CRASHDEBUG(1)) dump_datatype_member(fp, dm); if (dm->member && count_chars(dm->member, '.')) error(FATAL, "invalid data structure reference: %s.%s\n", dm->name, dm->member); open_tmpfile(); whatis_datatype(dm->name, flags, pc->tmpfile); rewind(pc->tmpfile); if (dm->member) flags |= SHOW_OFFSET; sfp = pc->saved_fp; len = dm->size; multiline = NULL; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) { multiline = strstr(buf, "{"); if (flags & TYPEDEF) fprintf(sfp, "typedef "); p1 = buf + strlen("type = "); if ((p2 = strstr(buf, "(*)()"))) { *p2 = NULLCHAR; fprintf(sfp, "%s(*%s)();\n", p1, dm->name); } else if ((p2 = strstr(buf, "()"))) { *p2 = NULLCHAR; fprintf(sfp, "%s(%s)();\n", p1, dm->name); } else if (multiline) fprintf(sfp, "%s", p1); else fprintf(sfp, "%s %s;\n", strip_linefeeds(p1), dm->name); } else { if (multiline && STRNEQ(buf, "}") && (flags & TYPEDEF)){ if (strstr(buf, "} **()")) fprintf(sfp, "} **(%s)();\n", dm->name); else fprintf(sfp, "%s %s;\n", strip_linefeeds(buf), dm->name); } else { if ((flags & SHOW_OFFSET) && whitespace(buf[0])) show_member_offset(sfp, dm, buf); else fprintf(sfp, "%s", buf); } } } if (!dm->member) { switch (*gdb_output_radix) { default: case 10: fprintf(sfp, "SIZE: %ld\n", len); break; case 16: fprintf(sfp, "SIZE: 0x%lx\n", len); break; } } close_tmpfile(); } /* * Take a argument string, which may be in "struct.member" or "union.member" * format, figure out whether it's a structure or a union reference, and * fill in the appropriate fields of the dataytype_member structure. * Return 1 if it's a straight struct or union reference, 2 if it has * a legitimate .member attached to it, or 0 if it's bogus. 
*/ int arg_to_datatype(char *s, struct datatype_member *dm, ulong flags) { char *p1; int both; BZERO(dm, sizeof(struct datatype_member)); both = FALSE; dm->name = s; if (!(p1 = strstr(s, "."))) both = FALSE; else if (flags & DATATYPE_QUERY) { *p1 = NULLCHAR; both = FALSE; } else { if ((p1 == s) || !strlen(p1+1)) goto datatype_member_fatal; *p1 = NULLCHAR; if (strstr(p1+1, ".")) goto datatype_member_fatal; both = TRUE; } if ((dm->size = DATATYPE_SIZE(dm)) < 0) { if (flags & RETURN_ON_ERROR) goto datatype_member_fatal; error(FATAL, "cannot handle \"%s\": try \"gdb whatis\" or \"gdb ptype\"\n", s); } if (!both) return 1; if (member_to_datatype(p1 + 1, dm, flags)) return 2; datatype_member_fatal: if (flags & RETURN_ON_ERROR) { if (both) *p1 = '.'; return 0; } if (both) { *p1 = '.'; if (strstr(p1+1, ".")) error(FATAL, "only one %s member allowed: %s\n", (dm->type == STRUCT_REQUEST) ? "struct" : ((dm->type == UNION_REQUEST) ? "union" : "struct/union"), s); } return (error(FATAL, "invalid argument: %s\n", s)); } static int member_to_datatype(char *s, struct datatype_member *dm, ulong flags) { dm->member = s; if ((dm->member_offset = MEMBER_OFFSET(dm->name, s)) >= 0) return TRUE; if ((flags & ANON_MEMBER_QUERY) && ((dm->member_offset = ANON_MEMBER_OFFSET(dm->name, s)) >= 0)) return TRUE; return FALSE; } /* * debug routine -- not called on purpose by anybody. */ static void dump_datatype_member(FILE *ofp, struct datatype_member *dm) { int others; others = 0; fprintf(ofp, " name: %s\n", dm->name); fprintf(ofp, " member: %s\n", dm->member); fprintf(ofp, " type: %lx (", dm->type); if (dm->type & STRUCT_REQUEST) fprintf(ofp, "%sSTRUCT_REQUEST", others++ ? "|" : ""); if (dm->type & UNION_REQUEST) fprintf(fp, "%sUNION_REQUEST", others++ ? "|" : ""); if (dm->type & INT64) fprintf(ofp, "%sINT64", others++ ? "|" : ""); if (dm->type & INT32) fprintf(ofp, "%sINT32", others++ ? "|" : ""); if (dm->type & INT16) fprintf(ofp, "%sINT16", others++ ? 
"|" : ""); if (dm->type & INT8) fprintf(ofp, "%sINT8", others++ ? "|" : ""); if (dm->type & POINTER) fprintf(ofp, "%sPOINTER", others++ ? "|" : ""); if (dm->type & FUNCTION) fprintf(ofp, "%sFUNCTION", others++ ? "|" : ""); if (dm->type & ARRAY) fprintf(ofp, "%sARRAY", others++ ? "|" : ""); if (dm->type & ENUM) fprintf(ofp, "%sENUM", others++ ? "|" : ""); if (dm->type & IN_UNION) fprintf(ofp, "%sIN_UNION", others++ ? "|" : ""); if (dm->type & IN_STRUCT) fprintf(ofp, "%sIN_STRUCT", others++ ? "|" : ""); fprintf(ofp, ")\n"); fprintf(ofp, " size: %ld\n", dm->size); fprintf(ofp, " member_offset: %ld\n", dm->member_offset); fprintf(ofp, " member_size: %ld\n", dm->member_size); fprintf(ofp, "member_typecode: %d\n", dm->member_typecode); fprintf(ofp, " flags: %lx ", dm->flags); dump_datatype_flags(dm->flags, ofp); fprintf(ofp, " tagname: %s\n", dm->tagname); fprintf(ofp, " value: %ld\n", dm->value); fprintf(ofp, " vaddr: %lx\n", dm->vaddr); fprintf(ofp, "\n"); } struct type_request { int cnt; /* current number of entries in types array */ int idx; /* index to next entry in types array */ struct type_info { /* dynamically-sized array of collected types */ char *name; ulong size; } *types; }; static int compare_size_name(const void *va, const void *vb) { struct type_info *a, *b; a = (struct type_info *)va; b = (struct type_info *)vb; if (a->size == b->size) return strcmp(a->name, b->name); else return a->size < b->size ? 
-1 : 1; } static void append_struct_symbol (struct gnu_request *req, void *data) { int i; long s; struct type_request *treq = (struct type_request *)data; for (i = 0; i < treq->idx; i++) if (!strcmp(treq->types[i].name, req->name)) break; if (i < treq->idx) // We've already collected this type return; if (treq->idx == treq->cnt) { s = sizeof(struct type_info) * treq->cnt; RESIZEBUF(treq->types, s, s * 3); treq->cnt *= 3; } treq->types[treq->idx].name = req->name; treq->types[treq->idx].size = req->length; treq->idx++; } static void request_types(ulong lowest, ulong highest, char *member_name) { int i, len; char buf[BUFSIZE]; struct type_request typereq; struct gnu_request request = {0}; typereq.idx = 0; typereq.cnt = 16; typereq.types = (void *)GETBUF(16 * sizeof(struct type_info)); #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) error(FATAL, "-r option not supported with this version of gdb\n"); #else request.type_name = member_name; #endif request.command = GNU_ITERATE_DATATYPES; request.lowest = lowest; request.highest = highest; request.member = member_name; request.callback = append_struct_symbol; request.callback_data = (void *)&typereq; gdb_interface(&request); qsort(typereq.types, typereq.idx, sizeof(struct type_info), compare_size_name); if (typereq.idx == 0) fprintf(fp, "(none found)\n"); else { sprintf(buf, "%ld", typereq.types[typereq.idx-1].size); len = MAX(strlen(buf), strlen("SIZE")); fprintf(fp, "%s TYPE\n", mkstring(buf, len, RJUST, "SIZE")); for (i = 0; i < typereq.idx; i++) fprintf(fp, "%s %s\n", mkstring(buf, len, RJUST|LONG_DEC, MKSTR(typereq.types[i].size)), typereq.types[i].name); } FREEBUF(typereq.types); } /* * This command displays the definition of structures, unions, typedefs or * text/data symbols: * * 1. For a structure name, the output is the same as if the "struct" * command was used. * 2. For a union name, the output is the same as if the "union" command * was used. * 3. 
For a typedef name that translates to a structure or union, the output * is the same as if the "struct" or "union" command was used. * 4. For a typedef name that translates to a primitive datatype, the one-line * declaration is displayed. * 5. For a kernel symbol name, the output is the same as if the "sym" command * was used. * 6. If the -r and -m are given, then the structures/unions of specified size * and/or contain a member type. */ void cmd_whatis(void) { int c, do_request; struct datatype_member datatype_member, *dm; struct syment *sp; char buf[BUFSIZE], *pl, *ph, *member; long len; ulong lowest, highest; ulong flags; dm = &datatype_member; flags = 0; lowest = highest = 0; pl = buf; member = NULL; do_request = FALSE; while ((c = getopt(argcnt, args, "om:r:")) != EOF) { switch(c) { case 'o': flags |= SHOW_OFFSET; break; case 'm': member = optarg; do_request = TRUE; break; case 'r': strncpy(buf, optarg, 15); if ((ph = strstr(buf, "-")) != NULL) *(ph++) = '\0'; highest = lowest = stol(pl, FAULT_ON_ERROR, NULL); if (ph) highest = stol(ph, FAULT_ON_ERROR, NULL); do_request = TRUE; break; default: argerrs++; break; } } if (!argerrs && do_request) { request_types(lowest, highest, member); return; } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); if (STREQ(args[optind], "struct") || STREQ(args[optind], "union") || STREQ(args[optind], "enum")) optind++; else if ((sp = symbol_search(args[optind]))) { whatis_variable(sp); return; } if (!args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); if (arg_to_datatype(args[optind], dm, RETURN_ON_ERROR)) { if ((len = dm->size) < 0) goto whatis_failure; flags |= dm->type; if (dm->type == ENUM) { if (dm->tagname) fprintf(fp, "%senum%s%s = %ld\n", dm->flags & TYPEDEF ? "typedef " : "", strlen(dm->tagname) ? 
" " : "", dm->tagname, dm->value); else dump_enumerator_list(args[optind]); return; } do_datatype_declaration(dm, flags | (dm->flags & TYPEDEF)); } else { if (!gdb_whatis(concat_args(buf, 1, FALSE))) goto whatis_failure; } return; whatis_failure: error(INFO, "cannot resolve: %s\n", concat_args(buf, 1, FALSE)); cmd_usage(pc->curcmd, SYNOPSIS); } /* * Try gdb's whatis on a command string. */ static int gdb_whatis(char *s) { char buf[BUFSIZE], *p1; open_tmpfile(); sprintf(buf, "whatis %s", s); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); return FALSE; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { p1 = buf; if (STRNEQ(buf, "type = ")) p1 += strlen("type = "); fprintf(pc->saved_fp, "%s", p1); } close_tmpfile(); return TRUE; } /* * Given the name of an enum, have gdb dump its enumerator list. */ int dump_enumerator_list(char *e) { struct gnu_request *req; struct datatype_member datatype_member, *dm; dm = &datatype_member; if (!arg_to_datatype(e, dm, RETURN_ON_ERROR) || (dm->size < 0) || (dm->type != ENUM) || dm->tagname) return FALSE; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_GET_DATATYPE; req->name = e; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); FREEBUF(req); return TRUE; } /* * Given the name of an enum, return its value. */ int enumerator_value(char *e, long *value) { struct datatype_member datatype_member, *dm; dm = &datatype_member; if (arg_to_datatype(e, dm, RETURN_ON_ERROR)) { if ((dm->size >= 0) && (dm->type == ENUM) && dm->tagname) { *value = dm->value; return TRUE; } } return FALSE; } /* * Verify that a datatype exists, but return on error. 
 */
int
datatype_exists(char *s)
{
	int retval;
	char buf[BUFSIZE], *p;
	struct gnu_request *req;

	/* Any ".member" suffix is irrelevant to the type lookup. */
	strcpy(buf, s);
	if ((p = strstr(buf, ".")))
		*p = NULLCHAR;

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_GET_DATATYPE;
	req->name = buf;
	req->flags = GNU_RETURN_ON_ERROR;
	req->fp = pc->nullfp;	/* suppress gdb output */

	gdb_interface(req);

	retval = req->typecode;	/* 0 if the type does not exist */

	FREEBUF(req);

	return retval;
}

/*
 * Set the output radix if requested, and pass it on to gdb.
 */
void
cmd_p(void)
{
	int c;
	struct syment *sp, *percpu_sp;
	unsigned radix;
	int do_load_module_filter;
	char buf1[BUFSIZE];
	char *cpuspec, *p;

	do_load_module_filter = radix = 0;

	while ((c = getopt(argcnt, args, "dhxu")) != EOF) {
		switch(c)
		{
		case 'd':
			if (radix == 16)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 10;
			break;

		case 'h':
		case 'x':
			if (radix == 10)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 16;
			break;

		case 'u':
			pc->curcmd_flags |= MEMTYPE_UVADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* A trailing ":cpuspec" selects percpu instances. */
	p = cpuspec = strrchr(args[optind], ':');
	if (cpuspec)
		*cpuspec++ = NULLCHAR;

	sp = NULL;
	if ((sp = symbol_search(args[optind])) && !args[optind+1]) {
		if ((percpu_sp = per_cpu_symbol_search(args[optind])) &&
		    display_per_cpu_info(percpu_sp, radix, cpuspec))
			return;
		if (module_symbol(sp->value, NULL, NULL, NULL, *gdb_output_radix))
			do_load_module_filter = TRUE;
	} else if ((percpu_sp = per_cpu_symbol_search(args[optind])) &&
		   display_per_cpu_info(percpu_sp, radix, cpuspec))
		return;
	else if (st->flags & LOAD_MODULE_SYMS)
		do_load_module_filter = TRUE;

	if (cpuspec) {
		if (sp)
			error(WARNING, "%s is not percpu; cpuspec ignored.\n",
			      sp->name);
		else
			/* maybe a valid C expression (e.g. ':') */
			*p = ':';
	}

	process_gdb_output(concat_args(buf1, 0, TRUE), radix,
			   sp ? sp->name : NULL, do_load_module_filter);
}

/*
 * Hand a print request to gdb, optionally prefixing the output with
 * "leader = " and/or filtering module symbol names, and restoring the
 * previous output radix afterwards.
 */
static void
process_gdb_output(char *gdb_request, unsigned radix,
		   const char *leader, int do_load_module_filter)
{
	unsigned restore_radix;
	int success;
	char buf1[BUFSIZE];
	char *p1;

	if (leader || do_load_module_filter)
		open_tmpfile();

	set_temporary_radix(radix, &restore_radix);

	success = gdb_pass_through(gdb_request, NULL, GNU_RETURN_ON_ERROR);

	if (success && (leader || do_load_module_filter)) {
		int firstline;

		if (leader) {
			fprintf(pc->saved_fp, "%s = ", leader);
			fflush(pc->saved_fp);
		}

		firstline = TRUE;
		rewind(pc->tmpfile);
		while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
			/* Put a struct's opening brace on its own line. */
			if (firstline &&
			    (p1 = strstr(buf1, "{")) &&
			    !STRNEQ(p1, "{\n")) {
				*p1 = NULLCHAR;
				fprintf(pc->saved_fp, "%s", buf1);
				fprintf(pc->saved_fp, "\n {");
				print_verbatim(pc->saved_fp, p1+1);
			} else
				print_verbatim(pc->saved_fp,
					do_load_module_filter ?
					load_module_filter(buf1, LM_P_FILTER) :
					buf1);

			firstline = FALSE;
		}
	}

	if (leader || do_load_module_filter)
		close_tmpfile();

	restore_current_radix(restore_radix);

	if (!success)
		error(FATAL, "gdb request failed: %s\n", gdb_request);
}

/*
 * Get the type of an expression using gdb's "whatis" command.
 * The returned string is dynamically allocated, and it should
 * be passed to FREEBUF() when no longer needed.
 * Return NULL if the type cannot be determined.
 */
static char *
expr_type_name(const char *expr)
{
	char buf[BUFSIZE], *p;

	open_tmpfile();
	sprintf(buf, "whatis %s", expr);
	if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
		close_tmpfile();
		return NULL;
	}

	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile) && !STRNEQ(buf, "type = "))
		;
	p = feof(pc->tmpfile) ? NULL : buf + strlen("type = ");
	close_tmpfile();

	if (p) {
		size_t len = strlen(clean_line(p));
		/* GDB reports unknown types as <...descriptive text...> */
		if (p[0] == '<' && p[len-1] == '>')
			return NULL;
		return strcpy(GETBUF(len + 1), p);
	}

	return NULL;
}

/*
 * Display the datatype of the per_cpu__xxx symbol and
 * the addresses of each its per-cpu instances.
 */
static int
display_per_cpu_info(struct syment *sp, int radix, char *cpuspec)
{
	ulong *cpus;
	int c;
	ulong addr;
	char buf[BUFSIZE];
	char leader[sizeof("&per_cpu(") + strlen(sp->name) +
		sizeof(", " STR(UINT_MAX) ")")];
	char *typename;
	int do_load_module_filter;

	/* Only SMP percpu data symbols qualify. */
	if (((kt->flags & (SMP|PER_CPU_OFF)) != (SMP|PER_CPU_OFF)) ||
	    (!is_percpu_symbol(sp)) ||
	    !((sp->type == 'd') || (sp->type == 'D') || (sp->type == 'V')))
		return FALSE;

	if (cpuspec) {
		cpus = get_cpumask_buf();
		if (STREQ(cpuspec, ""))
			SET_BIT(cpus, CURRENT_CONTEXT()->processor);
		else
			make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL);
	} else
		cpus = NULL;

	typename = expr_type_name(sp->name);

	if (!cpus) {
		fprintf(fp, "PER-CPU DATA TYPE:\n ");
		if (!typename)
			fprintf(fp, "[undetermined type] %s;\n", sp->name);
		else
			whatis_variable(sp);

		fprintf(fp, "PER-CPU ADDRESSES:\n");
	}

	do_load_module_filter =
		module_symbol(sp->value, NULL, NULL, NULL, *gdb_output_radix);

	for (c = 0; c < kt->cpus; c++) {
		if (hide_offline_cpu(c)) {
			fprintf(fp, "cpu %d is OFFLINE\n", c);
			continue;
		}

		if (cpus && !NUM_IN_BITMAP(cpus, c))
			continue;

		addr = sp->value + kt->__per_cpu_offset[c];
		if (!cpus)
			fprintf(fp, " [%d]: %lx\n", c, addr);
		else if (typename) {
			/* Print the instance fully typed via gdb. */
			snprintf(buf, sizeof buf, "p *(%s*) 0x%lx",
				 typename, addr);
			sprintf(leader, "per_cpu(%s, %u)", sp->name, c);
			process_gdb_output(buf, radix, leader,
					   do_load_module_filter);
		} else {
			/* Unknown type: just show the instance address. */
			snprintf(buf, sizeof buf, "p (void*) 0x%lx", addr);
			sprintf(leader, "&per_cpu(%s, %u)", sp->name, c);
			process_gdb_output(buf, radix, leader,
					   do_load_module_filter);
		}
	}

	if (typename)
		FREEBUF(typename);
	if (cpus)
		FREEBUF(cpus);

	return TRUE;
}

/*
 * Return the load_module that owns a module percpu symbol, or NULL.
 */
static struct load_module *
get_module_percpu_sym_owner(struct syment *sp)
{
	int i;
	struct load_module *lm;

	if (!IS_MODULE_SYMBOL(sp))
		return NULL;

	/*
	 * Find out percpu symbol owner module.
	 * If found out, sp is module's percpu symbol.
	 */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (!MODULE_PERCPU_SYMS_LOADED(lm))
			continue;
		if (IN_MODULE_PERCPU(sp->value, lm))
			return lm;
	}
	return NULL;
}

/*
 * Classify a symbol: 1 = kernel percpu, 2 = module percpu, 0 = neither.
 */
static int
is_percpu_symbol(struct syment *sp)
{
	if (sp->value >= st->__per_cpu_start) {
		if (sp->value < st->__per_cpu_end)
			/* kernel percpu symbol */
			return 1;
		else if (get_module_percpu_sym_owner(sp))
			/* module percpu symbol */
			return 2;
	}
	return 0;
}

/*
 * As a last-ditch effort before a command is thrown away by exec_command(),
 * args[0] is checked to see whether it's the name of a variable, structure,
 * union, or typedef.  If so, args[0] is changed to the appropriate command,
 * i.e., "p", "struct", "union", or "whatis", and the original args are all
 * shifted into the next higher args[] location.
*/ int is_datatype_command(void) { int i; long len; char *command; struct datatype_member datatype_member, *dm; struct syment *sp; char *rdarg; char buf[BUFSIZE]; if (!args[0]) return FALSE; strcpy(buf, args[0]); dm = &datatype_member; if ((sp = symbol_search(args[0])) && (argcnt == 1)) { if (is_gdb_command(FALSE, RETURN_ON_ERROR)) { pc->curcmd = pc->program_name; error(FATAL, "ambiguous command: %s (symbol and gdb command)\n", args[0]); } command = "p"; } else if (STREQ(args[0], "enum")) command = "whatis"; else if (!datatype_exists(args[0])) return FALSE; else if (!arg_to_datatype(buf, dm, RETURN_ON_ERROR|DATATYPE_QUERY)) return FALSE; else { if (is_gdb_command(FALSE, RETURN_ON_ERROR)) { pc->curcmd = pc->program_name; error(FATAL, "ambiguous command: %s (symbol/data type and gdb command)\n", args[0]); } if ((sp = symbol_search(args[0])) && (argcnt == 1)) { command = "p"; dm->type = 0; } else if ((len = DATATYPE_SIZE(dm)) < 0) { return FALSE; } else if (sp) { command = "p"; dm->type = 0; } switch (dm->type) { case STRUCT_REQUEST: if ((dm->flags & TYPEDEF) && (argcnt == 1)) command = "whatis"; else command = "struct"; break; case UNION_REQUEST: if ((dm->flags & TYPEDEF) && (argcnt == 1)) command = "whatis"; else command = "union"; break; case POINTER: command = "whatis"; break; case ARRAY: command = "whatis"; break; case FUNCTION: command = "whatis"; break; case ENUM: command = "whatis"; break; default: if (dm->type & INTEGER_TYPE) { switch (dm->type) { case INT64: rdarg = "-64"; break; case INT32: rdarg = "-32"; break; case INT16: rdarg = "-16"; break; case INT8: rdarg = "-8"; break; default: rdarg = NULL; break; } if (args[1]) { if ((sp = symbol_search(args[1]))) { command = "p"; args[0] = args[1]; argcnt--; } else { command = "rd"; args[0] = rdarg; } } else command = "whatis"; } else return FALSE; break; } } for (i = argcnt; i; i--) args[i] = args[i-1]; args[0] = command; argcnt++; return TRUE; } /* * Given a structure name and an address, have gdb do most of 
the work. */ static void print_struct(char *s, ulong addr) { char buf[BUFSIZE]; if (is_downsized(s)) pc->curcmd_flags |= PARTIAL_READ_OK; if (is_typedef(s)) sprintf(buf, "output *(%s *)0x%lx", s, addr); else sprintf(buf, "output *(struct %s *)0x%lx", s, addr); fprintf(fp, "struct %s ", s); gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR); fprintf(fp, "\n"); pc->curcmd_flags &= ~PARTIAL_READ_OK; } /* * Given a union name and an address, let gdb do the work. */ static void print_union(char *s, ulong addr) { char buf[BUFSIZE]; if (is_downsized(s)) pc->curcmd_flags |= PARTIAL_READ_OK; if (is_typedef(s)) sprintf(buf, "output *(%s *)0x%lx", s, addr); else sprintf(buf, "output *(union %s *)0x%lx", s, addr); fprintf(fp, "union %s ", s); gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR); pc->curcmd_flags &= ~PARTIAL_READ_OK; } /* * Given a structure or union, find its definition in the datatype symbol * file, and dump it. If the verbose flags is set, everything from the * file is shown; otherwise the bitpos, size and id data is stripped. */ static void whatis_datatype(char *st, ulong flags, FILE *ofp) { char lookbuf[BUFSIZE]; if (flags & TYPEDEF) sprintf(lookbuf, "ptype %s", st); else if (flags & UNION_REQUEST) sprintf(lookbuf, "ptype union %s", st); else if (flags & STRUCT_REQUEST) sprintf(lookbuf, "ptype struct %s", st); else return; if (!gdb_pass_through(lookbuf, ofp, GNU_RETURN_ON_ERROR)) { /* * When a structure is defined using the format: * * typedef struct { * yada yada yada * } type_t; * * gdb says it's a structure and not a typedef. So * if the union or struct pass-through fails, it can't * hurt to retry it with just "ptype type_t" before * giving up. */ if (flags & (UNION_REQUEST|STRUCT_REQUEST)) { sprintf(lookbuf, "ptype %s", st); gdb_pass_through(lookbuf, ofp, 0); } } } /* * Scan the symbol file for a variable declaration. 
*/ static void whatis_variable(struct syment *sp) { char *p1; char buf[BUFSIZE]; open_tmpfile(); sprintf(buf, "whatis %s", sp->name); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); error(FATAL, "gdb request failed: whatis %s\n", sp->name); } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); clean_line(buf); if ((p1 = strstr(buf, "["))) { shift_string_right(p1, strlen(sp->name)); BCOPY(sp->name, p1, strlen(sp->name)); p1 = buf + strlen("type = "); fprintf(fp, "%s;\n", p1); } else if ((p1 = strstr(buf, "("))) { if (index(buf, '(') == rindex(buf, '(')) { shift_string_right(p1, strlen(sp->name)); BCOPY(sp->name, p1, strlen(sp->name)); } else { p1 = strstr(buf, ")"); shift_string_right(p1, strlen(sp->name)); BCOPY(sp->name, p1, strlen(sp->name)); } p1 = buf + strlen("type = "); fprintf(fp, "%s;\n", p1); } else { p1 = buf + strlen("type = "); fprintf(fp, "%s%s%s;\n", p1, LASTCHAR(p1) == '*' ? "":" ", sp->name); } } /* * Determines whether the current structure or union member is a typedef. */ int is_typedef(char *name) { struct datatype_member datatype_member, *dm; if (!name) drop_core("is_typedef() received NULL name string\n"); dm = &datatype_member; BZERO(dm, sizeof(struct datatype_member)); dm->name = name; return (DATATYPE_SIZE(dm) < 0 ? FALSE : (dm->flags & TYPEDEF)); } static void dump_datatype_flags(ulong flags, FILE *ofp) { int others; others = 0; fprintf(ofp, "("); if (flags & UINT8) fprintf(ofp, "%sUINT8", others++ ? "|" : ""); if (flags & INT8) fprintf(ofp, "%sINT8", others++ ? "|" : ""); if (flags & UINT16) fprintf(ofp, "%sUINT16", others++ ? "|" : ""); if (flags & INT16) fprintf(ofp, "%sINT16", others++ ? "|" : ""); if (flags & UINT32) fprintf(ofp, "%sUINT32", others++ ? "|" : ""); if (flags & INT32) fprintf(ofp, "%sINT32", others++ ? "|" : ""); if (flags & UINT64) fprintf(ofp, "%sUINT64", others++ ? "|" : ""); if (flags & INT64) fprintf(ofp, "%sINT64", others++ ? 
"|" : ""); if (flags & POINTER) fprintf(ofp, "%sPOINTER", others++ ? "|" : ""); if (flags & FUNCTION) fprintf(ofp, "%sFUNCTION", others++ ? "|" : ""); if (flags & ARRAY) fprintf(ofp, "%sARRAY", others++ ? "|" : ""); if (flags & ENUM) fprintf(ofp, "%sENUM", others++ ? "|" : ""); if (flags & TYPEDEF) fprintf(ofp, "%sTYPEDEF", others++ ? "|" : ""); if (flags & STRUCT_VERBOSE) fprintf(ofp, "%sSTRUCT_VERBOSE", others++ ? "|" : ""); if (flags & SHOW_OFFSET) fprintf(ofp, "%sSHOW_OFFSET", others++ ? "|" : ""); if (flags & DATATYPE_QUERY) fprintf(ofp, "%sDATATYPE_QUERY", others++ ? "|" : ""); if (flags & ANON_MEMBER_QUERY) fprintf(ofp, "%sANON_MEMBER_QUERY", others++ ? "|" : ""); if (flags & SHOW_RAW_DATA) fprintf(ofp, "%sSHOW_RAW_DATA", others++ ? "|" : ""); if (flags & DEREF_POINTERS) fprintf(ofp, "%sDEREF_POINTERS", others++ ? "|" : ""); fprintf(ofp, ")\n"); } /* * When a request is made to print just a member of a structure or union, * the whole datatype is dumped to a temporary file, and this routine * parses through it for the targeted member. 
 */
static void
parse_for_member(struct datatype_member *dm, ulong flag)
{
	char *s;
	char buf[BUFSIZE];
	char lookfor1[BUFSIZE];
	char lookfor2[BUFSIZE];
	char lookfor3[BUFSIZE];
	char lookfor4[BUFSIZE];
	char lookfor5[BUFSIZE];
	long curpos, last_open_bracket;
	int indent, on, array, embed;
	char *p1;

	s = dm->member;
	indent = 0;
	array = FALSE;
	on = 0;
	embed = 0;
	rewind(pc->tmpfile);

	switch (flag)
	{
	case PARSE_FOR_DATA:
		/* Match " member " or " member[" in gdb's data dump. */
		sprintf(lookfor1, " %s ", s);
		sprintf(lookfor2, " %s[", s);
next_item:
		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			/*
			 * "embed" tracks the indent level of an embedded
			 * aggregate we are currently skipping over; it is
			 * cleared when the closing line is reached.
			 */
			if ((embed && (count_leading_spaces(buf) == embed)) || (strstr(buf, "}}") && embed == count_leading_spaces(buf) - 2))
				embed = 0;

			if (!on && !embed && strstr(buf, "= {") && !strstr(buf, lookfor1))
				embed = count_leading_spaces(buf);

			if (embed)
				continue;

			if (strstr(buf, lookfor1) || strstr(buf, lookfor2)) {
				/* Found the member; "on" turns copying on. */
				on++;
				if (strstr(buf, "= {"))
					indent = count_leading_spaces(buf);
				if (strstr(buf, "["))
					array = TRUE;
			}

			if (on) {
				/* Stop at a sibling at the same indent, or at EOF-of-struct. */
				if ((indent && (on > 1) && (count_leading_spaces(buf) == indent) && !strstr(buf, "}")) || (buf[0] == '}')) {
					break;
				}
				if (indent && (on > 1) && indent == count_leading_spaces(buf) - 2 && strstr(buf, "}}")) {
					fprintf(pc->saved_fp, "%s", buf);
					break;
				}
				if (!indent) {
					/* Scalar member: strip a trailing ", " and emit one line. */
					if ((p1 = strstr(buf, ", \n")))
						sprintf(p1, "\n");
					fprintf(pc->saved_fp, "%s", buf);
					break;
				}
				if (strstr(buf, "}") && (count_leading_spaces(buf) == indent)) {
					/* Closing brace of the member's aggregate. */
					if ((p1 = strstr(buf, "}, \n")))
						sprintf(p1, "}\n");
					fprintf(pc->saved_fp, "%s", buf);
					break;
				}
				fprintf(pc->saved_fp, "%s", buf);
				on++;
			}
		}
		/* An array member may appear more than once; rescan for the rest. */
		if (array) {
			on = array = FALSE;
			on = 0;
			goto next_item;
		}
		break;

	case PARSE_FOR_DECLARATION:
		last_open_bracket = curpos = 0;

		/* The five declaration shapes a member line can take. */
		sprintf(lookfor1, " %s;", s);
		sprintf(lookfor2, "*%s;", s);
		sprintf(lookfor3, " %s[", s);
		sprintf(lookfor4, "*%s[", s);
		sprintf(lookfor5, " %s :", s);

		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			indent = count_leading_spaces(buf);

			switch (indent)
			{
			case 0:
				/* Top-level line: remember position, keep scanning. */
				curpos = ftell(pc->tmpfile);
				continue;

			case INITIAL_INDENT:
				/* Remember where the latest sub-aggregate opened. */
				if (strstr(buf, "{"))
					last_open_bracket = curpos;
				break;

			default:
				if (!on && (indent != INITIAL_INDENT))
					continue;
			}

			if (strstr(buf, lookfor1) || strstr(buf, lookfor2) || strstr(buf, lookfor3) || strstr(buf, lookfor4) || strstr(buf, lookfor5)) {
				if (strstr(buf, "}") && !on) {
					/*
					 * Member is the closing line of an anonymous
					 * aggregate: rewind to its opening brace and
					 * replay the whole aggregate.
					 */
					on = TRUE;
					fseek(pc->tmpfile, last_open_bracket, SEEK_SET);
				} else {
					print_verbatim(pc->saved_fp, buf);
					if (indent == INITIAL_INDENT)
						break;
				}
			} else if (on)
				print_verbatim(pc->saved_fp, buf);

			curpos = ftell(pc->tmpfile);
		}
		break;
	}
}

/*
 * In-memory tree of a gdb structure dump: one node per printed line,
 * linked by parent/inner (nesting) and next/prev (siblings).
 */
struct struct_elem {
	char field_name[BUFSIZE];	/* member name, "" for anonymous */
	unsigned char field_len;
	char value[BUFSIZE];		/* scalar value text, if a leaf */
	unsigned char is_array_root:1;	/* node represents an array, not a member */
	struct struct_elem *parent;
	struct struct_elem *inner;
	struct struct_elem *next;
	struct struct_elem *prev;
};

/*
 * Allocate a child (inner) or sibling (next) node and make it "current".
 * clone_parent distinguishes the two: a sibling shares its parent.
 */
#define ALLOC_XXX_ELEMENT(xxx, clone_parent) \
{ \
	if (current == NULL) { \
		error(FATAL, "Internal error while parsing structure %s\n", dm->name); \
	} \
	current->xxx = (struct struct_elem *)GETBUF(sizeof(struct struct_elem)); \
	if (clone_parent) current->xxx->parent = current->parent; \
	else current->xxx->parent = current; \
	current = current->xxx; \
}

#define ALLOC_INNER_ELEMENT { ALLOC_XXX_ELEMENT(inner, 0) }
#define ALLOC_NEXT_ELEMENT { ALLOC_XXX_ELEMENT(next, 1) }

/* Recursively release a struct_elem tree (children, then siblings). */
static void
free_structure(struct struct_elem *p)
{
	if (p == NULL)
		return;
	free_structure(p->inner);
	free_structure(p->next);
	FREEBUF(p);
}

/*
 * Classify a line that consists only of closing braces (optionally followed
 * by a comma): returns 1 for "}", 2 for "}}", 0 for anything else.
 */
static unsigned char
is_right_brace(const char *b)
{
	unsigned char r = 0;
	for (; *b == ' '; b++);
	if (*b == '}') {
		b++;
		r = 1;
		if (*b == '}') {
			r = 2;
			b++;
		}
	}
	if (*b == ',')
		b++;
	if (*b == '\0')
		return r;
	else
		return 0;
}

/*
 * Locate the node described by a dotted member path "a.b[2].c" within the
 * sibling list starting at s.  Returns NULL if no such member exists.
 */
static struct struct_elem *
find_node(struct struct_elem *s, char *n)
{
	char *p, *b, *e;
	struct struct_elem *t = s;
	unsigned i;

	if (('\0' == *n) || (s == NULL))
		return s;

	/* [n .. p) - struct member with index */
	if ((p = strstr(n, ".")) == NULL)
		p = n + strlen(n);

	/* [n .. b) - struct member without index */
	for (b = n; (b < p) && (*b != '['); b++);

	/* s - is the current level of items [s, s->next, ..., s->...->next] */
	for (; s; s = s->next) {
		if (*s->field_name == '\0')
			continue;
		/* `field_name` doesn't match */
		if (((b - n) != s->field_len) || memcmp(s->field_name, n, b - n))
			continue;
		/* For a case like `pids.node` where pids is an array */
		if (s->is_array_root && *b != '[' && *p)
			return NULL;
		if (*b == '[') { /* Array */
			i = strtol(b + 1, &e, 10);
			/* Check if the current node is an array and
			 * we've parsed the index more or less correctly */
			if (!(s->is_array_root && *e == ']' && (e != b + 1)))
				return NULL;
			/* Look for the i-th element */
			for (s = s->inner; s && i; s = s->next, i--);
			if (i || (s == NULL))
				return NULL;
		}
		/* We've found the node; if it's the last member
		 * in our search string, return it, else descend. */
		if ('\0' == *p)
			return s;
		else
			return find_node(s->inner, p + 1);
	}

	/* We haven't found any field.  This can happen when we've
	 * encountered an anonymous structure or union: retry every
	 * record that has no `field_name`. */
	s = t;
	t = NULL;
	for (; s; s = s->next) {
		if (*s->field_name)
			continue;
		t = find_node(s->inner, n);
		if (t)
			break;
	}
	return t;
}

/*
 * Pretty-print a struct_elem subtree to pc->saved_fp, reproducing gdb's
 * brace/indent layout.  f overrides the printed name of the root node;
 * level/is_array drive the indent depth and array "{{ }}" bracketing.
 */
static void
dump_node(struct struct_elem *p, char *f, unsigned char level, unsigned char is_array)
{
	unsigned int i;
	if (p == NULL)
		return;
	do {
/* Emit indentation proportional to nesting depth, then the payload. */
#define PUT_INDENTED_STRING(m, ...) { \
	for (i = 0; i++ < 2 + 2 * (m * is_array + level); \
		fprintf(pc->saved_fp, " ")); \
	fprintf(pc->saved_fp, __VA_ARGS__); }

		if (p->inner) {
			if (*p->field_name) {
				PUT_INDENTED_STRING(1, "%s = %s\n", f ? f : p->field_name,
					p->inner->is_array_root ? "{{" : "{");
			} else {
				if (f) /* For union */
					PUT_INDENTED_STRING(1, "%s = ", f);
				PUT_INDENTED_STRING(1, "%s\n",
					p->inner->is_array_root ? "{{" : "{");
			}
			dump_node(p->inner, NULL, is_array + level + 1, p->inner->is_array_root);
			PUT_INDENTED_STRING(1, "%s%s\n", p->inner->is_array_root ? "}}" : "}",
				(p->next && !p->next->is_array_root) ? "," : "");
		} else {
			/* Leaf: "name = value" */
			PUT_INDENTED_STRING(1, "%s = %s%s", f ? f : p->field_name,
				p->value, p->next ? ",\n" : "\n");
		}
		if (level) {
			p = p->next;
			if (p && p->is_array_root)
				PUT_INDENTED_STRING(0, "}, {\n");
		}
	} while (p && level);
}

/*
 * Extended member lookup: parse the full gdb structure dump in pc->tmpfile
 * into a struct_elem tree, then find and print the member path given in
 * dm->member (supports nesting and array indices, e.g. "a.b[2].c").
 */
void
parse_for_member_extended(struct datatype_member *dm,
	ulong __attribute__ ((unused)) flag)
{
	struct struct_elem *i, *current = NULL, *root = NULL;

	char buf[BUFSIZE];
	char *p, *p1;
	char *s_e; /* structure_element */
	unsigned int len;
	unsigned char trailing_comma, braces, found = 0;

	rewind(pc->tmpfile);

	root = (struct struct_elem *)GETBUF(sizeof(struct struct_elem));
	current = root;
	ALLOC_INNER_ELEMENT;

	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		/* Strip trailing whitespace, then a trailing comma.
		 * NOTE(review): assumes each line has at least one
		 * non-whitespace character — verify against the dump format. */
		len = strlen(buf) - 1;
		for (; buf[len] <= ' '; buf[len--] = '\0');
		if ((trailing_comma = (buf[len] == ',')))
			buf[len--] = '\0';

		if ((braces = is_right_brace(buf))) {
			/* Pop one tree level per closing brace. */
			for (; braces && current; braces--)
				current = current->parent;

			if ((current->parent == root) || trailing_comma)
				ALLOC_NEXT_ELEMENT;
			continue;
		}

		for (p1 = buf; *p1 == ' '; p1++);

		if ((p = strstr(buf, " = ")) != NULL)
			s_e = p + 3;
		else
			s_e = p1;

		/*
		 * Pointers at this stage:
		 *   p1  - first non-whitespace character of the line
		 *   p   - the " = " separator, or NULL if the line has no
		 *         "name = value" form (e.g. a bare "{")
		 *   s_e - the structure element: a brace, double brace,
		 *         array separator, or scalar value text
		 */
		if (current && p && (p - p1 < BUFSIZE)) {
			/* strncpy(current->field_name, p1, p - p1);
			 * (NOTE: gcc-9.0.1 emits [-Wstringop-truncation] warning) */
			current->field_len = p - p1;
			memcpy(current->field_name, p1, current->field_len);
			current->field_name[current->field_len] = '\0';
		}

		if ( p && (*s_e != '{' || (*s_e == '{' && buf[len] == '}') )) {
			/* Scalar or one-line array
			 *   next = 0x0
			 * or
			 *   files = {0x0, 0x0}
			 */
			strcpy(current->value, s_e);
			if (trailing_comma)
				ALLOC_NEXT_ELEMENT;
		}
		else if ( *s_e == '{' ) {
			/* Opening of a nested aggregate; "{{" marks an array. */
			ALLOC_INNER_ELEMENT;
			if (*(s_e + 1) == '{') {
				current->parent->is_array_root = 1;
				ALLOC_INNER_ELEMENT;
			}
		}
		else if (strstr(s_e, "}, {")) {
			/* Next array element */
			current = current->parent;
			ALLOC_NEXT_ELEMENT;
			ALLOC_INNER_ELEMENT;
		}
		else if (buf == (p = strstr(buf, "struct "))) {
			/* Line of the form "struct foo {": named nested struct. */
			p += 7; /* strlen "struct " */
			p1 = strstr(buf, " {");
			strncpy(current->field_name, p, p1 - p);
			ALLOC_INNER_ELEMENT;
		}
	}

	/* Search every top-level record for the requested member path. */
	for (i = root->inner; i; i = i->next) {
		if ((current = find_node(i->inner, dm->member))) {
			dump_node(current, dm->member, 0, 0);
			found = 1;
			break;
		}
	}

	free_structure(root);
	if (!found)
		error(INFO, "invalid data structure member reference: %s\n",
			dm->member);
}

/*
 * Dig out a member name from a formatted gdb structure declaration dump,
 * and print its offset from the named structure passed in.
 */
static int
show_member_offset(FILE *ofp, struct datatype_member *dm, char *inbuf)
{
	int i, c, len;
	long offset;
	char *t1, *target;
	char *arglist[MAXARGS];
	char buf1[BUFSIZE];
	char fmt[BUFSIZE];
	char workbuf[BUFSIZE];
	int end_of_block;

	/* Member declaration lines are indented; anything else is noise. */
	if (!STRNEQ(inbuf, " ")) {
		fprintf(ofp, "rejecting: %s", inbuf);
		return FALSE;
	}

	/* Track entry into, and exit from, anonymous unions/structs. */
	if (STRNEQ(inbuf, " union {"))
		dm->flags |= IN_UNION;
	if (STRNEQ(inbuf, " struct {"))
		dm->flags |= IN_STRUCT;
	end_of_block = STRNEQ(inbuf, " } ");

	/* Width of the "[offset]" column, based on the datatype size. */
	switch (*gdb_output_radix)
	{
	default:
	case 10:
		sprintf(buf1, "%ld", dm->size);
		break;
	case 16:
		sprintf(buf1, "0x%lx", dm->size);
	}
	len = strlen(buf1) + 4;

	strcpy(workbuf, inbuf);
	c = parse_line(workbuf, arglist);

	/*
	 * Extract the member name ("target") from the declaration:
	 * bitfields use the token before ":", function pointers the token
	 * after "(*", and plain members the last token on the line.
	 */
	target = NULL;
	if (strstr(inbuf, ":")) {
		for (i = 0; i < c; i++) {
			if (i && STREQ(arglist[i], ":")) {
				target = arglist[i-1];
				break;
			}
		}
	} else if (c) {
		for (i = 0; i < c; i++) {
			if (strstr(inbuf, "(*")) {
				if (STRNEQ(arglist[i], "(*"))
					target = arglist[i]+2;
				else if (STRNEQ(arglist[i], "*(*"))
					target = arglist[i]+3;
				else if (STRNEQ(arglist[i], "**(*"))
					target = arglist[i]+4;
				else
					continue;
				if (!(t1 = strstr(target, ")")))
					continue;
				*t1 = NULLCHAR;
				break;
			}
		}
		if (i == c) {
			target = arglist[c-1];
			if (!strstr(target, ";"))
				target = NULL;
		}
	}

	if (!target)
		goto do_empty_offset;

	/* Strip declarator punctuation, leaving just the member name. */
	null_first_space(clean_line(replace_string(target, "*[];()", ' ')));

	if (strlen(target) == 0)
		goto do_empty_offset;

	if (dm->member && !STREQ(dm->member, target)) {
		if (end_of_block)
			dm->flags &= ~(IN_UNION|IN_STRUCT);
		return FALSE;
	}

	offset = MEMBER_OFFSET(dm->name, target);
	if (offset == -1)
		offset = ANON_MEMBER_OFFSET(dm->name, target);

	if (offset == -1)
		goto do_empty_offset;

	if (end_of_block && dm->member) {
		/*
		 * Re-open the enclosing anonymous block header so the
		 * member is shown in context.
		 * NOTE(review): this buf1 value appears to be overwritten
		 * below before use — looks like a dead store; confirm.
		 */
		if (dm->vaddr)
			sprintf(buf1, " [%lx]", offset + dm->vaddr);
		else
			sprintf(buf1, *gdb_output_radix == 10 ?
				" [%ld]" : " [0x%lx]", offset);
		sprintf(fmt, "%c%ds", '%', len+1);
		fprintf(ofp, fmt, " ");

		switch (dm->flags & (IN_UNION|IN_STRUCT))
		{
		case IN_UNION:
			fprintf(ofp, "union {\n");
			break;
		case IN_STRUCT:
			fprintf(ofp, "struct {\n");
			break;
		}
		dm->flags &= ~(IN_UNION|IN_STRUCT);
	}

	/* Print "[offset]" (or "[vaddr+offset]") and then the declaration. */
	if (dm->vaddr)
		sprintf(buf1, " [%lx]", offset + dm->vaddr);
	else
		sprintf(buf1, *gdb_output_radix == 10 ?
			" [%ld]" : " [0x%lx]", offset);

	sprintf(fmt, "%c%ds", '%', len);
	fprintf(ofp, fmt, buf1);
	fprintf(ofp, "%s", &inbuf[3]);

	return TRUE;

do_empty_offset:
	/* No resolvable offset: just indent and echo the line. */
	if (end_of_block)
		dm->flags &= ~(IN_UNION|IN_STRUCT);
	if (dm->member)
		return FALSE;
	len = strlen(buf1)+1;
	fprintf(ofp, "%s%s", space(len), inbuf);
	return FALSE;
}

/*
 * Get and store the size of a "known" array.  This function is only called
 * once per requested array; after the first time, ARRAY_LENGTH() should be
 * used.
 *
 * For data symbols, get_symbol_type() does the work.
 * For structure member arrays, datatype_info() does the work.
 * For two-dimension arrays, or if the designated function above fails,
 * then just parse "whatis" or "ptype" commands as a last resort.
*/ int get_array_length(char *s, int *two_dim, long entry_size) { char copy[BUFSIZE]; char buf[BUFSIZE]; char lookfor1[BUFSIZE]; char lookfor2[BUFSIZE]; int retval; struct datatype_member datatype_member, *dm; struct gnu_request gnu_request, *req; char *p1, *p2; strcpy(copy, s); dm = &datatype_member; BZERO(dm, sizeof(struct datatype_member)); if ((retval = builtin_array_length(s, 0, two_dim))) return retval; /* symbol_search cannot be done with just kernel type information */ if (!(LKCD_KERNTYPES()) && symbol_search(s)) { if (!two_dim) { req = &gnu_request; if ((get_symbol_type(copy, NULL, req) == TYPE_CODE_ARRAY) && req->target_typecode && req->target_length) { retval = req->length / req->target_length; goto store_builtin; } } sprintf(buf, "whatis %s", s); } else { if (arg_to_datatype(copy, dm, RETURN_ON_ERROR)) { if (!dm->member) goto store_builtin; datatype_info(dm->name, dm->member, dm); switch (dm->type) { case UNION_REQUEST: if (entry_size && dm->member_size && (dm->member_typecode == TYPE_CODE_ARRAY)) { retval = dm->member_size/entry_size; goto store_builtin; } sprintf(buf, "ptype union %s", dm->name); break; case STRUCT_REQUEST: if (entry_size && dm->member_size && (dm->member_typecode == TYPE_CODE_ARRAY)) { retval = dm->member_size/entry_size; goto store_builtin; } sprintf(buf, "ptype struct %s", dm->name); break; default: goto store_builtin; } sprintf(lookfor1, " %s[", dm->member); sprintf(lookfor2, "*%s[", dm->member); } else goto store_builtin; } open_tmpfile2(); if (two_dim) *two_dim = 0; gdb_pass_through(buf, pc->tmpfile2, 0); rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { if (STRNEQ(buf, "type = ") && (p1 = strstr(buf, "[")) && (p2 = strstr(buf, "]")) && (index(buf, '[') == rindex(buf, '['))) { *p2 = NULLCHAR; p1++; if (strlen(p1)) { retval = atoi(p1); break; } } if (STRNEQ(buf, "type = ") && (count_chars(buf, '[') == 2) && (count_chars(buf, ']') == 2) && two_dim) { p1 = strstr(buf, "["); p2 = strstr(buf, "]"); *p2 = NULLCHAR; 
p1++; if (strlen(p1)) *two_dim = atoi(p1); else break; p2++; p1 = strstr(p2, "["); p2 = strstr(p1, "]"); p1++; if (strlen(p1)) retval = atoi(p1); else { retval = 0; *two_dim = 0; break; } break; } if (dm->type && (strstr(buf, lookfor1) || strstr(buf, lookfor2)) && (p1 = strstr(buf, "[")) && (p2 = strstr(buf, "]")) && (index(buf, '[') == rindex(buf, '['))) { *p2 = NULLCHAR; p1++; if (strlen(p1)) { retval = atoi(p1); break; } } } close_tmpfile2(); store_builtin: return (builtin_array_length(s, retval, two_dim)); } /* * Get and store the size of a "known" array. * A wrapper for get_array_length(), for cases in which * the name of the result to be stored is different from the * structure.member to be evaluated. */ int get_array_length_alt(char *name, char *s, int *two_dim, long entry_size) { int retval; retval = get_array_length(s, two_dim, entry_size); if (retval) retval = builtin_array_length(name, retval, two_dim); return retval; } /* * Designed for use by non-debug kernels, but used by all. 
*/ int builtin_array_length(char *s, int len, int *two_dim) { int *lenptr; int *dimptr; lenptr = dimptr = NULL; if (STREQ(s, "kmem_cache_s.name")) lenptr = &array_table.kmem_cache_s_name; else if (STREQ(s, "kmem_cache_s.c_name")) lenptr = &array_table.kmem_cache_s_c_name; else if (STREQ(s, "kmem_cache_s.array")) lenptr = &array_table.kmem_cache_s_array; else if (STREQ(s, "kmem_cache.array")) lenptr = &array_table.kmem_cache_s_array; else if (STREQ(s, "kmem_cache_s.cpudata")) lenptr = &array_table.kmem_cache_s_cpudata; else if (STREQ(s, "log_buf")) lenptr = &array_table.log_buf; else if (STREQ(s, "irq_desc") || STREQ(s, "_irq_desc")) lenptr = &array_table.irq_desc; else if (STREQ(s, "irq_action")) lenptr = &array_table.irq_action; else if (STREQ(s, "timer_vec.vec")) lenptr = &array_table.timer_vec_vec; else if (STREQ(s, "timer_vec_root.vec")) lenptr = &array_table.timer_vec_root_vec; else if (STREQ(s, "tvec_s.vec")) lenptr = &array_table.tvec_s_vec; else if (STREQ(s, "tvec_root_s.vec")) lenptr = &array_table.tvec_root_s_vec; else if (STREQ(s, "net_device.name")) lenptr = &array_table.net_device_name; else if (STREQ(s, "neigh_table.hash_buckets")) lenptr = &array_table.neigh_table_hash_buckets; else if (STREQ(s, "neighbour.ha")) lenptr = &array_table.neighbour_ha; else if (STREQ(s, "swap_info")) lenptr = &array_table.swap_info; else if (STREQ(s, "page_hash_table")) lenptr = &array_table.page_hash_table; else if (STREQ(s, "pglist_data.node_zones")) lenptr = &array_table.pglist_data_node_zones; else if (STREQ(s, "zone_struct.free_area")) lenptr = &array_table.zone_struct_free_area; else if (STREQ(s, "zone.free_area")) lenptr = &array_table.zone_free_area; else if (STREQ(s, "prio_array.queue")) lenptr = &array_table.prio_array_queue; else if (STREQ(s, "height_to_maxindex")) lenptr = &array_table.height_to_maxindex; else if (STREQ(s, "height_to_maxnodes")) lenptr = &array_table.height_to_maxnodes; else if (STREQ(s, "pid_hash")) lenptr = &array_table.pid_hash; else if 
(STREQ(s, "free_area")) { lenptr = &array_table.free_area; if (two_dim) dimptr = &array_table.free_area_DIMENSION; } else if (STREQ(s, "kmem_cache.node")) lenptr = &array_table.kmem_cache_node; else if (STREQ(s, "kmem_cache.cpu_slab")) lenptr = &array_table.kmem_cache_cpu_slab; else if (STREQ(s, "rt_prio_array.queue")) lenptr = &array_table.rt_prio_array_queue; else if (STREQ(s, "task_struct.rlim")) lenptr = &array_table.task_struct_rlim; else if (STREQ(s, "signal_struct.rlim")) lenptr = &array_table.signal_struct_rlim; else if (STREQ(s, "vm_numa_stat")) lenptr = &array_table.vm_numa_stat; else if (STREQ(s, "pid.numbers")) lenptr = &array_table.pid_numbers; if (!lenptr) /* not stored */ return(len); if (*lenptr) { /* pre-set */ if (dimptr && two_dim) *two_dim = *dimptr; return(*lenptr); } if (len) { *lenptr = len; /* initialize passed-in value(s) */ if (dimptr && two_dim) *dimptr = *two_dim; return(len); } return(0); /* in table, but not set yet */ } /* * "help -o" output */ void dump_offset_table(char *spec, ulong makestruct) { char buf[BUFSIZE], *p1; char revname[BUFSIZE]; struct new_utsname *uts; long long data_debug; data_debug = pc->flags & DATADEBUG; pc->flags &= ~DATADEBUG; uts = NULL; if (makestruct) { uts = &kt->utsname; sprintf(revname, "%s_%s", pc->machine_type, uts->release); p1 = revname + strlen(pc->machine_type); while (*p1) { if (((*p1 >= '0') && (*p1 <= '9')) || ((*p1 >= 'a') && (*p1 <= 'z')) || ((*p1 >= 'A') && (*p1 <= 'Z'))) p1++; else *p1++ = '_'; } } if (spec || makestruct) open_tmpfile(); fprintf(fp, " offset_table:\n"); fprintf(fp, " list_head_next: %ld\n", OFFSET(list_head_next)); fprintf(fp, " list_head_prev: %ld\n", OFFSET(list_head_prev)); fprintf(fp, " task_struct_pid: %ld\n", OFFSET(task_struct_pid)); fprintf(fp, " task_struct_state: %ld\n", OFFSET(task_struct_state)); fprintf(fp, " task_struct_exit_state: %ld\n", OFFSET(task_struct_exit_state)); fprintf(fp, " task_struct_comm: %ld\n", OFFSET(task_struct_comm)); fprintf(fp, " 
task_struct_mm: %ld\n", OFFSET(task_struct_mm)); fprintf(fp, " task_struct_tss: %ld\n", OFFSET(task_struct_tss)); fprintf(fp, " task_struct_thread: %ld\n", OFFSET(task_struct_thread)); fprintf(fp, " task_struct_active_mm: %ld\n", OFFSET(task_struct_active_mm)); fprintf(fp, " task_struct_tss_eip: %ld\n", OFFSET(task_struct_tss_eip)); fprintf(fp, " task_struct_tss_esp: %ld\n", OFFSET(task_struct_tss_esp)); fprintf(fp, " task_struct_tss_ksp: %ld\n", OFFSET(task_struct_tss_ksp)); fprintf(fp, " task_struct_thread_eip: %ld\n", OFFSET(task_struct_thread_eip)); fprintf(fp, " inactive_task_frame_bp: %ld\n", OFFSET(inactive_task_frame_bp)); fprintf(fp, " inactive_task_frame_ret_addr: %ld\n", OFFSET(inactive_task_frame_ret_addr)); fprintf(fp, " task_struct_thread_esp: %ld\n", OFFSET(task_struct_thread_esp)); fprintf(fp, " task_struct_thread_ksp: %ld\n", OFFSET(task_struct_thread_ksp)); fprintf(fp, " task_struct_thread_reg01: %ld\n", OFFSET(task_struct_thread_reg01)); fprintf(fp, " task_struct_thread_reg03: %ld\n", OFFSET(task_struct_thread_reg03)); fprintf(fp, " task_struct_thread_reg29: %ld\n", OFFSET(task_struct_thread_reg29)); fprintf(fp, " task_struct_thread_reg31: %ld\n", OFFSET(task_struct_thread_reg31)); fprintf(fp, "task_struct_thread_context_x19: %ld\n", OFFSET(task_struct_thread_context_x19)); fprintf(fp, "task_struct_thread_context_x20: %ld\n", OFFSET(task_struct_thread_context_x20)); fprintf(fp, "task_struct_thread_context_x21: %ld\n", OFFSET(task_struct_thread_context_x21)); fprintf(fp, "task_struct_thread_context_x22: %ld\n", OFFSET(task_struct_thread_context_x22)); fprintf(fp, "task_struct_thread_context_x23: %ld\n", OFFSET(task_struct_thread_context_x23)); fprintf(fp, "task_struct_thread_context_x24: %ld\n", OFFSET(task_struct_thread_context_x24)); fprintf(fp, "task_struct_thread_context_x25: %ld\n", OFFSET(task_struct_thread_context_x25)); fprintf(fp, "task_struct_thread_context_x26: %ld\n", OFFSET(task_struct_thread_context_x26)); fprintf(fp, 
"task_struct_thread_context_x27: %ld\n", OFFSET(task_struct_thread_context_x27)); fprintf(fp, "task_struct_thread_context_x28: %ld\n", OFFSET(task_struct_thread_context_x28)); fprintf(fp, " task_struct_thread_context_fp: %ld\n", OFFSET(task_struct_thread_context_fp)); fprintf(fp, " task_struct_thread_context_sp: %ld\n", OFFSET(task_struct_thread_context_sp)); fprintf(fp, " task_struct_thread_context_pc: %ld\n", OFFSET(task_struct_thread_context_pc)); fprintf(fp, " task_struct_processor: %ld\n", OFFSET(task_struct_processor)); fprintf(fp, " task_struct_p_pptr: %ld\n", OFFSET(task_struct_p_pptr)); fprintf(fp, " task_struct_parent: %ld\n", OFFSET(task_struct_parent)); fprintf(fp, " task_struct_has_cpu: %ld\n", OFFSET(task_struct_has_cpu)); fprintf(fp, " task_struct_cpus_runnable: %ld\n", OFFSET(task_struct_cpus_runnable)); fprintf(fp, " task_struct_next_task: %ld\n", OFFSET(task_struct_next_task)); fprintf(fp, " task_struct_files: %ld\n", OFFSET(task_struct_files)); fprintf(fp, " task_struct_fs: %ld\n", OFFSET(task_struct_fs)); fprintf(fp, " task_struct_pidhash_next: %ld\n", OFFSET(task_struct_pidhash_next)); fprintf(fp, " task_struct_next_run: %ld\n", OFFSET(task_struct_next_run)); fprintf(fp, " task_struct_flags: %ld\n", OFFSET(task_struct_flags)); fprintf(fp, " task_struct_sig: %ld\n", OFFSET(task_struct_sig)); fprintf(fp, " task_struct_signal: %ld\n", OFFSET(task_struct_signal)); fprintf(fp, " task_struct_blocked: %ld\n", OFFSET(task_struct_blocked)); fprintf(fp, " task_struct_sigpending: %ld\n", OFFSET(task_struct_sigpending)); fprintf(fp, " task_struct_pending: %ld\n", OFFSET(task_struct_pending)); fprintf(fp, " task_struct_sigqueue: %ld\n", OFFSET(task_struct_sigqueue)); fprintf(fp, " task_struct_sighand: %ld\n", OFFSET(task_struct_sighand)); fprintf(fp, " task_struct_run_list: %ld\n", OFFSET(task_struct_run_list)); fprintf(fp, " task_struct_pgrp: %ld\n", OFFSET(task_struct_pgrp)); fprintf(fp, " task_struct_tgid: %ld\n", OFFSET(task_struct_tgid)); fprintf(fp, " 
task_struct_namespace: %ld\n", OFFSET(task_struct_namespace)); fprintf(fp, " task_struct_rss_stat: %ld\n", OFFSET(task_struct_rss_stat)); fprintf(fp, " task_rss_stat_count: %ld\n", OFFSET(task_rss_stat_count)); fprintf(fp, " task_struct_pids: %ld\n", OFFSET(task_struct_pids)); fprintf(fp, " task_struct_pid_links: %ld\n", OFFSET(task_struct_pid_links)); fprintf(fp, " task_struct_last_run: %ld\n", OFFSET(task_struct_last_run)); fprintf(fp, " task_struct_timestamp: %ld\n", OFFSET(task_struct_timestamp)); fprintf(fp, " task_struct_sched_info: %ld\n", OFFSET(task_struct_sched_info)); fprintf(fp, " task_struct_rt: %ld\n", OFFSET(task_struct_rt)); fprintf(fp, " sched_rt_entity_run_list: %ld\n", OFFSET(sched_rt_entity_run_list)); fprintf(fp, " sched_info_last_arrival: %ld\n", OFFSET(sched_info_last_arrival)); fprintf(fp, " task_struct_thread_info: %ld\n", OFFSET(task_struct_thread_info)); fprintf(fp, " task_struct_stack: %ld\n", OFFSET(task_struct_stack)); fprintf(fp, " task_struct_nsproxy: %ld\n", OFFSET(task_struct_nsproxy)); fprintf(fp, " task_struct_rlim: %ld\n", OFFSET(task_struct_rlim)); fprintf(fp, " task_struct_prio: %ld\n", OFFSET(task_struct_prio)); fprintf(fp, " task_struct_on_rq: %ld\n", OFFSET(task_struct_on_rq)); fprintf(fp, " task_struct_policy: %ld\n", OFFSET(task_struct_policy)); fprintf(fp, " thread_info_task: %ld\n", OFFSET(thread_info_task)); fprintf(fp, " thread_info_cpu: %ld\n", OFFSET(thread_info_cpu)); fprintf(fp, " thread_info_flags: %ld\n", OFFSET(thread_info_flags)); fprintf(fp, " thread_info_previous_esp: %ld\n", OFFSET(thread_info_previous_esp)); fprintf(fp, " nsproxy_mnt_ns: %ld\n", OFFSET(nsproxy_mnt_ns)); fprintf(fp, " mnt_namespace_root: %ld\n", OFFSET(mnt_namespace_root)); fprintf(fp, " mnt_namespace_list: %ld\n", OFFSET(mnt_namespace_list)); fprintf(fp, " mnt_namespace_mounts: %ld\n", OFFSET(mnt_namespace_mounts)); fprintf(fp, " mnt_namespace_nr_mounts: %ld\n", OFFSET(mnt_namespace_nr_mounts)); fprintf(fp, " pid_namespace_idr: %ld\n", 
OFFSET(pid_namespace_idr)); fprintf(fp, " idr_idr_rt: %ld\n", OFFSET(idr_idr_rt)); fprintf(fp, " pid_link_pid: %ld\n", OFFSET(pid_link_pid)); fprintf(fp, " pid_hash_chain: %ld\n", OFFSET(pid_hash_chain)); fprintf(fp, " pid_numbers: %ld\n", OFFSET(pid_numbers)); fprintf(fp, " upid_nr: %ld\n", OFFSET(upid_nr)); fprintf(fp, " upid_ns: %ld\n", OFFSET(upid_ns)); fprintf(fp, " upid_pid_chain: %ld\n", OFFSET(upid_pid_chain)); fprintf(fp, " pid_tasks: %ld\n", OFFSET(pid_tasks)); fprintf(fp, " hlist_node_next: %ld\n", OFFSET(hlist_node_next)); fprintf(fp, " hlist_node_pprev: %ld\n", OFFSET(hlist_node_pprev)); fprintf(fp, " pid_pid_chain: %ld\n", OFFSET(pid_pid_chain)); fprintf(fp, " thread_struct_eip: %ld\n", OFFSET(thread_struct_eip)); fprintf(fp, " thread_struct_esp: %ld\n", OFFSET(thread_struct_esp)); fprintf(fp, " thread_struct_ksp: %ld\n", OFFSET(thread_struct_ksp)); fprintf(fp, " thread_struct_rip: %ld\n", OFFSET(thread_struct_rip)); fprintf(fp, " thread_struct_rsp: %ld\n", OFFSET(thread_struct_rsp)); fprintf(fp, " thread_struct_rsp0: %ld\n", OFFSET(thread_struct_rsp0)); fprintf(fp, " signal_struct_count: %ld\n", OFFSET(signal_struct_count)); fprintf(fp, " signal_struct_nr_threads: %ld\n", OFFSET(signal_struct_nr_threads)); fprintf(fp, " signal_struct_action: %ld\n", OFFSET(signal_struct_action)); fprintf(fp, " signal_struct_shared_pending: %ld\n", OFFSET(signal_struct_shared_pending)); fprintf(fp, " signal_struct_rlim: %ld\n", OFFSET(signal_struct_rlim)); fprintf(fp, " task_struct_start_time: %ld\n", OFFSET(task_struct_start_time)); fprintf(fp, " task_struct_times: %ld\n", OFFSET(task_struct_times)); fprintf(fp, " task_struct_cpu: %ld\n", OFFSET(task_struct_cpu)); fprintf(fp, " task_struct_utime: %ld\n", OFFSET(task_struct_utime)); fprintf(fp, " task_struct_stime: %ld\n", OFFSET(task_struct_stime)); fprintf(fp, " tms_tms_utime: %ld\n", OFFSET(tms_tms_utime)); fprintf(fp, " tms_tms_stime: %ld\n", OFFSET(tms_tms_stime)); fprintf(fp, " timekeeper_xtime: %ld\n", 
OFFSET(timekeeper_xtime)); fprintf(fp, " timekeeper_xtime_sec: %ld\n", OFFSET(timekeeper_xtime_sec)); fprintf(fp, " k_sigaction_sa: %ld\n", OFFSET(k_sigaction_sa)); fprintf(fp, " sigaction_sa_handler: %ld\n", OFFSET(sigaction_sa_handler)); fprintf(fp, " sigaction_sa_flags: %ld\n", OFFSET(sigaction_sa_flags)); fprintf(fp, " sigaction_sa_mask: %ld\n", OFFSET(sigaction_sa_mask)); fprintf(fp, " sigpending_head: %ld\n", OFFSET(sigpending_head)); fprintf(fp, " sigpending_signal: %ld\n", OFFSET(sigpending_signal)); fprintf(fp, " sigpending_list: %ld\n", OFFSET(sigpending_list)); fprintf(fp, " signal_queue_next: %ld\n", OFFSET(signal_queue_next)); fprintf(fp, " signal_queue_info: %ld\n", OFFSET(signal_queue_info)); fprintf(fp, " sigqueue_next: %ld\n", OFFSET(sigqueue_next)); fprintf(fp, " sigqueue_info: %ld\n", OFFSET(sigqueue_info)); fprintf(fp, " sigqueue_list: %ld\n", OFFSET(sigqueue_list)); fprintf(fp, " sighand_struct_action: %ld\n", OFFSET(sighand_struct_action)); fprintf(fp, " siginfo_si_signo: %ld\n", OFFSET(siginfo_si_signo)); fprintf(fp, " thread_struct_fph: %ld\n", OFFSET(thread_struct_fph)); fprintf(fp, " thread_struct_cr3: %ld\n", OFFSET(thread_struct_cr3)); fprintf(fp, " thread_struct_ptbr: %ld\n", OFFSET(thread_struct_ptbr)); fprintf(fp, " thread_struct_pg_tables: %ld\n", OFFSET(thread_struct_pg_tables)); fprintf(fp, " switch_stack_r26: %ld\n", OFFSET(switch_stack_r26)); fprintf(fp, " switch_stack_b0: %ld\n", OFFSET(switch_stack_b0)); fprintf(fp, " switch_stack_ar_bspstore: %ld\n", OFFSET(switch_stack_ar_bspstore)); fprintf(fp, " switch_stack_ar_pfs: %ld\n", OFFSET(switch_stack_ar_pfs)); fprintf(fp, " switch_stack_ar_rnat: %ld\n", OFFSET(switch_stack_ar_rnat)); fprintf(fp, " switch_stack_pr: %ld\n", OFFSET(switch_stack_pr)); fprintf(fp, " cpuinfo_ia64_proc_freq: %ld\n", OFFSET(cpuinfo_ia64_proc_freq)); fprintf(fp, " cpuinfo_ia64_unimpl_va_mask: %ld\n", OFFSET(cpuinfo_ia64_unimpl_va_mask)); fprintf(fp, " cpuinfo_ia64_unimpl_pa_mask: %ld\n", 
OFFSET(cpuinfo_ia64_unimpl_pa_mask)); fprintf(fp, " device_node_type: %ld\n", OFFSET(device_node_type)); fprintf(fp, " device_node_allnext: %ld\n", OFFSET(device_node_allnext)); fprintf(fp, " device_node_properties: %ld\n", OFFSET(device_node_properties)); fprintf(fp, " property_name: %ld\n", OFFSET(property_name)); fprintf(fp, " property_value: %ld\n", OFFSET(property_value)); fprintf(fp, " property_next: %ld\n", OFFSET(property_next)); fprintf(fp, " machdep_calls_setup_residual: %ld\n", OFFSET(machdep_calls_setup_residual)); fprintf(fp, " RESIDUAL_VitalProductData: %ld\n", OFFSET(RESIDUAL_VitalProductData)); fprintf(fp, " VPD_ProcessorHz: %ld\n", OFFSET(VPD_ProcessorHz)); fprintf(fp, " bd_info_bi_intfreq: %ld\n", OFFSET(bd_info_bi_intfreq)); fprintf(fp, " hwrpb_struct_cycle_freq: %ld\n", OFFSET(hwrpb_struct_cycle_freq)); fprintf(fp, " hwrpb_struct_processor_offset: %ld\n", OFFSET(hwrpb_struct_processor_offset)); fprintf(fp, " hwrpb_struct_processor_size: %ld\n", OFFSET(hwrpb_struct_processor_size)); fprintf(fp, " percpu_struct_halt_PC: %ld\n", OFFSET(percpu_struct_halt_PC)); fprintf(fp, " percpu_struct_halt_ra: %ld\n", OFFSET(percpu_struct_halt_ra)); fprintf(fp, " percpu_struct_halt_pv: %ld\n", OFFSET(percpu_struct_halt_pv)); fprintf(fp, " mm_struct_mmap: %ld\n", OFFSET(mm_struct_mmap)); fprintf(fp, " mm_struct_pgd: %ld\n", OFFSET(mm_struct_pgd)); fprintf(fp, " mm_struct_mm_count: %ld\n", OFFSET(mm_struct_mm_count)); fprintf(fp, " mm_struct_rss: %ld\n", OFFSET(mm_struct_rss)); fprintf(fp, " mm_struct_anon_rss: %ld\n", OFFSET(mm_struct_anon_rss)); fprintf(fp, " mm_struct_file_rss: %ld\n", OFFSET(mm_struct_file_rss)); fprintf(fp, " mm_struct_total_vm: %ld\n", OFFSET(mm_struct_total_vm)); fprintf(fp, " mm_struct_start_code: %ld\n", OFFSET(mm_struct_start_code)); fprintf(fp, " mm_struct_arg_start: %ld\n", OFFSET(mm_struct_arg_start)); fprintf(fp, " mm_struct_arg_end: %ld\n", OFFSET(mm_struct_arg_end)); fprintf(fp, " mm_struct_env_start: %ld\n", 
OFFSET(mm_struct_env_start)); fprintf(fp, " mm_struct_env_end: %ld\n", OFFSET(mm_struct_env_end)); fprintf(fp, " mm_struct_rss_stat: %ld\n", OFFSET(mm_struct_rss_stat)); fprintf(fp, " mm_rss_stat_count: %ld\n", OFFSET(mm_rss_stat_count)); fprintf(fp, " vm_area_struct_vm_mm: %ld\n", OFFSET(vm_area_struct_vm_mm)); fprintf(fp, " vm_area_struct_vm_next: %ld\n", OFFSET(vm_area_struct_vm_next)); fprintf(fp, " vm_area_struct_vm_start: %ld\n", OFFSET(vm_area_struct_vm_start)); fprintf(fp, " vm_area_struct_vm_end: %ld\n", OFFSET(vm_area_struct_vm_end)); fprintf(fp, " vm_area_struct_vm_flags: %ld\n", OFFSET(vm_area_struct_vm_flags)); fprintf(fp, " vm_area_struct_vm_file: %ld\n", OFFSET(vm_area_struct_vm_file)); fprintf(fp, " vm_area_struct_vm_offset: %ld\n", OFFSET(vm_area_struct_vm_offset)); fprintf(fp, " vm_area_struct_vm_pgoff: %ld\n", OFFSET(vm_area_struct_vm_pgoff)); fprintf(fp, " vm_struct_addr: %ld\n", OFFSET(vm_struct_addr)); fprintf(fp, " vm_struct_size: %ld\n", OFFSET(vm_struct_size)); fprintf(fp, " vm_struct_next: %ld\n", OFFSET(vm_struct_next)); fprintf(fp, " vmap_area_va_start: %ld\n", OFFSET(vmap_area_va_start)); fprintf(fp, " vmap_area_va_end: %ld\n", OFFSET(vmap_area_va_end)); fprintf(fp, " vmap_area_list: %ld\n", OFFSET(vmap_area_list)); fprintf(fp, " vmap_area_vm: %ld\n", OFFSET(vmap_area_vm)); fprintf(fp, " vmap_area_flags: %ld\n", OFFSET(vmap_area_flags)); fprintf(fp, " vmap_area_purge_list: %ld\n", OFFSET(vmap_area_purge_list)); fprintf(fp, " vmap_node_busy: %ld\n", OFFSET(vmap_node_busy)); fprintf(fp, " rb_list_head: %ld\n", OFFSET(rb_list_head)); fprintf(fp, " module_size_of_struct: %ld\n", OFFSET(module_size_of_struct)); fprintf(fp, " module_next: %ld\n", OFFSET(module_next)); fprintf(fp, " module_name: %ld\n", OFFSET(module_name)); fprintf(fp, " module_syms: %ld\n", OFFSET(module_syms)); fprintf(fp, " module_nsyms: %ld\n", OFFSET(module_nsyms)); fprintf(fp, " module_size: %ld\n", OFFSET(module_size)); fprintf(fp, " module_flags: %ld\n", 
OFFSET(module_flags)); fprintf(fp, " module_num_syms: %ld\n", OFFSET(module_num_syms)); fprintf(fp, " module_gpl_syms: %ld\n", OFFSET(module_gpl_syms)); fprintf(fp, " module_num_gpl_syms: %ld\n", OFFSET(module_num_gpl_syms)); fprintf(fp, " module_list: %ld\n", OFFSET(module_list)); fprintf(fp, " module_module_core: %ld\n", OFFSET(module_module_core)); fprintf(fp, " module_core_size: %ld\n", OFFSET(module_core_size)); fprintf(fp, " module_core_text_size: %ld\n", OFFSET(module_core_text_size)); fprintf(fp, " module_init_size: %ld\n", OFFSET(module_init_size)); fprintf(fp, " module_init_text_size: %ld\n", OFFSET(module_init_text_size)); fprintf(fp, " module_module_init: %ld\n", OFFSET(module_module_init)); fprintf(fp, " module_module_core_rx: %ld\n", OFFSET(module_module_core_rx)); fprintf(fp, " module_module_core_rw: %ld\n", OFFSET(module_module_core_rw)); fprintf(fp, " module_core_size_rx: %ld\n", OFFSET(module_core_size_rx)); fprintf(fp, " module_core_size_rw: %ld\n", OFFSET(module_core_size_rw)); fprintf(fp, " module_module_init_rx: %ld\n", OFFSET(module_module_init_rx)); fprintf(fp, " module_module_init_rw: %ld\n", OFFSET(module_module_init_rw)); fprintf(fp, " module_init_size_rx: %ld\n", OFFSET(module_init_size_rx)); fprintf(fp, " module_init_size_rw: %ld\n", OFFSET(module_init_size_rw)); fprintf(fp, " module_num_symtab: %ld\n", OFFSET(module_num_symtab)); fprintf(fp, " module_symtab: %ld\n", OFFSET(module_symtab)); fprintf(fp, " module_strtab: %ld\n", OFFSET(module_strtab)); fprintf(fp, " module_percpu: %ld\n", OFFSET(module_percpu)); fprintf(fp, " module_mem: %ld\n", OFFSET(module_mem)); fprintf(fp, " module_memory_base: %ld\n", OFFSET(module_memory_base)); fprintf(fp, " module_memory_size: %ld\n", OFFSET(module_memory_size)); fprintf(fp, " module_sect_attrs: %ld\n", OFFSET(module_sect_attrs)); fprintf(fp, " module_sect_attrs_attrs: %ld\n", OFFSET(module_sect_attrs_attrs)); fprintf(fp, " module_sect_attrs_nsections: %ld\n", 
OFFSET(module_sect_attrs_nsections)); fprintf(fp, " module_sect_attr_mattr: %ld\n", OFFSET(module_sect_attr_mattr)); fprintf(fp, " module_sect_attr_name: %ld\n", OFFSET(module_sect_attr_name)); fprintf(fp, " module_sect_attr_address: %ld\n", OFFSET(module_sect_attr_address)); fprintf(fp, " attribute_owner: %ld\n", OFFSET(attribute_owner)); fprintf(fp, " module_sect_attr_attr: %ld\n", OFFSET(module_sect_attr_attr)); fprintf(fp, " module_sections_attrs: %ld\n", OFFSET(module_sections_attrs)); fprintf(fp, " module_attribute_attr: %ld\n", OFFSET(module_attribute_attr)); fprintf(fp, " module_kallsyms_start: %ld\n", OFFSET(module_kallsyms_start)); fprintf(fp, " kallsyms_header_sections: %ld\n", OFFSET(kallsyms_header_sections)); fprintf(fp, " kallsyms_header_section_off: %ld\n", OFFSET(kallsyms_header_section_off)); fprintf(fp, " kallsyms_header_symbols: %ld\n", OFFSET(kallsyms_header_symbols)); fprintf(fp, " kallsyms_header_symbol_off: %ld\n", OFFSET(kallsyms_header_symbol_off)); fprintf(fp, " kallsyms_header_string_off: %ld\n", OFFSET(kallsyms_header_string_off)); fprintf(fp, " kallsyms_symbol_section_off: %ld\n", OFFSET(kallsyms_symbol_section_off)); fprintf(fp, " kallsyms_symbol_symbol_addr: %ld\n", OFFSET(kallsyms_symbol_symbol_addr)); fprintf(fp, " kallsyms_symbol_name_off: %ld\n", OFFSET(kallsyms_symbol_name_off)); fprintf(fp, " kallsyms_section_start: %ld\n", OFFSET(kallsyms_section_start)); fprintf(fp, " kallsyms_section_size: %ld\n", OFFSET(kallsyms_section_size)); fprintf(fp, " kallsyms_section_name_off: %ld\n", OFFSET(kallsyms_section_name_off)); fprintf(fp, " kernel_symbol_value: %ld\n", OFFSET(kernel_symbol_value)); fprintf(fp, " module_taints: %ld\n", OFFSET(module_taints)); fprintf(fp, " module_license_gplok: %ld\n", OFFSET(module_license_gplok)); fprintf(fp, " module_gpgsig_ok: %ld\n", OFFSET(module_gpgsig_ok)); fprintf(fp, " tnt_bit: %ld\n", OFFSET(tnt_bit)); fprintf(fp, " tnt_true: %ld\n", OFFSET(tnt_true)); fprintf(fp, " tnt_false: %ld\n", 
OFFSET(tnt_false)); fprintf(fp, " tnt_mod: %ld\n", OFFSET(tnt_mod)); fprintf(fp, " page_next: %ld\n", OFFSET(page_next)); fprintf(fp, " page_prev: %ld\n", OFFSET(page_prev)); fprintf(fp, " page_next_hash: %ld\n", OFFSET(page_next_hash)); fprintf(fp, " page_list: %ld\n", OFFSET(page_list)); fprintf(fp, " page_list_next: %ld\n", OFFSET(page_list_next)); fprintf(fp, " page_list_prev: %ld\n", OFFSET(page_list_prev)); fprintf(fp, " page_inode: %ld\n", OFFSET(page_inode)); fprintf(fp, " page_offset: %ld\n", OFFSET(page_offset)); fprintf(fp, " page_count: %ld\n", OFFSET(page_count)); fprintf(fp, " page_flags: %ld\n", OFFSET(page_flags)); fprintf(fp, " page_mapping: %ld\n", OFFSET(page_mapping)); fprintf(fp, " page_index: %ld\n", OFFSET(page_index)); fprintf(fp, " page_buffers: %ld\n", OFFSET(page_buffers)); fprintf(fp, " page_lru: %ld\n", OFFSET(page_lru)); fprintf(fp, " page_pte: %ld\n", OFFSET(page_pte)); fprintf(fp, " page_inuse: %ld\n", OFFSET(page_inuse)); fprintf(fp, " page_objects: %ld\n", OFFSET(page_objects)); fprintf(fp, " page_slab: %ld\n", OFFSET(page_slab)); fprintf(fp, " page_slab_page: %ld\n", OFFSET(page_slab_page)); fprintf(fp, " page_first_page: %ld\n", OFFSET(page_first_page)); fprintf(fp, " page_freelist: %ld\n", OFFSET(page_freelist)); fprintf(fp, " page_s_mem: %ld\n", OFFSET(page_s_mem)); fprintf(fp, " page_active: %ld\n", OFFSET(page_active)); fprintf(fp, " page_compound_head: %ld\n", OFFSET(page_compound_head)); fprintf(fp, " page_private: %ld\n", OFFSET(page_private)); fprintf(fp, " page_page_type: %ld\n", OFFSET(page_page_type)); fprintf(fp, " trace_print_flags_mask: %ld\n", OFFSET(trace_print_flags_mask)); fprintf(fp, " trace_print_flags_name: %ld\n", OFFSET(trace_print_flags_name)); fprintf(fp, " swap_info_struct_swap_file: %ld\n", OFFSET(swap_info_struct_swap_file)); fprintf(fp, " swap_info_struct_swap_vfsmnt: %ld\n", OFFSET(swap_info_struct_swap_vfsmnt)); fprintf(fp, " swap_info_struct_flags: %ld\n", OFFSET(swap_info_struct_flags)); 
fprintf(fp, " swap_info_struct_swap_map: %ld\n", OFFSET(swap_info_struct_swap_map)); fprintf(fp, " swap_info_struct_swap_device: %ld\n", OFFSET(swap_info_struct_swap_device)); fprintf(fp, " swap_info_struct_prio: %ld\n", OFFSET(swap_info_struct_prio)); fprintf(fp, " swap_info_struct_max: %ld\n", OFFSET(swap_info_struct_max)); fprintf(fp, " swap_info_struct_pages: %ld\n", OFFSET(swap_info_struct_pages)); fprintf(fp, " swap_info_struct_inuse_pages: %ld\n", OFFSET(swap_info_struct_inuse_pages)); fprintf(fp, "swap_info_struct_old_block_size: %ld\n", OFFSET(swap_info_struct_old_block_size)); fprintf(fp, " swap_info_struct_bdev: %ld\n", OFFSET(swap_info_struct_bdev)); fprintf(fp, " block_device_bd_inode: %ld\n", OFFSET(block_device_bd_inode)); fprintf(fp, " block_device_bd_list: %ld\n", OFFSET(block_device_bd_list)); fprintf(fp, " block_device_bd_disk: %ld\n", OFFSET(block_device_bd_disk)); fprintf(fp, " block_device_bd_device: %ld\n", OFFSET(block_device_bd_device)); fprintf(fp, " block_device_bd_stats: %ld\n", OFFSET(block_device_bd_stats)); fprintf(fp, " address_space_nrpages: %ld\n", OFFSET(address_space_nrpages)); fprintf(fp, " address_space_page_tree: %ld\n", OFFSET(address_space_page_tree)); fprintf(fp, " gendisk_major: %ld\n", OFFSET(gendisk_major)); fprintf(fp, " gendisk_fops: %ld\n", OFFSET(gendisk_fops)); fprintf(fp, " gendisk_disk_name: %ld\n", OFFSET(gendisk_disk_name)); fprintf(fp, " irq_desc_t_status: %ld\n", OFFSET(irq_desc_t_status)); fprintf(fp, " irq_desc_t_handler: %ld\n", OFFSET(irq_desc_t_handler)); fprintf(fp, " irq_desc_t_chip: %ld\n", OFFSET(irq_desc_t_chip)); fprintf(fp, " irq_desc_t_action: %ld\n", OFFSET(irq_desc_t_action)); fprintf(fp, " irq_desc_t_depth: %ld\n", OFFSET(irq_desc_t_depth)); fprintf(fp, " irqdesc_action: %ld\n", OFFSET(irqdesc_action)); fprintf(fp, " irqdesc_ctl: %ld\n", OFFSET(irqdesc_ctl)); fprintf(fp, " irqdesc_level: %ld\n", OFFSET(irqdesc_level)); fprintf(fp, " irq_desc_t_irq_data: %ld\n", OFFSET(irq_desc_t_irq_data)); 
fprintf(fp, " irq_desc_t_kstat_irqs: %ld\n", OFFSET(irq_desc_t_kstat_irqs)); fprintf(fp, " irq_desc_t_affinity: %ld\n", OFFSET(irq_desc_t_affinity)); fprintf(fp, " irq_data_irq: %ld\n", OFFSET(irq_data_irq)); fprintf(fp, " irq_data_chip: %ld\n", OFFSET(irq_data_chip)); fprintf(fp, " irq_data_affinity: %ld\n", OFFSET(irq_data_affinity)); fprintf(fp, " irq_common_data_affinity: %ld\n", OFFSET(irq_common_data_affinity)); fprintf(fp, " irq_desc_irq_data: %ld\n", OFFSET(irq_desc_irq_data)); fprintf(fp, " irq_desc_irq_common_data: %ld\n", OFFSET(irq_desc_irq_common_data)); fprintf(fp, " kernel_stat_irqs: %ld\n", OFFSET(kernel_stat_irqs)); fprintf(fp, " irqaction_handler: %ld\n", OFFSET(irqaction_handler)); fprintf(fp, " irqaction_flags: %ld\n", OFFSET(irqaction_flags)); fprintf(fp, " irqaction_mask: %ld\n", OFFSET(irqaction_mask)); fprintf(fp, " irqaction_name: %ld\n", OFFSET(irqaction_name)); fprintf(fp, " irqaction_dev_id: %ld\n", OFFSET(irqaction_dev_id)); fprintf(fp, " irqaction_next: %ld\n", OFFSET(irqaction_next)); fprintf(fp, " hw_interrupt_type_typename: %ld\n", OFFSET(hw_interrupt_type_typename)); fprintf(fp, " hw_interrupt_type_startup: %ld\n", OFFSET(hw_interrupt_type_startup)); fprintf(fp, " hw_interrupt_type_shutdown: %ld\n", OFFSET(hw_interrupt_type_shutdown)); fprintf(fp, " hw_interrupt_type_handle: %ld\n", OFFSET(hw_interrupt_type_handle)); fprintf(fp, " hw_interrupt_type_enable: %ld\n", OFFSET(hw_interrupt_type_enable)); fprintf(fp, " hw_interrupt_type_disable: %ld\n", OFFSET(hw_interrupt_type_disable)); fprintf(fp, " hw_interrupt_type_ack: %ld\n", OFFSET(hw_interrupt_type_ack)); fprintf(fp, " hw_interrupt_type_end: %ld\n", OFFSET(hw_interrupt_type_end)); fprintf(fp, "hw_interrupt_type_set_affinity: %ld\n", OFFSET(hw_interrupt_type_set_affinity)); fprintf(fp, " irq_chip_typename: %ld\n", OFFSET(irq_chip_typename)); fprintf(fp, " irq_chip_startup: %ld\n", OFFSET(irq_chip_startup)); fprintf(fp, " irq_chip_shutdown: %ld\n", OFFSET(irq_chip_shutdown)); 
fprintf(fp, " irq_chip_enable: %ld\n", OFFSET(irq_chip_enable)); fprintf(fp, " irq_chip_disable: %ld\n", OFFSET(irq_chip_disable)); fprintf(fp, " irq_chip_ack: %ld\n", OFFSET(irq_chip_ack)); fprintf(fp, " irq_chip_mask: %ld\n", OFFSET(irq_chip_mask)); fprintf(fp, " irq_chip_mask_ack: %ld\n", OFFSET(irq_chip_mask_ack)); fprintf(fp, " irq_chip_unmask: %ld\n", OFFSET(irq_chip_unmask)); fprintf(fp, " irq_chip_eoi: %ld\n", OFFSET(irq_chip_eoi)); fprintf(fp, " irq_chip_end: %ld\n", OFFSET(irq_chip_end)); fprintf(fp, " irq_chip_set_affinity: %ld\n", OFFSET(irq_chip_set_affinity)); fprintf(fp, " irq_chip_retrigger: %ld\n", OFFSET(irq_chip_retrigger)); fprintf(fp, " irq_chip_set_type: %ld\n", OFFSET(irq_chip_set_type)); fprintf(fp, " irq_chip_set_wake: %ld\n", OFFSET(irq_chip_set_wake)); fprintf(fp, "irq_cpustat_t___softirq_active: %ld\n", OFFSET(irq_cpustat_t___softirq_active)); fprintf(fp, " irq_cpustat_t___softirq_mask: %ld\n", OFFSET(irq_cpustat_t___softirq_mask)); fprintf(fp, " files_struct_fdt: %ld\n", OFFSET(files_struct_fdt)); fprintf(fp, " fdtable_max_fds: %ld\n", OFFSET(fdtable_max_fds)); fprintf(fp, " fdtable_max_fdset: %ld\n", OFFSET(fdtable_max_fdset)); fprintf(fp, " fdtable_open_fds: %ld\n", OFFSET(fdtable_open_fds)); fprintf(fp, " fdtable_fd: %ld\n", OFFSET(fdtable_fd)); fprintf(fp, " files_struct_max_fds: %ld\n", OFFSET(files_struct_max_fds)); fprintf(fp, " files_struct_max_fdset: %ld\n", OFFSET(files_struct_max_fdset)); fprintf(fp, " files_struct_open_fds: %ld\n", OFFSET(files_struct_open_fds)); fprintf(fp, " files_struct_fd: %ld\n", OFFSET(files_struct_fd)); fprintf(fp, " files_struct_open_fds_init: %ld\n", OFFSET(files_struct_open_fds_init)); fprintf(fp, " file_f_dentry: %ld\n", OFFSET(file_f_dentry)); fprintf(fp, " file_f_vfsmnt: %ld\n", OFFSET(file_f_vfsmnt)); fprintf(fp, " file_f_count: %ld\n", OFFSET(file_f_count)); fprintf(fp, " file_f_path: %ld\n", OFFSET(file_f_path)); fprintf(fp, " file_f_inode: %ld\n", OFFSET(file_f_inode)); fprintf(fp, " 
path_mnt: %ld\n", OFFSET(path_mnt)); fprintf(fp, " path_dentry: %ld\n", OFFSET(path_dentry)); fprintf(fp, " fs_struct_root: %ld\n", OFFSET(fs_struct_root)); fprintf(fp, " fs_struct_pwd: %ld\n", OFFSET(fs_struct_pwd)); fprintf(fp, " fs_struct_rootmnt: %ld\n", OFFSET(fs_struct_rootmnt)); fprintf(fp, " fs_struct_pwdmnt: %ld\n", OFFSET(fs_struct_pwdmnt)); fprintf(fp, " dentry_d_inode: %ld\n", OFFSET(dentry_d_inode)); fprintf(fp, " dentry_d_parent: %ld\n", OFFSET(dentry_d_parent)); fprintf(fp, " dentry_d_name: %ld\n", OFFSET(dentry_d_name)); fprintf(fp, " dentry_d_iname: %ld\n", OFFSET(dentry_d_iname)); fprintf(fp, " dentry_d_covers: %ld\n", OFFSET(dentry_d_covers)); fprintf(fp, " dentry_d_sb: %ld\n", OFFSET(dentry_d_sb)); fprintf(fp, " qstr_len: %ld\n", OFFSET(qstr_len)); fprintf(fp, " qstr_name: %ld\n", OFFSET(qstr_name)); fprintf(fp, " inode_i_mode: %ld\n", OFFSET(inode_i_mode)); fprintf(fp, " inode_i_op: %ld\n", OFFSET(inode_i_op)); fprintf(fp, " inode_i_sb: %ld\n", OFFSET(inode_i_sb)); fprintf(fp, " inode_u: %ld\n", OFFSET(inode_u)); fprintf(fp, " inode_i_flock: %ld\n", OFFSET(inode_i_flock)); fprintf(fp, " inode_i_fop: %ld\n", OFFSET(inode_i_fop)); fprintf(fp, " inode_i_mapping: %ld\n", OFFSET(inode_i_mapping)); fprintf(fp, " inode_i_sb_list: %ld\n", OFFSET(inode_i_sb_list)); fprintf(fp, " vfsmount_mnt_next: %ld\n", OFFSET(vfsmount_mnt_next)); fprintf(fp, " vfsmount_mnt_devname: %ld\n", OFFSET(vfsmount_mnt_devname)); fprintf(fp, " vfsmount_mnt_dirname: %ld\n", OFFSET(vfsmount_mnt_dirname)); fprintf(fp, " vfsmount_mnt_sb: %ld\n", OFFSET(vfsmount_mnt_sb)); fprintf(fp, " vfsmount_mnt_list: %ld\n", OFFSET(vfsmount_mnt_list)); fprintf(fp, " vfsmount_mnt_mountpoint: %ld\n", OFFSET(vfsmount_mnt_mountpoint)); fprintf(fp, " vfsmount_mnt_parent: %ld\n", OFFSET(vfsmount_mnt_parent)); fprintf(fp, " vfsmount_mnt_flags: %ld\n", OFFSET(vfsmount_mnt_flags)); fprintf(fp, " proc_mounts_cursor: %ld\n", OFFSET(proc_mounts_cursor)); fprintf(fp, " mount_mnt_parent: %ld\n", 
OFFSET(mount_mnt_parent)); fprintf(fp, " mount_mnt_mountpoint: %ld\n", OFFSET(mount_mnt_mountpoint)); fprintf(fp, " mount_mnt_list: %ld\n", OFFSET(mount_mnt_list)); fprintf(fp, " mount_mnt_devname: %ld\n", OFFSET(mount_mnt_devname)); fprintf(fp, " mount_mnt: %ld\n", OFFSET(mount_mnt)); fprintf(fp, " mount_mnt_node: %ld\n", OFFSET(mount_mnt_node)); fprintf(fp, " namespace_root: %ld\n", OFFSET(namespace_root)); fprintf(fp, " namespace_list: %ld\n", OFFSET(namespace_list)); fprintf(fp, " super_block_s_dirty: %ld\n", OFFSET(super_block_s_dirty)); fprintf(fp, " super_block_s_type: %ld\n", OFFSET(super_block_s_type)); fprintf(fp, " super_block_s_files: %ld\n", OFFSET(super_block_s_files)); fprintf(fp, " super_block_s_inodes: %ld\n", OFFSET(super_block_s_inodes)); fprintf(fp, " nlm_file_f_file: %ld\n", OFFSET(nlm_file_f_file)); fprintf(fp, " file_system_type_name: %ld\n", OFFSET(file_system_type_name)); fprintf(fp, " file_lock_fl_owner: %ld\n", OFFSET(file_lock_fl_owner)); fprintf(fp, " nlm_host_h_exportent: %ld\n", OFFSET(nlm_host_h_exportent)); fprintf(fp, " svc_client_cl_ident: %ld\n", OFFSET(svc_client_cl_ident)); fprintf(fp, " kmem_cache_s_c_nextp: %ld\n", OFFSET(kmem_cache_s_c_nextp)); fprintf(fp, " kmem_cache_s_c_name: %ld\n", OFFSET(kmem_cache_s_c_name)); fprintf(fp, " kmem_cache_s_c_num: %ld\n", OFFSET(kmem_cache_s_c_num)); fprintf(fp, " kmem_cache_s_c_org_size: %ld\n", OFFSET(kmem_cache_s_c_org_size)); fprintf(fp, " kmem_cache_s_c_flags: %ld\n", OFFSET(kmem_cache_s_c_flags)); fprintf(fp, " kmem_cache_s_c_offset: %ld\n", OFFSET(kmem_cache_s_c_offset)); fprintf(fp, " kmem_cache_s_c_firstp: %ld\n", OFFSET(kmem_cache_s_c_firstp)); fprintf(fp, " kmem_cache_s_c_gfporder: %ld\n", OFFSET(kmem_cache_s_c_gfporder)); fprintf(fp, " kmem_cache_s_c_magic: %ld\n", OFFSET(kmem_cache_s_c_magic)); fprintf(fp, " kmem_cache_s_c_align: %ld\n", OFFSET(kmem_cache_s_c_align)); fprintf(fp, " kmem_cache_s_num: %ld\n", OFFSET(kmem_cache_s_num)); fprintf(fp, " kmem_cache_s_next: %ld\n", 
OFFSET(kmem_cache_s_next)); fprintf(fp, " kmem_cache_s_name: %ld\n", OFFSET(kmem_cache_s_name)); fprintf(fp, " kmem_cache_s_objsize: %ld\n", OFFSET(kmem_cache_s_objsize)); fprintf(fp, " kmem_cache_s_flags: %ld\n", OFFSET(kmem_cache_s_flags)); fprintf(fp, " kmem_cache_s_gfporder: %ld\n", OFFSET(kmem_cache_s_gfporder)); fprintf(fp, " kmem_cache_s_slabs: %ld\n", OFFSET(kmem_cache_s_slabs)); fprintf(fp, " kmem_cache_s_slabs_full: %ld\n", OFFSET(kmem_cache_s_slabs_full)); fprintf(fp, " kmem_cache_s_slabs_partial: %ld\n", OFFSET(kmem_cache_s_slabs_partial)); fprintf(fp, " kmem_cache_s_slabs_free: %ld\n", OFFSET(kmem_cache_s_slabs_free)); fprintf(fp, " kmem_cache_s_cpudata: %ld\n", OFFSET(kmem_cache_s_cpudata)); fprintf(fp, " kmem_cache_s_colour_off: %ld\n", OFFSET(kmem_cache_s_colour_off)); fprintf(fp, " cpucache_s_avail: %ld\n", OFFSET(cpucache_s_avail)); fprintf(fp, " cpucache_s_limit: %ld\n", OFFSET(cpucache_s_limit)); fprintf(fp, " array_cache_avail: %ld\n", OFFSET(array_cache_avail)); fprintf(fp, " array_cache_limit: %ld\n", OFFSET(array_cache_limit)); fprintf(fp, " kmem_cache_s_array: %ld\n", OFFSET(kmem_cache_s_array)); fprintf(fp, " kmem_cache_s_lists: %ld\n", OFFSET(kmem_cache_s_lists)); fprintf(fp, " kmem_list3_slabs_partial: %ld\n", OFFSET(kmem_list3_slabs_partial)); fprintf(fp, " kmem_list3_slabs_full: %ld\n", OFFSET(kmem_list3_slabs_full)); fprintf(fp, " kmem_list3_slabs_free: %ld\n", OFFSET(kmem_list3_slabs_free)); fprintf(fp, " kmem_list3_free_objects: %ld\n", OFFSET(kmem_list3_free_objects)); fprintf(fp, " kmem_list3_shared: %ld\n", OFFSET(kmem_list3_shared)); fprintf(fp, " kmem_slab_s_s_nextp: %ld\n", OFFSET(kmem_slab_s_s_nextp)); fprintf(fp, " kmem_slab_s_s_freep: %ld\n", OFFSET(kmem_slab_s_s_freep)); fprintf(fp, " kmem_slab_s_s_inuse: %ld\n", OFFSET(kmem_slab_s_s_inuse)); fprintf(fp, " kmem_slab_s_s_mem: %ld\n", OFFSET(kmem_slab_s_s_mem)); fprintf(fp, " kmem_slab_s_s_index: %ld\n", OFFSET(kmem_slab_s_s_index)); fprintf(fp, " kmem_slab_s_s_offset: 
%ld\n", OFFSET(kmem_slab_s_s_offset)); fprintf(fp, " kmem_slab_s_s_magic: %ld\n", OFFSET(kmem_slab_s_s_magic)); fprintf(fp, " slab_s_list: %ld\n", OFFSET(slab_s_list)); fprintf(fp, " slab_s_s_mem: %ld\n", OFFSET(slab_s_s_mem)); fprintf(fp, " slab_s_inuse: %ld\n", OFFSET(slab_s_inuse)); fprintf(fp, " slab_s_free: %ld\n", OFFSET(slab_s_free)); fprintf(fp, " slab_list: %ld\n", OFFSET(slab_list)); fprintf(fp, " slab_s_mem: %ld\n", OFFSET(slab_s_mem)); fprintf(fp, " slab_inuse: %ld\n", OFFSET(slab_inuse)); fprintf(fp, " slab_free: %ld\n", OFFSET(slab_free)); fprintf(fp, " slab_slab_list: %ld\n", OFFSET(slab_slab_list)); fprintf(fp, " kmem_cache_size: %ld\n", OFFSET(kmem_cache_size)); fprintf(fp, " kmem_cache_objsize: %ld\n", OFFSET(kmem_cache_objsize)); fprintf(fp, " kmem_cache_offset: %ld\n", OFFSET(kmem_cache_offset)); fprintf(fp, " kmem_cache_order: %ld\n", OFFSET(kmem_cache_order)); fprintf(fp, " kmem_cache_local_node: %ld\n", OFFSET(kmem_cache_local_node)); fprintf(fp, " kmem_cache_objects: %ld\n", OFFSET(kmem_cache_objects)); fprintf(fp, " kmem_cache_inuse: %ld\n", OFFSET(kmem_cache_inuse)); fprintf(fp, " kmem_cache_align: %ld\n", OFFSET(kmem_cache_align)); fprintf(fp, " kmem_cache_name: %ld\n", OFFSET(kmem_cache_name)); fprintf(fp, " kmem_cache_list: %ld\n", OFFSET(kmem_cache_list)); fprintf(fp, " kmem_cache_red_left_pad: %ld\n", OFFSET(kmem_cache_red_left_pad)); fprintf(fp, " kmem_cache_node: %ld\n", OFFSET(kmem_cache_node)); fprintf(fp, " kmem_cache_cpu_slab: %ld\n", OFFSET(kmem_cache_cpu_slab)); fprintf(fp, " kmem_cache_cpu_partial: %ld\n", OFFSET(kmem_cache_cpu_partial)); fprintf(fp, " kmem_cache_cpu_cache: %ld\n", OFFSET(kmem_cache_cpu_cache)); fprintf(fp, " kmem_cache_oo: %ld\n", OFFSET(kmem_cache_oo)); fprintf(fp, " kmem_cache_random: %ld\n", OFFSET(kmem_cache_random)); fprintf(fp, " kmem_cache_node_nr_partial: %ld\n", OFFSET(kmem_cache_node_nr_partial)); fprintf(fp, " kmem_cache_node_nr_slabs: %ld\n", OFFSET(kmem_cache_node_nr_slabs)); fprintf(fp, " 
kmem_cache_node_partial: %ld\n", OFFSET(kmem_cache_node_partial)); fprintf(fp, " kmem_cache_node_full: %ld\n", OFFSET(kmem_cache_node_full)); fprintf(fp, " kmem_cache_node_total_objects: %ld\n", OFFSET(kmem_cache_node_total_objects)); fprintf(fp, " kmem_cache_cpu_freelist: %ld\n", OFFSET(kmem_cache_cpu_freelist)); fprintf(fp, " kmem_cache_cpu_page: %ld\n", OFFSET(kmem_cache_cpu_page)); fprintf(fp, " kmem_cache_cpu_node: %ld\n", OFFSET(kmem_cache_cpu_node)); fprintf(fp, " kmem_cache_flags: %ld\n", OFFSET(kmem_cache_flags)); fprintf(fp, " kmem_cache_memcg_params: %ld\n", OFFSET(kmem_cache_memcg_params)); fprintf(fp, "memcg_cache_params___root_caches_node: %ld\n", OFFSET(memcg_cache_params___root_caches_node)); fprintf(fp, " memcg_cache_params_children: %ld\n", OFFSET(memcg_cache_params_children)); fprintf(fp, " memcg_cache_params_children_node: %ld\n", OFFSET(memcg_cache_params_children_node)); fprintf(fp, " net_device_next: %ld\n", OFFSET(net_device_next)); fprintf(fp, " net_device_name: %ld\n", OFFSET(net_device_name)); fprintf(fp, " net_device_type: %ld\n", OFFSET(net_device_type)); fprintf(fp, " net_device_addr_len: %ld\n", OFFSET(net_device_addr_len)); fprintf(fp, " net_device_ip_ptr: %ld\n", OFFSET(net_device_ip_ptr)); fprintf(fp, " net_device_ip6_ptr: %ld\n", OFFSET(net_device_ip6_ptr)); fprintf(fp, " net_device_dev_list: %ld\n", OFFSET(net_device_dev_list)); fprintf(fp, " net_dev_base_head: %ld\n", OFFSET(net_dev_base_head)); fprintf(fp, " device_next: %ld\n", OFFSET(device_next)); fprintf(fp, " device_name: %ld\n", OFFSET(device_name)); fprintf(fp, " device_type: %ld\n", OFFSET(device_type)); fprintf(fp, " device_ip_ptr: %ld\n", OFFSET(device_ip_ptr)); fprintf(fp, " device_addr_len: %ld\n", OFFSET(device_addr_len)); fprintf(fp, " socket_sk: %ld\n", OFFSET(socket_sk)); fprintf(fp, " sock_daddr: %ld\n", OFFSET(sock_daddr)); fprintf(fp, " sock_rcv_saddr: %ld\n", OFFSET(sock_rcv_saddr)); fprintf(fp, " sock_dport: %ld\n", OFFSET(sock_dport)); fprintf(fp, " 
sock_sport: %ld\n", OFFSET(sock_sport)); fprintf(fp, " sock_num: %ld\n", OFFSET(sock_num)); fprintf(fp, " sock_family: %ld\n", OFFSET(sock_family)); fprintf(fp, " sock_type: %ld\n", OFFSET(sock_type)); fprintf(fp, " sock_sk_type: %ld\n", OFFSET(sock_sk_type)); fprintf(fp, " sock_sk_common: %ld\n", OFFSET(sock_sk_common)); fprintf(fp, " sock_common_skc_family: %ld\n", OFFSET(sock_common_skc_family)); fprintf(fp, " sock_common_skc_v6_daddr: %ld\n", OFFSET(sock_common_skc_v6_daddr)); fprintf(fp, " sock_common_skc_v6_rcv_saddr: %ld\n", OFFSET(sock_common_skc_v6_rcv_saddr)); fprintf(fp, " socket_alloc_vfs_inode: %ld\n", OFFSET(socket_alloc_vfs_inode)); fprintf(fp, " inet_sock_inet: %ld\n", OFFSET(inet_sock_inet)); fprintf(fp, " inet_opt_daddr: %ld\n", OFFSET(inet_opt_daddr)); fprintf(fp, " inet_opt_rcv_saddr: %ld\n", OFFSET(inet_opt_rcv_saddr)); fprintf(fp, " inet_opt_dport: %ld\n", OFFSET(inet_opt_dport)); fprintf(fp, " inet_opt_sport: %ld\n", OFFSET(inet_opt_sport)); fprintf(fp, " inet_opt_num: %ld\n", OFFSET(inet_opt_num)); fprintf(fp, " inet6_dev_addr_list: %ld\n", OFFSET(inet6_dev_addr_list)); fprintf(fp, " inet6_ifaddr_addr: %ld\n", OFFSET(inet6_ifaddr_addr)); fprintf(fp, " inet6_ifaddr_if_list: %ld\n", OFFSET(inet6_ifaddr_if_list)); fprintf(fp, " inet6_ifaddr_if_next: %ld\n", OFFSET(inet6_ifaddr_if_next)); fprintf(fp, " in6_addr_in6_u: %ld\n", OFFSET(in6_addr_in6_u)); fprintf(fp, " ipv6_pinfo_rcv_saddr: %ld\n", OFFSET(ipv6_pinfo_rcv_saddr)); fprintf(fp, " ipv6_pinfo_daddr: %ld\n", OFFSET(ipv6_pinfo_daddr)); fprintf(fp, " timer_list_list: %ld\n", OFFSET(timer_list_list)); fprintf(fp, " timer_list_next: %ld\n", OFFSET(timer_list_next)); fprintf(fp, " timer_list_entry: %ld\n", OFFSET(timer_list_entry)); fprintf(fp, " timer_list_expires: %ld\n", OFFSET(timer_list_expires)); fprintf(fp, " timer_list_function: %ld\n", OFFSET(timer_list_function)); fprintf(fp, " timer_vec_root_vec: %ld\n", OFFSET(timer_vec_root_vec)); fprintf(fp, " timer_vec_vec: %ld\n", 
OFFSET(timer_vec_vec)); fprintf(fp, " tvec_root_s_vec: %ld\n", OFFSET(tvec_root_s_vec)); fprintf(fp, " tvec_s_vec: %ld\n", OFFSET(tvec_s_vec)); fprintf(fp, " tvec_t_base_s_tv1: %ld\n", OFFSET(tvec_t_base_s_tv1)); fprintf(fp, " timer_base_vectors: %ld\n", OFFSET(timer_base_vectors)); fprintf(fp, " wait_queue_task: %ld\n", OFFSET(wait_queue_task)); fprintf(fp, " wait_queue_next: %ld\n", OFFSET(wait_queue_next)); fprintf(fp, " __wait_queue_task: %ld\n", OFFSET(__wait_queue_task)); fprintf(fp, " __wait_queue_head_task_list: %ld\n", OFFSET(__wait_queue_head_task_list)); fprintf(fp, " __wait_queue_task_list: %ld\n", OFFSET(__wait_queue_task_list)); fprintf(fp, " wait_queue_entry_private: %ld\n", OFFSET(wait_queue_entry_private)); fprintf(fp, " wait_queue_head_head: %ld\n", OFFSET(wait_queue_head_head)); fprintf(fp, " wait_queue_entry_entry: %ld\n", OFFSET(wait_queue_entry_entry)); fprintf(fp, " pglist_data_node_zones: %ld\n", OFFSET(pglist_data_node_zones)); fprintf(fp, " pglist_data_node_mem_map: %ld\n", OFFSET(pglist_data_node_mem_map)); fprintf(fp, " pglist_data_node_start_paddr: %ld\n", OFFSET(pglist_data_node_start_paddr)); fprintf(fp, " pglist_data_node_start_mapnr: %ld\n", OFFSET(pglist_data_node_start_mapnr)); fprintf(fp, " pglist_data_node_size: %ld\n", OFFSET(pglist_data_node_size)); fprintf(fp, " pglist_data_node_id: %ld\n", OFFSET(pglist_data_node_id)); fprintf(fp, " pglist_data_node_next: %ld\n", OFFSET(pglist_data_node_next)); fprintf(fp, " pglist_data_bdata: %ld\n", OFFSET(pglist_data_bdata)); fprintf(fp, " pglist_data_nr_zones: %ld\n", OFFSET(pglist_data_nr_zones)); fprintf(fp, " pglist_data_node_start_pfn: %ld\n", OFFSET(pglist_data_node_start_pfn)); fprintf(fp, " pglist_data_pgdat_next: %ld\n", OFFSET(pglist_data_pgdat_next)); fprintf(fp, "pglist_data_node_present_pages: %ld\n", OFFSET(pglist_data_node_present_pages)); fprintf(fp, "pglist_data_node_spanned_pages: %ld\n", OFFSET(pglist_data_node_spanned_pages)); fprintf(fp, " page_cache_bucket_chain: 
%ld\n", OFFSET(page_cache_bucket_chain)); fprintf(fp, " zone_struct_free_pages: %ld\n", OFFSET(zone_struct_free_pages)); fprintf(fp, " zone_struct_free_area: %ld\n", OFFSET(zone_struct_free_area)); fprintf(fp, " zone_struct_zone_pgdat: %ld\n", OFFSET(zone_struct_zone_pgdat)); fprintf(fp, " zone_struct_name: %ld\n", OFFSET(zone_struct_name)); fprintf(fp, " zone_struct_size: %ld\n", OFFSET(zone_struct_size)); fprintf(fp, " zone_struct_memsize: %ld\n", OFFSET(zone_struct_memsize)); fprintf(fp, " zone_struct_zone_start_pfn: %ld\n", OFFSET(zone_struct_zone_start_pfn)); fprintf(fp, " zone_struct_zone_start_paddr: %ld\n", OFFSET(zone_struct_zone_start_paddr)); fprintf(fp, " zone_struct_zone_start_mapnr: %ld\n", OFFSET(zone_struct_zone_start_mapnr)); fprintf(fp, " zone_struct_zone_mem_map: %ld\n", OFFSET(zone_struct_zone_mem_map)); fprintf(fp, "zone_struct_inactive_clean_pages: %ld\n", OFFSET(zone_struct_inactive_clean_pages)); fprintf(fp, "zone_struct_inactive_clean_list: %ld\n", OFFSET(zone_struct_inactive_clean_list)); fprintf(fp, "zone_struct_inactive_dirty_pages: %ld\n", OFFSET(zone_struct_inactive_dirty_pages)); fprintf(fp, " zone_struct_active_pages: %ld\n", OFFSET(zone_struct_active_pages)); fprintf(fp, " zone_struct_pages_min: %ld\n", OFFSET(zone_struct_pages_min)); fprintf(fp, " zone_struct_pages_low: %ld\n", OFFSET(zone_struct_pages_low)); fprintf(fp, " zone_struct_pages_high: %ld\n", OFFSET(zone_struct_pages_high)); fprintf(fp, " zone_free_pages: %ld\n", OFFSET(zone_free_pages)); fprintf(fp, " zone_watermark: %ld\n", OFFSET(zone_watermark)); fprintf(fp, " zone_free_area: %ld\n", OFFSET(zone_free_area)); fprintf(fp, " zone_zone_pgdat: %ld\n", OFFSET(zone_zone_pgdat)); fprintf(fp, " zone_zone_mem_map: %ld\n", OFFSET(zone_zone_mem_map)); fprintf(fp, " zone_name: %ld\n", OFFSET(zone_name)); fprintf(fp, " zone_spanned_pages: %ld\n", OFFSET(zone_spanned_pages)); fprintf(fp, " zone_present_pages: %ld\n", OFFSET(zone_present_pages)); fprintf(fp, " zone_zone_start_pfn: 
%ld\n", OFFSET(zone_zone_start_pfn)); fprintf(fp, " zone_pages_min: %ld\n", OFFSET(zone_pages_min)); fprintf(fp, " zone_pages_low: %ld\n", OFFSET(zone_pages_low)); fprintf(fp, " zone_pages_high: %ld\n", OFFSET(zone_pages_high)); fprintf(fp, " zone_vm_stat: %ld\n", OFFSET(zone_vm_stat)); fprintf(fp, " zone_nr_active: %ld\n", OFFSET(zone_nr_active)); fprintf(fp, " zone_nr_inactive: %ld\n", OFFSET(zone_nr_inactive)); fprintf(fp, " zone_all_unreclaimable: %ld\n", OFFSET(zone_all_unreclaimable)); fprintf(fp, " zone_flags: %ld\n", OFFSET(zone_flags)); fprintf(fp, " zone_pages_scanned: %ld\n", OFFSET(zone_pages_scanned)); fprintf(fp, " neighbour_next: %ld\n", OFFSET(neighbour_next)); fprintf(fp, " neighbour_hash: %ld\n", OFFSET(neighbour_hash)); fprintf(fp, " neighbour_primary_key: %ld\n", OFFSET(neighbour_primary_key)); fprintf(fp, " neighbour_ha: %ld\n", OFFSET(neighbour_ha)); fprintf(fp, " neighbour_dev: %ld\n", OFFSET(neighbour_dev)); fprintf(fp, " neighbour_nud_state: %ld\n", OFFSET(neighbour_nud_state)); fprintf(fp, " neigh_table_hash_buckets: %ld\n", OFFSET(neigh_table_hash_buckets)); fprintf(fp, " neigh_table_hash_heads: %ld\n", OFFSET(neigh_table_hash_heads)); fprintf(fp, " neigh_table_hash_mask: %ld\n", OFFSET(neigh_table_hash_mask)); fprintf(fp, " neigh_table_hash_shift: %ld\n", OFFSET(neigh_table_hash_shift)); fprintf(fp, " neigh_table_nht_ptr: %ld\n", OFFSET(neigh_table_nht_ptr)); fprintf(fp, " neigh_table_key_len: %ld\n", OFFSET(neigh_table_key_len)); fprintf(fp, " in_device_ifa_list: %ld\n", OFFSET(in_device_ifa_list)); fprintf(fp, " in_ifaddr_ifa_next: %ld\n", OFFSET(in_ifaddr_ifa_next)); fprintf(fp, " in_ifaddr_ifa_address: %ld\n", OFFSET(in_ifaddr_ifa_address)); fprintf(fp, " pci_dev_global_list: %ld\n", OFFSET(pci_dev_global_list)); fprintf(fp, " pci_dev_next: %ld\n", OFFSET(pci_dev_next)); fprintf(fp, " pci_dev_bus: %ld\n", OFFSET(pci_dev_bus)); fprintf(fp, " pci_dev_devfn: %ld\n", OFFSET(pci_dev_devfn)); fprintf(fp, " pci_dev_class: %ld\n", 
OFFSET(pci_dev_class)); fprintf(fp, " pci_dev_device: %ld\n", OFFSET(pci_dev_device)); fprintf(fp, " pci_dev_vendor: %ld\n", OFFSET(pci_dev_vendor)); fprintf(fp, " pci_bus_number: %ld\n", OFFSET(pci_bus_number)); fprintf(fp, " pci_dev_dev: %ld\n", OFFSET(pci_dev_dev)); fprintf(fp, " pci_dev_hdr_type: %ld\n", OFFSET(pci_dev_hdr_type)); fprintf(fp, " pci_dev_pcie_flags_reg: %ld\n", OFFSET(pci_dev_pcie_flags_reg)); fprintf(fp, " pci_bus_node: %ld\n", OFFSET(pci_bus_node)); fprintf(fp, " pci_bus_devices: %ld\n", OFFSET(pci_bus_devices)); fprintf(fp, " pci_bus_dev: %ld\n", OFFSET(pci_bus_dev)); fprintf(fp, " pci_bus_children: %ld\n", OFFSET(pci_bus_children)); fprintf(fp, " pci_bus_parent: %ld\n", OFFSET(pci_bus_parent)); fprintf(fp, " pci_bus_self: %ld\n", OFFSET(pci_bus_self)); fprintf(fp, " device_kobj: %ld\n", OFFSET(device_kobj)); fprintf(fp, " kobject_name: %ld\n", OFFSET(kobject_name)); fprintf(fp, " resource_entry_t_from: %ld\n", OFFSET(resource_entry_t_from)); fprintf(fp, " resource_entry_t_num: %ld\n", OFFSET(resource_entry_t_num)); fprintf(fp, " resource_entry_t_name: %ld\n", OFFSET(resource_entry_t_name)); fprintf(fp, " resource_entry_t_next: %ld\n", OFFSET(resource_entry_t_next)); fprintf(fp, " resource_name: %ld\n", OFFSET(resource_name)); fprintf(fp, " resource_start: %ld\n", OFFSET(resource_start)); fprintf(fp, " resource_end: %ld\n", OFFSET(resource_end)); fprintf(fp, " resource_sibling: %ld\n", OFFSET(resource_sibling)); fprintf(fp, " resource_child: %ld\n", OFFSET(resource_child)); fprintf(fp, " runqueue_curr: %ld\n", OFFSET(runqueue_curr)); fprintf(fp, " runqueue_idle: %ld\n", OFFSET(runqueue_idle)); fprintf(fp, " runqueue_active: %ld\n", OFFSET(runqueue_active)); fprintf(fp, " runqueue_expired: %ld\n", OFFSET(runqueue_expired)); fprintf(fp, " runqueue_arrays: %ld\n", OFFSET(runqueue_arrays)); fprintf(fp, " runqueue_cpu: %ld\n", OFFSET(runqueue_cpu)); fprintf(fp, " cpu_s_idle: %ld\n", OFFSET(cpu_s_idle)); fprintf(fp, " cpu_s_curr: %ld\n", 
OFFSET(cpu_s_curr)); fprintf(fp, " prio_array_queue: %ld\n", OFFSET(prio_array_queue)); fprintf(fp, " rt_prio_array_queue: %ld\n", OFFSET(rt_prio_array_queue)); fprintf(fp, " prio_array_nr_active: %ld\n", OFFSET(prio_array_nr_active)); fprintf(fp, " pt_regs_regs: %ld\n", OFFSET(pt_regs_regs)); fprintf(fp, " pt_regs_cp0_badvaddr: %ld\n", OFFSET(pt_regs_cp0_badvaddr)); fprintf(fp, " user_regs_struct_ebp: %ld\n", OFFSET(user_regs_struct_ebp)); fprintf(fp, " user_regs_struct_eip: %ld\n", OFFSET(user_regs_struct_eip)); fprintf(fp, " user_regs_struct_esp: %ld\n", OFFSET(user_regs_struct_esp)); fprintf(fp, " user_regs_struct_rip: %ld\n", OFFSET(user_regs_struct_rip)); fprintf(fp, " user_regs_struct_rsp: %ld\n", OFFSET(user_regs_struct_rsp)); fprintf(fp, " user_regs_struct_eflags: %ld\n", OFFSET(user_regs_struct_eflags)); fprintf(fp, " user_regs_struct_cs: %ld\n", OFFSET(user_regs_struct_cs)); fprintf(fp, " user_regs_struct_ss: %ld\n", OFFSET(user_regs_struct_ss)); fprintf(fp, " user_regs_struct_eip: %ld\n", OFFSET(user_regs_struct_eip)); fprintf(fp, " user_regs_struct_rax: %ld\n", OFFSET(user_regs_struct_rax)); fprintf(fp, " user_regs_struct_eax: %ld\n", OFFSET(user_regs_struct_eax)); fprintf(fp, " user_regs_struct_rbx: %ld\n", OFFSET(user_regs_struct_rbx)); fprintf(fp, " user_regs_struct_ebx: %ld\n", OFFSET(user_regs_struct_ebx)); fprintf(fp, " user_regs_struct_rcx: %ld\n", OFFSET(user_regs_struct_rcx)); fprintf(fp, " user_regs_struct_ecx: %ld\n", OFFSET(user_regs_struct_ecx)); fprintf(fp, " user_regs_struct_rdx: %ld\n", OFFSET(user_regs_struct_rdx)); fprintf(fp, " user_regs_struct_edx: %ld\n", OFFSET(user_regs_struct_edx)); fprintf(fp, " user_regs_struct_rsi: %ld\n", OFFSET(user_regs_struct_rsi)); fprintf(fp, " user_regs_struct_esi: %ld\n", OFFSET(user_regs_struct_esi)); fprintf(fp, " user_regs_struct_rdi: %ld\n", OFFSET(user_regs_struct_rdi)); fprintf(fp, " user_regs_struct_edi: %ld\n", OFFSET(user_regs_struct_edi)); fprintf(fp, " user_regs_struct_ds: %ld\n", 
OFFSET(user_regs_struct_ds)); fprintf(fp, " user_regs_struct_es: %ld\n", OFFSET(user_regs_struct_es)); fprintf(fp, " user_regs_struct_fs: %ld\n", OFFSET(user_regs_struct_fs)); fprintf(fp, " user_regs_struct_gs: %ld\n", OFFSET(user_regs_struct_gs)); fprintf(fp, " user_regs_struct_rbp: %ld\n", OFFSET(user_regs_struct_rbp)); fprintf(fp, " user_regs_struct_r8: %ld\n", OFFSET(user_regs_struct_r8)); fprintf(fp, " user_regs_struct_r9: %ld\n", OFFSET(user_regs_struct_r9)); fprintf(fp, " user_regs_struct_r10: %ld\n", OFFSET(user_regs_struct_r10)); fprintf(fp, " user_regs_struct_r11: %ld\n", OFFSET(user_regs_struct_r11)); fprintf(fp, " user_regs_struct_r12: %ld\n", OFFSET(user_regs_struct_r12)); fprintf(fp, " user_regs_struct_r13: %ld\n", OFFSET(user_regs_struct_r13)); fprintf(fp, " user_regs_struct_r14: %ld\n", OFFSET(user_regs_struct_r14)); fprintf(fp, " user_regs_struct_r15: %ld\n", OFFSET(user_regs_struct_r15)); fprintf(fp, " e820map_nr_map: %ld\n", OFFSET(e820map_nr_map)); fprintf(fp, " e820entry_addr: %ld\n", OFFSET(e820entry_addr)); fprintf(fp, " e820entry_size: %ld\n", OFFSET(e820entry_size)); fprintf(fp, " e820entry_type: %ld\n", OFFSET(e820entry_type)); fprintf(fp, " char_device_struct_name: %ld\n", OFFSET(char_device_struct_name)); fprintf(fp, " char_device_struct_next: %ld\n", OFFSET(char_device_struct_next)); fprintf(fp, " char_device_struct_fops: %ld\n", OFFSET(char_device_struct_fops)); fprintf(fp, " char_device_struct_major: %ld\n", OFFSET(char_device_struct_major)); fprintf(fp, " char_device_struct_baseminor: %ld\n", OFFSET(char_device_struct_baseminor)); fprintf(fp, " char_device_struct_cdev: %ld\n", OFFSET(char_device_struct_cdev)); fprintf(fp, " cdev_ops: %ld\n", OFFSET(cdev_ops)); fprintf(fp, " probe_next: %ld\n", OFFSET(probe_next)); fprintf(fp, " probe_dev: %ld\n", OFFSET(probe_dev)); fprintf(fp, " probe_data: %ld\n", OFFSET(probe_data)); fprintf(fp, " kobj_map_probes: %ld\n", OFFSET(kobj_map_probes)); fprintf(fp, " blk_major_name_next: %ld\n", 
OFFSET(blk_major_name_next)); fprintf(fp, " blk_major_name_major: %ld\n", OFFSET(blk_major_name_major)); fprintf(fp, " blk_major_name_name: %ld\n", OFFSET(blk_major_name_name)); fprintf(fp, " radix_tree_root_height: %ld\n", OFFSET(radix_tree_root_height)); fprintf(fp, " radix_tree_root_rnode: %ld\n", OFFSET(radix_tree_root_rnode)); fprintf(fp, " radix_tree_node_slots: %ld\n", OFFSET(radix_tree_node_slots)); fprintf(fp, " radix_tree_node_height: %ld\n", OFFSET(radix_tree_node_height)); fprintf(fp, " radix_tree_node_shift: %ld\n", OFFSET(radix_tree_node_shift)); fprintf(fp, " rb_root_rb_node: %ld\n", OFFSET(rb_root_rb_node)); fprintf(fp, " rb_node_rb_left: %ld\n", OFFSET(rb_node_rb_left)); fprintf(fp, " rb_node_rb_right: %ld\n", OFFSET(rb_node_rb_right)); fprintf(fp, " rb_root_cached_rb_leftmost: %ld\n", OFFSET(rb_root_cached_rb_leftmost)); fprintf(fp, " x8664_pda_pcurrent: %ld\n", OFFSET(x8664_pda_pcurrent)); fprintf(fp, " x8664_pda_data_offset: %ld\n", OFFSET(x8664_pda_data_offset)); fprintf(fp, " x8664_pda_kernelstack: %ld\n", OFFSET(x8664_pda_kernelstack)); fprintf(fp, " x8664_pda_irqrsp: %ld\n", OFFSET(x8664_pda_irqrsp)); fprintf(fp, " x8664_pda_cpunumber: %ld\n", OFFSET(x8664_pda_cpunumber)); fprintf(fp, " x8664_pda_irqstackptr: %ld\n", OFFSET(x8664_pda_irqstackptr)); fprintf(fp, " x8664_pda_level4_pgt: %ld\n", OFFSET(x8664_pda_level4_pgt)); fprintf(fp, " x8664_pda_me: %ld\n", OFFSET(x8664_pda_me)); fprintf(fp, " tss_struct_ist: %ld\n", OFFSET(tss_struct_ist)); fprintf(fp, " mem_section_section_mem_map: %ld\n", OFFSET(mem_section_section_mem_map)); fprintf(fp, " mem_section_pageblock_flags: %ld\n", OFFSET(mem_section_pageblock_flags)); fprintf(fp, " memory_block_dev: %ld\n", OFFSET(memory_block_dev)); fprintf(fp, " memory_block_nid: %ld\n", OFFSET(memory_block_nid)); fprintf(fp, " memory_block_start_section_nr: %ld\n", OFFSET(memory_block_start_section_nr)); fprintf(fp, " memory_block_end_section_nr: %ld\n", OFFSET(memory_block_end_section_nr)); fprintf(fp, " 
memory_block_state: %ld\n", OFFSET(memory_block_state)); fprintf(fp, " vcpu_guest_context_user_regs: %ld\n", OFFSET(vcpu_guest_context_user_regs)); fprintf(fp, " cpu_user_regs_eip: %ld\n", OFFSET(cpu_user_regs_eip)); fprintf(fp, " cpu_user_regs_esp: %ld\n", OFFSET(cpu_user_regs_esp)); fprintf(fp, " cpu_user_regs_rip: %ld\n", OFFSET(cpu_user_regs_rip)); fprintf(fp, " cpu_user_regs_rsp: %ld\n", OFFSET(cpu_user_regs_rsp)); fprintf(fp, " unwind_table_core: %ld\n", OFFSET(unwind_table_core)); fprintf(fp, " unwind_table_init: %ld\n", OFFSET(unwind_table_init)); fprintf(fp, " unwind_table_address: %ld\n", OFFSET(unwind_table_address)); fprintf(fp, " unwind_table_size: %ld\n", OFFSET(unwind_table_size)); fprintf(fp, " unwind_table_link: %ld\n", OFFSET(unwind_table_link)); fprintf(fp, " unwind_table_name: %ld\n", OFFSET(unwind_table_name)); fprintf(fp, " rq_cfs: %ld\n", OFFSET(rq_cfs)); fprintf(fp, " rq_rt: %ld\n", OFFSET(rq_rt)); fprintf(fp, " cfs_rq_curr: %ld\n", OFFSET(cfs_rq_curr)); fprintf(fp, " rq_nr_running: %ld\n", OFFSET(rq_nr_running)); fprintf(fp, " rq_timestamp: %ld\n", OFFSET(rq_timestamp)); fprintf(fp, " task_struct_se: %ld\n", OFFSET(task_struct_se)); fprintf(fp, " sched_entity_run_node: %ld\n", OFFSET(sched_entity_run_node)); fprintf(fp, " sched_entity_cfs_rq: %ld\n", OFFSET(sched_entity_cfs_rq)); fprintf(fp, " sched_entity_my_q: %ld\n", OFFSET(sched_entity_my_q)); fprintf(fp, " sched_entity_on_rq: %ld\n", OFFSET(sched_entity_on_rq)); fprintf(fp, " cfs_rq_nr_running: %ld\n", OFFSET(cfs_rq_nr_running)); fprintf(fp, " cfs_rq_rb_leftmost: %ld\n", OFFSET(cfs_rq_rb_leftmost)); fprintf(fp, " cfs_rq_tasks_timeline: %ld\n", OFFSET(cfs_rq_tasks_timeline)); fprintf(fp, " rt_rq_active: %ld\n", OFFSET(rt_rq_active)); fprintf(fp, " pcpu_info_vcpu: %ld\n", OFFSET(pcpu_info_vcpu)); fprintf(fp, " pcpu_info_idle: %ld\n", OFFSET(pcpu_info_idle)); fprintf(fp, " vcpu_struct_rq: %ld\n", OFFSET(vcpu_struct_rq)); fprintf(fp, " s390_lowcore_psw_save_area: %ld\n", 
OFFSET(s390_lowcore_psw_save_area)); fprintf(fp, " s390_stack_frame_back_chain: %ld\n", OFFSET(s390_stack_frame_back_chain)); fprintf(fp, " s390_stack_frame_r14: %ld\n", OFFSET(s390_stack_frame_r14)); fprintf(fp, " cpu_context_save_r7: %ld\n", OFFSET(cpu_context_save_r7)); fprintf(fp, " cpu_context_save_fp: %ld\n", OFFSET(cpu_context_save_fp)); fprintf(fp, " cpu_context_save_sp: %ld\n", OFFSET(cpu_context_save_sp)); fprintf(fp, " cpu_context_save_pc: %ld\n", OFFSET(cpu_context_save_pc)); fprintf(fp, " elf_prstatus_pr_pid: %ld\n", OFFSET(elf_prstatus_pr_pid)); fprintf(fp, " elf_prstatus_pr_reg: %ld\n", OFFSET(elf_prstatus_pr_reg)); fprintf(fp, " irq_desc_t_name: %ld\n", OFFSET(irq_desc_t_name)); fprintf(fp, " thread_info_cpu_context: %ld\n", OFFSET(thread_info_cpu_context)); fprintf(fp, " unwind_table_list: %ld\n", OFFSET(unwind_table_list)); fprintf(fp, " unwind_table_start: %ld\n", OFFSET(unwind_table_start)); fprintf(fp, " unwind_table_stop: %ld\n", OFFSET(unwind_table_stop)); fprintf(fp, " unwind_table_begin_addr: %ld\n", OFFSET(unwind_table_begin_addr)); fprintf(fp, " unwind_table_end_addr: %ld\n", OFFSET(unwind_table_end_addr)); fprintf(fp, " unwind_idx_addr: %ld\n", OFFSET(unwind_idx_addr)); fprintf(fp, " unwind_idx_insn: %ld\n", OFFSET(unwind_idx_insn)); fprintf(fp, " bus_type_p: %ld\n", OFFSET(bus_type_p)); fprintf(fp, " class_devices: %ld\n", OFFSET(class_devices)); fprintf(fp, " class_p: %ld\n", OFFSET(class_p)); fprintf(fp, " class_private_devices: %ld\n", OFFSET(class_private_devices)); fprintf(fp, " device_knode_class: %ld\n", OFFSET(device_knode_class)); fprintf(fp, " device_node: %ld\n", OFFSET(device_node)); fprintf(fp, " device_private_device: %ld\n", OFFSET(device_private_device)); fprintf(fp, " device_private_knode_bus: %ld\n", OFFSET(device_private_knode_bus)); fprintf(fp, " device_private_knode_class: %ld\n", OFFSET(device_private_knode_class)); fprintf(fp, " gendisk_dev: %ld\n", OFFSET(gendisk_dev)); fprintf(fp, " gendisk_kobj: %ld\n", 
OFFSET(gendisk_kobj)); fprintf(fp, " gendisk_part0: %ld\n", OFFSET(gendisk_part0)); fprintf(fp, " gendisk_queue: %ld\n", OFFSET(gendisk_queue)); fprintf(fp, " gendisk_private_data: %ld\n", OFFSET(gendisk_private_data)); fprintf(fp, " hd_struct_dev: %ld\n", OFFSET(hd_struct_dev)); fprintf(fp, " hd_struct_dkstats: %ld\n", OFFSET(hd_struct_dkstats)); fprintf(fp, " disk_stats_in_flight: %ld\n", OFFSET(disk_stats_in_flight)); fprintf(fp, " klist_k_list: %ld\n", OFFSET(klist_k_list)); fprintf(fp, " klist_node_n_klist: %ld\n", OFFSET(klist_node_n_klist)); fprintf(fp, " klist_node_n_node: %ld\n", OFFSET(klist_node_n_node)); fprintf(fp, " kobject_entry: %ld\n", OFFSET(kobject_entry)); fprintf(fp, " kset_list: %ld\n", OFFSET(kset_list)); fprintf(fp, " kset_kobj: %ld\n", OFFSET(kset_kobj)); fprintf(fp, " request_list_count: %ld\n", OFFSET(request_list_count)); fprintf(fp, " request_cmd_flags: %ld\n", OFFSET(request_cmd_flags)); fprintf(fp, " request_q: %ld\n", OFFSET(request_q)); fprintf(fp, " request_state: %ld\n", OFFSET(request_state)); fprintf(fp, " request_queue_in_flight: %ld\n", OFFSET(request_queue_in_flight)); fprintf(fp, " request_queue_rq: %ld\n", OFFSET(request_queue_rq)); fprintf(fp, " request_queue_mq_ops: %ld\n", OFFSET(request_queue_mq_ops)); fprintf(fp, " request_queue_queue_ctx: %ld\n", OFFSET(request_queue_queue_ctx)); fprintf(fp, " request_queue_queue_hw_ctx: %ld\n", OFFSET(request_queue_queue_hw_ctx)); fprintf(fp, " request_queue_nr_hw_queues: %ld\n", OFFSET(request_queue_nr_hw_queues)); fprintf(fp, " request_queue_hctx_table: %ld\n", OFFSET(request_queue_hctx_table)); fprintf(fp, " blk_mq_ctx_rq_dispatched: %ld\n", OFFSET(blk_mq_ctx_rq_dispatched)); fprintf(fp, " blk_mq_ctx_rq_completed: %ld\n", OFFSET(blk_mq_ctx_rq_completed)); fprintf(fp, " blk_mq_hw_ctx_tags: %ld\n", OFFSET(blk_mq_hw_ctx_tags)); fprintf(fp, " blk_mq_tags_bitmap_tags: %ld\n", OFFSET(blk_mq_tags_bitmap_tags)); fprintf(fp, " blk_mq_tags_breserved_tags: %ld\n", 
OFFSET(blk_mq_tags_breserved_tags)); fprintf(fp, " blk_mq_tags_nr_reserved_tags: %ld\n", OFFSET(blk_mq_tags_nr_reserved_tags)); fprintf(fp, " blk_mq_tags_rqs: %ld\n", OFFSET(blk_mq_tags_rqs)); fprintf(fp, " request_queue_tag_set: %ld\n", OFFSET(request_queue_tag_set)); fprintf(fp, " blk_mq_tag_set_flags: %ld\n", OFFSET(blk_mq_tag_set_flags)); fprintf(fp, " blk_mq_tag_set_shared_tags: %ld\n", OFFSET(blk_mq_tag_set_shared_tags)); fprintf(fp, " subsys_private_subsys: %ld\n", OFFSET(subsys_private_subsys)); fprintf(fp, " subsys_private_klist_devices: %ld\n", OFFSET(subsys_private_klist_devices)); fprintf(fp, " subsystem_kset: %ld\n", OFFSET(subsystem_kset)); fprintf(fp, " file_f_op: %ld\n", OFFSET(file_f_op)); fprintf(fp, " file_private_data: %ld\n", OFFSET(file_private_data)); fprintf(fp, " hstate_order: %ld\n", OFFSET(hstate_order)); fprintf(fp, " hstate_nr_huge_pages: %ld\n", OFFSET(hstate_nr_huge_pages)); fprintf(fp, " hstate_free_huge_pages: %ld\n", OFFSET(hstate_free_huge_pages)); fprintf(fp, " hstate_name: %ld\n", OFFSET(hstate_name)); fprintf(fp, " hugetlbfs_sb_info_hstate: %ld\n", OFFSET(hugetlbfs_sb_info_hstate)); fprintf(fp, " idr_layer_ary: %ld\n", OFFSET(idr_layer_ary)); fprintf(fp, " idr_layer_layer: %ld\n", OFFSET(idr_layer_layer)); fprintf(fp, " idr_layers: %ld\n", OFFSET(idr_layers)); fprintf(fp, " idr_top: %ld\n", OFFSET(idr_top)); fprintf(fp, " idr_cur: %ld\n", OFFSET(idr_cur)); fprintf(fp, " ipc_id_ary_p: %ld\n", OFFSET(ipc_id_ary_p)); fprintf(fp, " ipc_ids_entries: %ld\n", OFFSET(ipc_ids_entries)); fprintf(fp, " ipc_ids_max_id: %ld\n", OFFSET(ipc_ids_max_id)); fprintf(fp, " ipc_ids_ipcs_idr: %ld\n", OFFSET(ipc_ids_ipcs_idr)); fprintf(fp, " ipc_ids_in_use: %ld\n", OFFSET(ipc_ids_in_use)); fprintf(fp, " ipc_namespace_ids: %ld\n", OFFSET(ipc_namespace_ids)); fprintf(fp, " kern_ipc_perm_deleted: %ld\n", OFFSET(kern_ipc_perm_deleted)); fprintf(fp, " kern_ipc_perm_key: %ld\n", OFFSET(kern_ipc_perm_key)); fprintf(fp, " kern_ipc_perm_mode: %ld\n", 
OFFSET(kern_ipc_perm_mode)); fprintf(fp, " kern_ipc_perm_uid: %ld\n", OFFSET(kern_ipc_perm_uid)); fprintf(fp, " kern_ipc_perm_id: %ld\n", OFFSET(kern_ipc_perm_id)); fprintf(fp, " kern_ipc_perm_seq: %ld\n", OFFSET(kern_ipc_perm_seq)); fprintf(fp, " nsproxy_ipc_ns: %ld\n", OFFSET(nsproxy_ipc_ns)); fprintf(fp, " nsproxy_net_ns: %ld\n", OFFSET(nsproxy_net_ns)); fprintf(fp, " shmem_inode_info_swapped: %ld\n", OFFSET(shmem_inode_info_swapped)); fprintf(fp, " shmem_inode_info_vfs_inode: %ld\n", OFFSET(shmem_inode_info_vfs_inode)); fprintf(fp, " shm_file_data_file: %ld\n", OFFSET(shm_file_data_file)); fprintf(fp, " shmid_kernel_shm_file: %ld\n", OFFSET(shmid_kernel_shm_file)); fprintf(fp, " shmid_kernel_shm_nattch: %ld\n", OFFSET(shmid_kernel_shm_nattch)); fprintf(fp, " shmid_kernel_shm_perm: %ld\n", OFFSET(shmid_kernel_shm_perm)); fprintf(fp, " shmid_kernel_shm_segsz: %ld\n", OFFSET(shmid_kernel_shm_segsz)); fprintf(fp, " shmid_kernel_id: %ld\n", OFFSET(shmid_kernel_id)); fprintf(fp, " sem_array_sem_perm: %ld\n", OFFSET(sem_array_sem_perm)); fprintf(fp, " sem_array_sem_id: %ld\n", OFFSET(sem_array_sem_id)); fprintf(fp, " sem_array_sem_nsems: %ld\n", OFFSET(sem_array_sem_nsems)); fprintf(fp, " msg_queue_q_perm: %ld\n", OFFSET(msg_queue_q_perm)); fprintf(fp, " msg_queue_q_id: %ld\n", OFFSET(msg_queue_q_id)); fprintf(fp, " msg_queue_q_cbytes: %ld\n", OFFSET(msg_queue_q_cbytes)); fprintf(fp, " msg_queue_q_qnum: %ld\n", OFFSET(msg_queue_q_qnum)); fprintf(fp, " super_block_s_fs_info: %ld\n", OFFSET(super_block_s_fs_info)); fprintf(fp, " log_ts_nsec: %ld\n", OFFSET(log_ts_nsec)); fprintf(fp, " log_len: %ld\n", OFFSET(log_len)); fprintf(fp, " log_text_len: %ld\n", OFFSET(log_text_len)); fprintf(fp, " log_dict_len: %ld\n", OFFSET(log_dict_len)); fprintf(fp, " log_level: %ld\n", OFFSET(log_level)); fprintf(fp, " log_flags_level: %ld\n", OFFSET(log_flags_level)); fprintf(fp, " log_caller_id: %ld\n", OFFSET(log_caller_id)); fprintf(fp, " printk_info_seq: %ld\n", 
OFFSET(printk_info_seq)); fprintf(fp, " printk_info_ts_nseq: %ld\n", OFFSET(printk_info_ts_nsec)); fprintf(fp, " printk_info_text_len: %ld\n", OFFSET(printk_info_text_len)); fprintf(fp, " printk_info_level: %ld\n", OFFSET(printk_info_level)); fprintf(fp, " printk_info_caller_id: %ld\n", OFFSET(printk_info_caller_id)); fprintf(fp, " printk_info_dev_info: %ld\n", OFFSET(printk_info_dev_info)); fprintf(fp, " dev_printk_info_subsystem: %ld\n", OFFSET(dev_printk_info_subsystem)); fprintf(fp, " dev_printk_info_device: %ld\n", OFFSET(dev_printk_info_device)); fprintf(fp, " prb_desc_ring: %ld\n", OFFSET(prb_desc_ring)); fprintf(fp, " prb_text_data_ring: %ld\n", OFFSET(prb_text_data_ring)); fprintf(fp, " prb_desc_ring_count_bits: %ld\n", OFFSET(prb_desc_ring_count_bits)); fprintf(fp, " prb_desc_ring_descs: %ld\n", OFFSET(prb_desc_ring_descs)); fprintf(fp, " prb_desc_ring_infos: %ld\n", OFFSET(prb_desc_ring_infos)); fprintf(fp, " prb_desc_ring_head_id: %ld\n", OFFSET(prb_desc_ring_head_id)); fprintf(fp, " prb_desc_ring_tail_id: %ld\n", OFFSET(prb_desc_ring_tail_id)); fprintf(fp, " prb_desc_state_var: %ld\n", OFFSET(prb_desc_state_var)); fprintf(fp, " prb_desc_text_blk_lpos: %ld\n", OFFSET(prb_desc_text_blk_lpos)); fprintf(fp, " prb_data_blk_lpos_begin: %ld\n", OFFSET(prb_data_blk_lpos_begin)); fprintf(fp, " prb_data_blk_lpos_next: %ld\n", OFFSET(prb_data_blk_lpos_next)); fprintf(fp, " prb_data_ring_size_bits: %ld\n", OFFSET(prb_data_ring_size_bits)); fprintf(fp, " prb_data_ring_data: %ld\n", OFFSET(prb_data_ring_data)); fprintf(fp, " atomit_long_t_counter: %ld\n", OFFSET(atomic_long_t_counter)); fprintf(fp, " printk_safe_seq_buf_len: %ld\n", OFFSET(printk_safe_seq_buf_len)); fprintf(fp, "printk_safe_seq_buf_message_lost: %ld\n", OFFSET(printk_safe_seq_buf_message_lost)); fprintf(fp, " printk_safe_seq_buf_buffer: %ld\n", OFFSET(printk_safe_seq_buf_buffer)); fprintf(fp, " sched_rt_entity_my_q: %ld\n", OFFSET(sched_rt_entity_my_q)); fprintf(fp, " task_group_parent: %ld\n", 
OFFSET(task_group_parent)); fprintf(fp, " task_group_css: %ld\n", OFFSET(task_group_css)); fprintf(fp, " cgroup_subsys_state_cgroup: %ld\n", OFFSET(cgroup_subsys_state_cgroup)); fprintf(fp, " cgroup_dentry: %ld\n", OFFSET(cgroup_dentry)); fprintf(fp, " cgroup_kn: %ld\n", OFFSET(cgroup_kn)); fprintf(fp, " kernfs_node_name: %ld\n", OFFSET(kernfs_node_name)); fprintf(fp, " kernfs_node_parent: %ld\n", OFFSET(kernfs_node_parent)); fprintf(fp, " task_group_rt_rq: %ld\n", OFFSET(task_group_rt_rq)); fprintf(fp, " rt_rq_tg: %ld\n", OFFSET(rt_rq_tg)); fprintf(fp, " task_group_cfs_rq: %ld\n", OFFSET(task_group_cfs_rq)); fprintf(fp, " cfs_rq_tg: %ld\n", OFFSET(cfs_rq_tg)); fprintf(fp, " task_group_siblings: %ld\n", OFFSET(task_group_siblings)); fprintf(fp, " task_group_children: %ld\n", OFFSET(task_group_children)); fprintf(fp, " task_group_cfs_bandwidth: %ld\n", OFFSET(task_group_cfs_bandwidth)); fprintf(fp, " cfs_rq_throttled: %ld\n", OFFSET(cfs_rq_throttled)); fprintf(fp, " task_group_rt_bandwidth: %ld\n", OFFSET(task_group_rt_bandwidth)); fprintf(fp, " rt_rq_rt_throttled: %ld\n", OFFSET(rt_rq_rt_throttled)); fprintf(fp, " rt_rq_highest_prio: %ld\n", OFFSET(rt_rq_highest_prio)); fprintf(fp, " rt_rq_rt_nr_running: %ld\n", OFFSET(rt_rq_rt_nr_running)); fprintf(fp, " hrtimer_cpu_base_clock_base: %ld\n", OFFSET(hrtimer_cpu_base_clock_base)); fprintf(fp, " hrtimer_clock_base_offset: %ld\n", OFFSET(hrtimer_clock_base_offset)); fprintf(fp, " hrtimer_clock_base_active: %ld\n", OFFSET(hrtimer_clock_base_active)); fprintf(fp, " hrtimer_clock_base_first: %ld\n", OFFSET(hrtimer_clock_base_first)); fprintf(fp, " hrtimer_clock_base_get_time: %ld\n", OFFSET(hrtimer_clock_base_get_time)); fprintf(fp, " hrtimer_base_first: %ld\n", OFFSET(hrtimer_base_first)); fprintf(fp, " hrtimer_base_pending: %ld\n", OFFSET(hrtimer_base_pending)); fprintf(fp, " hrtimer_base_get_time: %ld\n", OFFSET(hrtimer_base_get_time)); fprintf(fp, " hrtimer_node: %ld\n", OFFSET(hrtimer_node)); fprintf(fp, " 
hrtimer_list: %ld\n", OFFSET(hrtimer_list)); fprintf(fp, " hrtimer_softexpires: %ld\n", OFFSET(hrtimer_softexpires)); fprintf(fp, " hrtimer_expires: %ld\n", OFFSET(hrtimer_expires)); fprintf(fp, " hrtimer_function: %ld\n", OFFSET(hrtimer_function)); fprintf(fp, " timerqueue_head_next: %ld\n", OFFSET(timerqueue_head_next)); fprintf(fp, " timerqueue_head_rb_root: %ld\n", OFFSET(timerqueue_head_rb_root)); fprintf(fp, " timerqueue_node_expires: %ld\n", OFFSET(timerqueue_node_expires)); fprintf(fp, " timerqueue_node_node: %ld\n", OFFSET(timerqueue_node_node)); fprintf(fp, " ktime_t_tv64: %ld\n", OFFSET(ktime_t_tv64)); fprintf(fp, " ktime_t_sec: %ld\n", OFFSET(ktime_t_sec)); fprintf(fp, " ktime_t_nsec: %ld\n", OFFSET(ktime_t_nsec)); fprintf(fp, " atomic_t_counter: %ld\n", OFFSET(atomic_t_counter)); fprintf(fp, " percpu_counter_count: %ld\n", OFFSET(percpu_counter_count)); fprintf(fp, " percpu_counter_counters: %ld\n", OFFSET(percpu_counter_counters)); fprintf(fp, " sk_buff_head_next: %ld\n", OFFSET(sk_buff_head_next)); fprintf(fp, " sk_buff_head_qlen: %ld\n", OFFSET(sk_buff_head_qlen)); fprintf(fp, " sk_buff_next: %ld\n", OFFSET(sk_buff_next)); fprintf(fp, " sk_buff_len: %ld\n", OFFSET(sk_buff_len)); fprintf(fp, " sk_buff_data: %ld\n", OFFSET(sk_buff_data)); fprintf(fp, " nlmsghdr_nlmsg_type: %ld\n", OFFSET(nlmsghdr_nlmsg_type)); fprintf(fp, " module_arch: %ld\n", OFFSET(module_arch)); fprintf(fp, " mod_arch_specific_num_orcs: %ld\n", OFFSET(mod_arch_specific_num_orcs)); fprintf(fp, "mod_arch_specific_orc_unwind_ip: %ld\n", OFFSET(mod_arch_specific_orc_unwind_ip)); fprintf(fp, " mod_arch_specific_orc_unwind: %ld\n", OFFSET(mod_arch_specific_orc_unwind)); fprintf(fp, " bpf_prog_aux: %ld\n", OFFSET(bpf_prog_aux)); fprintf(fp, " bpf_prog_type: %ld\n", OFFSET(bpf_prog_type)); fprintf(fp, " bpf_prog_tag: %ld\n", OFFSET(bpf_prog_tag)); fprintf(fp, " bpf_prog_jited_len: %ld\n", OFFSET(bpf_prog_jited_len)); fprintf(fp, " bpf_prog_bpf_func: %ld\n", OFFSET(bpf_prog_bpf_func)); 
fprintf(fp, " bpf_prog_len: %ld\n", OFFSET(bpf_prog_len)); fprintf(fp, " bpf_prog_pages: %ld\n", OFFSET(bpf_prog_pages)); fprintf(fp, " bpf_prog_insnsi: %ld\n", OFFSET(bpf_prog_insnsi)); fprintf(fp, " bpf_map_map_flags: %ld\n", OFFSET(bpf_map_map_flags)); fprintf(fp, " bpf_map_map_type: %ld\n", OFFSET(bpf_map_map_type)); fprintf(fp, " bpf_map_pages: %ld\n", OFFSET(bpf_map_pages)); fprintf(fp, " bpf_map_key_size: %ld\n", OFFSET(bpf_map_key_size)); fprintf(fp, " bpf_map_value_size: %ld\n", OFFSET(bpf_map_value_size)); fprintf(fp, " bpf_map_max_entries: %ld\n", OFFSET(bpf_map_max_entries)); fprintf(fp, " bpf_map_name: %ld\n", OFFSET(bpf_map_name)); fprintf(fp, " bpf_map_user: %ld\n", OFFSET(bpf_map_user)); fprintf(fp, " bpf_map_memory: %ld\n", OFFSET(bpf_map_memory)); fprintf(fp, " bpf_map_memory_pages: %ld\n", OFFSET(bpf_map_memory_pages)); fprintf(fp, " bpf_map_memory_user: %ld\n", OFFSET(bpf_map_memory_user)); fprintf(fp, " bpf_prog_aux_used_map_cnt: %ld\n", OFFSET(bpf_prog_aux_used_map_cnt)); fprintf(fp, " bpf_prog_aux_used_maps: %ld\n", OFFSET(bpf_prog_aux_used_maps)); fprintf(fp, " bpf_prog_aux_load_time: %ld\n", OFFSET(bpf_prog_aux_load_time)); fprintf(fp, " bpf_prog_aux_user: %ld\n", OFFSET(bpf_prog_aux_user)); fprintf(fp, " bpf_prog_aux_name: %ld\n", OFFSET(bpf_prog_aux_name)); fprintf(fp, " user_struct_uid: %ld\n", OFFSET(user_struct_uid)); fprintf(fp, " xarray_xa_head: %ld\n", OFFSET(xarray_xa_head)); fprintf(fp, " xa_node_slots: %ld\n", OFFSET(xa_node_slots)); fprintf(fp, " xa_node_shift: %ld\n", OFFSET(xa_node_shift)); fprintf(fp, " uts_namespace_name: %ld\n", OFFSET(uts_namespace_name)); fprintf(fp, " sbitmap_word_depth: %ld\n", OFFSET(sbitmap_word_depth)); fprintf(fp, " sbitmap_word_word: %ld\n", OFFSET(sbitmap_word_word)); fprintf(fp, " sbitmap_word_cleared: %ld\n", OFFSET(sbitmap_word_cleared)); fprintf(fp, " sbitmap_depth: %ld\n", OFFSET(sbitmap_depth)); fprintf(fp, " sbitmap_shift: %ld\n", OFFSET(sbitmap_shift)); fprintf(fp, " sbitmap_map_nr: 
%ld\n", OFFSET(sbitmap_map_nr)); fprintf(fp, " sbitmap_map: %ld\n", OFFSET(sbitmap_map)); fprintf(fp, " sbitmap_alloc_hint: %ld\n", OFFSET(sbitmap_alloc_hint)); fprintf(fp, " sbitmap_round_robin: %ld\n", OFFSET(sbitmap_round_robin)); fprintf(fp, " sbitmap_queue_sb: %ld\n", OFFSET(sbitmap_queue_sb)); fprintf(fp, " sbitmap_queue_alloc_hint: %ld\n", OFFSET(sbitmap_queue_alloc_hint)); fprintf(fp, " sbitmap_queue_wake_batch: %ld\n", OFFSET(sbitmap_queue_wake_batch)); fprintf(fp, " sbitmap_queue_wake_index: %ld\n", OFFSET(sbitmap_queue_wake_index)); fprintf(fp, " sbitmap_queue_ws: %ld\n", OFFSET(sbitmap_queue_ws)); fprintf(fp, " sbitmap_queue_ws_active: %ld\n", OFFSET(sbitmap_queue_ws_active)); fprintf(fp, " sbitmap_queue_round_robin: %ld\n", OFFSET(sbitmap_queue_round_robin)); fprintf(fp, "sbitmap_queue_min_shallow_depth: %ld\n", OFFSET(sbitmap_queue_min_shallow_depth)); fprintf(fp, " sbq_wait_state_wait_cnt: %ld\n", OFFSET(sbq_wait_state_wait_cnt)); fprintf(fp, " sbq_wait_state_wait: %ld\n", OFFSET(sbq_wait_state_wait)); fprintf(fp, " mm_struct_mm_mt: %ld\n", OFFSET(mm_struct_mm_mt)); fprintf(fp, " maple_tree_ma_root: %ld\n", OFFSET(maple_tree_ma_root)); fprintf(fp, " maple_tree_ma_flags: %ld\n", OFFSET(maple_tree_ma_flags)); fprintf(fp, " maple_node_parent: %ld\n", OFFSET(maple_node_parent)); fprintf(fp, " maple_node_ma64: %ld\n", OFFSET(maple_node_ma64)); fprintf(fp, " maple_node_mr64: %ld\n", OFFSET(maple_node_mr64)); fprintf(fp, " maple_node_slot: %ld\n", OFFSET(maple_node_slot)); fprintf(fp, " maple_arange_64_pivot: %ld\n", OFFSET(maple_arange_64_pivot)); fprintf(fp, " maple_arange_64_slot: %ld\n", OFFSET(maple_arange_64_slot)); fprintf(fp, " maple_arange_64_gap: %ld\n", OFFSET(maple_arange_64_gap)); fprintf(fp, " maple_arange_64_meta: %ld\n", OFFSET(maple_arange_64_meta)); fprintf(fp, " maple_range_64_pivot: %ld\n", OFFSET(maple_range_64_pivot)); fprintf(fp, " maple_range_64_slot: %ld\n", OFFSET(maple_range_64_slot)); fprintf(fp, " maple_metadata_end: %ld\n", 
OFFSET(maple_metadata_end)); fprintf(fp, " maple_metadata_gap: %ld\n", OFFSET(maple_metadata_gap)); fprintf(fp, " zram_mem_pool: %ld\n", OFFSET(zram_mem_pool)); fprintf(fp, " zram_compressor: %ld\n", OFFSET(zram_compressor)); fprintf(fp, " zram_comp_algs: %ld\n", OFFSET(zram_comp_algs)); fprintf(fp, " zram_table_entry_flags: %ld\n", OFFSET(zram_table_entry_flags)); fprintf(fp, " zs_pool_size_class: %ld\n", OFFSET(zs_pool_size_class)); fprintf(fp, " size_class_size: %ld\n", OFFSET(size_class_size)); fprintf(fp, " zspage_huge: %ld\n", OFFSET(zspage_huge)); fprintf(fp, " inactive_task_frame_r15: %ld\n", OFFSET(inactive_task_frame_r15)); fprintf(fp, " inactive_task_frame_r14: %ld\n", OFFSET(inactive_task_frame_r14)); fprintf(fp, " inactive_task_frame_r13: %ld\n", OFFSET(inactive_task_frame_r13)); fprintf(fp, " inactive_task_frame_r12: %ld\n", OFFSET(inactive_task_frame_r12)); fprintf(fp, " inactive_task_frame_flags: %ld\n", OFFSET(inactive_task_frame_flags)); fprintf(fp, " inactive_task_frame_si: %ld\n", OFFSET(inactive_task_frame_si)); fprintf(fp, " inactive_task_frame_di: %ld\n", OFFSET(inactive_task_frame_di)); fprintf(fp, " inactive_task_frame_bx: %ld\n", OFFSET(inactive_task_frame_bx)); fprintf(fp, " thread_struct_es: %ld\n", OFFSET(thread_struct_es)); fprintf(fp, " thread_struct_ds: %ld\n", OFFSET(thread_struct_ds)); fprintf(fp, " thread_struct_fsbase: %ld\n", OFFSET(thread_struct_fsbase)); fprintf(fp, " thread_struct_gsbase: %ld\n", OFFSET(thread_struct_gsbase)); fprintf(fp, " thread_struct_fs: %ld\n", OFFSET(thread_struct_fs)); fprintf(fp, " thread_struct_gs: %ld\n", OFFSET(thread_struct_gs)); fprintf(fp, " bpf_ringbuf_map_map: %ld\n", OFFSET(bpf_ringbuf_map_map)); fprintf(fp, " bpf_ringbuf_map_rb: %ld\n", OFFSET(bpf_ringbuf_map_rb)); fprintf(fp, " bpf_ringbuf_consumer_pos: %ld\n", OFFSET(bpf_ringbuf_consumer_pos)); fprintf(fp, " bpf_ringbuf_nr_pages: %ld\n", OFFSET(bpf_ringbuf_nr_pages)); fprintf(fp, "\n size_table:\n"); fprintf(fp, " page: %ld\n", 
SIZE(page)); fprintf(fp, " page_flags: %ld\n", SIZE(page_flags)); fprintf(fp, " trace_print_flags: %ld\n", SIZE(trace_print_flags)); fprintf(fp, " free_area_struct: %ld\n", SIZE(free_area_struct)); fprintf(fp, " free_area: %ld\n", SIZE(free_area)); fprintf(fp, " zone_struct: %ld\n", SIZE(zone_struct)); fprintf(fp, " zone: %ld\n", SIZE(zone)); fprintf(fp, " kmem_slab_s: %ld\n", SIZE(kmem_slab_s)); fprintf(fp, " slab_s: %ld\n", SIZE(slab_s)); fprintf(fp, " slab: %ld\n", SIZE(slab)); fprintf(fp, " kmem_cache_s: %ld\n", SIZE(kmem_cache_s)); fprintf(fp, " cpucache_s: %ld\n", SIZE(cpucache_s)); fprintf(fp, " array_cache: %ld\n", SIZE(array_cache)); fprintf(fp, " kmem_bufctl_t: %ld\n", SIZE(kmem_bufctl_t)); fprintf(fp, " kmem_cache: %ld\n", SIZE(kmem_cache)); fprintf(fp, " kmem_cache_node: %ld\n", SIZE(kmem_cache_node)); fprintf(fp, " kmem_cache_cpu: %ld\n", SIZE(kmem_cache_cpu)); fprintf(fp, " swap_info_struct: %ld\n", SIZE(swap_info_struct)); fprintf(fp, " vm_area_struct: %ld\n", SIZE(vm_area_struct)); fprintf(fp, " mm_struct: %ld\n", SIZE(mm_struct)); fprintf(fp, " pglist_data: %ld\n", SIZE(pglist_data)); fprintf(fp, " page_cache_bucket: %ld\n", SIZE(page_cache_bucket)); fprintf(fp, " pt_regs: %ld\n", SIZE(pt_regs)); fprintf(fp, " task_struct: %ld\n", SIZE(task_struct)); fprintf(fp, " task_struct_state: %ld\n", SIZE(task_struct_state)); fprintf(fp, " task_struct_flags: %ld\n", SIZE(task_struct_flags)); fprintf(fp, " task_struct_policy: %ld\n", SIZE(task_struct_policy)); fprintf(fp, " thread_info: %ld\n", SIZE(thread_info)); fprintf(fp, " fred_frame: %ld\n", SIZE(fred_frame)); fprintf(fp, " softirq_state: %ld\n", SIZE(softirq_state)); fprintf(fp, " softirq_action: %ld\n", SIZE(softirq_action)); fprintf(fp, " desc_struct: %ld\n", SIZE(desc_struct)); fprintf(fp, " umode_t: %ld\n", SIZE(umode_t)); fprintf(fp, " dentry: %ld\n", SIZE(dentry)); fprintf(fp, " fs_struct: %ld\n", SIZE(fs_struct)); fprintf(fp, " files_struct: %ld\n", SIZE(files_struct)); fprintf(fp, " fdtable: 
%ld\n", SIZE(fdtable)); fprintf(fp, " file: %ld\n", SIZE(file)); fprintf(fp, " inode: %ld\n", SIZE(inode)); fprintf(fp, " vfsmount: %ld\n", SIZE(vfsmount)); fprintf(fp, " mount: %ld\n", SIZE(mount)); fprintf(fp, " super_block: %ld\n", SIZE(super_block)); fprintf(fp, " irqdesc: %ld\n", SIZE(irqdesc)); fprintf(fp, " module: %ld\n", SIZE(module)); fprintf(fp, " module_memory: %ld\n", SIZE(module_memory)); fprintf(fp, " module_sect_attr: %ld\n", SIZE(module_sect_attr)); fprintf(fp, " list_head: %ld\n", SIZE(list_head)); fprintf(fp, " hlist_head: %ld\n", SIZE(hlist_head)); fprintf(fp, " hlist_node: %ld\n", SIZE(hlist_node)); fprintf(fp, " irq_cpustat_t: %ld\n", SIZE(irq_cpustat_t)); fprintf(fp, " cpuinfo_x86: %ld\n", SIZE(cpuinfo_x86)); fprintf(fp, " cpuinfo_ia64: %ld\n", SIZE(cpuinfo_ia64)); fprintf(fp, " timer_list: %ld\n", SIZE(timer_list)); fprintf(fp, " timer_vec_root: %ld\n", SIZE(timer_vec_root)); fprintf(fp, " timer_vec: %ld\n", SIZE(timer_vec)); fprintf(fp, " tvec_root_s: %ld\n", SIZE(tvec_root_s)); fprintf(fp, " tvec_s: %ld\n", SIZE(tvec_s)); fprintf(fp, " tvec_t_base_s: %ld\n", SIZE(tvec_t_base_s)); fprintf(fp, " wait_queue: %ld\n", SIZE(wait_queue)); fprintf(fp, " __wait_queue: %ld\n", SIZE(__wait_queue)); fprintf(fp, " wait_queue_entry: %ld\n", SIZE(wait_queue_entry)); fprintf(fp, " device: %ld\n", SIZE(device)); fprintf(fp, " net_device: %ld\n", SIZE(net_device)); fprintf(fp, " sock: %ld\n", SIZE(sock)); fprintf(fp, " inet_sock: %ld\n", SIZE(inet_sock)); fprintf(fp, " socket: %ld\n", SIZE(socket)); fprintf(fp, " in6_addr: %ld\n", SIZE(in6_addr)); fprintf(fp, " signal_struct: %ld\n", SIZE(signal_struct)); fprintf(fp, " sigpending_signal: %ld\n", SIZE(sigpending_signal)); fprintf(fp, " signal_queue: %ld\n", SIZE(signal_queue)); fprintf(fp, " sigqueue: %ld\n", SIZE(sigqueue)); fprintf(fp, " k_sigaction: %ld\n", SIZE(k_sigaction)); fprintf(fp, " sighand_struct: %ld\n", SIZE(sighand_struct)); fprintf(fp, " resource_entry_t: %ld\n", SIZE(resource_entry_t)); 
fprintf(fp, " resource: %ld\n", SIZE(resource)); fprintf(fp, " runqueue: %ld\n", SIZE(runqueue)); fprintf(fp, " irq_desc_t: %ld\n", SIZE(irq_desc_t)); fprintf(fp, " irq_data: %ld\n", SIZE(irq_data)); fprintf(fp, " irq_common_data: %ld\n", SIZE(irq_common_data)); fprintf(fp, " task_union: %ld\n", SIZE(task_union)); fprintf(fp, " thread_union: %ld\n", SIZE(thread_union)); fprintf(fp, " prio_array: %ld\n", SIZE(prio_array)); fprintf(fp, " user_regs_struct: %ld\n", SIZE(user_regs_struct)); fprintf(fp, " switch_stack: %ld\n", SIZE(switch_stack)); fprintf(fp, " vm_area_struct_vm_flags: %ld\n", SIZE(vm_area_struct_vm_flags)); fprintf(fp, " e820map: %ld\n", SIZE(e820map)); fprintf(fp, " e820entry: %ld\n", SIZE(e820entry)); fprintf(fp, " cpu_s: %ld\n", SIZE(cpu_s)); fprintf(fp, " pgd_t: %ld\n", SIZE(pgd_t)); fprintf(fp, " kallsyms_header: %ld\n", SIZE(kallsyms_header)); fprintf(fp, " kallsyms_symbol: %ld\n", SIZE(kallsyms_symbol)); fprintf(fp, " kallsyms_section: %ld\n", SIZE(kallsyms_section)); fprintf(fp, " block_device: %ld\n", SIZE(block_device)); fprintf(fp, " blk_major_name: %ld\n", SIZE(blk_major_name)); fprintf(fp, " address_space: %ld\n", SIZE(address_space)); fprintf(fp, " gendisk: %ld\n", SIZE(gendisk)); fprintf(fp, " irq_ctx: %ld\n", SIZE(irq_ctx)); fprintf(fp, " char_device_struct: %ld\n", SIZE(char_device_struct)); fprintf(fp, " spinlock_t: %ld\n", SIZE(spinlock_t)); fprintf(fp, " radix_tree_root: %ld\n", SIZE(radix_tree_root)); fprintf(fp, " radix_tree_node: %ld\n", SIZE(radix_tree_node)); fprintf(fp, " x8664_pda: %ld\n", SIZE(x8664_pda)); fprintf(fp, " ppc64_paca: %ld\n", SIZE(ppc64_paca)); fprintf(fp, " gate_struct: %ld\n", SIZE(gate_struct)); fprintf(fp, " tss_struct: %ld\n", SIZE(tss_struct)); fprintf(fp, " task_struct_start_time: %ld\n", SIZE(task_struct_start_time)); fprintf(fp, " task_struct_utime: %ld\n", SIZE(task_struct_utime)); fprintf(fp, " task_struct_stime: %ld\n", SIZE(task_struct_stime)); fprintf(fp, " cputime_t: %ld\n", SIZE(cputime_t)); 
fprintf(fp, " mem_section: %ld\n", SIZE(mem_section)); fprintf(fp, " pid_link: %ld\n", SIZE(pid_link)); fprintf(fp, " upid: %ld\n", SIZE(upid)); fprintf(fp, " pid: %ld\n", SIZE(pid)); fprintf(fp, " unwind_table: %ld\n", SIZE(unwind_table)); fprintf(fp, " rlimit: %ld\n", SIZE(rlimit)); fprintf(fp, " cfs_rq: %ld\n", SIZE(cfs_rq)); fprintf(fp, " pcpu_info: %ld\n", SIZE(pcpu_info)); fprintf(fp, " vcpu_struct: %ld\n", SIZE(vcpu_struct)); fprintf(fp, " cdev: %ld\n", SIZE(cdev)); fprintf(fp, " probe: %ld\n", SIZE(probe)); fprintf(fp, " kobj_map: %ld\n", SIZE(kobj_map)); fprintf(fp, " cpu_context_save: %ld\n", SIZE(cpu_context_save)); fprintf(fp, " elf_prstatus: %ld\n", SIZE(elf_prstatus)); fprintf(fp, " note_buf: %ld\n", SIZE(note_buf)); fprintf(fp, " unwind_idx: %ld\n", SIZE(unwind_idx)); fprintf(fp, " s390_stack_frame: %ld\n", SIZE(s390_stack_frame)); fprintf(fp, " percpu_data: %ld\n", SIZE(percpu_data)); fprintf(fp, " sched_entity: %ld\n", SIZE(sched_entity)); fprintf(fp, " kernel_stat: %ld\n", SIZE(kernel_stat)); fprintf(fp, " subsystem: %ld\n", SIZE(subsystem)); fprintf(fp, " class_private: %ld\n", SIZE(class_private)); fprintf(fp, " rq_in_flight: %ld\n", SIZE(rq_in_flight)); fprintf(fp, " class_private_devices: %ld\n", SIZE(class_private_devices)); fprintf(fp, " hstate: %ld\n", SIZE(hstate)); fprintf(fp, " ipc_ids: %ld\n", SIZE(ipc_ids)); fprintf(fp, " shmid_kernel: %ld\n", SIZE(shmid_kernel)); fprintf(fp, " sem_array: %ld\n", SIZE(sem_array)); fprintf(fp, " msg_queue: %ld\n", SIZE(msg_queue)); fprintf(fp, " log: %ld\n", SIZE(log)); fprintf(fp, " log_level: %ld\n", SIZE(log_level)); fprintf(fp, " rt_rq: %ld\n", SIZE(rt_rq)); fprintf(fp, " task_group: %ld\n", SIZE(task_group)); fprintf(fp, " vmap_area: %ld\n", SIZE(vmap_area)); fprintf(fp, " vmap_node: %ld\n", SIZE(vmap_node)); fprintf(fp, " hrtimer_clock_base: %ld\n", SIZE(hrtimer_clock_base)); fprintf(fp, " hrtimer_base: %ld\n", SIZE(hrtimer_base)); fprintf(fp, " timer_base: %ld\n", SIZE(timer_base)); fprintf(fp, " 
tnt: %ld\n", SIZE(tnt)); fprintf(fp, " taint_flag: %ld\n", SIZE(taint_flag)); fprintf(fp, " nlmsghdr: %ld\n", SIZE(nlmsghdr)); fprintf(fp, " nlmsghdr_nlmsg_type: %ld\n", SIZE(nlmsghdr_nlmsg_type)); fprintf(fp, " sk_buff_head_qlen: %ld\n", SIZE(sk_buff_head_qlen)); fprintf(fp, " sk_buff_len: %ld\n", SIZE(sk_buff_len)); fprintf(fp, " orc_entry: %ld\n", SIZE(orc_entry)); fprintf(fp, " bpf_prog: %ld\n", SIZE(bpf_prog)); fprintf(fp, " bpf_prog_aux: %ld\n", SIZE(bpf_prog_aux)); fprintf(fp, " bpf_map: %ld\n", SIZE(bpf_map)); fprintf(fp, " bpf_insn: %ld\n", SIZE(bpf_insn)); fprintf(fp, " xarray: %ld\n", SIZE(xarray)); fprintf(fp, " xa_node: %ld\n", SIZE(xa_node)); fprintf(fp, " printk_info: %ld\n", SIZE(printk_info)); fprintf(fp, " printk_ringbuffer: %ld\n", SIZE(printk_ringbuffer)); fprintf(fp, " prb_desc: %ld\n", SIZE(prb_desc)); fprintf(fp, " printk_safe_seq_buf_buffer: %ld\n", SIZE(printk_safe_seq_buf_buffer)); fprintf(fp, " sbitmap_word: %ld\n", SIZE(sbitmap_word)); fprintf(fp, " sbitmap: %ld\n", SIZE(sbitmap)); fprintf(fp, " sbitmap_queue: %ld\n", SIZE(sbitmap_queue)); fprintf(fp, " sbq_wait_state: %ld\n", SIZE(sbq_wait_state)); fprintf(fp, " blk_mq_tags: %ld\n", SIZE(blk_mq_tags)); fprintf(fp, " maple_tree: %ld\n", SIZE(maple_tree)); fprintf(fp, " maple_node: %ld\n", SIZE(maple_node)); fprintf(fp, " percpu_counter: %ld\n", SIZE(percpu_counter)); fprintf(fp, " cpumask_t: %ld\n", SIZE(cpumask_t)); fprintf(fp, " bpf_ringbuf_map: %ld\n", SIZE(bpf_ringbuf_map)); fprintf(fp, "\n array_table:\n"); /* * Use get_array_length() for those fields not set up at init-time; * ARRAY_LENGTH() will work for the rest. 
*/ fprintf(fp, " kmem_cache_s_name: %d\n", ARRAY_LENGTH(kmem_cache_s_name)); fprintf(fp, " kmem_cache_s_c_name: %d\n", ARRAY_LENGTH(kmem_cache_s_c_name)); fprintf(fp, " kmem_cache_s_array: %d\n", ARRAY_LENGTH(kmem_cache_s_array)); fprintf(fp, " kmem_cache_s_cpudata: %d\n", ARRAY_LENGTH(kmem_cache_s_cpudata)); fprintf(fp, " log_buf: %d\n", ARRAY_LENGTH(log_buf)); fprintf(fp, " irq_desc: %d\n", ARRAY_LENGTH(irq_desc)); fprintf(fp, " irq_action: %d\n", ARRAY_LENGTH(irq_action)); fprintf(fp, " timer_vec_vec: %d\n", get_array_length("timer_vec.vec", NULL, SIZE(list_head))); fprintf(fp, " timer_vec_root_vec: %d\n", get_array_length("timer_vec_root.vec", NULL, SIZE(list_head))); fprintf(fp, " tvec_root_s_vec: %d\n", get_array_length("tvec_root_s.vec", NULL, SIZE(list_head))); fprintf(fp, " tvec_s_vec: %d\n", get_array_length("tvec_s.vec", NULL, SIZE(list_head))); fprintf(fp, " page_hash_table: %d\n", ARRAY_LENGTH(page_hash_table)); fprintf(fp, " net_device_name: %d\n", ARRAY_LENGTH(net_device_name)); fprintf(fp, " neigh_table_hash_buckets: %d\n", get_array_length("neigh_table.hash_buckets", NULL, sizeof(void *))); fprintf(fp, " neighbour_ha: %d\n", get_array_length("neighbour.ha", NULL, sizeof(char))); fprintf(fp, " swap_info: %d\n", get_array_length("swap_info", NULL, 0)); fprintf(fp, " pglist_data_node_zones: %d\n", ARRAY_LENGTH(pglist_data_node_zones)); fprintf(fp, " zone_struct_free_area: %d\n", ARRAY_LENGTH(zone_struct_free_area)); fprintf(fp, " zone_free_area: %d\n", ARRAY_LENGTH(zone_free_area)); fprintf(fp, " free_area: %d\n", ARRAY_LENGTH(free_area)); fprintf(fp, " free_area_DIMENSION: %d\n", ARRAY_LENGTH(free_area_DIMENSION)); fprintf(fp, " prio_array_queue: %d\n", get_array_length("prio_array.queue", NULL, SIZE(list_head))); fprintf(fp, " height_to_maxindex: %d\n", ARRAY_LENGTH(height_to_maxindex)); fprintf(fp, " height_to_maxnodes: %d\n", ARRAY_LENGTH(height_to_maxnodes)); fprintf(fp, " pid_hash: %d\n", ARRAY_LENGTH(pid_hash)); fprintf(fp, " kmem_cache_node: 
%d\n", ARRAY_LENGTH(kmem_cache_node)); fprintf(fp, " kmem_cache_cpu_slab: %d\n", ARRAY_LENGTH(kmem_cache_cpu_slab)); fprintf(fp, " rt_prio_array_queue: %d\n", ARRAY_LENGTH(rt_prio_array_queue)); fprintf(fp, " task_struct_rlim: %d\n", ARRAY_LENGTH(task_struct_rlim)); fprintf(fp, " signal_struct_rlim: %d\n", ARRAY_LENGTH(signal_struct_rlim)); fprintf(fp, " vm_numa_stat: %d\n", ARRAY_LENGTH(vm_numa_stat)); fprintf(fp, " pid_numbers: %d\n", ARRAY_LENGTH(pid_numbers)); if (spec) { int in_size_table, in_array_table, arrays, offsets, sizes; in_size_table = in_array_table = arrays = offsets = sizes = 0; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "size_table:")) in_size_table = TRUE; if (strstr(buf, "array_table:")) { in_array_table = TRUE; in_size_table = FALSE; } if (strstr(buf, spec)) { if (in_size_table) { if (!sizes) fprintf(pc->saved_fp, "%s size_table:\n", offsets ? "\n" : ""); sizes++; } else if (in_array_table) { if (!arrays) fprintf(pc->saved_fp, "%s array_table:\n", offsets || sizes ? 
				"\n" : "");
				arrays++;
			} else {
				if (!offsets)
					fprintf(pc->saved_fp, " offset_table:\n");
				offsets++;
			}
			/* Stop before spilling into the next table's header. */
			if (strstr(buf, " size_table:") ||
			    strstr(buf, " array_table:") ||
			    strstr(buf, " offset_table:"))
				break;
			fprintf(pc->saved_fp, "%s", buf);
		}
	}
	close_tmpfile();
	}

	/*
	 * Re-read the dumped tables and emit them as C initializers for a
	 * builtin_debug_table named after this kernel release.
	 */
	if (makestruct) {
		fprintf(pc->saved_fp,
			"static struct builtin_debug_table %s;\n\n", revname);

		rewind(pc->tmpfile);
		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			if (strstr(buf, " offset_table:\n")) {
				fprintf(pc->saved_fp,
					"static struct offset_table %s_offset_table = {\n",
					revname);
				continue;
			}
			if (strstr(buf, " size_table:\n")) {
				fprintf(pc->saved_fp,
					"static struct size_table %s_size_table = {\n",
					revname);
				continue;
			}
			if (strstr(buf, " array_table:\n")) {
				fprintf(pc->saved_fp,
					"static struct array_table %s_array_table = {\n",
					revname);
				continue;
			}
			/* A blank line closes the initializer in progress. */
			if (STREQ(buf, "\n")) {
				fprintf(pc->saved_fp, "};\n\n");
				continue;
			}
			fprintf(pc->saved_fp, "%s,\n", strip_linefeeds(buf));
		}
		fprintf(pc->saved_fp, "};\n\n");
		close_tmpfile();

		fprintf(fp, "static struct builtin_debug_table %s = {\n", revname);
		fprintf(fp, " release: \"%s\",\n", uts->release);
		fprintf(fp, " machine_type: \"%s\",\n", pc->machine_type);
		fprintf(fp, " offset_table: &%s_offset_table,\n", revname);
		fprintf(fp, " size_table: &%s_size_table,\n", revname);
		fprintf(fp, " array_table: &%s_array_table,\n", revname);
		fprintf(fp, "};\n\n");
	}

	pc->flags |= data_debug;
}

/*
 * Small cache of function-argument counts keyed by function start address;
 * avoids repeated gdb round trips for the same function.
 */
#define NUMARGS_CACHE_ENTRIES (100)

static struct numargs_cache {
	ulong function;		/* function start address; 0 == unused slot */
	int numargs;		/* cached result; -1 if the gdb lookup failed */
} numargs_cache[NUMARGS_CACHE_ENTRIES] = { {0} };

static int numargs_cache_index = 0;

/*
 * Return the number of arguments taken by the function containing callpc,
 * or -1 if it cannot be determined.  Results (including failures) are
 * cached per function start address.
 */
int
get_function_numargs(ulong callpc)
{
	int i;
	struct numargs_cache *na;
	struct gnu_request *req;
	int retval;
	ulong func;

	func = closest_symbol_value(callpc);

	if (!func)
		return -1;

	/* Probe the cache; remember the first empty slot for insertion. */
	for (i = 0; i < NUMARGS_CACHE_ENTRIES; i++) {
		na = &numargs_cache[i];

		if (!na->function) {
			numargs_cache_index = i;
			break;
		}

		if (na->function == func)
			return na->numargs;
	}

	/* Not cached: ask gdb for the argument count. */
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->buf = GETBUF(BUFSIZE);
	req->command = GNU_FUNCTION_NUMARGS;
	req->flags |= GNU_RETURN_ON_ERROR;
	req->pc = func;

	gdb_interface(req);

	if (req->flags & GNU_COMMAND_FAILED) {
		retval = -1;
		goto func_done;
	}

	retval = (int)req->value;

func_done:
	FREEBUF(req->buf);
	FREEBUF(req);

	/* Store at the rotating index (wraps, overwriting old entries). */
	numargs_cache_index %= NUMARGS_CACHE_ENTRIES;
	na = &numargs_cache[numargs_cache_index++];
	na->function = func;
	na->numargs = retval;

	return retval;
}

/*
 * help -c output
 */
void
dump_numargs_cache(void)
{
	int i;
	struct numargs_cache *na;
	char buf[BUFSIZE];

	fprintf(fp, "numargs_cache_index: %d\n", numargs_cache_index);

	for (i = 0; i < NUMARGS_CACHE_ENTRIES; i++) {
		na = &numargs_cache[i];

		if (!na->function)
			break;

		fprintf(fp, "%lx (%s): %d\n",
			na->function,
			value_to_symstr(na->function, buf, 0),
			na->numargs);
	}
}

/*
 * This is the call-back function that is passed to bfd_map_over_sections().
 * Based upon the request, check whether the passed-in section has what
 * the caller needs.  The MODULE_SECTIONS code is tricky because it has
 * to keep a running alignment value as it walks through the section
 * headers in order to eventually calculate the module's base data address.
 */
static void
section_header_info(bfd *bfd, asection *section, void *reqptr)
{
	int i;
	struct load_module *lm;
	ulong request;
	asection **sec;
	ulong section_end_address;

	request = ((ulong)reqptr);

	switch (request)
	{
	case (ulong)KERNEL_SECTIONS:
		/* Append this section to the NULL-terminated st->sections array. */
		sec = (asection **)st->sections;
		for (i = 0; (i < st->bfd->section_count) && *sec; i++)
			sec++;
		*sec = section;

		/* Record the kernel's init-text boundaries. */
		if (STREQ(bfd_section_name(section), ".text.init") ||
		    STREQ(bfd_section_name(section), ".init.text")) {
			kt->stext_init = (ulong)
				bfd_section_vma(section);
			kt->etext_init = kt->stext_init +
				(ulong)bfd_section_size(section);
		}

		if (STREQ(bfd_section_name(section), ".text")) {
			st->first_section_start = (ulong)
				bfd_section_vma(section);
		}

		/* Flag stripped/dwarfextract-style files with no loadable data. */
		if (STREQ(bfd_section_name(section), ".text") ||
		    STREQ(bfd_section_name(section), ".data")) {
			if (!(bfd_section_flags(section) & SEC_LOAD))
				st->flags |= NO_SEC_LOAD;
			if (!(bfd_section_flags(section) & SEC_HAS_CONTENTS))
				st->flags |= NO_SEC_CONTENTS;
		}

		/* Remember unwind-table locations for DWARF backtracing. */
		if (STREQ(bfd_section_name(section), ".eh_frame")) {
			st->dwarf_eh_frame_file_offset = (off_t)section->filepos;
			st->dwarf_eh_frame_size = (ulong)bfd_section_size(section);
		}
		if (STREQ(bfd_section_name(section), ".debug_frame")) {
			st->dwarf_debug_frame_file_offset = (off_t)section->filepos;
			st->dwarf_debug_frame_size = (ulong)bfd_section_size(section);
		}

		/* Track the highest section end address seen so far. */
		if (st->first_section_start != 0) {
			section_end_address =
				(ulong) bfd_section_vma(section) +
				(ulong) bfd_section_size(section);
			if (section_end_address > st->last_section_end)
				st->last_section_end = section_end_address;
		}
		break;

	case (ulong)MODULE_SECTIONS:
		lm = st->current;
		store_section_data(lm, bfd, section);
		break;

	case (ulong)VERIFY_SECTIONS:
		if (STREQ(bfd_section_name(section), ".text") ||
		    STREQ(bfd_section_name(section), ".data")) {
			if (!(bfd_section_flags(section) & SEC_LOAD))
				st->flags |= NO_SEC_LOAD;
			if (!(bfd_section_flags(section) & SEC_HAS_CONTENTS))
				st->flags |= NO_SEC_CONTENTS;
		}
		if (STREQ(bfd_section_name(section), ".eh_frame")) {
			st->dwarf_eh_frame_file_offset = (off_t)section->filepos;
			st->dwarf_eh_frame_size = (ulong)bfd_section_size(section);
		}
		if (STREQ(bfd_section_name(section), ".debug_frame")) {
			st->dwarf_debug_frame_file_offset = (off_t)section->filepos;
			st->dwarf_debug_frame_size = (ulong)bfd_section_size(section);
		}
		break;

	default:
		error(FATAL, "invalid call to section_header_info\n");
		break;
	}
}

/*
 * Emulate insmod by calculating the priorities of each section.
 * The priority number will be used later by calculate_load_order()
 * to determine the starting addresses of the text and data
 * sections.
 *
 * insmod uses the following code sequence -- which references the actual ELF
 * section header structure data:
 *
 *	ac = 0;
 *	if (a->name[0] != '.' || strlen(a->name) != 10 ||
 *	    strcmp(a->name + 5, ".init")) ac |= 32;
 *	if (af & SHF_ALLOC) ac |= 16;
 *	if (!(af & SHF_WRITE)) ac |= 8;
 *	if (af & SHF_EXECINSTR) ac |= 4;
 *	if (a->header.sh_type != SHT_NOBITS) ac |= 2;
 *
 * BFD abstracts the ELF section header into an asection structure, so this
 * code determines the priority using the relevant logic.
 */
static void
store_section_data(struct load_module *lm, bfd *bfd, asection *section)
{
	int i;
	int prio;
	char *name;

	prio = 0;
	name = (char *)bfd_section_name(section);

	/* Mirror insmod's priority bits using BFD flag equivalents. */
	if (name[0] != '.'
	    || strlen(name) != 10 ||
	    strcmp(name + 5, ".init"))
		prio |= 32;
	if (section->flags & SEC_ALLOC)
		prio |= 16;
	if (section->flags & SEC_READONLY)
		prio |= 8;
	if (section->flags & SEC_CODE)
		prio |= 4;
	if (!STREQ(name, ".bss"))
		prio |= 2;

	/* Record this section in the next free mod_section_data slot. */
	i = lm->mod_sections;
	lm->mod_section_data[i].section = section;
	lm->mod_section_data[i].priority = prio;
	lm->mod_section_data[i].flags = section->flags & ~SEC_FOUND;
	lm->mod_section_data[i].size = bfd_section_size(section);
	lm->mod_section_data[i].offset = 0;
	lm->mod_section_data[i].addr = 0;
	/* NOTE(review): the strncpy branch copies MAX_MOD_SEC_NAME-1 bytes and
	 * relies on the destination already being NUL-filled -- confirm. */
	if (strlen(name) < MAX_MOD_SEC_NAME)
		strcpy(lm->mod_section_data[i].name, name);
	else
		strncpy(lm->mod_section_data[i].name, name, MAX_MOD_SEC_NAME-1);

	/*
	 * The percpu section isn't included in kallsyms or module_core area.
	 */
	if (lm->mod_percpu &&
	    (STREQ(name,".data.percpu") || STREQ(name, ".data..percpu"))) {
		lm->mod_percpu_size = bfd_section_size(section);
		lm->mod_section_data[i].flags |= SEC_FOUND;
		lm->mod_section_data[i].addr = lm->mod_percpu;
	}
	lm->mod_sections += 1;
}

/*
 * insmod first calculates a priority for each module section, and re-orders
 * the sections from their ELF object file position -- that priority was
 * determined in store_section_priority().  Now, based upon a priority-based
 * ordering, this routine calculates the starting offset for each section.
 * This is the code segment from insmod that is being emulated here:
 *
 *  unsigned long
 *  obj_load_size (struct obj_file *f)
 *  {
 *    unsigned long dot = 0;
 *    struct obj_section *sec;
 *
 *    /+ Finalize the positions of the sections relative to one another. +/
 *
 *    for (sec = f->load_order; sec ; sec = sec->load_next)
 *      {
 *        ElfW(Addr) align;
 *
 *        align = sec->header.sh_addralign;
 *        if (align && (dot & (align - 1)))
 *          dot = (dot | (align - 1)) + 1;
 *
 *        sec->header.sh_addr = dot;
 *        dot += sec->header.sh_size;
 *      }
 *
 *    return dot;
 *  }
 *
 * Another insmod hack extends the .kstrtab section with a string containing
 * the name of the module.  If the .kstrtab comes before the .data section,
 * it in turn gets bumped up.
 *
 * BFD abstracts the ELF section header into an asection structure, so this
 * code determines the priority using the relevant logic.
 *
 * Later versions of insmod do the work for us by creating pseudo-symbols
 * that contain the base address of the text, rodata, data and bss sections.
 * When that's the case, veer off to check_insmod_builtin() to potentially
 * override the offset value calculated here.
 */
static void
calculate_load_order_v1(struct load_module *lm, bfd *bfd)
{
	int i;
	asection *section;
	ulong alignment;
	ulong offset;

	offset = 0;
	switch (kt->flags & (KMOD_V1|KMOD_V2))
	{
	case KMOD_V1:
		offset = lm->mod_size_of_struct;
		break;
	case KMOD_V2:
		offset = lm->mod_base;
		break;
	}

	/* Re-order the sections by insmod priority, then lay them out. */
	qsort(&lm->mod_section_data[0], lm->mod_sections,
		sizeof(struct mod_section_data), compare_prios);

	for (i = (lm->mod_sections-1); i >= 0; i--) {
		section = lm->mod_section_data[i].section;

		/* Round the offset up to the section's alignment. */
		alignment = power(2, bfd_section_alignment(section));

		if (alignment && (offset & (alignment - 1)))
			offset = (offset | (alignment - 1)) + 1;

		lm->mod_section_data[i].offset = offset;

		if (CRASHDEBUG(1))
			fprintf(fp, "%12s  prio: %x  flags: %x offset: %lx\n",
				lm->mod_section_data[i].name,
				lm->mod_section_data[i].priority,
				lm->mod_section_data[i].flags,
				lm->mod_section_data[i].offset);

		if (st->flags & INSMOD_BUILTIN)
			check_insmod_builtin(lm, i, &offset);

		if (STREQ(lm->mod_section_data[i].name, ".text"))
			lm->mod_text_start = lm->mod_base + offset;

		if (STREQ(lm->mod_section_data[i].name, ".data"))
			lm->mod_data_start = lm->mod_base + offset;

		if (STREQ(lm->mod_section_data[i].name, ".bss"))
			lm->mod_bss_start = lm->mod_base + offset;

		if (STREQ(lm->mod_section_data[i].name, ".rodata"))
			lm->mod_rodata_start = lm->mod_base + offset;

		offset += bfd_section_size(section);

		/* insmod appends the module name string to .kstrtab. */
		if (STREQ(bfd_section_name(section), ".kstrtab"))
			offset += strlen(lm->mod_name)+1;
	}
}

/*
 * Later versions of kmod no longer get the help from insmod,
 * and while the heuristics
might work, it's relatively * straightforward to just try to match the sections in the object file * with exported symbols. * * This works well if kallsyms is set, but may not work so well in other * instances. */ static void calculate_load_order_v2(struct load_module *lm, bfd *bfd, int dynamic, void *minisyms, long symcount, unsigned int size) { struct syment *s1, *s2; ulong sec_start; bfd_byte *from, *fromend; asymbol *store; asymbol *sym; symbol_info syminfo; char *secname; int i; if ((store = bfd_make_empty_symbol(bfd)) == NULL) error(FATAL, "bfd_make_empty_symbol() failed\n"); s1 = lm->mod_symtable; s2 = lm->mod_symend; while (s1 < s2) { ulong sym_offset = s1->value - lm->mod_base; if (MODULE_PSEUDO_SYMBOL(s1)) { s1++; continue; } /* Skip over symbols whose sections have been identified. */ for (i = 0; i < lm->mod_sections; i++) { if ((lm->mod_section_data[i].flags & SEC_FOUND) == 0) continue; if (sym_offset >= lm->mod_section_data[i].offset && sym_offset < lm->mod_section_data[i].offset + lm->mod_section_data[i].size) { break; } } /* Matched one of the sections. Skip symbol. */ if (i < lm->mod_sections) { if (CRASHDEBUG(2)) { fprintf(fp, "skip %lx %s %s\n", s1->value, s1->name, lm->mod_section_data[i].name); } s1++; continue; } /* Find the symbol in the object file. */ from = (bfd_byte *) minisyms; fromend = from + symcount * size; secname = NULL; for (; from < fromend; from += size) { if ((sym = bfd_minisymbol_to_symbol(bfd, dynamic, from, store)) == NULL) error(FATAL, "bfd_minisymbol_to_symbol() failed\n"); bfd_get_symbol_info(bfd, sym, &syminfo); if (CRASHDEBUG(3)) { fprintf(fp,"matching sym %s %lx against bfd %s %lx\n", s1->name, (long) s1->value, syminfo.name, (long) syminfo.value); } if (strcmp(syminfo.name, s1->name) == 0) { secname = (char *)bfd_section_name(sym->section); break; } } if (secname == NULL) { if (CRASHDEBUG(1)) { fprintf(fp, "symbol %s not found in module\n", s1->name); } s1++; continue; } /* Match the section it came in. 
*/ for (i = 0; i < lm->mod_sections; i++) { if (STREQ(lm->mod_section_data[i].name, secname)) { break; } } if (i == lm->mod_sections) { fprintf(fp, "?? Section %s not found for symbol %s\n", secname, s1->name); s1++; continue; } if (lm->mod_section_data[i].flags & SEC_FOUND) { s1++; continue; } /* Update the offset information for the section */ sec_start = s1->value - syminfo.value; // sec_end = sec_start + lm->mod_section_data[i].size; lm->mod_section_data[i].offset = sec_start - lm->mod_base; lm->mod_section_data[i].flags |= SEC_FOUND; if (CRASHDEBUG(2)) { fprintf(fp, "update sec offset sym %s @ %lx val %lx section %s\n", s1->name, s1->value, (ulong)syminfo.value, secname); } if (strcmp(secname, ".text") == 0) lm->mod_text_start = sec_start; if (strcmp(secname, ".bss") == 0) lm->mod_bss_start = sec_start; if (strcmp(secname, ".data") == 0) lm->mod_data_start = sec_start; if (strcmp(secname, ".data") == 0) lm->mod_data_start = sec_start; if (strcmp(secname, ".rodata") == 0) lm->mod_rodata_start = sec_start; s1++; } } /* Linux 6.4 and later */ static void calculate_load_order_6_4(struct load_module *lm, bfd *bfd, int dynamic, void *minisyms, long symcount, unsigned int size) { struct syment *s1, *s2; ulong sec_start; bfd_byte *from, *fromend; asymbol *store; asymbol *sym; symbol_info syminfo; bfd_vma secaddr; char *secname; int i, t; if ((store = bfd_make_empty_symbol(bfd)) == NULL) error(FATAL, "bfd_make_empty_symbol() failed\n"); for_each_mod_mem_type(t) { s1 = lm->symtable[t]; s2 = lm->symend[t]; while (s1 < s2) { if (MODULE_PSEUDO_SYMBOL(s1)) { s1++; continue; } /* Skip over symbols whose sections have been identified. */ for (i = 0; i < lm->mod_sections; i++) { if ((lm->mod_section_data[i].flags & SEC_FOUND) == 0) continue; if (s1->value >= lm->mod_section_data[i].addr && s1->value < lm->mod_section_data[i].addr + lm->mod_section_data[i].size) break; } /* Matched one of the sections. Skip symbol. 
*/ if (i < lm->mod_sections) { if (CRASHDEBUG(2)) fprintf(fp, "skip %lx %s %s\n", s1->value, s1->name, lm->mod_section_data[i].name); s1++; continue; } /* Find the symbol in the object file. */ from = (bfd_byte *) minisyms; fromend = from + symcount * size; secname = NULL; for (; from < fromend; from += size) { if (!(sym = bfd_minisymbol_to_symbol(bfd, dynamic, from, store))) error(FATAL, "bfd_minisymbol_to_symbol() failed\n"); bfd_get_symbol_info(bfd, sym, &syminfo); if (CRASHDEBUG(3)) { fprintf(fp,"matching sym %s %lx against bfd %s %lx\n", s1->name, (long) s1->value, syminfo.name, (long) syminfo.value); } if (strcmp(syminfo.name, s1->name) == 0) { secname = (char *)bfd_section_name(sym->section); secaddr = bfd_section_vma(sym->section); break; } } if (secname == NULL) { if (CRASHDEBUG(1)) fprintf(fp, "symbol %s not found in module\n", s1->name); s1++; continue; } /* Match the section it came in. */ for (i = 0; i < lm->mod_sections; i++) { if (STREQ(lm->mod_section_data[i].name, secname)) break; } if (i == lm->mod_sections) { fprintf(fp, "?? 
Section %s not found for symbol %s\n", secname, s1->name); s1++; continue; } if (lm->mod_section_data[i].flags & SEC_FOUND) { s1++; continue; } /* Update the offset information for the section */ sec_start = s1->value - syminfo.value + secaddr; /* keep the address instead of offset */ lm->mod_section_data[i].addr = sec_start; lm->mod_section_data[i].flags |= SEC_FOUND; if (CRASHDEBUG(2)) fprintf(fp, "update sec offset sym %s @ %lx val %lx section %s @ %lx\n", s1->name, s1->value, (ulong)syminfo.value, secname, secaddr); if (strcmp(secname, ".text") == 0) lm->mod_text_start = sec_start; if (strcmp(secname, ".bss") == 0) lm->mod_bss_start = sec_start; if (strcmp(secname, ".data") == 0) lm->mod_data_start = sec_start; if (strcmp(secname, ".rodata") == 0) lm->mod_rodata_start = sec_start; s1++; } } } /* * Later versons of insmod store basic address information of each * module in a format that looks like the following example of the * nfsd module: * * d004d000 __insmod_nfsd_O/lib/modules/2.2.17/fs/nfsd.o_M3A7EE300_V131601 * d004d054 __insmod_nfsd_S.text_L30208 * d0054840 __insmod_nfsd_S.rodata_L8930 * d0056b40 __insmod_nfsd_S.data_L1220 * d00570c0 __insmod_nfsd_S.bss_L123840 * * When that's true, override the offset value made by calculate_load_order(). */ static void check_insmod_builtin(struct load_module *lm, int index, ulong *offset) { struct syment *sp; char buf[BUFSIZE]; ulong offs; sprintf(buf, "__insmod_%s_S%s", lm->mod_name, lm->mod_section_data[index].name); if (symbol_query(buf, NULL, &sp) == 1) { if (CRASHDEBUG(1)) fprintf(fp, "check_insmod_builtin: %lx %s\n", sp->value, sp->name); offs = sp->value - lm->mod_base; if (offs != *offset) { if (CRASHDEBUG(1)) fprintf(fp, "check_insmod_builtin: [%s] %s %lx != %lx\n", lm->mod_name, lm->mod_section_data[index].name, offs, *offset); *offset = offs; } } } /* * Determine whether a module symbol is one of the insmod-created symbols * described above. 
 */
static int
is_insmod_builtin(struct load_module *lm, struct syment *sp)
{
	char buf[BUFSIZE];

	if (!(st->flags & INSMOD_BUILTIN))
		return FALSE;

	/* Pseudo-symbols are named "__insmod_<module>_S<section>...". */
	sprintf(buf, "__insmod_%s_S", lm->mod_name);

	if (strstr(sp->name, buf))
		return TRUE;

	return FALSE;
}

/*
 * Modified from typical "qsort" help functions to simulate section-ordering
 * done by insmod when loading modules.
 */
static int
compare_prios(const void *v1, const void *v2)
{
	struct mod_section_data *md1, *md2;

	md1 = (struct mod_section_data *)v1;
	md2 = (struct mod_section_data *)v2;

	/* Note: never returns 0, so equal priorities keep qsort's order. */
	return (md1->priority < md2->priority ? -1 : 1);
}

/*
 * This routine scours a module object file namelist for global text and
 * data symbols, sorting and storing them in a static table for quick
 * reference.  This allows access to non-EXPORT_SYMBOL() symbols.
 * The object file is then passed to gdb for loading of all symbolic
 * and debugging data.
 *
 * Thanks to David Addison (addy@quadrics.com) for the suggestion.
 */
int
load_module_symbols(char *modref, char *namelist, ulong base_addr)
{
	static bfd *mbfd;
	char **matching;
	long symcount;
	void *minisyms;
	unsigned int size;
	int result;
	struct load_module *lm;
	asymbol *sort_x;
	asymbol *sort_y;

	if (!is_module_name(modref, NULL, &lm))
		error(FATAL, "%s: not a loaded module name\n", modref);

	/* Nothing to do if symbols were already loaded for this module. */
	if ((lm->mod_flags & MOD_LOAD_SYMS) || strlen(lm->mod_namelist)) {
		if (CRASHDEBUG(1))
			fprintf(fp, "%s: module symbols are already loaded\n",
				modref);
		return TRUE;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "load_module_symbols: %s %s %lx %lx\n",
			modref, namelist, base_addr, kt->flags);

	switch (kt->flags & (KMOD_V1|KMOD_V2))
	{
	case KMOD_V1:
		break;
	case KMOD_V2:
		st->current = lm;
		BZERO(lm->mod_namelist, MAX_MOD_NAMELIST);
		if (strlen(namelist) < MAX_MOD_NAMELIST)
			strcpy(lm->mod_namelist, namelist);
		else
			strncpy(lm->mod_namelist, namelist, MAX_MOD_NAMELIST-1);
		if (st->flags & USE_OLD_ADD_SYM)
			goto add_symbols;
	}

	if ((mbfd = bfd_openr(namelist, NULL)) == NULL)
		error(FATAL, "cannot open object file: %s\n", namelist);

	if (!bfd_check_format_matches(mbfd, bfd_object, &matching))
		error(FATAL, "cannot determine object file format: %s\n",
			namelist);

	if (LKCD_KERNTYPES() &&
	    (file_elf_version(namelist) == EV_DWARFEXTRACT))
		goto add_symbols; /* no symbols, add the debuginfo */

	if (!(bfd_get_file_flags(mbfd) & HAS_SYMS))
		error(FATAL, "no symbols in object file: %s\n", namelist);

	symcount = bfd_read_minisymbols(mbfd, FALSE, &minisyms, &size);
	if (symcount < 0)
		error(FATAL, "cannot access symbol table data: %s\n",
			namelist);
	else if (symcount == 0)
		error(FATAL, "no symbols in object file: %s\n", namelist);

	if (CRASHDEBUG(2)) {
		fprintf(fp, "%ld symbols found in obj file %s\n", symcount,
			namelist);
	}

	/* Sort the minisymbols before storing them in the static table. */
	sort_x = bfd_make_empty_symbol(mbfd);
	sort_y = bfd_make_empty_symbol(mbfd);
	if (sort_x == NULL || sort_y == NULL)
		error(FATAL, "bfd_make_empty_symbol() failed\n");

	gnu_qsort(mbfd, minisyms, symcount, size, sort_x, sort_y);

	store_load_module_symbols(mbfd, FALSE, minisyms, symcount,
		size, base_addr, namelist);

	free(minisyms);

	bfd_close(mbfd);

add_symbols:
	result = add_symbol_file(st->current);

	if (CRASHDEBUG(2))
		check_for_dups(st->current);

	st->current = NULL;

	return result;
}

/*
 * Add a module's symbol file data to gdb's notion of the world.
*/ static int add_symbol_file(struct load_module *lm) { struct gnu_request request, *req; char buf[BUFSIZE]; int i, len; char *secname; req = &request; BZERO(req, sizeof(struct gnu_request)); if ((lm->mod_flags & MOD_KALLSYMS) && add_symbol_file_kallsyms(lm, req)) return TRUE; for (i = len = 0; i < lm->mod_sections; i++) { secname = lm->mod_section_data[i].name; if ((lm->mod_section_data[i].flags & SEC_FOUND) && (!STREQ(secname, ".text") && !STREQ(secname, ".data.percpu") && !STREQ(secname, ".data..percpu"))) { if (MODULE_MEMORY()) sprintf(buf, " -s %s 0x%lx", secname, lm->mod_section_data[i].addr); else sprintf(buf, " -s %s 0x%lx", secname, lm->mod_section_data[i].offset + lm->mod_base); len += strlen(buf); } } for (i = 0; i < lm->mod_sections; i++) { secname = lm->mod_section_data[i].name; if ((lm->mod_section_data[i].flags & SEC_FOUND) && (STREQ(secname, ".data.percpu") || STREQ(secname, ".data..percpu"))) { sprintf(buf, " -s %s 0x%lx", secname, lm->mod_percpu); len += strlen(buf); } } if (pc->curcmd_flags & MOD_READNOW) lm->mod_flags |= MOD_DO_READNOW; req->command = GNU_ADD_SYMBOL_FILE; req->addr = (ulong)lm; req->buf = GETBUF(len+BUFSIZE); if (!CRASHDEBUG(1)) req->fp = pc->nullfp; st->flags |= ADD_SYMBOL_FILE; gdb_interface(req); st->flags &= ~ADD_SYMBOL_FILE; FREEBUF(req->buf); sprintf(buf, "set complaints 0"); gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR); return(!(req->flags & GNU_COMMAND_FAILED)); } static int add_symbol_file_percpu(struct load_module *lm, struct gnu_request *req, int buflen) { char pbuf[BUFSIZE]; int i, len; char *secname; len = strlen(req->buf); for (i = 0; i < lm->mod_sections; i++) { secname = lm->mod_section_data[i].name; if ((lm->mod_section_data[i].flags & SEC_FOUND) && (STREQ(secname, ".data.percpu") || STREQ(secname, ".data..percpu"))) { sprintf(pbuf, " -s %s 0x%lx", secname, lm->mod_percpu); while ((len + strlen(pbuf)) >= buflen) { RESIZEBUF(req->buf, buflen, buflen * 2); buflen *= 2; } strcat(req->buf, pbuf); len += 
strlen(pbuf); } } return buflen; } /* * Gather the module section data from the in-kernel data structures. */ static int add_symbol_file_kallsyms(struct load_module *lm, struct gnu_request *req) { int len, buflen, done, nsections, retval; ulong vaddr, array_entry, attribute, owner, name, address; long name_type; char buf[BUFSIZE]; char section_name[BUFSIZE/2]; ulong section_vaddr; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) return FALSE; #endif if (!(st->flags & (MODSECT_VMASK|MODSECT_UNKNOWN))) { STRUCT_SIZE_INIT(module_sect_attr, "module_sect_attr"); MEMBER_OFFSET_INIT(module_sect_attrs, "module", "sect_attrs"); MEMBER_OFFSET_INIT(module_sect_attrs_attrs, "module_sect_attrs", "attrs"); MEMBER_OFFSET_INIT(module_sect_attrs_nsections, "module_sect_attrs", "nsections"); MEMBER_OFFSET_INIT(module_sect_attr_mattr, "module_sect_attr", "mattr"); MEMBER_OFFSET_INIT(module_sect_attr_name, "module_sect_attr", "name"); MEMBER_OFFSET_INIT(module_sect_attr_address, "module_sect_attr", "address"); MEMBER_OFFSET_INIT(module_attribute_attr, "module_attribute", "attr"); MEMBER_OFFSET_INIT(module_sect_attr_attr, "module_sect_attr", "attr"); MEMBER_OFFSET_INIT(module_sections_attrs, "module_sections", "attrs"); MEMBER_OFFSET_INIT(attribute_owner, "attribute", "owner"); if (VALID_MEMBER(module_sect_attrs_attrs) && VALID_MEMBER(module_sect_attr_mattr) && VALID_MEMBER(module_attribute_attr) && VALID_MEMBER(module_sect_attrs_nsections)) st->flags |= MODSECT_V3; else if (VALID_MEMBER(module_sect_attrs_attrs) && VALID_MEMBER(module_sect_attr_mattr) && VALID_MEMBER(module_attribute_attr)) st->flags |= MODSECT_V2; else if (VALID_MEMBER(module_sect_attr_attr) && VALID_MEMBER(module_sections_attrs)) st->flags |= MODSECT_V1; else st->flags |= MODSECT_UNKNOWN; if ((st->flags & MODSECT_UNKNOWN) || !VALID_STRUCT(module_sect_attr) || (INVALID_MEMBER(attribute_owner) && (st->flags & (MODSECT_V1|MODSECT_V2))) || INVALID_MEMBER(module_sect_attrs) || 
INVALID_MEMBER(module_sect_attr_name) || INVALID_MEMBER(module_sect_attr_address)) { if (CRASHDEBUG(1)) error(WARNING, "module section data structures " "unrecognized or changed\n"); st->flags &= ~(MODSECT_VMASK); st->flags |= MODSECT_UNKNOWN; return FALSE; } } else if (st->flags & MODSECT_UNKNOWN) return FALSE; if (!readmem(lm->module_struct + OFFSET(module_sect_attrs), KVADDR, &vaddr, sizeof(void *), "module.sect_attrs", RETURN_ON_ERROR|QUIET)) return FALSE; array_entry = attribute = 0; switch (st->flags & MODSECT_VMASK) { case MODSECT_V1: array_entry = vaddr + OFFSET(module_sections_attrs); nsections = UNUSED; break; case MODSECT_V2: array_entry = vaddr + OFFSET(module_sect_attrs_attrs); nsections = UNUSED; break; case MODSECT_V3: array_entry = vaddr + OFFSET(module_sect_attrs_attrs); if (!readmem(vaddr + OFFSET(module_sect_attrs_nsections), KVADDR, &nsections, sizeof(int), "module_sect_attrs.nsections", RETURN_ON_ERROR|QUIET)) return FALSE; if (CRASHDEBUG(2)) fprintf(fp, "nsections: %d\n", nsections); break; } if (CRASHDEBUG(2)) fprintf(fp, "%s:\n", lm->mod_namelist); name_type = MEMBER_TYPE("module_sect_attr", "name"); req->buf = GETBUF(buflen = 1024); retval = FALSE; for (done = FALSE; !done; array_entry += SIZE(module_sect_attr)) { switch (st->flags & MODSECT_VMASK) { case MODSECT_V1: attribute = array_entry + OFFSET(module_sect_attr_attr); break; case MODSECT_V2: case MODSECT_V3: attribute = array_entry + OFFSET(module_sect_attr_mattr) + OFFSET(module_attribute_attr); break; } if (st->flags & (MODSECT_V1|MODSECT_V2)) owner = attribute + OFFSET(attribute_owner); else owner = UNUSED; address = array_entry + OFFSET(module_sect_attr_address); switch (name_type) { case TYPE_CODE_ARRAY: name = array_entry + OFFSET(module_sect_attr_name); break; case TYPE_CODE_PTR: if (!readmem(array_entry + OFFSET(module_sect_attr_name), KVADDR, &name, sizeof(void *), "module_sect_attr.name", RETURN_ON_ERROR|QUIET)) { done = TRUE; retval = FALSE; continue; } break; default: done 
= TRUE; retval = FALSE; } if (CRASHDEBUG(2)) { fprintf(fp, "attribute: %lx ", attribute); if (owner == UNUSED) fprintf(fp, " owner: (not used)"); else fprintf(fp, " owner: %lx ", owner); fprintf(fp, " name: %lx ", name); fprintf(fp, " address: %lx\n", address); } if (nsections == UNUSED) { if (!readmem(owner, KVADDR, &vaddr, sizeof(void *), "attribute.owner", RETURN_ON_ERROR|QUIET)) { done = TRUE; continue; } if (lm->module_struct != vaddr) { done = TRUE; continue; } } BZERO(section_name, BUFSIZE/2); if (!read_string(name, section_name, 32)) { done = TRUE; retval = FALSE; continue; } if (!readmem(address, KVADDR, §ion_vaddr, sizeof(void *), "module_sect_attr.address", RETURN_ON_ERROR|QUIET)) { done = TRUE; retval = FALSE; continue; } if (CRASHDEBUG(1)) fprintf(fp, "%lx %s\n", section_vaddr, section_name); len = strlen(req->buf); if (STREQ(section_name, ".text")) { sprintf(buf, "add-symbol-file %s 0x%lx %s", lm->mod_namelist, section_vaddr, pc->curcmd_flags & MOD_READNOW ? "-readnow" : ""); while ((len + strlen(buf)) >= buflen) { RESIZEBUF(req->buf, buflen, buflen * 2); buflen *= 2; } shift_string_right(req->buf, strlen(buf)); BCOPY(buf, req->buf, strlen(buf)); retval = TRUE; } else if (lm->mod_init_module_ptr || !STRNEQ(section_name, ".init.")) { sprintf(buf, " -s %s 0x%lx", section_name, section_vaddr); while ((len + strlen(buf)) >= buflen) { RESIZEBUF(req->buf, buflen, buflen * 2); buflen *= 2; } strcat(req->buf, buf); } if (nsections != UNUSED) { if (--nsections == 0) done = TRUE; } } if (retval == FALSE) { if (CRASHDEBUG(1)) fprintf(fp, "%s: add_symbol_file_kallsyms failed\n", lm->mod_namelist); FREEBUF(req->buf); req->buf = NULL; return FALSE; } /* * Special case for per-cpu symbols */ buflen = add_symbol_file_percpu(lm, req, buflen); lm->mod_flags |= MOD_NOPATCH; req->command = GNU_ADD_SYMBOL_FILE; req->addr = (ulong)lm; if (!CRASHDEBUG(1)) req->fp = pc->nullfp; st->flags |= ADD_SYMBOL_FILE; gdb_interface(req); st->flags &= ~ADD_SYMBOL_FILE; 
/* Tail of add_symbol_file_kallsyms(): release the command buffer,
 * quiet gdb's symbol-reading complaints, and report gdb's status. */
FREEBUF(req->buf);

	sprintf(buf, "set complaints 0");
	gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR);

	return(!(req->flags & GNU_COMMAND_FAILED));
}

/*
 * Given a syment structure of a valid symbol, determine which
 * load_module (if any) it belongs to.
 */
static int
load_module_index(struct syment *sp)
{
	int i;
	ulong value;
	struct load_module *lm;

	value = sp->value;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		/* Check both the module's core and init address ranges. */
		if (IN_MODULE(value, lm))
			return i;
		if (IN_MODULE_INIT(value, lm))
			return i;
	}

	/* error(FATAL, ...) does not return; this return quiets compilers. */
	return (error(FATAL, "cannot find %lx (%s) in module space\n",
		sp->value, sp->name));
}

/*
 * Return the syment of a kallsyms-generated module symbol.
 *
 * Each external-symbol entry carries a "cnt" usage counter; the first
 * unused entry matching the name is claimed (cnt bumped) and returned.
 * Returns NULL if the module has no kallsyms data or no unused match.
 */
static struct syment *
kallsyms_module_symbol(struct load_module *lm, symbol_info *syminfo)
{
	struct syment *sp, *spx;
	int cnt, t;

	if (!(lm->mod_flags & MOD_KALLSYMS))
		return NULL;

	sp = NULL;
	cnt = 0;

	if (MODULE_MEMORY()) {
		/* Kernels with per-type module memory regions. */
		for_each_mod_mem_type(t) {
			if (!lm->ext_symtable[t])
				continue;
			for (spx = lm->ext_symtable[t]; spx <= lm->ext_symend[t]; spx++) {
				if (!STREQ(spx->name, syminfo->name))
					continue;
				/* Non-zero cnt: this entry was already claimed. */
				if (spx->cnt) {
					cnt++;
					continue;
				}
				spx->cnt++;
				sp = spx;
				break;
			}
		}
	} else {
		for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) {
			if (!STREQ(spx->name, syminfo->name))
				continue;
			if (spx->cnt) {
				cnt++;
				continue;
			}
			spx->cnt++;
			sp = spx;
			break;
		}
	}

	if (CRASHDEBUG(2)) {
		if (cnt)
			fprintf(fp, "kallsyms [%s] %s: multiply defined\n",
				lm->mod_name, syminfo->name);
		if (sp)
			fprintf(fp, "kallsyms [%s] %s: %lx\n",
				lm->mod_name, syminfo->name, sp->value);
		else
			fprintf(fp, "kallsyms [%s] %s: NOT FOUND\n",
				lm->mod_name, syminfo->name);
	}

	return sp;
}

/*
 * Replace the externally-defined module symbols found in store_load_modules()
 * with all the text and data symbols found in the load module object file.
 */

/*
 * Read the module object file's minisymbols (bfd), relocate them per the
 * calculated section load order, and install them as the module's loaded
 * symbol table, replacing the external symbols gathered at startup.
 *
 *  bfd/dynamic/minisyms/symcount/size: bfd minisymbol table description.
 *  base_addr: module base, used to locate the matching load_module.
 *  namelist:  pathname of the module object file.
 */
static void
store_load_module_symbols(bfd *bfd, int dynamic, void *minisyms,
	long symcount, unsigned int size, ulong base_addr, char *namelist)
{
	int i, t;
	asymbol *store;
	asymbol *sym;
	bfd_byte *from, *fromend;
	symbol_info syminfo;
	struct syment *sp, *spx;
	struct load_module *lm;
	char name[BUFSIZE];
	char *nameptr, *secname;
	long index;
	long symalloc;
	int found = FALSE;

	if ((store = bfd_make_empty_symbol(bfd)) == NULL)
		error(FATAL, "bfd_make_empty_symbol() failed\n");

	st->current = lm = NULL;

	/*
	 * Find out whether this module has already been loaded.  Coming
	 * out of this for loop, lm->mod_load_symtable will either be set to
	 * a reusable symbol table, or NULL if it needs to be re-malloc'd.
	 */
	for (i = symalloc = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_base == base_addr) {
			symalloc = symcount + lm->mod_ext_symcnt;
			if (lm->mod_load_symtable &&
			    (lm->mod_symalloc < symalloc)) {
				free(lm->mod_load_symtable);
				namespace_ctl(NAMESPACE_FREE,
					&lm->mod_load_namespace, NULL, NULL);
				lm->mod_load_symtable = NULL;
			}
			break;
		}
	}

	if (i == st->mods_installed)
		error(FATAL, "cannot find module at %lx\n", base_addr);

	if (!lm->mod_load_symtable) {
		if ((lm->mod_load_symtable = (struct syment *)
		    calloc(symalloc, sizeof(struct syment))) == NULL)
			error(FATAL, "module syment space malloc: %s\n",
				strerror(errno));
		if (!namespace_ctl(NAMESPACE_INIT, &lm->mod_load_namespace,
		    (void *)symalloc, NULL))
			error(FATAL, "module name space malloc: %s\n",
				strerror(errno));
	} else
		namespace_ctl(NAMESPACE_REUSE, &lm->mod_load_namespace,
			NULL, NULL);

	st->current = lm;
	lm->mod_symalloc = symalloc;
	BZERO(lm->mod_namelist, MAX_MOD_NAMELIST);
	if (strlen(namelist) < MAX_MOD_NAMELIST)
		strcpy(lm->mod_namelist, namelist);
	else
		strncpy(lm->mod_namelist, namelist, MAX_MOD_NAMELIST-1);
	lm->mod_text_start = lm->mod_data_start = 0;
	lm->mod_rodata_start = lm->mod_bss_start = 0;
	lm->mod_load_symcnt = 0;
	lm->mod_sections = 0;

	/* Reset the "claimed" counters consulted by kallsyms_module_symbol(). */
	if (MODULE_MEMORY()) {
		for_each_mod_mem_type(t) {
			if (!lm->ext_symtable[t])
				continue;
			for (spx = lm->ext_symtable[t];
			     spx <= lm->ext_symend[t]; spx++)
				spx->cnt = 0;
		}
	} else {
		for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++)
			spx->cnt = 0;
	}

	sp = lm->mod_load_symtable;

	if (!(lm->mod_section_data = (struct mod_section_data *)
	    malloc(sizeof(struct mod_section_data) * (bfd->section_count+1))))
		error(FATAL, "module section data array malloc: %s\n",
			strerror(errno));

	bfd_map_over_sections(bfd, section_header_info, MODULE_SECTIONS);

	/* Determine where each section was loaded in the kernel. */
	if (MODULE_MEMORY())
		calculate_load_order_6_4(lm, bfd, dynamic, minisyms,
			symcount, size);
	else if (kt->flags & KMOD_V1)
		calculate_load_order_v1(lm, bfd);
	else
		calculate_load_order_v2(lm, bfd, dynamic, minisyms,
			symcount, size);

	/* Walk the minisymbol table, relocating and installing each symbol. */
	from = (bfd_byte *) minisyms;
	fromend = from + symcount * size;
	for (; from < fromend; from += size) {
		if ((sym = bfd_minisymbol_to_symbol(bfd, dynamic, from, store))
		    == NULL)
			error(FATAL, "bfd_minisymbol_to_symbol() failed\n");

		bfd_get_symbol_info(bfd, sym, &syminfo);
		secname = (char *)bfd_section_name(sym->section);
		found = 0;

		if (kt->flags & KMOD_V1) {
			switch (syminfo.type)
			{
			case 'b':
			case 'B':
				if (CRASHDEBUG(2))
					fprintf(fp, "%08lx (%c) [%s] %s\n",
						(ulong)syminfo.value,
						syminfo.type, secname,
						syminfo.name);
				if (!lm->mod_bss_start)
					break;
				syminfo.value += lm->mod_bss_start;
				found = 1;
				break;

			case 'd':
			case 'D':
				if (CRASHDEBUG(2))
					fprintf(fp, "%08lx (%c) [%s] %s\n",
						(ulong)syminfo.value,
						syminfo.type, secname,
						syminfo.name);
				if (STREQ(secname, ".rodata")) {
					if (!lm->mod_rodata_start)
						break;
					syminfo.value += lm->mod_rodata_start;
				} else {
					if (!lm->mod_data_start)
						break;
					syminfo.value += lm->mod_data_start;
				}
				found = 1;
				break;

			case 't':
			case 'T':
				if (CRASHDEBUG(2))
					fprintf(fp, "%08lx (%c) [%s] %s\n",
						(ulong)syminfo.value,
						syminfo.type, secname,
						syminfo.name);
				if (!lm->mod_text_start) {
					break;
				}
				/* NOTE(review): this tests "name", which still
				 * holds the previous iteration's stripped
				 * symbol -- presumably syminfo.name was
				 * intended; confirm upstream. */
				if ((st->flags & INSMOD_BUILTIN) &&
				    (STREQ(name, "init_module") ||
				    STREQ(name, "cleanup_module")))
					break;
				syminfo.value += lm->mod_text_start;
				found = 1;
				break;

			default:
				break;
			}
		} else {
			/* Match the section it came in. */
			for (i = 0; i < lm->mod_sections; i++) {
				if (STREQ(lm->mod_section_data[i].name, secname)
				    && (lm->mod_section_data[i].flags & SEC_FOUND)) {
					break;
				}
			}
			if (i < lm->mod_sections) {
				if (CRASHDEBUG(2))
					fprintf(fp, "%08lx (%c) [%s] %s\n",
						(ulong)syminfo.value,
						syminfo.type, secname,
						syminfo.name);
				/* NOTE(review): "name" here is stale (previous
				 * iteration) -- presumably syminfo.name was
				 * intended; confirm upstream. */
				if ((st->flags & INSMOD_BUILTIN) &&
				    (STREQ(name, "init_module") ||
				    STREQ(name, "cleanup_module")))
					found = FALSE;
				else if (syminfo.name[0] == '.')
					found = FALSE;
				else if ((spx = kallsyms_module_symbol(lm, &syminfo))) {
					/* Prefer the kernel's kallsyms value. */
					syminfo.value = spx->value;
					found = TRUE;
				} else if (lm->mod_percpu &&
				    (STREQ(secname, ".data.percpu") ||
				    STREQ(secname, ".data..percpu"))) {
					syminfo.value += lm->mod_percpu;
					found = TRUE;
				} else {
					if (MODULE_MEMORY())
						syminfo.value += lm->mod_section_data[i].addr;
					else
						syminfo.value += lm->mod_section_data[i].offset + lm->mod_base;
					found = TRUE;
				}
			}
		}

		if (found) {
			strcpy(name, syminfo.name);
			strip_module_symbol_end(name);
			strip_symbol_end(name, NULL);
			if (machdep->verify_symbol(name, syminfo.value,
			    syminfo.type)) {
				sp->value = syminfo.value;
				sp->type = syminfo.type;
				sp->flags |= MODULE_SYMBOL;
				namespace_ctl(NAMESPACE_INSTALL,
					&lm->mod_load_namespace, sp, name);
				if (CRASHDEBUG(2))
					fprintf(fp, "installing %c %08lx %s\n",
						syminfo.type, sp->value, name);
				sp++;
				lm->mod_load_symcnt++;
			}
		}
	}

	lm->mod_load_symend = &lm->mod_load_symtable[lm->mod_load_symcnt];

	/*
	 * Merge in any externals that didn't show up in the four
	 * syminfo data types accepted above, plus the two pseudo symbols.
	 * Note that the new syment name pointers haven't been resolved yet.
	 */
	if (!MODULE_MEMORY())
		goto old_module;

	for_each_mod_mem_type(t) {
		if (!lm->ext_symtable[t])
			continue;
		for (spx = lm->ext_symtable[t]; spx <= lm->ext_symend[t]; spx++) {
			found = FALSE;
			for (sp = lm->mod_load_symtable;
			     sp < lm->mod_load_symend; sp++) {
				/* sp->name is still a namespace index here. */
				index = (long)sp->name;
				nameptr = &lm->mod_load_namespace.address[index];
				if (STREQ(spx->name, nameptr)) {
					found = TRUE;
					if (spx->value == sp->value) {
						if (CRASHDEBUG(2))
							fprintf(fp, "%s: %s matches!\n",
								lm->mod_name, nameptr);
					} else {
						if (CRASHDEBUG(2))
							fprintf(fp,
								"[%s] %s: %lx != extern'd value: %lx\n",
								lm->mod_name, nameptr,
								sp->value, spx->value);
					}
					break;
				}
			}
			if (!found) {
				if (CRASHDEBUG(2))
					fprintf(fp, "append ext %s (%lx)\n",
						spx->name, spx->value);
				/* append it here... */
				namespace_ctl(NAMESPACE_INSTALL,
					&lm->mod_load_namespace,
					lm->mod_load_symend, spx->name);
				lm->mod_load_symend->value = spx->value;
				lm->mod_load_symend->type = spx->type;
				lm->mod_load_symend->flags |= MODULE_SYMBOL;
				lm->mod_load_symend++;
				lm->mod_load_symcnt++;
			}
		}
	}
	goto append_section_symbols;

old_module:
	for (spx = lm->mod_ext_symtable; spx <= lm->mod_ext_symend; spx++) {
		found = FALSE;
		for (sp = lm->mod_load_symtable; sp < lm->mod_load_symend; sp++) {
			index = (long)sp->name;
			nameptr = &lm->mod_load_namespace.address[index];
			if (STREQ(spx->name, nameptr)) {
				found = TRUE;
				if (spx->value == sp->value) {
					if (CRASHDEBUG(2))
						fprintf(fp, "%s: %s matches!\n",
							lm->mod_name, nameptr);
				} else {
					if (CRASHDEBUG(2))
						fprintf(fp,
							"[%s] %s: %lx != extern'd value: %lx\n",
							lm->mod_name, nameptr,
							sp->value, spx->value);
				}
				break;
			}
		}
		if (!found) {
			if (CRASHDEBUG(2))
				fprintf(fp, "append ext %s (%lx)\n",
					spx->name, spx->value);
			/* append it here... */
			namespace_ctl(NAMESPACE_INSTALL, &lm->mod_load_namespace,
				lm->mod_load_symend, spx->name);
			lm->mod_load_symend->value = spx->value;
			lm->mod_load_symend->type = spx->type;
			lm->mod_load_symend->flags |= MODULE_SYMBOL;
			lm->mod_load_symend++;
			lm->mod_load_symcnt++;
		}
	}

append_section_symbols:
	/*
	 * Append helpful pseudo symbols about found out sections.
	 * Use 'S' as its type which is never seen in existing symbols.
	 */
	for (i = 0; (pc->curcmd_flags & MOD_SECTIONS) &&
	     (i < lm->mod_sections); i++) {
		if (!(lm->mod_section_data[i].flags & SEC_FOUND))
			continue;
		/* Section start */
		if (MODULE_MEMORY())
			lm->mod_load_symend->value = lm->mod_section_data[i].addr;
		else
			lm->mod_load_symend->value = lm->mod_base +
				lm->mod_section_data[i].offset;
		lm->mod_load_symend->type = 'S';
		lm->mod_load_symend->flags |= MODULE_SYMBOL;
		sprintf(name, "_MODULE_SECTION_START [%s]",
			lm->mod_section_data[i].name);
		namespace_ctl(NAMESPACE_INSTALL, &lm->mod_load_namespace,
			lm->mod_load_symend, name);
		lm->mod_load_symend++;
		lm->mod_load_symcnt++;

		/* Section end */
		if (MODULE_MEMORY())
			lm->mod_load_symend->value = lm->mod_section_data[i].addr;
		else
			lm->mod_load_symend->value = lm->mod_base +
				lm->mod_section_data[i].offset;
		lm->mod_load_symend->value += lm->mod_section_data[i].size;
		lm->mod_load_symend->type = 'S';
		lm->mod_load_symend->flags |= MODULE_SYMBOL;
		sprintf(name, "_MODULE_SECTION_END [%s]",
			lm->mod_section_data[i].name);
		namespace_ctl(NAMESPACE_INSTALL, &lm->mod_load_namespace,
			lm->mod_load_symend, name);
		lm->mod_load_symend++;
		lm->mod_load_symcnt++;
	}

	/* Resolve the namespace index values into real name pointers. */
	namespace_ctl(NAMESPACE_COMPLETE, &lm->mod_load_namespace,
		lm->mod_load_symtable, lm->mod_load_symend);

	qsort(lm->mod_load_symtable, lm->mod_load_symcnt,
		sizeof(struct syment), compare_syms);

	if (MODULE_MEMORY()) {
		/* keep load symtable addresses to lm->load_symtable[] */
		/* TODO: make more efficient */
		for (sp = lm->mod_load_symtable; sp < lm->mod_load_symend; sp++) {
			char buf1[BUFSIZE], buf2[BUFSIZE];

			if (CRASHDEBUG(2))
				fprintf(fp, "DEBUG: value %16lx name %s\n",
					sp->value, sp->name);

			if (!MODULE_PSEUDO_SYMBOL(sp))
				continue;

			for_each_mod_mem_type(t) {
				if (!lm->mem[t].size)
					continue;

				sprintf(buf1, "%s%s", module_tag[t].start, lm->mod_name);
				sprintf(buf2, "%s%s", module_tag[t].end, lm->mod_name);

				if (STREQ(sp->name, buf1)) {
					lm->load_symtable[t] = sp;
					break;
				} else if (STREQ(sp->name, buf2)) {
					lm->load_symend[t] = sp;
					break;
				}
			}
		}
	}

	/* Sanity check: the last symbol should be the _MODULE_END_ pseudo. */
	lm->mod_load_symend--;
	if (!MODULE_MEMORY() &&
	    !MODULE_END(lm->mod_load_symend) &&
	    !IN_MODULE_PERCPU(lm->mod_load_symend->value, lm))
		error(INFO, "%s: last symbol: %s is not _MODULE_END_%s?\n",
			lm->mod_name, lm->mod_load_symend->name, lm->mod_name);

	/* Swap the external symbol tables out of the hash for the loaded ones. */
	if (MODULE_MEMORY()) {
		for_each_mod_mem_type(t) {
			if (!lm->symtable[t])
				continue;
			mod_symtable_hash_remove_range(lm->symtable[t], lm->symend[t]);
		}
		lm->symtable = lm->load_symtable;
		lm->symend = lm->load_symend;
		mod_symtable_hash_install_range(lm->mod_load_symtable, lm->mod_load_symend);
	} else {
		mod_symtable_hash_remove_range(lm->mod_symtable, lm->mod_symend);
		lm->mod_symtable = lm->mod_load_symtable;
		lm->mod_symend = lm->mod_load_symend;
		mod_symtable_hash_install_range(lm->mod_symtable, lm->mod_symend);
	}

	lm->mod_flags &= ~MOD_EXT_SYMS;
	lm->mod_flags |= MOD_LOAD_SYMS;

	st->flags |= LOAD_MODULE_SYMS;
}

/*
 * Delete a load module's symbol table.  If base_addr is NULL, delete the
 * complete list of modules.
 */
void
delete_load_module(ulong base_addr)
{
	int i, t;
	struct load_module *lm;
	struct gnu_request request, *req;

	req = &request;
	BZERO(req, sizeof(struct gnu_request));
	req->command = GNU_DELETE_SYMBOL_FILE;

	if (base_addr == ALL_MODULES) {
		for (i = 0; i < st->mods_installed; i++) {
			lm = &st->load_modules[i];
			/* Ask gdb to forget the module's symbol file. */
			if (lm->mod_flags & MOD_LOAD_SYMS) {
				req->name = lm->mod_namelist;
				gdb_interface(req);
			}
			/* Remove loaded symbols from the symbol hash. */
			if (MODULE_MEMORY()) {
				if (lm->mod_load_symtable) {
					mod_symtable_hash_remove_range(lm->mod_load_symtable,
						lm->mod_load_symend);
					for_each_mod_mem_type(t) {
						lm->load_symtable[t] = NULL;
						lm->load_symend[t] = NULL;
					}
				}
			} else
				mod_symtable_hash_remove_range(lm->mod_symtable,
					lm->mod_symend);
			if (lm->mod_load_symtable) {
				free(lm->mod_load_symtable);
				namespace_ctl(NAMESPACE_FREE,
					&lm->mod_load_namespace, NULL, NULL);
			}
			if (lm->mod_flags & MOD_REMOTE)
				unlink_module(lm);
			/* Reinstall the original external symbols. */
			if (MODULE_MEMORY()) {
				if (lm->mod_load_symtable) { /* still non-NULL */
					lm->symtable = lm->ext_symtable;
					lm->symend = lm->ext_symend;
					for_each_mod_mem_type(t) {
						if (!lm->symtable[t])
							continue;
						mod_symtable_hash_install_range(lm->symtable[t],
							lm->symend[t]);
					}
				}
			} else {
				lm->mod_symtable = lm->mod_ext_symtable;
				lm->mod_symend = lm->mod_ext_symend;
				mod_symtable_hash_install_range(lm->mod_symtable,
					lm->mod_symend);
			}
			/* Reset the load_module back to external-syms state. */
			lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH);
			lm->mod_flags |= MOD_EXT_SYMS;
			lm->mod_load_symtable = NULL;
			lm->mod_load_symend = NULL;
			lm->mod_namelist[0] = NULLCHAR;
			lm->mod_load_symcnt = lm->mod_symalloc = 0;
			lm->mod_text_start = lm->mod_data_start = 0;
			lm->mod_bss_start = lm->mod_rodata_start = 0;
			lm->mod_sections = 0;
			lm->mod_percpu_size = 0;
			if (lm->mod_section_data)
				free(lm->mod_section_data);
			lm->mod_section_data = (struct mod_section_data *)0;
			lm->loaded_objfile = NULL;
		}
		st->flags &= ~LOAD_MODULE_SYMS;
		return;
	}

	st->flags &= ~LOAD_MODULE_SYMS;  /* restored below (if any found) */

	/* Single-module variant: same teardown, restricted to base_addr. */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (lm->mod_base == base_addr) {
			if (lm->mod_flags & MOD_LOAD_SYMS) {
				req->name = lm->mod_namelist;
				gdb_interface(req);
			}
			if (MODULE_MEMORY()) {
				if (lm->mod_load_symtable) {
					mod_symtable_hash_remove_range(lm->mod_load_symtable,
						lm->mod_load_symend);
					for_each_mod_mem_type(t) {
						lm->load_symtable[t] = NULL;
						lm->load_symend[t] = NULL;
					}
				}
			} else
				mod_symtable_hash_remove_range(lm->mod_symtable,
					lm->mod_symend);
			if (lm->mod_load_symtable) {
				free(lm->mod_load_symtable);
				namespace_ctl(NAMESPACE_FREE,
					&lm->mod_load_namespace, NULL, NULL);
			}
			if (lm->mod_flags & MOD_REMOTE)
				unlink_module(lm);
			if (MODULE_MEMORY()) {
				if (lm->mod_load_symtable) {
					lm->symtable = lm->ext_symtable;
					lm->symend = lm->ext_symend;
					for_each_mod_mem_type(t) {
						if (!lm->symtable[t])
							continue;
						mod_symtable_hash_install_range(lm->symtable[t],
							lm->symend[t]);
					}
				}
			} else {
				lm->mod_symtable = lm->mod_ext_symtable;
				lm->mod_symend = lm->mod_ext_symend;
				mod_symtable_hash_install_range(lm->mod_symtable,
					lm->mod_symend);
			}
			lm->mod_flags &= ~(MOD_LOAD_SYMS|MOD_REMOTE|MOD_NOPATCH);
			lm->mod_flags |= MOD_EXT_SYMS;
			lm->mod_load_symtable = NULL;
			lm->mod_load_symend = NULL;
			lm->mod_namelist[0] = NULLCHAR;
			lm->mod_load_symcnt = lm->mod_symalloc = 0;
			lm->mod_text_start = lm->mod_data_start = 0;
			lm->mod_bss_start = lm->mod_rodata_start = 0;
			lm->mod_percpu_size = 0;
			lm->mod_sections = 0;
			if (lm->mod_section_data)
				free(lm->mod_section_data);
			lm->mod_section_data = (struct mod_section_data *)0;
			lm->loaded_objfile = NULL;
		} else if (lm->mod_flags & MOD_LOAD_SYMS)
			st->flags |= LOAD_MODULE_SYMS;
	}
}

/*
 * Check whether a string is the name of a module.  If requested, return
 * the base address of the module.
 */
int
is_module_name(char *s, ulong *addr, struct load_module **lmp)
{
	int i;
	struct load_module *lm;

	if (NO_MODULES())
		return FALSE;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (STREQ(s, lm->mod_name)) {
			if (addr)
				*addr = lm->mod_base;
			if (lmp)
				*lmp = lm;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Check whether a value is the base address of a module.  If requested,
 * return the module name.
 */
int
is_module_address(ulong check_addr, char *module_name)
{
	int i;
	struct load_module *lm;

	if (NO_MODULES())
		return FALSE;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		if (check_addr == lm->mod_base) {
			if (module_name)
				strcpy(module_name, lm->mod_name);
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * In a MOD_EXT_SYMBOLS module, find a rough estimate as to where the
 * .rodata section starts.  The value will be used by is_kernel_text()
 * when symbols are not loaded.
 */
static void
find_mod_etext(struct load_module *lm)
{
	ulong start, end;
	char *modbuf;
	ulong maxchunk, alloc;
	long offset = 0;

	start = roundup(lm->mod_size_of_struct, sizeof(long)) + lm->mod_base;
	end = lm->mod_base + lm->mod_size;

	/* Scan the module in chunks of at most 32KB. */
	maxchunk = MIN(end-start, KILOBYTES(32));

	modbuf = GETBUF(maxchunk);

	while (start < end) {
		alloc = MIN(maxchunk, end-start);

		readmem(start, KVADDR, modbuf, alloc,
			"module rodata search chunk", FAULT_ON_ERROR);

		if ((offset = rodata_search((ulong *)modbuf, alloc)) >= 0)
			break;

		start += alloc;
	}

	FREEBUF(modbuf);

	if (offset >= 0)
		lm->mod_etext_guess = start + offset;
	else
		lm->mod_etext_guess = end;
}

/* Number of consecutive all-ASCII longs (16 bytes) taken as rodata. */
#define ASCII_WORD_COUNT (16/sizeof(ulong))

/*
 * Return the offset of the first run of ASCII_WORD_COUNT consecutive
 * ASCII-looking longs in buf, or -1 if none is found.
 */
static long
rodata_search(ulong *buf, ulong size)
{
	int i, acnt, words;
	long offset;
	ulong *wordptr;

	words = size/sizeof(ulong);
	wordptr = buf;

	for (i = acnt = 0, offset = -1; i < words; i++, wordptr++) {
		if (ascii_long(*wordptr)) {
			/* Remember where the current ASCII run began. */
			if (acnt++ == 0)
				offset = i * sizeof(ulong);
		} else {
			acnt = 0;
			offset = -1;
		}

		if (acnt == ASCII_WORD_COUNT)
			break;
	}

	return offset;
}

/* TRUE if every byte of word is a printable ASCII character. */
static int
ascii_long(ulong word)
{
	int i,
cnt;
	unsigned char c;

	for (i = cnt = 0; i < sizeof(ulong); i++) {
		c = (unsigned char)((word >> (i*BITS_PER_BYTE)) & 0xff);
		/* Printable ASCII range: ' ' (0x20) through 0x7e. */
		if ((c >= ' ') && (c < 0x7f))
			cnt++;
	}

	return (cnt == sizeof(ulong));
}

/*
 *  Symbol sorting routines adapted from binutils/nm.c
 */

/* nm.c -- Describe symbol table of a rel file.
   Copyright 1991, 92, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.

   This file is part of GNU Binutils.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

/* Context for the qsort comparators below (qsort passes no user data). */
static bfd *gnu_sort_bfd;
static asymbol *gnu_sort_x;
static asymbol *gnu_sort_y;

#define valueof(x) ((x)->section->vma + (x)->value)

/*
 * qsort comparator: order two bfd minisymbols by name, with NULL names
 * sorting first.
 */
static int
non_numeric_forward(const void *P_x, const void *P_y)
{
	asymbol *x, *y;
	const char *xn, *yn;

	x = bfd_minisymbol_to_symbol(gnu_sort_bfd, FALSE, P_x, gnu_sort_x);
	y = bfd_minisymbol_to_symbol(gnu_sort_bfd, FALSE, P_y, gnu_sort_y);
	if (x == NULL || y == NULL)
		error(FATAL, "bfd_minisymbol_to_symbol failed\n");

	xn = bfd_asymbol_name(x);
	yn = bfd_asymbol_name(y);

	return ((xn == NULL) ? ((yn == NULL) ? 0 : -1) :
		((yn == NULL) ?
1 : strcmp (xn, yn)));
}

/*
 * qsort comparator: order two bfd minisymbols by value, falling back to
 * name order on ties.  As the sort visits every symbol, it also
 * opportunistically records several vmlinux symbol values that later
 * KASLR/phys_base calculations need.
 */
static int
numeric_forward(const void *P_x, const void *P_y)
{
	asymbol *x, *y;
	asection *xs, *ys;

	x = bfd_minisymbol_to_symbol(gnu_sort_bfd, FALSE, P_x, gnu_sort_x);
	y = bfd_minisymbol_to_symbol(gnu_sort_bfd, FALSE, P_y, gnu_sort_y);
	if (x == NULL || y == NULL)
		error(FATAL, "bfd_minisymbol_to_symbol failed\n");

	/* Record _stext the first time it is seen. */
	if (st->_stext_vmlinux == UNINITIALIZED) {
		if (STREQ(x->name, "_stext"))
			st->_stext_vmlinux = valueof(x);
		else if (STREQ(y->name, "_stext"))
			st->_stext_vmlinux = valueof(y);
	}

	/* The presence of these symbols implies a KASLR-capable kernel. */
	if (kt->flags2 & KASLR_CHECK) {
		if (STREQ(x->name, "kaslr_get_random_long") ||
		    STREQ(y->name, "kaslr_get_random_long") ||
		    STREQ(x->name, "module_load_offset") ||
		    STREQ(y->name, "module_load_offset")) {
			kt->flags2 &= ~KASLR_CHECK;
			kt->flags2 |= (RELOC_AUTO|KASLR);
		}
	}

	if (SADUMP_DUMPFILE() || QEMU_MEM_DUMP_NO_VMCOREINFO() || VMSS_DUMPFILE()) {
		/* Need for kaslr_offset and phys_base */
		if (STREQ(x->name, "divide_error") ||
		    STREQ(x->name, "asm_exc_divide_error"))
			st->divide_error_vmlinux = valueof(x);
		else if (STREQ(y->name, "divide_error") ||
			 STREQ(y->name, "asm_exc_divide_error"))
			st->divide_error_vmlinux = valueof(y);

		if (STREQ(x->name, "idt_table"))
			st->idt_table_vmlinux = valueof(x);
		else if (STREQ(y->name, "idt_table"))
			st->idt_table_vmlinux = valueof(y);

		if (STREQ(x->name, "kaiser_init"))
			st->kaiser_init_vmlinux = valueof(x);
		else if (STREQ(y->name, "kaiser_init"))
			st->kaiser_init_vmlinux = valueof(y);

		if (STREQ(x->name, "linux_banner"))
			st->linux_banner_vmlinux = valueof(x);
		else if (STREQ(y->name, "linux_banner"))
			st->linux_banner_vmlinux = valueof(y);

		if (STREQ(x->name, "pti_init"))
			st->pti_init_vmlinux = valueof(x);
		else if (STREQ(y->name, "pti_init"))
			st->pti_init_vmlinux = valueof(y);

		if (STREQ(x->name, "saved_command_line"))
			st->saved_command_line_vmlinux = valueof(x);
		else if (STREQ(y->name, "saved_command_line"))
			st->saved_command_line_vmlinux = valueof(y);
	}

	/* Undefined-section symbols sort before everything else. */
	xs = bfd_asymbol_section(x);
	ys = bfd_asymbol_section(y);

	if (bfd_is_und_section(xs)) {
		if
(!bfd_is_und_section(ys))
			return -1;
	} else if (bfd_is_und_section (ys))
		return 1;
	else if (valueof (x) != valueof (y))
		return valueof (x) < valueof (y) ? -1 : 1;

	/* Equal values: fall back to name ordering. */
	return non_numeric_forward(P_x, P_y);
}

/* Sort a bfd minisymbol table by value using the comparators above. */
static void
gnu_qsort(bfd *bfd, void *minisyms, long symcount,
	unsigned int size, asymbol *x, asymbol *y)
{
	/* Stash comparator context; qsort() passes no user data. */
	gnu_sort_bfd = bfd;
	gnu_sort_x = x;
	gnu_sort_y = y;

	qsort(minisyms, symcount, size, numeric_forward);
}

/*
 * If a System.map file or a debug kernel was specified, the name hash
 * has been filled -- so sync up gdb's notion of symbol values with
 * the local values, taking dups into account.  Given that gdb's
 * minimal_symbol dump is sorted by value, shortcut the get_syment_array()
 * call if the sp after the last one found is associated with the
 * new one.
 */
#define last_sp addr2

int
patch_kernel_symbol(struct gnu_request *req)
{
	int i, c;
	long relocate_display;
	struct syment *sp_array[1000], *sp;

	/* Pseudo-name marking the start of a patching session. */
	if (req->name == PATCH_KERNEL_SYMBOLS_START) {
		if (kt->relocate) {
			/* Display the relocation magnitude in MB. */
			if ((long)kt->relocate < 0)
				relocate_display = (kt->relocate * -1) >> 20;
			else
				relocate_display = kt->relocate >> 20;
			error(WARNING,
				"\nkernel relocated [%ldMB]: patching %ld gdb minimal_symbol values\n",
				relocate_display, st->symcnt);
		}
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "\nplease wait... (patching %ld gdb minimal_symbol values) ",
			st->symcnt);
		fflush(fp);
		req->count = 0;
		req->length = 0;
		req->last_sp = 0;
		return TRUE;
	}

	/* Pseudo-name marking the end of a patching session. */
	if (req->name == PATCH_KERNEL_SYMBOLS_STOP) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "\r \r");
		st->flags |= GDB_SYMS_PATCHED;
		return TRUE;
	}

	if (!req->name || !req->addr)
		return FALSE;

	/* Fast path: try the syment immediately after the last one found. */
	sp = (struct syment *)req->last_sp;
	sp += sp ?
1 : 0;
	if (sp && (sp->cnt == 1) && !(sp->flags & SYMBOL_NAME_USED) &&
	    STREQ(sp->name, req->name)) {
		*((ulong *)req->addr) = sp->value;
		sp->flags |= SYMBOL_NAME_USED;
		req->last_sp = (ulong)sp;
	} else {
		/* Slow path: look the name up, skipping already-used dups. */
		switch (c = get_syment_array(req->name, sp_array, 1000))
		{
		case 0:
			req->last_sp = 0;
			return TRUE;

		case 1:
			*((ulong *)req->addr) = sp_array[0]->value;
			sp_array[0]->flags |= SYMBOL_NAME_USED;
			req->last_sp = (ulong)sp_array[0];
			break;

		default:
			for (i = 0; i < c; i++) {
				if (sp_array[i]->flags & SYMBOL_NAME_USED)
					continue;
				*((ulong *)req->addr) = sp_array[i]->value;
				sp_array[i]->flags |= SYMBOL_NAME_USED;
				req->last_sp = (ulong)sp_array[i];
				break;
			}
			break;
		}
	}

	return TRUE;
}
#undef last_sp

/*
 * If the first offset/size is bogus, then use the second if it's OK.
 * But if both are bogus, then check whether we're debugging datatypes,
 * and act accordingly.
 */
long
OFFSET_option(long offset1, long offset2, char *func, char *file, int line,
	char *item1, char *item2)
{
	char errmsg[BUFSIZE];

	if (offset1 >= 0)
		return offset1;
	if (offset2 >= 0)
		return offset2;

	if (pc->flags & DATADEBUG) {
		void *retaddr[NUMBER_STACKFRAMES] = { 0 };
		SAVE_RETURN_ADDRESS(retaddr);
		sprintf(errmsg,
			"invalid (optional) structure member offsets: %s or %s",
			item1, item2);
		datatype_error(retaddr, errmsg, func, file, line);
	}

	return -1;
}

/* Same policy as OFFSET_option(), for structure sizes. */
long
SIZE_option(long size1, long size2, char *func, char *file, int line,
	char *item1, char *item2)
{
	char errmsg[BUFSIZE];

	if (size1 >= 0)
		return size1;
	if (size2 >= 0)
		return size2;

	if (pc->flags & DATADEBUG) {
		void *retaddr[NUMBER_STACKFRAMES] = { 0 };
		SAVE_RETURN_ADDRESS(retaddr);
		sprintf(errmsg, "invalid (optional) structure sizes: %s or %s",
			item1, item2);
		datatype_error(retaddr, errmsg, func, file, line);
	}

	return -1;
}

/*
 * Do the work of the former OFFSET() and SIZE() macros.
 *
 * For now verification that the offset is legitimate is only done
 * if the "--data_debug" command line option was used.
There
 * could still be constructs like "OFFSET(x) >= 0" in the current
 * code, or in user extensions.  Perhaps there should be an option
 * to turn it off instead?
 */
long
OFFSET_verify(long offset, char *func, char *file, int line, char *item)
{
	char errmsg[BUFSIZE];

	/* Pass-through unless --data_debug verification is enabled. */
	if (!(pc->flags & DATADEBUG))
		return offset;

	if (offset < 0) {
		void *retaddr[NUMBER_STACKFRAMES] = { 0 };
		SAVE_RETURN_ADDRESS(retaddr);
		sprintf(errmsg, "invalid structure member offset: %s", item);
		datatype_error(retaddr, errmsg, func, file, line);
	}
	return offset;
}

/* Same policy as OFFSET_verify(), for structure sizes. */
long
SIZE_verify(long size, char *func, char *file, int line, char *item)
{
	char errmsg[BUFSIZE];

	if (!(pc->flags & DATADEBUG))
		return size;

	if (size < 0) {
		void *retaddr[NUMBER_STACKFRAMES] = { 0 };
		SAVE_RETURN_ADDRESS(retaddr);
		sprintf(errmsg, "invalid structure size: %s", item);
		datatype_error(retaddr, errmsg, func, file, line);
	}
	return size;
}

/*
 * Perform the common datatype error handling: report the error and the
 * calling context, dump the error trace, restore the terminal, and
 * either raise a FATAL error (at runtime) or exit.
 */
static void
datatype_error(void **retaddr, char *errmsg, char *func, char *file, int line)
{
	char buf[BUFSIZE];
	int fd;

	fprintf(stderr, "\n%s: %s\n", pc->curcmd, errmsg);
	fprintf(stderr, "%s FILE: %s LINE: %d FUNCTION: %s()\n\n",
		space(strlen(pc->curcmd)), file, line, func);
	fflush(stderr);

	dump_trace(retaddr);

	/* Restore the original terminal settings before bailing out. */
	if (pc->flags & TTY) {
		if ((fd = open("/dev/tty", O_RDONLY)) >= 0) {
			tcsetattr(fd, TCSANOW, &pc->termios_orig);
			close(fd);
		}
	}

	if (pc->flags & DROP_CORE)
		drop_core("DROP_CORE flag set: forcing a segmentation fault\n");

	if (CRASHDEBUG(1))
		gdb_readnow_warning();

	if (pc->flags & RUNTIME) {
		sprintf(buf, "%s\n%s FILE: %s LINE: %d FUNCTION: %s()\n",
			errmsg, space(strlen(pc->curcmd)), file, line, func);
		error(FATAL, "%s\n", buf);
	}

	exit(1);
}

/*
 * Dump a trace leading to the improper datatype usage.
*/ void dump_trace(void **retaddr) { int i, c; char *thisfile; char *arglist[MAXARGS]; char buf[BUFSIZE]; FILE *pipe; ulong vaddr, size, lookfor; ulong last_vaddr, last_size; char symbol[BUFSIZE]; const char *nm_call; fflush(fp); fflush(stdout); fflush(pc->stdpipe); thisfile = get_thisfile(); fprintf(stderr, "[%s] error trace: ", thisfile); for (i = (NUMBER_STACKFRAMES-1); i >= 0; i--) { if (retaddr[i]) fprintf(stderr, "%s%lx%s", i == 3 ? "" : "=> ", (ulong)retaddr[i], i == 0 ? "\n" : " "); } fflush(stderr); if (!file_exists("/usr/bin/nm", NULL)) { fprintf(stderr, "crash: /usr/bin/nm: no such file\n"); return; } if (is_binary_stripped(thisfile)) nm_call = "/usr/bin/nm -DSBn %s"; else nm_call = "/usr/bin/nm -BSn %s"; last_size = 0; for (i = 0; i < NUMBER_STACKFRAMES; i++) { if (!(lookfor = (ulong)retaddr[i])) continue; sprintf(buf, nm_call, thisfile); if (!(pipe = popen(buf, "r"))) { perror("pipe"); break; } last_vaddr = 0; BZERO(symbol, BUFSIZE); while (fgets(buf, BUFSIZE, pipe)) { c = parse_line(strip_linefeeds(buf), arglist); if (c != 4) continue; vaddr = htol(arglist[0], FAULT_ON_ERROR, NULL); size = htol(arglist[1], FAULT_ON_ERROR, NULL); if (vaddr > lookfor) { if ((lookfor - last_vaddr) > last_size) fprintf(stderr, "%s %lx: (undetermined)\n", i == 0 ? "\n" : "", lookfor); else fprintf(stderr, "%s %lx: %s+%ld\n", i == 0 ? "\n" : "", lookfor, symbol, lookfor-last_vaddr); break; } strcpy(symbol, arglist[3]); last_vaddr = vaddr; last_size = size; } pclose(pipe); } fprintf(stderr, "\n"); } /* * Try best to determine which executable this is. */ static char * get_thisfile(void) { char *buf1; char buf2[BUFSIZE]; char *tok, *path; if (pc->program_path[0] == '.' 
|| pc->program_path[0] == '/') return pc->program_path; if ((path = getenv("PATH"))) { strcpy(buf2, path); } else return pc->program_path; buf1 = GETBUF(BUFSIZE); tok = strtok(buf2, ":"); while (tok) { sprintf(buf1, "%s/%s", tok, pc->program_name); if (file_exists(buf1, NULL) && is_elf_file(buf1)) { return buf1; } tok = strtok(NULL, ":"); } return pc->program_path; } /* * Check whether an address fits into any existing init_module() functions, * and if so, return the load_module. */ struct load_module * init_module_function(ulong vaddr) { int i; struct load_module *lm; if (((kt->flags & (KMOD_V1|KMOD_V2)) == KMOD_V1) || INVALID_MEMBER(module_init_text_size) || INVALID_MEMBER(module_module_init)) return NULL; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; if (!lm->mod_init_module_ptr || !lm->mod_init_text_size) continue; if ((vaddr >= lm->mod_init_module_ptr) && (vaddr < (lm->mod_init_module_ptr+lm->mod_init_text_size)) && accessible(vaddr)) return lm; } return NULL; } /* * The caller fills in the structure and member name fields of * the passed-in struct_member_data structure, which are then * passed to the gdb "printm" command to get the member data. * * Adapted from Qiao Nuohan's "pstruct" extension module. 
 */
int
fill_struct_member_data(struct struct_member_data *smd)
{
	int i, cnt;
	char buf[BUFSIZE];
	char *printm_list[MAXARGS];

	cnt = 0;
	/* Ask gdb for the member's type/length/offset/bit data. */
	sprintf(buf, "printm ((struct %s *)0x0).%s",
		smd->structure, smd->member);

	open_tmpfile2();
	if (!gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR))
		return FALSE;

	rewind(pc->tmpfile2);
	if (fgets(buf, BUFSIZE, pc->tmpfile2)) {
		if (CRASHDEBUG(2))
			fprintf(fp, "%s.%s: %s",
				smd->structure, smd->member, buf);
		cnt = parse_line(buf, printm_list);
	}
	close_tmpfile2();

	/* printm emits exactly six decimal fields. */
	if (cnt != 6)
		return FALSE;
	for (i = 0; i < cnt; i++) {
		if (!decimal(printm_list[i], 0))
			return FALSE;
	}

	smd->type = dtol(printm_list[0], RETURN_ON_ERROR, NULL);
	smd->unsigned_type = dtol(printm_list[1], RETURN_ON_ERROR, NULL);
	smd->length = dtol(printm_list[2], RETURN_ON_ERROR, NULL);
	smd->offset = dtol(printm_list[3], RETURN_ON_ERROR, NULL);
	smd->bitpos = dtol(printm_list[4], RETURN_ON_ERROR, NULL);
	smd->bitsize = dtol(printm_list[5], RETURN_ON_ERROR, NULL);

	return TRUE;
}

/*
 * Append a name to the tail of the singly-linked list of structures
 * whose sizes have been downsized.
 */
void
add_to_downsized(char *name)
{
	struct downsized *ds;

	ds = &st->downsized;

	/* Walk to the first unused (terminal) entry. */
	while (ds->name)
		ds = ds->next;

	if (!(ds->name = (char *)malloc(strlen(name)+1)) ||
	    !(ds->next = (struct downsized *)calloc(1, sizeof(struct downsized))))
		error(FATAL,
			"cannot calloc/malloc downsized struct or \"%s\" name string\n",
			name);

	strcpy(ds->name, name);

	if (CRASHDEBUG(1))
		fprintf(fp, "%sadd_to_downsized: \"%s\"\n",
			(pc->flags & PLEASE_WAIT) ?
"\n" : "", name); } int is_downsized(char *name) { struct downsized *ds; for (ds = &st->downsized; ds->name; ds = ds->next) { if (STREQ(name, ds->name)) return TRUE; } return FALSE; } struct syment * symbol_complete_match(const char *match, struct syment *sp_last) { int i, t; struct syment *sp, *sp_end, *sp_start; struct load_module *lm; int search_init; if (sp_last) { sp_start = next_symbol(NULL, sp_last); if (!sp_start) return NULL; } else sp_start = st->symtable; if ((sp_start >= st->symtable) && (sp_start < st->symend)) { for (sp = sp_start; sp < st->symend; sp++) { if (STRNEQ(sp->name, match)) return sp; } sp_start = NULL; } if (!MODULE_MEMORY()) goto old_module; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; for_each_mod_mem_type(t) { sp_end = lm->symend[t]; if (!sp_start) sp_start = lm->symtable[t]; if (sp_start < lm->symtable[t] || sp_start > sp_end) continue; for (sp = sp_start; sp < sp_end; sp++) { if (MODULE_PSEUDO_SYMBOL(sp)) continue; if (STRNEQ(sp->name, match)) return sp; } sp_start = NULL; } } return NULL; old_module: search_init = FALSE; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; if (lm->mod_flags & MOD_INIT) search_init = TRUE; sp_end = lm->mod_symend; if (!sp_start) sp_start = lm->mod_symtable; if ((sp_start >= lm->mod_symtable) && (sp_start < sp_end)) { for (sp = sp_start; sp < sp_end; sp++) { if (MODULE_START(sp)) continue; if (STRNEQ(sp->name, match)) return sp; } sp_start = NULL; } } if (!search_init) return NULL; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; if (!lm->mod_init_symtable) continue; sp_end = lm->mod_init_symend; if (!sp_start) sp_start = lm->mod_init_symtable; if ((sp_start >= lm->mod_init_symtable) && (sp_start < sp_end)) { for (sp = sp_start; sp < sp_end; sp++) { if (MODULE_START(sp)) continue; if (STRNEQ(sp->name, match)) return sp; } } } return NULL; } /* Returns module memory type if addr is in range, otherwise MOD_INVALID(-1) */ static int 
in_module_range(ulong addr, struct load_module *lm, int start, int end) { ulong base = 0, size = 0; int i; if (!MODULE_MEMORY()) goto old_module; for (i = start ; i <= end; i++) { base = lm->mem[i].base; size = lm->mem[i].size; if (!size) continue; if ((addr >= base) && (addr < (base + size))) return i; } return MOD_INVALID; old_module: if (start == MOD_TEXT) { base = lm->mod_base; size = lm->mod_size; } else if (start == MOD_INIT_TEXT) { base = lm->mod_init_module_ptr; size = lm->mod_init_size; } else error(FATAL, "invalid module memory type!"); if ((addr >= base) && (addr < (base + size))) return start; return MOD_INVALID; } /* Returns module memory type, otherwise MOD_INVALID(-1) */ static int module_mem_type(ulong addr, struct load_module *lm) { return in_module_range(addr, lm, MOD_TEXT, MOD_INIT_RODATA); } /* Returns the end address of the module memory region. */ static ulong module_mem_end(ulong addr, struct load_module *lm) { int type = module_mem_type(addr, lm); if (type == MOD_INVALID) return 0; return lm->mem[type].base + lm->mem[type].size; } crash-utility-crash-9cd43f5/lkcd_v1.c0000664000372000037200000002107515107550337017070 0ustar juerghjuergh/* lkcd_v1.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #define LKCD_COMMON #include "defs.h" #define CONFIG_VMDUMP #include "lkcd_vmdump_v1.h" static dump_header_t dump_header_v1 = { 0 }; static dump_page_t dump_page = { 0 }; /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. */ int lkcd_dump_init_v1(FILE *fp, int fd) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_page_t *dp; lkcd->fd = fd; lkcd->fp = fp; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v1; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; lkcd->dump_header = dh; lkcd->dump_page = dp; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); /* * Allocate and clear the benchmark offsets, one per megabyte. */ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->total_pages = dh->dh_num_pages; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->page_header_size = sizeof(dump_page_t); lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->get_dp_flags = get_dp_flags_v1; lkcd->get_dp_address = get_dp_address_v1; lkcd->get_dp_size = get_dp_size_v1; lkcd->compression = LKCD_DUMP_COMPRESS_RLE; lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: eof = TRUE; continue; } if (!(dp->dp_flags & (DUMP_COMPRESSED|DUMP_RAW|DUMP_END))) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_size > 4096) { lkcd_print("dp_size > 4096: %d\n", dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_END) { lkcd_print("found DUMP_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(1)) break; } /* * Allocate space for LKCD_CACHED_PAGES data 
pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. */ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v1(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. */ uint32_t get_dp_flags_v1(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_flags); } /* * Return the current page's dp_address. */ uint64_t get_dp_address_v1(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_address); } /* * console-only output for info regarding current page. */ void dump_dump_page_v1(char *s, void *dpp) { dump_page_t *dp; uint32_t flags; int others; console(s); dp = (dump_page_t *)dpp; others = 0; console("dp_address: %llx ", dp->dp_address); console("dp_size: %ld ", dp->dp_size); console("dp_flags: %lx (", flags = dp->dp_flags); if (flags & DUMP_COMPRESSED) console("DUMP_COMPRESSED", others++); if (flags & DUMP_RAW) console("%sDUMP_RAW", others++ ? "|" : ""); if (flags & DUMP_END) console("DUMP_END", others++ ? "|" : ""); console(")\n"); } /* * help -S output, or as specified by arg. 
*/ void dump_lkcd_environment_v1(ulong arg) { int others; dump_header_t *dh; dump_page_t *dp; dh = (dump_header_t *)lkcd->dump_header; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: %llx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else lkcd_print("(?)\n"); lkcd_print(" dh_version: %d\n", dh->dh_version); lkcd_print(" dh_header_size: %d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: %d\n", dh->dh_dump_level); lkcd_print(" dh_page_size: %d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: %lld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: %llx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: %llx\n", dh->dh_memory_end); lkcd_print(" dh_esp: %lx\n", dh->dh_esp); lkcd_print(" dh_eip: %lx\n", dh->dh_eip); lkcd_print(" dh_num_pages: %d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && strstr(dh->dh_panic_string, "\n") ? 
"" : "\n"); lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(dh->dh_time.tv_sec)))); lkcd_print(" dh_utsname:\n"); lkcd_print(" sysname: %s\n", dh->dh_utsname.sysname); lkcd_print(" nodename: %s\n", dh->dh_utsname.nodename); lkcd_print(" release: %s\n", dh->dh_utsname.release); lkcd_print(" version: %s\n", dh->dh_utsname.version); lkcd_print(" machine: %s\n", dh->dh_utsname.machine); lkcd_print(" domainname: %s\n", dh->dh_utsname.domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print(" dh_regs:\n"); #ifdef PPC lkcd_print(" (PowerPC register display TBD)\n"); #endif #ifdef X86 lkcd_print(" ebx: %lx\n", dh->dh_regs.ebx); lkcd_print(" ecx: %lx\n", dh->dh_regs.ecx); lkcd_print(" edx: %lx\n", dh->dh_regs.edx); lkcd_print(" esi: %lx\n", dh->dh_regs.esi); lkcd_print(" edi: %lx\n", dh->dh_regs.edi); lkcd_print(" eax: %lx\n", dh->dh_regs.eax); lkcd_print(" xds: %x\n", dh->dh_regs.xds); lkcd_print(" xes: %x\n", dh->dh_regs.xes); lkcd_print(" orig_eax: %lx\n", dh->dh_regs.orig_eax); lkcd_print(" eip: %lx\n", dh->dh_regs.eip); lkcd_print(" xcs: %x\n", dh->dh_regs.xcs); lkcd_print(" eflags: %lx\n", dh->dh_regs.eflags); lkcd_print(" esp: %lx\n", dh->dh_regs.esp); lkcd_print(" xss: %x\n", dh->dh_regs.xss); #endif if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: %llx\n", dp->dp_address); lkcd_print(" dp_size: %ld\n", dp->dp_size); lkcd_print(" dp_flags: %lx (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_COMPRESSED) lkcd_print("DUMP_COMPRESSED", others++); if (dp->dp_flags & DUMP_RAW) lkcd_print("%sDUMP_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_END) lkcd_print("DUMP_END", others++ ? "|" : ""); lkcd_print(")\n"); } crash-utility-crash-9cd43f5/memory.c0000664000372000037200000213277115107550337017065 0ustar juerghjuergh/* memory.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
* Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include #include #include "maple_tree.h" struct meminfo { /* general purpose memory information structure */ ulong cache; /* used by the various memory searching/dumping */ ulong slab; /* routines. Only one of these is used per cmd */ ulong c_flags; /* so stuff whatever's helpful in here... */ ulong c_offset; ulong c_num; ulong s_mem; void *s_freep; ulong *s_index; ulong s_inuse; ulong cpucached_cache; ulong cpucached_slab; ulong inuse; ulong order; ulong slabsize; ulong num_slabs; ulong objects; ulonglong spec_addr; ulong flags; ulong size; ulong objsize; int memtype; int free; int slab_offset; char *reqname; char *curname; ulong *spec_cpumask; ulong *addrlist; int *kmem_bufctl; ulong *cpudata[NR_CPUS]; ulong *shared_array_cache; int current_cache_index; ulong found; ulong retval; struct struct_member_data *page_member_cache; ulong nr_members; char *ignore; int errors; int calls; int cpu; int cache_count; ulong get_shared; ulong get_totalram; ulong get_buffers; ulong get_slabs; char *slab_buf; char *cache_buf; ulong *cache_list; struct vmlist { ulong addr; ulong size; } *vmlist; ulong container; int *freelist; int freelist_index_size; ulong random; ulong list_offset; }; /* * Search modes */ #define SEARCH_ULONG (0) #define SEARCH_UINT (1) #define SEARCH_USHORT (2) #define SEARCH_CHARS (3) #define 
SEARCH_DEFAULT (SEARCH_ULONG) /* search mode information */ struct searchinfo { int mode; int vcnt; int val; int context; int memtype; int do_task_header; int tasks_found; struct task_context *task_context; ulong vaddr_start; ulong vaddr_end; ulonglong paddr_start; ulonglong paddr_end; union { /* default ulong search */ struct { ulong value[MAXARGS]; char *opt_string[MAXARGS]; ulong mask; } s_ulong; /* uint search */ struct { uint value[MAXARGS]; char *opt_string[MAXARGS]; uint mask; } s_uint; /* ushort search */ struct { ushort value[MAXARGS]; char *opt_string[MAXARGS]; ushort mask; } s_ushort; /* string (chars) search */ struct { char *value[MAXARGS]; int len[MAXARGS]; int started_flag; /* string search needs history */ } s_chars; } s_parms; char buf[BUFSIZE]; }; struct handle_each_vm_area_args { ulong task; ulong flag; ulong vaddr; struct reference *ref; char *vma_header; char *buf1; char *buf2; char *buf3; char *buf4; char *buf5; ulong vma; char **vma_buf; struct task_mem_usage *tm; int *found; int *single_vma_found; unsigned int radix; struct task_context *tc; ulong *single_vma; }; static char *memtype_string(int, int); static char *error_handle_string(ulong); static void collect_page_member_data(char *, struct meminfo *); struct integer_data { ulong value; ulong bitfield_value; struct struct_member_data *pmd; }; static int get_bitfield_data(struct integer_data *); static int show_page_member_data(char *, ulong, struct meminfo *, char *); static void dump_mem_map(struct meminfo *); static void dump_mem_map_SPARSEMEM(struct meminfo *); static void fill_mem_map_cache(ulong, ulong, char *); static void page_flags_init(void); static int page_flags_init_from_pageflag_names(void); static int page_flags_init_from_pageflags_enum(void); static int translate_page_flags(char *, ulong); static void dump_free_pages(struct meminfo *); static int dump_zone_page_usage(void); static void dump_multidimensional_free_pages(struct meminfo *); static void 
dump_free_pages_zones_v1(struct meminfo *); static void dump_free_pages_zones_v2(struct meminfo *); struct free_page_callback_data; static int dump_zone_free_area(ulong, int, ulong, struct free_page_callback_data *); static void dump_page_hash_table(struct meminfo *); static void kmem_search(struct meminfo *); static void kmem_cache_init(void); static void kmem_cache_init_slub(void); static ulong max_cpudata_limit(ulong, ulong *); static int kmem_cache_downsize(void); static int ignore_cache(struct meminfo *, char *); static char *is_kmem_cache_addr(ulong, char *); static char *is_kmem_cache_addr_common(ulong, char *); static void kmem_cache_list(struct meminfo *); static void dump_kmem_cache(struct meminfo *); static void dump_kmem_cache_percpu_v1(struct meminfo *); static void dump_kmem_cache_percpu_v2(struct meminfo *); static void dump_kmem_cache_slub(struct meminfo *); static void kmem_cache_list_common(struct meminfo *); static ulong get_cpu_slab_ptr(struct meminfo *, int, ulong *); static unsigned int oo_order(ulong); static unsigned int oo_objects(ulong); static char *vaddr_to_kmem_cache(ulong, char *, int); static char *is_slab_overload_page(ulong, ulong *, char *); static ulong vaddr_to_slab(ulong); static void do_slab_chain(int, struct meminfo *); static void do_slab_chain_percpu_v1(long, struct meminfo *); static void do_slab_chain_percpu_v2(long, struct meminfo *); static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *); static void do_slab_chain_slab_overload_page(long, struct meminfo *); static int slab_freelist_index_size(void); static int do_slab_slub(struct meminfo *, int); static void do_kmem_cache_slub(struct meminfo *); static void save_slab_data(struct meminfo *); static int slab_data_saved(struct meminfo *); static void dump_saved_slab_data(void); static void dump_slab(struct meminfo *); static void dump_slab_percpu_v1(struct meminfo *); static void dump_slab_percpu_v2(struct meminfo *); static void dump_slab_overload_page(struct 
meminfo *); static int verify_slab_v1(struct meminfo *, ulong, int); static int verify_slab_v2(struct meminfo *, ulong, int); static int verify_slab_overload_page(struct meminfo *, ulong, int); static void gather_slab_free_list(struct meminfo *); static void gather_slab_free_list_percpu(struct meminfo *); static void gather_slab_free_list_slab_overload_page(struct meminfo *); static void gather_cpudata_list_v1(struct meminfo *); static void gather_cpudata_list_v2(struct meminfo *); static void gather_cpudata_list_v2_nodes(struct meminfo *, int); static int check_cpudata_list(struct meminfo *, ulong); static int check_shared_list(struct meminfo *, ulong); static void gather_slab_cached_count(struct meminfo *); static void dump_slab_objects(struct meminfo *); static void dump_slab_objects_percpu(struct meminfo *); static void dump_vmlist(struct meminfo *); static void dump_vmap_area(struct meminfo *); static int get_vmap_area_list_from_nodes(ulong **); static int dump_page_lists(struct meminfo *); static void dump_kmeminfo(struct meminfo *); static int page_to_phys(ulong, physaddr_t *); static void display_memory(ulonglong, long, ulong, int, void *); static char *show_opt_string(struct searchinfo *); static void display_with_pre_and_post(void *, ulonglong, struct searchinfo *); static ulong search_ulong(ulong *, ulong, int, struct searchinfo *); static ulong search_uint(ulong *, ulong, int, struct searchinfo *); static ulong search_ushort(ulong *, ulong, int, struct searchinfo *); static ulong search_chars(ulong *, ulong, int, struct searchinfo *); static ulonglong search_ulong_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_uint_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_ushort_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_chars_p(ulong *, ulonglong, int, struct searchinfo *); static void search_virtual(struct searchinfo *); static void search_physical(struct searchinfo *); static int 
next_upage(struct task_context *, ulong, ulong *); static int next_kpage(ulong, ulong *); static int next_physpage(ulonglong, ulonglong *); static int next_vmlist_vaddr(ulong, ulong *); static int next_module_vaddr(ulong, ulong *); static int next_identity_mapping(ulong, ulong *); static int vm_area_page_dump(ulong, ulong, ulong, ulong, ulong, struct reference *); static void rss_page_types_init(void); static int dump_swap_info(ulong, ulong *, ulong *); static int get_hugetlb_total_pages(ulong *, ulong *); static char *get_swapdev(ulong, char *); static void fill_swap_info(ulong); static char *vma_file_offset(ulong, ulong, char *); static ssize_t read_dev_kmem(ulong, char *, long); static void dump_memory_nodes(int); static void dump_zone_stats(void); #define MEMORY_NODES_DUMP (0) #define MEMORY_NODES_INITIALIZE (1) static void node_table_init(void); static int compare_node_data(const void *, const void *); static void do_vm_flags(ulonglong); static ulonglong get_vm_flags(char *); static void PG_reserved_flag_init(void); static void PG_slab_flag_init(void); static ulong nr_blockdev_pages(void); static ulong nr_blockdev_pages_v2(void); void sparse_mem_init(void); void dump_mem_sections(int); void dump_memory_blocks(int); void list_mem_sections(void); ulong sparse_decode_mem_map(ulong, ulong); char *read_mem_section(ulong); ulong nr_to_section(ulong); int valid_section(ulong); int section_has_mem_map(ulong); ulong section_mem_map_addr(ulong, int); ulong valid_section_nr(ulong); ulong pfn_to_map(ulong); static int get_nodes_online(void); static int next_online_node(int); static ulong next_online_pgdat(int); static int vm_stat_init(void); static int vm_event_state_init(void); static int dump_vm_stat(char *, long *, ulong); static int dump_vm_event_state(void); static int dump_page_states(void); static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong); static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong); static int 
page_to_nid(ulong); static int get_kmem_cache_list(ulong **); static int get_kmem_cache_root_list(ulong **); static int get_kmem_cache_child_list(ulong **, ulong); static int get_kmem_cache_slub_data(long, struct meminfo *); static ulong compound_head(ulong); static long count_partial(ulong, struct meminfo *, ulong *); static short count_cpu_partial(struct meminfo *, int); static ulong get_freepointer(struct meminfo *, void *); static int count_free_objects(struct meminfo *, ulong); char *is_slab_page(struct meminfo *, char *); static void do_cpu_partial_slub(struct meminfo *, int); static void do_node_lists_slub(struct meminfo *, ulong, int); static int devmem_is_restricted(void); static int switch_to_proc_kcore(void); static int verify_pfn(ulong); static void dump_per_cpu_offsets(void); static void dump_page_flags(ulonglong); static ulong kmem_cache_nodelists(ulong); static void dump_hstates(void); static ulong freelist_ptr(struct meminfo *, ulong, ulong); static ulong handle_each_vm_area(struct handle_each_vm_area_args *); /* * Memory display modes specific to this file. */ #define DISPLAY_8 (0x2) #define DISPLAY_16 (0x4) #define DISPLAY_32 (0x8) #define DISPLAY_64 (0x10) #define SHOW_OFFSET (0x20) #define SYMBOLIC (0x40) #define HEXADECIMAL (0x80) #define DECIMAL (0x100) #define UDECIMAL (0x200) #define ASCII_ENDLINE (0x400) #define NO_ASCII (0x800) #define SLAB_CACHE (0x1000) #define DISPLAY_ASCII (0x2000) #define NET_ENDIAN (0x4000) #define DISPLAY_RAW (0x8000) #define NO_ERROR (0x10000) #define SLAB_CACHE2 (0x20000) #define DISPLAY_TYPES (DISPLAY_RAW|DISPLAY_ASCII|DISPLAY_8|\ DISPLAY_16|DISPLAY_32|DISPLAY_64) #define ASCII_UNLIMITED ((ulong)(-1) >> 1) static ulong DISPLAY_DEFAULT; /* * Before kernel commit ff202303c398e, the value is defined as a macro, so copy it here; * After this commit, the value is defined as an enum, which can be evaluated at runtime. 
*/ #define PAGE_TYPE_BASE 0xf0000000 #define PageType(page_type, flag) \ ((page_type & (vt->page_type_base | flag)) == vt->page_type_base) static void page_type_init(void) { if (!enumerator_value("PAGE_TYPE_BASE", (long *)&vt->page_type_base)) vt->page_type_base = PAGE_TYPE_BASE; } /* * The PG_slab's type has changed from a page flag to a page type * since kernel commit 46df8e73a4a3. */ static bool page_slab(ulong page, ulong flags) { if (vt->flags & SLAB_PAGEFLAGS) { if ((flags >> vt->PG_slab) & 1) return TRUE; return FALSE; } if (VALID_MEMBER(page_page_type)) { uint page_type; readmem(page+OFFSET(page_page_type), KVADDR, &page_type, sizeof(page_type), "page_type", FAULT_ON_ERROR); if (PageType(page_type, (uint)vt->PG_slab)) return TRUE; } return FALSE; } /* * Verify that the sizeof the primitive types are reasonable. */ void mem_init(void) { if (sizeof(char) != SIZEOF_8BIT) error(FATAL, "unsupported sizeof(char): %d\n", sizeof(char)); if (sizeof(short) != SIZEOF_16BIT) error(FATAL, "unsupported sizeof(short): %d\n", sizeof(short)); if ((sizeof(int) != SIZEOF_32BIT) && (sizeof(int) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(int): %d\n", sizeof(int)); if ((sizeof(long) != SIZEOF_32BIT) && (sizeof(long) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(long): %d\n", sizeof(long)); if (sizeof(void *) != sizeof(long)) error(FATAL, "pointer size: %d is not sizeof(long): %d\n", sizeof(void *), sizeof(long)); DISPLAY_DEFAULT = (sizeof(long) == 8) ? DISPLAY_64 : DISPLAY_32; } /* * Stash a few popular offsets and some basic kernel virtual memory * items used by routines in this file. 
*/ void vm_init(void) { char buf[BUFSIZE]; int i, len, dimension, nr_node_ids; struct syment *sp_array[2]; ulong value1, value2; char *kmem_cache_node_struct, *nodelists_field; MEMBER_OFFSET_INIT(task_struct_mm, "task_struct", "mm"); MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); MEMBER_OFFSET_INIT(mm_struct_mm_mt, "mm_struct", "mm_mt"); if (VALID_MEMBER(mm_struct_mm_mt)) { maple_init(); } MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); if (!VALID_MEMBER(mm_struct_rss)) MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss"); MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss"); MEMBER_OFFSET_INIT(mm_struct_file_rss, "mm_struct", "_file_rss"); if (!VALID_MEMBER(mm_struct_anon_rss)) { MEMBER_OFFSET_INIT(mm_struct_rss_stat, "mm_struct", "rss_stat"); MEMBER_OFFSET_INIT(mm_rss_stat_count, "mm_rss_stat", "count"); } MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm"); MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code"); MEMBER_OFFSET_INIT(mm_struct_mm_count, "mm_struct", "mm_count"); MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm"); MEMBER_OFFSET_INIT(vm_area_struct_vm_next, "vm_area_struct", "vm_next"); MEMBER_OFFSET_INIT(vm_area_struct_vm_end, "vm_area_struct", "vm_end"); MEMBER_OFFSET_INIT(vm_area_struct_vm_start, "vm_area_struct", "vm_start"); MEMBER_OFFSET_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags"); MEMBER_OFFSET_INIT(vm_area_struct_vm_file, "vm_area_struct", "vm_file"); MEMBER_OFFSET_INIT(vm_area_struct_vm_offset, "vm_area_struct", "vm_offset"); MEMBER_OFFSET_INIT(vm_area_struct_vm_pgoff, "vm_area_struct", "vm_pgoff"); MEMBER_SIZE_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags"); MEMBER_OFFSET_INIT(vm_struct_addr, "vm_struct", "addr"); MEMBER_OFFSET_INIT(vm_struct_size, "vm_struct", "size"); MEMBER_OFFSET_INIT(vm_struct_next, "vm_struct", "next"); MEMBER_OFFSET_INIT(vmap_area_va_start, "vmap_area", 
"va_start"); MEMBER_OFFSET_INIT(vmap_area_va_end, "vmap_area", "va_end"); MEMBER_OFFSET_INIT(vmap_area_list, "vmap_area", "list"); MEMBER_OFFSET_INIT(vmap_area_flags, "vmap_area", "flags"); MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "vm"); if (INVALID_MEMBER(vmap_area_vm)) MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "private"); MEMBER_OFFSET_INIT(vmap_area_purge_list, "vmap_area", "purge_list"); STRUCT_SIZE_INIT(vmap_area, "vmap_area"); if (VALID_MEMBER(vmap_area_va_start) && VALID_MEMBER(vmap_area_va_end) && VALID_MEMBER(vmap_area_list) && VALID_MEMBER(vmap_area_vm)) { if (kernel_symbol_exists("vmap_nodes")) { STRUCT_SIZE_INIT(vmap_node, "vmap_node"); MEMBER_OFFSET_INIT(vmap_node_busy, "vmap_node", "busy"); MEMBER_OFFSET_INIT(rb_list_head, "rb_list", "head"); vt->flags |= USE_VMAP_NODES; } else if (kernel_symbol_exists("vmap_area_list")) vt->flags |= USE_VMAP_AREA; } if (kernel_symbol_exists("hstates")) { STRUCT_SIZE_INIT(hstate, "hstate"); MEMBER_OFFSET_INIT(hstate_order, "hstate", "order"); MEMBER_OFFSET_INIT(hstate_nr_huge_pages, "hstate", "nr_huge_pages"); MEMBER_OFFSET_INIT(hstate_free_huge_pages, "hstate", "free_huge_pages"); MEMBER_OFFSET_INIT(hstate_name, "hstate", "name"); } MEMBER_OFFSET_INIT(page_next, "page", "next"); if (VALID_MEMBER(page_next)) MEMBER_OFFSET_INIT(page_prev, "page", "prev"); if (INVALID_MEMBER(page_next)) ANON_MEMBER_OFFSET_INIT(page_next, "page", "next"); if (INVALID_MEMBER(page_next)) MEMBER_OFFSET_INIT(page_next, "slab", "next"); MEMBER_OFFSET_INIT(page_list, "page", "list"); if (VALID_MEMBER(page_list)) { ASSIGN_OFFSET(page_list_next) = OFFSET(page_list) + OFFSET(list_head_next); ASSIGN_OFFSET(page_list_prev) = OFFSET(page_list) + OFFSET(list_head_prev); } MEMBER_OFFSET_INIT(page_next_hash, "page", "next_hash"); MEMBER_OFFSET_INIT(page_inode, "page", "inode"); MEMBER_OFFSET_INIT(page_offset, "page", "offset"); MEMBER_OFFSET_INIT(page_count, "page", "count"); if (INVALID_MEMBER(page_count)) { MEMBER_OFFSET_INIT(page_count, 
"page", "_count"); if (INVALID_MEMBER(page_count)) ANON_MEMBER_OFFSET_INIT(page_count, "page", "_count"); if (INVALID_MEMBER(page_count)) MEMBER_OFFSET_INIT(page_count, "page", "_refcount"); if (INVALID_MEMBER(page_count)) ANON_MEMBER_OFFSET_INIT(page_count, "page", "_refcount"); } MEMBER_OFFSET_INIT(page_flags, "page", "flags"); MEMBER_SIZE_INIT(page_flags, "page", "flags"); MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); if (INVALID_MEMBER(page_mapping)) ANON_MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); if (INVALID_MEMBER(page_mapping) && (THIS_KERNEL_VERSION < LINUX(2,6,17)) && MEMBER_EXISTS("page", "_mapcount")) ASSIGN_OFFSET(page_mapping) = MEMBER_OFFSET("page", "_mapcount") + STRUCT_SIZE("atomic_t") + sizeof(ulong); MEMBER_OFFSET_INIT(page_index, "page", "index"); if (INVALID_MEMBER(page_index)) /* 6.16 and later */ MEMBER_OFFSET_INIT(page_index, "page", "__folio_index"); if (INVALID_MEMBER(page_index)) ANON_MEMBER_OFFSET_INIT(page_index, "page", "index"); MEMBER_OFFSET_INIT(page_buffers, "page", "buffers"); MEMBER_OFFSET_INIT(page_lru, "page", "lru"); if (INVALID_MEMBER(page_lru)) ANON_MEMBER_OFFSET_INIT(page_lru, "page", "lru"); MEMBER_OFFSET_INIT(page_pte, "page", "pte"); MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head"); if (INVALID_MEMBER(page_compound_head)) ANON_MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head"); MEMBER_OFFSET_INIT(page_private, "page", "private"); MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); MEMBER_OFFSET_INIT(page_page_type, "page", "page_type"); MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(swap_info_struct_swap_file, "swap_info_struct", "swap_file"); MEMBER_OFFSET_INIT(swap_info_struct_swap_vfsmnt, "swap_info_struct", "swap_vfsmnt"); MEMBER_OFFSET_INIT(swap_info_struct_flags, "swap_info_struct", "flags"); MEMBER_OFFSET_INIT(swap_info_struct_swap_map, "swap_info_struct", "swap_map"); MEMBER_OFFSET_INIT(swap_info_struct_swap_device, "swap_info_struct", 
"swap_device"); MEMBER_OFFSET_INIT(swap_info_struct_prio, "swap_info_struct", "prio"); MEMBER_OFFSET_INIT(swap_info_struct_max, "swap_info_struct", "max"); MEMBER_OFFSET_INIT(swap_info_struct_pages, "swap_info_struct", "pages"); MEMBER_OFFSET_INIT(swap_info_struct_inuse_pages, "swap_info_struct", "inuse_pages"); MEMBER_OFFSET_INIT(swap_info_struct_old_block_size, "swap_info_struct", "old_block_size"); MEMBER_OFFSET_INIT(swap_info_struct_bdev, "swap_info_struct", "bdev"); MEMBER_OFFSET_INIT(zs_pool_size_class, "zs_pool", "size_class"); MEMBER_OFFSET_INIT(size_class_size, "size_class", "size"); MEMBER_OFFSET_INIT(block_device_bd_inode, "block_device", "bd_inode"); MEMBER_OFFSET_INIT(block_device_bd_list, "block_device", "bd_list"); MEMBER_OFFSET_INIT(block_device_bd_disk, "block_device", "bd_disk"); MEMBER_OFFSET_INIT(inode_i_mapping, "inode", "i_mapping"); MEMBER_OFFSET_INIT(address_space_page_tree, "address_space", "page_tree"); if (INVALID_MEMBER(address_space_page_tree)) MEMBER_OFFSET_INIT(address_space_page_tree, "address_space", "i_pages"); MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "nrpages"); if (INVALID_MEMBER(address_space_nrpages)) MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "__nrpages"); MEMBER_OFFSET_INIT(super_block_s_inodes, "super_block", "s_inodes"); MEMBER_OFFSET_INIT(inode_i_sb_list, "inode", "i_sb_list"); MEMBER_OFFSET_INIT(gendisk_major, "gendisk", "major"); MEMBER_OFFSET_INIT(gendisk_fops, "gendisk", "fops"); MEMBER_OFFSET_INIT(gendisk_disk_name, "gendisk", "disk_name"); MEMBER_OFFSET_INIT(gendisk_private_data, "gendisk", "private_data"); STRUCT_SIZE_INIT(block_device, "block_device"); STRUCT_SIZE_INIT(address_space, "address_space"); STRUCT_SIZE_INIT(gendisk, "gendisk"); STRUCT_SIZE_INIT(blk_major_name, "blk_major_name"); if (VALID_STRUCT(blk_major_name)) { MEMBER_OFFSET_INIT(blk_major_name_next, "blk_major_name", "next"); MEMBER_OFFSET_INIT(blk_major_name_name, "blk_major_name", "name"); 
MEMBER_OFFSET_INIT(blk_major_name_major, "blk_major_name", "major"); } STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s"); STRUCT_SIZE_INIT(slab_s, "slab_s"); STRUCT_SIZE_INIT(slab, "slab"); STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); STRUCT_SIZE_INIT(pgd_t, "pgd_t"); /* * slab: overload struct slab over struct page * https://lkml.org/lkml/2013/10/16/155 * * commit e36ce448a08d removed kmem_cache.freelist_cache in 6.1, * so use freelist_size instead. */ if (MEMBER_EXISTS("kmem_cache", "freelist_size")) { vt->flags |= SLAB_OVERLOAD_PAGE; ANON_MEMBER_OFFSET_INIT(page_s_mem, "page", "s_mem"); ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); ANON_MEMBER_OFFSET_INIT(page_active, "page", "active"); /* * Moved to struct slab in Linux 5.17 */ if (INVALID_MEMBER(page_s_mem)) MEMBER_OFFSET_INIT(page_s_mem, "slab", "s_mem"); if (INVALID_MEMBER(page_freelist)) MEMBER_OFFSET_INIT(page_freelist, "slab", "freelist"); if (INVALID_MEMBER(page_active)) MEMBER_OFFSET_INIT(page_active, "slab", "active"); MEMBER_OFFSET_INIT(slab_slab_list, "slab", "slab_list"); } if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { vt->flags |= PERCPU_KMALLOC_V1; MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs, "kmem_cache_s", "slabs"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_full, "kmem_cache_s", "slabs_full"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_partial, "kmem_cache_s", "slabs_partial"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_free, "kmem_cache_s", "slabs_free"); MEMBER_OFFSET_INIT(kmem_cache_s_cpudata, "kmem_cache_s", "cpudata"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.cpudata", NULL, 0); 
MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off"); MEMBER_OFFSET_INIT(slab_s_list, "slab_s", "list"); MEMBER_OFFSET_INIT(slab_s_s_mem, "slab_s", "s_mem"); MEMBER_OFFSET_INIT(slab_s_inuse, "slab_s", "inuse"); MEMBER_OFFSET_INIT(slab_s_free, "slab_s", "free"); MEMBER_OFFSET_INIT(cpucache_s_avail, "cpucache_s", "avail"); MEMBER_OFFSET_INIT(cpucache_s_limit, "cpucache_s", "limit"); STRUCT_SIZE_INIT(cpucache_s, "cpucache_s"); } else if (!VALID_STRUCT(kmem_slab_s) && !VALID_STRUCT(slab_s) && !MEMBER_EXISTS("kmem_cache", "cpu_slab") && (VALID_STRUCT(slab) || (vt->flags & SLAB_OVERLOAD_PAGE))) { vt->flags |= PERCPU_KMALLOC_V2; if (VALID_STRUCT(kmem_cache_s)) { MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off"); MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); } else { STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache"); MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next"); if (INVALID_MEMBER(kmem_cache_s_next)) { /* * slab/slub unification starting in Linux 3.6. 
*/ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); } MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", "colour_off"); if (MEMBER_EXISTS("kmem_cache", "objsize")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "objsize"); else if (MEMBER_EXISTS("kmem_cache", "buffer_size")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "buffer_size"); else if (MEMBER_EXISTS("kmem_cache", "size")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "size"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_cpu_cache, "kmem_cache", "cpu_cache"); if (MEMBER_EXISTS("kmem_cache", "lists")) MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists"); else if (MEMBER_EXISTS("kmem_cache", "nodelists") || MEMBER_EXISTS("kmem_cache", "node")) { nodelists_field = MEMBER_EXISTS("kmem_cache", "node") ? 
"node" : "nodelists"; vt->flags |= PERCPU_KMALLOC_V2_NODES; MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", nodelists_field); if (MEMBER_TYPE("kmem_cache", nodelists_field) == TYPE_CODE_PTR) { /* * nodelists now a pointer to an outside array */ vt->flags |= NODELISTS_IS_PTR; if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; } else if (VALID_MEMBER(kmem_cache_cpu_cache)) { /* * commit bf0dea23a9c094ae869a88bb694fbe966671bf6d * mm/slab: use percpu allocator for cpu cache */ vt->flags |= SLAB_CPU_CACHE; MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; } else { /* * This should never happen with kmem_cache.node, * only with kmem_cache.nodelists */ ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, "kmem_cache.nodelists", NULL, 0); } } MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0); } if (VALID_STRUCT(slab)) { MEMBER_OFFSET_INIT(slab_list, "slab", "list"); MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); MEMBER_OFFSET_INIT(slab_free, "slab", "free"); /* * slab members were moved to an anonymous union in 2.6.39. 
*/ if (INVALID_MEMBER(slab_list)) ANON_MEMBER_OFFSET_INIT(slab_list, "slab", "list"); if (INVALID_MEMBER(slab_s_mem)) ANON_MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); if (INVALID_MEMBER(slab_inuse)) ANON_MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); if (INVALID_MEMBER(slab_free)) ANON_MEMBER_OFFSET_INIT(slab_free, "slab", "free"); } MEMBER_OFFSET_INIT(array_cache_avail, "array_cache", "avail"); MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit"); STRUCT_SIZE_INIT(array_cache, "array_cache"); /* * kmem_list3 renamed to kmem_cache_node in kernel 3.11-rc1 */ kmem_cache_node_struct = STRUCT_EXISTS("kmem_cache_node") ? "kmem_cache_node" : "kmem_list3"; MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, kmem_cache_node_struct, "slabs_partial"); MEMBER_OFFSET_INIT(kmem_list3_slabs_full, kmem_cache_node_struct, "slabs_full"); MEMBER_OFFSET_INIT(kmem_list3_slabs_free, kmem_cache_node_struct, "slabs_free"); MEMBER_OFFSET_INIT(kmem_list3_free_objects, kmem_cache_node_struct, "free_objects"); MEMBER_OFFSET_INIT(kmem_list3_shared, kmem_cache_node_struct, "shared"); /* * Common to slab/slub */ MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); if (INVALID_MEMBER(page_slab_page)) ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); if (INVALID_MEMBER(page_first_page)) ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); } else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") && STRUCT_EXISTS("kmem_cache_node")) { vt->flags |= KMALLOC_SLUB; STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "objsize"); if (INVALID_MEMBER(kmem_cache_objsize)) MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "object_size"); MEMBER_OFFSET_INIT(kmem_cache_offset, 
"kmem_cache", "offset"); MEMBER_OFFSET_INIT(kmem_cache_order, "kmem_cache", "order"); MEMBER_OFFSET_INIT(kmem_cache_local_node, "kmem_cache", "local_node"); MEMBER_OFFSET_INIT(kmem_cache_objects, "kmem_cache", "objects"); MEMBER_OFFSET_INIT(kmem_cache_inuse, "kmem_cache", "inuse"); MEMBER_OFFSET_INIT(kmem_cache_align, "kmem_cache", "align"); MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab"); MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_red_left_pad, "kmem_cache", "red_left_pad"); MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags"); MEMBER_OFFSET_INIT(kmem_cache_random, "kmem_cache", "random"); MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist"); MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page"); if (INVALID_MEMBER(kmem_cache_cpu_page)) MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "slab"); MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node"); MEMBER_OFFSET_INIT(kmem_cache_cpu_partial, "kmem_cache_cpu", "partial"); MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); if (INVALID_MEMBER(page_inuse)) ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); if (INVALID_MEMBER(page_inuse)) MEMBER_OFFSET_INIT(page_inuse, "slab", "inuse"); MEMBER_OFFSET_INIT(page_offset, "page", "offset"); if (INVALID_MEMBER(page_offset)) ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset"); MEMBER_OFFSET_INIT(page_slab, "page", "slab"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab"); if (INVALID_MEMBER(page_slab)) { MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); } if (INVALID_MEMBER(page_slab)) MEMBER_OFFSET_INIT(page_slab, "slab", "slab_cache"); MEMBER_OFFSET_INIT(slab_slab_list, "slab", "slab_list"); 
MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); if (INVALID_MEMBER(page_slab_page)) ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); if (INVALID_MEMBER(page_first_page)) ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); if (INVALID_MEMBER(page_freelist)) ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); if (INVALID_MEMBER(page_freelist)) MEMBER_OFFSET_INIT(page_freelist, "slab", "freelist"); if (INVALID_MEMBER(kmem_cache_objects)) { MEMBER_OFFSET_INIT(kmem_cache_oo, "kmem_cache", "oo"); /* NOTE: returns offset of containing bitfield */ ANON_MEMBER_OFFSET_INIT(page_objects, "page", "objects"); if (INVALID_MEMBER(page_objects)) ANON_MEMBER_OFFSET_INIT(page_objects, "slab", "objects"); } if (VALID_MEMBER(kmem_cache_node)) { ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.node", NULL, 0); vt->flags |= CONFIG_NUMA; } ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.cpu_slab", NULL, 0); STRUCT_SIZE_INIT(kmem_cache_node, "kmem_cache_node"); STRUCT_SIZE_INIT(kmem_cache_cpu, "kmem_cache_cpu"); MEMBER_OFFSET_INIT(kmem_cache_node_nr_partial, "kmem_cache_node", "nr_partial"); MEMBER_OFFSET_INIT(kmem_cache_node_nr_slabs, "kmem_cache_node", "nr_slabs"); MEMBER_OFFSET_INIT(kmem_cache_node_total_objects, "kmem_cache_node", "total_objects"); MEMBER_OFFSET_INIT(kmem_cache_node_partial, "kmem_cache_node", "partial"); MEMBER_OFFSET_INIT(kmem_cache_node_full, "kmem_cache_node", "full"); } else { MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, "kmem_cache_s", "c_nextp"); MEMBER_OFFSET_INIT(kmem_cache_s_c_name, "kmem_cache_s", "c_name"); MEMBER_OFFSET_INIT(kmem_cache_s_c_num, "kmem_cache_s", "c_num"); MEMBER_OFFSET_INIT(kmem_cache_s_c_org_size, "kmem_cache_s", "c_org_size"); MEMBER_OFFSET_INIT(kmem_cache_s_c_flags, "kmem_cache_s", "c_flags"); MEMBER_OFFSET_INIT(kmem_cache_s_c_offset, "kmem_cache_s", "c_offset"); 
MEMBER_OFFSET_INIT(kmem_cache_s_c_firstp, "kmem_cache_s", "c_firstp"); MEMBER_OFFSET_INIT(kmem_cache_s_c_gfporder, "kmem_cache_s", "c_gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_c_magic, "kmem_cache_s", "c_magic"); MEMBER_OFFSET_INIT(kmem_cache_s_c_align, "kmem_cache_s", "c_align"); MEMBER_OFFSET_INIT(kmem_slab_s_s_nextp, "kmem_slab_s", "s_nextp"); MEMBER_OFFSET_INIT(kmem_slab_s_s_freep, "kmem_slab_s", "s_freep"); MEMBER_OFFSET_INIT(kmem_slab_s_s_inuse, "kmem_slab_s", "s_inuse"); MEMBER_OFFSET_INIT(kmem_slab_s_s_mem, "kmem_slab_s", "s_mem"); MEMBER_OFFSET_INIT(kmem_slab_s_s_index, "kmem_slab_s", "s_index"); MEMBER_OFFSET_INIT(kmem_slab_s_s_offset, "kmem_slab_s", "s_offset"); MEMBER_OFFSET_INIT(kmem_slab_s_s_magic, "kmem_slab_s", "s_magic"); } if (kernel_symbol_exists("slab_root_caches")) { MEMBER_OFFSET_INIT(kmem_cache_memcg_params, "kmem_cache", "memcg_params"); MEMBER_OFFSET_INIT(memcg_cache_params___root_caches_node, "memcg_cache_params", "__root_caches_node"); MEMBER_OFFSET_INIT(memcg_cache_params_children, "memcg_cache_params", "children"); MEMBER_OFFSET_INIT(memcg_cache_params_children_node, "memcg_cache_params", "children_node"); if (VALID_MEMBER(kmem_cache_memcg_params) && VALID_MEMBER(memcg_cache_params___root_caches_node) && VALID_MEMBER(memcg_cache_params_children) && VALID_MEMBER(memcg_cache_params_children_node)) vt->flags |= SLAB_ROOT_CACHES; } if (!kt->kernel_NR_CPUS) { if (enumerator_value("WORK_CPU_UNBOUND", (long *)&value1)) kt->kernel_NR_CPUS = (int)value1; else if ((i = get_array_length("__per_cpu_offset", NULL, 0))) kt->kernel_NR_CPUS = i; else if (ARRAY_LENGTH(kmem_cache_s_cpudata)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_cpudata); else if (ARRAY_LENGTH(kmem_cache_s_array)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_array); else if (ARRAY_LENGTH(kmem_cache_cpu_slab)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_cpu_slab); } if (CRASHDEBUG(1)) fprintf(fp, "kernel NR_CPUS: %d %s\n", kt->kernel_NR_CPUS, kt->kernel_NR_CPUS ? 
"" : "(unknown)"); if (kt->kernel_NR_CPUS > NR_CPUS) { error(WARNING, "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n", kt->kernel_NR_CPUS, NR_CPUS); error(FATAL, "recompile crash with larger NR_CPUS\n"); } if (machdep->init_kernel_pgd) machdep->init_kernel_pgd(); else if (symbol_exists("swapper_pg_dir")) { value1 = symbol_value("swapper_pg_dir"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value1; } else if (symbol_exists("cpu_pgd")) { len = get_array_length("cpu_pgd", &dimension, 0); if ((len == NR_CPUS) && (dimension == machdep->ptrs_per_pgd)) { value1 = symbol_value("cpu_pgd"); for (i = 0; i < NR_CPUS; i++) { value2 = i * (SIZE(pgd_t) * machdep->ptrs_per_pgd); vt->kernel_pgd[i] = value1 + value2; } error(WARNING, "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n", dimension, len); } else { error(WARNING, "unrecognized dimensions: cpu_pgd[%d][%d]\n", dimension, len); value1 = symbol_value("cpu_pgd"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value1; error(WARNING, "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n", dimension, len); } } else error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n"); get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory); if (kernel_symbol_exists("mem_section")) vt->flags |= SPARSEMEM; else if (kernel_symbol_exists("mem_map")) { get_symbol_data("mem_map", sizeof(char *), &vt->mem_map); vt->flags |= FLATMEM; } else vt->flags |= DISCONTIGMEM; sparse_mem_init(); vt->vmalloc_start = machdep->vmalloc_start(); if (IS_VMALLOC_ADDR(vt->mem_map)) vt->flags |= V_MEM_MAP; vt->total_pages = BTOP(VTOP(vt->high_memory)); if (symbol_exists("_totalram_pages")) { readmem(symbol_value("_totalram_pages") + OFFSET(atomic_t_counter), KVADDR, &vt->totalram_pages, sizeof(ulong), "_totalram_pages", FAULT_ON_ERROR); } else { switch (get_syment_array("totalram_pages", sp_array, 2)) { case 1: get_symbol_data("totalram_pages", sizeof(ulong), &vt->totalram_pages); break; case 2: if 
(!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalram_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalram_pages #2", RETURN_ON_ERROR))) break; vt->totalram_pages = MAX(value1, value2); break; } } if (symbol_exists("_totalhigh_pages")) { readmem(symbol_value("_totalhigh_pages") + OFFSET(atomic_t_counter), KVADDR, &vt->totalhigh_pages, sizeof(ulong), "_totalhigh_pages", FAULT_ON_ERROR); vt->total_pages += vt->totalhigh_pages; } else if (symbol_exists("totalhigh_pages")) { switch (get_syment_array("totalhigh_pages", sp_array, 2)) { case 1: get_symbol_data("totalhigh_pages", sizeof(ulong), &vt->totalhigh_pages); break; case 2: if (!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalhigh_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalhigh_pages #2", RETURN_ON_ERROR))) break; vt->totalhigh_pages = MAX(value1, value2); break; } vt->total_pages += vt->totalhigh_pages; } if (symbol_exists("num_physpages")) get_symbol_data("num_physpages", sizeof(ulong), &vt->num_physpages); if (kernel_symbol_exists("mem_map")) get_symbol_data("max_mapnr", sizeof(ulong), &vt->max_mapnr); if (kernel_symbol_exists("nr_swapfiles")) get_symbol_data("nr_swapfiles", sizeof(unsigned int), &vt->nr_swapfiles); STRUCT_SIZE_INIT(page, "page"); STRUCT_SIZE_INIT(free_area, "free_area"); STRUCT_SIZE_INIT(free_area_struct, "free_area_struct"); STRUCT_SIZE_INIT(zone, "zone"); STRUCT_SIZE_INIT(zone_struct, "zone_struct"); STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t"); STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct"); STRUCT_SIZE_INIT(mm_struct, "mm_struct"); STRUCT_SIZE_INIT(vm_area_struct, "vm_area_struct"); STRUCT_SIZE_INIT(pglist_data, "pglist_data"); if (VALID_STRUCT(pglist_data)) { vt->flags |= ZONES; if (symbol_exists("pgdat_list") && !IS_SPARSEMEM()) vt->flags |= NODES; /* * Determine the number of nodes the best way possible, * starting 
with a default of 1. */ vt->numnodes = 1; if (symbol_exists("numnodes")) get_symbol_data("numnodes", sizeof(int), &vt->numnodes); if (get_nodes_online()) vt->flags |= NODES_ONLINE; MEMBER_OFFSET_INIT(pglist_data_node_zones, "pglist_data", "node_zones"); MEMBER_OFFSET_INIT(pglist_data_node_mem_map, "pglist_data", "node_mem_map"); MEMBER_OFFSET_INIT(pglist_data_node_start_paddr, "pglist_data", "node_start_paddr"); MEMBER_OFFSET_INIT(pglist_data_node_start_mapnr, "pglist_data", "node_start_mapnr"); MEMBER_OFFSET_INIT(pglist_data_node_size, "pglist_data", "node_size"); MEMBER_OFFSET_INIT(pglist_data_node_id, "pglist_data", "node_id"); MEMBER_OFFSET_INIT(pglist_data_node_next, "pglist_data", "node_next"); MEMBER_OFFSET_INIT(pglist_data_bdata, "pglist_data", "bdata"); MEMBER_OFFSET_INIT(pglist_data_nr_zones, "pglist_data", "nr_zones"); MEMBER_OFFSET_INIT(pglist_data_node_start_pfn, "pglist_data", "node_start_pfn"); MEMBER_OFFSET_INIT(pglist_data_pgdat_next, "pglist_data", "pgdat_next"); MEMBER_OFFSET_INIT(pglist_data_node_present_pages, "pglist_data", "node_present_pages"); MEMBER_OFFSET_INIT(pglist_data_node_spanned_pages, "pglist_data", "node_spanned_pages"); ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones, "pglist_data.node_zones", NULL, SIZE_OPTION(zone_struct, zone)); vt->ZONE_HIGHMEM = vt->nr_zones - 1; if (VALID_STRUCT(zone_struct)) { MEMBER_OFFSET_INIT(zone_struct_free_pages, "zone_struct", "free_pages"); MEMBER_OFFSET_INIT(zone_struct_free_area, "zone_struct", "free_area"); MEMBER_OFFSET_INIT(zone_struct_zone_pgdat, "zone_struct", "zone_pgdat"); MEMBER_OFFSET_INIT(zone_struct_name, "zone_struct", "name"); MEMBER_OFFSET_INIT(zone_struct_size, "zone_struct", "size"); if (INVALID_MEMBER(zone_struct_size)) MEMBER_OFFSET_INIT(zone_struct_memsize, "zone_struct", "memsize"); MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn, "zone_struct", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr, "zone_struct", "zone_start_paddr"); 
MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, "zone_struct", "zone_start_mapnr"); MEMBER_OFFSET_INIT(zone_struct_zone_mem_map, "zone_struct", "zone_mem_map"); MEMBER_OFFSET_INIT(zone_struct_inactive_clean_pages, "zone_struct", "inactive_clean_pages"); MEMBER_OFFSET_INIT(zone_struct_inactive_clean_list, "zone_struct", "inactive_clean_list"); ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_struct_free_area, "zone_struct.free_area", NULL, SIZE(free_area_struct)); MEMBER_OFFSET_INIT(zone_struct_inactive_dirty_pages, "zone_struct", "inactive_dirty_pages"); MEMBER_OFFSET_INIT(zone_struct_active_pages, "zone_struct", "active_pages"); MEMBER_OFFSET_INIT(zone_struct_pages_min, "zone_struct", "pages_min"); MEMBER_OFFSET_INIT(zone_struct_pages_low, "zone_struct", "pages_low"); MEMBER_OFFSET_INIT(zone_struct_pages_high, "zone_struct", "pages_high"); vt->dump_free_pages = dump_free_pages_zones_v1; } else if (VALID_STRUCT(zone)) { MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat"); MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages"); if (INVALID_MEMBER(zone_free_pages) && VALID_MEMBER(zone_vm_stat)) { long nr_free_pages = 0; if (!enumerator_value("NR_FREE_PAGES", &nr_free_pages)) error(WARNING, "cannot determine NR_FREE_PAGES enumerator\n"); ASSIGN_OFFSET(zone_free_pages) = OFFSET(zone_vm_stat) + (nr_free_pages * sizeof(long)); } MEMBER_OFFSET_INIT(zone_free_area, "zone", "free_area"); MEMBER_OFFSET_INIT(zone_zone_pgdat, "zone", "zone_pgdat"); MEMBER_OFFSET_INIT(zone_name, "zone", "name"); MEMBER_OFFSET_INIT(zone_zone_mem_map, "zone", "zone_mem_map"); MEMBER_OFFSET_INIT(zone_zone_start_pfn, "zone", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_spanned_pages, "zone", "spanned_pages"); MEMBER_OFFSET_INIT(zone_present_pages, "zone", "present_pages"); MEMBER_OFFSET_INIT(zone_pages_min, "zone", "pages_min"); MEMBER_OFFSET_INIT(zone_pages_low, "zone", "pages_low"); MEMBER_OFFSET_INIT(zone_pages_high, "zone", "pages_high"); MEMBER_OFFSET_INIT(zone_watermark, "zone", "watermark"); if 
(INVALID_MEMBER(zone_watermark)) MEMBER_OFFSET_INIT(zone_watermark, "zone", "_watermark"); MEMBER_OFFSET_INIT(zone_nr_active, "zone", "nr_active"); MEMBER_OFFSET_INIT(zone_nr_inactive, "zone", "nr_inactive"); MEMBER_OFFSET_INIT(zone_all_unreclaimable, "zone", "all_unreclaimable"); MEMBER_OFFSET_INIT(zone_flags, "zone", "flags"); MEMBER_OFFSET_INIT(zone_pages_scanned, "zone", "pages_scanned"); ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area, "zone.free_area", NULL, SIZE(free_area)); vt->dump_free_pages = dump_free_pages_zones_v2; } } else vt->numnodes = 1; node_table_init(); sprintf(buf, "%llx", (ulonglong) MAX((uint64_t)vt->max_mapnr * PAGESIZE(), machdep->memory_size())); vt->paddr_prlen = strlen(buf); vt->zero_paddr = ~0UL; if (kernel_symbol_exists("zero_pfn")) { ulong zero_pfn; if (readmem(symbol_value("zero_pfn"), KVADDR, &zero_pfn, sizeof(zero_pfn), "read zero_pfn", QUIET|RETURN_ON_ERROR)) vt->zero_paddr = zero_pfn << PAGESHIFT(); } vt->huge_zero_paddr = ~0UL; if (kernel_symbol_exists("huge_zero_pfn")) { ulong huge_zero_pfn; if (readmem(symbol_value("huge_zero_pfn"), KVADDR, &huge_zero_pfn, sizeof(huge_zero_pfn), "read huge_zero_pfn", QUIET|RETURN_ON_ERROR) && huge_zero_pfn != ~0UL) vt->huge_zero_paddr = huge_zero_pfn << PAGESHIFT(); } if (vt->flags & PERCPU_KMALLOC_V1) vt->dump_kmem_cache = dump_kmem_cache_percpu_v1; else if (vt->flags & PERCPU_KMALLOC_V2) vt->dump_kmem_cache = dump_kmem_cache_percpu_v2; else if (vt->flags & KMALLOC_SLUB) vt->dump_kmem_cache = dump_kmem_cache_slub; else vt->dump_kmem_cache = dump_kmem_cache; if (!(vt->flags & (NODES|ZONES))) { get_array_length("free_area", &dimension, 0); if (dimension) vt->dump_free_pages = dump_multidimensional_free_pages; else vt->dump_free_pages = dump_free_pages; } if (!(vt->vma_cache = (char *)malloc(SIZE(vm_area_struct)*VMA_CACHE))) error(FATAL, "cannot malloc vm_area_struct cache\n"); if (symbol_exists("page_hash_bits")) { unsigned int page_hash_bits; get_symbol_data("page_hash_bits", 
sizeof(unsigned int), &page_hash_bits); len = (1 << page_hash_bits); builtin_array_length("page_hash_table", len, NULL); get_symbol_data("page_hash_table", sizeof(void *), &vt->page_hash_table); vt->page_hash_table_len = len; STRUCT_SIZE_INIT(page_cache_bucket, "page_cache_bucket"); if (VALID_STRUCT(page_cache_bucket)) MEMBER_OFFSET_INIT(page_cache_bucket_chain, "page_cache_bucket", "chain"); } else if (symbol_exists("page_hash_table")) { vt->page_hash_table = symbol_value("page_hash_table"); vt->page_hash_table_len = 0; } else if (CRASHDEBUG(1)) error(NOTE, "page_hash_table does not exist in this kernel\n"); kmem_cache_init(); page_flags_init(); page_type_init(); rss_page_types_init(); vt->flags |= VM_INIT; } /* * This command displays the contents of memory, with the output formatted * in several different manners. The starting address may be entered either * symbolically or by address. The default output size is the size of a long * data type, and the default output format is hexadecimal. When hexadecimal * output is used, the output will be accompanied by an ASCII translation. * These are the options: * * -p address argument is a physical address. * -u address argument is a user virtual address. * -d display output in signed decimal format (default is hexadecimal). * -D display output in unsigned decimal format (default is hexadecimal). * -s displays output symbolically when appropriate. * -8 display output in 8-bit values. * -16 display output in 16-bit values. * -32 display output in 32-bit values (default on 32-bit machines). * -64 display output in 64-bit values (default on 64-bit machines). * * The default number of items to display is 1, but a count argument, if any, * must follow the address. 
*/
void
cmd_rd(void)
{
	int c, memtype, reverse;
	ulong flag;
	long bcnt, adjust, count;
	ulonglong addr, endaddr;
	ulong offset;
	struct syment *sp;
	FILE *tmpfp;
	char *outputfile;

	/* defaults: hexadecimal, native word size, kernel virtual address */
	flag = HEXADECIMAL|DISPLAY_DEFAULT;
	endaddr = 0;
	offset = 0;
	memtype = KVADDR;
	tmpfp = NULL;
	outputfile = NULL;
	count = -1;		/* -1: no count argument entered (yet) */
	adjust = bcnt = 0;
	reverse = FALSE;

	while ((c = getopt(argcnt, args, "Raxme:r:pfudDusSNo:81:3:6:")) != EOF) {
		switch(c)
		{
		case 'R':	/* display memory in reverse order */
			reverse = TRUE;
			break;

		case 'a':	/* ASCII-only display */
			flag &= ~DISPLAY_TYPES;
			flag |= DISPLAY_ASCII;
			break;

		case '8':	/* 8-bit units */
			flag &= ~DISPLAY_TYPES;
			flag |= DISPLAY_8;
			break;

		case '1':	/* "-16": 16-bit units */
			if (!STREQ(optarg, "6")) {
				error(INFO,
				    "invalid option: %c%s\n", c, optarg);
				argerrs++;
			} else {
				flag &= ~DISPLAY_TYPES;
				flag |= DISPLAY_16;
			}
			break;

		case '3':	/* "-32": 32-bit units */
			if (!STREQ(optarg, "2")) {
				error(INFO,
				    "invalid option: %c%s\n", c, optarg);
				argerrs++;
			} else {
				flag &= ~DISPLAY_TYPES;
				flag |= DISPLAY_32;
			}
			break;

		case '6':	/* "-64": 64-bit units */
			if (!STREQ(optarg, "4")) {
				error(INFO,
				    "invalid option: %c%s\n", c, optarg);
				argerrs++;
			} else {
				flag &= ~DISPLAY_TYPES;
				flag |= DISPLAY_64;
			}
			break;

		case 'e':	/* ending address (alternative to a count) */
			endaddr = htoll(optarg, FAULT_ON_ERROR, NULL);
			break;

		case 'r':	/* raw dump into the named output file */
			flag &= ~DISPLAY_TYPES;
			flag |= DISPLAY_RAW;
			outputfile = optarg;
			if ((tmpfp = fopen(outputfile, "w")) == NULL)
				error(FATAL, "cannot open output file: %s\n",
					outputfile);
			set_tmpfile2(tmpfp);
			break;

		case 's':	/* symbolic translation of hex values; -S    */
		case 'S':	/* additionally translates slab cache addrs  */
			/* only meaningful for the default display size */
			if (flag & DISPLAY_DEFAULT) {
				flag |= SYMBOLIC;
				if (c == 'S') {
					/* second -S: show address AND name */
					if (flag & SLAB_CACHE)
						flag |= SLAB_CACHE2;
					else
						flag |= SLAB_CACHE;
				}
			} else {
				error(INFO, "-%c option"
				    " is only allowed with %d-bit display\n",
					c, DISPLAY_DEFAULT == DISPLAY_64 ?
					64 : 32);
				argerrs++;
			}
			break;

		case 'o':	/* offset added to the starting address */
			offset = stol(optarg, FAULT_ON_ERROR, NULL);
			flag |= SHOW_OFFSET;
			break;

		case 'p':	/* address argument is physical */
			memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR);
			memtype = PHYSADDR;
			break;

		case 'u':	/* address argument is user virtual */
			memtype &= ~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR);
			memtype = UVADDR;
			break;

		case 'd':	/* signed decimal output */
			flag &= ~(HEXADECIMAL|DECIMAL);
			flag |= DECIMAL;
			break;

		case 'D':	/* unsigned decimal output */
			flag &= ~(HEXADECIMAL|UDECIMAL);
			flag |= UDECIMAL;
			break;

		case 'm':	/* address is a xen machine address */
			if (!(kt->flags & ARCH_XEN))
				error(FATAL, "-m option only applies to xen architecture\n");
			memtype &= ~(UVADDR|KVADDR|FILEADDR);
			memtype = XENMACHADDR;
			break;

		case 'f':	/* address is a raw dumpfile offset */
			if (!pc->dumpfile)
				error(FATAL, "-f option requires a dumpfile\n");
			memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR);
			memtype = FILEADDR;
			break;

		case 'x':	/* suppress end-of-line ASCII translation */
			flag |= NO_ASCII;
			break;

		case 'N':	/* display values in network byte order */
			flag |= NET_ENDIAN;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	/*
	 * The address argument may be a parenthesized expression,
	 * a hexadecimal value, or a symbol name -- tried in that order.
	 */
	if (*args[optind] == '(')
		addr = evall(args[optind], FAULT_ON_ERROR, NULL);
	else if (hexadecimal(args[optind], 0))
		addr = htoll(args[optind], FAULT_ON_ERROR, NULL);
	else if ((sp = symbol_search(args[optind])))
		addr = (ulonglong)sp->value;
	else {
		fprintf(fp, "symbol not found: %s\n", args[optind]);
		fprintf(fp, "possible alternatives:\n");
		if (!symbol_query(args[optind], " ", NULL))
			fprintf(fp, " (none found)\n");
		return;
	}

	if (flag & SHOW_OFFSET)
		addr += offset;

	/* an optional count argument may follow the address */
	if (args[++optind])
		count = stol(args[optind], FAULT_ON_ERROR, NULL);

	if (count == -1) {
		if (endaddr) {
			/* no count given: derive it from the -e address */
			if (endaddr <= addr)
				error(FATAL, "invalid ending address: %llx\n",
					endaddr);

			bcnt = endaddr - addr;

			switch (flag & (DISPLAY_TYPES))
			{
			case DISPLAY_64:
				count = bcnt/8;
				break;
			case DISPLAY_32:
				count = bcnt/4;
				break;
			case DISPLAY_16:
				count = bcnt/2;
				break;
			case DISPLAY_8:
			case DISPLAY_ASCII:
			case DISPLAY_RAW:
				count = bcnt;
				break;
			}

			if (bcnt == 0)
				count = 1;
		} else {
			if ((flag & DISPLAY_TYPES) == DISPLAY_RAW)
				error(FATAL,
				    "-r option requires either a count"
				    " argument or the -e option\n");
			count = (flag & DISPLAY_ASCII) ? ASCII_UNLIMITED : 1;
		}
	} else if (endaddr)
		error(WARNING,
		    "ending address ignored when count is specified\n");

	/* plain hex output gets an end-of-line ASCII translation */
	if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) &&
	    !(flag & NO_ASCII) && !(flag & DISPLAY_ASCII))
		flag |= ASCII_ENDLINE;

	/* a non-kernel virtual address falls back to user space */
	if (memtype == KVADDR) {
		if (!COMMON_VADDR_SPACE() && !IS_KVADDR(addr))
			memtype = UVADDR;
	}

	if (reverse) {
		/*
		 * Reverse display: back the starting address up by
		 * (count - 1) units so the entered address is shown last.
		 */
		if (!count)
			count = 1;

		switch (flag & (DISPLAY_TYPES))
		{
		case DISPLAY_64:
			bcnt = (count * 8);
			adjust = bcnt - 8;
			break;
		case DISPLAY_32:
			bcnt = (count * 4);
			adjust = bcnt - 4;
			break;
		case DISPLAY_16:
			bcnt = (count * 2);
			adjust = bcnt - 2;
			break;
		case DISPLAY_8:
		case DISPLAY_ASCII:
		case DISPLAY_RAW:
			bcnt = count;
			adjust = bcnt - 1;
			break;
		}

		addr = (count > 1) ? addr - adjust : addr;
	}

	display_memory(addr, count, flag, memtype, outputfile);
}

/*
 * display_memory() does the work for cmd_rd(), but can (and is) called by
 * other routines that want to dump raw data.  Based upon the flag, the
 * output format is tailored to fit in an 80-character line.  Hexadecimal
 * output is accompanied by an end-of-line ASCII translation.
*/

#define MAX_HEXCHARS_PER_LINE (32)

/* line locations where ASCII output starts */

#define ASCII_START_8   (51 + VADDR_PRLEN)
#define ASCII_START_16  (43 + VADDR_PRLEN)
#define ASCII_START_32  (39 + VADDR_PRLEN)
#define ASCII_START_64  (37 + VADDR_PRLEN)

#define ENTRIES_8   (16)   /* number of entries per line per size */
#define ENTRIES_16  (8)
#define ENTRIES_32  (4)
#define ENTRIES_64  (2)

struct memloc {            /* common holder of read memory */
	uint8_t u8;
	uint16_t u16;
	uint32_t u32;
	uint64_t u64;
	uint64_t limit64;  /* optional upper bound for symbolic 64-bit lookups */
};

/*
 *  Display a region of memory, formatted per the "flag" bits.
 *
 *  addr     starting address (interpreted per memtype).
 *  count    number of items to display; must be > 0.
 *  flag     DISPLAY_8/16/32/64/ASCII size selector, HEXADECIMAL/DECIMAL/
 *           UDECIMAL radix, SYMBOLIC/SLAB_CACHE annotation, ASCII_ENDLINE
 *           trailing ASCII column, NET_ENDIAN byte-swap, NO_ERROR to skip
 *           unreadable entries, or DISPLAY_RAW to dump raw bytes to a file.
 *  memtype  KVADDR, UVADDR, PHYSADDR, XENMACHADDR or FILEADDR.
 *  opt      for DISPLAY_RAW, the target filename (char *); otherwise unused.
 */
static void
display_memory(ulonglong addr, long count, ulong flag, int memtype, void *opt)
{
	int i, a, j;
	size_t typesz, sz;
	long written;
	void *location;       /* points at the struct memloc member to fill */
	char readtype[20];
	char *addrtype;
	struct memloc mem;
	int displayed, per_line;
	int hx, lost;         /* hx: chars in hexchars[]; lost: unreadable items */
	char hexchars[MAX_HEXCHARS_PER_LINE+1];
	char ch;
	int linelen;
	char buf[BUFSIZE*2];
	char slab[BUFSIZE];
	int ascii_start;
	ulong error_handle;
	/* 64-bit format strings differ between 32- and 64-bit builds */
	char *hex_64_fmt = BITS32() ? "%.*llx " : "%.*lx ";
	char *dec_64_fmt = BITS32() ? "%12lld " : "%15ld ";
	char *dec_u64_fmt = BITS32() ? "%12llu " : "%20lu ";

	if (count <= 0)
		error(FATAL, "invalid count request: %ld\n", count);

	switch (memtype)
	{
	case KVADDR:
		addrtype = "KVADDR";
		break;
	case UVADDR:
		addrtype = "UVADDR";
		break;
	case PHYSADDR:
		addrtype = "PHYSADDR";
		break;
	case XENMACHADDR:
		addrtype = "XENMACHADDR";
		break;
	case FILEADDR:
		addrtype = "FILEADDR";
		break;
	default:
		addrtype = NULL;
		break;
	}

	/* NOTE(review): the format string here looks truncated (extra
	 * arguments beyond "\n" are ignored) — possibly lost in a merge;
	 * harmless at runtime, but verify against upstream. */
	if (CRASHDEBUG(4))
		fprintf(fp, "\n", addr, count, flag, addrtype);

	if (flag & DISPLAY_RAW) {
		/* Raw mode: copy "count" bytes to pc->tmpfile2 in
		 * BUFSIZE-sized chunks; "opt" names the output file. */
		for (written = 0; written < count; written += sz) {
			sz = BUFSIZE > (count - written) ?
				(size_t)(count - written) : (size_t)BUFSIZE;
			readmem(addr + written, memtype, buf, (long)sz,
				"raw dump to file", FAULT_ON_ERROR);
			if (fwrite(buf, 1, sz, pc->tmpfile2) != sz)
				error(FATAL, "cannot write to: %s\n",
					(char *)opt);
		}
		close_tmpfile2();

		fprintf(fp, "%ld bytes copied from 0x%llx to %s\n",
			count, addr, (char *)opt);
		return;
	}

	BZERO(&mem, sizeof(struct memloc));
	hx = lost = linelen = typesz = per_line = ascii_start = 0;
	location = NULL;

	/* Select item size, read target, and per-line entry count. */
	switch (flag & (DISPLAY_TYPES))
	{
	case DISPLAY_64:
		ascii_start = ASCII_START_64;
		typesz = SIZEOF_64BIT;
		location = &mem.u64;
		sprintf(readtype, "64-bit %s", addrtype);
		per_line = ENTRIES_64;
		/* ia64: values above kt->end are never symbolized */
		if (machine_type("IA64"))
			mem.limit64 = kt->end;
		break;

	case DISPLAY_32:
		ascii_start = ASCII_START_32;
		typesz = SIZEOF_32BIT;
		location = &mem.u32;
		sprintf(readtype, "32-bit %s", addrtype);
		per_line = ENTRIES_32;
		break;

	case DISPLAY_16:
		ascii_start = ASCII_START_16;
		typesz = SIZEOF_16BIT;
		location = &mem.u16;
		sprintf(readtype, "16-bit %s", addrtype);
		per_line = ENTRIES_16;
		break;

	case DISPLAY_8:
		ascii_start = ASCII_START_8;
		typesz = SIZEOF_8BIT;
		location = &mem.u8;
		sprintf(readtype, "8-bit %s", addrtype);
		per_line = ENTRIES_8;
		break;

	case DISPLAY_ASCII:
		typesz = SIZEOF_8BIT;
		location = &mem.u8;
		sprintf(readtype, "ascii");
		per_line = 60;
		displayed = 0;
		break;
	}

	/* NO_ERROR: skip over unreadable items instead of aborting. */
	if (flag & NO_ERROR)
		error_handle = RETURN_ON_ERROR|QUIET;
	else
		error_handle = FAULT_ON_ERROR;

	for (i = a = 0; i < count; i++) {
		if(!readmem(addr, memtype, location, typesz,
			readtype, error_handle)) {
			addr += typesz;
			lost += 1;
			continue;
		}

		/* Start a new output line every per_line readable items. */
		if (!(flag & DISPLAY_ASCII) && (((i - lost) % per_line) == 0)) {
			if ((i - lost)) {
				if (flag & ASCII_ENDLINE) {
					fprintf(fp, " %s", hexchars);
				}
				fprintf(fp, "\n");
			}
			fprintf(fp, "%s: ",
				mkstring(buf, VADDR_PRLEN,
					RJUST|LONGLONG_HEX, MKSTR(&addr)));
			hx = 0;
			BZERO(hexchars, MAX_HEXCHARS_PER_LINE+1);
			linelen = VADDR_PRLEN + strlen(": ");
		}

		switch (flag & DISPLAY_TYPES)
		{
		case DISPLAY_64:
			/* Symbolic mode: prefer a kernel symbol name, then
			 * a slab-cache annotation, before raw output. */
			if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) ==
			    (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) {
				if ((!mem.limit64 ||
				    (mem.u64 <= mem.limit64)) &&
				    in_ksymbol_range(mem.u64) &&
				    strlen(value_to_symstr(mem.u64, buf, 0))) {
					fprintf(fp, "%-16s ", buf);
					linelen += strlen(buf)+1;
					break;
				}
				if ((flag & SLAB_CACHE) &&
				    vaddr_to_kmem_cache(mem.u64, slab,
				    !VERBOSE)) {
					if ((flag & SLAB_CACHE2) ||
					    CRASHDEBUG(1))
						sprintf(buf, "[%llx:%s]",
							(ulonglong)mem.u64,
							slab);
					else
						sprintf(buf, "[%s]", slab);
					fprintf(fp, "%-16s ", buf);
					linelen += strlen(buf)+1;
					break;
				}
			}
			if (flag & HEXADECIMAL) {
				fprintf(fp, hex_64_fmt, LONG_LONG_PRLEN,
					mem.u64);
				linelen += (LONG_LONG_PRLEN + 1);
			}
			else if (flag & DECIMAL)
				fprintf(fp, dec_64_fmt, mem.u64);
			else if (flag & UDECIMAL)
				fprintf(fp, dec_u64_fmt, mem.u64);
			break;

		case DISPLAY_32:
			if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) ==
			    (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) {
				if (in_ksymbol_range(mem.u32) &&
				    strlen(value_to_symstr(mem.u32, buf, 0))) {
					fprintf(fp, INT_PRLEN == 16 ?
						"%-16s " : "%-8s ", buf);
					linelen += strlen(buf)+1;
					break;
				}
				if ((flag & SLAB_CACHE) &&
				    vaddr_to_kmem_cache(mem.u32, slab,
				    !VERBOSE)) {
					if ((flag & SLAB_CACHE2) ||
					    CRASHDEBUG(1))
						sprintf(buf, "[%x:%s]",
							mem.u32, slab);
					else
						sprintf(buf, "[%s]", slab);
					fprintf(fp, INT_PRLEN == 16 ?
						"%-16s " : "%-8s ", buf);
					linelen += strlen(buf)+1;
					break;
				}
			}
			if (flag & NET_ENDIAN)
				mem.u32 = htonl(mem.u32);
			if (flag & HEXADECIMAL) {
				fprintf(fp, "%.*x ", INT_PRLEN, mem.u32 );
				linelen += (INT_PRLEN + 1);
			}
			else if (flag & DECIMAL)
				fprintf(fp, "%12d ", mem.u32 );
			else if (flag & UDECIMAL)
				fprintf(fp, "%12u ", mem.u32 );
			break;

		case DISPLAY_16:
			if (flag & NET_ENDIAN)
				mem.u16 = htons(mem.u16);
			if (flag & HEXADECIMAL) {
				fprintf(fp, "%.*x ", SHORT_PRLEN, mem.u16);
				linelen += (SHORT_PRLEN + 1);
			}
			else if (flag & DECIMAL)
				fprintf(fp, "%5d ", mem.u16);
			else if (flag & UDECIMAL)
				fprintf(fp, "%5u ", mem.u16);
			break;

		case DISPLAY_8:
			if (flag & HEXADECIMAL) {
				fprintf(fp, "%.*x ", CHAR_PRLEN, mem.u8);
				linelen += (CHAR_PRLEN + 1);
			}
			else if (flag & DECIMAL)
				fprintf(fp, "%3d ", mem.u8);
			else if (flag & UDECIMAL)
				fprintf(fp, "%3u ", mem.u8);
			break;

		case DISPLAY_ASCII:
			if (isprint(mem.u8)) {
				if ((a % per_line) == 0) {
					if (displayed && i)
						fprintf(fp, "\n");
					fprintf(fp, "%s: ",
						mkstring(buf, VADDR_PRLEN,
						RJUST|LONGLONG_HEX,
						MKSTR(&addr)));
				}
				fprintf(fp, "%c", mem.u8);
				displayed++;
				a++;
			} else {
				/* non-printable char ends an unlimited scan */
				if (count == ASCII_UNLIMITED)
					return;
				a = 0;
			}
			break;
		}

		/* Accumulate the printable-ASCII rendition of the raw
		 * bytes for the optional end-of-line ASCII column. */
		if (flag & HEXADECIMAL) {
			char* ptr;
			switch (flag & DISPLAY_TYPES)
			{
			case DISPLAY_64:
				ptr = (char*)&mem.u64;
				for (j = 0; j < SIZEOF_64BIT; j++) {
					ch = ptr[j];
					if ((ch >= 0x20) && (ch < 0x7f)) {
						hexchars[hx++] = ch;
					} else {
						hexchars[hx++] = '.';
					}
				}
				break;

			case DISPLAY_32:
				ptr = (char*)&mem.u32;
				for (j = 0; j < (SIZEOF_32BIT); j++) {
					ch = ptr[j];
					if ((ch >= 0x20) && (ch < 0x7f)) {
						hexchars[hx++] = ch;
					} else {
						hexchars[hx++] = '.';
					}
				}
				break;

			case DISPLAY_16:
				ptr = (char*)&mem.u16;
				for (j = 0; j < SIZEOF_16BIT; j++) {
					ch = ptr[j];
					if ((ch >= 0x20) && (ch < 0x7f)) {
						hexchars[hx++] = ch;
					} else {
						hexchars[hx++] = '.';
					}
				}
				break;

			case DISPLAY_8:
				ptr = (char*)&mem.u8;
				for (j = 0; j < SIZEOF_8BIT; j++) {
					ch = ptr[j];
					if ((ch >= 0x20) && (ch < 0x7f)) {
						hexchars[hx++] = ch;
					} else {
						hexchars[hx++] = '.';
					}
				}
				break;
			}
		}

		addr += typesz;
	}

	/* Pad and emit the trailing ASCII column for the final line. */
	if ((flag & ASCII_ENDLINE) && hx) {
		pad_line(fp, ascii_start - linelen, ' ');
		fprintf(fp, " %s", hexchars);
	}

	if (lost != count )
		fprintf(fp,"\n");
}

/*
 *  Dump "count" items from a dumpfile offset: raw to a file when "file"
 *  is set, otherwise as 64-bit hex with a trailing ASCII column.
 */
void
display_memory_from_file_offset(ulonglong addr, long count, void *file)
{
	if (file)
		display_memory(addr, count, DISPLAY_RAW, FILEADDR, file);
	else
		display_memory(addr, count,
			DISPLAY_64|ASCII_ENDLINE|HEXADECIMAL, FILEADDR, file);
}

/*
 *  cmd_wr() is the sister routine of cmd_rd(), used to modify the contents
 *  of memory.  Like the "rd" command, the starting address may be entered
 *  either symbolically or by address.  The default modification size
 *  is the size of a long data type.  Write permission must exist on
 *  /dev/mem.  The flags are similar to those used by rd:
 *
 *      -p  address argument is a physical address.
 *      -u  address argument is a user virtual address (only if ambiguous).
 *      -k  address argument is a kernel virtual address (only if ambiguous).
 *      -8  write data in an 8-bit value.
 *      -16 write data in a 16-bit value.
 *      -32 write data in a 32-bit values (default on 32-bit machines).
 *      -64 write data in a 64-bit values (default on 64-bit machines).
 *
 *  Only one value of a given datasize may be modified.
*/
void
cmd_wr(void)
{
	int c;
	ulonglong value;
	int addr_entered, value_entered;
	int memtype;
	struct memloc mem;    /* staging area for the sized write value */
	ulong addr;
	void *buf;
	long size;
	struct syment *sp;

	if (DUMPFILE())
		error(FATAL, "not allowed on dumpfiles\n");

	memtype = 0;
	buf = NULL;
	addr = 0;
	size = sizeof(void*);  /* default write size: one long */
	addr_entered = value_entered = FALSE;

	while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) {
		switch(c)
		{
		case '8':
			size = 1;
			break;

		/* "-16", "-32" and "-64" arrive as option letter plus the
		 * remainder of the digits in optarg. */
		case '1':
			if (!STREQ(optarg, "6")) {
				error(INFO, "invalid option: %c%s\n",
					c, optarg);
				argerrs++;
			} else
				size = 2;
			break;

		case '3':
			if (!STREQ(optarg, "2")) {
				error(INFO, "invalid option: %c%s\n",
					c, optarg);
				argerrs++;
			} else
				size = 4;
			break;

		case '6':
			if (!STREQ(optarg, "4")) {
				error(INFO, "invalid option: %c%s\n",
					c, optarg);
				argerrs++;
			} else
				size = 8;
			break;

		case 'p':
			memtype &= ~(UVADDR|KVADDR|FILEADDR);
			memtype = PHYSADDR;
			break;

		case 'u':
			memtype &= ~(PHYSADDR|KVADDR|FILEADDR);
			memtype = UVADDR;
			break;

		case 'k':
			memtype &= ~(PHYSADDR|UVADDR|FILEADDR);
			memtype = KVADDR;
			break;

		case 'f':
			/*
			 *  Unsupported, but can be forcibly implemented
			 *  by removing the DUMPFILE() check above and
			 *  recompiling.
			 */
			if (!pc->dumpfile)
				error(FATAL,
					"-f option requires a dumpfile\n");
			memtype &= ~(PHYSADDR|UVADDR|KVADDR);
			memtype = FILEADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* First argument: address expression, hex value, or symbol name. */
	if (args[optind]) {
		if (*args[optind] == '(')
			addr = evall(args[optind], FAULT_ON_ERROR, NULL);
		else if (hexadecimal(args[optind], 0))
			addr = htoll(args[optind], FAULT_ON_ERROR, NULL);
		else if ((sp = symbol_search(args[optind])))
			addr = sp->value;
		else {
			fprintf(fp, "symbol not found: %s\n", args[optind]);
			fprintf(fp, "possible alternatives:\n");
			if (!symbol_query(args[optind], " ", NULL))
				fprintf(fp, " (none found)\n");
			return;
		}
		addr_entered = TRUE;

		/* Second argument: the value, truncated to "size" bytes. */
		if (args[++optind]) {
			value = stol(args[optind], FAULT_ON_ERROR, NULL);
			value_entered = TRUE;

			switch (size)
			{
			case 1:
				mem.u8 = (uint8_t)value;
				buf = (void *)&mem.u8;
				break;
			case 2:
				mem.u16 = (uint16_t)value;
				buf = (void *)&mem.u16;
				break;
			case 4:
				mem.u32 = (uint32_t)value;
				buf = (void *)&mem.u32;
				break;
			case 8:
				mem.u64 = (uint64_t)value;
				buf = (void *)&mem.u64;
				break;
			}
		}
	}

	if (!addr_entered || !value_entered)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* No -p/-u/-k given: classify the address by its value. */
	if (!memtype)
		memtype = vaddr_type(addr, CURRENT_CONTEXT());

	switch (memtype)
	{
	case UVADDR:
		if (!IS_UVADDR(addr, CURRENT_CONTEXT())) {
			error(INFO, "invalid user virtual address: %llx\n",
				addr);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case KVADDR:
		if (!IS_KVADDR(addr)) {
			error(INFO, "invalid kernel virtual address: %llx\n",
				addr);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case PHYSADDR:
		break;

	case FILEADDR:
		break;

	case AMBIGUOUS:
		error(INFO,
			"ambiguous address: %llx (requires -p, -u or -k)\n",
			addr);
		cmd_usage(pc->curcmd, SYNOPSIS);
	}

	writemem(addr, memtype, buf, size, "write memory", FAULT_ON_ERROR);
}

/*
 *  Format one stack word for "bt -f" style output: symbol name, slab-cache
 *  annotation, or plain hex, into the caller-supplied retbuf.  "limit",
 *  when non-zero, caps the values eligible for symbolic translation.
 */
char *
format_stack_entry(struct bt_info *bt, char *retbuf, ulong value, ulong limit)
{
	char buf[BUFSIZE*2];
	char slab[BUFSIZE];

	if (BITS32()) {
		if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) {
			if ((!limit || (value <= limit)) &&
			    in_ksymbol_range(value) &&
			    strlen(value_to_symstr(value, buf, 0)))
				sprintf(retbuf, INT_PRLEN == 16 ?
					"%-16s" : "%-8s", buf);
			else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) {
				if ((bt->flags & BT_FULL_SYM_SLAB2) ||
				    CRASHDEBUG(1))
					sprintf(buf, "[%lx:%s]", value, slab);
				else
					sprintf(buf, "[%s]", slab);
				sprintf(retbuf, INT_PRLEN == 16 ?
					"%-16s" : "%-8s", buf);
			} else
				sprintf(retbuf, "%08lx", value);
		} else
			sprintf(retbuf, "%08lx", value);
	} else {
		if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) {
			if ((!limit || (value <= limit)) &&
			    in_ksymbol_range(value) &&
			    strlen(value_to_symstr(value, buf, 0)))
				sprintf(retbuf, "%-16s", buf);
			else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) {
				if ((bt->flags & BT_FULL_SYM_SLAB2) ||
				    CRASHDEBUG(1))
					sprintf(buf, "[%lx:%s]", value, slab);
				else
					sprintf(buf, "[%s]", slab);
				sprintf(retbuf, "%-16s", buf);
			} else
				sprintf(retbuf, "%016lx", value);
		} else
			sprintf(retbuf, "%016lx", value);
	}

	return retbuf;
}

/*
 *  For processors with "traditional" kernel/user address space distinction.
 */
int
generic_is_kvaddr(ulong addr)
{
	return (addr >= (ulong)(machdep->kvbase));
}

/*
 *  NOTE: Perhaps even this generic version should tighten up requirements
 *        by calling uvtop()?
 */
int
generic_is_uvaddr(ulong addr, struct task_context *tc)
{
	return (addr < (ulong)(machdep->kvbase));
}

/*
 *  Raw dump of a task's stack, forcing symbolic output.
 */
void
raw_stack_dump(ulong stackbase, ulong size)
{
	display_memory(stackbase, size/sizeof(ulong),
		HEXADECIMAL|DISPLAY_DEFAULT|SYMBOLIC, KVADDR, NULL);
}

/*
 *  Raw data dump, with the option of symbolic output.
*/
void
raw_data_dump(ulong addr, long count, int symbolic)
{
	long wordcnt;
	ulonglong address;
	int memtype;
	ulong flags = HEXADECIMAL;

	/* Round the byte count up to whole native words. */
	switch (sizeof(long))
	{
	case SIZEOF_32BIT:
		wordcnt = count/SIZEOF_32BIT;
		if (count % SIZEOF_32BIT)
			wordcnt++;
		break;

	case SIZEOF_64BIT:
		wordcnt = count/SIZEOF_64BIT;
		if (count % SIZEOF_64BIT)
			wordcnt++;
		break;

	default:
		break;
	}

	/* A 1/2/4-byte request selects the matching display width;
	 * anything else uses the machine default. */
	switch (count)
	{
	case SIZEOF_8BIT:
		flags |= DISPLAY_8;
		break;
	case SIZEOF_16BIT:
		flags |= DISPLAY_16;
		break;
	case SIZEOF_32BIT:
		flags |= DISPLAY_32;
		break;
	default:
		flags |= DISPLAY_DEFAULT;
		break;
	}

	/* The current command may redirect the source to a dumpfile
	 * offset or a user virtual address. */
	if (pc->curcmd_flags & MEMTYPE_FILEADDR) {
		address = pc->curcmd_private;
		memtype = FILEADDR;
	} else if (pc->curcmd_flags & MEMTYPE_UVADDR) {
		address = (ulonglong)addr;
		memtype = UVADDR;
	} else {
		address = (ulonglong)addr;
		memtype = KVADDR;
	}

	display_memory(address, wordcnt,
		flags|(symbolic ? SYMBOLIC : ASCII_ENDLINE), memtype, NULL);
}

/*
 *  Quietly checks the accessibility of a memory location.
 */
int
accessible(ulong kva)
{
	ulong tmp;

	return(readmem(kva, KVADDR, &tmp, sizeof(ulong),
	       "accessible check", RETURN_ON_ERROR|QUIET));
}

/*
 *  readmem() is by far *the* workhorse of this whole program.  It reads
 *  memory from /dev/kmem, /dev/mem the dumpfile or /proc/kcore, whichever
 *  is appropriate:
 *
 *         addr  a user, kernel or physical memory address.
 *      memtype  addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR
 *       buffer  supplied buffer to read the data into.
 *         size  number of bytes to read.
 *         type  string describing the request -- helpful when the read fails.
 * error_handle  what to do if the read fails: FAULT_ON_ERROR kills the command
 *               immediately; RETURN_ON_ERROR returns FALSE; QUIET suppresses
 *               the error message.
*/

/* Emit an error message unless QUIET was requested (debug levels can
 * override), and never for the "search" command's probing reads. */
#define PRINT_ERROR_MESSAGE ((!(error_handle & QUIET) && \
	!STREQ(pc->curcmd, "search")) || \
	(CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) || CRASHDEBUG(2))

#define INVALID_UVADDR "invalid user virtual address: %llx type: \"%s\"\n"
#define INVALID_KVADDR "invalid kernel virtual address: %llx type: \"%s\"\n"

#define SEEK_ERRMSG  "seek error: %s address: %llx type: \"%s\"\n"
#define READ_ERRMSG  "read error: %s address: %llx type: \"%s\"\n"
#define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n"
#define PAGE_EXCLUDED_ERRMSG "page excluded: %s address: %llx type: \"%s\"\n"
#define PAGE_INCOMPLETE_ERRMSG "page incomplete: %s address: %llx type: \"%s\"\n"

/* When RETURN_PARTIAL is set and at least one byte was read, declare
 * success rather than failing the whole request.  Expands in readmem()
 * only; relies on its "error_handle", "size", "orig_size" and "type". */
#define RETURN_ON_PARTIAL_READ() \
	if ((error_handle & RETURN_PARTIAL) && (size < orig_size)) {	\
		if (CRASHDEBUG(1))	\
			error(INFO, "RETURN_PARTIAL: \"%s\" read: %ld of %ld\n",\
				type, orig_size - size, orig_size);	\
		return TRUE;	\
	}

int
readmem(ulonglong addr, int memtype, void *buffer, long size,
	char *type, ulong error_handle)
{
	int fd;
	long cnt, orig_size;
	physaddr_t paddr;
	ulonglong pseudo;
	char *bufptr;

	/* NOTE(review): the format string here looks truncated (extra
	 * arguments beyond "\n" are ignored) — verify against upstream. */
	if (CRASHDEBUG(4))
		fprintf(fp, "\n",
			addr, memtype_string(memtype, 1), type, size,
			error_handle_string(error_handle), (ulong)buffer);

	bufptr = (char *)buffer;
	orig_size = size;

	if (size <= 0) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, "invalid size request: %ld type: \"%s\"\n",
				size, type);
		goto readmem_error;
	}

	fd = REMOTE_MEMSRC() ? pc->sockfd : (ACTIVE() ? pc->mfd : pc->dfd);

	/*
	 *  Screen out any error conditions.
	 */
	switch (memtype)
	{
	case UVADDR:
		if (!CURRENT_CONTEXT()) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, "no current user process\n");
			goto readmem_error;
		}
		if (!IS_UVADDR(addr, CURRENT_CONTEXT())) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_UVADDR, addr, type);
			goto readmem_error;
		}
		break;

	case KVADDR:
		if (LKCD_DUMPFILE())
			addr = fix_lkcd_address(addr);
		if (!IS_KVADDR(addr)) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_KVADDR, addr, type);
			goto readmem_error;
		}
		break;

	case PHYSADDR:
	case XENMACHADDR:
		break;

	case FILEADDR:
		return generic_read_dumpfile(addr, buffer, size, type,
			error_handle);
	}

	/* Translate and read page-by-page; a request may cross pages. */
	while (size > 0) {
		switch (memtype)
		{
		case UVADDR:
			if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				/* Not mapped, but paddr may identify a
				 * swap location the page can be read from. */
				if (paddr != 0) {
					cnt = PAGESIZE() - PAGEOFFSET(addr);
					if (cnt > size)
						cnt = size;
					cnt = readswap(paddr, bufptr, cnt,
						addr);
					if (cnt) {
						bufptr += cnt;
						addr += cnt;
						size -= cnt;
						continue;
					}
				}
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_UVADDR,
						addr, type);
				goto readmem_error;
			}
			break;

		case KVADDR:
			if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_KVADDR,
						addr, type);
				goto readmem_error;
			}
			break;

		case PHYSADDR:
			paddr = addr;
			break;

		case XENMACHADDR:
			pseudo = xen_m2p(addr);
			if (pseudo == XEN_MACHADDR_NOT_FOUND) {
				pc->curcmd_flags |= XEN_MACHINE_ADDR;
				paddr = addr;
			} else
				paddr = pseudo | PAGEOFFSET(addr);
			break;
		}

		/*
		 *  Compute bytes till end of page.
		 */
		cnt = PAGESIZE() - PAGEOFFSET(paddr);

		if (cnt > size)
			cnt = size;

		if (CRASHDEBUG(4))
			fprintf(fp, "<%s: addr: %llx paddr: %llx cnt: %ld>\n",
				readmem_function_name(), addr,
				(unsigned long long)paddr, cnt);

		if (memtype == KVADDR)
			pc->curcmd_flags |= MEMTYPE_KVADDR;
		else
			pc->curcmd_flags &= ~MEMTYPE_KVADDR;

		switch (READMEM(fd, bufptr, cnt,
		    (memtype == PHYSADDR) || (memtype == XENMACHADDR) ?
		    0 : addr, paddr))
		{
		case SEEK_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, SEEK_ERRMSG,
					memtype_string(memtype, 0),
					addr, type);
			goto readmem_error;

		case READ_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, READ_ERRMSG,
					memtype_string(memtype, 0),
					addr, type);
			/* Early in initialization, a restricted /dev/mem
			 * can transparently be swapped for /proc/kcore,
			 * and the whole request retried. */
			if ((pc->flags & DEVMEM) &&
			    (kt->flags & PRE_KERNEL_INIT) &&
			    !(error_handle & NO_DEVMEM_SWITCH) &&
			    devmem_is_restricted() &&
			    switch_to_proc_kcore()) {
				error_handle &= ~QUIET;
				return(readmem(addr, memtype, bufptr, size,
					type, error_handle));
			}
			goto readmem_error;

		case PAGE_EXCLUDED:
			RETURN_ON_PARTIAL_READ();
			if (CRASHDEBUG(8))
				error(INFO, PAGE_EXCLUDED_ERRMSG,
					memtype_string(memtype, 0),
					addr, type);
			goto readmem_error;

		case PAGE_INCOMPLETE:
			RETURN_ON_PARTIAL_READ();
			if (PRINT_ERROR_MESSAGE)
				error(INFO, PAGE_INCOMPLETE_ERRMSG,
					memtype_string(memtype, 0),
					addr, type);
			goto readmem_error;

		default:
			break;
		}

		addr += cnt;
		bufptr += cnt;
		size -= cnt;
	}

	return TRUE;

readmem_error:
	switch (error_handle)
	{
	case (FAULT_ON_ERROR):
	case (QUIET|FAULT_ON_ERROR):
		if (pc->flags & IN_FOREACH)
			RESUME_FOREACH();
		RESTART();

	case (RETURN_ON_ERROR):
	case (RETURN_PARTIAL|RETURN_ON_ERROR):
	case (QUIET|RETURN_ON_ERROR):
		break;
	}

	return FALSE;
}

/*
 *  Accept anything...
 */
int
generic_verify_paddr(physaddr_t paddr)
{
	return TRUE;
}

/*
 *  Read from /dev/mem.
 */
int
read_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	int readcnt;

	if (!machdep->verify_paddr(paddr)) {
		if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search"))
			error(INFO, "verify_paddr(%lx) failed\n", paddr);
		return READ_ERROR;
	}

	/*
	 *  /dev/mem disallows anything >= __pa(high_memory)
	 *
	 *  However it will allow 64-bit lseeks to anywhere, and when followed
	 *  by pulling a 32-bit address from the 64-bit file position, it
	 *  quietly returns faulty data from the (wrapped-around) address.
	 */
	if (vt->high_memory &&
	    (paddr >= (physaddr_t)(VTOP(vt->high_memory)))) {
		readcnt = 0;
		errno = 0;
		goto try_dev_kmem;
	}

	if (lseek(fd, (off_t)paddr, SEEK_SET) == -1)
		return SEEK_ERROR;

next_read:
	errno = 0;
	readcnt = read(fd, bufptr, cnt);

	if ((readcnt != cnt) && CRASHDEBUG(4)) {
		if (errno)
			perror("/dev/mem");
		error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n",
			paddr, cnt, readcnt, readcnt);
	}

try_dev_kmem:
	/*
	 *  On 32-bit intel architectures high memory can only be accessed
	 *  via vmalloc'd addresses.  However, /dev/mem returns 0 bytes, and
	 *  non-reserved memory pages can't be mmap'd, so the only alternative
	 *  is to read it from /dev/kmem.
	 */
	if ((readcnt != cnt) && BITS32() && !readcnt && !errno &&
	    IS_VMALLOC_ADDR(addr))
		readcnt = read_dev_kmem(addr, bufptr, cnt);

	/*
	 *  The 2.6 valid_phys_addr_range() can potentially shorten the
	 *  count of a legitimate read request.  So far this has only been
	 *  seen on an ia64 where a kernel page straddles an EFI segment.
	 */
	if ((readcnt != cnt) && readcnt && (machdep->flags & DEVMEMRD) &&
	    !errno) {
		if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search"))
			error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n",
				paddr, cnt, readcnt, readcnt);
		cnt -= readcnt;
		bufptr += readcnt;
		goto next_read;
	}

	if (readcnt != cnt)
		return READ_ERROR;

	return readcnt;
}

/*
 *  Write to /dev/mem.
 */
int
write_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	if (!machdep->verify_paddr(paddr)) {
		if (CRASHDEBUG(1))
			error(INFO, "verify_paddr(%lx) failed\n", paddr);
		return WRITE_ERROR;
	}

	if (lseek(fd, (off_t)paddr, SEEK_SET) == -1)
		return SEEK_ERROR;

	if (write(fd, bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 *  The first required reads of memory are done in kernel_init(),
 *  so if there's a fatal read error of /dev/mem, display a warning
 *  message if it appears that CONFIG_STRICT_DEVMEM is in effect.
 *  On x86 and x86_64, only the first 256 pages of physical memory
 *  are accessible:
 *
 *    #ifdef CONFIG_STRICT_DEVMEM
 *    int devmem_is_allowed(unsigned long pagenr)
 *    {
 *        if (pagenr <= 256)
 *            return 1;
 *        if (!page_is_ram(pagenr))
 *            return 1;
 *        return 0;
 *    }
 *    #endif
 *
 *  It would probably suffice to simply check for the existence of
 *  devmem_is_allowed(), but on x86 and x86_64 verify pfn 256 reads OK,
 *  and 257 fails.
 *
 *  Update: a patch has been posted to LKML to fix the off-by-one error
 *  by changing "<= 256" to "< 256":
 *
 *    https://lkml.org/lkml/2012/8/28/357
 *
 *  The X86/X86_64 lower-boundary pfn check below has been changed
 *  (preemptively) from 256 to 255.
 *
 *  In any case, if that x86/x86_64 check fails to prove CONFIG_STRICT_DEVMEM
 *  is configured, then the function will check that "jiffies" can be read,
 *  as is done for the other architectures.
 *
 */
static int
devmem_is_restricted(void)
{
	long tmp;
	int restricted;

	/*
	 *  Check for pre-CONFIG_STRICT_DEVMEM kernels.
	 */
	if (!kernel_symbol_exists("devmem_is_allowed")) {
		if (machine_type("ARM") || machine_type("ARM64") ||
		    machine_type("X86") || machine_type("X86_64") ||
		    machine_type("PPC") || machine_type("PPC64"))
			return FALSE;
	}

	restricted = FALSE;

	if (STREQ(pc->live_memsrc, "/dev/mem")) {
		/* x86/x86_64: pfn 255 readable but pfn 257 not readable
		 * implies the STRICT_DEVMEM low-memory window. */
		if (machine_type("X86") || machine_type("X86_64")) {
			if (readmem(255*PAGESIZE(), PHYSADDR, &tmp,
			    sizeof(long), "devmem_is_allowed - pfn 255",
			    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH) &&
			    !(readmem(257*PAGESIZE(), PHYSADDR, &tmp,
			    sizeof(long), "devmem_is_allowed - pfn 257",
			    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH)))
				restricted = TRUE;
		}
		/* Fallback for all architectures: a well-known kernel
		 * symbol that should always be readable. */
		if (kernel_symbol_exists("jiffies") &&
		    !readmem(symbol_value("jiffies"), KVADDR, &tmp,
		    sizeof(ulong), "devmem_is_allowed - jiffies",
		    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH))
			restricted = TRUE;

		if (restricted && CRASHDEBUG(1))
			error(INFO,
			    "this kernel may be configured with CONFIG_STRICT_DEVMEM,"
			    " which\n renders /dev/mem unusable as a live memory "
			    "source.\n");
	}

	return restricted;
}

/*
 *  Re-point the live memory source from /dev/mem to /proc/kcore,
 *  updating pc->readmem/writemem and flags accordingly.  Returns
 *  TRUE on success, FALSE if /proc/kcore is absent or unusable.
 */
static int
switch_to_proc_kcore(void)
{
	close(pc->mfd);

	if (file_exists("/proc/kcore", NULL)) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "trying /proc/kcore as an alternative to /dev/mem\n\n");
	} else
		return FALSE;

	if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0) {
		error(INFO, "/proc/kcore: %s\n", strerror(errno));
		return FALSE;
	}

	if (!proc_kcore_init(fp, pc->mfd)) {
		error(INFO, "/proc/kcore: initialization failed\n");
		return FALSE;
	}

	pc->flags &= ~DEVMEM;
	pc->flags |= PROC_KCORE;
	pc->readmem = read_proc_kcore;
	pc->writemem = write_proc_kcore;
	pc->live_memsrc = "/proc/kcore";

	return TRUE;
}

/*
 *  Read from memory driver.
 */
int
read_memory_device(int fd, void *bufptr, int cnt, ulong addr,
	physaddr_t paddr)
{
	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
		return READ_ERROR;

	if (!machdep->verify_paddr(paddr)) {
		if (CRASHDEBUG(1))
			error(INFO, "verify_paddr(%lx) failed\n", paddr);
		return READ_ERROR;
	}

	lseek(fd, (loff_t)paddr, SEEK_SET);

	if (read(fd, bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Write to memory driver.
 */
int
write_memory_device(int fd, void *bufptr, int cnt, ulong addr,
	physaddr_t paddr)
{
	if (!(MEMORY_DRIVER_DEVICE_MODE & S_IWUSR))
		return (error(FATAL, "cannot write to %s!\n",
			pc->live_memsrc));

	if (lseek(fd, (loff_t)paddr, SEEK_SET) == -1)
		return SEEK_ERROR;

	if (write(fd, bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 *  Read from an MCLX formatted dumpfile.
 */
int
read_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr,
	physaddr_t paddr)
{
	if (vas_lseek((ulong)paddr, SEEK_SET))
		return SEEK_ERROR;

	if (vas_read((void *)bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Write to an MCLX formatted dumpfile.  This only modifies the buffered
 *  copy only; if it gets flushed, the modification is lost.
*/
int
write_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr,
	physaddr_t paddr)
{
	if (vas_lseek((ulong)paddr, SEEK_SET))
		return SEEK_ERROR;

	if (vas_write((void *)bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 *  Read from an LKCD formatted dumpfile.
 */
int
read_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr,
	physaddr_t paddr)
{
	set_lkcd_fp(fp);

	if (!lkcd_lseek(paddr))
		return SEEK_ERROR;

	if (lkcd_read((void *)bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Write to an LKCD formatted dumpfile.  (dummy routine -- not allowed)
 */
int
write_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr,
	physaddr_t paddr)
{
	return (error(FATAL, "cannot write to an LKCD compressed dump!\n"));
}

/*
 *  Read from network daemon.
 */
int
read_daemon(int fd, void *bufptr, int cnt, ulong vaddr, physaddr_t paddr)
{
	if (remote_memory_read(pc->rmfd, bufptr, cnt, paddr, -1) == cnt)
		return cnt;

	if (!IS_VMALLOC_ADDR(vaddr) || DUMPFILE())
		return READ_ERROR;

	/*
	 *  On 32-bit architectures w/memory above ~936MB,
	 *  that memory can only be accessed via vmalloc'd
	 *  addresses.  However, /dev/mem returns 0 bytes,
	 *  and non-reserved memory pages can't be mmap'd, so
	 *  the only alternative is to read it from /dev/kmem.
	 */
	if (BITS32() && remote_memory_read(pc->rkfd, bufptr,
	    cnt, vaddr, -1) == cnt)
		return cnt;

	return READ_ERROR;
}

/*
 *  Write to network daemon.
 */
int
write_daemon(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	return (error(FATAL, "writing to daemon not supported yet [TBD]\n"));
}

/*
 *  Turn the memtype bitmask into a string.  Returns a pointer to a
 *  static buffer, overwritten on each call.
 */
static
char *memtype_string(int memtype, int debug)
{
	static char membuf[40];

	switch (memtype)
	{
	case UVADDR:
		sprintf(membuf, debug ? "UVADDR" : "user virtual");
		break;
	case KVADDR:
		sprintf(membuf, debug ? "KVADDR" : "kernel virtual");
		break;
	case PHYSADDR:
		sprintf(membuf, debug ? "PHYSADDR" : "physical");
		break;
	case XENMACHADDR:
		sprintf(membuf, debug ? "XENMACHADDR" : "xen machine");
		break;
	case FILEADDR:
		sprintf(membuf, debug ? "FILEADDR" : "dumpfile");
		break;
	default:
		if (debug)
			sprintf(membuf, "0x%x (?)", memtype);
		else
			sprintf(membuf, "unknown");
		break;
	}

	return membuf;
}

/*
 *  Turn the error_handle bitmask into a string,
 *  Note: FAULT_ON_ERROR == 0
 *  Returns a pointer to a static buffer, overwritten on each call.
 */
static
char *error_handle_string(ulong error_handle)
{
	static char ebuf[20];
	int others;

	sprintf(ebuf, "(");
	others = 0;

	if (error_handle & RETURN_ON_ERROR)
		sprintf(&ebuf[strlen(ebuf)], "%sROE", others++ ? "|" : "");
	if (error_handle & FAULT_ON_ERROR)
		sprintf(&ebuf[strlen(ebuf)], "%sFOE", others++ ? "|" : "");
	if (error_handle & QUIET)
		sprintf(&ebuf[strlen(ebuf)], "%sQ", others++ ? "|" : "");
	if (error_handle & HEX_BIAS)
		sprintf(&ebuf[strlen(ebuf)], "%sHB", others++ ? "|" : "");
	if (error_handle & RETURN_PARTIAL)
		sprintf(&ebuf[strlen(ebuf)], "%sRP", others++ ? "|" : "");
	if (error_handle & NO_DEVMEM_SWITCH)
		sprintf(&ebuf[strlen(ebuf)], "%sNDS", others++ ? "|" : "");

	strcat(ebuf, ")");

	return ebuf;
}

/*
 *  Sister routine to readmem().
 */
int
writemem(ulonglong addr, int memtype, void *buffer, long size,
	char *type, ulong error_handle)
{
	int fd;
	long cnt;
	physaddr_t paddr;
	char *bufptr;

	if (CRASHDEBUG(1))
		fprintf(fp, "writemem: %llx, %s, \"%s\", %ld, %s %lx\n",
			addr, memtype_string(memtype, 1), type, size,
			error_handle_string(error_handle), (ulong)buffer);

	if (size < 0) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, "invalid size request: %ld\n", size);
		goto writemem_error;
	}

	bufptr = (char *)buffer;

	fd = ACTIVE() ? pc->mfd : pc->dfd;

	/*
	 *  Screen out any error conditions.
	 */
	switch (memtype)
	{
	case UVADDR:
		if (!CURRENT_CONTEXT()) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, "no current user process\n");
			goto writemem_error;
		}
		if (!IS_UVADDR(addr, CURRENT_CONTEXT())) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_UVADDR, addr, type);
			goto writemem_error;
		}
		break;

	case KVADDR:
		if (!IS_KVADDR(addr)) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_KVADDR, addr, type);
			goto writemem_error;
		}
		break;

	case PHYSADDR:
		break;

	case FILEADDR:
		return generic_write_dumpfile(addr, buffer, size, type,
			error_handle);
	}

	/* Translate and write page-by-page; a request may cross pages. */
	while (size > 0) {
		switch (memtype)
		{
		case UVADDR:
			if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_UVADDR,
						addr, type);
				goto writemem_error;
			}
			break;

		case KVADDR:
			if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_KVADDR,
						addr, type);
				goto writemem_error;
			}
			break;

		case PHYSADDR:
			paddr = addr;
			break;
		}

		/*
		 *  Compute bytes till end of page.
		 */
		cnt = PAGESIZE() - PAGEOFFSET(paddr);

		if (cnt > size)
			cnt = size;

		switch (pc->writemem(fd, bufptr, cnt, addr, paddr))
		{
		case SEEK_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, SEEK_ERRMSG,
					memtype_string(memtype, 0),
					addr, type);
			goto writemem_error;

		case WRITE_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, WRITE_ERRMSG,
					memtype_string(memtype, 0),
					addr, type);
			goto writemem_error;

		default:
			break;
		}

		addr += cnt;
		bufptr += cnt;
		size -= cnt;
	}

	return TRUE;

writemem_error:
	switch (error_handle)
	{
	case (FAULT_ON_ERROR):
	case (QUIET|FAULT_ON_ERROR):
		RESTART();

	case (RETURN_ON_ERROR):
	case (QUIET|RETURN_ON_ERROR):
		break;
	}

	return FALSE;
}

/*
 *  When /dev/mem won't allow access, try /dev/kmem.
*/
static ssize_t
read_dev_kmem(ulong vaddr, char *bufptr, long cnt)
{
	ssize_t readcnt;

	/* Lazily open /dev/kmem on first use; keep the fd cached. */
	if (pc->kfd < 0) {
		if ((pc->kfd = open("/dev/kmem", O_RDONLY)) < 0)
			return 0;
	}

	if (lseek(pc->kfd, vaddr, SEEK_SET) == -1)
		return 0;

	/* Treat a short read as a total failure (0 bytes). */
	readcnt = read(pc->kfd, bufptr, cnt);
	if (readcnt != cnt)
		readcnt = 0;

	return readcnt;
}

/*
 *  Generic dumpfile read/write functions to handle FILEADDR
 *  memtype arguments to readmem() and writemem().  These are
 *  not to be confused with pc->readmem/writemem plug-ins.
 */
static int
generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type,
	ulong error_handle)
{
	int fd;
	int retval;

	retval = TRUE;

	if (!pc->dumpfile)
		error(FATAL, "command requires a dumpfile\n");

	/* The dumpfile is opened/closed per call; "addr" is a raw
	 * byte offset into the file. */
	if ((fd = open(pc->dumpfile, O_RDONLY)) < 0)
		error(FATAL, "%s: %s\n", pc->dumpfile,
			strerror(errno));

	if (lseek(fd, addr, SEEK_SET) == -1) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, SEEK_ERRMSG,
				memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	} else if (read(fd, buffer, size) != size) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, READ_ERRMSG,
				memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	}

	close(fd);

	return retval;
}

static int
generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type,
	ulong error_handle)
{
	int fd;
	int retval;

	retval = TRUE;

	if (!pc->dumpfile)
		error(FATAL, "command requires a dumpfile\n");

	if ((fd = open(pc->dumpfile, O_WRONLY)) < 0)
		error(FATAL, "%s: %s\n", pc->dumpfile,
			strerror(errno));

	if (lseek(fd, addr, SEEK_SET) == -1) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, SEEK_ERRMSG,
				memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	} else if (write(fd, buffer, size) != size) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, WRITE_ERRMSG,
				memtype_string(FILEADDR, 0), addr, type);
		retval = FALSE;
	}

	close(fd);

	return retval;
}

/*
 *  Translates a kernel virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
*/
int
kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	physaddr_t scratch;
	struct task_context *context;
	physaddr_t *target;

	/* Delegate to the architecture handler, substituting the current
	 * context and a throwaway result slot for NULL arguments. */
	context = tc ? tc : CURRENT_CONTEXT();
	target = paddr ? paddr : &scratch;

	return machdep->kvtop(context, kvaddr, target, verbose);
}

/*
 *  Translates a user virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
 *
 *  This routine can also take mapped kernel virtual addresses if the -u flag
 *  was passed to cmd_vtop().  If so, it makes the translation using the
 *  kernel-memory PGD entry instead of swapper_pg_dir.
 */
int
uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
{
	/* Thin pass-through to the architecture handler. */
	return machdep->uvtop(tc, vaddr, paddr, verbose);
}

/*
 *  The vtop command does a verbose translation of a user or kernel virtual
 *  address into it physical address.  The pte translation is shown by
 *  passing the VERBOSE flag to kvtop() or uvtop().  If it's a user virtual
 *  address, the vm_area_struct data containing the page is displayed.
 *  Lastly, the mem_map[] page data containing the address is displayed.
*/
void
cmd_vtop(void)
{
	int c;
	ulong vaddr, context;
	int others;
	ulong vtop_flags, loop_vtop_flags;
	struct task_context *tc;

	vtop_flags = loop_vtop_flags = 0;
	tc = NULL;

	while ((c = getopt(argcnt, args, "ukc:")) != EOF) {
		switch(c)
		{
		case 'c':
			/* -c pid|task: translate in that task's context,
			 * using its user page tables. */
			switch (str_to_context(optarg, &context, &tc))
			{
			case STR_PID:
			case STR_TASK:
				vtop_flags |= USE_USER_PGD;
				break;
			case STR_INVALID:
				error(FATAL,
					"invalid task or pid value: %s\n",
					optarg);
				break;
			}
			break;

		case 'u':
			vtop_flags |= UVADDR;
			break;

		case 'k':
			vtop_flags |= KVADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (!tc && !(tc = CURRENT_CONTEXT()))
		error(FATAL, "no current user process\n");

	if ((vtop_flags & (UVADDR|KVADDR)) == (UVADDR|KVADDR))
		error(FATAL, "-u and -k options are mutually exclusive\n");

	others = 0;
	while (args[optind]) {
		vaddr = htol(args[optind], FAULT_ON_ERROR, NULL);

		/* Without -u/-k, classify each address individually. */
		if (!(vtop_flags & (UVADDR|KVADDR))) {
			switch (vaddr_type(vaddr, tc))
			{
			case UVADDR:
				loop_vtop_flags = UVADDR;
				break;
			case KVADDR:
				loop_vtop_flags = KVADDR;
				break;
			case AMBIGUOUS:
				error(FATAL,
				    "ambiguous address: %lx (requires -u or -k)\n",
					vaddr);
				break;
			}
		} else
			loop_vtop_flags = 0;

		if (others++)
			fprintf(fp, "\n");

		do_vtop(vaddr, tc, vtop_flags | loop_vtop_flags);

		/* Cross-check against the remote daemon's translation. */
		if (REMOTE() && CRASHDEBUG(1)) {
			ulong paddr = remote_vtop(tc->processor, vaddr);
			if (paddr)
				fprintf(fp, "rvtop(%lx)=%lx\n",
					vaddr, paddr);
		}

		optind++;
	}
}

/*
 *  Do the work for cmd_vtop(), or less likely, foreach().
*/
void
do_vtop(ulong vaddr, struct task_context *tc, ulong vtop_flags)
{
	physaddr_t paddr;
	ulong vma, page;
	int page_exists;
	struct meminfo meminfo;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int memtype = 0;

	/* Resolve the address type: explicit flag, or classify by value. */
	switch (vtop_flags & (UVADDR|KVADDR))
	{
	case UVADDR:
		memtype = UVADDR;
		break;

	case KVADDR:
		memtype = KVADDR;
		break;

	case (UVADDR|KVADDR):
		error(FATAL, "-u and -k options are mutually exclusive\n");
		break;

	default:
		switch (vaddr_type(vaddr, tc))
		{
		case UVADDR:
			memtype = UVADDR;
			break;
		case KVADDR:
			memtype = KVADDR;
			break;
		case AMBIGUOUS:
			error(FATAL,
			    "ambiguous address: %lx (requires -u or -k)\n",
				vaddr);
			break;
		}
		break;
	}

	page_exists = paddr = 0;

	switch (memtype) {
	case UVADDR:
		fprintf(fp, "%s %s\n",
		    mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"),
		    mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

		if (!IN_TASK_VMA(tc->task, vaddr)) {
			fprintf(fp, "%s (not accessible)\n\n",
			    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(vaddr)));
			return;
		}
		/* First pass: quiet translation just to get paddr. */
		if (!uvtop(tc, vaddr, &paddr, 0)) {
			fprintf(fp, "%s %s\n\n",
			    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(vaddr)),
			    (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
			    "(page not available)" : "(not mapped)");
			page_exists = FALSE;
		} else {
			fprintf(fp, "%s %s\n\n",
			    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(vaddr)),
			    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
				MKSTR(&paddr)));
			page_exists = TRUE;
		}
		/* Second pass: VERBOSE dumps the full page-table walk. */
		uvtop(tc, vaddr, &paddr, VERBOSE);
		fprintf(fp, "\n");
		vma = vm_area_dump(tc->task, UVADDR, vaddr, 0);
		/* Unmapped pages may still live in swap or a backing file. */
		if (!page_exists) {
			if (swap_location(paddr, buf1))
				fprintf(fp, "\nSWAP: %s\n", buf1);
			else if (vma_file_offset(vma, vaddr, buf1))
				fprintf(fp, "\nFILE: %s\n", buf1);
		}
		break;

	case KVADDR:
		fprintf(fp, "%s %s\n",
		    mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"),
		    mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

		if (!IS_KVADDR(vaddr)) {
			fprintf(fp, "%-8lx (not a kernel virtual address)\n\n",
				vaddr);
			return;
		}
		/*
		 * With -c, translate through the chosen task's user page
		 * tables instead of the kernel's.
		 */
		if (vtop_flags & USE_USER_PGD) {
			if (!uvtop(tc, vaddr, &paddr, 0)) {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
					MKSTR(vaddr)),
				    (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
				    "(page not available)" : "(not mapped)");
				page_exists = FALSE;
			} else {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
					MKSTR(vaddr)),
				    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
					MKSTR(&paddr)));
				page_exists = TRUE;
			}
			uvtop(tc, vaddr, &paddr, VERBOSE);
		} else {
			if (!kvtop(tc, vaddr, &paddr, 0)) {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX,
					MKSTR(vaddr)),
				    (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
				    "(page not available)" : "(not mapped)");
				page_exists = FALSE;
			} else {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX,
					MKSTR(vaddr)),
				    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
					MKSTR(&paddr)));
				page_exists = TRUE;
			}
			kvtop(tc, vaddr, &paddr, VERBOSE);
		}
		break;
	}

	fprintf(fp, "\n");

	/* If the page is resident, also dump its mem_map (struct page) entry. */
	if (page_exists && phys_to_page(paddr, &page)) {
		if ((pc->flags & DEVMEM) && (paddr >= VTOP(vt->high_memory)))
			return;
		BZERO(&meminfo, sizeof(struct meminfo));
		meminfo.flags = ADDRESS_SPECIFIED;
		meminfo.spec_addr = paddr;
		meminfo.memtype = PHYSADDR;
		dump_mem_map(&meminfo);
	}
}

/*
 *  Runs PTOV() on the physical address argument or translates
 *  a per-cpu offset and cpu specifier.
 */
void
cmd_ptov(void)
{
	int c, len, unknown;
	ulong vaddr;
	physaddr_t paddr, paddr_test;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int others;
	char *cpuspec;
	ulong *cpus;

	/* No options are accepted; anything other than arguments is an error. */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	others = 0;
	cpuspec = NULL;
	cpus = NULL;

	while (args[optind]) {
		/* "offset:cpuspec" requests per-cpu offset translation. */
		cpuspec = strchr(args[optind], ':');
		if (cpuspec) {
			*cpuspec++ = NULLCHAR;
			cpus = get_cpumask_buf();
			if (STREQ(cpuspec, ""))
				/* bare ":" means the current task's cpu */
				SET_BIT(cpus, CURRENT_CONTEXT()->processor);
			else
				make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL);
		}

		paddr = htoll(args[optind], FAULT_ON_ERROR, NULL);

		if (cpuspec) {
			/* Width of the widest "[cpu]" tag, for column alignment. */
			sprintf(buf1, "[%d]", kt->cpus-1);
			len = strlen(buf1) + 2;
			fprintf(fp, "%sPER-CPU OFFSET: %llx\n",
				others++ ? "\n" : "", (ulonglong)paddr);
			fprintf(fp, " %s %s\n",
			    mkstring(buf1, len, LJUST, "CPU"),
			    mkstring(buf2, VADDR_PRLEN, LJUST, "VIRTUAL"));
			for (c = 0; c < kt->cpus; c++) {
				if (!NUM_IN_BITMAP(cpus, c))
					continue;
				vaddr = paddr + kt->__per_cpu_offset[c];
				sprintf(buf1, "[%d]", c);
				fprintf(fp, " %s%lx",
				    mkstring(buf2, len, LJUST, buf1), vaddr);
				if (hide_offline_cpu(c))
					fprintf(fp, " [OFFLINE]\n");
				else
					fprintf(fp, "\n");
			}
			FREEBUF(cpus);
		} else {
			vaddr = PTOV(paddr);

			/*
			 * On 32-bit systems verify the unity mapping by
			 * translating back; mismatch means "unknown".
			 */
			unknown = BITS32() &&
			    (!kvtop(0, vaddr, &paddr_test, 0) ||
			    (paddr_test != paddr));

			fprintf(fp, "%s%s %s\n", others++ ? "\n" : "",
			    mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"),
			    mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

			fprintf(fp, "%s %s\n", unknown ?
			    mkstring(buf1, VADDR_PRLEN, LJUST, "unknown") :
			    mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(vaddr)),
			    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
				MKSTR(&paddr)));
		}

		optind++;
	}
}

/*
 *  Runs PTOB() on the page frame number to get the page address.
 */
void
cmd_ptob(void)
{
	ulonglong value;

	optind = 1;
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		value = stoll(args[optind], FAULT_ON_ERROR, NULL);
		fprintf(fp, "%llx: %llx\n", value, PTOB(value));
		optind++;
	}
}

/*
 *  Runs BTOP() on the address to get the page frame number.
 */
void
cmd_btop(void)
{
	ulonglong value;

	optind = 1;
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		value = htoll(args[optind], FAULT_ON_ERROR, NULL);
		fprintf(fp, "%llx: %llx\n", value, BTOP(value));
		optind++;
	}
}

/*
 *  This command displays basic virtual memory information of a context,
 *  consisting of a pointer to its mm_struct, its RSS and total virtual
 *  memory size; and a list of pointers to each vm_area_struct, its starting
 *  and ending address, and vm_flags value.  The argument can be a task
 *  address or a PID number; if no args, the current context is used.
*/
void
cmd_vm(void)
{
	int c;
	ulong flag;
	ulong value;
	ulong single_vma;
	ulonglong llvalue;
	struct task_context *tc;
	struct reference reference, *ref;
	unsigned int radix;
	int subsequent;

	flag = 0;
	single_vma = 0;
	radix = 0;
	ref = NULL;
	BZERO(&reference, sizeof(struct reference));

	while ((c = getopt(argcnt, args, "f:pmvR:P:xdM:")) != EOF) {
		switch(c)
		{
		case 'M':
			/* Force a specific mm_struct for tasks without one. */
			pc->curcmd_private = htoll(optarg, FAULT_ON_ERROR, NULL);
			pc->curcmd_flags |= MM_STRUCT_FORCE;
			if (!IS_KVADDR(pc->curcmd_private))
				error(FATAL, "invalid mm_struct address: %s\n",
					optarg);
			break;

		case 'f':
			/* -f translates a vm_flags value and returns. */
			if (flag)
				argerrs++;
			else {
				llvalue = htoll(optarg, FAULT_ON_ERROR, NULL);
				do_vm_flags(llvalue);
				return;
			}
			break;

		case 'p':
			if (flag)
				argerrs++;
			else
				flag |= PHYSADDR;
			break;
		case 'm':
			if (flag)
				argerrs++;
			else
				flag |= PRINT_MM_STRUCT;
			break;
		case 'v':
			if (flag)
				argerrs++;
			else
				flag |= PRINT_VMA_STRUCTS;
			break;
		case 'R':
			/* -R implies -p; only compatible with PHYSADDR. */
			if (ref) {
				error(INFO, "only one -R option allowed\n");
				argerrs++;
			} else if (flag && !(flag & PHYSADDR))
				argerrs++;
			else {
				ref = &reference;
				ref->str = optarg;
				flag |= PHYSADDR;
			}
			break;
		case 'P':
			if (flag)
				argerrs++;
			else {
				flag |= PRINT_SINGLE_VMA;
				single_vma = htol(optarg, FAULT_ON_ERROR, NULL);
			}
			break;
		case 'x':
			if (radix == 10)
				error(FATAL,
					"-d and -x are mutually exclusive\n");
			radix = 16;
			break;
		case 'd':
			if (radix == 16)
				error(FATAL,
					"-d and -x are mutually exclusive\n");
			radix = 10;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (radix == 10)
		flag |= PRINT_RADIX_10;
	else if (radix == 16)
		flag |= PRINT_RADIX_16;

	/* No task arguments: operate on the current context. */
	if (!args[optind]) {
		if (!ref)
			print_task_header(fp, CURRENT_CONTEXT(), 0);
		vm_area_dump(CURRENT_TASK(), flag, single_vma, ref);
		return;
	}

	subsequent = 0;

	while (args[optind]) {
		switch (str_to_context(args[optind], &value, &tc))
		{
		case STR_PID:
			/* A PID may map to several thread contexts. */
			for (tc = pid_to_context(value); tc; tc = tc->tc_next) {
				if (!ref)
					print_task_header(fp, tc, subsequent++);
				vm_area_dump(tc->task, flag, single_vma, ref);
			}
			break;

		case STR_TASK:
			if (!ref)
				print_task_header(fp, tc, subsequent++);
			vm_area_dump(tc->task, flag, single_vma, ref);
			break;

		case STR_INVALID:
			error(INFO, "%sinvalid task or pid value: %s\n",
				subsequent++ ? "\n" : "", args[optind]);
			break;
		}

		optind++;
	}
}

/*
 *  Translate a vm_flags value.  Several bit values are reused across
 *  kernel versions, so some translations below are version-gated.
 */

#define VM_READ		0x00000001ULL	/* currently active flags */
#define VM_WRITE	0x00000002ULL
#define VM_EXEC		0x00000004ULL
#define VM_SHARED	0x00000008ULL
#define VM_MAYREAD	0x00000010ULL	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020ULL
#define VM_MAYEXEC	0x00000040ULL
#define VM_MAYSHARE	0x00000080ULL
#define VM_GROWSDOWN	0x00000100ULL	/* general info on the segment */
#define VM_GROWSUP	0x00000200ULL
#define VM_NOHUGEPAGE	0x00000200ULL	/* MADV_NOHUGEPAGE marked this vma */
#define VM_SHM		0x00000400ULL	/* shared memory area, don't swap out */
#define VM_PFNMAP	0x00000400ULL
#define VM_DENYWRITE	0x00000800ULL	/* ETXTBSY on write attempts.. */
#define VM_EXECUTABLE	0x00001000ULL
#define VM_LOCKED	0x00002000ULL
#define VM_IO		0x00004000ULL	/* Memory mapped I/O or similar */
#define VM_SEQ_READ	0x00008000ULL	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000ULL	/* App will not benefit from clustered reads */
#define VM_DONTCOPY	0x00020000ULL	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000ULL	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000ULL	/* Don't unmap it from swap_out */
#define VM_BIGPAGE	0x00100000ULL	/* bigpage mappings, no pte's */
#define VM_BIGMAP	0x00200000ULL	/* user wants bigpage mapping */
#define VM_WRITECOMBINED 0x00100000ULL	/* Write-combined */
#define VM_NONCACHED	0x00200000ULL	/* Noncached access */
#define VM_HUGETLB	0x00400000ULL	/* Huge tlb Page*/
#define VM_ACCOUNT	0x00100000ULL	/* Memory is a vm accounted object */
#define VM_NONLINEAR	0x00800000ULL	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000ULL	/* T if mapped copy of data (nommu mmap) */
#define VM_HUGEPAGE	0x01000000ULL	/* MADV_HUGEPAGE marked this vma */
#define VM_INSERTPAGE	0x02000000ULL	/* The vma has had "vm_insert_page()" done on it */
#define VM_ALWAYSDUMP	0x04000000ULL	/* Always include in core dumps */
#define VM_CAN_NONLINEAR 0x08000000ULL	/* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP	0x10000000ULL	/* Can contain "struct page" and pure PFN pages */
#define VM_SAO		0x20000000ULL	/* Strong Access Ordering (powerpc) */
#define VM_PFN_AT_MMAP	0x40000000ULL	/* PFNMAP vma that is fully mapped at mmap time */
#define VM_MERGEABLE	0x80000000ULL	/* KSM may merge identical pages */

/*
 * Print a vm_flags value as a "|"-separated list of symbolic names.
 * Reused bits are disambiguated by checking kernel symbols/versions.
 */
static void
do_vm_flags(ulonglong flags)
{
	int others;

	others = 0;

	fprintf(fp, "%llx: (", flags);

	if (flags & VM_READ) {
		fprintf(fp, "READ");
		others++;
	}
	if (flags & VM_WRITE)
		fprintf(fp, "%sWRITE", others++ ? "|" : "");
	if (flags & VM_EXEC)
		fprintf(fp, "%sEXEC", others++ ? "|" : "");
	if (flags & VM_SHARED)
		fprintf(fp, "%sSHARED", others++ ? "|" : "");
	if (flags & VM_MAYREAD)
		fprintf(fp, "%sMAYREAD", others++ ? "|" : "");
	if (flags & VM_MAYWRITE)
		fprintf(fp, "%sMAYWRITE", others++ ? "|" : "");
	if (flags & VM_MAYEXEC)
		fprintf(fp, "%sMAYEXEC", others++ ? "|" : "");
	if (flags & VM_MAYSHARE)
		fprintf(fp, "%sMAYSHARE", others++ ? "|" : "");
	if (flags & VM_GROWSDOWN)
		fprintf(fp, "%sGROWSDOWN", others++ ? "|" : "");
	/* 0x200 is GROWSUP on kernels with expand_upwards(), else NOHUGEPAGE */
	if (kernel_symbol_exists("expand_upwards")) {
		if (flags & VM_GROWSUP)
			fprintf(fp, "%sGROWSUP", others++ ? "|" : "");
	} else if (flags & VM_NOHUGEPAGE)
		fprintf(fp, "%sNOHUGEPAGE", others++ ? "|" : "");
	/* 0x400 became PFNMAP after 2.6.17 */
	if (flags & VM_SHM) {
		if (THIS_KERNEL_VERSION > LINUX(2,6,17))
			fprintf(fp, "%sPFNMAP", others++ ? "|" : "");
		else
			fprintf(fp, "%sSHM", others++ ? "|" : "");
	}
	if (flags & VM_DENYWRITE)
		fprintf(fp, "%sDENYWRITE", others++ ? "|" : "");
	if (flags & VM_EXECUTABLE)
		fprintf(fp, "%sEXECUTABLE", others++ ? "|" : "");
	if (flags & VM_LOCKED)
		fprintf(fp, "%sLOCKED", others++ ? "|" : "");
	if (flags & VM_IO)
		fprintf(fp, "%sIO", others++ ? "|" : "");
	if (flags & VM_SEQ_READ)
		fprintf(fp, "%sSEQ_READ", others++ ? "|" : "");
	if (flags & VM_RAND_READ)
		fprintf(fp, "%sRAND_READ", others++ ? "|" : "");
	if (flags & VM_DONTCOPY)
		fprintf(fp, "%sDONTCOPY", others++ ? "|" : "");
	if (flags & VM_DONTEXPAND)
		fprintf(fp, "%sDONTEXPAND", others++ ? "|" : "");
	if (flags & VM_RESERVED)
		fprintf(fp, "%sRESERVED", others++ ? "|" : "");
	/* 0x100000/0x200000 were BIGPAGE/BIGMAP only on the 2.4.9 bigpages kernel */
	if (symbol_exists("nr_bigpages") &&
	    (THIS_KERNEL_VERSION == LINUX(2,4,9))) {
		if (flags & VM_BIGPAGE)
			fprintf(fp, "%sBIGPAGE", others++ ? "|" : "");
		if (flags & VM_BIGMAP)
			fprintf(fp, "%sBIGMAP", others++ ? "|" : "");
	} else {
		if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) &&
		    (flags & VM_WRITECOMBINED))
			fprintf(fp, "%sWRITECOMBINED", others++ ? "|" : "");
		if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) &&
		    (flags & VM_NONCACHED))
			fprintf(fp, "%sNONCACHED", others++ ? "|" : "");
		if (flags & VM_HUGETLB)
			fprintf(fp, "%sHUGETLB", others++ ? "|" : "");
		if (flags & VM_ACCOUNT)
			fprintf(fp, "%sACCOUNT", others++ ? "|" : "");
	}
	if (flags & VM_NONLINEAR)
		fprintf(fp, "%sNONLINEAR", others++ ? "|" : "");
	/* 0x1000000 is HUGEPAGE on THP kernels, MAPPED_COPY on nommu */
	if (flags & VM_HUGEPAGE) {
		if (MEMBER_EXISTS("mm_struct", "pmd_huge_pte"))
			fprintf(fp, "%sHUGEPAGE", others++ ? "|" : "");
		else
			fprintf(fp, "%sMAPPED_COPY", others++ ? "|" : "");
	}
	if (flags & VM_INSERTPAGE)
		fprintf(fp, "%sINSERTPAGE", others++ ? "|" : "");
	if (flags & VM_ALWAYSDUMP)
		fprintf(fp, "%sALWAYSDUMP", others++ ? "|" : "");
	if (flags & VM_CAN_NONLINEAR)
		fprintf(fp, "%sCAN_NONLINEAR", others++ ? "|" : "");
	if (flags & VM_MIXEDMAP)
		fprintf(fp, "%sMIXEDMAP", others++ ? "|" : "");
	if (flags & VM_SAO)
		fprintf(fp, "%sSAO", others++ ? "|" : "");
	if (flags & VM_PFN_AT_MMAP)
		fprintf(fp, "%sPFN_AT_MMAP", others++ ? "|" : "");
	if (flags & VM_MERGEABLE)
		fprintf(fp, "%sMERGEABLE", others++ ? "|" : "");

	fprintf(fp, ")\n");

}

/*
 *  Read whatever size vm_area_struct.vm_flags happens to be into a ulonglong.
*/ static ulonglong get_vm_flags(char *vma_buf) { ulonglong vm_flags = 0; if (SIZE(vm_area_struct_vm_flags) == sizeof(short)) vm_flags = USHORT(vma_buf + OFFSET(vm_area_struct_vm_flags)); else if (SIZE(vm_area_struct_vm_flags) == sizeof(long)) vm_flags = ULONG(vma_buf+ OFFSET(vm_area_struct_vm_flags)); else if (SIZE(vm_area_struct_vm_flags) == sizeof(long long)) vm_flags = ULONGLONG(vma_buf+ OFFSET(vm_area_struct_vm_flags)); else error(INFO, "questionable vm_area_struct.vm_flags size: %d\n", SIZE(vm_area_struct_vm_flags)); return vm_flags; } static void vm_cleanup(void *arg) { struct task_context *tc; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; tc = (struct task_context *)arg; tc->mm_struct = 0; } static int is_valid_mm(ulong mm) { char kbuf[BUFSIZE]; char *p; int mm_count; if (!(p = vaddr_to_kmem_cache(mm, kbuf, VERBOSE))) goto bailout; if (!STRNEQ(p, "mm_struct")) goto bailout; readmem(mm + OFFSET(mm_struct_mm_count), KVADDR, &mm_count, sizeof(int), "mm_struct mm_count", FAULT_ON_ERROR); if (mm_count == 0) error(FATAL, "stale mm_struct address\n"); return mm_count; bailout: error(FATAL, "invalid mm_struct address\n"); return 0; } /* * vm_area_dump() primarily does the work for cmd_vm(), but is also called * from IN_TASK_VMA(), do_vtop(), and foreach(). 
How it behaves depends
 *  upon the flag and ref arguments:
 *
 *   UVADDR               do_vtop() when dumping the VMA for a uvaddr
 *   UVADDR|VERIFY_ADDR   IN_TASK_VMA() macro checks if a uvaddr is in a VMA
 *   PHYSADDR             cmd_vm() or foreach(vm) for -p and -R options
 *   PRINT_MM_STRUCT      cmd_vm() or foreach(vm) for -m option
 *   PRINT_VMA_STRUCTS    cmd_vm() or foreach(vm) for -v option
 *   PRINT_INODES         open_files_dump() backdoors foreach(vm)
 *
 *   ref                  cmd_vm() or foreach(vm) for -R option that searches
 *                        for references -- and only then does a display
 */

/* Print the two-line MM/PGD/RSS/TOTAL_VM summary for a task. */
#define PRINT_VM_DATA(buf4, buf5, tm)					\
{									\
	fprintf(fp, "%s %s ",						\
	    mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, "MM"),		\
	    mkstring(buf5, VADDR_PRLEN, CENTER|LJUST, "PGD"));		\
	fprintf(fp, "%s %s\n",						\
	    mkstring(buf4, 6, CENTER|LJUST, "RSS"),			\
	    mkstring(buf5, 8, CENTER|LJUST, "TOTAL_VM"));		\
									\
	fprintf(fp, "%s %s ",						\
	    mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX,		\
		MKSTR(tm->mm_struct_addr)),				\
	    mkstring(buf5, VADDR_PRLEN, CENTER|LJUST|LONG_HEX,		\
		MKSTR(tm->pgd_addr)));					\
									\
	sprintf(buf4, "%ldk", (tm->rss * PAGESIZE())/1024);		\
	sprintf(buf5, "%ldk", (tm->total_vm * PAGESIZE())/1024);	\
	fprintf(fp, "%s %s\n",						\
	    mkstring(buf4, 6, CENTER|LJUST, NULL),			\
	    mkstring(buf5, 8, CENTER|LJUST, NULL));			\
}

/*
 * Print one VMA line; relies on vm_start/vm_end/vm_flags being in scope
 * at the expansion site.
 */
#define PRINT_VMA_DATA(buf1, buf2, buf3, buf4, vma)			\
	fprintf(fp, "%s%s%s%s%s %6llx%s%s\n",				\
	    mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, MKSTR(vma)), \
	    space(MINSPACE),						\
	    mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)), \
	    space(MINSPACE),						\
	    mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)), \
	    vm_flags, space(MINSPACE), buf1);

#define FILENAME_COMPONENT(P,C)						\
	((STREQ((P), "/") && STREQ((C), "/")) ||			\
	(!STREQ((C), "/") && strstr((P),(C))))

/* -R reference-search state bits kept in ref->cmdflags. */
#define VM_REF_SEARCH       (0x1)
#define VM_REF_DISPLAY      (0x2)
#define VM_REF_NUMBER       (0x4)
#define VM_REF_VMA          (0x8)
#define VM_REF_PAGE        (0x10)
#define VM_REF_HEADER      (0x20)
#define DO_REF_SEARCH(X)   ((X) && ((X)->cmdflags & VM_REF_SEARCH))
#define DO_REF_DISPLAY(X)  ((X) && ((X)->cmdflags & VM_REF_DISPLAY))
#define VM_REF_CHECK_HEXVAL(X,V) \
   (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->hexval == (V)))
#define VM_REF_CHECK_DECVAL(X,V) \
   (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->decval == (V)))
#define VM_REF_CHECK_STRING(X,S) \
   (DO_REF_SEARCH(X) && (string_exists(S)) && FILENAME_COMPONENT((S),(X)->str))
#define VM_REF_FOUND(X)    ((X) && ((X)->cmdflags & VM_REF_HEADER))

/*
 * Process one vm_area_struct on behalf of vm_area_dump().  Returns the
 * vma address when the caller should stop iterating (address verified or
 * found), otherwise 0 to continue.
 */
static ulong
handle_each_vm_area(struct handle_each_vm_area_args *args)
{
	char *dentry_buf, *file_buf;
	ulong vm_start;
	ulong vm_end;
	ulong vm_mm;
	ulonglong vm_flags;
	ulong vm_file, inode;
	ulong dentry, vfsmnt;

	if ((args->flag & PHYSADDR) && !DO_REF_SEARCH(args->ref))
		fprintf(fp, "%s", args->vma_header);

	inode = 0;
	BZERO(args->buf1, BUFSIZE);
	*(args->vma_buf) = fill_vma_cache(args->vma);

	vm_mm = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_mm));
	vm_end = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_end));
	vm_start = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_start));
	vm_flags = get_vm_flags(*(args->vma_buf));
	vm_file = ULONG(*(args->vma_buf) + OFFSET(vm_area_struct_vm_file));

	/* -P: skip everything except the one requested VMA. */
	if (args->flag & PRINT_SINGLE_VMA) {
		if (args->vma != *(args->single_vma))
			return 0;
		fprintf(fp, "%s", args->vma_header);
		*(args->single_vma_found) = TRUE;
	}

	if (args->flag & PRINT_VMA_STRUCTS) {
		dump_struct("vm_area_struct", args->vma, args->radix);
		return 0;
	}

	/* Resolve the backing file's pathname (and optionally its inode). */
	if (vm_file && !(args->flag & VERIFY_ADDR)) {
		file_buf = fill_file_cache(vm_file);
		dentry = ULONG(file_buf + OFFSET(file_f_dentry));
		dentry_buf = NULL;
		if (dentry) {
			dentry_buf = fill_dentry_cache(dentry);
			if (VALID_MEMBER(file_f_vfsmnt)) {
				vfsmnt = ULONG(file_buf +
					OFFSET(file_f_vfsmnt));
				get_pathname(dentry, args->buf1, BUFSIZE,
					1, vfsmnt);
			} else
				get_pathname(dentry, args->buf1, BUFSIZE, 1, 0);
		}
		if ((args->flag & PRINT_INODES) && dentry)
			inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
	}

	/* Either dumping all VMAs, or this VMA contains the target vaddr. */
	if (!(args->flag & UVADDR) || ((args->flag & UVADDR) &&
	    ((args->vaddr >= vm_start) && (args->vaddr < vm_end)))) {
		*(args->found) = TRUE;

		if (args->flag & VERIFY_ADDR)
			return args->vma;

		if (DO_REF_SEARCH(args->ref)) {
			/* Match on vma address, flags value, or pathname. */
			if (VM_REF_CHECK_HEXVAL(args->ref, args->vma) ||
			    VM_REF_CHECK_HEXVAL(args->ref, (ulong)vm_flags) ||
			    VM_REF_CHECK_STRING(args->ref, args->buf1)) {
				if (!(args->ref->cmdflags & VM_REF_HEADER)) {
					print_task_header(fp, args->tc, 0);
					PRINT_VM_DATA(args->buf4, args->buf5,
						args->tm);
					args->ref->cmdflags |= VM_REF_HEADER;
				}
				if (!(args->ref->cmdflags & VM_REF_VMA) ||
				    (args->ref->cmdflags & VM_REF_PAGE)) {
					fprintf(fp, "%s", args->vma_header);
					args->ref->cmdflags |= VM_REF_VMA;
					args->ref->cmdflags &= ~VM_REF_PAGE;
					args->ref->ref1 = args->vma;
				}
				PRINT_VMA_DATA(args->buf1, args->buf2,
					args->buf3, args->buf4, args->vma);
			}

			/*
			 * First pass finds a page-level match; second pass
			 * (VM_REF_DISPLAY) re-runs it to print the output.
			 */
			if (vm_area_page_dump(args->vma, args->task, vm_start,
			    vm_end, vm_mm, args->ref)) {
				if (!(args->ref->cmdflags & VM_REF_HEADER)) {
					print_task_header(fp, args->tc, 0);
					PRINT_VM_DATA(args->buf4, args->buf5,
						args->tm);
					args->ref->cmdflags |= VM_REF_HEADER;
				}
				if (!(args->ref->cmdflags & VM_REF_VMA) ||
				    (args->ref->ref1 != args->vma)) {
					fprintf(fp, "%s", args->vma_header);
					PRINT_VMA_DATA(args->buf1, args->buf2,
						args->buf3, args->buf4,
						args->vma);
					args->ref->cmdflags |= VM_REF_VMA;
					args->ref->ref1 = args->vma;
				}

				args->ref->cmdflags |= VM_REF_DISPLAY;
				vm_area_page_dump(args->vma, args->task,
					vm_start, vm_end, vm_mm, args->ref);
				args->ref->cmdflags &= ~VM_REF_DISPLAY;
			}
			return 0;
		}

		if (inode) {
			fprintf(fp, "%lx%s%s%s%s%s%6llx%s%lx %s\n",
				args->vma, space(MINSPACE),
				mkstring(args->buf2, UVADDR_PRLEN,
				RJUST|LONG_HEX, MKSTR(vm_start)),
				space(MINSPACE),
				mkstring(args->buf3, UVADDR_PRLEN,
				RJUST|LONG_HEX, MKSTR(vm_end)),
				space(MINSPACE), vm_flags,
				space(MINSPACE), inode, args->buf1);
		} else {
			PRINT_VMA_DATA(args->buf1, args->buf2,
				args->buf3, args->buf4, args->vma);

			if (args->flag & (PHYSADDR|PRINT_SINGLE_VMA))
				vm_area_page_dump(args->vma, args->task,
					vm_start, vm_end, vm_mm, args->ref);
		}

		if (args->flag & UVADDR)
			return args->vma;
	}
	return 0;
}

/*
 * Walk a task's VMA list -- via the maple tree on recent kernels, or the
 * legacy mm_struct.mmap linked list -- applying handle_each_vm_area() to
 * each entry.  See the behavior table above for the flag/ref semantics.
 */
ulong
vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref)
{
	struct task_context *tc;
	ulong vma;
	ulong single_vma;
	unsigned int radix;
	int single_vma_found;
	int found;
	struct task_mem_usage task_mem_usage, *tm;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];
	char vma_header[BUFSIZE];
	char *vma_buf;
	int i;
	ulong mm_mt, entry_num;
	struct list_pair *entry_list;

	tc = task_to_context(task);
	tm = &task_mem_usage;
	get_task_mem_usage(task, tm);

	single_vma = 0;
	single_vma_found = FALSE;
	/* With PRINT_SINGLE_VMA the vaddr argument carries the vma address. */
	if (flag & PRINT_SINGLE_VMA) {
		single_vma = vaddr;
		vaddr = 0;
	}

	if (flag & PRINT_RADIX_10)
		radix = 10;
	else if (flag & PRINT_RADIX_16)
		radix = 16;
	else
		radix = 0;

	if (ref) {
		ref->cmdflags = VM_REF_SEARCH;
		if (IS_A_NUMBER(ref->str)) {
			ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL);
			if (decimal(ref->str, 0))
				ref->decval = dtol(ref->str,
					FAULT_ON_ERROR, NULL);
			ref->cmdflags |= VM_REF_NUMBER;
		}
	}

	/* A reference hit on the mm_struct or pgd address itself: done. */
	if (VM_REF_CHECK_HEXVAL(ref, tm->mm_struct_addr) ||
	    VM_REF_CHECK_HEXVAL(ref, tm->pgd_addr)) {
		print_task_header(fp, tc, 0);
		PRINT_VM_DATA(buf4, buf5, tm);
		fprintf(fp, "\n");
		return (ulong)NULL;
	}

	if (!(flag & (UVADDR|PRINT_MM_STRUCT|PRINT_VMA_STRUCTS|
	    PRINT_SINGLE_VMA)) && !DO_REF_SEARCH(ref))
		PRINT_VM_DATA(buf4, buf5, tm);

	if (!tm->mm_struct_addr) {
		/* Kernel thread: honor a -M forced mm_struct if given. */
		if (pc->curcmd_flags & MM_STRUCT_FORCE) {
			if (!is_valid_mm(pc->curcmd_private))
				return (ulong)NULL;

			tc->mm_struct = tm->mm_struct_addr = pc->curcmd_private;

			/*
			 * tc->mm_struct is changed, use vm_cleanup to
			 * restore it.
			 */
			pc->cmd_cleanup_arg = (void *)tc;
			pc->cmd_cleanup = vm_cleanup;
		} else
			return (ulong)NULL;
	}

	if (flag & PRINT_MM_STRUCT) {
		dump_struct("mm_struct", tm->mm_struct_addr, radix);
		return (ulong)NULL;
	}

	sprintf(vma_header, "%s%s%s%s%s FLAGS%sFILE\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "VMA"),
		space(MINSPACE),
		mkstring(buf2, UVADDR_PRLEN, CENTER|RJUST, "START"),
		space(MINSPACE),
		mkstring(buf3, UVADDR_PRLEN, CENTER|RJUST, "END"),
		space(MINSPACE));

	if (!(flag & (PHYSADDR|VERIFY_ADDR|PRINT_VMA_STRUCTS|
	    PRINT_SINGLE_VMA)) && !DO_REF_SEARCH(ref))
		fprintf(fp, "%s", vma_header);

	found = FALSE;

	struct handle_each_vm_area_args args = {
		.task = task,		.flag = flag,
		.vaddr = vaddr,		.ref = ref,
		.tc = tc,		.radix = radix,
		.tm = tm,		.buf1 = buf1,
		.buf2 = buf2,		.buf3 = buf3,
		.buf4 = buf4,		.buf5 = buf5,
		.vma_header = vma_header,
		.single_vma = &single_vma,
		.single_vma_found = &single_vma_found,
		.found = &found,	.vma_buf = &vma_buf,
	};

	/* 6.1+: VMAs live in the mm_struct maple tree, not a linked list. */
	if (INVALID_MEMBER(mm_struct_mmap) && VALID_MEMBER(mm_struct_mm_mt)) {
		mm_mt = tm->mm_struct_addr + OFFSET(mm_struct_mm_mt);
		entry_num = do_maple_tree(mm_mt, MAPLE_TREE_COUNT, NULL);
		entry_list = (struct list_pair *)GETBUF(entry_num *
			sizeof(struct list_pair));
		do_maple_tree(mm_mt, MAPLE_TREE_GATHER, entry_list);

		for (i = 0; i < entry_num; i++) {
			if (!!(args.vma = (ulong)entry_list[i].value) &&
			    handle_each_vm_area(&args)) {
				FREEBUF(entry_list);
				return args.vma;
			}
		}
		FREEBUF(entry_list);
	} else {
		readmem(tm->mm_struct_addr + OFFSET(mm_struct_mmap), KVADDR,
			&vma, sizeof(void *), "mm_struct mmap", FAULT_ON_ERROR);

		while (vma) {
			args.vma = vma;
			if (handle_each_vm_area(&args))
				return vma;
			vma = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next));
		}
	}

	if (flag & VERIFY_ADDR)
		return (ulong)NULL;

	if ((flag & PRINT_SINGLE_VMA) && !single_vma_found)
		fprintf(fp, "(not found)\n");

	if ((flag & UVADDR) && !found)
		fprintf(fp, "(not found)\n");

	if (VM_REF_FOUND(ref))
		fprintf(fp, "\n");

	return (ulong)NULL;
}

/*
 * Dump (or, in ref-search mode, scan) the per-page translations of one
 * VMA.  In search mode returns TRUE on the first match, recording where
 * to resume in ref->ref2; in display mode prints from that point on.
 */
static int
vm_area_page_dump(ulong vma,
		  ulong task,
		  ulong start,
		  ulong end,
		  ulong mm,
		  struct reference *ref)
{
	physaddr_t paddr;
	ulong offs;
	char *p1, *p2;
	int display;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE*2];
	char buf4[BUFSIZE];

	if (mm == symbol_value("init_mm"))
		return FALSE;

	if (!ref || DO_REF_DISPLAY(ref))
		fprintf(fp, "%s %s\n",
			mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"),
			mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")),
			LJUST, "PHYSICAL"));

	/* Display pass resumes at the page recorded by the search pass. */
	if (DO_REF_DISPLAY(ref)) {
		start = ref->ref2;
	}

	while (start < end) {
		display = DO_REF_SEARCH(ref) ? FALSE : TRUE;

		if (VM_REF_CHECK_HEXVAL(ref, start)) {
			if (DO_REF_DISPLAY(ref))
				display = TRUE;
			else {
				ref->cmdflags |= VM_REF_PAGE;
				ref->ref2 = start;
				return TRUE;
			}
		}

		if (uvtop(task_to_context(task), start, &paddr, 0)) {
			sprintf(buf3, "%s %s\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)),
				mkstring(buf2, MAX(PADDR_PRLEN,
				strlen("PHYSICAL")), RJUST|LONGLONG_HEX,
				MKSTR(&paddr)));

			if (VM_REF_CHECK_HEXVAL(ref, paddr)) {
				if (DO_REF_DISPLAY(ref))
					display = TRUE;
				else {
					ref->cmdflags |= VM_REF_PAGE;
					ref->ref2 = start;
					return TRUE;
				}
			}
		} else if (paddr && swap_location(paddr, buf1)) {
			sprintf(buf3, "%s SWAP: %s\n",
				mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)), buf1);

			if (DO_REF_SEARCH(ref)) {
				if (VM_REF_CHECK_DECVAL(ref,
				    THIS_KERNEL_VERSION >= LINUX(2,6,0) ?
				    __swp_offset(paddr) : SWP_OFFSET(paddr))) {
					if (DO_REF_DISPLAY(ref))
						display = TRUE;
					else {
						ref->cmdflags |= VM_REF_PAGE;
						ref->ref2 = start;
						return TRUE;
					}
				}

				/* Compare just the swap filename portion. */
				strcpy(buf4, buf3);
				p1 = strstr(buf4, "SWAP:") + strlen("SWAP: ");
				p2 = strstr(buf4, " OFFSET:");
				*p2 = NULLCHAR;
				if (VM_REF_CHECK_STRING(ref, p1)) {
					if (DO_REF_DISPLAY(ref))
						display = TRUE;
					else {
						ref->cmdflags |= VM_REF_PAGE;
						ref->ref2 = start;
						return TRUE;
					}
				}
			}
		} else if (vma_file_offset(vma, start, buf1)) {
			sprintf(buf3, "%s FILE: %s\n",
				mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)), buf1);

			if (DO_REF_SEARCH(ref)) {
				extract_hex(strstr(buf3, "OFFSET:") +
					strlen("OFFSET: "), &offs, 0, 0);

				if (VM_REF_CHECK_HEXVAL(ref, offs)) {
					if (DO_REF_DISPLAY(ref))
						display = TRUE;
					else {
						ref->cmdflags |= VM_REF_PAGE;
						ref->ref2 = start;
						return TRUE;
					}
				}
			}
		} else {
			sprintf(buf3, "%s (not mapped)\n",
				mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				MKSTR(start)));
		}

		if (display)
			fprintf(fp, "%s", buf3);

		start += PAGESIZE();
	}

	return FALSE;
}

/*
 *  Cache the passed-in vm_area_struct.
 */
char *
fill_vma_cache(ulong vma)
{
	int i;
	char *cache;

	vt->vma_cache_fills++;

	/* Round-robin cache of the last VMA_CACHE vm_area_structs read. */
	for (i = 0; i < VMA_CACHE; i++) {
		if (vt->cached_vma[i] == vma) {
			vt->cached_vma_hits[i]++;
			cache = vt->vma_cache + (SIZE(vm_area_struct)*i);
			return(cache);
		}
	}

	cache = vt->vma_cache + (SIZE(vm_area_struct)*vt->vma_cache_index);

	readmem(vma, KVADDR, cache, SIZE(vm_area_struct),
		"fill_vma_cache", FAULT_ON_ERROR);

	vt->cached_vma[vt->vma_cache_index] = vma;

	vt->vma_cache_index = (vt->vma_cache_index+1) % VMA_CACHE;

	return(cache);
}

/*
 *  If active, clear the vm_area_struct references.
 */
void
clear_vma_cache(void)
{
	int i;

	if (DUMPFILE())
		return;

	for (i = 0; i < VMA_CACHE; i++) {
		vt->cached_vma[i] = 0;
		vt->cached_vma_hits[i] = 0;
	}

	vt->vma_cache_fills = 0;
	vt->vma_cache_index = 0;
}

/*
 *  Check whether an address is a user stack address based
 *  upon its vm_area_struct flags.
*/
int
in_user_stack(ulong task, ulong vaddr)
{
	ulong vma;
	ulonglong vm_flags;
	char *vma_buf;

	if ((vma = vm_area_dump(task, UVADDR|VERIFY_ADDR, vaddr, 0))) {
		vma_buf = fill_vma_cache(vma);
		vm_flags = get_vm_flags(vma_buf);

		if (vm_flags & VM_GROWSDOWN)
			return TRUE;
		else if (kernel_symbol_exists("expand_upwards") &&
			(vm_flags & VM_GROWSUP))
			return TRUE;
		/*
		 * per-thread stack
		 */
		if ((vm_flags & (VM_READ|VM_WRITE)) == (VM_READ|VM_WRITE))
			return TRUE;
	}
	return FALSE;
}

/*
 *  Set the const value of filepages, anonpages and shmempages
 *  according to MM_FILEPAGES, MM_ANONPAGES and MM_SHMEMPAGES.
 */
static void
rss_page_types_init(void)
{
	long anonpages, filepages, shmempages;

	if (VALID_MEMBER(mm_struct_rss))
		return;

	if (VALID_MEMBER(mm_struct_rss_stat))
	{
		/* Fall back to the historical index values if not in debuginfo. */
		if (!enumerator_value("MM_FILEPAGES", &filepages) ||
		    !enumerator_value("MM_ANONPAGES", &anonpages))
		{
			filepages = 0;
			anonpages = 1;
		}
		tt->filepages = filepages;
		tt->anonpages = anonpages;

		/*
		 * The default value(MM_SHMEMPAGES) is 3, which is introduced
		 * in linux v4.5-rc1 and later. See commit eca56ff906bd.
		 */
		if (!enumerator_value("MM_SHMEMPAGES", &shmempages))
			tt->shmempages = -1;
		else
			tt->shmempages = shmempages;
	}
}

/*
 * Fast-path lookup in the sorted tgid array: check the last-hit entry
 * and its immediate successor before falling back to a full search.
 */
static struct tgid_context *
tgid_quick_search(ulong tgid)
{
	struct tgid_context *last, *next;

	tt->tgid_searches++;

	if (!(last = tt->last_tgid))
		return NULL;

	if (tgid == last->tgid) {
		tt->tgid_cache_hits++;
		return last;
	}

	next = last + 1;
	if ((next < (tt->tgid_array + RUNNING_TASKS())) &&
		(tgid == next->tgid)) {
		tt->tgid_cache_hits++;
		return next;
	}

	return NULL;
}

/*
 * Parse a comma-separated list of page struct member names (kmem -m)
 * into an array of struct_member_data cached on the meminfo.
 */
static void
collect_page_member_data(char *optlist, struct meminfo *mi)
{
	int i;
	int members;
	char buf[BUFSIZE];
	char *memberlist[MAXARGS];
	struct struct_member_data *page_member_cache, *pmd;

	if ((count_chars(optlist, ',')+1) > MAXARGS)
		error(FATAL, "too many members in comma-separated list\n");

	if ((LASTCHAR(optlist) == ',') || (LASTCHAR(optlist) == '.'))
		error(FATAL, "invalid format: %s\n", optlist);

	strcpy(buf, optlist);
	replace_string(optlist, ",", ' ');

	if (!(members = parse_line(optlist, memberlist)))
		error(FATAL, "invalid page struct member list format: %s\n",
			buf);

	page_member_cache = (struct struct_member_data *)
		GETBUF(sizeof(struct struct_member_data) * members);

	for (i = 0, pmd = page_member_cache; i < members; i++, pmd++) {
		pmd->structure = "page";
		pmd->member = memberlist[i];

		if (!fill_struct_member_data(pmd))
			error(FATAL, "invalid %s struct member: %s\n",
				pmd->structure, pmd->member);

		if (CRASHDEBUG(1)) {
			fprintf(fp, "      structure: %s\n", pmd->structure);
			fprintf(fp, "         member: %s\n", pmd->member);
			fprintf(fp, "           type: %ld\n", pmd->type);
			fprintf(fp, "  unsigned_type: %ld\n",
				pmd->unsigned_type);
			fprintf(fp, "         length: %ld\n", pmd->length);
			fprintf(fp, "         offset: %ld\n", pmd->offset);
			fprintf(fp, "         bitpos: %ld\n", pmd->bitpos);
			fprintf(fp, "        bitsize: %ld%s", pmd->bitsize,
				members > 1 ? "\n\n" : "\n");
		}
	}

	mi->nr_members = members;
	mi->page_member_cache = page_member_cache;
}

/*
 * Extract a bitfield from an integer member value, honoring byte order.
 * A zero bitpos/bitsize pair means "not a bitfield": pass through as-is.
 */
static int
get_bitfield_data(struct integer_data *bd)
{
	int pos, size;
	uint32_t tmpvalue32;
	uint64_t tmpvalue64;
	uint32_t mask32;
	uint64_t mask64;
	struct struct_member_data *pmd;

	pmd = bd->pmd;
	pos = bd->pmd->bitpos;
	size = bd->pmd->bitsize;

	if (pos == 0 && size == 0) {
		bd->bitfield_value = bd->value;
		return TRUE;
	}

	switch (__BYTE_ORDER)
	{
	case __LITTLE_ENDIAN:
		switch (pmd->length)
		{
		case 4:
			tmpvalue32 = (uint32_t)bd->value;
			tmpvalue32 >>= pos;
			mask32 = (1 << size) - 1;
			tmpvalue32 &= mask32;
			bd->bitfield_value = (ulong)tmpvalue32;
			break;
		case 8:
			tmpvalue64 = (uint64_t)bd->value;
			tmpvalue64 >>= pos;
			mask64 = (1UL << size) - 1;
			tmpvalue64 &= mask64;
			bd->bitfield_value = tmpvalue64;
			break;
		default:
			return FALSE;
		}
		break;

	case __BIG_ENDIAN:
		switch (pmd->length)
		{
		case 4:
			tmpvalue32 = (uint32_t)bd->value;
			tmpvalue32 <<= pos;
			tmpvalue32 >>= (32-size);
			mask32 = (1 << size) - 1;
			tmpvalue32 &= mask32;
			bd->bitfield_value = (ulong)tmpvalue32;
			break;
		case 8:
			tmpvalue64 = (uint64_t)bd->value;
			tmpvalue64 <<= pos;
			tmpvalue64 >>= (64-size);
			mask64 = (1UL << size) - 1;
			tmpvalue64 &= mask64;
			bd->bitfield_value = tmpvalue64;
			break;
		default:
			return FALSE;
		}
		break;
	}

	return TRUE;
}

/*
 * Format the requested page struct members of one page into outputbuffer.
 * Returns the number of characters written (including the newline).
 */
static int
show_page_member_data(char *pcache, ulong pp, struct meminfo *mi,
	char *outputbuffer)
{
	int bufferindex, i, c, cnt, radix, struct_intbuf[10];
	ulong longbuf, struct_longbuf[10];
	unsigned char boolbuf;
	void *voidptr;
	ushort shortbuf;
	struct struct_member_data *pmd;
	struct integer_data integer_data;

	bufferindex = 0;
	pmd = mi->page_member_cache;

	bufferindex += sprintf(outputbuffer + bufferindex, "%lx ", pp);

	for (i = 0; i < mi->nr_members; pmd++, i++) {

		switch (pmd->type)
		{
		case TYPE_CODE_PTR:
			voidptr = VOID_PTR(pcache + pmd->offset);
			bufferindex += sprintf(outputbuffer + bufferindex,
				VADDR_PRLEN == 8 ? "%08lx " : "%016lx ",
				(ulong)voidptr);
			break;

		case TYPE_CODE_INT:
			switch (pmd->length)
			{
			case 1:
				integer_data.value = UCHAR(pcache +
					pmd->offset);
				break;
			case 2:
				integer_data.value = USHORT(pcache +
					pmd->offset);
				break;
			case 4:
				integer_data.value = UINT(pcache +
					pmd->offset);
				break;
			case 8:
				/* 8-byte ints unsupported on 32-bit hosts */
				if (BITS32())
					goto unsupported;
				integer_data.value = ULONG(pcache +
					pmd->offset);
				break;
			default:
				goto unsupported;
			}
			integer_data.pmd = pmd;
			if (get_bitfield_data(&integer_data))
				longbuf = integer_data.bitfield_value;
			else
				goto unsupported;

			/* flags print in hex; refcounts in decimal */
			if (STREQ(pmd->member, "flags"))
				radix = 16;
			else if (STRNEQ(pmd->member, "_count") ||
			    STRNEQ(pmd->member, "_mapcount"))
				radix = 10;
			else
				radix = *gdb_output_radix;

			if (pmd->unsigned_type) {
				if (pmd->length == sizeof(ulonglong))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ?  "%lu " :
						"%016lx ", longbuf);
				else if (pmd->length == sizeof(int))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ?  "%u " :
						"%08x ", (uint)longbuf);
				else if (pmd->length == sizeof(short)) {
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ?  "%u " :
						"%04x ", (ushort)longbuf);
				}
				else if (pmd->length == sizeof(char))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ?  "%u " :
						"%02x ",
						(unsigned char)longbuf);
			} else {
				if (pmd->length == sizeof(ulonglong))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ? "%ld " :
						"%016lx", longbuf);
				else if (pmd->length == sizeof(int))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ? "%d " :
						"%08x ", (int)longbuf);
				else if (pmd->length == sizeof(short))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ? "%d " :
						"%04x ", (short)longbuf);
				else if (pmd->length == sizeof(char))
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						radix == 10 ? "%d " :
						"%02x ", (char)longbuf);
			}
			break;

		case TYPE_CODE_STRUCT:
			if (STRNEQ(pmd->member, "_count") ||
			    STRNEQ(pmd->member, "_mapcount")) {
				BCOPY(pcache+pmd->offset,
					(char *)&struct_intbuf[0],
					pmd->length);
				bufferindex += sprintf(outputbuffer +
					bufferindex, "%d ", struct_intbuf[0]);
			} else if ((pmd->length % sizeof(long)) == 0) {
				BCOPY(pcache+pmd->offset,
					(char *)&struct_longbuf[0],
					pmd->length);
				cnt = pmd->length / sizeof(long);
				for (c = 0; c < cnt; c++) {
					bufferindex += sprintf(outputbuffer +
						bufferindex,
						BITS32() ? "%08lx%s" :
						"%016lx%s",
						struct_longbuf[c],
						(c+1) < cnt ? "," : "");
				}
				bufferindex += sprintf(outputbuffer +
					bufferindex, " ");
			} else if ((pmd->length % sizeof(int)) == 0) {
				BCOPY(pcache+pmd->offset,
					(char *)&struct_intbuf[0],
					pmd->length);
				cnt = pmd->length / sizeof(int);
				for (c = 0; c < cnt; c++) {
					bufferindex += sprintf(outputbuffer +
						bufferindex, "%08x%s",
						struct_intbuf[c],
						(c+1) < cnt ? "," : "");
				}
			} else if (pmd->length == sizeof(short)) {
				BCOPY(pcache+pmd->offset,
					(char *)&shortbuf, pmd->length);
				bufferindex += sprintf(outputbuffer +
					bufferindex, "%04x ", shortbuf);
			} else
				goto unsupported;
			break;

		case TYPE_CODE_BOOL:
			radix = *gdb_output_radix;
			boolbuf = UCHAR(pcache + pmd->offset);
			if (boolbuf <= 1)
				bufferindex += sprintf(outputbuffer +
					bufferindex, "%s ",
					boolbuf ? "true" : "false");
			else
				bufferindex += sprintf(outputbuffer +
					bufferindex,
					radix == 10 ? "%d" : "%x ",
					boolbuf);
			break;

		default:
unsupported:
			error(FATAL, "unsupported page member reference: %s.%s\n",
				pmd->structure, pmd->member);
			break;
		}
	}

	return bufferindex += sprintf(outputbuffer+bufferindex, "\n");
}

/*
 *  Fill in the task_mem_usage structure with the RSS, virtual memory size,
 *  percent of physical memory being used, and the mm_struct address.
*/
/*
 *  task: task_struct address; tm: caller-supplied output, zeroed first.
 *  Zombies and kernel threads (no mm_struct) are left with all-zero stats.
 */
void
get_task_mem_usage(ulong task, struct task_mem_usage *tm)
{
	struct task_context *tc;
	long rss = 0, rss_cache = 0;
	int mm_count = 0;

	BZERO(tm, sizeof(struct task_mem_usage));

	if (IS_ZOMBIE(task))
		return;

	tc = task_to_context(task);

	if (!tc || !tc->mm_struct)     /* probably a kernel thread */
		return;

	tm->mm_struct_addr = tc->mm_struct;

	/* task_mm() fills the tt->mm_struct cache buffer used below. */
	if (!task_mm(task, TRUE))
		return;

	/* An exiting task may have already dropped its last mm reference. */
	mm_count = INT(tt->mm_struct + OFFSET(mm_struct_mm_count));
	if (IS_EXITING(task) && mm_count <= 0)
		return;

	if (VALID_MEMBER(mm_struct_rss))
		/*
		 *  mm_struct.rss or mm_struct._rss exist.
		 */
		tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss));
	else {
		/*
		 *  Latest kernels have mm_struct.mm_rss_stat[]: sum the
		 *  file/anon (and, when present, shmem) counters.
		 */
		if (VALID_MEMBER(mm_struct_rss_stat) && VALID_MEMBER(mm_rss_stat_count)) {
			long anonpages, filepages, shmempages, count;

			/* tt->{anon,file,shmem}pages hold the MM_* counter indices. */
			anonpages = tt->anonpages;
			filepages = tt->filepages;
			shmempages = tt->shmempages;

			count = LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) +
				OFFSET(mm_rss_stat_count) +
				(filepages * sizeof(long)));
			/*
			 *  The counter is updated in asynchronous manner
			 *  and may become negative, see:
			 *  include/linux/mm.h: get_mm_counter()
			 */
			if (count > 0)
				rss += count;

			count = LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) +
				OFFSET(mm_rss_stat_count) +
				(anonpages * sizeof(long)));
			if (count > 0)
				rss += count;

			if (shmempages > 0) {
				count = LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) +
					OFFSET(mm_rss_stat_count) +
					(shmempages * sizeof(long)));
				if (count > 0)
					rss += count;
			}
		} else if (VALID_MEMBER(mm_struct_rss_stat)) {
			/* 6.2: struct percpu_counter rss_stat[NR_MM_COUNTERS] */
			ulong fbc;

			fbc = tc->mm_struct + OFFSET(mm_struct_rss_stat) +
				(tt->filepages * SIZE(percpu_counter));
			rss += percpu_counter_sum_positive(fbc);

			fbc = tc->mm_struct + OFFSET(mm_struct_rss_stat) +
				(tt->anonpages * SIZE(percpu_counter));
			rss += percpu_counter_sum_positive(fbc);

			fbc = tc->mm_struct + OFFSET(mm_struct_rss_stat) +
				(tt->shmempages * SIZE(percpu_counter));
			rss += percpu_counter_sum_positive(fbc);
		}

		/* Check whether SPLIT_RSS_COUNTING is enabled */
		if (VALID_MEMBER(task_struct_rss_stat)) {
			int sync_rss;
			struct tgid_context tgid, *tgid_array, *tg, *first, *last;

			tgid_array = tt->tgid_array;
			tgid.tgid = task_tgid(task);

			/*
			 *  tgid_array is sorted by tgid, so the threads of
			 *  one thread group occupy a contiguous run.
			 */
			if (!(tg = tgid_quick_search(tgid.tgid)))
				tg = (struct tgid_context *)bsearch(&tgid, tgid_array,
					RUNNING_TASKS(), sizeof(struct tgid_context),
					sort_by_tgid);

			if (tg) {
				/* find the first element which has the same tgid */
				first = tg;
				while ((first > tgid_array) &&
					((first - 1)->tgid == first->tgid))
					first--;

				/* find the last element which have same tgid */
				last = tg;
				while ((last < (tgid_array + (RUNNING_TASKS() - 1))) &&
					(last->tgid == (last + 1)->tgid))
					last++;

				/*
				 *  Using rss cache for dumpfile is more beneficial than live debug
				 *  because its value never changes in dumpfile.
				 */
				if (ACTIVE() || last->rss_cache == UNINITIALIZED) {
					while (first <= last) {
						ulong addr = first->task +
							OFFSET(task_struct_rss_stat) +
							OFFSET(task_rss_stat_count);

						/*
						 *  NOTE(review): a failed readmem() here
						 *  "continue"s without advancing first, so a
						 *  persistently unreadable task would loop
						 *  forever — confirm readmem failure is
						 *  transient here, or skip the task instead.
						 */
						/* count 0 -> filepages */
						if (!readmem(addr, KVADDR, &sync_rss,
							sizeof(int),
							"task_struct rss_stat MM_FILEPAGES",
							RETURN_ON_ERROR))
							continue;
						if (sync_rss > 0)
							rss_cache += sync_rss;

						/* count 1 -> anonpages */
						if (!readmem(addr + sizeof(int), KVADDR, &sync_rss,
							sizeof(int),
							"task_struct rss_stat MM_ANONPAGES",
							RETURN_ON_ERROR))
							continue;
						if (sync_rss > 0)
							rss_cache += sync_rss;

						/* count 3 -> shmempages */
						if (tt->shmempages >= 0) {
							if (!readmem(addr + tt->shmempages * sizeof(int),
								KVADDR, &sync_rss, sizeof(int),
								"task_struct rss_stat MM_SHMEMPAGES",
								RETURN_ON_ERROR))
								continue;
							if (sync_rss > 0)
								rss_cache += sync_rss;
						}

						if (first == last)
							break;
						first++;
					}
					last->rss_cache = rss_cache;
				}

				rss += last->rss_cache;
				tt->last_tgid = last;
			}
		}

		/*
		 *  mm_struct._anon_rss and mm_struct._file_rss should exist.
		 */
		if (VALID_MEMBER(mm_struct_anon_rss))
			rss += LONG(tt->mm_struct + OFFSET(mm_struct_anon_rss));
		if (VALID_MEMBER(mm_struct_file_rss))
			rss += LONG(tt->mm_struct + OFFSET(mm_struct_file_rss));

		tm->rss = (unsigned long)rss;
	}
	tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));
	tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd));

	if (is_kernel_thread(task) && !tm->rss)
		return;

	tm->pct_physmem = ((double)(tm->rss*100)) /
		((double)(MIN(vt->total_pages,
		vt->num_physpages ? vt->num_physpages : vt->total_pages)));
}

/*
 *  cmd_kmem() is designed as a multi-purpose kernel memory investigator with
 *  the flag argument sending it off in a multitude of areas.  To date, the
 *  following options are defined:
 *
 *   -f  displays the contents of the system free_area[] array headers;
 *       also verifies that the page count equals nr_free_pages
 *   -F  same as -f, but also dumps all pages linked to that header.
 *   -p  displays basic information about each page in the system
 *       mem_map[] array.
 *   -s  displays kmalloc() slab data.
 *   -S  same as -s, but displays all kmalloc() objects.
 *   -v  displays the vmlist entries.
 *   -c  displays the number of pages in the page_hash_table.
 *   -C  displays all entries in the page_hash_table.
 *   -i  displays informational data shown by /proc/meminfo.
 *   -h  hugepage information from hstates[] array
 *
 *   -P  forces address to be defined as a physical address
 *   address  when used with -f, the address can be either a page pointer
 *            or a physical address; the free_area header containing the page
 *            (if any) is displayed.
 *            When used with -p, the address can be either a page pointer or a
 *            physical address; its basic mem_map page information is displayed.
 *            When used with -c, the page_hash_table entry containing the
 *            page pointer is displayed.
*/ /* Note: VERBOSE is 0x1, ADDRESS_SPECIFIED is 0x2 */ #define GET_TOTALRAM_PAGES (ADDRESS_SPECIFIED << 1) #define GET_SHARED_PAGES (ADDRESS_SPECIFIED << 2) #define GET_FREE_PAGES (ADDRESS_SPECIFIED << 3) #define GET_FREE_HIGHMEM_PAGES (ADDRESS_SPECIFIED << 4) #define GET_ZONE_SIZES (ADDRESS_SPECIFIED << 5) #define GET_HIGHEST (ADDRESS_SPECIFIED << 6) #define GET_BUFFERS_PAGES (ADDRESS_SPECIFIED << 7) #define GET_SLAB_PAGES (ADDRESS_SPECIFIED << 8) #define GET_PHYS_TO_VMALLOC (ADDRESS_SPECIFIED << 9) #define GET_ACTIVE_LIST (ADDRESS_SPECIFIED << 10) #define GET_INACTIVE_LIST (ADDRESS_SPECIFIED << 11) #define GET_INACTIVE_CLEAN (ADDRESS_SPECIFIED << 12) /* obsolete */ #define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */ #define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14) #define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15) #define GET_VMLIST_COUNT (ADDRESS_SPECIFIED << 16) #define GET_VMLIST (ADDRESS_SPECIFIED << 17) #define SLAB_DATA_NOSAVE (ADDRESS_SPECIFIED << 18) #define GET_SLUB_SLABS (ADDRESS_SPECIFIED << 19) #define GET_SLUB_OBJECTS (ADDRESS_SPECIFIED << 20) #define VMLIST_VERIFY (ADDRESS_SPECIFIED << 21) #define SLAB_FIRST_NODE (ADDRESS_SPECIFIED << 22) #define CACHE_SET (ADDRESS_SPECIFIED << 23) #define SLAB_OVERLOAD_PAGE_PTR (ADDRESS_SPECIFIED << 24) #define SLAB_BITFIELD (ADDRESS_SPECIFIED << 25) #define SLAB_GATHER_FAILURE (ADDRESS_SPECIFIED << 26) #define GET_SLAB_ROOT_CACHES (ADDRESS_SPECIFIED << 27) #define GET_ALL \ (GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES) void cmd_kmem(void) { int i; int c; int sflag, Sflag, pflag, fflag, Fflag, vflag, zflag, oflag, gflag; int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag, hflag; int rflag; struct meminfo meminfo; ulonglong value[MAXARGS]; char buf[BUFSIZE]; char arg_buf[BUFSIZE]; char *p1; ulong *cpus; int spec_addr, escape, choose_cpu; cpus = NULL; spec_addr = choose_cpu = 0; sflag = Sflag = pflag = fflag = Fflag = Pflag = zflag = oflag = 0; vflag = Cflag = cflag 
= iflag = nflag = lflag = Lflag = Vflag = 0; gflag = hflag = rflag = 0; escape = FALSE; BZERO(&meminfo, sizeof(struct meminfo)); BZERO(&value[0], sizeof(ulonglong)*MAXARGS); pc->curcmd_flags &= ~HEADER_PRINTED; while ((c = getopt(argcnt, args, "gI:sS::rFfm:pvczCi::nl:L:PVoh")) != EOF) { switch(c) { case 'V': Vflag = 1; break; case 'n': nflag = 1; break; case 'z': zflag = 1; break; case 'i': iflag = 1; if (optarg && strcmp(optarg, "=shared") == 0) { meminfo.flags = GET_SHARED_PAGES; } break; case 'h': hflag = 1; break; case 'C': Cflag = 1, cflag = 0;; break; case 'c': cflag = 1, Cflag = 0; break; case 'v': vflag = 1; break; case 's': sflag = 1; Sflag = rflag = 0; break; case 'S': if (choose_cpu) error(FATAL, "only one -S option allowed\n"); /* Use the GNU extension with getopt(3) ... */ if (optarg) { if (!(vt->flags & KMALLOC_SLUB)) error(FATAL, "can only use -S=cpu(s) with a kernel \n" "that is built with CONFIG_SLUB support.\n"); if (optarg[0] != '=') error(FATAL, "CPU-specific slab data to be displayed " "must be written as expected only e.g. -S=1,45.\n"); /* Skip = ... 
*/ optarg++; choose_cpu = 1; BZERO(arg_buf, BUFSIZE); strcpy(arg_buf, optarg); cpus = get_cpumask_buf(); make_cpumask(arg_buf, cpus, FAULT_ON_ERROR, NULL); meminfo.spec_cpumask = cpus; } Sflag = 1; sflag = rflag = 0; break; case 'r': rflag = 1; sflag = Sflag = 0; break; case 'F': Fflag = 1; fflag = 0; break;; case 'f': fflag = 1; Fflag = 0; break;; case 'p': pflag = 1; break; case 'm': pflag = 1; collect_page_member_data(optarg, &meminfo); break; case 'I': meminfo.ignore = optarg; break; case 'l': if (STREQ(optarg, "a")) { meminfo.flags |= GET_ACTIVE_LIST; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "i")) { meminfo.flags |= GET_INACTIVE_LIST; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "ic")) { meminfo.flags |= GET_INACTIVE_CLEAN; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "id")) { meminfo.flags |= GET_INACTIVE_DIRTY; lflag = 1; Lflag = 0; } else argerrs++; break; case 'L': if (STREQ(optarg, "a")) { meminfo.flags |= GET_ACTIVE_LIST; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "i")) { meminfo.flags |= GET_INACTIVE_LIST; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "ic")) { meminfo.flags |= GET_INACTIVE_CLEAN; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "id")) { meminfo.flags |= GET_INACTIVE_DIRTY; Lflag = 1; lflag = 0; } else argerrs++; break; case 'P': Pflag = 1; break; case 'o': oflag = 1; break; case 'g': gflag = 1; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((sflag + Sflag + pflag + fflag + Fflag + Vflag + oflag + vflag + Cflag + cflag + iflag + lflag + Lflag + gflag + hflag + rflag) > 1) { error(INFO, "only one flag allowed!\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (sflag || Sflag || rflag || !(vt->flags & KMEM_CACHE_INIT)) kmem_cache_init(); while (args[optind]) { if (hexadecimal(args[optind], 0)) { value[spec_addr++] = htoll(args[optind], FAULT_ON_ERROR, NULL); } else { if (meminfo.reqname) error(FATAL, "only one kmem_cache reference is allowed\n"); meminfo.reqname = args[optind]; if 
(args[optind][0] == '\\') { meminfo.reqname = &args[optind][1]; escape = TRUE; } else meminfo.reqname = args[optind]; if (!sflag && !Sflag && !rflag) cmd_usage(pc->curcmd, SYNOPSIS); } optind++; } for (i = 0; i < spec_addr; i++) { if (Pflag) meminfo.memtype = PHYSADDR; else meminfo.memtype = IS_KVADDR(value[i]) ? KVADDR : PHYSADDR; if (fflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_free_pages(&meminfo); fflag++; } if (pflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; dump_mem_map(&meminfo); pflag++; } if (sflag || Sflag) { if (vt->flags & KMEM_CACHE_UNAVAIL) error(FATAL, "kmem cache slab subsystem not available\n"); meminfo.flags = Sflag ? VERBOSE : 0; if (meminfo.memtype == PHYSADDR) { if (value[i] < VTOP(vt->high_memory)) { value[i] = PTOV(value[i]); meminfo.memtype = KVADDR; } else error(WARNING, "cannot make virtual-to-physical translation: %llx\n", value[i]); } if ((p1 = is_kmem_cache_addr(value[i], buf))) { if (meminfo.reqname) error(FATAL, "only one kmem_cache reference is allowed\n"); meminfo.reqname = p1; meminfo.cache = value[i]; meminfo.flags |= CACHE_SET; if ((i+1) == spec_addr) { /* done? 
*/ if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_kmem_cache(&meminfo); } meminfo.flags &= ~CACHE_SET; } else { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (Sflag && (vt->flags & KMALLOC_SLUB)) meminfo.flags |= VERBOSE; if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_kmem_cache(&meminfo); } if (sflag) sflag++; if (Sflag) Sflag++; } if (vflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; dump_vmlist(&meminfo); vflag++; } if (cflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); dump_page_hash_table(&meminfo); cflag++; } if (lflag) { meminfo.spec_addr = value[i]; meminfo.flags |= (ADDRESS_SPECIFIED|VERBOSE); if (meminfo.calls++) fprintf(fp, "\n"); dump_page_lists(&meminfo); lflag++; } if (gflag) { if (i) fprintf(fp, "\n"); dump_page_flags(value[i]); gflag++; } /* * no value arguments allowed! */ if (zflag || nflag || iflag || Fflag || Cflag || Lflag || Vflag || oflag || hflag || rflag) { error(INFO, "no address arguments allowed with this option\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (!(sflag + Sflag + pflag + fflag + vflag + cflag + lflag + Lflag + gflag)) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); else kmem_cache_init(); kmem_search(&meminfo); } } if (iflag == 1) dump_kmeminfo(&meminfo); if (pflag == 1) dump_mem_map(&meminfo); if (fflag == 1) vt->dump_free_pages(&meminfo); if (Fflag == 1) { meminfo.flags = VERBOSE; vt->dump_free_pages(&meminfo); } if (hflag == 1) dump_hstates(); if (sflag == 1 || rflag == 1) { if (rflag) { if (!((vt->flags & KMALLOC_SLUB) && (vt->flags & SLAB_ROOT_CACHES))) option_not_supported('r'); meminfo.flags = GET_SLAB_ROOT_CACHES; } if (!escape && STREQ(meminfo.reqname, "list")) kmem_cache_list(&meminfo); else if (vt->flags & KMEM_CACHE_UNAVAIL) error(FATAL, "kmem cache slab subsystem not available\n"); else vt->dump_kmem_cache(&meminfo); } if (Sflag == 1) { if 
	    (STREQ(meminfo.reqname, "list"))
			kmem_cache_list(&meminfo);
		else if (vt->flags & KMEM_CACHE_UNAVAIL)
			error(FATAL,
			    "kmem cache slab subsystem not available\n");
		else {
			meminfo.flags = VERBOSE;
			vt->dump_kmem_cache(&meminfo);
		}
		if (choose_cpu)
			FREEBUF(cpus);
	}

	if (vflag == 1)
		dump_vmlist(&meminfo);

	if (Cflag == 1) {
		meminfo.flags = VERBOSE;
		dump_page_hash_table(&meminfo);
	}

	if (cflag == 1)
		dump_page_hash_table(&meminfo);

	if (nflag == 1)
		dump_memory_nodes(MEMORY_NODES_DUMP);

	if (zflag == 1)
		dump_zone_stats();

	if (lflag == 1) {
		dump_page_lists(&meminfo);
	}

	if (Lflag == 1) {
		meminfo.flags |= VERBOSE;
		dump_page_lists(&meminfo);
	}

	if (Vflag == 1) {
		dump_vm_stat(NULL, NULL, 0);
		dump_page_states();
		dump_vm_event_state();
	}

	if (oflag == 1)
		dump_per_cpu_offsets();

	if (gflag == 1)
		dump_page_flags(0);

	/* No option (and no address) at all is a usage error. */
	if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + Vflag + zflag +
	      oflag + cflag + Cflag + iflag + nflag + lflag + Lflag + gflag +
	      hflag + rflag + meminfo.calls))
		cmd_usage(pc->curcmd, SYNOPSIS);
}

/*
 *  Determine the PG_reserved bit mask for this kernel: prefer the
 *  pageflags enumerator; otherwise probe the page struct backing a
 *  known kernel-text address, which must be a reserved page.
 */
static void
PG_reserved_flag_init(void)
{
	ulong pageptr;
	int count;
	ulong vaddr, flags;
	char *buf;

	if (enumerator_value("PG_reserved", (long *)&flags)) {
		vt->PG_reserved = 1 << flags;
		if (CRASHDEBUG(2))
			fprintf(fp, "PG_reserved (enum): %lx\n",
				vt->PG_reserved);
		return;
	}

	/* Fall back to probing a kernel-text page. */
	vaddr = kt->stext;
	if (!vaddr) {
		if (kernel_symbol_exists("sys_read"))
			vaddr = symbol_value("sys_read");
		else if (kernel_symbol_exists("__x64_sys_read"))
			vaddr = symbol_value("__x64_sys_read");
	}

	if (!phys_to_page((physaddr_t)VTOP(vaddr), &pageptr))
		return;

	buf = (char *)GETBUF(SIZE(page));

	if (!readmem(pageptr, KVADDR, buf, SIZE(page),
	    "reserved page", RETURN_ON_ERROR|QUIET)) {
		FREEBUF(buf);
		return;
	}

	flags = ULONG(buf + OFFSET(page_flags));
	count = INT(buf + OFFSET(page_count));

	/* With several bits set, take the lowest one as PG_reserved. */
	if (count_bits_long(flags) == 1)
		vt->PG_reserved = flags;
	else
		vt->PG_reserved = 1 << (ffsl(flags)-1);

	/* A -1 refcount on a reserved page implies the PGCNT_ADJ scheme. */
	if (count == -1)
		vt->flags |= PGCNT_ADJ;

	if (CRASHDEBUG(2))
		fprintf(fp,
		    "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n",
			vaddr, pageptr, flags, vt->PG_reserved);

	FREEBUF(buf);
}

/*
 *  Determine the PG_slab bit number and the compound-page head/tail
 *  mask for this kernel, from the pageflags enumerator when available,
 *  else from version-specific hardwired values or by probing the page
 *  backing vm_area_cachep.
 */
static void
PG_slab_flag_init(void)
{
	int bit;
	ulong pageptr;
	ulong vaddr, flags, flags2;
	char buf[BUFSIZE]; /* safe for a page struct */

	/*
	 *  Set the old defaults in case all else fails.
	 */
	if (enumerator_value("PG_slab", (long *)&flags)) {
		vt->PG_slab = flags;
		if (CRASHDEBUG(2))
			fprintf(fp, "PG_slab (enum): %lx\n", vt->PG_slab);
	} else if (VALID_MEMBER(page_pte)) {
		if (THIS_KERNEL_VERSION < LINUX(2,6,0))
			vt->PG_slab = 10;
		else if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
			vt->PG_slab = 7;
	} else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
		vt->PG_slab = 7;
	} else {
		/* Last resort: read the page struct of a known slab page. */
		if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) &&
		    phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) &&
		    readmem(pageptr, KVADDR, buf, SIZE(page),
		    "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) {
			flags = ULONG(buf + OFFSET(page_flags));
			if ((bit = ffsl(flags))) {
				vt->PG_slab = bit - 1;
				if (CRASHDEBUG(2))
					fprintf(fp,
					    "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n",
						vaddr, pageptr, flags, vt->PG_slab);
			}
		}
	}

	if (VALID_MEMBER(page_compound_head)) {
		/* Modern kernels link tail pages via page.compound_head. */
		if (CRASHDEBUG(2))
			fprintf(fp,
			    "PG_head_tail_mask: (UNUSED): page.compound_head exists!\n");
	} else if (vt->flags & KMALLOC_SLUB) {
		/*
		 *  PG_slab and the following are hardwired for
		 *  kernels prior to the pageflags enumerator.
		 */
#define PG_compound	14	/* Part of a compound page */
#define PG_reclaim	17	/* To be reclaimed asap */
		vt->PG_head_tail_mask = ((1L << PG_compound) | (1L << PG_reclaim));

		if (enumerator_value("PG_tail", (long *)&flags))
			vt->PG_head_tail_mask = (1L << flags);
		else if (enumerator_value("PG_compound", (long *)&flags) &&
			 enumerator_value("PG_reclaim", (long *)&flags2)) {
			vt->PG_head_tail_mask = ((1L << flags) | (1L << flags2));
			if (CRASHDEBUG(2))
				fprintf(fp, "PG_head_tail_mask: %lx\n",
					vt->PG_head_tail_mask);
		} else if (vt->flags & PAGEFLAGS) {
			vt->PG_head_tail_mask = 0;
			error(WARNING,
			    "SLUB: cannot determine how compound pages are linked\n\n");
		}
	} else {
		if (enumerator_value("PG_tail", (long *)&flags))
			vt->PG_head_tail_mask = (1L << flags);
		else if (enumerator_value("PG_compound", (long *)&flags) &&
			 enumerator_value("PG_reclaim", (long *)&flags2)) {
			vt->PG_head_tail_mask = ((1L << flags) | (1L << flags2));
			if (CRASHDEBUG(2))
				fprintf(fp,
				    "PG_head_tail_mask: %lx (PG_compound|PG_reclaim)\n",
					vt->PG_head_tail_mask);
		} else if (vt->flags & PAGEFLAGS)
			error(WARNING,
			    "SLAB: cannot determine how compound pages are linked\n\n");
	}

	if (!vt->PG_slab)
		error(INFO, "cannot determine PG_slab bit value\n");
}

/*
 *  dump_mem_map() displays basic data about each entry in the mem_map[]
 *  array, or if an address is specified, just the mem_map[] entry for that
 *  address.  Specified addresses can either be physical address or page
 *  structure pointers.
*/ /* Page flag bit values */ #define v22_PG_locked 0 #define v22_PG_error 1 #define v22_PG_referenced 2 #define v22_PG_dirty 3 #define v22_PG_uptodate 4 #define v22_PG_free_after 5 #define v22_PG_decr_after 6 #define v22_PG_swap_unlock_after 7 #define v22_PG_DMA 8 #define v22_PG_Slab 9 #define v22_PG_swap_cache 10 #define v22_PG_skip 11 #define v22_PG_reserved 31 #define v24_PG_locked 0 #define v24_PG_error 1 #define v24_PG_referenced 2 #define v24_PG_uptodate 3 #define v24_PG_dirty 4 #define v24_PG_decr_after 5 #define v24_PG_active 6 #define v24_PG_inactive_dirty 7 #define v24_PG_slab 8 #define v24_PG_swap_cache 9 #define v24_PG_skip 10 #define v24_PG_inactive_clean 11 #define v24_PG_highmem 12 #define v24_PG_checked 13 /* kill me in 2.5.. */ #define v24_PG_bigpage 14 /* bits 21-30 unused */ #define v24_PG_arch_1 30 #define v24_PG_reserved 31 #define v26_PG_private 12 #define PGMM_CACHED (512) static void dump_mem_map_SPARSEMEM(struct meminfo *mi) { ulong i; long total_pages; int others, page_not_mapped, phys_not_mapped, page_mapping; ulong pp, ppend; physaddr_t phys, physend; ulong tmp, reserved, shared, slabs; ulong PG_reserved_flag; long buffers; ulong inode, offset, flags, mapping, index; uint count; int print_hdr, pg_spec, phys_spec, done; int v22; char hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char *page_cache; char *pcache; ulong section, section_nr, nr_mem_sections, section_size; long buffersize; char *outputbuffer; int bufferindex; buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); char style1[100]; char style2[100]; char style3[100]; char style4[100]; sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), 
mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = 
TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; nr_mem_sections = NR_MEM_SECTIONS(); bufferindex = 0; /* * Iterate over all possible sections */ for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) { if (CRASHDEBUG(2)) fprintf(fp, "section_nr = %ld\n", section_nr); /* * If we are looking up a specific address, jump directly * to the section with that page */ if (mi->flags & ADDRESS_SPECIFIED) { ulong pfn; physaddr_t tmp; if (pg_spec) { if (!page_to_phys(mi->spec_addr, &tmp)) return; pfn = tmp >> PAGESHIFT(); } else pfn = mi->spec_addr >> PAGESHIFT(); section_nr = pfn_to_section_nr(pfn); } if (!(section = valid_section_nr(section_nr))) { #ifdef NOTDEF break; /* On a real sparsemem system we need to check * every section as gaps may exist. But this * can be slow. If we know we don't have gaps * just stop validating sections when we * get to the end of the valid ones. * In the future find a way to short circuit * this loop. 
*/ #endif if (mi->flags & ADDRESS_SPECIFIED) break; continue; } if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } pp = section_mem_map_addr(section, 0); pp = sparse_decode_mem_map(pp, section_nr); phys = (physaddr_t) section_nr * PAGES_PER_SECTION() * PAGESIZE(); section_size = PAGES_PER_SECTION(); for (i = 0; i < section_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if (page_slab(pp, flags)) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) 
PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> 
v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } static void dump_mem_map(struct meminfo *mi) { long i, n; long total_pages; int others, page_not_mapped, phys_not_mapped, page_mapping; ulong pp, ppend; physaddr_t phys, physend; ulong tmp, reserved, shared, slabs; ulong PG_reserved_flag; long buffers; ulong inode, offset, flags, mapping, index; ulong node_size; uint count; int print_hdr, pg_spec, phys_spec, done; int v22; struct node_table *nt; char hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char *page_cache; char *pcache; long buffersize; 
char *outputbuffer; int bufferindex; char style1[100]; char style2[100]; char style3[100]; char style4[100]; if (IS_SPARSEMEM()) { dump_mem_map_SPARSEMEM(mi); return; } buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. 
page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; bufferindex = 0; for (n = 0; n < vt->numnodes; n++) { if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s%s", n ? 
"\n" : "", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } nt = &vt->node_table[n]; total_pages += nt->size; pp = nt->mem_map; phys = nt->start_paddr; if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) node_size = vt->max_mapnr; else node_size = nt->size; for (i = 0; i < node_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if (page_slab(pp, flags)) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 
1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> 
v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } /* * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the * passed-in buffer. The mem_map array is normally guaranteed to be * readable except in the case of virtual mem_map usage. When V_MEM_MAP * is in place, read all pages consumed by PGMM_CACHED page structures * that are currently mapped, leaving the unmapped ones just zeroed out. */ static void fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache) { long size, cnt; ulong addr; char *bufptr; /* * Try to read it in one fell swoop. 
*/
	/*
	 * Fast path: attempt to read the whole PGMM_CACHED run of page
	 * structs in a single readmem(); if it succeeds, we are done.
	 */
	if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, "page struct cache", RETURN_ON_ERROR|QUIET))
		return;

	/*
	 * Break it into page-size-or-less requests, warning if it's
	 * not a virtual mem_map.
	 */
	size = SIZE(page) * PGMM_CACHED;
	addr = pp;
	bufptr = page_cache;

	while (size > 0) {
		/*
		 * Compute bytes till end of page.
		 */
		cnt = PAGESIZE() - PAGEOFFSET(addr);

		if (cnt > size)
			cnt = size;

		/*
		 * An unreadable piece is zero-filled rather than treated as
		 * fatal: holes are expected with a virtual mem_map/vmemmap.
		 * Only warn when neither V_MEM_MAP nor VMEMMAP is in effect
		 * (i.e. the mem_map should be fully readable) and the failed
		 * piece is not merely the partial chunk at the end.
		 */
		if (!readmem(addr, KVADDR, bufptr, cnt, "virtual page struct cache", RETURN_ON_ERROR|QUIET)) {
			BZERO(bufptr, cnt);
			if (!((vt->flags & V_MEM_MAP) || (machdep->flags & VMEMMAP)) && ((addr+cnt) < ppend))
				error(WARNING, "mem_map[] from %lx to %lx not accessible\n", addr, addr+cnt);
		}

		addr += cnt;
		bufptr += cnt;
		size -= cnt;
	}
}

/*
 * Dump the kernel's hstates[] array: one output line per in-use huge page
 * state, showing the hstate address, huge page size, free count, total
 * count and name.  Bails out via option_not_supported('h') if the symbol
 * or any required hstate member is unknown for this kernel.
 */
static void
dump_hstates()
{
	char *hstate;
	int i, len, order;
	long nr, free;
	ulong vaddr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	if (!kernel_symbol_exists("hstates")) {
		error(INFO, "hstates[] array does not exist\n");
		option_not_supported('h');
	}

	/* every offset/size used below must have been initialized */
	if (INVALID_SIZE(hstate) ||
	    INVALID_MEMBER(hstate_order) ||
	    INVALID_MEMBER(hstate_name) ||
	    INVALID_MEMBER(hstate_nr_huge_pages) ||
	    INVALID_MEMBER(hstate_free_huge_pages)) {
		error(INFO, "hstate structure or members have changed\n");
		option_not_supported('h');
	}

	fprintf(fp, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "HSTATE"));
	fprintf(fp, " SIZE FREE TOTAL NAME\n");

	len = get_array_length("hstates", NULL, 0);
	hstate = GETBUF(SIZE(hstate));

	for (i = 0; i < len; i++) {
		vaddr = symbol_value("hstates") + (SIZE(hstate) * i);
		if (!readmem(vaddr, KVADDR, hstate, SIZE(hstate), "hstate", RETURN_ON_ERROR))
			break;

		/* order == 0: this hstates[] slot is unused, skip it */
		order = INT(hstate + OFFSET(hstate_order));
		if (!order)
			continue;

		fprintf(fp, "%lx ", vaddr);

		/* 1 << order pages, rendered as a human-readable size */
		pages_to_size(1 << order, buf1);
		shift_string_left(first_space(buf1), 1);
		fprintf(fp, "%s ", mkstring(buf2, 5, RJUST, buf1));

		free = LONG(hstate + OFFSET(hstate_free_huge_pages));
		sprintf(buf1, "%ld", free);
		fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1));

		nr = LONG(hstate + OFFSET(hstate_nr_huge_pages));
		sprintf(buf1,
"%ld", nr);
		fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1));

		fprintf(fp, "%s\n", hstate + OFFSET(hstate_name));
	}
	FREEBUF(hstate);
}

/*
 * Initialize the vt->pageflags_data[] name/mask table, preferring the
 * kernel's pageflag_names[] array and falling back to the "pageflags"
 * enumerator list, then derive the PG_reserved and PG_slab flag values.
 */
static void
page_flags_init(void)
{
	if (!page_flags_init_from_pageflag_names())
		page_flags_init_from_pageflags_enum();

	PG_reserved_flag_init();
	PG_slab_flag_init();
}

/*
 * Build vt->pageflags_data[] from the kernel's pageflag_names[] array of
 * trace_print_flags { mask, name } entries.  Returns TRUE on success
 * (also setting vt->nr_pageflags and the PAGEFLAGS flag), FALSE if the
 * array or the trace_print_flags type information is unavailable or any
 * entry cannot be read.
 */
static int
page_flags_init_from_pageflag_names(void)
{
	int i, len;
	char *buffer, *nameptr;
	char namebuf[BUFSIZE];
	ulong mask;
	void *name;

	MEMBER_OFFSET_INIT(trace_print_flags_mask, "trace_print_flags", "mask");
	MEMBER_OFFSET_INIT(trace_print_flags_name, "trace_print_flags", "name");
	STRUCT_SIZE_INIT(trace_print_flags, "trace_print_flags");

	if (INVALID_SIZE(trace_print_flags) ||
	    INVALID_MEMBER(trace_print_flags_mask) ||
	    INVALID_MEMBER(trace_print_flags_name) ||
	    !kernel_symbol_exists("pageflag_names") ||
	    !(len = get_array_length("pageflag_names", NULL, 0)))
		return FALSE;

	/* snapshot the whole pageflag_names[] array in one read */
	buffer = GETBUF(SIZE(trace_print_flags) * len);
	if (!readmem(symbol_value("pageflag_names"), KVADDR, buffer, SIZE(trace_print_flags) * len, "pageflag_names array", RETURN_ON_ERROR)) {
		FREEBUF(buffer);
		return FALSE;
	}

	/* malloc (not GETBUF): this cache persists for the session */
	if (!(vt->pageflags_data = (struct pageflags_data *)
	    malloc(sizeof(struct pageflags_data) * len))) {
		error(INFO, "cannot malloc pageflags_data cache\n");
		FREEBUF(buffer);
		return FALSE;
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "pageflags from pageflag_names: \n");

	for (i = 0; i < len; i++) {
		mask = ULONG(buffer + (SIZE(trace_print_flags)*i) + OFFSET(trace_print_flags_mask));
		name = VOID_PTR(buffer + (SIZE(trace_print_flags)*i) + OFFSET(trace_print_flags_name));

		/* terminator entries mark the true end of the array */
		if ((mask == -1UL) && !name) {  /* Linux 3.5 and earlier */
			len--;
			break;
		}
		if ((mask == 0UL) && !name) {  /* Linux 4.6 and later */
			len--;
			break;
		}

		if (!read_string((ulong)name, namebuf, BUFSIZE-1)) {
			error(INFO, "failed to read pageflag_names entry (i: %d name: %lx mask: %lx)\n", i, name, mask);
			goto pageflags_fail;
		}
		if (!(nameptr = (char *)malloc(strlen(namebuf)+1))) {
			error(INFO, "cannot malloc pageflag_names space\n");
			goto
pageflags_fail; } strcpy(nameptr, namebuf); vt->pageflags_data[i].name = nameptr; vt->pageflags_data[i].mask = mask; if (!strncmp(nameptr, "slab", 4)) vt->flags |= SLAB_PAGEFLAGS; if (CRASHDEBUG(1)) { fprintf(fp, " %08lx %s\n", vt->pageflags_data[i].mask, vt->pageflags_data[i].name); } } FREEBUF(buffer); vt->nr_pageflags = len; vt->flags |= PAGEFLAGS; return TRUE; pageflags_fail: FREEBUF(buffer); free(vt->pageflags_data); vt->pageflags_data = NULL; return FALSE; } static int page_flags_init_from_pageflags_enum(void) { int c; int p, len; char *nameptr; char buf[BUFSIZE]; char *arglist[MAXARGS]; if (!(vt->pageflags_data = (struct pageflags_data *) malloc(sizeof(struct pageflags_data) * 32))) { error(INFO, "cannot malloc pageflags_data cache\n"); return FALSE; } p = 0; pc->flags2 |= ALLOW_FP; open_tmpfile(); if (dump_enumerator_list("pageflags")) { rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (!strstr(buf, " = ")) continue; c = parse_line(buf, arglist); if (strstr(arglist[0], "__NR_PAGEFLAGS")) { len = atoi(arglist[2]); if (!len || (len > 32)) goto enum_fail; vt->nr_pageflags = len; break; } if (!(nameptr = (char *)malloc(strlen(arglist[0])))) { error(INFO, "cannot malloc pageflags name space\n"); goto enum_fail; } strcpy(nameptr, arglist[0] + strlen("PG_")); vt->pageflags_data[p].name = nameptr; vt->pageflags_data[p].mask = 1 << atoi(arglist[2]); if (!strncmp(nameptr, "slab", 4)) vt->flags |= SLAB_PAGEFLAGS; p++; } } else goto enum_fail; close_tmpfile(); pc->flags2 &= ~ALLOW_FP; if (CRASHDEBUG(1)) { fprintf(fp, "pageflags from enum: \n"); for (p = 0; p < vt->nr_pageflags; p++) fprintf(fp, " %08lx %s\n", vt->pageflags_data[p].mask, vt->pageflags_data[p].name); } vt->flags |= PAGEFLAGS; return TRUE; enum_fail: close_tmpfile(); pc->flags2 &= ~ALLOW_FP; for (c = 0; c < p; c++) free(vt->pageflags_data[c].name); free(vt->pageflags_data); vt->pageflags_data = NULL; vt->nr_pageflags = 0; return FALSE; } static int translate_page_flags(char *buffer, 
ulong flags) { char buf[BUFSIZE]; int i, others; sprintf(buf, "%lx", flags); if (flags) { for (i = others = 0; i < vt->nr_pageflags; i++) { if (flags & vt->pageflags_data[i].mask) sprintf(&buf[strlen(buf)], "%s%s", others++ ? "," : " ", vt->pageflags_data[i].name); } } strcat(buf, "\n"); strcpy(buffer, buf); return(strlen(buf)); } /* * Display the mem_map data for a single page. */ int dump_inode_page(ulong page) { struct meminfo meminfo; if (!is_page_ptr(page, NULL)) return 0; BZERO(&meminfo, sizeof(struct meminfo)); meminfo.spec_addr = page; meminfo.memtype = KVADDR; meminfo.flags = ADDRESS_SPECIFIED; dump_mem_map(&meminfo); return meminfo.retval; } /* * dump_page_hash_table() displays the entries in each page_hash_table. */ #define PGHASH_CACHED (1024) static void dump_page_hash_table(struct meminfo *hi) { int i; int len, entry_len; ulong page_hash_table, head; struct list_data list_data, *ld; struct gnu_request req; long total_cached; long page_cache_size; ulong this_addr, searchpage; int errflag, found, cnt, populated, verbose; uint ival; ulong buffer_pages; char buf[BUFSIZE]; char hash_table[BUFSIZE]; char *pcache, *pghash_cache; if (!vt->page_hash_table) { if (hi->flags & VERBOSE) option_not_supported('C'); if (symbol_exists("nr_pagecache")) { buffer_pages = nr_blockdev_pages(); get_symbol_data("nr_pagecache", sizeof(int), &ival); page_cache_size = (ulong)ival; page_cache_size -= buffer_pages; fprintf(fp, "page cache size: %ld\n", page_cache_size); if (hi->flags & ADDRESS_SPECIFIED) option_not_supported('c'); } else option_not_supported('c'); return; } ld = &list_data; if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) { verbose = TRUE; searchpage = hi->spec_addr; } else if (hi->flags & VERBOSE) { verbose = TRUE; searchpage = 0; } else { verbose = FALSE; searchpage = 0; } if (vt->page_hash_table_len == 0) error(FATAL, "cannot determine size of page_hash_table\n"); page_hash_table = vt->page_hash_table; len = vt->page_hash_table_len; entry_len = 
VALID_STRUCT(page_cache_bucket) ? SIZE(page_cache_bucket) : sizeof(void *); populated = 0; if (CRASHDEBUG(1)) fprintf(fp, "page_hash_table length: %d\n", len); get_symbol_type("page_cache_size", NULL, &req); if (req.length == sizeof(int)) { get_symbol_data("page_cache_size", sizeof(int), &ival); page_cache_size = (long)ival; } else get_symbol_data("page_cache_size", sizeof(long), &page_cache_size); pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); if (searchpage) open_tmpfile(); hq_open(); for (i = total_cached = 0; i < len; i++, page_hash_table += entry_len) { if ((i % PGHASH_CACHED) == 0) { readmem(page_hash_table, KVADDR, pghash_cache, entry_len * PGHASH_CACHED, "page hash cache", FAULT_ON_ERROR); } pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); if (VALID_STRUCT(page_cache_bucket)) pcache += OFFSET(page_cache_bucket_chain); head = ULONG(pcache); if (!head) continue; if (verbose) fprintf(fp, "page_hash_table[%d]\n", i); if (CRASHDEBUG(1)) populated++; BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = head; ld->searchfor = searchpage; ld->member_offset = OFFSET(page_next_hash); cnt = do_list(ld); total_cached += cnt; if (ld->searchfor) break; if (received_SIGINT()) restart(0); } hq_close(); fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "", page_cache_size); if (page_cache_size != total_cached) fprintf(fp, "(found %ld)\n", total_cached); else fprintf(fp, "(verified)\n"); if (CRASHDEBUG(1)) fprintf(fp, "heads containing page(s): %d\n", populated); if (searchpage) { rewind(pc->tmpfile); found = FALSE; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "retval = TRUE; } } } /* * dump_free_pages() displays basic data about pages currently resident * in the free_area[] memory lists. If the flags contains the VERBOSE * bit, each page slab base address is dumped. If an address is specified * only the free_area[] data containing that page is displayed, along with * the page slab base address. 
Specified addresses can either be physical * address or page structure pointers. */ char *free_area_hdr1 = \ "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr2 = \ "AREA SIZE FREE_AREA_STRUCT\n"; static void dump_free_pages(struct meminfo *fi) { int i; int order; ulong free_area; char *free_area_buf; ulong *pp; int nr_mem_lists; struct list_data list_data, *ld; long cnt, total_free, chunk_size; int nr_free_pages; char buf[BUFSIZE]; char last_free[BUFSIZE]; char last_free_hdr[BUFSIZE]; int verbose, errflag, found; physaddr_t searchphys; ulong this_addr; physaddr_t this_phys; int do_search; ulong kfp, offset; int flen, dimension; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (nr_mem_lists == 0) error(FATAL, "cannot determine size/dimensions of free_area\n"); if (dimension) error(FATAL, "dump_free_pages called with multidimensional free area\n"); ld = &list_data; total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); kfp = free_area = symbol_value("free_area"); flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); readmem(free_area, KVADDR, free_area_buf, SIZE(free_area_struct) * nr_mem_lists, "free_area_struct", FAULT_ON_ERROR); if (do_search) open_tmpfile(); if (!verbose) fprintf(fp, "%s", free_area_hdr1); hq_open(); for (i = 0; i < nr_mem_lists; i++) { pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); chunk_size = power(2, i); if (verbose) fprintf(fp, "%s", free_area_hdr2); fprintf(fp, "%3d ", i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? "\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } hq_close(); fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { order--; fprintf(fp, "%s", last_free_hdr); fprintf(fp, "%s", last_free); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? 
"in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages on kernels with a multi-dimensional free_area array. */ char *free_area_hdr5 = \ " AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr6 = \ " AREA SIZE FREE_AREA_STRUCT\n"; static void dump_multidimensional_free_pages(struct meminfo *fi) { int i, j; struct list_data list_data, *ld; long cnt, total_free; ulong kfp, free_area; physaddr_t searchphys; int flen, errflag, verbose, nr_free_pages; int nr_mem_lists, dimension, order, do_search; ulong sum, found, offset; char *free_area_buf, *p; ulong *pp; long chunk_size; ulong this_addr; physaddr_t this_phys; char buf[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_multidimensional_free_pages called with (NODES|ZONES)\n"); ld = &list_data; if (SIZE(free_area_struct) % sizeof(ulong)) error(FATAL, "free_area_struct not long-word aligned?\n"); total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_multidimensional_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (!nr_mem_lists || !dimension) error(FATAL, "cannot determine free_area dimensions\n"); free_area_buf = GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension); kfp = free_area = symbol_value("free_area"); readmem(free_area, KVADDR, free_area_buf, (SIZE(free_area_struct) * nr_mem_lists) * dimension, "free_area arrays", FAULT_ON_ERROR); if (do_search) open_tmpfile(); hq_open(); for (i = sum = found = 0; i < dimension; i++) { if (!verbose) fprintf(fp, "%s", free_area_hdr5); pp = (ulong *)(free_area_buf + ((SIZE(free_area_struct)*nr_mem_lists)*i)); for (j = 0; j < nr_mem_lists; j++) { if (verbose) fprintf(fp, "%s", free_area_hdr6); sprintf(buf, "[%d][%d]", i, j); fprintf(fp, "%7s ", buf); chunk_size = power(2, j); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? 
"\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); pp += (SIZE(free_area_struct)/sizeof(ulong)); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } fprintf(fp, "\n"); } hq_close(); fprintf(fp, "nr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages in newer kernels that have zones. This is a work in * progress, because although the framework for memory nodes has been laid * down, complete support has not been put in place. 
*/ static char *zone_hdr = "ZONE NAME SIZE FREE"; static void dump_free_pages_zones_v1(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t this_phys, searchphys; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v1: no memtype specified\n"); } do_search = TRUE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; chunk_size = 0; zone_size_offset = 0; if (VALID_MEMBER(zone_struct_size)) zone_size_offset = OFFSET(zone_struct_size); else if (VALID_MEMBER(zone_struct_memsize)) zone_size_offset = OFFSET(zone_struct_memsize); else error(FATAL, "zone_struct has neither size nor memsize field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone_struct); continue; } if ((i == 0) && (vt->flags & NODES)) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? 
"\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); readmem(node_zones+OFFSET(zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) found += dump_zone_free_area(node_zones+ OFFSET(zone_struct_free_area), vt->nr_free_areas, verbose, NULL); node_zones += SIZE(zone_struct); } } hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); 
strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Callback function for free-list search for a specific page. 
*/ struct free_page_callback_data { ulong searchpage; long chunk_size; ulong page; int found; }; static int free_page_callback(void *page, void *arg) { struct free_page_callback_data *cbd = arg; ulong first_page, last_page; first_page = (ulong)page; last_page = first_page + (cbd->chunk_size * SIZE(page)); if ((cbd->searchpage >= first_page) && (cbd->searchpage <= last_page)) { cbd->page = (ulong)page; cbd->found = TRUE; return TRUE; } return FALSE; } /* * Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone * and free_area related data structure changes. */ static void dump_free_pages_zones_v2(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t phys, this_phys, searchphys, end_paddr; ulong searchpage; struct free_page_callback_data callback_data; ulong pp; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_pfn; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v2 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v2: no memtype specified\n"); } if (!phys_to_page(searchphys, &searchpage)) { error(INFO, "cannot determine page for %lx\n", fi->spec_addr); return; } do_search = TRUE; callback_data.searchpage = searchpage; callback_data.found = FALSE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; zone_size_offset = 0; chunk_size = 0; this_addr = 0; if (VALID_MEMBER(zone_spanned_pages)) zone_size_offset = OFFSET(zone_spanned_pages); else error(FATAL, "zone struct has no spanned_pages field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1) && STREQ(buf, "HighMem")) vt->ZONE_HIGHMEM = i; if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone); continue; } if ((i == 0) && ((vt->flags & NODES) || (vt->numnodes > 1))) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? 
"\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); if (VALID_MEMBER(zone_zone_mem_map)) { readmem(node_zones+OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); } readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (!VALID_MEMBER(zone_zone_mem_map)) { if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) { zone_mem_map = 0; if (size) { phys = PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (vt->flags & FLATMEM) { zone_mem_map = 0; if (size) zone_mem_map = nt->mem_map + (zone_start_pfn * SIZE(page)); } else error(FATAL, "\ncannot determine zone mem_map: TBD\n"); } if (zone_mem_map) zone_start_mapnr = (zone_mem_map - nt->mem_map) / SIZE(page); else zone_start_mapnr = 0; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) { if (do_search) { end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((searchphys >= nt->start_paddr) && (searchphys < end_paddr)) found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, &callback_data); if 
(callback_data.found) goto done_search; } else found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, NULL); } node_zones += SIZE(zone); } } done_search: hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); 
break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages)", ordinal(offset+1, buf), chunk_size/PAGESIZE()); } fi->retval = TRUE; fprintf(fp, "\n"); } } static char * page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH"; /* * Display info about the non-free pages in each zone. */ static int dump_zone_page_usage(void) { int i, n; ulong value, node_zones; struct node_table *nt; ulong inactive_dirty_pages, inactive_clean_pages, active_pages; ulong free_pages, pages_min, pages_low, pages_high; char namebuf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) || !VALID_MEMBER(zone_struct_inactive_clean_pages) || !VALID_MEMBER(zone_struct_active_pages) || !VALID_MEMBER(zone_struct_pages_min) || !VALID_MEMBER(zone_struct_pages_low) || !VALID_MEMBER(zone_struct_pages_high)) return FALSE; fprintf(fp, "\n"); for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); if ((vt->numnodes > 1) && (vt->flags & NODES)) { fprintf(fp, "%sNODE\n %2d\n", n ? 
"\n" : "", nt->node_id); } fprintf(fp, "%s\n", page_usage_hdr); for (i = 0; i < vt->nr_zones; i++) { readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &free_pages, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); readmem(node_zones+ OFFSET(zone_struct_inactive_dirty_pages), KVADDR, &inactive_dirty_pages, sizeof(ulong), "node_zones inactive_dirty_pages", FAULT_ON_ERROR); readmem(node_zones+ OFFSET(zone_struct_inactive_clean_pages), KVADDR, &inactive_clean_pages, sizeof(ulong), "node_zones inactive_clean_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_active_pages), KVADDR, &active_pages, sizeof(ulong), "node_zones active_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_min), KVADDR, &pages_min, sizeof(ulong), "node_zones pages_min", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_low), KVADDR, &pages_low, sizeof(ulong), "node_zones pages_low", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_high), KVADDR, &pages_high, sizeof(ulong), "node_zones pages_high", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf1, BUFSIZE-1)) sprintf(namebuf, "%-8s", buf1); else sprintf(namebuf, "(unknown)"); sprintf(buf2, "%ld/%ld/%ld", pages_min, pages_low, pages_high); fprintf(fp, "%3d %s %7ld %7ld %15ld %15ld %s\n", i, namebuf, free_pages, active_pages, inactive_dirty_pages, inactive_clean_pages, mkstring(buf3, strlen("MIN/LOW/HIGH"), CENTER, buf2)); node_zones += SIZE(zone_struct); } } return TRUE; } /* * Dump the num "order" contents of the zone_t free_area array. 
 */
char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n";
char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n";

/*
 * Walk the "num" order-indexed free areas starting at kernel address
 * "free_area", printing each order's free-block list and returning the
 * total number of free pages found.  When "callback_data" is non-NULL the
 * walk doubles as a search for callback_data->searchpage (see
 * free_page_callback()).  Handles both the 2.4 free_area_struct layout
 * (single list per order) and the 2.6+ free_area layout (one list per
 * migrate type per order).
 */
static int
dump_zone_free_area(ulong free_area, int num, ulong verbose,
	struct free_page_callback_data *callback_data)
{
	int i, j;
	long chunk_size;
	int flen, total_free, cnt;
	char buf[BUFSIZE];
	ulong free_area_buf[3];
	char *free_area_buf2;
	char *free_list_buf;
	ulong free_list;
	struct list_data list_data, *ld;
	int list_count;
	ulong *free_ptr;

	list_count = 0;
	free_list_buf = free_area_buf2 = NULL;

	/*
	 * Determine how many free lists hang off each free area entry:
	 * one for the old layouts, MIGRATE_TYPES for the multi-list layout.
	 */
	if (VALID_STRUCT(free_area_struct)) {
		if (SIZE(free_area_struct) != (3 * sizeof(ulong)))
			error(FATAL,
			    "unrecognized free_area_struct size: %ld\n",
			    SIZE(free_area_struct));
		list_count = 1;
	} else if (VALID_STRUCT(free_area)) {
		if (SIZE(free_area) == (3 * sizeof(ulong)))
			list_count = 1;
		else {
			list_count = MEMBER_SIZE("free_area",
				"free_list")/SIZE(list_head);
			free_area_buf2 = GETBUF(SIZE(free_area));
			free_list_buf = GETBUF(SIZE(list_head));
			readmem(free_area, KVADDR, free_area_buf2,
				SIZE(free_area), "free_area struct",
				FAULT_ON_ERROR);
		}
	} else
		error(FATAL,
			"neither free_area_struct or free_area structures exist\n");

	ld = &list_data;

	if (!verbose)
		fprintf(fp, "%s", free_area_hdr4);

	total_free = 0;
	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));

	if (list_count > 1)
		goto multiple_lists;

	/* Single free list per order (2.4 and early 2.6 layouts). */
	for (i = 0; i < num; i++,
	     free_area += SIZE_OPTION(free_area_struct, free_area)) {
		if (verbose)
			fprintf(fp, "%s", free_area_hdr3);
		fprintf(fp, "%3d ", i);
		chunk_size = power(2, i);	/* pages per block at order i */
		sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
		fprintf(fp, " %7s ", buf);
		readmem(free_area, KVADDR, free_area_buf,
			sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR);
		fprintf(fp, "%s ",
			mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area)));
		/* list head pointing at itself means the list is empty */
		if (free_area_buf[0] == free_area) {
			if (verbose)
				fprintf(fp, "\n");
			else
				fprintf(fp, "%6d %6d\n", 0, 0);
			continue;
		}
		if (verbose)
			fprintf(fp, "\n");

		BZERO(ld, sizeof(struct list_data));
		ld->flags = verbose | RETURN_ON_DUPLICATE;
		ld->start = free_area_buf[0];
		ld->end = free_area;
		if (VALID_MEMBER(page_list_next))
			ld->list_head_offset = OFFSET(page_list);
		else if (VALID_MEMBER(page_lru))
			ld->list_head_offset = OFFSET(page_lru)+
				OFFSET(list_head_next);
		else
			error(FATAL,
				"neither page.list or page.lru exist?\n");

		cnt = do_list(ld);
		if (cnt < 0) {
			error(pc->curcmd_flags & IGNORE_ERRORS ? INFO : FATAL,
			    "corrupted free list from free_area_struct: %lx\n",
				free_area);
			if (pc->curcmd_flags & IGNORE_ERRORS)
				break;
		}

		if (!verbose)
			fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size);

		total_free += (cnt * chunk_size);
	}

	return total_free;

multiple_lists:
	/* One free list per migrate type per order (2.6.24+ layout). */
	for (i = 0; i < num; i++,
	     free_area += SIZE_OPTION(free_area_struct, free_area)) {

		readmem(free_area, KVADDR, free_area_buf2,
			SIZE(free_area), "free_area struct", FAULT_ON_ERROR);

		for (j = 0, free_list = free_area; j < list_count;
		     j++, free_list += SIZE(list_head)) {

			if (verbose)
				fprintf(fp, "%s", free_area_hdr3);

			fprintf(fp, "%3d ", i);
			chunk_size = power(2, i);
			sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
			fprintf(fp, " %7s ", buf);

			readmem(free_list, KVADDR, free_list_buf,
				SIZE(list_head), "free_area free_list",
				FAULT_ON_ERROR);
			fprintf(fp, "%s ",
				mkstring(buf, flen, CENTER|LONG_HEX,
				MKSTR(free_list)));

			free_ptr = (ulong *)free_list_buf;

			/* empty list: head's next points back at itself */
			if (*free_ptr == free_list) {
				if (verbose)
					fprintf(fp, "\n");
				else
					fprintf(fp, "%6d %6d\n", 0, 0);
				continue;
			}
			if (verbose)
				fprintf(fp, "\n");

			BZERO(ld, sizeof(struct list_data));
			ld->flags = verbose | RETURN_ON_DUPLICATE;
			ld->start = *free_ptr;
			ld->end = free_list;
			ld->list_head_offset = OFFSET(page_lru) +
				OFFSET(list_head_next);
			/* search mode: have do_list() probe each chunk */
			if (callback_data) {
				ld->flags &= ~VERBOSE;
				ld->flags |= (LIST_CALLBACK|CALLBACK_RETURN);
				ld->callback_func = free_page_callback;
				ld->callback_data = (void *)callback_data;
				callback_data->chunk_size = chunk_size;
			}

			cnt = do_list(ld);
			if (cnt < 0) {
				error(pc->curcmd_flags & IGNORE_ERRORS ?
				    INFO : FATAL,
				    "corrupted free list %d from free_area struct: %lx\n",
					j, free_area);
				if (pc->curcmd_flags & IGNORE_ERRORS)
					goto bailout;
			}

			if (callback_data && callback_data->found) {
				fprintf(fp, "%lx\n", callback_data->page);
				goto bailout;
			}

			if (!verbose)
				fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size);

			total_free += (cnt * chunk_size);
		}
	}

bailout:
	FREEBUF(free_area_buf2);
	FREEBUF(free_list_buf);
	return total_free;
}

/*
 * dump_kmeminfo displays basic memory use information typically shown
 * by /proc/meminfo, and then some...
 */
char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n";

static void
dump_kmeminfo(struct meminfo *mi)
{
	int i, len;
	ulong totalram_pages;
	ulong freeram_pages;
	ulong used_pages;
	ulong shared_pages;
	ulong buffer_pages;
	ulong subtract_buffer_pages;
	ulong totalswap_pages, totalused_pages;
	ulong totalhigh_pages;
	ulong freehighmem_pages;
	ulong totallowmem_pages;
	ulong freelowmem_pages;
	ulong allowed;
	long committed;
	ulong overcommit_kbytes = 0;
	int overcommit_ratio;
	ulong hugetlb_total_pages, hugetlb_total_free_pages = 0;
	int done_hugetlb_calc = 0;
	long nr_file_pages, nr_slab;
	long swapper_space_nrpages;
	ulong pct;
	uint tmp;
	struct meminfo meminfo;
	struct gnu_request req;
	long page_cache_size;
	ulong get_totalram;
	ulong get_buffers;
	ulong get_slabs;
	char buf[BUFSIZE];
	ulong flags;

	/*
	 * By default, we will no longer call dump_mem_map() as this is too
	 * slow for large memory systems.  If we have to call it (eg. missing
	 * important information such as slabs or total ram), we will also
	 * collect shared pages.  Otherwise, we won't print shared pages unless
	 * the caller explicitly requested shared pages ("kmem -i=shared").
	 */
	flags = mi->flags;
	shared_pages = 0;
	get_totalram = 0;
	get_buffers = 0;
	get_slabs = 0;

	/*
	 * If the vm_stat array does not exist, then set the mem map flag.
	 */
	if (vm_stat_init()) {
		if (dump_vm_stat("NR_SLAB", &nr_slab, 0))
			get_slabs = nr_slab;
		else if (dump_vm_stat("NR_SLAB_RECLAIMABLE", &nr_slab, 0)) {
			get_slabs = nr_slab;
			if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0))
				get_slabs += nr_slab;
		} else if (dump_vm_stat("NR_SLAB_RECLAIMABLE_B", &nr_slab, 0)) {
			/* 5.9 and later */
			get_slabs = nr_slab;
			if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE_B", &nr_slab, 0))
				get_slabs += nr_slab;
		}
	} else {
		flags |= GET_SLAB_PAGES;
	}

	/*
	 * Get total RAM based upon how the various versions of si_meminfo()
	 * have done it, latest to earliest:
	 *
	 *  Prior to 2.3.36, count all mem_map pages minus the reserved ones.
	 *  From 2.3.36 onwards, use "totalram_pages" if set.
	 */
	if (symbol_exists("totalram_pages") ||
	    symbol_exists("_totalram_pages")) {
		totalram_pages = vt->totalram_pages ?
			vt->totalram_pages : get_totalram;
	} else {
		flags |= GET_TOTALRAM_PAGES;
		totalram_pages = 0;
	}

	/*
	 * If the caller wants shared pages or if we are missing important data
	 * (ie. slab or totalram) then go through the slow dump_mem_map() path.
	 */
	if (flags) {
		BZERO(&meminfo, sizeof(struct meminfo));
		meminfo.flags = GET_ALL;
		dump_mem_map(&meminfo);

		/* Update the missing information */
		if (flags & GET_SLAB_PAGES) {
			get_slabs = meminfo.get_slabs;
		}

		if (flags & GET_TOTALRAM_PAGES) {
			get_totalram = meminfo.get_totalram;
			totalram_pages = get_totalram;
		}

		shared_pages = meminfo.get_shared;
		get_buffers = meminfo.get_buffers;
	}

	fprintf(fp, "%s", kmeminfo_hdr);

	fprintf(fp, "%13s %7ld %11s ----\n", "TOTAL MEM",
		totalram_pages, pages_to_size(totalram_pages, buf));

	/*
	 * Get free pages from dump_free_pages() or its associates.
	 * Used pages are a free-bee...
	 */
	meminfo.flags = GET_FREE_PAGES;
	vt->dump_free_pages(&meminfo);
	freeram_pages = meminfo.retval;
	pct = (freeram_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"FREE", freeram_pages,
		pages_to_size(freeram_pages, buf), pct);

	used_pages = totalram_pages - freeram_pages;
	pct = (used_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"USED", used_pages, pages_to_size(used_pages, buf), pct);

	/*
	 * Get shared pages from dump_mem_map().  Note that this is done
	 * differently than the kernel -- it just tallies the non-reserved
	 * pages that have a count of greater than 1.
	 */
	if (flags & GET_SHARED_PAGES) {
		pct = (shared_pages * 100)/totalram_pages;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
			"SHARED", shared_pages,
			pages_to_size(shared_pages, buf), pct);
	}

	subtract_buffer_pages = 0;
	if (symbol_exists("buffermem_pages")) {
		get_symbol_data("buffermem_pages", sizeof(int), &tmp);
		buffer_pages = (ulong)tmp;
	} else if (symbol_exists("buffermem")) {
		get_symbol_data("buffermem", sizeof(int), &tmp);
		buffer_pages = BTOP(tmp);
	} else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) &&
		symbol_exists("nr_blockdev_pages")) {
		subtract_buffer_pages = buffer_pages = nr_blockdev_pages();
	} else
		buffer_pages = 0;

	pct = (buffer_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"BUFFERS", buffer_pages,
		pages_to_size(buffer_pages, buf), pct);

	if (CRASHDEBUG(1))
		error(NOTE, "pages with buffers: %ld\n", get_buffers);

	/*
	 * page_cache_size has evolved from a long to an atomic_t to
	 * not existing at all.
	 */
	if (symbol_exists("page_cache_size")) {
		get_symbol_type("page_cache_size", NULL, &req);
		if (req.length == sizeof(int)) {
			get_symbol_data("page_cache_size", sizeof(int), &tmp);
			page_cache_size = (long)tmp;
		} else
			get_symbol_data("page_cache_size", sizeof(long),
				&page_cache_size);
		page_cache_size -= subtract_buffer_pages;
	} else if (symbol_exists("nr_pagecache")) {
		get_symbol_data("nr_pagecache", sizeof(int), &tmp);
		page_cache_size = (long)tmp;
		page_cache_size -= subtract_buffer_pages;
	} else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages, 0)) {
		/*
		 * Modern kernels: cached = NR_FILE_PAGES minus whatever is
		 * in the swap cache, minus the block-device buffers counted
		 * above.  The swap-cache count itself has moved around over
		 * the years, hence the chain of fallbacks below.
		 */
		char *swapper_space = GETBUF(SIZE(address_space));

		swapper_space_nrpages = 0;
		if (dump_vm_stat("NR_SWAPCACHE", &swapper_space_nrpages, 0)) {
			;
		} else if (symbol_exists("nr_swapper_spaces") &&
		    (len = get_array_length("nr_swapper_spaces", NULL, 0))) {
			char *nr_swapper_space =
				GETBUF(len * sizeof(unsigned int));
			readmem(symbol_value("nr_swapper_spaces"), KVADDR,
				nr_swapper_space, len * sizeof(unsigned int),
                                "nr_swapper_space", RETURN_ON_ERROR);
			for (i = 0; i < len; i++) {
				int j;
				unsigned long sa;
				unsigned int banks = UINT(nr_swapper_space +
					(i * sizeof(unsigned int)));

				if (!banks)
					continue;

				readmem(symbol_value("swapper_spaces") +
					(i * sizeof(void *)),KVADDR,
					&sa, sizeof(void *),
                                	"swapper_space", RETURN_ON_ERROR);

				if (!sa)
					continue;

				for (j = 0; j < banks; j++) {
					readmem(sa + j * SIZE(address_space),
						KVADDR, swapper_space,
						SIZE(address_space),
						"swapper_space",
						RETURN_ON_ERROR);
					swapper_space_nrpages +=
						ULONG(swapper_space +
						OFFSET(address_space_nrpages));
				}
			}
			FREEBUF(nr_swapper_space);
		} else if (symbol_exists("swapper_spaces") &&
			(len = get_array_length("swapper_spaces", NULL, 0))) {
			for (i = 0; i < len; i++) {
				if (!readmem(symbol_value("swapper_spaces") +
					i * SIZE(address_space), KVADDR,
					swapper_space, SIZE(address_space),
					"swapper_space", RETURN_ON_ERROR))
					break;
				swapper_space_nrpages += ULONG(swapper_space +
					OFFSET(address_space_nrpages));
			}
		} else if (symbol_exists("swapper_space") &&
			readmem(symbol_value("swapper_space"), KVADDR,
			swapper_space, SIZE(address_space), "swapper_space",
			RETURN_ON_ERROR))
			swapper_space_nrpages = ULONG(swapper_space +
				OFFSET(address_space_nrpages));

		page_cache_size = nr_file_pages - swapper_space_nrpages -
			buffer_pages;
		FREEBUF(swapper_space);
	} else
		page_cache_size = 0;

	if (page_cache_size < 0) {
		error(INFO, "page_cache_size went negative (%ld), setting to 0\n",
			page_cache_size);
		page_cache_size = 0;
	}

	pct = (page_cache_size * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"CACHED", page_cache_size,
		pages_to_size(page_cache_size, buf), pct);

	/*
 	 *  Although /proc/meminfo doesn't show it, show how much memory
	 *  the slabs take up.
	 */
	pct = (get_slabs * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"SLAB", get_slabs, pages_to_size(get_slabs, buf), pct);

	if (symbol_exists("totalhigh_pages") ||
	    symbol_exists("_totalhigh_pages")) {
		totalhigh_pages = vt->totalhigh_pages;
		pct = totalhigh_pages ?
			(totalhigh_pages * 100)/totalram_pages : 0;
		fprintf(fp, "\n%13s %7ld %11s %3ld%% of TOTAL MEM\n",
			"TOTAL HIGH", totalhigh_pages,
			pages_to_size(totalhigh_pages, buf), pct);

		meminfo.flags = GET_FREE_HIGHMEM_PAGES;
		vt->dump_free_pages(&meminfo);
		freehighmem_pages = meminfo.retval;
		pct = freehighmem_pages ?
			(freehighmem_pages * 100)/totalhigh_pages : 0;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HIGH\n",
			"FREE HIGH", freehighmem_pages,
			pages_to_size(freehighmem_pages, buf), pct);

		totallowmem_pages = totalram_pages - totalhigh_pages;
		pct = (totallowmem_pages * 100)/totalram_pages;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
			"TOTAL LOW", totallowmem_pages,
			pages_to_size(totallowmem_pages, buf), pct);

		freelowmem_pages = freeram_pages - freehighmem_pages;
		pct = (freelowmem_pages * 100)/totallowmem_pages;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LOW\n",
			"FREE LOW", freelowmem_pages,
			pages_to_size(freelowmem_pages, buf), pct);
	}

	if (get_hugetlb_total_pages(&hugetlb_total_pages,
	    &hugetlb_total_free_pages)) {
		done_hugetlb_calc = 1;

		fprintf(fp, "\n%13s %7ld %11s ----\n",
			"TOTAL HUGE", hugetlb_total_pages,
			pages_to_size(hugetlb_total_pages, buf));
		pct = hugetlb_total_free_pages ?
			(hugetlb_total_free_pages * 100) /
			hugetlb_total_pages : 0;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HUGE\n",
			"HUGE FREE", hugetlb_total_free_pages,
			pages_to_size(hugetlb_total_free_pages, buf), pct);
	}

	/*
	 *  get swap data from dump_swap_info().
	 */
	fprintf(fp, "\n");
	if (symbol_exists("swapper_space") || symbol_exists("swapper_spaces")) {
		if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages,
		    &totalused_pages)) {
			fprintf(fp, "%13s %7ld %11s ----\n",
				"TOTAL SWAP", totalswap_pages,
				pages_to_size(totalswap_pages, buf));
			pct = totalswap_pages ? (totalused_pages * 100) /
				totalswap_pages : 100;
			fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n",
				"SWAP USED", totalused_pages,
				pages_to_size(totalused_pages, buf), pct);
			pct = totalswap_pages ?
				((totalswap_pages - totalused_pages) *
				100) / totalswap_pages : 0;
			fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n",
				"SWAP FREE",
				totalswap_pages - totalused_pages,
				pages_to_size(totalswap_pages -
				totalused_pages, buf), pct);
		} else
			error(INFO,
			    "swap_info[%ld].swap_map at %lx is inaccessible\n",
				totalused_pages, totalswap_pages);
	}

	/*
	 * Show committed memory
	 */
	if (kernel_symbol_exists("sysctl_overcommit_memory")) {
		fprintf(fp, "\n");
		if (kernel_symbol_exists("sysctl_overcommit_kbytes"))
			get_symbol_data("sysctl_overcommit_kbytes",
				sizeof(ulong), &overcommit_kbytes);

		if (overcommit_kbytes)
			allowed = overcommit_kbytes >>
				(machdep->pageshift - 10);
		else {
			get_symbol_data("sysctl_overcommit_ratio",
				sizeof(int), &overcommit_ratio);
			if (!done_hugetlb_calc)
				goto bailout;
			allowed = ((totalram_pages - hugetlb_total_pages)
				* overcommit_ratio / 100);
		}

		if (symbol_exists("vm_committed_as")) {
			if (INVALID_MEMBER(percpu_counter_count))
				goto bailout;
			readmem(symbol_value("vm_committed_as") +
				OFFSET(percpu_counter_count), KVADDR,
				&committed, sizeof(long),
				"percpu_counter count", FAULT_ON_ERROR);

			/* Ensure always positive */
			if (committed < 0)
				committed = 0;
		} else {
			if (INVALID_MEMBER(atomic_t_counter))
				goto bailout;
			readmem(symbol_value("vm_committed_space") +
				OFFSET(atomic_t_counter), KVADDR,
				&committed, sizeof(int),
				"atomic_t counter", FAULT_ON_ERROR);
		}
		allowed += totalswap_pages;
		fprintf(fp, "%13s %7ld %11s ----\n",
			"COMMIT LIMIT", allowed,
			pages_to_size(allowed, buf));

		if (allowed) {
			pct = committed ? ((committed * 100) / allowed) : 0;
			fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LIMIT\n",
				"COMMITTED", committed,
				pages_to_size(committed, buf), pct);
		} else
			fprintf(fp, "%13s %7ld %11s ----\n",
				"COMMITTED", committed,
				pages_to_size(committed, buf));
	}

bailout:
	dump_zone_page_usage();
}

/*
 * Emulate 2.6 nr_blockdev_pages() function.
*/ static ulong nr_blockdev_pages(void) { struct list_data list_data, *ld; int i, bdevcnt; ulong inode, address_space; ulong nrpages; char *block_device_buf, *inode_buf, *address_space_buf; if (!kernel_symbol_exists("all_bdevs")) return nr_blockdev_pages_v2(); ld = &list_data; BZERO(ld, sizeof(struct list_data)); get_symbol_data("all_bdevs", sizeof(void *), &ld->start); if (empty_list(ld->start)) return 0; ld->flags |= LIST_ALLOCATE; ld->end = symbol_value("all_bdevs"); ld->list_head_offset = OFFSET(block_device_bd_list); block_device_buf = GETBUF(SIZE(block_device)); inode_buf = GETBUF(SIZE(inode)); address_space_buf = GETBUF(SIZE(address_space)); bdevcnt = do_list(ld); /* * go through the block_device list, emulating: * * ret += bdev->bd_inode->i_mapping->nrpages; */ for (i = nrpages = 0; i < bdevcnt; i++) { readmem(ld->list_ptr[i], KVADDR, block_device_buf, SIZE(block_device), "block_device buffer", FAULT_ON_ERROR); inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode)); readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", FAULT_ON_ERROR); address_space = ULONG(inode_buf + OFFSET(inode_i_mapping)); readmem(address_space, KVADDR, address_space_buf, SIZE(address_space), "address_space buffer", FAULT_ON_ERROR); nrpages += ULONG(address_space_buf + OFFSET(address_space_nrpages)); } FREEBUF(ld->list_ptr); FREEBUF(block_device_buf); FREEBUF(inode_buf); FREEBUF(address_space_buf); return nrpages; } /* * Emulate 5.9 nr_blockdev_pages() function. 
*/ static ulong nr_blockdev_pages_v2(void) { struct list_data list_data, *ld; ulong bd_sb, address_space; ulong nrpages; int i, inode_count; char *inode_buf, *address_space_buf; ld = &list_data; BZERO(ld, sizeof(struct list_data)); get_symbol_data("blockdev_superblock", sizeof(void *), &bd_sb); readmem(bd_sb + OFFSET(super_block_s_inodes), KVADDR, &ld->start, sizeof(ulong), "blockdev_superblock.s_inodes", FAULT_ON_ERROR); if (empty_list(ld->start)) return 0; ld->flags |= LIST_ALLOCATE; ld->end = bd_sb + OFFSET(super_block_s_inodes); ld->list_head_offset = OFFSET(inode_i_sb_list); inode_buf = GETBUF(SIZE(inode)); address_space_buf = GETBUF(SIZE(address_space)); inode_count = do_list(ld); /* * go through the s_inodes list, emulating: * * ret += inode->i_mapping->nrpages; */ for (i = nrpages = 0; i < inode_count; i++) { readmem(ld->list_ptr[i], KVADDR, inode_buf, SIZE(inode), "inode buffer", FAULT_ON_ERROR); address_space = ULONG(inode_buf + OFFSET(inode_i_mapping)); readmem(address_space, KVADDR, address_space_buf, SIZE(address_space), "address_space buffer", FAULT_ON_ERROR); nrpages += ULONG(address_space_buf + OFFSET(address_space_nrpages)); } FREEBUF(ld->list_ptr); FREEBUF(inode_buf); FREEBUF(address_space_buf); return nrpages; } /* * dump_vmlist() displays information from the vmlist. 
 */
/*
 * Walk the kernel's "vmlist" chain of vm_struct entries, displaying each
 * vmalloc'd region, or -- depending on vi->flags -- counting/collecting
 * them, finding the highest mapped address, verifying a given address, or
 * translating a physical address back to its vmalloc address.  Dispatches
 * to dump_vmap_area() on kernels that track regions via vmap_areas.
 */
static void
dump_vmlist(struct meminfo *vi)
{
	char buf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	ulong vmlist;
	ulong addr, size, next, pcheck, count, verified;
	physaddr_t paddr;
	int mod_vmlist;

	/* newer kernels: no vmlist -- use the vmap_area machinery */
	if (vt->flags & (USE_VMAP_AREA|USE_VMAP_NODES)) {
		dump_vmap_area(vi);
		return;
	}

	get_symbol_data("vmlist", sizeof(void *), &vmlist);
	next = vmlist;
	count = verified = 0;
	mod_vmlist = kernel_symbol_exists("mod_vmlist");

	while (next) {
		/* print the column header once, unless in a query mode */
		if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) &&
		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
		    GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
			fprintf(fp, "%s ",
			    mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN),
				CENTER|LJUST, "VM_STRUCT"));
			fprintf(fp, "%s SIZE\n",
			    mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "),
				CENTER|LJUST, "ADDRESS RANGE"));
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		readmem(next+OFFSET(vm_struct_addr), KVADDR,
			&addr, sizeof(void *),
			"vmlist addr", FAULT_ON_ERROR);
		readmem(next+OFFSET(vm_struct_size), KVADDR,
			&size, sizeof(ulong),
			"vmlist size", FAULT_ON_ERROR);

		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
			/*
			 * Preceding GET_VMLIST_COUNT set vi->retval.
			 */
			if (vi->flags & GET_VMLIST) {
				if (count < vi->retval) {
					vi->vmlist[count].addr = addr;
					vi->vmlist[count].size = size;
				}
			}
			count++;
			goto next_entry;
		}

		if (!(vi->flags & ADDRESS_SPECIFIED) ||
		    ((vi->memtype == KVADDR) && ((vi->spec_addr >= addr) &&
		    (vi->spec_addr < (addr+size))))) {
			if (vi->flags & VMLIST_VERIFY) {
				verified++;
				break;
			}
			fprintf(fp, "%s%s %s - %s %6ld\n",
				mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
				MKSTR(next)), space(MINSPACE-1),
				mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST,
				MKSTR(addr)),
				mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST,
				MKSTR(addr+size)),
				size);
		}

		if ((vi->flags & ADDRESS_SPECIFIED) &&
		     (vi->memtype == PHYSADDR)) {
			/*
			 * Physical-address search: translate each page of
			 * the region and test for a match.
			 */
			for (pcheck = addr; pcheck < (addr+size);
			     pcheck += PAGESIZE()) {
				if (!kvtop(NULL, pcheck, &paddr, 0))
					continue;
				if ((vi->spec_addr >= paddr) &&
				    (vi->spec_addr < (paddr+PAGESIZE()))) {
					if (vi->flags & GET_PHYS_TO_VMALLOC) {
						vi->retval = pcheck +
						    PAGEOFFSET(vi->spec_addr);
						return;
					} else
						fprintf(fp,
						    "%s%s %s - %s %6ld\n",
						    mkstring(buf, VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(next)),
						    space(MINSPACE-1),
						    mkstring(buf1, VADDR_PRLEN,
						    LONG_HEX|RJUST,
						    MKSTR(addr)),
						    mkstring(buf2, VADDR_PRLEN,
						    LONG_HEX|LJUST,
						    MKSTR(addr+size)), size);
					break;
				}
			}
		}

next_entry:
		readmem(next+OFFSET(vm_struct_next),
			KVADDR, &next, sizeof(void *),
			"vmlist next", FAULT_ON_ERROR);
		/* older kernels chain module mappings on a separate list */
		if (!next && mod_vmlist) {
			get_symbol_data("mod_vmlist", sizeof(void *), &next);
			mod_vmlist = FALSE;
		}
	}

	if (vi->flags & GET_HIGHEST)
		vi->retval = addr+size;

	if (vi->flags & GET_VMLIST_COUNT)
		vi->retval = count;

	if (vi->flags & VMLIST_VERIFY)
		vi->retval = verified;
}

/*
 * qsort() comparator ordering vmap_area addresses by their va_start
 * values.  Note that each comparison performs two readmem() calls.
 */
static int
sort_by_va_start(const void *arg1, const void *arg2)
{
	ulong va_start1, va_start2;

	readmem(*(ulong *)arg1 + OFFSET(vmap_area_va_start), KVADDR,
		&va_start1, sizeof(void *),
		"vmap_area.va_start", FAULT_ON_ERROR);
	readmem(*(ulong *)arg2 + OFFSET(vmap_area_va_start), KVADDR,
		&va_start2, sizeof(void *),
		"vmap_area.va_start", FAULT_ON_ERROR);

	return va_start1 < va_start2 ? -1 : (va_start1 == va_start2 ? 0 : 1);
}

/* Linux 6.9 and later kernels use "vmap_nodes". */
/*
 * Gather every vmap_area hanging off the per-node vmap_nodes[].busy lists
 * into a single GETBUF'd array sorted by va_start.  On success, stores the
 * array in *list_ptr (caller frees) and returns the entry count; returns
 * -1 on a corrupt list.
 */
static int
get_vmap_area_list_from_nodes(ulong **list_ptr)
{
	int i, cnt, c;
	struct list_data list_data, *ld = &list_data;
	uint nr_vmap_nodes;
	ulong vmap_nodes, list_head;
	ulong *list, *ptr;

	get_symbol_data("nr_vmap_nodes", sizeof(uint), &nr_vmap_nodes);
	get_symbol_data("vmap_nodes", sizeof(ulong), &vmap_nodes);

	/* count up all vmap_areas. */
	cnt = 0;
	for (i = 0; i < nr_vmap_nodes; i++) {
		BZERO(ld, sizeof(struct list_data));
		list_head = vmap_nodes + SIZE(vmap_node) * i +
				OFFSET(vmap_node_busy) + OFFSET(rb_list_head);
		readmem(list_head, KVADDR, &ld->start, sizeof(void *),
			"rb_list.head", FAULT_ON_ERROR);
		ld->list_head_offset = OFFSET(vmap_area_list);
		ld->end = list_head;
		c = do_list(ld);
		if (c < 0)
			return -1;
		cnt += c;
	}

	list = ptr = (ulong *)GETBUF(sizeof(void *) * cnt);

	/* gather all vmap_areas into a list. */
	for (i = 0; i < nr_vmap_nodes; i++) {
		BZERO(ld, sizeof(struct list_data));
		ld->flags = LIST_ALLOCATE;
		list_head = vmap_nodes + SIZE(vmap_node) * i +
				OFFSET(vmap_node_busy) + OFFSET(rb_list_head);
		readmem(list_head, KVADDR, &ld->start, sizeof(void *),
			"rb_list.head", FAULT_ON_ERROR);
		ld->list_head_offset = OFFSET(vmap_area_list);
		ld->end = list_head;
		c = do_list(ld);
		if (c < 0)
			return -1;
		memcpy(ptr, ld->list_ptr, sizeof(void *) * c);
		ptr += c;
		FREEBUF(ld->list_ptr);
	}

	qsort(list, cnt, sizeof(void *), sort_by_va_start);

	*list_ptr = list;
	return cnt;
}

/*
 * vmap_area-based analogue of dump_vmlist(): collect the vmap_area list
 * (from vmap_nodes on 6.9+, else from vmap_area_list) and display/query
 * each in-use area according to vi->flags.
 */
static void
dump_vmap_area(struct meminfo *vi)
{
	int i, cnt;
	ulong start, end, vm_struct, flags, vm;
	struct list_data list_data, *ld;
	char *vmap_area_buf;
	ulong size, pcheck, count, verified;
	physaddr_t paddr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	ulong *list_ptr;

#define VM_VM_AREA 0x4   /* mm/vmalloc.c */

	start = count = verified = size = 0;

	if (vt->flags & USE_VMAP_NODES) {
		cnt = get_vmap_area_list_from_nodes(&list_ptr);
		if (cnt < 0) {
			error(WARNING, "invalid/corrupt vmap_nodes.busy list\n");
			vi->retval = 0;
			return;
		}
	} else {
		ld = &list_data;
		BZERO(ld, sizeof(struct list_data));
		ld->flags = LIST_HEAD_FORMAT|LIST_HEAD_POINTER|LIST_ALLOCATE;
		get_symbol_data("vmap_area_list", sizeof(void *), &ld->start);
		ld->list_head_offset = OFFSET(vmap_area_list);
		ld->end = symbol_value("vmap_area_list");
		cnt = do_list(ld);
		if (cnt < 0) {
			error(WARNING, "invalid/corrupt vmap_area_list\n");
			vi->retval = 0;
			return;
		}
		list_ptr = ld->list_ptr;
	}

	vmap_area_buf = GETBUF(SIZE(vmap_area));

	for (i = 0; i < cnt; i++) {
		/* print the column header once, unless in a query mode */
		if (!(pc->curcmd_flags & HEADER_PRINTED) && (i == 0) &&
		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
		    GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
			fprintf(fp, "%s ",
			    mkstring(buf1, MAX(strlen("VMAP_AREA"), VADDR_PRLEN),
			    CENTER|LJUST, "VMAP_AREA"));
			fprintf(fp, "%s ",
			    mkstring(buf1, MAX(strlen("VM_STRUCT"), VADDR_PRLEN),
			    CENTER|LJUST, "VM_STRUCT"));
			fprintf(fp, "%s SIZE\n",
			    mkstring(buf1, (VADDR_PRLEN * 2) + strlen(" - "),
			    CENTER|LJUST, "ADDRESS RANGE"));
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		readmem(list_ptr[i], KVADDR, vmap_area_buf,
			SIZE(vmap_area), "vmap_area struct", FAULT_ON_ERROR);

		/*
		 * Skip areas that are not in-use vmalloc regions; the
		 * in-use marker moved from vmap_area.flags to a non-NULL
		 * vmap_area.vm pointer on later kernels.
		 */
		if (VALID_MEMBER(vmap_area_flags) &&
		    VALID_MEMBER(vmap_area_purge_list)) {
			flags = ULONG(vmap_area_buf + OFFSET(vmap_area_flags));
			if (flags != VM_VM_AREA)
				continue;
		} else {
			vm = ULONG(vmap_area_buf + OFFSET(vmap_area_vm));
			if (!vm)
				continue;
		}
		start = ULONG(vmap_area_buf + OFFSET(vmap_area_va_start));
		end = ULONG(vmap_area_buf + OFFSET(vmap_area_va_end));
		vm_struct = ULONG(vmap_area_buf + OFFSET(vmap_area_vm));

		size = end - start;

		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
			/*
			 * Preceding GET_VMLIST_COUNT set vi->retval.
*/ if (vi->flags & GET_VMLIST) { if (count < vi->retval) { vi->vmlist[count].addr = start; vi->vmlist[count].size = size; } } count++; continue; } if (!(vi->flags & ADDRESS_SPECIFIED) || ((vi->memtype == KVADDR) && ((vi->spec_addr >= start) && (vi->spec_addr < (start+size))))) { if (vi->flags & VMLIST_VERIFY) { verified++; break; } fprintf(fp, "%s%s %s%s %s - %s %7ld\n", mkstring(buf1,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(list_ptr[i])), space(MINSPACE-1), mkstring(buf2,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(vm_struct)), space(MINSPACE-1), mkstring(buf3, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(start)), mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(start+size)), size); } if ((vi->flags & ADDRESS_SPECIFIED) && (vi->memtype == PHYSADDR)) { for (pcheck = start; pcheck < (start+size); pcheck += PAGESIZE()) { if (!kvtop(NULL, pcheck, &paddr, 0)) continue; if ((vi->spec_addr >= paddr) && (vi->spec_addr < (paddr+PAGESIZE()))) { if (vi->flags & GET_PHYS_TO_VMALLOC) { vi->retval = pcheck + PAGEOFFSET(vi->spec_addr); FREEBUF(list_ptr); return; } else fprintf(fp, "%s%s %s%s %s - %s %7ld\n", mkstring(buf1,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(list_ptr[i])), space(MINSPACE-1), mkstring(buf2, VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(vm_struct)), space(MINSPACE-1), mkstring(buf3, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(start)), mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(start+size)), size); break; } } } } FREEBUF(vmap_area_buf); FREEBUF(list_ptr); if (vi->flags & GET_HIGHEST) vi->retval = start+size; if (vi->flags & GET_VMLIST_COUNT) vi->retval = count; if (vi->flags & VMLIST_VERIFY) vi->retval = verified; } /* * dump_page_lists() displays information from the active_list, * inactive_dirty_list and inactive_clean_list from each zone. 
*/ static int dump_page_lists(struct meminfo *mi) { int i, c, n, retval; ulong node_zones, pgdat; struct node_table *nt; struct list_data list_data, *ld; char buf[BUFSIZE]; ulong value; ulong inactive_clean_pages, inactive_clean_list; int nr_active_pages, nr_inactive_pages; int nr_inactive_dirty_pages; ld = &list_data; retval = FALSE; nr_active_pages = nr_inactive_dirty_pages = -1; BZERO(ld, sizeof(struct list_data)); ld->list_head_offset = OFFSET(page_lru); if (mi->flags & ADDRESS_SPECIFIED) ld->searchfor = mi->spec_addr; else if (mi->flags & VERBOSE) ld->flags |= VERBOSE; if (mi->flags & GET_ACTIVE_LIST) { if (!symbol_exists("active_list")) error(FATAL, "active_list does not exist in this kernel\n"); if (symbol_exists("nr_active_pages")) get_symbol_data("nr_active_pages", sizeof(int), &nr_active_pages); else error(FATAL, "nr_active_pages does not exist in this kernel\n"); ld->end = symbol_value("active_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "active_list:\n"); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_active_pages: %d ", mi->flags & VERBOSE ? 
"\n" : "", nr_active_pages); if (c != nr_active_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_LIST) { if (!symbol_exists("inactive_list")) error(FATAL, "inactive_list does not exist in this kernel\n"); if (symbol_exists("nr_inactive_pages")) get_symbol_data("nr_inactive_pages", sizeof(int), &nr_inactive_pages); else error(FATAL, "nr_active_pages does not exist in this kernel\n"); ld->end = symbol_value("inactive_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "inactive_list:\n"); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_inactive_pages: %d ", mi->flags & VERBOSE ? "\n" : "", nr_inactive_pages); if (c != nr_inactive_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_DIRTY) { if (!symbol_exists("inactive_dirty_list")) error(FATAL, "inactive_dirty_list does not exist in this kernel\n"); if (symbol_exists("nr_inactive_dirty_pages")) get_symbol_data("nr_inactive_dirty_pages", sizeof(int), &nr_inactive_dirty_pages); else error(FATAL, "nr_inactive_dirty_pages does not exist in this kernel\n"); ld->end = symbol_value("inactive_dirty_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "%sinactive_dirty_list:\n", mi->flags & GET_ACTIVE_LIST ? 
"\n" : ""); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_inactive_dirty_pages: %d ", mi->flags & VERBOSE ? "\n" : "", nr_inactive_dirty_pages); if (c != nr_inactive_dirty_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_CLEAN) { if (INVALID_MEMBER(zone_struct_inactive_clean_list)) error(FATAL, "inactive_clean_list(s) do not exist in this kernel\n"); get_symbol_data("pgdat_list", sizeof(void *), &pgdat); if ((mi->flags & VERBOSE) && (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) fprintf(fp, "\n"); for (n = 0; pgdat; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "zone_struct name", FAULT_ON_ERROR); if (!read_string(value, buf, BUFSIZE-1)) sprintf(buf, "(unknown) "); if (mi->flags & VERBOSE) { if (vt->numnodes > 1) fprintf(fp, "NODE %d ", n); fprintf(fp, "\"%s\" inactive_clean_list:\n", buf); } readmem(node_zones + OFFSET(zone_struct_inactive_clean_pages), KVADDR, &inactive_clean_pages, sizeof(ulong), "inactive_clean_pages", FAULT_ON_ERROR); readmem(node_zones + OFFSET(zone_struct_inactive_clean_list), KVADDR, &inactive_clean_list, sizeof(ulong), "inactive_clean_list", FAULT_ON_ERROR); ld->start = inactive_clean_list; ld->end = node_zones + OFFSET(zone_struct_inactive_clean_list); if (mi->flags & ADDRESS_SPECIFIED) ld->searchfor = mi->spec_addr; if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else 
{ if (vt->numnodes > 1) fprintf(fp, "NODE %d ", n); fprintf(fp, "\"%s\" ", buf); fprintf(fp, "inactive_clean_pages: %ld ", inactive_clean_pages); if (c != inactive_clean_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } node_zones += SIZE(zone_struct); } readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } } return retval; } /* * Check whether an address is a kmem_cache_t address, and if so, return * a pointer to the static buffer containing its name string. Otherwise * return NULL on failure. */ #define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" static char * is_kmem_cache_addr(ulong vaddr, char *kbuf) { ulong cache, cache_cache, name; long next_offset, name_offset; if (vt->flags & KMEM_CACHE_UNAVAIL) { error(INFO, "kmem cache slab subsystem not available\n"); return NULL; } if (vt->flags & KMALLOC_SLUB) return is_kmem_cache_addr_common(vaddr, kbuf); if ((vt->flags & KMALLOC_COMMON) && !symbol_exists("cache_cache")) return is_kmem_cache_addr_common(vaddr, kbuf); name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
		OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);

	cache = cache_cache = symbol_value("cache_cache");

	/* Circular walk of the cache chain, looking for a match. */
	do {
		if (cache == vaddr) {
			if (vt->kmem_cache_namelen) {
				/* Name is an embedded char array. */
				readmem(cache+name_offset, KVADDR, kbuf,
				    vt->kmem_cache_namelen, "name array",
				    FAULT_ON_ERROR);
			} else {
				/* Name is a pointer to a string. */
				readmem(cache+name_offset, KVADDR, &name,
				    sizeof(name), "name", FAULT_ON_ERROR);
				if (!read_string(name, kbuf, BUFSIZE-1)) {
					if (vt->flags &
					    (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
						error(WARNING,
						    "cannot read kmem_cache_s.name string at %lx\n",
						    name);
					else
						error(WARNING,
						    "cannot read kmem_cache_s.c_name string at %lx\n",
						    name);
					sprintf(kbuf, "(unknown)");
				}
			}
			return kbuf;
		}

		readmem(cache+next_offset, KVADDR, &cache, sizeof(long),
		    "kmem_cache_s next", FAULT_ON_ERROR);

		/* V1/V2 link via an embedded list_head; rebase to struct start. */
		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
			cache -= next_offset;

	} while (cache != cache_cache);

	return NULL;
}

/*
 * Note same functionality as above, but instead it just
 * dumps all slab cache names and their addresses.
 */
static void
kmem_cache_list(struct meminfo *mi)
{
	ulong cache, cache_cache, name;
	long next_offset, name_offset;
	char *cache_buf;
	int has_cache_chain;
	ulong cache_chain;
	char buf[BUFSIZE];

	if (vt->flags & KMEM_CACHE_UNAVAIL) {
		error(INFO, "kmem cache slab subsystem not available\n");
		return;
	}

	if (vt->flags & (KMALLOC_SLUB|KMALLOC_COMMON)) {
		kmem_cache_list_common(mi);
		return;
	}

	if (symbol_exists("cache_chain")) {
		has_cache_chain = TRUE;
		cache_chain = symbol_value("cache_chain");
	} else {
		has_cache_chain = FALSE;
		cache_chain = 0;
	}

	name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
	next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);

	cache = cache_cache = symbol_value("cache_cache");

	cache_buf = GETBUF(SIZE(kmem_cache_s));

	do {
		readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
		    "kmem_cache buffer", FAULT_ON_ERROR);

		if (vt->kmem_cache_namelen) {
			BCOPY(cache_buf+name_offset, buf,
			    vt->kmem_cache_namelen);
		} else {
			name = ULONG(cache_buf + name_offset);
			if (!read_string(name, buf, BUFSIZE-1)) {
				if (vt->flags &
				    (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
					error(WARNING,
					    "cannot read kmem_cache_s.name string at %lx\n",
					    name);
				else
					error(WARNING,
					    "cannot read kmem_cache_s.c_name string at %lx\n",
					    name);
				sprintf(buf, "(unknown)");
			}
		}

		fprintf(fp, "%lx %s\n", cache, buf);

		cache = ULONG(cache_buf + next_offset);

		/* Skip over the cache_chain list head if we hit it. */
		if (has_cache_chain && (cache == cache_chain))
			readmem(cache, KVADDR, &cache, sizeof(char *),
			    "cache_chain", FAULT_ON_ERROR);

		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
			cache -= next_offset;

	} while (cache != cache_cache);

	FREEBUF(cache_buf);
}

/*
 * Translate an address to its physical page number, verify that the
 * page in fact belongs to the slab subsystem, and if so, return the
 * name of the cache to which it belongs.
*/
static char *
vaddr_to_kmem_cache(ulong vaddr, char *buf, int verbose)
{
	physaddr_t paddr;
	ulong page, cache, page_flags;

	if (!kvtop(NULL, vaddr, &paddr, 0)) {
		if (verbose)
			error(WARNING,
			    "cannot make virtual-to-physical translation: %lx\n",
			    vaddr);
		return NULL;
	}

	if (!phys_to_page(paddr, &page)) {
		if (verbose)
			error(WARNING,
			    "cannot find mem_map page for address: %lx\n",
			    vaddr);
		return NULL;
	}

	if (vt->PG_slab) {
		readmem(page+OFFSET(page_flags), KVADDR,
		    &page_flags, sizeof(ulong), "page.flags",
		    FAULT_ON_ERROR);

		if (!page_slab(page, page_flags)) {
			/*
			 * The tail page may not carry PG_slab; retry the
			 * check against the compound head page.
			 */
			if (((vt->flags & KMALLOC_SLUB) ||
			    VALID_MEMBER(page_compound_head)) ||
			    ((vt->flags & KMALLOC_COMMON) &&
			    VALID_MEMBER(page_slab) &&
			    VALID_MEMBER(page_first_page))) {
				readmem(compound_head(page)+OFFSET(page_flags),
				    KVADDR, &page_flags, sizeof(ulong),
				    "page.flags", FAULT_ON_ERROR);

				if (!page_slab(compound_head(page), page_flags))
					return NULL;
			} else
				return NULL;
		}
	}

	/* Locate the owning kmem_cache pointer per struct page layout era. */
	if ((vt->flags & KMALLOC_SLUB) ||
	    ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab) &&
	    (VALID_MEMBER(page_compound_head) ||
	    VALID_MEMBER(page_first_page)))) {
		readmem(compound_head(page)+OFFSET(page_slab), KVADDR,
		    &cache, sizeof(void *), "page.slab", FAULT_ON_ERROR);
	} else if (VALID_MEMBER(page_next))
		readmem(page+OFFSET(page_next), KVADDR,
		    &cache, sizeof(void *), "page.next", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_list_next))
		readmem(page+OFFSET(page_list_next), KVADDR,
		    &cache, sizeof(void *), "page.list.next", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_lru))
		readmem(page+OFFSET(page_lru)+OFFSET(list_head_next), KVADDR,
		    &cache, sizeof(void *), "page.lru.next", FAULT_ON_ERROR);
	else
		error(FATAL,
		    "cannot determine slab cache from page struct\n");

	return(is_kmem_cache_addr(cache, buf));
}

/*
 * If vaddr is a page struct belonging to a SLAB_OVERLOAD_PAGE-style
 * slab, return its cache name (via is_kmem_cache_addr) and set
 * *page_head to the compound head page; otherwise return NULL.
 */
static char *
is_slab_overload_page(ulong vaddr, ulong *page_head, char *buf)
{
	ulong cache;
	char *p;

	if ((vt->flags & SLAB_OVERLOAD_PAGE) &&
	    is_page_ptr(vaddr, NULL) &&
	    VALID_MEMBER(page_slab) &&
	    (VALID_MEMBER(page_compound_head) ||
	    VALID_MEMBER(page_first_page))) {
		readmem(compound_head(vaddr)+OFFSET(page_slab), KVADDR,
		    &cache, sizeof(void *), "page.slab", FAULT_ON_ERROR);
		p = is_kmem_cache_addr(cache, buf);
		if (p)
			*page_head = compound_head(vaddr);
		return p;
	}

	return NULL;
}

/*
 * Translate an address to its physical page number, verify that the
 * page in fact belongs to the slab subsystem, and if so, return the
 * address of the slab to which it belongs.
 */
static ulong
vaddr_to_slab(ulong vaddr)
{
	physaddr_t paddr;
	ulong page;
	ulong slab;

	if (!kvtop(NULL, vaddr, &paddr, 0)) {
		error(WARNING,
		    "cannot make virtual-to-physical translation: %lx\n",
		    vaddr);
		return 0;
	}

	if (!phys_to_page(paddr, &page)) {
		error(WARNING,
		    "cannot find mem_map page for address: %lx\n",
		    vaddr);
		return 0;
	}

	slab = 0;

	/* Slab pointer location depends on the struct page layout era. */
	if ((vt->flags & KMALLOC_SLUB) || VALID_MEMBER(page_compound_head))
		slab = compound_head(page);
	else if (vt->flags & SLAB_OVERLOAD_PAGE)
		slab = compound_head(page);
	else if ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab_page))
		readmem(page+OFFSET(page_slab_page), KVADDR,
		    &slab, sizeof(void *), "page.slab_page", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_prev))
		readmem(page+OFFSET(page_prev), KVADDR,
		    &slab, sizeof(void *), "page.prev", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_list_prev))
		readmem(page+OFFSET(page_list_prev), KVADDR,
		    &slab, sizeof(void *), "page.list.prev", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_lru))
		readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev), KVADDR,
		    &slab, sizeof(void *), "page.lru.prev", FAULT_ON_ERROR);
	else
		error(FATAL, "unknown definition of struct page?\n");

	return slab;
}

/*
 * Initialize any data required for scouring the kmalloc subsystem more
*/

/* Lazily-built column headers shared by the kmem slab commands. */
char slab_hdr[100] = { 0 };
char kmem_cache_hdr[100] = { 0 };
char free_inuse_hdr[100] = { 0 };

/*
 * Walk the complete slab cache chain once, recording the maximum object
 * count, cpudata limit and cpu count into vt->kmem_max_* for later use.
 * Sets KMEM_CACHE_UNAVAIL (and returns) on any inconsistency.
 */
static void
kmem_cache_init(void)
{
	ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2;
	long cache_count, num_offset, next_offset;
	char *cache_buf;

	if (vt->flags & KMEM_CACHE_UNAVAIL)
		return;

	if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME))
		return;

	if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT))
		return;

	please_wait("gathering kmem slab cache data");

	if (!strlen(slab_hdr)) {
		if (vt->flags & KMALLOC_SLUB)
			sprintf(slab_hdr,
			    "SLAB%sMEMORY%sNODE TOTAL ALLOCATED FREE\n",
			    space(VADDR_PRLEN > 8 ? 14 : 6),
			    space(VADDR_PRLEN > 8 ? 12 : 4));
		else
			sprintf(slab_hdr,
			    "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n",
			    space(VADDR_PRLEN > 8 ? 14 : 6),
			    space(VADDR_PRLEN > 8 ? 12 : 4));
	}

	if (!strlen(kmem_cache_hdr))
		sprintf(kmem_cache_hdr,
		    "CACHE%s OBJSIZE ALLOCATED TOTAL SLABS SSIZE NAME\n",
		    space(VADDR_PRLEN > 8 ? 12 : 4));

	if (!strlen(free_inuse_hdr))
		sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n");

	/* SLUB keeps its own metadata; separate initialization path. */
	if (vt->flags & KMALLOC_SLUB) {
		kmem_cache_init_slub();
		please_wait_done();
		return;
	}

	num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num);
	next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);
	max_cnum = max_limit = max_cpus = cache_count = tmp2 = 0;

	/*
	 * Pre-2.6 versions used the "cache_cache" as the head of the
	 * slab chain list. 2.6 uses the "cache_chain" list_head.
	 * In 3.6 SLAB and SLUB use the "slab_caches" list_head.
	 */
	if (vt->flags & PERCPU_KMALLOC_V2) {
		if (kernel_symbol_exists("cache_chain")) {
			get_symbol_data("cache_chain", sizeof(ulong), &cache);
			cache_end = symbol_value("cache_chain");
		} else if (kernel_symbol_exists("slab_caches")) {
			vt->flags |= KMALLOC_COMMON;
			get_symbol_data("slab_caches", sizeof(ulong), &cache);
			cache_end = symbol_value("slab_caches");
		} else {
			error(INFO,
			    "unable to initialize kmem slab cache subsystem\n\n");
			return;
		}
		/* Convert embedded list_head address to struct start. */
		cache -= next_offset;
	} else
		cache = cache_end = symbol_value("cache_cache");

	if (!(pc->flags & RUNTIME)) {
		if (kmem_cache_downsize())
			add_to_downsized("kmem_cache");
	}

	cache_buf = GETBUF(SIZE(kmem_cache_s));
	hq_open();

	do {
		cache_count++;

		if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
		    "kmem_cache buffer", RETURN_ON_ERROR)) {
			FREEBUF(cache_buf);
			vt->flags |= KMEM_CACHE_UNAVAIL;
			error(INFO,
			    "%sunable to initialize kmem slab cache subsystem\n\n",
			    DUMPFILE() ? "\n" : "");
			hq_close();
			return;
		}

		/* hq_enter() failing means we have looped: corrupt chain. */
		if (!hq_enter(cache)) {
			error(WARNING,
			    "%sduplicate kmem_cache entry in cache list: %lx\n",
			    DUMPFILE() ? "\n" : "", cache);
			error(INFO,
			    "unable to initialize kmem slab cache subsystem\n\n");
			vt->flags |= KMEM_CACHE_UNAVAIL;
			hq_close();
			return;
		}

		tmp = (ulong)(UINT(cache_buf + num_offset));
		if (tmp > max_cnum)
			max_cnum = tmp;

		if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit)
			max_limit = tmp;

		/*
		 * Recognize and bail out on any max_cpudata_limit() failures.
		 */
		if (vt->flags & KMEM_CACHE_UNAVAIL) {
			FREEBUF(cache_buf);
			hq_close();
			return;
		}

		if (tmp2 > max_cpus)
			max_cpus = tmp2;

		cache = ULONG(cache_buf + next_offset);

		switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
		{
		case PERCPU_KMALLOC_V1:
			cache -= next_offset;
			break;
		case PERCPU_KMALLOC_V2:
			/* Stop rebasing once we are back at the list head. */
			if (cache != cache_end)
				cache -= next_offset;
			break;
		}

	} while (cache != cache_end);

	hq_close();
	FREEBUF(cache_buf);

	vt->kmem_max_c_num = max_cnum;
	vt->kmem_max_limit = max_limit;
	vt->kmem_max_cpus = max_cpus;
	vt->kmem_cache_count = cache_count;

	if (CRASHDEBUG(2)) {
		fprintf(fp, "kmem_cache_init:\n");
		fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
		fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit);
		fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
		fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count);
	}

	if (!(vt->flags & KMEM_CACHE_INIT)) {
		if (vt->flags & PERCPU_KMALLOC_V1)
			ARRAY_LENGTH_INIT(vt->kmem_cache_namelen,
			    kmem_cache_s_name, "kmem_cache_s.name",
			    NULL, sizeof(char));
		else if (vt->flags & PERCPU_KMALLOC_V2)
			vt->kmem_cache_namelen = 0;
		else
			ARRAY_LENGTH_INIT(vt->kmem_cache_namelen,
			    kmem_cache_s_c_name, "kmem_cache_s.c_name",
			    NULL, 0);
	}

	please_wait_done();

	vt->flags |= KMEM_CACHE_INIT;
}

/*
 * Return the address of a cache's nodelists array, dereferencing the
 * pointer first on kernels where it is not embedded in the struct.
 */
static ulong
kmem_cache_nodelists(ulong cache)
{
	ulong nodelists = 0;

	if (vt->flags & NODELISTS_IS_PTR) {
		/*
		 * nodelists is pointer to the array
		 */
		if (!readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR,
		    &nodelists, sizeof(ulong), "nodelists pointer",
		    RETURN_ON_ERROR))
			error(WARNING,
			    "cannot read kmem_cache nodelists pointer");
		return nodelists;
	} else
		return cache+OFFSET(kmem_cache_s_lists);
}

/*
 * The debuginfo kmem_cache struct may be larger than what the kernel
 * actually allocates (trailing per-cpu/per-node arrays are sized at
 * runtime).  Shrink SIZE(kmem_cache[_s]) accordingly; returns TRUE
 * if a downsize took place.
 */
static int
kmem_cache_downsize(void)
{
	char *cache_buf;
	ulong kmem_cache;
	uint buffer_size, object_size;
	int nr_node_ids;
	int nr_cpu_ids;

	if (vt->flags & KMALLOC_SLUB) {
		if (kernel_symbol_exists("kmem_cache") &&
		    VALID_MEMBER(kmem_cache_objsize) &&
		    try_get_symbol_data("kmem_cache",
		    sizeof(ulong), &kmem_cache) &&
		    readmem(kmem_cache + OFFSET(kmem_cache_objsize),
		    KVADDR, &object_size, sizeof(int),
		    "kmem_cache objsize/object_size", RETURN_ON_ERROR)) {
			ASSIGN_SIZE(kmem_cache) = object_size;
			if (CRASHDEBUG(1))
				fprintf(fp,
				    "\nkmem_cache_downsize: %ld to %ld\n",
				    STRUCT_SIZE("kmem_cache"),
				    SIZE(kmem_cache));
		}

		if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache))
			return TRUE;
		else
			return FALSE;
	}

	if ((THIS_KERNEL_VERSION < LINUX(2,6,22)) ||
	    !(vt->flags & PERCPU_KMALLOC_V2_NODES) ||
	    (!kernel_symbol_exists("cache_cache") &&
	    !kernel_symbol_exists("kmem_cache_boot")) ||
	    (!MEMBER_EXISTS("kmem_cache", "buffer_size") &&
	    !MEMBER_EXISTS("kmem_cache", "size"))) {
		return FALSE;
	}

	if (vt->flags & NODELISTS_IS_PTR) {
		/*
		 * More recent kernels have kmem_cache.array[] sized
		 * by the number of cpus plus the number of nodes.
		 */
		if (kernel_symbol_exists("kmem_cache_boot") &&
		    MEMBER_EXISTS("kmem_cache", "object_size") &&
		    readmem(symbol_value("kmem_cache_boot") +
		    MEMBER_OFFSET("kmem_cache", "object_size"),
		    KVADDR, &object_size, sizeof(int),
		    "kmem_cache_boot object_size", RETURN_ON_ERROR))
			ASSIGN_SIZE(kmem_cache_s) = object_size;
		else if (kernel_symbol_exists("cache_cache") &&
		    MEMBER_EXISTS("kmem_cache", "object_size") &&
		    readmem(symbol_value("cache_cache") +
		    MEMBER_OFFSET("kmem_cache", "object_size"),
		    KVADDR, &object_size, sizeof(int),
		    "cache_cache object_size", RETURN_ON_ERROR))
			ASSIGN_SIZE(kmem_cache_s) = object_size;
		else
			object_size = 0;

		/*
		 * Older kernels have kmem_cache.array[] sized by
		 * the number of cpus; real value is nr_cpu_ids,
		 * but fallback is kt->cpus.
		 */
		if (kernel_symbol_exists("nr_cpu_ids"))
			get_symbol_data("nr_cpu_ids", sizeof(int),
			    &nr_cpu_ids);
		else
			nr_cpu_ids = kt->cpus;

		ARRAY_LENGTH(kmem_cache_s_array) = nr_cpu_ids;

		if (!object_size)
			ASSIGN_SIZE(kmem_cache_s) =
			    OFFSET(kmem_cache_s_array) +
			    sizeof(ulong) * nr_cpu_ids;

		if (CRASHDEBUG(1))
			fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n",
			    STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s));

		if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s))
			return TRUE;
		else
			return FALSE;
	} else if (vt->flags & SLAB_CPU_CACHE) {
		if (kernel_symbol_exists("kmem_cache_boot") &&
		    MEMBER_EXISTS("kmem_cache", "object_size") &&
		    readmem(symbol_value("kmem_cache_boot") +
		    MEMBER_OFFSET("kmem_cache", "object_size"),
		    KVADDR, &object_size, sizeof(int),
		    "kmem_cache_boot object_size", RETURN_ON_ERROR))
			ASSIGN_SIZE(kmem_cache_s) = object_size;
		else {
			object_size = OFFSET(kmem_cache_node) +
			    (sizeof(void *) * vt->kmem_cache_len_nodes);
			ASSIGN_SIZE(kmem_cache_s) = object_size;
		}

		if (CRASHDEBUG(1))
			fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n",
			    STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s));

		if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s))
			return TRUE;
		else
			return FALSE;
	}

	cache_buf = GETBUF(SIZE(kmem_cache_s));

	if (!readmem(symbol_value("cache_cache"), KVADDR, cache_buf,
	    SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) {
		FREEBUF(cache_buf);
		return FALSE;
	}

	buffer_size = UINT(cache_buf +
	    MEMBER_OFFSET("kmem_cache", "buffer_size"));

	if (buffer_size < SIZE(kmem_cache_s)) {
		if (kernel_symbol_exists("nr_node_ids")) {
			get_symbol_data("nr_node_ids", sizeof(int),
			    &nr_node_ids);
			vt->kmem_cache_len_nodes = nr_node_ids;
		} else
			vt->kmem_cache_len_nodes = 1;

		/* Sanity-check before trusting the kernel's own size. */
		if (buffer_size >= (uint)(OFFSET(kmem_cache_s_lists) +
		    (sizeof(void *) * vt->kmem_cache_len_nodes)))
			ASSIGN_SIZE(kmem_cache_s) = buffer_size;
		else
			error(WARNING,
			    "questionable cache_cache.buffer_size: %d\n",
			    buffer_size);

		if (CRASHDEBUG(1)) {
			fprintf(fp,
			    "\nkmem_cache_downsize: %ld to %d\n",
			    STRUCT_SIZE("kmem_cache"), buffer_size);
			fprintf(fp,
			    "kmem_cache_downsize: nr_node_ids: %ld\n",
			    vt->kmem_cache_len_nodes);
		}

		FREEBUF(cache_buf);
		if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s))
			return TRUE;
		else
			return FALSE;
	}

	FREEBUF(cache_buf);
	return FALSE;
}

/*
 * Stash a list of presumably-corrupted slab cache addresses.
 */
static void
mark_bad_slab_cache(ulong cache)
{
	size_t sz;

	if (vt->nr_bad_slab_caches) {
		sz = sizeof(ulong) * (vt->nr_bad_slab_caches + 1);
		if (!(vt->bad_slab_caches = realloc(vt->bad_slab_caches, sz))) {
			error(INFO, "cannot realloc bad_slab_caches array\n");
			vt->nr_bad_slab_caches = 0;
			return;
		}
	} else {
		if (!(vt->bad_slab_caches = (ulong *)malloc(sizeof(ulong)))) {
			error(INFO, "cannot malloc bad_slab_caches array\n");
			return;
		}
	}

	vt->bad_slab_caches[vt->nr_bad_slab_caches++] = cache;
}

/* Return TRUE if the cache was previously marked bad. */
static int
bad_slab_cache(ulong cache)
{
	int i;

	for (i = 0; i < vt->nr_bad_slab_caches; i++) {
		if (vt->bad_slab_caches[i] == cache)
			return TRUE;
	}

	return FALSE;
}

/*
 * Determine the largest cpudata limit for a given cache.
*/
static ulong
max_cpudata_limit(ulong cache, ulong *cpus)
{
	int i;
	ulong cpudata[NR_CPUS];
	int limit;
	ulong max_limit;
	ulong shared, percpu_ptr;
	ulong *start_address;

	/* Three layouts: V1 cpudata[], V2 array[], V2+NODES per-node. */
	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		goto kmem_cache_s_array_nodes;

	if (vt->flags & PERCPU_KMALLOC_V2)
		goto kmem_cache_s_array;

	if (INVALID_MEMBER(kmem_cache_s_cpudata)) {
		*cpus = 0;
		return 0;
	}

	if (!readmem(cache+OFFSET(kmem_cache_s_cpudata),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
	    "cpudata array", RETURN_ON_ERROR))
		goto bail_out;

	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) &&
	    cpudata[i]; i++) {
		if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit), KVADDR,
		    &limit, sizeof(int), "cpucache limit",
		    RETURN_ON_ERROR))
			goto bail_out;
		if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;
	return max_limit;

kmem_cache_s_array:

	if (!readmem(cache+OFFSET(kmem_cache_s_array),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
	    "array cache array", RETURN_ON_ERROR))
		goto bail_out;

	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) &&
	    cpudata[i]; i++) {
		if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR,
		    &limit, sizeof(int), "array cache limit",
		    RETURN_ON_ERROR))
			goto bail_out;
		if (limit > max_limit)
			max_limit = limit;
	}

	/*
	 * If the shared list can be accessed, check its size as well.
	 */
	if (VALID_MEMBER(kmem_list3_shared) &&
	    VALID_MEMBER(kmem_cache_s_lists) &&
	    readmem(cache+OFFSET(kmem_cache_s_lists)+
	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) &&
	    readmem(shared+OFFSET(array_cache_limit), KVADDR, &limit,
	    sizeof(int), "shared array_cache limit",
	    RETURN_ON_ERROR|QUIET)) {
		if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;
	return max_limit;

kmem_cache_s_array_nodes:

	if (CRASHDEBUG(3))
		fprintf(fp, "kmem_cache: %lx\n", cache);

	if (vt->flags & SLAB_CPU_CACHE) {
		/* cpu_cache is a per-cpu pointer; apply per-cpu offsets. */
		if (!readmem(cache+OFFSET(kmem_cache_cpu_cache), KVADDR,
		    &percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache",
		    RETURN_ON_ERROR))
			goto bail_out;

		for (i = 0; i < kt->cpus; i++)
			cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i];
	} else {
		if (!readmem(cache+OFFSET(kmem_cache_s_array),
		    KVADDR, &cpudata[0],
		    sizeof(ulong) *
		    MIN(NR_CPUS, ARRAY_LENGTH(kmem_cache_s_array)),
		    "array cache array", RETURN_ON_ERROR))
			goto bail_out;
	}

	for (i = max_limit = 0; i < kt->cpus; i++) {
		if (check_offline_cpu(i))
			continue;

		if (!cpudata[i])
			break;

		if (!readmem(cpudata[i]+OFFSET(array_cache_limit),
		    KVADDR, &limit, sizeof(int),
		    "array cache limit", RETURN_ON_ERROR)) {
			error(INFO,
			    "kmem_cache: %lx: invalid array_cache pointer: %lx\n",
			    cache, cpudata[i]);
			mark_bad_slab_cache(cache);
			return max_limit;
		}

		if (CRASHDEBUG(3))
			fprintf(fp, " array limit[%d]: %d\n", i, limit);

		/* Guard against garbage limits from corrupt caches. */
		if ((unsigned int)limit > INT_MAX)
			error(INFO,
			    "kmem_cache: %lx: invalid array limit[%d]: %d\n",
			    cache, i, limit);
		else if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;

	/*
	 * Check the shared list of all the nodes.
	 */
	start_address = (ulong *)GETBUF(sizeof(ulong) *
	    vt->kmem_cache_len_nodes);

	if (VALID_MEMBER(kmem_list3_shared) &&
	    VALID_MEMBER(kmem_cache_s_lists) &&
	    readmem(kmem_cache_nodelists(cache), KVADDR, &start_address[0],
	    sizeof(ulong) * vt->kmem_cache_len_nodes,
	    "array nodelist array", RETURN_ON_ERROR)) {
		for (i = 0; i < vt->kmem_cache_len_nodes; i++) {
			if (start_address[i] == 0)
				continue;
			if (readmem(start_address[i] +
			    OFFSET(kmem_list3_shared), KVADDR, &shared,
			    sizeof(void *), "kmem_list3 shared",
			    RETURN_ON_ERROR|QUIET)) {
				if (!shared)
					break;
			} else
				continue;
			if (readmem(shared + OFFSET(array_cache_limit),
			    KVADDR, &limit, sizeof(int),
			    "shared array_cache limit",
			    RETURN_ON_ERROR|QUIET)) {
				if (CRASHDEBUG(3))
					fprintf(fp,
					    " shared node limit[%d]: %d\n",
					    i, limit);
				if ((unsigned int)limit > INT_MAX)
					error(INFO,
					    "kmem_cache: %lx: shared node limit[%d]: %d\n",
					    cache, i, limit);
				else if (limit > max_limit)
					max_limit = limit;
				break;
			}
		}
	}

	FREEBUF(start_address);
	return max_limit;

bail_out:
	vt->flags |= KMEM_CACHE_UNAVAIL;
	error(INFO, "unable to initialize kmem slab cache subsystem\n\n");
	*cpus = 0;
	return 0;
}

/*
 * Determine whether the current slab cache is contained in
 * the comma-separated list from a "kmem -I list1,list2 ..."
 * command entry.
 */
static int
ignore_cache(struct meminfo *si, char *name)
{
	int i, argc;
	char *p1;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];

	if (!si->ignore)
		return FALSE;

	strcpy(buf, si->ignore);

	/* Convert the comma-separated list to whitespace for parse_line(). */
	p1 = buf;
	while (*p1) {
		if (*p1 == ',')
			*p1 = ' ';
		p1++;
	}

	argc = parse_line(buf, arglist);

	for (i = 0; i < argc; i++) {
		if (STREQ(name, arglist[i]))
			return TRUE;
	}

	return FALSE;
}

/*
 * dump_kmem_cache() displays basic information about kmalloc() slabs.
 * At this point, only kmem_cache_s structure data for each slab is dumped.
 *
 * TBD: Given a specified physical address, and determine which slab it came
 * from, and whether it's in use or not.
 */
#define SLAB_C_MAGIC 0x4F17A36DUL
#define SLAB_MAGIC_ALLOC 0xA5C32F2BUL /* slab is alive */
#define SLAB_MAGIC_DESTROYED 0xB2F23C5AUL /* slab has been destroyed */
#define SLAB_CFLGS_BUFCTL 0x020000UL /* bufctls in own cache */
#define SLAB_CFLGS_OBJFREELIST 0x40000000UL /* Freelist as an object */

/* si->found codes: what an address-specified slab search located. */
#define KMEM_SLAB_ADDR (1)
#define KMEM_BUFCTL_ADDR (2)
#define KMEM_OBJECT_ADDR_FREE (3)
#define KMEM_OBJECT_ADDR_INUSE (4)
#define KMEM_OBJECT_ADDR_CACHED (5)
#define KMEM_ON_SLAB (6)
#define KMEM_OBJECT_ADDR_SHARED (7)
#define KMEM_SLAB_OVERLOAD_PAGE (8)
#define KMEM_SLAB_FREELIST (9)

#define DUMP_KMEM_CACHE_TAG(addr, name, tag) \
	fprintf(fp, "%lx %-43s %s\n", addr, tag, name)

#define DUMP_KMEM_CACHE_INFO() dump_kmem_cache_info(si)

/*
 * Print one summary line for a kmem cache: cache address, object size,
 * allocated/total object counts, slab count, slab size and cache name.
 * When SLAB_GATHER_FAILURE is set the counts are printed as "?".
 */
static void
dump_kmem_cache_info(struct meminfo *si)
{
	char b1[BUFSIZE];
	ulong objsize, allocated, total;

	if (si->flags & SLAB_GATHER_FAILURE)
		error(INFO, "%s: cannot gather relevant slab data\n",
			si->curname);

	/* SLUB tracks the object size separately from the slab size. */
	objsize = (vt->flags & KMALLOC_SLUB) ? si->objsize : si->size;

	fprintf(fp, "%s %8ld ",
		mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX,
		MKSTR(si->cache)), objsize);

	if (si->flags & SLAB_GATHER_FAILURE) {
		fprintf(fp, "%9s %8s %5s ", "?", "?", "?");
	} else {
		/*
		 * For percpu slab caches, objects sitting in per-cpu
		 * caches are not really allocated to users.
		 */
		allocated = (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ?
			si->inuse - si->cpucached_cache : si->inuse;
		total = (vt->flags & KMALLOC_SLUB) ?
si->inuse + si->free : si->num_slabs * si->c_num; fprintf(fp, "%9ld %8ld %5ld ", allocated, total, si->num_slabs); } fprintf(fp, "%4ldk %s\n", si->slabsize/1024, si->curname); } #define DUMP_SLAB_INFO() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ ulong allocated, freeobjs, slab; \ if (vt->flags & SLAB_OVERLOAD_PAGE) \ slab = si->slab - OFFSET(page_lru); \ else \ slab = si->slab; \ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \ allocated = si->s_inuse - si->cpucached_slab; \ freeobjs = si->c_num - allocated - si->cpucached_slab; \ } else { \ allocated = si->s_inuse; \ freeobjs = si->c_num - si->s_inuse; \ } \ fprintf(fp, "%s %s %5ld %9ld %4ld\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ si->c_num, allocated, \ vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? \ freeobjs + si->cpucached_slab : freeobjs); \ } static void dump_kmem_cache(struct meminfo *si) { char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name, magic; int cnt; char *p1; if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_c_nextp), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, 
si->reqname); reqname = p1; } else reqname = si->reqname; si->cache_buf = GETBUF(SIZE(kmem_cache_s)); do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", FAULT_ON_ERROR); if (vt->kmem_cache_namelen) { BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), buf, vt->kmem_cache_namelen); } else { name = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_name)); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[IGNORED]"); goto next_cache; } si->curname = buf; if (CRASHDEBUG(1)) fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); console("cache: %lx %s\n", si->cache, si->curname); magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); if (magic == SLAB_C_MAGIC) { si->size = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_org_size)); if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: c_org_size: %ld\n", si->curname, si->size); si->errors++; } } si->c_flags = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_flags)); si->c_offset = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_offset)); si->order = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_gfporder)); si->c_num = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_num)); do_slab_chain(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) DUMP_KMEM_CACHE_INFO(); if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
vaddr_to_slab(si->spec_addr) : 0; do_slab_chain(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO(); fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_slab_t)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; } break; } } } else { error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", si->curname, magic); si->errors++; } next_cache: si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); } while (si->cache != cache_cache); FREEBUF(si->cache_buf); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); } /* * dump_kmem_cache() adapted for newer percpu slab format. */ static void dump_kmem_cache_percpu_v1(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name; int cnt; uint tmp_val; /* Used as temporary variable to read sizeof(int) and assigned to ulong variable. 
We are doing this to mask the endian issue */ char *p1; if (!(vt->flags & PERCPU_KMALLOC_V1)) error(FATAL, "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); for (i = 0; i < vt->kmem_max_cpus; i++) si->cpudata[i] = (ulong *) GETBUF(vt->kmem_max_limit * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); if (vt->kmem_cache_namelen) { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, buf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, &name, sizeof(ulong), "name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[IGNORED]"); goto next_cache; } si->curname = buf; readmem(si->cache+OFFSET(kmem_cache_s_objsize), KVADDR, &tmp_val, sizeof(uint), "objsize", FAULT_ON_ERROR); si->size = (ulong)tmp_val; if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: objsize: %ld\n", si->curname, si->size); si->errors++; } } readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); si->c_flags = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_gfporder), KVADDR, &tmp_val, sizeof(uint), "gfporder", FAULT_ON_ERROR); si->order = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_num), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s num", FAULT_ON_ERROR); si->c_num = (ulong)tmp_val; do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { DUMP_KMEM_CACHE_INFO(); if (CRASHDEBUG(3)) dump_struct("kmem_cache_s", si->cache, 0); } if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { gather_cpudata_list_v1(si); si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
vaddr_to_slab(si->spec_addr) : 0; do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO(); fprintf(fp, "%s", slab_hdr); gather_slab_cached_count(si); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp,"(kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(slab_s)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_CACHED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (cpu %d cache)\n", si->container ? si->container : (ulong)si->spec_addr, si->cpu); break; } break; } } next_cache: readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &si->cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); si->cache -= OFFSET(kmem_cache_s_next); } while (si->cache != cache_cache); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); FREEBUF(si->kmem_bufctl); for (i = 0; i < vt->kmem_max_cpus; i++) FREEBUF(si->cpudata[i]); } /* * Updated for 2.6 slab substructure. */ static void dump_kmem_cache_percpu_v2(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_end; ulong name, page_head; int cnt; uint tmp_val; /* Used as temporary variable to read sizeof(int) and assigned to ulong variable. 
We are doing this to mask the endian issue */ char *p1; if (!(vt->flags & PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); if (vt->flags & SLAB_OVERLOAD_PAGE) { si->freelist = si->kmem_bufctl; si->freelist_index_size = slab_freelist_index_size(); si->list_offset = VALID_MEMBER(slab_slab_list) ? OFFSET(slab_slab_list) : OFFSET(page_lru); } for (i = 0; i < vt->kmem_max_cpus; i++) si->cpudata[i] = (ulong *) GETBUF(vt->kmem_max_limit * sizeof(ulong)); if(vt->flags & PERCPU_KMALLOC_V2_NODES) si->shared_array_cache = (ulong *) GETBUF(vt->kmem_cache_len_nodes * (vt->kmem_max_limit+1) * sizeof(ulong)); else si->shared_array_cache = (ulong *) GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &cache_end, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); else { if (vt->flags & KMALLOC_COMMON) { get_symbol_data("slab_caches", sizeof(ulong), &si->cache); si->cache -= OFFSET(kmem_cache_s_next); cache_end = symbol_value("slab_caches"); } else { get_symbol_data("cache_chain", sizeof(ulong), &si->cache); si->cache -= OFFSET(kmem_cache_s_next); cache_end = symbol_value("cache_chain"); } } if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_overload_page(si->spec_addr, &page_head, kbuf))) { si->flags |= SLAB_OVERLOAD_PAGE_PTR; si->spec_addr = page_head; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, 
si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); if (vt->kmem_cache_namelen) { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, buf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, &name, sizeof(ulong), "name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[IGNORED]"); goto next_cache; } if (bad_slab_cache(si->cache)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[INVALID/CORRUPTED]"); goto next_cache; } si->curname = buf; readmem(si->cache+OFFSET(kmem_cache_s_objsize), KVADDR, &tmp_val, sizeof(uint), "objsize", FAULT_ON_ERROR); si->size = (ulong)tmp_val; if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: objsize: %ld\n", si->curname, si->size); si->errors++; } } readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); si->c_flags = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_gfporder), KVADDR, &tmp_val, sizeof(uint), "gfporder", FAULT_ON_ERROR); si->order = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_num), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s num", FAULT_ON_ERROR); si->c_num = (ulong)tmp_val; if (vt->flags & PERCPU_KMALLOC_V2_NODES) { if (vt->flags & SLAB_OVERLOAD_PAGE) do_slab_chain_slab_overload_page(SLAB_GET_COUNTS, si); else do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); } else do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { DUMP_KMEM_CACHE_INFO(); if (CRASHDEBUG(3)) 
dump_struct("kmem_cache_s", si->cache, 0); } if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) gather_cpudata_list_v2(si); si->slab = (si->flags & ADDRESS_SPECIFIED) ? vaddr_to_slab(si->spec_addr) : 0; if (vt->flags & PERCPU_KMALLOC_V2_NODES) { if (vt->flags & SLAB_OVERLOAD_PAGE) do_slab_chain_slab_overload_page(SLAB_WALKTHROUGH, si); else do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); } else do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO(); fprintf(fp, "%s", slab_hdr); gather_slab_cached_count(si); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp,"(kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(slab)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_SLAB_FREELIST: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(on-slab freelist)\n"); break; case KMEM_SLAB_OVERLOAD_PAGE: si->flags &= ~ADDRESS_SPECIFIED; dump_slab_objects_percpu(si); si->flags |= ADDRESS_SPECIFIED; break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_CACHED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (cpu %d cache)\n", si->container ? si->container : (ulong)si->spec_addr, si->cpu); break; case KMEM_OBJECT_ADDR_SHARED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (shared cache)\n", si->container ? 
si->container : (ulong)si->spec_addr); break; } break; } } next_cache: readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &si->cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); if (si->cache != cache_end) si->cache -= OFFSET(kmem_cache_s_next); } while (si->cache != cache_end); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); FREEBUF(si->kmem_bufctl); for (i = 0; i < vt->kmem_max_cpus; i++) FREEBUF(si->cpudata[i]); FREEBUF(si->shared_array_cache); } /* * Walk through the slab chain hanging off a kmem_cache_s structure, * gathering basic statistics. * * TBD: Given a specified physical address, determine whether it's in this * slab chain, and whether it's in use or not. */ #define INSLAB(obj, si) \ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) static void do_slab_chain(int cmd, struct meminfo *si) { ulong tmp, magic; ulong kmem_slab_end; char *kmem_slab_s_buf; si->slabsize = (power(2, si->order) * PAGESIZE()); kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); switch (cmd) { case SLAB_GET_COUNTS: si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); if (slab_data_saved(si)) return; si->num_slabs = si->inuse = 0; if (si->slab == kmem_slab_end) return; kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); do { if (received_SIGINT()) { FREEBUF(kmem_slab_s_buf); restart(0); } readmem(si->slab, KVADDR, kmem_slab_s_buf, SIZE(kmem_slab_s), "kmem_slab_s buffer", FAULT_ON_ERROR); magic = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_magic)); if (magic == SLAB_MAGIC_ALLOC) { tmp = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_inuse)); si->inuse += tmp; si->num_slabs++; } else { fprintf(fp, "\"%s\" cache: invalid s_magic: %lx\n", si->curname, magic); si->errors++; FREEBUF(kmem_slab_s_buf); return; } si->slab = ULONG(kmem_slab_s_buf + 
OFFSET(kmem_slab_s_s_nextp)); } while (si->slab != kmem_slab_end); FREEBUF(kmem_slab_s_buf); save_slab_data(si); break; case SLAB_WALKTHROUGH: if (!si->slab) si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); if (si->slab == kmem_slab_end) return; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); do { if (received_SIGINT()) { FREEBUF(kmem_slab_s_buf); restart(0); } readmem(si->slab, KVADDR, kmem_slab_s_buf, SIZE(kmem_slab_s), "kmem_slab_s buffer", FAULT_ON_ERROR); dump_slab(si); if (si->found) { FREEBUF(kmem_slab_s_buf); return; } si->slab = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_nextp)); } while (si->slab != kmem_slab_end); FREEBUF(kmem_slab_s_buf); break; } } /* * do_slab_chain() adapted for newer percpu slab format. */ #define SLAB_BASE(X) (PTOB(BTOP(X))) #define INSLAB_PERCPU(obj, si) \ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) #define SLAB_CHAINS (3) static char *slab_chain_name_v1[] = {"full", "partial", "free"}; static void do_slab_chain_percpu_v1(long cmd, struct meminfo *si) { int i, tmp, s; int list_borked; char *slab_s_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; list_borked = 0; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; if (VALID_MEMBER(kmem_cache_s_slabs)) { slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); slab_chains[1] = 0; slab_chains[2] = 0; } else { slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); } if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } switch (cmd) { case SLAB_GET_COUNTS: si->flags |= 
SLAB_GET_COUNTS; si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; gather_cpudata_list_v1(si); slab_s_buf = GETBUF(SIZE(slab_s)); for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v1[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_s_buf); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_s_buf); restart(0); } if (!verify_slab_v1(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_s_list); readmem(si->slab, KVADDR, slab_s_buf, SIZE(slab_s), "slab_s buffer", FAULT_ON_ERROR); tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); si->inuse += tmp; if (ACTIVE()) gather_cpudata_list_v1(si); si->s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_s_buf + OFFSET(slab_s_list)); si->slab -= OFFSET(slab_s_list); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v1[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } FREEBUF(slab_s_buf); if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= SLAB_WALKTHROUGH; si->flags &= ~SLAB_GET_COUNTS; for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v1[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) restart(0); if (!verify_slab_v1(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_s_list); dump_slab_percpu_v1(si); if (si->found) { return; } readmem(si->slab+OFFSET(slab_s_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_s_list); } while (si->slab != slab_chains[s] && !list_borked); } break; } } /* * Try to preclude any attempt to translate a bogus slab structure. 
*/ static int verify_slab_v1(struct meminfo *si, ulong last, int s) { char slab_s_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v1[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_s_buf, SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *) (slab_s_buf + OFFSET(slab_s_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v1; switch (s) { case 0: /* full -- but can be one singular list */ if (VALID_MEMBER(kmem_cache_s_slabs_full) && (inuse != si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v1: s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? 
FALSE : TRUE); } /* * Updated for 2.6 slab substructure. */ static char *slab_chain_name_v2[] = {"partial", "full", "free"}; static void do_slab_chain_percpu_v2(long cmd, struct meminfo *si) { int i, tmp, s; int list_borked; char *slab_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; list_borked = 0; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_full); slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_free); if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } switch (cmd) { case SLAB_GET_COUNTS: si->flags |= SLAB_GET_COUNTS; si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; gather_cpudata_list_v2(si); slab_buf = GETBUF(SIZE(slab)); for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_buf); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_buf); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); tmp = INT(slab_buf + OFFSET(slab_inuse)); si->inuse += tmp; if (ACTIVE()) gather_cpudata_list_v2(si); si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_buf + OFFSET(slab_list)); si->slab -= 
OFFSET(slab_list); /* * Check for slab transition. (Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } FREEBUF(slab_buf); if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= SLAB_WALKTHROUGH; si->flags &= ~SLAB_GET_COUNTS; for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) restart(0); if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); dump_slab_percpu_v2(si); if (si->found) { return; } readmem(si->slab+OFFSET(slab_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_list); } while (si->slab != slab_chains[s] && !list_borked); } break; } } /* * Added To Traverse the Nodelists */ static void do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si) { int i, tmp, s, node; int list_borked; char *slab_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; ulong *start_address; int index; list_borked = 0; slab_buf = NULL; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0], sizeof(ulong) 
* vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) error(INFO, "cannot read kmem_cache nodelists array"); switch (cmd) { case SLAB_GET_COUNTS: si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE); si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; slab_buf = GETBUF(SIZE(slab)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_buf); FREEBUF(start_address); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_buf); FREEBUF(start_address); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); tmp = INT(slab_buf + OFFSET(slab_inuse)); si->inuse += tmp; si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_buf + OFFSET(slab_list)); si->slab -= OFFSET(slab_list); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } } if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); si->flags &= ~SLAB_GET_COUNTS; slab_buf = GETBUF(SIZE(slab)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: " "bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(slab_buf); restart(0); } if (!verify_slab_v2(si, last, s)) { 
list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); dump_slab_percpu_v2(si); if (si->found) { FREEBUF(start_address); FREEBUF(slab_buf); return; } readmem(si->slab+OFFSET(slab_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_list); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(slab_buf); FREEBUF(start_address); } static int slab_freelist_index_size(void) { struct datatype_member datatype, *dm; dm = &datatype; BZERO(dm, sizeof(*dm)); dm->name = "freelist_idx_t"; if (is_typedef(dm->name)) return DATATYPE_SIZE(dm); if (CRASHDEBUG(1)) error(INFO, "freelist_idx_t does not exist\n"); return sizeof(int); } static void do_slab_chain_slab_overload_page(long cmd, struct meminfo *si) { int i, tmp, s, node; int list_borked; char *page_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; ulong *start_address; int index; list_borked = 0; page_buf = NULL; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) error(INFO, "cannot read kmem_cache nodelists array"); switch (cmd) { case SLAB_GET_COUNTS: si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE); si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; page_buf = GETBUF(SIZE(page)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, 
index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad page/slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(page_buf); FREEBUF(start_address); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(page_buf); FREEBUF(start_address); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; readmem(si->slab - si->list_offset, KVADDR, page_buf, SIZE(page), "page (slab) buffer", FAULT_ON_ERROR); tmp = INT(page_buf + OFFSET(page_active)); si->inuse += tmp; si->s_mem = ULONG(page_buf + OFFSET(page_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(page_buf + si->list_offset); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } } if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: if (si->flags & SLAB_OVERLOAD_PAGE_PTR) { specified_slab = si->spec_addr; si->slab = si->spec_addr + si->list_offset; } else { specified_slab = si->slab; if (si->slab) si->slab += si->list_offset; } si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); si->flags &= ~SLAB_GET_COUNTS; page_buf = GETBUF(SIZE(page)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: " "bad page/slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; readmem(si->slab - si->list_offset, KVADDR, page_buf, SIZE(page), "page (slab) buffer", FAULT_ON_ERROR); si->s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & 
ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(page_buf); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; dump_slab_overload_page(si); if (si->found) { FREEBUF(start_address); FREEBUF(page_buf); return; } readmem(si->slab, KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(page_buf); FREEBUF(start_address); } /* * Try to preclude any attempt to translate a bogus slab structure. */ static int verify_slab_v2(struct meminfo *si, ulong last, int s) { char slab_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_buf + OFFSET(slab_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* full */ if (inuse != si->c_num) { error(INFO, "%s: %s list: slab: %lx bad 
inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? FALSE : TRUE); } static int verify_slab_overload_page(struct meminfo *si, ulong last, int s) { char *page_buf; struct kernel_list_head *list_head; unsigned int active; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; page_buf = GETBUF(SIZE(page)); errcnt = 0; if (!readmem(si->slab - si->list_offset, KVADDR, page_buf, SIZE(page), "page (slab) buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); FREEBUF(page_buf); return FALSE; } list_head = (struct kernel_list_head *)(page_buf + si->list_offset); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: page/slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: page/slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } active = UINT(page_buf + OFFSET(page_active)); if (active > si->c_num) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((active == 0) || (active == si->c_num)) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 1: /* full */ if (active != si->c_num) { error(INFO, "%s: %s list: page/slab: %lx 
bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 2: /* free */ if (active > 0) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: page/slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; FREEBUF(page_buf); return(errcnt ? FALSE : TRUE); } /* * If it's a dumpfile, save the essential slab data to avoid re-reading * the whole slab chain more than once. This may seem like overkill, but * if the problem is a memory leak, or just the over-use of the buffer_head * cache, it's painful to wait each time subsequent kmem -s or -i commands * simply need the basic slab counts. */ struct slab_data { ulong cache_addr; int num_slabs; int inuse; ulong cpucached_cache; }; #define NO_SLAB_DATA ((void *)(-1)) static void save_slab_data(struct meminfo *si) { int i; if (si->flags & SLAB_DATA_NOSAVE) { si->flags &= ~SLAB_DATA_NOSAVE; return; } if (ACTIVE()) return; if (vt->slab_data == NO_SLAB_DATA) return; if (!vt->slab_data) { if (!(vt->slab_data = (struct slab_data *) malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { error(INFO, "cannot malloc slab_data table"); vt->slab_data = NO_SLAB_DATA; return; } for (i = 0; i < vt->kmem_cache_count; i++) { vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; vt->slab_data[i].num_slabs = 0; vt->slab_data[i].inuse = 0; vt->slab_data[i].cpucached_cache = 0; } } for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == si->cache) break; if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { vt->slab_data[i].cache_addr = si->cache; vt->slab_data[i].num_slabs = si->num_slabs; vt->slab_data[i].inuse = si->inuse; vt->slab_data[i].cpucached_cache = si->cpucached_cache; break; } } } static int 
slab_data_saved(struct meminfo *si)
{
	int i;

	/*
	 * Saved slab counts only exist for dumpfile sessions; on a live
	 * system, or when the table was never built (or failed to
	 * allocate: NO_SLAB_DATA), there is nothing to consult.
	 */
	if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA))
		return FALSE;

	for (i = 0; i < vt->kmem_cache_count; i++) {
		if (vt->slab_data[i].cache_addr == si->cache) {
			/* Restore the counts gathered by a prior pass. */
			si->inuse = vt->slab_data[i].inuse;
			si->num_slabs = vt->slab_data[i].num_slabs;
			si->cpucached_cache = vt->slab_data[i].cpucached_cache;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Display the contents of the saved slab data table (debug aid).
 */
static void
dump_saved_slab_data(void)
{
	int i;

	if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA))
		return;

	for (i = 0; i < vt->kmem_cache_count; i++) {
		/* NO_SLAB_DATA in cache_addr marks the first unused slot. */
		if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA)
			break;

		fprintf(fp, " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n",
			vt->slab_data[i].cache_addr,
			vt->slab_data[i].inuse,
			vt->slab_data[i].num_slabs,
			vt->slab_data[i].cpucached_cache);
	}
}

/*
 * Dump the contents of a kmem slab.
 */
static void
dump_slab(struct meminfo *si)
{
	si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem));
	/* Round s_mem down to its containing page boundary. */
	si->s_mem = PTOB(BTOP(si->s_mem));

	if (si->flags & ADDRESS_SPECIFIED) {
		/*
		 * Classify the requested address: inside the kmem_slab_s
		 * header itself, or merely somewhere within the slab.
		 */
		if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) &&
		    (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) {
			si->found = KMEM_SLAB_ADDR;
			return;
		}
		if (INSLAB(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep));
	si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse));
	si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index));

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects(si);
}

/*
 * dump_slab() adapted for newer percpu slab format.
 */
static void
dump_slab_percpu_v1(struct meminfo *si)
{
	int tmp;

	readmem(si->slab+OFFSET(slab_s_s_mem),
		KVADDR, &si->s_mem, sizeof(ulong),
		"s_mem", FAULT_ON_ERROR);

	/*
	 * Include the array of kmem_bufctl_t's appended to slab.
*/
	tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num);

	if (si->flags & ADDRESS_SPECIFIED) {
		/*
		 * Classify the requested address: within the slab_s header,
		 * within the appended kmem_bufctl_t array, or merely within
		 * the slab's object pages.
		 */
		if (INSLAB_PERCPU(si->slab, si) &&
		    (si->spec_addr >= si->slab) &&
		    (si->spec_addr < (si->slab+tmp))) {
			if (si->spec_addr >= (si->slab + SIZE(slab_s)))
				si->found = KMEM_BUFCTL_ADDR;
			else
				si->found = KMEM_SLAB_ADDR;
		} else if (INSLAB_PERCPU(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	readmem(si->slab+OFFSET(slab_s_inuse),
		KVADDR, &tmp, sizeof(int),
		"inuse", FAULT_ON_ERROR);
	si->s_inuse = tmp;

	readmem(si->slab+OFFSET(slab_s_free),
		KVADDR, &si->free, SIZE(kmem_bufctl_t),
		"kmem_bufctl_t", FAULT_ON_ERROR);

	gather_slab_free_list_percpu(si);
	gather_slab_cached_count(si);

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects_percpu(si);
}

/*
 * Updated for 2.6 slab substructure.
 */
static void
dump_slab_percpu_v2(struct meminfo *si)
{
	int tmp;

	readmem(si->slab+OFFSET(slab_s_mem),
		KVADDR, &si->s_mem, sizeof(ulong),
		"s_mem", FAULT_ON_ERROR);

	/*
	 * Include the array of kmem_bufctl_t's appended to slab.
	 */
	tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num);

	if (si->flags & ADDRESS_SPECIFIED) {
		if (INSLAB_PERCPU(si->slab, si) &&
		    (si->spec_addr >= si->slab) &&
		    (si->spec_addr < (si->slab+tmp))) {
			if (si->spec_addr >= (si->slab + SIZE(slab)))
				si->found = KMEM_BUFCTL_ADDR;
			else
				si->found = KMEM_SLAB_ADDR;
		} else if (INSLAB_PERCPU(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	readmem(si->slab+OFFSET(slab_inuse),
		KVADDR, &tmp, sizeof(int),
		"inuse", FAULT_ON_ERROR);
	si->s_inuse = tmp;

	readmem(si->slab+OFFSET(slab_free),
		KVADDR, &si->free, SIZE(kmem_bufctl_t),
		"kmem_bufctl_t", FAULT_ON_ERROR);

	gather_slab_free_list_percpu(si);
	gather_slab_cached_count(si);

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects_percpu(si);
}

/*
 * dump_slab_percpu_v2() adapted for the "slab overload page" layout,
 * where the slab metadata (s_mem, freelist, active) is read out of the
 * page structure itself rather than a separate slab structure.
 */
static void
dump_slab_overload_page(struct meminfo *si)
{
	int tmp;
	ulong slab_overload_page, freelist;

	/* si->slab points at the embedded list head; back up to the page. */
	slab_overload_page = si->slab - si->list_offset;

	readmem(slab_overload_page + OFFSET(page_s_mem),
		KVADDR, &si->s_mem, sizeof(ulong),
		"page.s_mem", FAULT_ON_ERROR);

	readmem(slab_overload_page + OFFSET(page_freelist),
		KVADDR, &freelist, sizeof(ulong),
		"page.freelist", FAULT_ON_ERROR);

	if (si->flags & ADDRESS_SPECIFIED) {
		if ((si->spec_addr >= slab_overload_page) &&
		    (si->spec_addr < (slab_overload_page+SIZE(page)))) {
			si->found = KMEM_SLAB_OVERLOAD_PAGE;
		} else if (INSLAB_PERCPU(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	readmem(slab_overload_page + OFFSET(page_active),
		KVADDR, &tmp, sizeof(int),
		"active", FAULT_ON_ERROR);
	si->s_inuse = tmp;

	gather_slab_free_list_slab_overload_page(si);
	gather_slab_cached_count(si);

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects_percpu(si);
}

/*
 * Gather the free objects in a slab into the si->addrlist, checking for
 * specified addresses that are in-slab kmem_bufctls, and making error checks
 * along the way.  Object address checks are deferred to dump_slab_objects().
*/

#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size)))

static void
gather_slab_free_list(struct meminfo *si)
{
	ulong *next, obj;
	ulong expected, cnt;

	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));

	/* A NULL s_freep means the slab has no free objects at all. */
	if (!si->s_freep)
		return;

	cnt = 0;
	/* Free-object count implied by the slab's inuse counter. */
	expected = si->c_num - si->s_inuse;

	next = si->s_freep;
	do {
		if (cnt == si->c_num) {
			error(INFO, "\"%s\" cache: too many objects found in slab free list\n", si->curname);
			si->errors++;
			return;
		}

		/*
		 * Off-slab kmem_bufctls are contained in arrays of object
		 * pointers that point to:
		 *   1. next kmem_bufctl (or NULL) if the object is free.
		 *   2. to the object if the object is in use.
		 *
		 * On-slab kmem_bufctls reside just after the object itself,
		 * and point to:
		 *   1. next kmem_bufctl (or NULL) if object is free.
		 *   2. the containing slab if the object is in use.
		 */

		if (si->c_flags & SLAB_CFLGS_BUFCTL)
			obj = si->s_mem + ((next - si->s_index) * si->c_offset);
		else
			obj = (ulong)next - si->c_offset;

		si->addrlist[cnt] = obj;

		if (si->flags & ADDRESS_SPECIFIED) {
			/* Is the requested address this very kmem_bufctl? */
			if (INSLAB(next, si) &&
			    (si->spec_addr >= (ulong)next) &&
			    (si->spec_addr < (ulong)(next + 1))) {
				si->found = KMEM_BUFCTL_ADDR;
				return;
			}
		}

		cnt++;

		if (!INSLAB(obj, si)) {
			error(INFO, "\"%s\" cache: address not contained within slab: %lx\n", si->curname, obj);
			si->errors++;
		}

		/* Follow the chain to the next free-list entry. */
		readmem((ulong)next, KVADDR, &next, sizeof(void *),
			"s_freep chain entry", FAULT_ON_ERROR);
	} while (next);

	if (cnt != expected) {
		error(INFO, "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
			si->curname, expected, cnt);
		si->errors++;
	}
}

/*
 * gather_slab_free_list() adapted for newer percpu slab format.
*/

#define BUFCTL_END 0xffffFFFF

static void
gather_slab_free_list_percpu(struct meminfo *si)
{
	int i;
	ulong obj;
	ulong expected, cnt;
	int free_index;
	ulong kmembp;
	short *kbp;

	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));

	if (CRASHDEBUG(1))
		fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n",
			si->slab, si->s_inuse, si->c_num);

	/* A fully-used slab has no free list to walk. */
	if (si->s_inuse == si->c_num )
		return;

	/* The kmem_bufctl_t array is appended to the slab structure. */
	kmembp = si->slab + SIZE_OPTION(slab_s, slab);
	readmem((ulong)kmembp, KVADDR, si->kmem_bufctl,
		SIZE(kmem_bufctl_t) * si->c_num,
		"kmem_bufctl array", FAULT_ON_ERROR);

	if (CRASHDEBUG(1)) {
		/* Dump the raw index array; entries may be int or short. */
		for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) &&
		     (i < si->c_num); i++)
			fprintf(fp, "%d ", si->kmem_bufctl[i]);

		for (kbp = (short *)&si->kmem_bufctl[0], i = 0;
		     (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num);
		     i++)
			fprintf(fp, "%d ", *(kbp + i));

		fprintf(fp, "\n");
	}

	cnt = 0;
	expected = si->c_num - si->s_inuse;

	/*
	 * Walk the index chain starting at si->free until the BUFCTL_END
	 * sentinel, converting each free index to an object address.
	 */
	if (SIZE(kmem_bufctl_t) == sizeof(int)) {
		for (free_index = si->free; free_index != BUFCTL_END;
		     free_index = si->kmem_bufctl[free_index]) {
			if (cnt == si->c_num) {
				error(INFO, "\"%s\" cache: too many objects found in slab free list\n",
					si->curname);
				si->errors++;
				return;
			}

			obj = si->s_mem + (free_index*si->size);
			si->addrlist[cnt] = obj;
			cnt++;
		}
	} else if (SIZE(kmem_bufctl_t) == sizeof(short)) {
		kbp = (short *)&si->kmem_bufctl[0];

		for (free_index = si->free; free_index != BUFCTL_END;
		     free_index = (int)*(kbp + free_index)) {
			if (cnt == si->c_num) {
				error(INFO, "\"%s\" cache: too many objects found in slab free list\n",
					si->curname);
				si->errors++;
				return;
			}

			obj = si->s_mem + (free_index*si->size);
			si->addrlist[cnt] = obj;
			cnt++;
		}
	} else
		error(FATAL, "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n",
			SIZE(kmem_bufctl_t));

	if (cnt != expected) {
		error(INFO, "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
			si->curname, expected, cnt);
		si->errors++;
	}
}

static void
gather_slab_free_list_slab_overload_page(struct meminfo *si)
{
	int i, active, start_offset;
	ulong
obj, objnr, cnt, freelist; unsigned char *ucharptr; unsigned short *ushortptr; unsigned int *uintptr; unsigned int cache_flags, overload_active; ulong slab_overload_page; if (CRASHDEBUG(1)) fprintf(fp, "slab page: %lx active: %ld si->c_num: %ld\n", si->slab - si->list_offset, si->s_inuse, si->c_num); if (si->s_inuse == si->c_num ) return; slab_overload_page = si->slab - si->list_offset; readmem(slab_overload_page + OFFSET(page_freelist), KVADDR, &freelist, sizeof(void *), "page freelist", FAULT_ON_ERROR); readmem(freelist, KVADDR, si->freelist, si->freelist_index_size * si->c_num, "freelist array", FAULT_ON_ERROR); readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &cache_flags, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); readmem(slab_overload_page + OFFSET(page_active), KVADDR, &overload_active, sizeof(uint), "active", FAULT_ON_ERROR); BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); cnt = objnr = 0; ucharptr = NULL; ushortptr = NULL; uintptr = NULL; active = si->s_inuse; /* * On an OBJFREELIST slab, the object might have been recycled * and everything before the active count can be random data. 
*/ start_offset = 0; if (cache_flags & SLAB_CFLGS_OBJFREELIST) start_offset = overload_active; switch (si->freelist_index_size) { case 1: ucharptr = (unsigned char *)si->freelist + start_offset; break; case 2: ushortptr = (unsigned short *)si->freelist + start_offset; break; case 4: uintptr = (unsigned int *)si->freelist + start_offset; break; } for (i = start_offset; i < si->c_num; i++) { switch (si->freelist_index_size) { case 1: objnr = (ulong)*ucharptr++; break; case 2: objnr = (ulong)*ushortptr++; break; case 4: objnr = (ulong)*uintptr++; break; } if (objnr >= si->c_num) { error(INFO, "\"%s\" cache: invalid/corrupt freelist entry: %ld\n", si->curname, objnr); si->errors++; } if (i >= active) { obj = si->s_mem + (objnr * si->size); si->addrlist[cnt++] = obj; if (CRASHDEBUG(1)) fprintf(fp, "%ld ", objnr); } else if (CRASHDEBUG(1)) fprintf(fp, "[%ld] ", objnr); } if (CRASHDEBUG(1)) fprintf(fp, "\n"); } /* * Dump the FREE, [ALLOCATED] and objects of a slab. */ #define DUMP_SLAB_OBJECT() \ for (j = on_free_list = 0; j < si->c_num; j++) { \ if (obj == si->addrlist[j]) { \ on_free_list = TRUE; \ break; \ } \ } \ \ if (on_free_list) { \ if (!(si->flags & ADDRESS_SPECIFIED)) \ fprintf(fp, " %lx\n", obj); \ if (si->flags & ADDRESS_SPECIFIED) { \ if (INOBJECT(si->spec_addr, obj)) { \ si->found = \ KMEM_OBJECT_ADDR_FREE; \ si->container = obj; \ return; \ } \ } \ } else { \ if (!(si->flags & ADDRESS_SPECIFIED)) \ fprintf(fp, " [%lx]\n", obj); \ cnt++; \ if (si->flags & ADDRESS_SPECIFIED) { \ if (INOBJECT(si->spec_addr, obj)) { \ si->found = \ KMEM_OBJECT_ADDR_INUSE; \ si->container = obj; \ return; \ } \ } \ } static void dump_slab_objects(struct meminfo *si) { int i, j; ulong *next; int on_free_list; ulong cnt, expected; ulong bufctl, obj; gather_slab_free_list(si); if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) return; cnt = 0; expected = si->s_inuse; si->container = 0; if (CRASHDEBUG(1)) for (i = 0; i < si->c_num; i++) { fprintf(fp, 
"si->addrlist[%d]: %lx\n", i, si->addrlist[i]); } if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s", free_inuse_hdr); /* For on-slab bufctls, c_offset is the distance between the start of * an obj and its related bufctl. For off-slab bufctls, c_offset is * the distance between objs in the slab. */ if (si->c_flags & SLAB_CFLGS_BUFCTL) { for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { obj = si->s_mem + ((next - si->s_index) * si->c_offset); DUMP_SLAB_OBJECT(); } } else { /* * Get the "real" s_mem, i.e., without the offset stripped off. * It contains the address of the first object. */ readmem(si->slab+OFFSET(kmem_slab_s_s_mem), KVADDR, &obj, sizeof(ulong), "s_mem", FAULT_ON_ERROR); for (i = 0; i < si->c_num; i++) { DUMP_SLAB_OBJECT(); if (si->flags & ADDRESS_SPECIFIED) { bufctl = obj + si->c_offset; if ((si->spec_addr >= bufctl) && (si->spec_addr < (bufctl + SIZE(kmem_bufctl_t)))) { si->found = KMEM_BUFCTL_ADDR; return; } } obj += (si->c_offset + SIZE(kmem_bufctl_t)); } } if (cnt != expected) { error(INFO, "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } } /* * dump_slab_objects() adapted for newer percpu slab format. 
*/ static void dump_slab_objects_percpu(struct meminfo *si) { int i, j; int on_free_list, on_cpudata_list, on_shared_list; ulong cnt, expected; ulong obj, freelist; if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) if (!(si->found & KMEM_SLAB_OVERLOAD_PAGE)) return; cnt = 0; expected = si->s_inuse; si->container = 0; if (CRASHDEBUG(1)) for (i = 0; i < si->c_num; i++) { fprintf(fp, "si->addrlist[%d]: %lx\n", i, si->addrlist[i]); } if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s", free_inuse_hdr); for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { on_free_list = FALSE; on_cpudata_list = FALSE; on_shared_list = FALSE; for (j = 0; j < si->c_num; j++) { if (obj == si->addrlist[j]) { on_free_list = TRUE; break; } } on_cpudata_list = check_cpudata_list(si, obj); on_shared_list = check_shared_list(si, obj); if (on_free_list && on_cpudata_list) { error(INFO, "\"%s\" cache: object %lx on both free and cpu %d lists\n", si->curname, obj, si->cpu); si->errors++; } if (on_free_list && on_shared_list) { error(INFO, "\"%s\" cache: object %lx on both free and shared lists\n", si->curname, obj); si->errors++; } if (on_cpudata_list && on_shared_list) { error(INFO, "\"%s\" cache: object %lx on both cpu %d and shared lists\n", si->curname, obj, si->cpu); si->errors++; } if (on_free_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx\n", obj); if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_FREE; si->container = obj; return; } } } else if (on_cpudata_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx (cpu %d cache)\n", obj, si->cpu); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_CACHED; si->container = obj; return; } } } else if (on_shared_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx (shared cache)\n", obj); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { 
si->found = KMEM_OBJECT_ADDR_SHARED;
					si->container = obj;
					return;
				}
			}
		} else {
			/* Not on any cache list: a genuinely in-use object. */
			if (!(si->flags & ADDRESS_SPECIFIED))
				fprintf(fp, " [%lx]\n", obj);
			cnt++;
			if (si->flags & ADDRESS_SPECIFIED) {
				if (INOBJECT(si->spec_addr, obj)) {
					si->found = KMEM_OBJECT_ADDR_INUSE;
					si->container = obj;
					return;
				}
			}
		}
	}

	if (cnt != expected) {
		error(INFO, "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
			si->curname, expected, cnt);
		si->errors++;
	}

	/*
	 * For overload-page slabs, the requested address may fall inside
	 * the freelist index array that precedes the first object.
	 */
	if ((si->flags & ADDRESS_SPECIFIED) &&
	    (vt->flags & SLAB_OVERLOAD_PAGE)) {
		readmem(si->slab - si->list_offset + OFFSET(page_freelist),
			KVADDR, &freelist, sizeof(ulong), "page.freelist",
			FAULT_ON_ERROR);
		if ((si->spec_addr >= freelist) && (si->spec_addr < si->s_mem))
			si->found = KMEM_SLAB_FREELIST;
	}
}

/*
 * Determine how many of the "inuse" slab objects are actually cached
 * in the kmem_cache_s header.  Set the per-slab count and update the
 * cumulative per-cache count.  With the addition of the shared list
 * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat
 * misleading.  But they both are types of objects that are cached
 * in the kmem_cache_s header, just not necessarily per-cpu.
 */
static void
gather_slab_cached_count(struct meminfo *si)
{
	int i;
	ulong obj;
	int in_cpudata, in_shared;

	si->cpucached_slab = 0;

	for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
		in_cpudata = in_shared = 0;

		if (check_cpudata_list(si, obj)) {
			in_cpudata = TRUE;
			si->cpucached_slab++;
			if (si->flags & SLAB_GET_COUNTS) {
				si->cpucached_cache++;
			}
		}

		if (check_shared_list(si, obj)) {
			in_shared = TRUE;
			/* Don't double-count an object on both lists. */
			if (!in_cpudata) {
				si->cpucached_slab++;
				if (si->flags & SLAB_GET_COUNTS) {
					si->cpucached_cache++;
				}
			}
		}

		if (in_cpudata && in_shared) {
			/* Inconsistent lists: don't save these counts. */
			si->flags |= SLAB_DATA_NOSAVE;
			if (!(si->flags & VERBOSE))
				error(INFO, "\"%s\" cache: object %lx on both cpu %d and shared lists\n",
					si->curname, obj, si->cpu);
		}
	}
}

/*
 * Populate the percpu object list for a given slab.
*/
static void
gather_cpudata_list_v1(struct meminfo *si)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];

	/* Pre-2.6 kernels without per-cpu caches have no cpudata member. */
	if (INVALID_MEMBER(kmem_cache_s_cpudata))
		return;

	readmem(si->cache+OFFSET(kmem_cache_s_cpudata),
		KVADDR, &cpudata[0],
		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
		"cpudata array", FAULT_ON_ERROR);

	for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) &&
	     cpudata[i]; i++) {
		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);

		readmem(cpudata[i]+OFFSET(cpucache_s_avail),
			KVADDR, &avail, sizeof(int),
			"cpucache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		if (avail > vt->kmem_max_limit) {
			error(INFO, "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n",
				si->curname, avail, vt->kmem_max_limit);
			si->errors++;
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
				si->curname, i, avail);

		/* The object pointer array immediately follows cpucache_s. */
		readmem(cpudata[i]+SIZE(cpucache_s),
			KVADDR, si->cpudata[i],
			sizeof(void *) * avail,
			"cpucache avail", FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx\n", si->cpudata[i][j]);
	}
}

/*
 * Updated for 2.6 slab percpu data structure, this also gathers
 * the shared array_cache list as well.
*/
static void
gather_cpudata_list_v2(struct meminfo *si)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];
	ulong shared;

	readmem(si->cache+OFFSET(kmem_cache_s_array),
		KVADDR, &cpudata[0],
		sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
		"array_cache array", FAULT_ON_ERROR);

	for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) &&
	     cpudata[i]; i++) {
		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);

		readmem(cpudata[i]+OFFSET(array_cache_avail),
			KVADDR, &avail, sizeof(int),
			"array cache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		if (avail > vt->kmem_max_limit) {
			/*
			 * NOTE(review): error is reported but the readmem
			 * below still copies "avail" entries into
			 * si->cpudata[i] (sized by kmem_max_limit) --
			 * confirm this cannot overflow.
			 */
			error(INFO,
			    "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
				si->curname, avail, vt->kmem_max_limit);
			si->errors++;
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
				si->curname, i, avail);

		/* The object pointer array follows the array_cache header. */
		readmem(cpudata[i]+SIZE(array_cache), KVADDR, si->cpudata[i],
			sizeof(void *) * avail, "array_cache avail",
			FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx (cpu %d)\n",
					si->cpudata[i][j], i);
	}

	/*
	 * If the shared list contains anything, gather them as well.
	 */
	BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit);

	/* Bail out quietly if any link in the pointer chain is unreadable. */
	if (!VALID_MEMBER(kmem_list3_shared) ||
	    !VALID_MEMBER(kmem_cache_s_lists) ||
	    !readmem(si->cache+OFFSET(kmem_cache_s_lists)+
	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) ||
	    !readmem(shared+OFFSET(array_cache_avail), KVADDR, &avail,
	    sizeof(int), "shared array_cache avail",
	    RETURN_ON_ERROR|QUIET) || !avail)
		return;

	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
			si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		return;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "%s: shared avail: %d\n",
			si->curname, avail);

	readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache,
		sizeof(void *) * avail, "shared array_cache avail",
		FAULT_ON_ERROR);

	if (CRASHDEBUG(2))
		for (j = 0; j < avail; j++)
			fprintf(fp, " %lx (shared list)\n",
				si->shared_array_cache[j]);
}

/*
 * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache
 */
static void
gather_cpudata_list_v2_nodes(struct meminfo *si, int index)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];
	ulong shared, percpu_ptr;
	ulong *start_address;

	start_address = (ulong *) GETBUF(sizeof(ulong) *
		vt->kmem_cache_len_nodes);

	if (vt->flags & SLAB_CPU_CACHE) {
		/*
		 * kmem_cache.cpu_cache is a percpu pointer: each cpu's
		 * array_cache lives at the base plus that cpu's offset.
		 */
		readmem(si->cache+OFFSET(kmem_cache_cpu_cache), KVADDR,
			&percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache",
			FAULT_ON_ERROR);
		for (i = 0; i < vt->kmem_max_cpus; i++)
			cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i];
	} else {
		readmem(si->cache+OFFSET(kmem_cache_s_array),
			KVADDR, &cpudata[0],
			sizeof(ulong) * vt->kmem_max_cpus,
			"array_cache array", FAULT_ON_ERROR);
	}

	/* Per-cpu caches are only gathered on the first node (index == 0). */
	for (i = 0; (i < vt->kmem_max_cpus) && cpudata[i] &&
	     !(index); i++) {
		if (si->cpudata[i])
			BZERO(si->cpudata[i],
			    sizeof(ulong) * vt->kmem_max_limit);
		else
			continue;

		readmem(cpudata[i]+OFFSET(array_cache_avail),
			KVADDR, &avail, sizeof(int),
			"array cache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		if (avail >
vt->kmem_max_limit) {
			error(INFO,
			    "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
				si->curname, avail, vt->kmem_max_limit);
			si->errors++;
			continue;
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
				si->curname, i, avail);

		/* The object pointer array follows the array_cache header. */
		readmem(cpudata[i]+SIZE(array_cache), KVADDR, si->cpudata[i],
			sizeof(void *) * avail, "array_cache avail",
			FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx (cpu %d)\n",
					si->cpudata[i][j], i);
	}

	/*
	 * If the shared list contains anything, gather them as well.
	 */
	if (si->flags & SLAB_FIRST_NODE) {
		/* First node of this cache: reset the accumulated list. */
		BZERO(si->shared_array_cache, sizeof(ulong) *
			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
		si->current_cache_index = 0;
	}

	/* Bail out (and free the node list buffer) on any unreadable link. */
	if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0],
	    sizeof(ulong) * vt->kmem_cache_len_nodes ,
	    "array nodelist array", RETURN_ON_ERROR) ||
	    !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR,
	    &shared, sizeof(void *), "kmem_list3 shared",
	    RETURN_ON_ERROR|QUIET) || !shared ||
	    !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail,
	    sizeof(int), "shared array_cache avail",
	    RETURN_ON_ERROR|QUIET) || !avail) {
		FREEBUF(start_address);
		return;
	}

	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
			si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		FREEBUF(start_address);
		return;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "%s: shared avail: %d\n",
			si->curname, avail);

	readmem(shared+SIZE(array_cache), KVADDR,
		si->shared_array_cache + si->current_cache_index,
		sizeof(void *) * avail, "shared array_cache avail",
		FAULT_ON_ERROR);

	/*
	 * NOTE(review): this cumulative-limit check runs after the readmem
	 * above has already copied into the buffer -- verify the ordering.
	 */
	if ((si->current_cache_index + avail) >
	    (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) {
		error(INFO,
		    "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n",
			si->curname,
			si->current_cache_index + avail,
			vt->kmem_max_limit * vt->kmem_cache_len_nodes);
		si->errors++;
		FREEBUF(start_address);
		return;
	}

	if (CRASHDEBUG(2))
		for (j = si->current_cache_index;
		     j < (si->current_cache_index + avail); j++)
			fprintf(fp, " %lx (shared list)\n",
				si->shared_array_cache[j]);

	/* Advance the append position for the next node's shared list. */
	si->current_cache_index += avail;

	FREEBUF(start_address);
}

/*
 * Check whether a given address is contained in the previously-gathered
 * percpu object cache.
 */
static int
check_cpudata_list(struct meminfo *si, ulong obj)
{
	int i, j;

	for (i = 0; i < vt->kmem_max_cpus; i++) {
		for (j = 0; si->cpudata[i][j]; j++)
			if (si->cpudata[i][j] == obj) {
				/* Record which cpu's cache held the object. */
				si->cpu = i;
				return TRUE;
			}
	}

	return FALSE;
}

/*
 * Check whether a given address is contained in the previously-gathered
 * shared object cache.
 */
static int
check_shared_list(struct meminfo *si, ulong obj)
{
	int i;

	if (INVALID_MEMBER(kmem_list3_shared) ||
	    !si->shared_array_cache)
		return FALSE;

	for (i = 0; si->shared_array_cache[i]; i++) {
		if (si->shared_array_cache[i] == obj)
			return TRUE;
	}

	return FALSE;
}

/*
 * Search the various memory subsystems for instances of this address.
 * Start with the most specific areas, ending up with at least the
 * mem_map page data.
 */
static void
kmem_search(struct meminfo *mi)
{
	struct syment *sp;
	struct meminfo tmp_meminfo;
	char buf[BUFSIZE];
	ulong vaddr, orig_flags;
	physaddr_t paddr;
	ulong offset;
	ulong task;
	ulong show_flags;
	struct task_context *tc;

	vaddr = 0;
	pc->curcmd_flags &= ~HEADER_PRINTED;
	pc->curcmd_flags |= IGNORE_ERRORS;

	switch (mi->memtype)
	{
	case KVADDR:
		vaddr = mi->spec_addr;
		break;

	case PHYSADDR:
		/* Only unity-mapped physical addresses translate directly. */
		vaddr = mi->spec_addr < VTOP(vt->high_memory) ?
			PTOV(mi->spec_addr) : BADADDR;
		break;
	}

	orig_flags = mi->flags;
	mi->retval = 0;

	/*
	 * Check first for a possible symbolic display of the virtual
	 * address associated with mi->spec_addr or PTOV(mi->spec_addr).
	 */
	if (((vaddr >= kt->stext) && (vaddr <= kt->end)) ||
	    IS_MODULE_VADDR(mi->spec_addr)) {
		if ((sp = value_search(vaddr, &offset))) {
			show_flags = SHOW_LINENUM | SHOW_RADIX();
			/* Tag module symbols so the module name is shown. */
			if (module_symbol(sp->value, NULL, NULL, NULL, 0))
				show_flags |= SHOW_MODULE;
			show_symbol(sp, offset, show_flags);
			fprintf(fp, "\n");
		}
	}

	/*
	 * Check for a valid mapped address.
	 */
	if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) {
		if ((task = stkptr_to_task(vaddr)) &&
		    (tc = task_to_context(task))) {
			show_context(tc);
			fprintf(fp, "\n");
		}
		if (kvtop(NULL, mi->spec_addr, &paddr, 0)) {
			mi->flags = orig_flags | VMLIST_VERIFY;
			dump_vmlist(mi);
			if (mi->retval) {
				mi->flags = orig_flags;
				dump_vmlist(mi);
				fprintf(fp, "\n");
				/*
				 * Continue the search with the translated
				 * physical address.
				 */
				mi->spec_addr = paddr;
				mi->memtype = PHYSADDR;
				goto mem_map;
			}
		}
	}
	/*
	 * If the address is physical, check whether it's in vmalloc space.
	 */
	if (mi->memtype == PHYSADDR) {
		mi->flags = orig_flags;
		mi->flags |= GET_PHYS_TO_VMALLOC;
		mi->retval = 0;
		dump_vmlist(mi);
		mi->flags &= ~GET_PHYS_TO_VMALLOC;

		if (mi->retval) {
			if ((task = stkptr_to_task(mi->retval)) &&
			    (tc = task_to_context(task))) {
				show_context(tc);
				fprintf(fp, "\n");
			}
			if ((sp = value_search(mi->retval, &offset))) {
				show_symbol(sp, offset,
					SHOW_LINENUM | SHOW_RADIX());
				fprintf(fp, "\n");
			}
			dump_vmlist(mi);
			fprintf(fp, "\n");
			goto mem_map;
		}
	}

	/*
	 * Check whether the containing page belongs to the slab subsystem.
	 */
	mi->flags = orig_flags;
	mi->retval = 0;
	if ((vaddr != BADADDR) &&
	    vaddr_to_kmem_cache(vaddr, buf, VERBOSE)) {
		BZERO(&tmp_meminfo, sizeof(struct meminfo));
		tmp_meminfo.spec_addr = vaddr;
		tmp_meminfo.memtype = KVADDR;
		tmp_meminfo.flags = mi->flags;
		vt->dump_kmem_cache(&tmp_meminfo);
		fprintf(fp, "\n");
	}
	if ((vaddr != BADADDR) && is_slab_page(mi, buf)) {
		BZERO(&tmp_meminfo, sizeof(struct meminfo));
		tmp_meminfo.spec_addr = vaddr;
		tmp_meminfo.memtype = KVADDR;
		tmp_meminfo.flags = mi->flags;
		vt->dump_kmem_cache(&tmp_meminfo);
		fprintf(fp, "\n");
	}

	/*
	 * Check free list.
	 */
	mi->flags = orig_flags;
	mi->retval = 0;
	vt->dump_free_pages(mi);
	if (mi->retval)
		fprintf(fp, "\n");

	if (vt->page_hash_table) {
		/*
		 * Check the page cache.
		 */
		mi->flags = orig_flags;
		mi->retval = 0;
		dump_page_hash_table(mi);
		if (mi->retval)
			fprintf(fp, "\n");
	}

	/*
	 * Check whether it's a current task or stack address.
	 */
	if ((mi->memtype & (KVADDR|PHYSADDR)) &&
	    (task = vaddr_in_task_struct(vaddr)) &&
	    (tc = task_to_context(task))) {
		show_context(tc);
		fprintf(fp, "\n");
	} else if ((mi->memtype & (KVADDR|PHYSADDR)) &&
	    (task = stkptr_to_task(vaddr)) &&
	    (tc = task_to_context(task))) {
		show_context(tc);
		fprintf(fp, "\n");
	}

mem_map:
	mi->flags = orig_flags;
	pc->curcmd_flags &= ~HEADER_PRINTED;
	if (vaddr != BADADDR)
		dump_mem_map(mi);
	else
		mi->retval = FALSE;

	if (!mi->retval)
		fprintf(fp, "%llx: %s address not found in mem map\n",
			mi->spec_addr, memtype_string(mi->memtype, 0));
}

/*
 * Default no-op for architectures without a machine-specific
 * page-pointer check.
 */
int
generic_is_page_ptr(ulong addr, physaddr_t *phys)
{
	return FALSE;
}

/*
 * Determine whether an address is a page pointer from the mem_map[] array.
 * If the caller requests it, return the associated physical address.
 */
int
is_page_ptr(ulong addr, physaddr_t *phys)
{
	int n;
	ulong ppstart, ppend;
	struct node_table *nt;
	ulong pgnum, node_size;
	ulong nr, sec_addr;
	ulong nr_mem_sections;
	ulong coded_mem_map, mem_map, end_mem_map;
	physaddr_t section_paddr;

	/* Give the architecture-specific check first crack. */
	if (machdep->is_page_ptr(addr, phys))
		return TRUE;

	if (IS_SPARSEMEM()) {
		/* Walk every valid section's decoded mem_map range. */
		nr_mem_sections = vt->max_mem_section_nr+1;
		for (nr = 0; nr < nr_mem_sections ; nr++) {
			if ((sec_addr = valid_section_nr(nr))) {
				coded_mem_map = section_mem_map_addr(sec_addr, 0);
				mem_map = sparse_decode_mem_map(coded_mem_map, nr);
				end_mem_map = mem_map + (PAGES_PER_SECTION() * SIZE(page));

				if ((addr >= mem_map) && (addr < end_mem_map)) {
					/* Must fall on a page struct boundary. */
					if ((addr - mem_map) % SIZE(page))
						return FALSE;
					if (phys) {
						section_paddr = PTOB(section_nr_to_pfn(nr));
						pgnum = (addr - mem_map) / SIZE(page);
						*phys = section_paddr + ((physaddr_t)pgnum * PAGESIZE());
					}
					return TRUE;
				}
			}
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		ppstart = nt->mem_map;
		ppend = ppstart + (node_size * SIZE(page));

		if ((addr < ppstart) || (addr >= ppend))
			continue;

		/*
		 * We're in the mem_map range -- but is it a page pointer?
		 */
		if ((addr - ppstart) % SIZE(page))
			return FALSE;

		if (phys) {
			pgnum = (addr - nt->mem_map) / SIZE(page);
			*phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr;
		}

		return TRUE;
	}

	return FALSE;

	/*
	 * NOTE(review): the PRE_NODES section below follows an unconditional
	 * return and is unreachable even when PRE_NODES is defined.
	 */
#ifdef PRE_NODES
	ppstart = vt->mem_map;
	ppend = ppstart + (vt->total_pages * vt->page_struct_len);

	if ((addr < ppstart) || (addr >= ppend))
		return FALSE;

	if ((addr - ppstart) % vt->page_struct_len)
		return FALSE;

	return TRUE;
#endif
}

/*
 * Return the physical address associated with this page pointer.
 */
static int
page_to_phys(ulong pp, physaddr_t *phys)
{
	return(is_page_ptr(pp, phys));
}

/*
 * Return the page pointer associated with this physical address.
 */
int
phys_to_page(physaddr_t phys, ulong *pp)
{
	int n;
	ulong pgnum;
	struct node_table *nt;
	physaddr_t pstart, pend;
	ulong node_size;

	if (IS_SPARSEMEM()) {
		ulong map;
		map = pfn_to_map(phys >> PAGESHIFT());
		if (map) {
			*pp = map;
			return TRUE;
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		pstart = nt->start_paddr;
		pend = pstart + ((ulonglong)node_size * PAGESIZE());

		if ((phys < pstart) || (phys >= pend))
			continue;
		/*
		 * We're in the physical range -- calculate the page.
		 */
		pgnum = BTOP(phys - pstart);
		*pp = nt->mem_map + (pgnum * SIZE(page));

		return TRUE;
	}

	return FALSE;

	/*
	 * NOTE(review): the PRE_NODES section below follows an unconditional
	 * return and is unreachable even when PRE_NODES is defined.
	 */
#ifdef PRE_NODES
	if (phys >= (vt->total_pages * PAGESIZE()))
		return FALSE;

	pgnum = PTOB(BTOP(phys)) / PAGESIZE();
	*pp = vt->mem_map + (pgnum * vt->page_struct_len);

	return TRUE;
#endif
}

/*
 * Fill the caller's buffer with up to maxlen non-NULL bytes
 * starting from kvaddr, returning the number of consecutive
 * non-NULL bytes found.  If the buffer gets filled with
 * maxlen bytes without a NULL, then the caller is responsible
 * for handling it.
 */
int
read_string(ulong kvaddr, char *buf, int maxlen)
{
	int i;

	BZERO(buf, maxlen);

	/* Best-effort read: a failed read leaves the buffer zeroed. */
	readmem(kvaddr, KVADDR, buf, maxlen,
	    "read_string characters", QUIET|RETURN_ON_ERROR);

	/* Stop at the first NUL and clear any residue after it. */
	for (i = 0; i < maxlen; i++) {
		if (buf[i] == NULLCHAR) {
			BZERO(&buf[i], maxlen-i);
			break;
		}
	}

	return i;
}

/*
 * "help -v" output
 */
void
dump_vm_table(int verbose)
{
	int i;
	struct node_table *nt;
	int others;
	ulong *up;

	others = 0;
	fprintf(fp, " flags: %lx %s(",
		vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : "");
	if (vt->flags & NODES)
		fprintf(fp, "%sNODES", others++ ? "|" : "");
	if (vt->flags & NODES_ONLINE)
		fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : "");
	if (vt->flags & ZONES)
		fprintf(fp, "%sZONES", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V1)
		fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2)
		fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : "");
	if (vt->flags & COMMON_VADDR)
		fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_INIT)
		fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : "");
	if (vt->flags & V_MEM_MAP)
		fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_UNAVAIL)
		fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : "");
	if (vt->flags & DISCONTIGMEM)
		fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : "");
	if (vt->flags & FLATMEM)
		fprintf(fp, "%sFLATMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM)
		fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");\
	if (vt->flags & SPARSEMEM_EX)
		fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");\
	if (vt->flags & KMEM_CACHE_DELAY)
		fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");\
	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");\
	if (vt->flags & VM_STAT)
		fprintf(fp, "%sVM_STAT", others++ ? "|" : "");\
	if (vt->flags & KMALLOC_SLUB)
		fprintf(fp, "%sKMALLOC_SLUB", others++ ? "|" : "");\
	if (vt->flags & KMALLOC_COMMON)
		fprintf(fp, "%sKMALLOC_COMMON", others++ ? "|" : "");\
	if (vt->flags & SLAB_OVERLOAD_PAGE)
		fprintf(fp, "%sSLAB_OVERLOAD_PAGE", others++ ? "|" : "");\
	if (vt->flags & SLAB_CPU_CACHE)
		fprintf(fp, "%sSLAB_CPU_CACHE", others++ ? "|" : "");\
	if (vt->flags & SLAB_ROOT_CACHES)
		fprintf(fp, "%sSLAB_ROOT_CACHES", others++ ? "|" : "");\
	if (vt->flags & USE_VMAP_AREA)
		fprintf(fp, "%sUSE_VMAP_AREA", others++ ? "|" : "");\
	if (vt->flags & USE_VMAP_NODES)
		fprintf(fp, "%sUSE_VMAP_NODES", others++ ? "|" : "");\
	if (vt->flags & CONFIG_NUMA)
		fprintf(fp, "%sCONFIG_NUMA", others++ ? "|" : "");\
	if (vt->flags & VM_EVENT)
		fprintf(fp, "%sVM_EVENT", others++ ? "|" : "");\
	if (vt->flags & PGCNT_ADJ)
		fprintf(fp, "%sPGCNT_ADJ", others++ ? "|" : "");\
	if (vt->flags & PAGEFLAGS)
		fprintf(fp, "%sPAGEFLAGS", others++ ? "|" : "");\
	if (vt->flags & SWAPINFO_V1)
		fprintf(fp, "%sSWAPINFO_V1", others++ ? "|" : "");\
	if (vt->flags & SWAPINFO_V2)
		fprintf(fp, "%sSWAPINFO_V2", others++ ? "|" : "");\
	if (vt->flags & NODELISTS_IS_PTR)
		fprintf(fp, "%sNODELISTS_IS_PTR", others++ ? "|" : "");\
	if (vt->flags & VM_INIT)
		fprintf(fp, "%sVM_INIT", others++ ? "|" : "");\
	if (vt->flags & SLAB_PAGEFLAGS)
		fprintf(fp, "%sSLAB_PAGEFLAGS", others++ ? "|" : "");\
	fprintf(fp, ")\n");

	if (vt->kernel_pgd[0] == vt->kernel_pgd[1])
		fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n",
			vt->kernel_pgd[0]);
	else {
		fprintf(fp, " kernel_pgd[NR_CPUS]: ");
		for (i = 0; i < NR_CPUS; i++) {
			if ((i % 4) == 0)
				fprintf(fp, "\n ");
			fprintf(fp, "%lx ", vt->kernel_pgd[i]);
		}
		fprintf(fp, "\n");
	}
	fprintf(fp, " high_memory: %lx\n", vt->high_memory);
	fprintf(fp, " vmalloc_start: %lx\n", vt->vmalloc_start);
	fprintf(fp, " mem_map: %lx\n", vt->mem_map);
	fprintf(fp, " total_pages: %ld\n", vt->total_pages);
	fprintf(fp, " max_mapnr: %ld\n", vt->max_mapnr);
	fprintf(fp, " totalram_pages: %ld\n", vt->totalram_pages);
	fprintf(fp, " totalhigh_pages: %ld\n", vt->totalhigh_pages);
	fprintf(fp, " num_physpages: %ld\n", vt->num_physpages);
	fprintf(fp, " page_hash_table: %lx\n", vt->page_hash_table);
	fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len);
	fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
	fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit);
	fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
	fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count);
	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
	fprintf(fp, "kmem_cache_len_nodes: %ld\n", vt->kmem_cache_len_nodes);
	fprintf(fp, " nr_bad_slab_caches: %d\n", vt->nr_bad_slab_caches);
	if (!vt->nr_bad_slab_caches)
		fprintf(fp, " bad_slab_caches: (unused)\n");
	else {
		for (i = 0; i < vt->nr_bad_slab_caches; i++) {
			fprintf(fp, " bad_slab_caches[%d]: %lx\n",
				i, vt->bad_slab_caches[i]);
		}
	}
	fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen);
	fprintf(fp, " numnodes: %d\n", vt->numnodes);
	fprintf(fp, " nr_zones: %d\n", vt->nr_zones);
	fprintf(fp, " nr_free_areas: %d\n", vt->nr_free_areas);
	for (i = 0; i < vt->numnodes; i++) {
		nt = &vt->node_table[i];
		fprintf(fp, " node_table[%d]: \n", i);
		fprintf(fp, " id: %d\n", nt->node_id);
		fprintf(fp, " pgdat: %lx\n", nt->pgdat);
		fprintf(fp, " size: %ld\n", nt->size);
		fprintf(fp, " present: %ld\n", nt->present);
		fprintf(fp, " mem_map: %lx\n", nt->mem_map);
		fprintf(fp, " start_paddr: %llx\n", nt->start_paddr);
		fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr);
	}

	/* Identify which handler the function pointers were wired to. */
	fprintf(fp, " dump_free_pages: ");
	if (vt->dump_free_pages == dump_free_pages)
		fprintf(fp, "dump_free_pages()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v1)
		fprintf(fp, "dump_free_pages_zones_v1()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v2)
		fprintf(fp, "dump_free_pages_zones_v2()\n");
	else if (vt->dump_free_pages == dump_multidimensional_free_pages)
		fprintf(fp, "dump_multidimensional_free_pages()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages);

	fprintf(fp, " dump_kmem_cache: ");
	if (vt->dump_kmem_cache == dump_kmem_cache)
		fprintf(fp, "dump_kmem_cache()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1)
		fprintf(fp, "dump_kmem_cache_percpu_v1()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2)
		fprintf(fp, "dump_kmem_cache_percpu_v2()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_slub)
		fprintf(fp, "dump_kmem_cache_slub()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache);
	fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data);
	if (verbose)
		dump_saved_slab_data();
	fprintf(fp, " cpu_slab_type: %d\n", vt->cpu_slab_type);
	fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles);
	fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read);
	fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct);
	fprintf(fp, " mem_sec: %lx\n", (ulong)vt->mem_sec);
	fprintf(fp, " mem_section: %lx\n", (ulong)vt->mem_section);
	fprintf(fp, " max_mem_section_nr: %ld\n", (ulong)vt->max_mem_section_nr);
	fprintf(fp, " ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM);
	fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len);
	if (vt->node_online_map_len) {
		fprintf(fp, " node_online_map: ");
		up = (ulong *)vt->node_online_map;
		for (i = 0; i < vt->node_online_map_len; i++) {
			fprintf(fp, "%s%lx", i ? ", " : "[", *up);
			up++;
		}
		fprintf(fp, "]\n");
	} else {
		fprintf(fp, " node_online_map: (unused)\n");
	}
	fprintf(fp, " zero_paddr: %lx\n", vt->zero_paddr);
	fprintf(fp, " huge_zero_paddr: %lx\n", vt->huge_zero_paddr);
	fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items);
	fprintf(fp, " vm_stat_items: %s", (vt->flags & VM_STAT) ?
		"\n" : "(not used)\n");
	for (i = 0; i < vt->nr_vm_stat_items; i++)
		fprintf(fp, " [%d] %s\n", i, vt->vm_stat_items[i]);
	fprintf(fp, " nr_vm_event_items: %d\n", vt->nr_vm_event_items);
	fprintf(fp, " vm_event_items: %s", (vt->flags & VM_EVENT) ?
		"\n" : "(not used)\n");
	for (i = 0; i < vt->nr_vm_event_items; i++)
		fprintf(fp, " [%d] %s\n", i, vt->vm_event_items[i]);
	fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved);
	fprintf(fp, " PG_slab: %ld (%lx)\n", vt->PG_slab,
		(ulong)1 << vt->PG_slab);
	fprintf(fp, " PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask);
	fprintf(fp, " nr_pageflags: %d\n", vt->nr_pageflags);
	fprintf(fp, " pageflags_data: %s\n",
		vt->nr_pageflags ? "" : "(not used)");
	for (i = 0; i < vt->nr_pageflags; i++) {
		fprintf(fp, " %s[%d] %08lx: %s\n",
			i < 10 ? " " : "", i,
			vt->pageflags_data[i].mask,
			vt->pageflags_data[i].name);
	}
	fprintf(fp, " page_type_base: %x\n", vt->page_type_base);

	dump_vma_cache(VERBOSE);
}

/*
 * Calculate the amount of memory referenced in the kernel-specific "nodes".
 */
uint64_t
total_node_memory()
{
	int i;
	struct node_table *nt;
	uint64_t total;

	for (i = total = 0; i < vt->numnodes; i++) {
		nt = &vt->node_table[i];

		if (CRASHDEBUG(1)) {
			console("node_table[%d]: \n", i);
			console(" id: %d\n", nt->node_id);
			console(" pgdat: %lx\n", nt->pgdat);
			console(" size: %ld\n", nt->size);
			console(" present: %ld\n", nt->present);
			console(" mem_map: %lx\n", nt->mem_map);
			console(" start_paddr: %lx\n", nt->start_paddr);
			console(" start_mapnr: %ld\n", nt->start_mapnr);
		}

		/* Prefer the "present" page count; fall back to span size. */
		if (nt->present)
			total += (uint64_t)((uint64_t)nt->present *
				(uint64_t)PAGESIZE());
		else
			total += (uint64_t)((uint64_t)nt->size *
				(uint64_t)PAGESIZE());
	}

	return total;
}

/*
 * Dump just the vm_area_struct cache table data so that it can be
 * called from above or for debug purposes.
 */
void
dump_vma_cache(ulong verbose)
{
	int i;
	ulong vhits;

	if (!verbose)
		goto show_hits;

	for (i = 0; i < VMA_CACHE; i++)
		fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n",
			i, vt->cached_vma[i],
			vt->cached_vma_hits[i]);
	fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache);
	fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index);
	fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills);
	fflush(fp);

show_hits:
	if (vt->vma_cache_fills) {
		for (i = vhits = 0; i < VMA_CACHE; i++)
			vhits += vt->cached_vma_hits[i];

		fprintf(fp, "%s vma hit rate: %2ld%% (%ld of %ld)\n",
			verbose ? "" : " ",
			(vhits * 100)/vt->vma_cache_fills,
			vhits, vt->vma_cache_fills);
	}
}

/*
 * Guess at the "real" amount of physical memory installed, formatting
 * it in a MB or GB based string.
 */
char *
get_memory_size(char *buf)
{
	uint64_t total;
	ulong next_gig;
#ifdef OLDWAY
	ulong mbs, gbs;
#endif

	total = machdep->memory_size();

	/* Round up to the next GB when within 64MB of it. */
	if ((next_gig = roundup(total, GIGABYTES(1)))) {
		if ((next_gig - total) <= MEGABYTES(64))
			total = next_gig;
	}

	return (pages_to_size((ulong)(total/PAGESIZE()), buf));

	/* NOTE: the OLDWAY section below is unreachable (after return). */
#ifdef OLDWAY
	gbs = (ulong)(total/GIGABYTES(1));
	mbs = (ulong)(total/MEGABYTES(1));
	if (gbs)
		mbs = (total % GIGABYTES(1))/MEGABYTES(1);

	if (total%MEGABYTES(1))
		mbs++;

	if (gbs)
		sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB",
			gbs, mbs);
	else
		sprintf(buf, "%ld MB", mbs);

	return buf;
#endif
}

/*
 * For use by architectures not having machine-specific manners for
 * best determining physical memory size.
 */
uint64_t
generic_memory_size(void)
{
	/* Compute once and cache in machdep->memsize. */
	if (machdep->memsize)
		return machdep->memsize;

	return (machdep->memsize = total_node_memory());
}

/*
 * Determine whether a virtual address is user or kernel or ambiguous.
 */
int
vaddr_type(ulong vaddr, struct task_context *tc)
{
	int memtype, found;

	if (!tc)
		tc = CURRENT_CONTEXT();
	memtype = found = 0;

	if (machdep->is_uvaddr(vaddr, tc)) {
		memtype |= UVADDR;
		found++;
	}

	if (machdep->is_kvaddr(vaddr)) {
		memtype |= KVADDR;
		found++;
	}

	/* Both (or neither) matched: the address is ambiguous. */
	if (found == 1)
		return memtype;
	else
		return AMBIGUOUS;
}

/*
 * Determine the first valid user space address
 */
static int
address_space_start(struct task_context *tc, ulong *addr)
{
	ulong mm_mt, entry_num, i, vma = 0;
	char *vma_buf;
	struct list_pair *entry_list;

	if (!tc->mm_struct)
		return FALSE;

	if (INVALID_MEMBER(mm_struct_mmap) && VALID_MEMBER(mm_struct_mm_mt)) {
		/* Maple-tree kernels: gather VMAs and take the first one. */
		mm_mt = tc->mm_struct + OFFSET(mm_struct_mm_mt);
		entry_num = do_maple_tree(mm_mt, MAPLE_TREE_COUNT, NULL);
		entry_list = (struct list_pair *)GETBUF(entry_num *
			sizeof(struct list_pair));
		do_maple_tree(mm_mt, MAPLE_TREE_GATHER, entry_list);
		for (i = 0; i < entry_num; i++) {
			if (!!(vma = (ulong)entry_list[i].value))
				break;
		}
		FREEBUF(entry_list);
	} else {
		/* Legacy kernels: head of the mm_struct.mmap VMA list. */
		fill_mm_struct(tc->mm_struct);
		vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
	}

	if (!vma)
return FALSE; vma_buf = fill_vma_cache(vma); *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); return TRUE; } int generic_get_kvaddr_ranges(struct vaddr_range *rp) { int cnt; if (XEN_HYPER_MODE()) return 0; cnt = 0; rp[cnt].type = KVADDR_UNITY_MAP; rp[cnt].start = machdep->kvbase; rp[cnt++].end = vt->vmalloc_start; rp[cnt].type = KVADDR_VMALLOC; rp[cnt].start = vt->vmalloc_start; rp[cnt++].end = (ulong)(-1); return cnt; } /* * Search for a given value between a starting and ending address range, * applying an optional mask for "don't care" bits. As an alternative * to entering the starting address value, -k means "start of kernel address * space". For processors with ambiguous user/kernel address spaces, * -u or -k must be used (with or without -s) as a differentiator. */ void cmd_search(void) { int i, c, memtype, ranges, context, max; ulonglong start, end; ulong value, mask, len; ulong uvaddr_start, uvaddr_end; ulong kvaddr_start, kvaddr_end, range_end; int sflag, Kflag, Vflag, pflag, Tflag, tflag; struct searchinfo searchinfo; struct syment *sp; struct node_table *nt; struct vaddr_range vaddr_ranges[MAX_KVADDR_RANGES]; struct vaddr_range *vrp; struct task_context *tc; #define vaddr_overflow(ADDR) (BITS32() && ((ADDR) > 0xffffffffULL)) #define uint_overflow(VALUE) ((VALUE) > 0xffffffffUL) #define ushort_overflow(VALUE) ((VALUE) > 0xffffUL) context = max = 0; start = end = 0; value = mask = sflag = pflag = Kflag = Vflag = memtype = len = Tflag = tflag = 0; kvaddr_start = kvaddr_end = 0; uvaddr_start = UNINITIALIZED; uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; BZERO(&searchinfo, sizeof(struct searchinfo)); vrp = &vaddr_ranges[0]; ranges = machdep->get_kvaddr_ranges(vrp); if (CRASHDEBUG(1)) { fprintf(fp, "kvaddr ranges:\n"); for (i = 0; i < ranges; i++) { fprintf(fp, " [%d] %lx %lx ", i, vrp[i].start, vrp[i].end); switch (vrp[i].type) { case KVADDR_UNITY_MAP: fprintf(fp, "KVADDR_UNITY_MAP\n"); break; case KVADDR_START_MAP: fprintf(fp, "KVADDR_START_MAP\n"); break; case KVADDR_VMALLOC: fprintf(fp, "KVADDR_VMALLOC\n"); break; case KVADDR_MODULES: fprintf(fp, "KVADDR_MODULES\n"); break; case KVADDR_VMEMMAP: fprintf(fp, "KVADDR_VMEMMAP\n"); break; } } } searchinfo.mode = SEARCH_ULONG; /* default search */ while ((c = getopt(argcnt, args, "Ttl:ukKVps:e:v:m:hwcx:")) != EOF) { switch(c) { case 'u': if (XEN_HYPER_MODE()) error(FATAL, "-u option is not applicable to the " "Xen hypervisor\n"); if (is_kernel_thread(CURRENT_TASK()) || !task_mm(CURRENT_TASK(), TRUE)) error(FATAL, "current context has no user address space\n"); if (!sflag) { address_space_start(CURRENT_CONTEXT(), &uvaddr_start); start = (ulonglong)uvaddr_start; } memtype = UVADDR; sflag++; break; case 'p': if (XEN_HYPER_MODE()) error(FATAL, "-p option is not applicable to the " "Xen hypervisor\n"); memtype = PHYSADDR; if (!sflag) { nt = &vt->node_table[0]; start = nt->start_paddr; } sflag++; break; case 'V': case 'K': case 'k': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (!sflag) start = vrp[0].start; memtype = KVADDR; sflag++; if (c == 'K') Kflag++; else if (c == 'V') Vflag++; break; case 's': if ((sp = symbol_search(optarg))) start = (ulonglong)sp->value; else start = htoll(optarg, FAULT_ON_ERROR, NULL); sflag++; break; case 'e': if ((sp = symbol_search(optarg))) end = (ulonglong)sp->value; else end = htoll(optarg, FAULT_ON_ERROR, NULL); if (!end) error(FATAL, "invalid ending address: 0\n"); break; case 'l': len = stol(optarg, FAULT_ON_ERROR, NULL); break; case 'm': mask = 
htol(optarg, FAULT_ON_ERROR, NULL); break; case 'h': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"h\"\n"); searchinfo.mode = SEARCH_USHORT; break; case 'w': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"w\"\n"); searchinfo.mode = SEARCH_UINT; break; case 'c': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search type with \"c\"\n"); searchinfo.mode = SEARCH_CHARS; break; case 'x': context = dtoi(optarg, FAULT_ON_ERROR, NULL); break; case 'T': case 't': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (c == 'T') Tflag++; else if (c == 't') tflag++; if (tflag && Tflag) error(FATAL, "-t and -T options are mutually exclusive\n"); break; default: argerrs++; break; } } if ((tflag || Tflag) && (memtype || start || end || len)) error(FATAL, "-%c option cannot be used with other " "memory-selection options\n", tflag ? 't' : 'T'); if (XEN_HYPER_MODE()) { memtype = KVADDR; if (!sflag) error(FATAL, "the \"-s start\" option is required for" " the Xen hypervisor\n"); } else if (!memtype) { memtype = KVADDR; if (!tflag && !sflag++) start = vrp[0].start; } if (argerrs || (!sflag && !tflag) || !args[optind] || (len && end) || !memtype) cmd_usage(pc->curcmd, SYNOPSIS); searchinfo.memtype = memtype; /* * Verify starting address. 
*/ switch (memtype) { case UVADDR: if (vaddr_overflow(start) || !IS_UVADDR((ulong)start, CURRENT_CONTEXT())) { error(INFO, "invalid user virtual address: %llx\n", start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (tflag) break; if (vaddr_overflow(start) || !IS_KVADDR((ulong)start)) { error(INFO, "invalid kernel virtual address: %llx\n", (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case AMBIGUOUS: error(INFO, "ambiguous virtual address: %llx (requires -u or -k)\n", (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } /* * Set up ending address if necessary. */ if (!end && !len && !tflag) { switch (memtype) { case UVADDR: end = (ulonglong)uvaddr_end; break; case KVADDR: if (XEN_HYPER_MODE()) end = (ulong)(-1); else { range_end = 0; for (i = 0; i < ranges; i++) { if (vrp[i].end > range_end) range_end = vrp[i].end; } end = (ulonglong)range_end; } break; case PHYSADDR: nt = &vt->node_table[vt->numnodes-1]; end = nt->start_paddr + (nt->size * PAGESIZE()); break; } } else if (len) end = start + len; /* * Final verification and per-type start/end variable setting. 
*/ switch (memtype) { case UVADDR: uvaddr_start = (ulong)start; if (end > (ulonglong)uvaddr_end) { error(INFO, "ending address %lx is in kernel space: %llx\n", end); cmd_usage(pc->curcmd, SYNOPSIS); } if (end < (ulonglong)uvaddr_end) uvaddr_end = (ulong)end; if (uvaddr_end < uvaddr_start) { error(INFO, "ending address %lx is below starting address %lx\n", uvaddr_end, uvaddr_start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (tflag) break; kvaddr_start = (ulong)start; kvaddr_end = (ulong)end; if (kvaddr_end < kvaddr_start) { error(INFO, "ending address %lx is below starting address %lx\n", kvaddr_end, kvaddr_start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case PHYSADDR: if (end < start) { error(INFO, "ending address %llx is below starting address %llx\n", (ulonglong)end, (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } break; } if (mask) { switch (searchinfo.mode) { case SEARCH_ULONG: searchinfo.s_parms.s_ulong.mask = mask; break; case SEARCH_UINT: searchinfo.s_parms.s_uint.mask = mask; break; case SEARCH_USHORT: searchinfo.s_parms.s_ushort.mask = mask; break; case SEARCH_CHARS: error(INFO, "mask ignored on string search\n"); break; } } if (context) { switch (searchinfo.mode) { case SEARCH_ULONG: max = PAGESIZE()/sizeof(long); break; case SEARCH_UINT: max = PAGESIZE()/sizeof(int); break; case SEARCH_USHORT: max = PAGESIZE()/sizeof(short); break; case SEARCH_CHARS: error(FATAL, "-x option is not allowed with -c\n"); break; } if (context > max) error(FATAL, "context value %d is too large: maximum is %d\n", context, max); searchinfo.context = context; } searchinfo.vcnt = 0; searchinfo.val = UNUSED; while (args[optind]) { switch (searchinfo.mode) { case SEARCH_ULONG: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] = mask ? 
NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ulong.value[searchinfo.vcnt] = value; searchinfo.vcnt++; break; case SEARCH_UINT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.value[searchinfo.vcnt] = value; if (uint_overflow(value)) error(FATAL, "value too large for -w option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_USHORT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? 
NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.value[searchinfo.vcnt] = value; if (ushort_overflow(value)) error(FATAL, "value too large for -h option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_CHARS: /* parser can deliver empty strings */ if (strlen(args[optind])) { searchinfo.s_parms.s_chars.value[searchinfo.vcnt] = args[optind]; searchinfo.s_parms.s_chars.len[searchinfo.vcnt] = strlen(args[optind]); searchinfo.vcnt++; } break; } optind++; } if (!searchinfo.vcnt) cmd_usage(pc->curcmd, SYNOPSIS); switch (memtype) { case PHYSADDR: searchinfo.paddr_start = start; searchinfo.paddr_end = end; search_physical(&searchinfo); break; case UVADDR: searchinfo.vaddr_start = uvaddr_start; searchinfo.vaddr_end = uvaddr_end; search_virtual(&searchinfo); break; case KVADDR: if (XEN_HYPER_MODE()) { searchinfo.vaddr_start = kvaddr_start; searchinfo.vaddr_end = kvaddr_end; search_virtual(&searchinfo); break; } if (tflag || Tflag) { searchinfo.tasks_found = 0; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (Tflag && !is_task_active(tc->task)) continue; searchinfo.vaddr_start = GET_STACKBASE(tc->task); searchinfo.vaddr_end = GET_STACKTOP(tc->task); searchinfo.task_context = tc; searchinfo.do_task_header = TRUE; search_virtual(&searchinfo); } break; } for (i = 0; i < ranges; i++) { if ((kvaddr_start >= vrp[i].end) || (kvaddr_end <= vrp[i].start)) continue; switch (vrp[i].type) { case KVADDR_UNITY_MAP: case KVADDR_START_MAP: if (Vflag) continue; break; case KVADDR_VMALLOC: case KVADDR_MODULES: case KVADDR_VMEMMAP: if (Kflag) continue; break; } pc->curcmd_private = vrp[i].type; searchinfo.vaddr_start = kvaddr_start > vrp[i].start ? kvaddr_start : vrp[i].start; searchinfo.vaddr_end = (kvaddr_end < vrp[i].end) ? kvaddr_end : vrp[i].end; search_virtual(&searchinfo); } break; } } /* * Do the work for cmd_search(). 
 */
/*
 * Return the display form of the command-line string (symbol name or
 * expression) that produced the search value that just matched, wrapped
 * in parentheses.  Returns "" when no option string was recorded (e.g.
 * when a mask was given, or the value was entered as a plain number).
 */
static char *
show_opt_string(struct searchinfo *si)
{
	char *opt_string;
	int index;

	/*
	 * While a search is in progress si->val indexes the value that
	 * just matched; otherwise it is UNUSED and si->vcnt is used.
	 */
	index = (si->val == UNUSED) ? si->vcnt : si->val;

	switch (si->mode)
	{
	case SEARCH_USHORT:
		opt_string = si->s_parms.s_ushort.opt_string[index];
		break;
	case SEARCH_UINT:
		opt_string = si->s_parms.s_uint.opt_string[index];
		break;
	case SEARCH_ULONG:
	default:
		opt_string = si->s_parms.s_ulong.opt_string[index];
		break;
	}

	if (!opt_string)
		return "";
	else if (FIRSTCHAR(opt_string) == '(')
		return opt_string;   /* already parenthesized */
	else {
		/* wrap the bare string in parentheses for display */
		sprintf(si->buf, "(%s)", opt_string);
		return si->buf;
	}
}

/*
 * OR the user-specified "don't care" bits into both sides of a match
 * comparison.  NOTE(review): this macro relies on a local variable
 * named "mask" being in scope at each use site.
 */
#define SEARCHMASK(X) ((X) | mask)

/*
 * Display a matched item surrounded by "context" (-c) items of memory
 * before and after it, sized and formatted to the search granularity.
 */
static void
display_with_pre_and_post(void *bufptr, ulonglong addr, struct searchinfo *si)
{
	int ctx, memtype, t, amount;
	ulonglong addr_d;
	ulong flag;
	char buf[BUFSIZE];

	ctx = si->context;
	memtype = si->memtype;
	flag = HEXADECIMAL|NO_ERROR|ASCII_ENDLINE;

	/* item size follows the search mode */
	switch (si->mode)
	{
	case SEARCH_USHORT:
		t = sizeof(ushort);
		break;
	case SEARCH_UINT:
		t = sizeof(uint);
		break;
	case SEARCH_ULONG:
	default:
		t = sizeof(ulong);
		break;
	}

	switch (t)
	{
	case 8:
		flag |= DISPLAY_64;
		break;
	case 4:
		flag |= DISPLAY_32;
		break;
	case 2:
		flag |= DISPLAY_16;
		break;
	}

	/* "pre" context: ctx items immediately before the match */
	amount = ctx * t;
	addr_d = addr - amount;

	display_memory(addr_d, ctx, flag, memtype, NULL);

	/* the matched item itself */
	BZERO(buf, BUFSIZE);
	fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN,
		RJUST|LONGLONG_HEX, MKSTR(&addr)));
	switch(si->mode)
	{
	case SEARCH_ULONG:
		fprintf(fp, "%lx %s\n",
			*((ulong *)bufptr), show_opt_string(si));
		break;
	case SEARCH_UINT:
		fprintf(fp, "%x %s\n",
			*((uint *)bufptr), show_opt_string(si));
		break;
	case SEARCH_USHORT:
		fprintf(fp, "%x %s\n",
			*((ushort *)bufptr), show_opt_string(si));
		break;
	}

	/* "post" context: ctx items immediately after the match */
	addr_d = addr + t;
	display_memory(addr_d, ctx, flag, memtype, NULL);
	fprintf(fp, "\n");
}

/*
 * Scan a buffer of longcnt longs for any of the requested values,
 * reporting each hit; returns the address just past the scanned region.
 */
static ulong
search_ulong(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si)
{
	int i;
	ulong mask = si->s_parms.s_ulong.mask;   /* used by SEARCHMASK */

	for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) {
		for (si->val = 0; si->val < si->vcnt; si->val++) {
			if (SEARCHMASK(*bufptr) ==
			    SEARCHMASK(si->s_parms.s_ulong.value[si->val])) {
				/* emit per-task header once per task (-t/-T) */
				if (si->do_task_header) {
					print_task_header(fp, si->task_context,
						si->tasks_found);
					si->do_task_header = FALSE;
					si->tasks_found++;
				}
				if (si->context)
					display_with_pre_and_post(bufptr,
						addr, si);
				else
					fprintf(fp, "%lx: %lx %s\n", addr,
						*bufptr, show_opt_string(si));
			}
		}
	}
	return addr;
}

/* phys search uses ulonglong address representation */
static ulonglong
search_ulong_p(ulong *bufptr, ulonglong addr, int longcnt,
	struct searchinfo *si)
{
	int i;
	ulong mask = si->s_parms.s_ulong.mask;

	for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) {
		for (si->val = 0; si->val < si->vcnt; si->val++) {
			if (SEARCHMASK(*bufptr) ==
			    SEARCHMASK(si->s_parms.s_ulong.value[si->val])) {
				if (si->context)
					display_with_pre_and_post(bufptr,
						addr, si);
				else
					fprintf(fp, "%llx: %lx %s\n", addr,
						*bufptr, show_opt_string(si));
			}
		}
	}
	return addr;
}

/*
 * As search_ulong(), but reinterpret the buffer as ints (-w option).
 */
static ulong
search_uint(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si)
{
	int i;
	int cnt = longcnt * (sizeof(long)/sizeof(int));
	uint *ptr = (uint *)bufptr;
	uint mask = si->s_parms.s_uint.mask;

	for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) {
		for (si->val = 0; si->val < si->vcnt; si->val++) {
			if (SEARCHMASK(*ptr) ==
			    SEARCHMASK(si->s_parms.s_uint.value[si->val])) {
				if (si->do_task_header) {
					print_task_header(fp, si->task_context,
						si->tasks_found);
					si->do_task_header = FALSE;
					si->tasks_found++;
				}
				if (si->context)
					display_with_pre_and_post(ptr,
						addr, si);
				else
					fprintf(fp, "%lx: %x %s\n", addr,
						*ptr, show_opt_string(si));
			}
		}
	}
	return addr;
}

/* phys search uses ulonglong address representation */
static ulonglong
search_uint_p(ulong *bufptr, ulonglong addr, int longcnt,
	struct searchinfo *si)
{
	int i;
	int cnt = longcnt * (sizeof(long)/sizeof(int));
	uint *ptr = (uint *)bufptr;
	uint mask = si->s_parms.s_uint.mask;

	for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) {
		for (si->val = 0; si->val < si->vcnt; si->val++) {
			if (SEARCHMASK(*ptr) ==
			    SEARCHMASK(si->s_parms.s_uint.value[si->val])) {
				if (si->context)
					display_with_pre_and_post(ptr,
						addr, si);
				else
					fprintf(fp, "%llx: %x %s\n", addr,
						*ptr, show_opt_string(si));
			}
		}
	}
	return addr;
}

/*
 * As search_ulong(), but reinterpret the buffer as shorts (-h option).
 */
static ulong
search_ushort(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si)
{
	int i;
	int cnt = longcnt * (sizeof(long)/sizeof(short));
	ushort *ptr = (ushort *)bufptr;
	ushort mask = si->s_parms.s_ushort.mask;

	for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) {
		for (si->val = 0; si->val < si->vcnt; si->val++) {
			if (SEARCHMASK(*ptr) ==
			    SEARCHMASK(si->s_parms.s_ushort.value[si->val])) {
				if (si->do_task_header) {
					print_task_header(fp, si->task_context,
						si->tasks_found);
					si->do_task_header = FALSE;
					si->tasks_found++;
				}
				if (si->context)
					display_with_pre_and_post(ptr,
						addr, si);
				else
					fprintf(fp, "%lx: %x %s\n", addr,
						*ptr, show_opt_string(si));
			}
		}
	}
	return addr;
}

/* phys search uses ulonglong address representation */
static ulonglong
search_ushort_p(ulong *bufptr, ulonglong addr, int longcnt,
	struct searchinfo *si)
{
	int i;
	int cnt = longcnt * (sizeof(long)/sizeof(short));
	ushort *ptr = (ushort *)bufptr;
	ushort mask = si->s_parms.s_ushort.mask;

	for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) {
		for (si->val = 0; si->val < si->vcnt; si->val++) {
			if (SEARCHMASK(*ptr) ==
			    SEARCHMASK(si->s_parms.s_ushort.value[si->val])) {
				if (si->context)
					display_with_pre_and_post(ptr,
						addr, si);
				else
					fprintf(fp, "%llx: %x %s\n", addr,
						*ptr, show_opt_string(si));
			}
		}
	}
	return addr;
}

/*
 * String search "memory" to remember possible matches that cross
 * page (or search buffer) boundaries.
 * The cross_match zone is the last strlen-1 chars of the page for
 * each of the possible targets.
*/ struct cross_match { int cnt; /* possible hits in the cross_match zone */ ulong addr; /* starting addr of crossing match zone for this target */ ulonglong addr_p; /* for physical search */ char hit[BUFSIZE]; /* array of hit locations in the crossing match zone */ /* This should really be the much-smaller MAXARGLEN, but * no one seems to be enforcing that in the parser. */ } cross[MAXARGS]; ulong cross_match_next_addr; /* the expected starting value of the next page */ ulonglong cross_match_next_addr_p; /* the expected starting value of the next physical page */ #define CHARS_CTX 56 static void report_match(struct searchinfo *si, ulong addr, char *ptr1, int len1, char *ptr2, int len2) { int i; if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } fprintf(fp, "%lx: ", addr); for (i = 0; i < len1; i++) { if (isprint(ptr1[i])) fprintf(fp, "%c", ptr1[i]); else fprintf(fp, "."); } for (i = 0; i < len2; i++) { if (isprint(ptr2[i])) fprintf(fp, "%c", ptr2[i]); else fprintf(fp, "."); } fprintf(fp, "\n"); } static ulong search_chars(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i, j; int len; char *target; int charcnt = longcnt * sizeof(long); char *ptr = (char *)bufptr; /* is this the first page of this search? 
*/ if (si->s_parms.s_chars.started_flag == 0) { for (j = 0; j < si->vcnt; j++) { cross[j].cnt = 0; /* no hits */ } cross_match_next_addr = (ulong)-1; /* no page match for first page */ si->s_parms.s_chars.started_flag++; } if (cross_match_next_addr == addr) { for (j = 0; j < si->vcnt; j++) { if (cross[j].cnt) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; for (i = 0; i < len - 1; i++) { if (cross[j].hit[i] && !strncmp(&target[len - 1 - i], ptr, i + 1)) report_match(si, cross[j].addr + i, target, len, &ptr[i+1], CHARS_CTX - len); } } } } /* set up for possible cross matches on this page */ cross_match_next_addr = addr + charcnt; for (j = 0; j < si->vcnt; j++) { len = si->s_parms.s_chars.len[j]; cross[j].cnt = 0; cross[j].addr = addr + longcnt * sizeof(long) - (len - 1); for (i = 0; i < len - 1; i++) cross[j].hit[i] = 0; } for (i = 0; i < charcnt; i++, ptr++, addr++) { for (j = 0; j < si->vcnt; j++) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; if ((i + len) > charcnt) { /* check for cross match */ if (!strncmp(target, ptr, charcnt - i)) { cross[j].hit[len + i - charcnt - 1] = 1; cross[j].cnt++; } } else { if (!strncmp(target, ptr, len)) { int slen = CHARS_CTX; if ((i + CHARS_CTX) > charcnt) slen = charcnt - i; report_match(si, addr, ptr, slen, (char *)0, 0); } } } } return addr; } static void report_match_p(ulonglong addr, char *ptr1, int len1, char *ptr2, int len2) { int i; fprintf(fp, "%llx: ", addr); for (i = 0; i < len1; i++) { if (isprint(ptr1[i])) fprintf(fp, "%c", ptr1[i]); else fprintf(fp, "."); } for (i = 0; i < len2; i++) { if (isprint(ptr2[i])) fprintf(fp, "%c", ptr2[i]); else fprintf(fp, "."); } fprintf(fp, "\n"); } static ulonglong search_chars_p(ulong *bufptr, ulonglong addr_p, int longcnt, struct searchinfo *si) { int i, j; int len; char *target; int charcnt = longcnt * sizeof(long); char *ptr = (char *)bufptr; /* is this the first page of this search? 
*/ if (si->s_parms.s_chars.started_flag == 0) { for (j = 0; j < si->vcnt; j++) { cross[j].cnt = 0; /* no hits */ } cross_match_next_addr_p = (ulonglong)-1; /* no page match for first page */ si->s_parms.s_chars.started_flag++; } if (cross_match_next_addr_p == addr_p) { for (j = 0; j < si->vcnt; j++) { if (cross[j].cnt) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; for (i = 0; i < len - 1; i++) { if (cross[j].hit[i] && !strncmp(&target[len - 1 - i], ptr, i + 1)) report_match_p(cross[j].addr_p + i, target, len, &ptr[i+1], CHARS_CTX - len); } } } } /* set up for possible cross matches on this page */ cross_match_next_addr_p = addr_p + charcnt; for (j = 0; j < si->vcnt; j++) { len = si->s_parms.s_chars.len[j]; cross[j].cnt = 0; cross[j].addr_p = addr_p + longcnt * sizeof(long) - (len - 1); for (i = 0; i < len - 1; i++) cross[j].hit[i] = 0; } for (i = 0; i < charcnt; i++, ptr++, addr_p++) { for (j = 0; j < si->vcnt; j++) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; if ((i + len) > charcnt) { /* check for cross match */ if (!strncmp(target, ptr, charcnt - i)) { cross[j].hit[len + i - charcnt - 1] = 1; cross[j].cnt++; } } else { if (!strncmp(target, ptr, len)) { int slen = CHARS_CTX; if ((i + CHARS_CTX) > charcnt) slen = charcnt - i; report_match_p(addr_p, ptr, slen, (char *)0, 0); } } } } return addr_p; } static void search_virtual(struct searchinfo *si) { ulong start, end; ulong pp, next, *ubp; int wordcnt, lastpage; ulong page; physaddr_t paddr; char *pagebuf; ulong pct, pages_read, pages_checked; time_t begin, finish; start = si->vaddr_start; end = si->vaddr_end; pages_read = pages_checked = 0; begin = finish = 0; pagebuf = GETBUF(PAGESIZE()); if (start & (sizeof(long)-1)) { start &= ~(sizeof(long)-1); error(INFO, "rounding down start address to: %lx\n", start); } if (CRASHDEBUG(1)) { begin = time(NULL); fprintf(fp, "search_virtual: start: %lx end: %lx\n", start, end); } next = start; for (pp = 
VIRTPAGEBASE(start); next < end; next = pp) { pages_checked++; lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); if (LKCD_DUMPFILE()) set_lkcd_nohash(); /* * Keep it virtual for Xen hypervisor. */ if (XEN_HYPER_MODE()) { if (!readmem(pp, KVADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { if (CRASHDEBUG(1)) fprintf(fp, "search suspended at: %lx\n", pp); goto done; } goto virtual; } switch (si->memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || !phys_to_page(paddr, &page)) { if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) goto done; continue; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || !phys_to_page(paddr, &page)) { if (!next_kpage(pp, &pp)) goto done; continue; } break; } if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { pp += PAGESIZE(); continue; } virtual: pages_read++; ubp = (ulong *)&pagebuf[next - pp]; if (lastpage) { if (end == (ulong)(-1)) wordcnt = PAGESIZE()/sizeof(long); else wordcnt = (end - next)/sizeof(long); } else wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); switch (si->mode) { case SEARCH_ULONG: next = search_ulong(ubp, next, wordcnt, si); break; case SEARCH_UINT: next = search_uint(ubp, next, wordcnt, si); break; case SEARCH_USHORT: next = search_ushort(ubp, next, wordcnt, si); break; case SEARCH_CHARS: next = search_chars(ubp, next, wordcnt, si); break; default: /* unimplemented search type */ next += wordcnt * (sizeof(long)); break; } if (CRASHDEBUG(1)) if ((pp % (1024*1024)) == 0) console("%lx\n", pp); pp += PAGESIZE(); } done: if (CRASHDEBUG(1)) { finish = time(NULL); pct = (pages_read * 100)/pages_checked; fprintf(fp, "search_virtual: read %ld (%ld%%) of %ld pages checked in %ld seconds\n", pages_read, pct, pages_checked, finish - begin); } FREEBUF(pagebuf); } static void search_physical(struct searchinfo *si) { ulonglong start_in, end_in; ulong *ubp; int wordcnt, lastpage; ulonglong pnext, ppp; char *pagebuf; ulong pct, 
pages_read, pages_checked; time_t begin, finish; ulong page; start_in = si->paddr_start; end_in = si->paddr_end; pages_read = pages_checked = 0; begin = finish = 0; pagebuf = GETBUF(PAGESIZE()); if (start_in & (sizeof(ulonglong)-1)) { start_in &= ~(sizeof(ulonglong)-1); error(INFO, "rounding down start address to: %llx\n", (ulonglong)start_in); } if (CRASHDEBUG(1)) { begin = time(NULL); fprintf(fp, "search_physical: start: %llx end: %llx\n", start_in, end_in); } pnext = start_in; for (ppp = PHYSPAGEBASE(start_in); pnext < end_in; pnext = ppp) { pages_checked++; lastpage = (PHYSPAGEBASE(pnext) == PHYSPAGEBASE(end_in)); if (LKCD_DUMPFILE()) set_lkcd_nohash(); if (!phys_to_page(ppp, &page) || !readmem(ppp, PHYSADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { if (!next_physpage(ppp, &ppp)) break; continue; } pages_read++; ubp = (ulong *)&pagebuf[pnext - ppp]; if (lastpage) { if (end_in == (ulonglong)(-1)) wordcnt = PAGESIZE()/sizeof(long); else wordcnt = (end_in - pnext)/sizeof(long); } else wordcnt = (PAGESIZE() - (pnext - ppp))/sizeof(long); switch (si->mode) { case SEARCH_ULONG: pnext = search_ulong_p(ubp, pnext, wordcnt, si); break; case SEARCH_UINT: pnext = search_uint_p(ubp, pnext, wordcnt, si); break; case SEARCH_USHORT: pnext = search_ushort_p(ubp, pnext, wordcnt, si); break; case SEARCH_CHARS: pnext = search_chars_p(ubp, pnext, wordcnt, si); break; default: /* unimplemented search type */ pnext += wordcnt * (sizeof(long)); break; } ppp += PAGESIZE(); } if (CRASHDEBUG(1)) { finish = time(NULL); pct = (pages_read * 100)/pages_checked; fprintf(fp, "search_physical: read %ld (%ld%%) of %ld pages checked in %ld seconds\n", pages_read, pct, pages_checked, finish - begin); } FREEBUF(pagebuf); } static bool check_vma(ulong vma, ulong vaddr, ulong *vm_next, ulong *nextvaddr) { char *vma_buf; ulong vm_start, vm_end; vma_buf = fill_vma_cache(vma); vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); vm_end = ULONG(vma_buf + 
OFFSET(vm_area_struct_vm_end)); if (vm_next) *vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next)); if (vaddr <= vm_start) { *nextvaddr = vm_start; return TRUE; } if ((vaddr > vm_start) && (vaddr < vm_end)) { *nextvaddr = vaddr; return TRUE; } return FALSE; } /* * Return the next mapped user virtual address page that comes after * the passed-in address. */ static int next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) { ulong vma, total_vm; ulong vm_next; ulong mm_mt, entry_num, i; struct list_pair *entry_list; if (!tc->mm_struct) return FALSE; fill_mm_struct(tc->mm_struct); vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); if (!total_vm) return FALSE; if (INVALID_MEMBER(mm_struct_mmap) && VALID_MEMBER(mm_struct_mm_mt)) { mm_mt = tc->mm_struct + OFFSET(mm_struct_mm_mt); entry_num = do_maple_tree(mm_mt, MAPLE_TREE_COUNT, NULL); entry_list = (struct list_pair *)GETBUF(entry_num * sizeof(struct list_pair)); do_maple_tree(mm_mt, MAPLE_TREE_GATHER, entry_list); for (i = 0; i < entry_num; i++) { if (!!(vma = (ulong)entry_list[i].value) && check_vma(vma, vaddr, NULL, nextvaddr)) { FREEBUF(entry_list); return TRUE; } } FREEBUF(entry_list); } else { vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); if (!vma) return FALSE; for ( ; vma; vma = vm_next) { if (check_vma(vma, vaddr, &vm_next, nextvaddr)) return TRUE; } } return FALSE; } /* * Return the next mapped kernel virtual address in the vmlist * that is equal to or comes after the passed-in address. * Prevent repeated calls to dump_vmlist() by only doing it * one time for dumpfiles, or one time per (active) command. */ static int next_vmlist_vaddr(ulong vaddr, ulong *nextvaddr) { int i, retval; ulong cnt; struct meminfo meminfo, *mi; static int count = 0; static struct vmlist *vmlist = NULL; static ulong cmdgencur = BADVAL; /* * Search the stashed vmlist if possible. 
*/ if (vmlist && ACTIVE()) { if (pc->cmdgencur != cmdgencur) { free(vmlist); vmlist = NULL; } } if (vmlist) { for (i = 0, retval = FALSE; i < count; i++) { if (vaddr <= vmlist[i].addr) { *nextvaddr = vmlist[i].addr; retval = TRUE; break; } if (vaddr < (vmlist[i].addr + vmlist[i].size)) { *nextvaddr = vaddr; retval = TRUE; break; } } return retval; } mi = &meminfo; BZERO(mi, sizeof(struct meminfo)); mi->flags = GET_VMLIST_COUNT; dump_vmlist(mi); cnt = mi->retval; if (!cnt) return FALSE; mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*cnt); mi->flags = GET_VMLIST; dump_vmlist(mi); for (i = 0, retval = FALSE; i < cnt; i++) { if (vaddr <= mi->vmlist[i].addr) { *nextvaddr = mi->vmlist[i].addr; retval = TRUE; break; } if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) { *nextvaddr = vaddr; retval = TRUE; break; } } if (!vmlist) { vmlist = (struct vmlist *) malloc(sizeof(struct vmlist)*cnt); if (vmlist) { BCOPY(mi->vmlist, vmlist, sizeof(struct vmlist)*cnt); count = cnt; cmdgencur = pc->cmdgencur; } } FREEBUF(mi->vmlist); return retval; } /* * Determine whether a virtual address is inside a vmlist segment. */ int in_vmlist_segment(ulong vaddr) { ulong next; if (next_vmlist_vaddr(vaddr, &next) && (vaddr == next)) return TRUE; return FALSE; } /* * Return the next kernel module virtual address that is * equal to or comes after the passed-in address. 
 */
static int
next_module_vaddr(ulong vaddr, ulong *nextvaddr)
{
	int i, t;
	ulong start, end, min = (ulong)-1;
	struct load_module *lm;

	/* pre-module_memory kernels keep one contiguous range per module */
	if (!MODULE_MEMORY())
		goto old_module;

	/*
	 * Linux 6.4+ MODULE_MEMORY layout: each module has several
	 * discontiguous memory regions; track the lowest region base
	 * above vaddr in case vaddr falls inside no region.
	 */
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		for_each_mod_mem_type(t) {
			if (!lm->mem[t].size)
				continue;

			start = lm->mem[t].base;
			end = start + lm->mem[t].size;

			if (vaddr >= end)
				continue;

			if (vaddr < start) {
				if (start < min)  /* replace candidate */
					min = start;
				continue;
			}

			/* vaddr is inside this region */
			*nextvaddr = vaddr;
			return TRUE;
		}
	}

	if (min != (ulong)-1) {
		*nextvaddr = min;
		return TRUE;
	}

	return FALSE;

old_module:
	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		start = lm->mod_base;
		end = lm->mod_base + lm->mod_size;

		if (vaddr >= end)
			continue;

		/*
		 * Either below or in this module.
		 */
		if (vaddr < start)
			*nextvaddr = start;
		else
			*nextvaddr = vaddr;
		return TRUE;
	}

	return FALSE;
}

/*
 * Return the next kernel virtual address page in a designated
 * kernel virtual address range that comes after the passed-in,
 * untranslatable, address.
 */
static int
next_kpage(ulong vaddr, ulong *nextvaddr)
{
	ulong vaddr_orig;

	vaddr_orig = vaddr;
	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */

	if (vaddr < vaddr_orig)  /* wrapped back to zero? */
		return FALSE;

	/* pc->curcmd_private holds the KVADDR_* range type being searched */
	switch (pc->curcmd_private)
	{
	case KVADDR_UNITY_MAP:
		return next_identity_mapping(vaddr, nextvaddr);

	case KVADDR_VMALLOC:
		return next_vmlist_vaddr(vaddr, nextvaddr);

	case KVADDR_VMEMMAP:
		*nextvaddr = vaddr;
		return TRUE;

	case KVADDR_START_MAP:
		*nextvaddr = vaddr;
		return TRUE;

	case KVADDR_MODULES:
		return next_module_vaddr(vaddr, nextvaddr);
	}

	return FALSE;
}

/*
 * Return the next physical address page that comes after
 * the passed-in, unreadable, address.
 */
static int
next_physpage(ulonglong paddr, ulonglong *nextpaddr)
{
	int n;
	ulonglong node_start;
	ulonglong node_end;
	struct node_table *nt;

	/* find the memory node containing (or following) paddr */
	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		node_start = nt->start_paddr;
		node_end = nt->start_paddr + (nt->size * PAGESIZE());

		if (paddr >= node_end)
			continue;

		if (paddr < node_start) {
			*nextpaddr = node_start;
			return TRUE;
		}

		if (paddr < node_end) {
			*nextpaddr = paddr + PAGESIZE();
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Sum the total and free hugetlb page counts, expressed in base pages.
 * Modern kernels expose the hstates[] array; very old kernels only have
 * the nr_huge_pages/free_huge_pages counters with a fixed huge page size.
 * Returns FALSE only when hstates[] exists but required members do not.
 */
static int
get_hugetlb_total_pages(ulong *nr_total_pages, ulong *nr_total_free_pages)
{
	ulong hstate_p, vaddr;
	int i, len;
	ulong nr_huge_pages;
	ulong free_huge_pages;
	uint horder;

	*nr_total_pages = *nr_total_free_pages = 0;

	if (kernel_symbol_exists("hstates")) {

		if (INVALID_SIZE(hstate) ||
		    INVALID_MEMBER(hstate_order) ||
		    INVALID_MEMBER(hstate_nr_huge_pages) ||
		    INVALID_MEMBER(hstate_free_huge_pages))
			return FALSE;

		len = get_array_length("hstates", NULL, 0);
		hstate_p = symbol_value("hstates");

		for (i = 0; i < len; i++) {
			vaddr = hstate_p + (SIZE(hstate) * i);

			readmem(vaddr + OFFSET(hstate_order),
				KVADDR, &horder, sizeof(uint),
				"hstate_order", FAULT_ON_ERROR);

			/* order 0 means an unused hstate slot */
			if (!horder)
				continue;

			readmem(vaddr + OFFSET(hstate_nr_huge_pages),
				KVADDR, &nr_huge_pages, sizeof(ulong),
				"hstate_nr_huge_pages", FAULT_ON_ERROR);

			readmem(vaddr + OFFSET(hstate_free_huge_pages),
				KVADDR, &free_huge_pages, sizeof(ulong),
				"hstate_free_huge_pages", FAULT_ON_ERROR);

			/* convert huge pages to base pages: 1 << order each */
			*nr_total_pages += nr_huge_pages * (1 << horder);
			*nr_total_free_pages += free_huge_pages *
				(1 << horder);
		}
	} else if (kernel_symbol_exists("nr_huge_pages")) {
		/* 2MB huge pages, except 4MB on non-PAE x86 */
		unsigned long hpage_shift = 21;

		if ((machine_type("X86") && !(machdep->flags & PAE)))
			hpage_shift = 22;
		get_symbol_data("nr_huge_pages",
			sizeof(ulong), &nr_huge_pages);
		get_symbol_data("free_huge_pages",
			sizeof(ulong), &free_huge_pages);
		*nr_total_pages = nr_huge_pages * ((1 << hpage_shift) /
			machdep->pagesize);
		*nr_total_free_pages = free_huge_pages *
			((1 << hpage_shift) / machdep->pagesize);
	}
	return TRUE;
}
/*
 * Display swap statistics.
 */
void
cmd_swap(void)
{
	int c;

	/* no options are accepted; anything given is a usage error */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	dump_swap_info(VERBOSE, NULL, NULL);
}

/*
 * Do the work for cmd_swap().
 */

#define SWP_USED        1
#define SWAP_MAP_BAD    0x8000

char *swap_info_hdr = \
"SWAP_INFO_STRUCT TYPE SIZE USED PCT PRI FILENAME\n";

/*
 * Walk the kernel's swap_info[] array, printing one line per in-use
 * swap area (when VERBOSE) and/or accumulating total and used page
 * counts into *totalswap_pages / *totalused_pages when non-NULL.
 * Returns FALSE only when RETURN_ON_ERROR is set and a swap_map read
 * fails (in which case the output pointers carry error details).
 */
static int
dump_swap_info(ulong swapflags, ulong *totalswap_pages,
	ulong *totalused_pages)
{
	int i, j;
	int swap_device, prio;
	ulong pages, usedswap;
	ulong flags, swap_file, max, swap_map, pct;
	ulong vfsmnt;
	ulong swap_info, swap_info_ptr;
	ushort *smap;
	ulong inuse_pages, totalswap, totalused;
	char *devname;
	char buf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];
	/* Linux 6.10+ renamed/retyped swap_file to a struct file */
	int swap_file_is_file =
		STREQ(MEMBER_TYPE_NAME("swap_info_struct", "swap_file"),
		"file");

	if (!symbol_exists("nr_swapfiles"))
		error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");

	if (!symbol_exists("swap_info"))
		error(FATAL, "swap_info doesn't exist in this kernel!\n");

	swap_info_init();

	swap_info = symbol_value("swap_info");

	if (swapflags & VERBOSE)
		fprintf(fp, "%s", swap_info_hdr);

	totalswap = totalused = 0;

	/* V1: array of structs; V2: array of pointers to structs */
	for (i = 0; i < vt->nr_swapfiles; i++,
	    swap_info += (vt->flags & SWAPINFO_V1 ?
	    SIZE(swap_info_struct) : sizeof(void *))) {
		if (vt->flags & SWAPINFO_V2) {
			if (!readmem(swap_info, KVADDR, &swap_info_ptr,
			    sizeof(void *), "swap_info pointer",
			    QUIET|RETURN_ON_ERROR))
				continue;
			if (!swap_info_ptr)
				continue;
			fill_swap_info(swap_info_ptr);
		} else
			fill_swap_info(swap_info);

		/* member widths vary across kernel versions */
		if (MEMBER_SIZE("swap_info_struct", "flags") == sizeof(uint))
			flags = UINT(vt->swap_info_struct +
				OFFSET(swap_info_struct_flags));
		else
			flags = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_flags));

		if (!(flags & SWP_USED))
			continue;

		swap_file = ULONG(vt->swap_info_struct +
			OFFSET(swap_info_struct_swap_file));

		/* Linux 6.10 and later */
		if (INVALID_MEMBER(swap_info_struct_swap_device) &&
		    INVALID_MEMBER(swap_info_struct_old_block_size) &&
		    swap_file_is_file) {
			/* classify via the file's inode mode instead */
			ulong inode;
			ushort mode;

			readmem(swap_file + OFFSET(file_f_inode), KVADDR,
				&inode, sizeof(ulong), "swap_file.f_inode",
				FAULT_ON_ERROR);
			readmem(inode + OFFSET(inode_i_mode), KVADDR,
				&mode, sizeof(ushort), "inode.i_mode",
				FAULT_ON_ERROR);
			swap_device = S_ISBLK(mode);
		} else
			swap_device = INT(vt->swap_info_struct +
				OFFSET_OPTION(swap_info_struct_swap_device,
				swap_info_struct_old_block_size));

		pages = INT(vt->swap_info_struct +
			OFFSET(swap_info_struct_pages));

		totalswap += pages;
		pages <<= (PAGESHIFT() - 10);  /* pages -> kilobytes */
		inuse_pages = 0;

		if (MEMBER_SIZE("swap_info_struct", "prio") == sizeof(short))
			prio = SHORT(vt->swap_info_struct +
				OFFSET(swap_info_struct_prio));
		else
			prio = INT(vt->swap_info_struct +
				OFFSET(swap_info_struct_prio));

		if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int))
			max = UINT(vt->swap_info_struct +
				OFFSET(swap_info_struct_max));
		else
			max = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_max));

		if (VALID_MEMBER(swap_info_struct_inuse_pages)) {
			if (MEMBER_SIZE("swap_info_struct", "inuse_pages") ==
			    sizeof(int))
				inuse_pages = UINT(vt->swap_info_struct +
					OFFSET(swap_info_struct_inuse_pages));
			else
				inuse_pages = ULONG(vt->swap_info_struct +
					OFFSET(swap_info_struct_inuse_pages));
		}

		swap_map = ULONG(vt->swap_info_struct +
			OFFSET(swap_info_struct_swap_map));

		/* resolve the swap area's pathname for display */
		if (swap_file) {
			if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
				vfsmnt = ULONG(vt->swap_info_struct +
					OFFSET(swap_info_struct_swap_vfsmnt));
				get_pathname(swap_file, buf, BUFSIZE,
					1, vfsmnt);
			} else if (VALID_MEMBER
				(swap_info_struct_old_block_size) ||
				swap_file_is_file) {
				/*
				 * Linux 6.10 and later kernels do not have old_block_size,
				 * but this still should work, if swap_file is file.
				 */
				devname = vfsmount_devname(
					file_to_vfsmnt(swap_file),
					buf1, BUFSIZE);
				get_pathname(file_to_dentry(swap_file),
					buf, BUFSIZE, 1,
					file_to_vfsmnt(swap_file));
				if ((STREQ(devname, "devtmpfs") ||
				     STREQ(devname, "udev")) &&
				    !STRNEQ(buf, "/dev/"))
					string_insert("/dev", buf);
			} else {
				get_pathname(swap_file, buf, BUFSIZE, 1, 0);
			}
		} else
			sprintf(buf, "(unknown)");

		smap = NULL;
		if (vt->flags & SWAPINFO_V1) {
			/* count used slots directly from the swap_map */
			smap = (ushort *)GETBUF(sizeof(ushort) * max);

			if (!readmem(swap_map, KVADDR, smap,
			    sizeof(ushort) * max,
			    "swap_info swap_map data",
			    RETURN_ON_ERROR|QUIET)) {
				if (swapflags & RETURN_ON_ERROR) {
					/* pass failure details back out */
					*totalswap_pages = swap_map;
					*totalused_pages = i;
					FREEBUF(smap);
					return FALSE;
				} else
					error(FATAL,
			"swap_info[%d].swap_map at %lx is inaccessible\n",
						i, swap_map);
			}
		}

		usedswap = 0;
		if (smap) {
			for (j = 0; j < max; j++) {
				switch (smap[j])
				{
				case SWAP_MAP_BAD:
				case 0:
					continue;
				default:
					usedswap++;
				}
			}
			FREEBUF(smap);
		} else
			/* V2 keeps a ready-made count */
			usedswap = inuse_pages;

		totalused += usedswap;
		usedswap <<= (PAGESHIFT() - 10);  /* pages -> kilobytes */
		pct = (usedswap * 100)/pages;

		if (swapflags & VERBOSE) {
			sprintf(buf1, "%lx", (vt->flags & SWAPINFO_V2) ?
				swap_info_ptr : swap_info);
			sprintf(buf2, "%ldk", pages);
			sprintf(buf3, "%ldk", usedswap);
			sprintf(buf4, "%2ld%%", pct);
			sprintf(buf5, "%d", prio);
			fprintf(fp, "%s %s %s %s %s %s %s\n",
				mkstring(buf1, MAX(VADDR_PRLEN,
				strlen("SWAP_INFO_STRUCT")),
				CENTER|LJUST, NULL),
				swap_device ? "PARTITION" : " FILE ",
				mkstring(buf2, 10, CENTER|RJUST, NULL),
				mkstring(buf3, 10, CENTER|RJUST, NULL),
				mkstring(buf4, 4, CENTER|RJUST, NULL),
				mkstring(buf5, 4, RJUST, NULL),
				buf);
		}
	}

	if (totalswap_pages)
		*totalswap_pages = totalswap;
	if (totalused_pages)
		*totalused_pages = totalused;

	return TRUE;
}

/*
 * Determine the swap_info_struct usage.
 */
void
swap_info_init(void)
{
	struct gnu_request *req;

	/* already determined */
	if (vt->flags & (SWAPINFO_V1|SWAPINFO_V2))
		return;

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));

	/*
	 * Distinguish "struct swap_info_struct swap_info[]" (V1) from
	 * "struct swap_info_struct *swap_info[]" (V2) via debuginfo.
	 */
	if ((get_symbol_type("swap_info", NULL, req) == TYPE_CODE_ARRAY) &&
	    ((req->target_typecode == TYPE_CODE_PTR) ||
	     (req->target_typecode == TYPE_CODE_STRUCT))) {
		switch (req->target_typecode)
		{
		case TYPE_CODE_STRUCT:
			vt->flags |= SWAPINFO_V1;
			break;
		case TYPE_CODE_PTR:
			vt->flags |= SWAPINFO_V2;
			break;
		}
	} else {
		/* no usable debuginfo: the layout changed in 2.6.33 */
		if (THIS_KERNEL_VERSION >= LINUX(2,6,33))
			vt->flags |= SWAPINFO_V2;
		else
			vt->flags |= SWAPINFO_V1;
	}

	FREEBUF(req);
}

/*
 * Translate a PTE into a swap device and offset string.
 */
char *
swap_location(ulonglong pte, char *buf)
{
	char swapdev[BUFSIZE];

	if (!pte)
		return NULL;

	if (!symbol_exists("nr_swapfiles") || !symbol_exists("swap_info"))
		return NULL;

	/* swp_entry encoding macros differ before/after 2.6 */
	if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
		sprintf(buf, "%s OFFSET: %lld",
			get_swapdev(__swp_type(pte), swapdev),
			(ulonglong)__swp_offset(pte));
	else
		sprintf(buf, "%s OFFSET: %llx",
			get_swapdev(SWP_TYPE(pte), swapdev),
			(ulonglong)SWP_OFFSET(pte));

	return buf;
}

/*
 * Given the type field from a PTE, return the name of the swap device.
 */
static char *
get_swapdev(ulong type, char *buf)
{
	unsigned int i, swap_info_len;
	ulong swap_info, swap_info_ptr, swap_file;
	struct syment *sp;
	ulong vfsmnt;
	char *devname;
	char buf1[BUFSIZE];

	swap_info_init();

	swap_info = symbol_value("swap_info");

	swap_info_len = (i = ARRAY_LENGTH(swap_info)) ?
		i : get_array_length("swap_info", NULL, 0);

	/*
	 * Even though the swap_info[] array is declared statically as:
	 *
	 *   struct swap_info_struct *swap_info[MAX_SWAPFILES];
	 *
	 * the dimension may not be shown by the debuginfo data,
	 * for example:
	 *
	 *   struct swap_info_struct *swap_info[28];
	 * or
	 *   struct swap_info_struct *swap_info[];
	 *
	 * In that case, calculate its length by checking the next
	 * symbol's value.
	 */
	if ((swap_info_len == 0) &&
	    (vt->flags & SWAPINFO_V2) &&
	    (sp = next_symbol("swap_info", NULL)))
		swap_info_len = (sp->value - swap_info) / sizeof(void *);

	sprintf(buf, "(unknown swap location)");

	if (type >= swap_info_len)
		return buf;

	switch (vt->flags & (SWAPINFO_V1|SWAPINFO_V2))
	{
	case SWAPINFO_V1:
		swap_info += type * SIZE(swap_info_struct);
		fill_swap_info(swap_info);
		break;

	case SWAPINFO_V2:
		swap_info += type * sizeof(void *);
		if (!readmem(swap_info, KVADDR, &swap_info_ptr,
		    sizeof(void *), "swap_info pointer",
		    RETURN_ON_ERROR|QUIET))
			return buf;
		if (!swap_info_ptr)
			return buf;
		fill_swap_info(swap_info_ptr);
		break;
	}

	swap_file = ULONG(vt->swap_info_struct +
		OFFSET(swap_info_struct_swap_file));

	if (swap_file) {
		if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
			vfsmnt = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_swap_vfsmnt));
			get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
		} else if (VALID_MEMBER
			(swap_info_struct_old_block_size)) {
			/* normalize devtmpfs/udev mounts to /dev paths */
			devname = vfsmount_devname(file_to_vfsmnt(swap_file),
				buf1, BUFSIZE);
			get_pathname(file_to_dentry(swap_file),
				buf, BUFSIZE, 1, file_to_vfsmnt(swap_file));
			if ((STREQ(devname, "devtmpfs") ||
			     STREQ(devname, "udev")) &&
			    !STRNEQ(buf, "/dev/"))
				string_insert("/dev", buf);
		} else {
			get_pathname(swap_file, buf, BUFSIZE, 1, 0);
		}
	}

	return buf;
}

/*
 * If not currently stashed, cache the passed-in swap_info_struct.
*/
static void
fill_swap_info(ulong swap_info)
{
	/* Already cached from a previous call? */
	if (vt->last_swap_read == swap_info)
		return;

	/* Lazily allocate the single cache buffer on first use. */
	if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *)
	    malloc(SIZE(swap_info_struct))))
		error(FATAL, "cannot malloc swap_info_struct space\n");

	readmem(swap_info, KVADDR, vt->swap_info_struct,
	    SIZE(swap_info_struct), "fill_swap_info", FAULT_ON_ERROR);

	/* Remember which kernel address the cache currently holds. */
	vt->last_swap_read = swap_info;
}

/*
 * If active, clear references to the swap_info references.
 * On a live system the kernel data may change between commands,
 * so the cached swap_info_struct cannot be trusted.
 */
void
clear_swap_info_cache(void)
{
	if (ACTIVE())
		vt->last_swap_read = 0;
}

/*
 * Translate a vm_area_struct and virtual address into a filename
 * and offset string.
 */

#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! */

static char *
vma_file_offset(ulong vma, ulong vaddr, char *buf)
{
	ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset;
	ulong vfsmnt;
	char file[BUFSIZE];
	char *vma_buf, *file_buf;

	if (!vma)
		return NULL;

	vma_buf = fill_vma_cache(vma);

	vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));

	/* Anonymous mappings have no backing file. */
	if (!vm_file)
		goto no_file_offset;

	file_buf = fill_file_cache(vm_file);
	dentry = ULONG(file_buf + OFFSET(file_f_dentry));

	if (!dentry)
		goto no_file_offset;

	file[0] = NULLCHAR;
	/* Older kernels carry a vfsmount in the file structure. */
	if (VALID_MEMBER(file_f_vfsmnt)) {
		vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
		get_pathname(dentry, file, BUFSIZE, 1, vfsmnt);
	} else
		get_pathname(dentry, file, BUFSIZE, 1, 0);

	if (!strlen(file))
		goto no_file_offset;

	vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));

	/*
	 * 0xdeadbeef marks "member not present"; only one of vm_offset
	 * (older kernels, byte offset) or vm_pgoff (newer kernels, page
	 * offset) exists in any given kernel version.
	 */
	vm_offset = vm_pgoff = 0xdeadbeef;

	if (VALID_MEMBER(vm_area_struct_vm_offset))
		vm_offset = ULONG(vma_buf +
			OFFSET(vm_area_struct_vm_offset));
	else if (VALID_MEMBER(vm_area_struct_vm_pgoff))
		vm_pgoff = ULONG(vma_buf +
			OFFSET(vm_area_struct_vm_pgoff));
	else
		goto no_file_offset;

	offset = 0;
	if (vm_offset != 0xdeadbeef)
		offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset;
	else if (vm_pgoff != 0xdeadbeef) {
		/* Convert page offset within the file to a byte offset. */
		offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
		offset <<= PAGE_CACHE_SHIFT;
	}

	sprintf(buf, "%s OFFSET: %lx", file,
offset); return buf; no_file_offset: return NULL; } /* * Translate a PTE into its physical address and flags. */ void cmd_pte(void) { int c; ulonglong pte; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { pte = htoll(args[optind], FAULT_ON_ERROR, NULL); machdep->translate_pte((ulong)pte, NULL, pte); optind++; } } static char *node_zone_hdr = "ZONE NAME SIZE"; /* * On systems supporting memory nodes, display the basic per-node data. */ static void dump_memory_nodes(int initialize) { int i, j; int n, id, node, flen, slen, badaddr; ulong node_mem_map; ulong temp_node_start_paddr; ulonglong node_start_paddr; ulong node_start_pfn; ulong node_start_mapnr; ulong node_spanned_pages, node_present_pages; ulong free_pages, zone_size, node_size, cum_zone_size; ulong zone_start_paddr, zone_start_mapnr, zone_mem_map; physaddr_t phys; ulong pp; ulong zone_start_pfn; ulong bdata; ulong pgdat; ulong node_zones; ulong value; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; struct node_table *nt; node = slen = 0; if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) { nt = &vt->node_table[0]; nt->node_id = 0; if (symbol_exists("contig_page_data")) nt->pgdat = symbol_value("contig_page_data"); else nt->pgdat = 0; nt->size = vt->total_pages; nt->mem_map = vt->mem_map; nt->start_paddr = 0; nt->start_mapnr = 0; if (CRASHDEBUG(1)) { fprintf(fp, "node_table[%d]: \n", 0); fprintf(fp, " id: %d\n", nt->node_id); fprintf(fp, " pgdat: %lx\n", nt->pgdat); fprintf(fp, " size: %ld\n", nt->size); fprintf(fp, " present: %ld\n", nt->present); fprintf(fp, " mem_map: %lx\n", nt->mem_map); fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); } return; } if (initialize) { pgdat = UNINITIALIZED; /* * This order may have to change based upon architecture... 
*/ if (symbol_exists("pgdat_list") && (VALID_MEMBER(pglist_data_node_next) || VALID_MEMBER(pglist_data_pgdat_next))) { get_symbol_data("pgdat_list", sizeof(void *), &pgdat); vt->flags &= ~NODES_ONLINE; } else if (vt->flags & NODES_ONLINE) { if ((node = next_online_node(0)) < 0) { error(WARNING, "cannot determine first node from node_online_map\n\n"); return; } if (!(pgdat = next_online_pgdat(node))) { error(WARNING, "cannot determine pgdat list for this kernel/architecture\n\n"); return; } } } else pgdat = vt->node_table[0].pgdat; if (initialize && (pgdat == UNINITIALIZED)) { error(WARNING, "cannot initialize pgdat list\n\n"); return; } for (n = 0, badaddr = FALSE; pgdat; n++) { if (n >= vt->numnodes) error(FATAL, "numnodes out of sync with pgdat_list?\n"); nt = &vt->node_table[n]; readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, sizeof(int), "pglist node_id", FAULT_ON_ERROR); if (VALID_MEMBER(pglist_data_node_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &node_mem_map, sizeof(ulong), "node_mem_map", FAULT_ON_ERROR); } else { node_mem_map = BADADDR; badaddr = TRUE; } if (VALID_MEMBER(pglist_data_node_start_paddr)) { readmem(pgdat+OFFSET(pglist_data_node_start_paddr), KVADDR, &temp_node_start_paddr, sizeof(ulong), "pglist node_start_paddr", FAULT_ON_ERROR); node_start_paddr = temp_node_start_paddr; } else if (VALID_MEMBER(pglist_data_node_start_pfn)) { readmem(pgdat+OFFSET(pglist_data_node_start_pfn), KVADDR, &node_start_pfn, sizeof(ulong), "pglist node_start_pfn", FAULT_ON_ERROR); node_start_mapnr = node_start_pfn; node_start_paddr = PTOB(node_start_pfn); if (badaddr && IS_SPARSEMEM()) { if (!verify_pfn(node_start_pfn)) error(WARNING, "questionable node_start_pfn: %lx\n", node_start_pfn); phys = PTOB(node_start_pfn); if (phys_to_page(phys, &pp)) node_mem_map = pp; } } else error(INFO, "cannot determine zone starting physical address\n"); if (VALID_MEMBER(pglist_data_node_start_mapnr)) readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), 
KVADDR, &node_start_mapnr, sizeof(ulong), "pglist node_start_mapnr", FAULT_ON_ERROR); if (VALID_MEMBER(pglist_data_node_size)) readmem(pgdat+OFFSET(pglist_data_node_size), KVADDR, &node_size, sizeof(ulong), "pglist node_size", FAULT_ON_ERROR); else if (VALID_MEMBER(pglist_data_node_spanned_pages)) { readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), KVADDR, &node_spanned_pages, sizeof(ulong), "pglist node_spanned_pages", FAULT_ON_ERROR); node_size = node_spanned_pages; } else error(INFO, "cannot determine zone size\n"); if (VALID_MEMBER(pglist_data_node_present_pages)) readmem(pgdat+OFFSET(pglist_data_node_present_pages), KVADDR, &node_present_pages, sizeof(ulong), "pglist node_present_pages", FAULT_ON_ERROR); else node_present_pages = 0; if (VALID_MEMBER(pglist_data_bdata)) readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); else bdata = BADADDR; if (initialize) { nt->node_id = id; nt->pgdat = pgdat; if (VALID_MEMBER(zone_struct_memsize)) nt->size = 0; /* initialize below */ else nt->size = node_size; nt->present = node_present_pages; nt->mem_map = node_mem_map; nt->start_paddr = node_start_paddr; nt->start_mapnr = node_start_mapnr; if (CRASHDEBUG(1)) { fprintf(fp, "node_table[%d]: \n", n); fprintf(fp, " id: %d\n", nt->node_id); fprintf(fp, " pgdat: %lx\n", nt->pgdat); fprintf(fp, " size: %ld\n", nt->size); fprintf(fp, " present: %ld\n", nt->present); fprintf(fp, " mem_map: %lx\n", nt->mem_map); fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); } } if (!initialize) { if (n) { fprintf(fp, "\n"); pad_line(fp, slen, '-'); } flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA")); fprintf(fp, "%sNODE %s %s %s %s\n", n ? 
"\n\n" : "", mkstring(buf1, 8, CENTER, "SIZE"), mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"), mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); node_zones = pgdat + OFFSET(pglist_data_node_zones); sprintf(buf5, " %2d %s %s %s %s\n", id, mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, MKSTR(node_size)), mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, MKSTR(pgdat)), bdata == BADADDR ? mkstring(buf3, flen, CENTER, "----") : mkstring(buf3, flen, CENTER|LONG_HEX, MKSTR(bdata)), mkstring(buf4, flen, CENTER|LJUST|LONG_HEX, MKSTR(node_zones))); fprintf(fp, "%s", buf5); j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + count_leading_spaces(buf4); for (i = 1; i < vt->nr_zones; i++) { node_zones += SIZE_OPTION(zone_struct, zone); INDENT(j); fprintf(fp, "%lx\n", node_zones); } fprintf(fp, "%s START_PADDR START_MAPNR\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(node_mem_map)), mkstring(buf2, strlen(" START_PADDR "), CENTER|LONGLONG_HEX|RJUST, MKSTR(&node_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(node_start_mapnr))); sprintf(buf2, "%s %s START_PADDR START_MAPNR", node_zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "MEM_MAP")); slen = strlen(buf2); fprintf(fp, "\n%s\n", buf2); } node_zones = pgdat + OFFSET(pglist_data_node_zones); cum_zone_size = 0; for (i = 0; i < vt->nr_zones; i++) { if (CRASHDEBUG(7)) fprintf(fp, "zone %d at %lx\n", i, node_zones); if (VALID_MEMBER(zone_struct_size)) readmem(node_zones+OFFSET(zone_struct_size), KVADDR, &zone_size, sizeof(ulong), "zone_struct size", FAULT_ON_ERROR); else if (VALID_MEMBER(zone_struct_memsize)) { readmem(node_zones+OFFSET(zone_struct_memsize), KVADDR, &zone_size, sizeof(ulong), "zone_struct memsize", FAULT_ON_ERROR); nt->size += zone_size; } else if (VALID_MEMBER(zone_spanned_pages)) { readmem(node_zones+ OFFSET(zone_spanned_pages), KVADDR, 
&zone_size, sizeof(ulong), "zone spanned_pages", FAULT_ON_ERROR); } else error(FATAL, "zone_struct has neither size nor memsize field\n"); readmem(node_zones+ OFFSET_OPTION(zone_struct_free_pages, zone_free_pages), KVADDR, &free_pages, sizeof(ulong), "zone[_struct] free_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET_OPTION(zone_struct_name, zone_name), KVADDR, &value, sizeof(void *), "zone[_struct] name", FAULT_ON_ERROR); if (!read_string(value, buf1, BUFSIZE-1)) sprintf(buf1, "(unknown) "); if (VALID_STRUCT(zone_struct)) { if (VALID_MEMBER(zone_struct_zone_start_paddr)) { readmem(node_zones+OFFSET (zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); } else { readmem(node_zones+ OFFSET(zone_struct_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); } readmem(node_zones+ OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); } else { readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (IS_SPARSEMEM()) { zone_mem_map = 0; zone_start_mapnr = 0; if (zone_size) { phys = PTOB(zone_start_pfn); zone_start_mapnr = phys/PAGESIZE(); } } else if (!(vt->flags & NODES) && INVALID_MEMBER(zone_zone_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &zone_mem_map, sizeof(void *), "contig_page_data mem_map", FAULT_ON_ERROR); if (zone_size) zone_mem_map += cum_zone_size * SIZE(page); } else readmem(node_zones+ OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); if (zone_mem_map) zone_start_mapnr = (zone_mem_map - node_mem_map) / SIZE(page); else if (!IS_SPARSEMEM()) zone_start_mapnr = 0; } if (IS_SPARSEMEM()) { zone_mem_map = 0; if (zone_size) { phys = 
PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (!(vt->flags & NODES) && INVALID_MEMBER(zone_struct_zone_mem_map) && INVALID_MEMBER(zone_zone_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &zone_mem_map, sizeof(void *), "contig_page_data mem_map", FAULT_ON_ERROR); if (zone_size) zone_mem_map += cum_zone_size * SIZE(page); else zone_mem_map = 0; } else readmem(node_zones+ OFFSET_OPTION(zone_struct_zone_mem_map, zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); if (!initialize) { fprintf(fp, " %2d %-9s %7ld ", i, buf1, zone_size); cum_zone_size += zone_size; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), LONG_HEX|RJUST,MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); } node_zones += SIZE_OPTION(zone_struct, zone); } if (initialize) { if (vt->flags & NODES_ONLINE) { if ((node = next_online_node(node+1)) < 0) pgdat = 0; else if (!(pgdat = next_online_pgdat(node))) { error(WARNING, "cannot determine pgdat list for this kernel/architecture (node %d)\n\n", node); pgdat = 0; } } else readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } else { if ((n+1) < vt->numnodes) pgdat = vt->node_table[n+1].pgdat; else pgdat = 0; } } if (n != vt->numnodes) { if (CRASHDEBUG(2)) error(NOTE, "changing numnodes from %d to %d\n", vt->numnodes, n); vt->numnodes = n; } if (IS_SPARSEMEM()) { dump_mem_sections(initialize); dump_memory_blocks(initialize); } } /* * At least verify that page-shifted physical address. 
*/ static int verify_pfn(ulong pfn) { int i; physaddr_t mask; if (!machdep->max_physmem_bits) return TRUE; mask = 0; for (i = machdep->max_physmem_bits; i < machdep->bits; i++) mask |= ((physaddr_t)1 << i); if (mask & PTOB(pfn)) return FALSE; return TRUE; } static void dump_zone_stats(void) { int i, n; ulong pgdat, node_zones; char *zonebuf; char buf1[BUFSIZE]; int ivalue; ulong value1; ulong value2; ulong value3; ulong value4; ulong value5; ulong value6; long min, low, high; value1 = value2 = value3 = value4 = value5 = value6 = 0; min = low = high = 0; pgdat = vt->node_table[0].pgdat; zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone)); vm_stat_init(); for (n = 0; pgdat; n++) { node_zones = pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (!readmem(node_zones, KVADDR, zonebuf, SIZE_OPTION(zone_struct, zone), "zone buffer", FAULT_ON_ERROR)) break; value1 = ULONG(zonebuf + OFFSET_OPTION(zone_struct_name, zone_name)); if (!read_string(value1, buf1, BUFSIZE-1)) sprintf(buf1, "(unknown) "); if (VALID_MEMBER(zone_struct_size)) value1 = value6 = ULONG(zonebuf + OFFSET(zone_struct_size)); else if (VALID_MEMBER(zone_struct_memsize)) { value1 = value6 = ULONG(zonebuf + OFFSET(zone_struct_memsize)); } else if (VALID_MEMBER(zone_spanned_pages)) { value1 = ULONG(zonebuf + OFFSET(zone_spanned_pages)); value6 = ULONG(zonebuf + OFFSET(zone_present_pages)); } else error(FATAL, "zone struct has unknown size field\n"); if (VALID_MEMBER(zone_watermark)) { if (!enumerator_value("WMARK_MIN", &min) || !enumerator_value("WMARK_LOW", &low) || !enumerator_value("WMARK_HIGH", &high)) { min = 0; low = 1; high = 2; } value2 = ULONG(zonebuf + OFFSET(zone_watermark) + (sizeof(long) * min)); value3 = ULONG(zonebuf + OFFSET(zone_watermark) + (sizeof(long) * low)); value4 = ULONG(zonebuf + OFFSET(zone_watermark) + (sizeof(long) * high)); } else { value2 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_min, zone_struct_pages_min)); value3 = ULONG(zonebuf + 
OFFSET_OPTION(zone_pages_low, zone_struct_pages_low)); value4 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_high, zone_struct_pages_high)); } value5 = ULONG(zonebuf + OFFSET_OPTION(zone_free_pages, zone_struct_free_pages)); fprintf(fp, "NODE: %d ZONE: %d ADDR: %lx NAME: \"%s\"\n", n, i, node_zones, buf1); if (!value1) { fprintf(fp, " [unpopulated]\n"); goto next_zone; } fprintf(fp, " SIZE: %ld", value1); if (value6 < value1) fprintf(fp, " PRESENT: %ld", value6); fprintf(fp, " MIN/LOW/HIGH: %ld/%ld/%ld", value2, value3, value4); if (VALID_MEMBER(zone_vm_stat)) dump_vm_stat("NR_FREE_PAGES", (long *)&value5, node_zones + OFFSET(zone_vm_stat)); if (VALID_MEMBER(zone_nr_active) && VALID_MEMBER(zone_nr_inactive)) { value1 = ULONG(zonebuf + OFFSET(zone_nr_active)); value2 = ULONG(zonebuf + OFFSET(zone_nr_inactive)); fprintf(fp, "\n NR_ACTIVE: %ld NR_INACTIVE: %ld FREE: %ld\n", value1, value2, value5); if (VALID_MEMBER(zone_vm_stat)) { fprintf(fp, " VM_STAT:\n"); dump_vm_stat(NULL, NULL, node_zones + OFFSET(zone_vm_stat)); } } else if (VALID_MEMBER(zone_vm_stat) && dump_vm_stat("NR_ACTIVE", (long *)&value1, node_zones + OFFSET(zone_vm_stat)) && dump_vm_stat("NR_INACTIVE", (long *)&value2, node_zones + OFFSET(zone_vm_stat))) { fprintf(fp, "\n VM_STAT:\n"); dump_vm_stat(NULL, NULL, node_zones + OFFSET(zone_vm_stat)); } else { if (VALID_MEMBER(zone_vm_stat)) { fprintf(fp, "\n VM_STAT:\n"); dump_vm_stat(NULL, NULL, node_zones + OFFSET(zone_vm_stat)); } else fprintf(fp, " FREE: %ld\n", value5); } if (VALID_MEMBER(zone_all_unreclaimable)) { ivalue = UINT(zonebuf + OFFSET(zone_all_unreclaimable)); fprintf(fp, " ALL_UNRECLAIMABLE: %s ", ivalue ? "yes" : "no"); } else if (VALID_MEMBER(zone_flags) && enumerator_value("ZONE_ALL_UNRECLAIMABLE", (long *)&value1)) { value2 = ULONG(zonebuf + OFFSET(zone_flags)); value3 = value2 & (1 << value1); fprintf(fp, " ALL_UNRECLAIMABLE: %s ", value3 ? 
"yes" : "no"); } if (VALID_MEMBER(zone_pages_scanned)) { value1 = ULONG(zonebuf + OFFSET(zone_pages_scanned)); fprintf(fp, "PAGES_SCANNED: %lu ", value1); } fprintf(fp, "\n"); next_zone: fprintf(fp, "\n"); node_zones += SIZE_OPTION(zone_struct, zone); } if ((n+1) < vt->numnodes) pgdat = vt->node_table[n+1].pgdat; else pgdat = 0; } FREEBUF(zonebuf); } /* * Gather essential information regarding each memory node. */ static void node_table_init(void) { int n; ulong pgdat; /* * Override numnodes -- some kernels may leave it at 1 on a system * with multiple memory nodes. */ if ((vt->flags & NODES) && (VALID_MEMBER(pglist_data_node_next) || VALID_MEMBER(pglist_data_pgdat_next))) { get_symbol_data("pgdat_list", sizeof(void *), &pgdat); for (n = 0; pgdat; n++) { readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } if (n != vt->numnodes) { if (CRASHDEBUG(2)) error(NOTE, "changing numnodes from %d to %d\n", vt->numnodes, n); vt->numnodes = n; } } else vt->flags &= ~NODES; if (!(vt->node_table = (struct node_table *) malloc(sizeof(struct node_table) * vt->numnodes))) error(FATAL, "cannot malloc node_table %s(%d nodes)", vt->numnodes > 1 ? "array " : "", vt->numnodes); BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes); dump_memory_nodes(MEMORY_NODES_INITIALIZE); qsort((void *)vt->node_table, (size_t)vt->numnodes, sizeof(struct node_table), compare_node_data); if (CRASHDEBUG(2)) dump_memory_nodes(MEMORY_NODES_DUMP); } /* * The comparison function must return an integer less than, * equal to, or greater than zero if the first argument is * considered to be respectively less than, equal to, or * greater than the second. If two members compare as equal, * their order in the sorted array is undefined. 
*/
static int
compare_node_data(const void *v1, const void *v2)
{
	struct node_table *t1, *t2;

	t1 = (struct node_table *)v1;
	t2 = (struct node_table *)v2;

	/* Sort node_table entries by ascending node_id. */
	return (t1->node_id < t2->node_id ? -1 :
		t1->node_id == t2->node_id ? 0 : 1);
}

/*
 * Depending upon the processor, and whether we're running live or on a
 * dumpfile, get the system page size.
 */
uint
memory_page_size(void)
{
	uint psz;

	/* Use the cached/forced value if one has been established. */
	if (machdep->pagesize)
		return machdep->pagesize;

	if (REMOTE_MEMSRC())
		return remote_page_size();

	/* Otherwise ask the active memory source (dumpfile format). */
	switch (pc->flags & MEMORY_SOURCES)
	{
	case DISKDUMP:
		psz = diskdump_page_size();
		break;

	case XENDUMP:
		psz = xendump_page_size();
		break;

	case KDUMP:
		psz = kdump_page_size();
		break;

	case NETDUMP:
		psz = netdump_page_size();
		break;

	case MCLXCD:
		psz = (uint)mclx_page_size();
		break;

	case LKCD:
#if 0							/* REMIND: */
		psz = lkcd_page_size();  /* dh_dump_page_size is HW page size; should add dh_page_size */
#else
		psz = (uint)getpagesize();
#endif
		break;

	case DEVMEM:
	case MEMMOD:
	case CRASHBUILTIN:
	case KVMDUMP:
	case PROC_KCORE:
	case LIVE_RAMDUMP:
		/* Local sources: the host's page size applies. */
		psz = (uint)getpagesize();
		break;

	case S390D:
		psz = s390_page_size();
		break;

	case SADUMP:
		psz = sadump_page_size();
		break;

	case VMWARE_VMSS:
		psz = vmware_vmss_page_size();
		break;

	default:
		psz = 0;
		error(FATAL, "memory_page_size: invalid pc->flags: %lx\n",
			pc->flags & MEMORY_SOURCES);
	}

	return psz;
}

/*
 * If the page size cannot be determined by the dumpfile (like kdump),
 * and the processor default cannot be used, allow the force-feeding
 * of a crash command-line page size option.
 */
void
force_page_size(char *s)
{
	int k, err;
	ulong psize;

	k = 1;
	err = FALSE;
	psize = 0;

	switch (LASTCHAR(s))
	{
	case 'k':
	case 'K':
		/* Strip the suffix and scale a decimal value by 1024. */
		LASTCHAR(s) = NULLCHAR;
		if (!decimal(s, 0)) {
			err = TRUE;
			break;
		}
		k = 1024;
		/* FALLTHROUGH */
	default:
		if (decimal(s, 0))
			psize = dtol(s, QUIET|RETURN_ON_ERROR, &err);
		else if (hexadecimal(s, 0))
			psize = htol(s, QUIET|RETURN_ON_ERROR, &err);
		else
			err = TRUE;
		break;
	}

	if (err)
		error(INFO, "invalid page size: %s\n", s);
	else
		machdep->pagesize = psize * k;
}

/*
 * Return the vmalloc address referenced by the first vm_struct
 * on the vmlist.  This can normally be used by the machine-specific
 * xxx_vmalloc_start() routines.
 */
ulong
first_vmalloc_address(void)
{
	static ulong vmalloc_start = 0;
	ulong vm_struct, vmap_area;
	char *vmalloc_start_string;

	/* A dumpfile cannot change; reuse the cached answer. */
	if (DUMPFILE() && vmalloc_start)
		return vmalloc_start;

	/*
	 * 'vmap_area_list' and 'vmlist' in Linux 6.9 and later kernels might be
	 * empty, prefer NUMBER(VMALLOC_START) if exported in vmcoreinfo.
	 */
	vmalloc_start_string = pc->read_vmcoreinfo("NUMBER(VMALLOC_START)");
	if (vmalloc_start_string) {
		vmalloc_start = htol(vmalloc_start_string, QUIET, NULL);
		free(vmalloc_start_string);
	} else if (vt->flags & USE_VMAP_AREA) {
		get_symbol_data("vmap_area_list", sizeof(void *), &vmap_area);
		if (!vmap_area)
			return 0;
		/* Back up from the list_head to the vmap_area base. */
		if (!readmem(vmap_area - OFFSET(vmap_area_list) +
		    OFFSET(vmap_area_va_start), KVADDR, &vmalloc_start,
		    sizeof(void *), "first vmap_area va_start", RETURN_ON_ERROR))
			non_matching_kernel();
	} else if (kernel_symbol_exists("vmlist")) {
		get_symbol_data("vmlist", sizeof(void *), &vm_struct);
		if (!vm_struct)
			return 0;
		if (!readmem(vm_struct+OFFSET(vm_struct_addr), KVADDR,
		    &vmalloc_start, sizeof(void *),
		    "first vmlist addr", RETURN_ON_ERROR))
			non_matching_kernel();
	}

	return vmalloc_start;
}

/*
 * Return the highest vmalloc address in the vmlist.
*/
ulong
last_vmalloc_address(void)
{
	struct meminfo meminfo;
	static ulong vmalloc_limit = 0;

	/*
	 * Recompute on a live system (the vmlist may change); a dumpfile
	 * answer is cached after the first lookup.
	 */
	if (!vmalloc_limit || ACTIVE()) {
		BZERO(&meminfo, sizeof(struct meminfo));
		meminfo.memtype = KVADDR;
		meminfo.spec_addr = 0;
		meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST);
		dump_vmlist(&meminfo);
		vmalloc_limit = meminfo.retval;
	}

	return vmalloc_limit;
}

/*
 * Determine whether an identity-mapped virtual address
 * refers to an existent physical page, and if not bump
 * it up to the next node.
 *
 * Returns TRUE with *nextvaddr set to vaddr itself when the address
 * falls within a node's physical range, or to the virtual address of
 * the next higher node's base otherwise; FALSE if no node qualifies.
 */
static int
next_identity_mapping(ulong vaddr, ulong *nextvaddr)
{
	int n, retval;
	struct node_table *nt;
	ulonglong paddr, pstart, psave, pend;
	ulong node_size;

	paddr = VTOP(vaddr);
	psave = 0;
	retval = FALSE;

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		/* Virtual mem_map with one node: size is max_mapnr. */
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		pstart = nt->start_paddr;
		pend = pstart + ((ulonglong)node_size * PAGESIZE());

		/*
		 * Check the next node.
		 */
		if (paddr >= pend)
			continue;

		/*
		 * Bump up to the next node, but keep looking in
		 * case of non-sequential nodes.
		 */
		if (paddr < pstart) {
			if (psave && (psave < pstart))
				continue;
			*nextvaddr = PTOV(pstart);
			psave = pstart;
			retval = TRUE;
			continue;
		}

		/*
		 * We're in the physical range.
		 */
		*nextvaddr = vaddr;
		retval = TRUE;
		break;
	}

	return retval;
}

/*
 * Return the L1 cache size in bytes, which can be found stored in the
 * cache_cache.
 */
*/ int l1_cache_size(void) { ulong cache; ulong c_align; int colour_off; int retval; retval = -1; if (VALID_MEMBER(kmem_cache_s_c_align)) { cache = symbol_value("cache_cache"); readmem(cache+OFFSET(kmem_cache_s_c_align), KVADDR, &c_align, sizeof(ulong), "c_align", FAULT_ON_ERROR); retval = (int)c_align; } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { cache = symbol_value("cache_cache"); readmem(cache+OFFSET(kmem_cache_s_colour_off), KVADDR, &colour_off, sizeof(int), "colour_off", FAULT_ON_ERROR); retval = colour_off; } return retval; } /* * Multi-purpose routine used to query/control dumpfile memory usage. */ int dumpfile_memory(int cmd) { int retval; retval = 0; switch (cmd) { case DUMPFILE_MEM_USED: if (REMOTE_DUMPFILE()) retval = remote_memory_used(); else if (pc->flags & NETDUMP) retval = netdump_memory_used(); else if (pc->flags & KDUMP) retval = kdump_memory_used(); else if (pc->flags & XENDUMP) retval = xendump_memory_used(); else if (pc->flags & KVMDUMP) retval = kvmdump_memory_used(); else if (pc->flags & DISKDUMP) retval = diskdump_memory_used(); else if (pc->flags & LKCD) retval = lkcd_memory_used(); else if (pc->flags & MCLXCD) retval = vas_memory_used(); else if (pc->flags & S390D) retval = s390_memory_used(); else if (pc->flags & SADUMP) retval = sadump_memory_used(); break; case DUMPFILE_FREE_MEM: if (REMOTE_DUMPFILE()) retval = remote_free_memory(); else if (pc->flags & NETDUMP) retval = netdump_free_memory(); else if (pc->flags & KDUMP) retval = kdump_free_memory(); else if (pc->flags & XENDUMP) retval = xendump_free_memory(); else if (pc->flags & KVMDUMP) retval = kvmdump_free_memory(); else if (pc->flags & DISKDUMP) retval = diskdump_free_memory(); else if (pc->flags & LKCD) retval = lkcd_free_memory(); else if (pc->flags & MCLXCD) retval = vas_free_memory(NULL); else if (pc->flags & S390D) retval = s390_free_memory(); else if (pc->flags & SADUMP) retval = sadump_free_memory(); break; case DUMPFILE_MEM_DUMP: if (REMOTE_DUMPFILE()) retval = 
remote_memory_dump(0); else if (pc->flags & NETDUMP) retval = netdump_memory_dump(fp); else if (pc->flags & KDUMP) retval = kdump_memory_dump(fp); else if (pc->flags & XENDUMP) retval = xendump_memory_dump(fp); else if (pc->flags & KVMDUMP) retval = kvmdump_memory_dump(fp); else if (pc->flags & DISKDUMP) retval = diskdump_memory_dump(fp); else if (pc->flags & LKCD) retval = lkcd_memory_dump(set_lkcd_fp(fp)); else if (pc->flags & MCLXCD) retval = vas_memory_dump(fp); else if (pc->flags & S390D) retval = s390_memory_dump(fp); else if (pc->flags & PROC_KCORE) retval = kcore_memory_dump(fp); else if (pc->flags & SADUMP) retval = sadump_memory_dump(fp); else if (pc->flags & VMWARE_VMSS) { if (pc->flags2 & VMWARE_VMSS_GUESTDUMP) retval = vmware_guestdump_memory_dump(fp); else retval = vmware_vmss_memory_dump(fp); } break; case DUMPFILE_ENVIRONMENT: if (pc->flags & LKCD) { set_lkcd_fp(fp); dump_lkcd_environment(0); } else if (pc->flags & REM_LKCD) retval = remote_memory_dump(VERBOSE); break; } return retval; } /* * Functions for sparse mem support */ ulong sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) { return coded_mem_map + (section_nr_to_pfn(section_nr) * SIZE(page)); } void sparse_mem_init(void) { ulong addr; ulong mem_section_size; int len, dimension, mem_section_is_ptr; if (!IS_SPARSEMEM()) return; MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section", "section_mem_map"); if (!MAX_PHYSMEM_BITS()) error(FATAL, "CONFIG_SPARSEMEM kernels not supported for this architecture\n"); /* * The kernel's mem_section changed from array to pointer in this commit: * * commit 83e3c48729d9ebb7af5a31a504f3fd6aff0348c4 * mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y */ mem_section_is_ptr = get_symbol_type("mem_section", NULL, NULL) == TYPE_CODE_PTR ? 
TRUE : FALSE; if (((len = get_array_length("mem_section", &dimension, 0)) == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || mem_section_is_ptr || !dimension) vt->flags |= SPARSEMEM_EX; if (IS_SPARSEMEM_EX()) { machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME(); mem_section_size = sizeof(void *) * NR_SECTION_ROOTS(); } else { machdep->sections_per_root = _SECTIONS_PER_ROOT(); mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS(); } if (CRASHDEBUG(1)) { fprintf(fp, "PAGESIZE=%d\n",PAGESIZE()); fprintf(fp,"mem_section_size = %ld\n", mem_section_size); fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS()); fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS()); fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() ); fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK()); fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION()); if (!mem_section_is_ptr && IS_SPARSEMEM_EX() && !len) error(WARNING, "SPARSEMEM_EX: questionable section values\n"); } if (!(vt->mem_sec = (void *)malloc(mem_section_size))) error(FATAL, "cannot malloc mem_sec cache\n"); if (!(vt->mem_section = (char *)malloc(SIZE(mem_section)))) error(FATAL, "cannot malloc mem_section cache\n"); if (mem_section_is_ptr) get_symbol_data("mem_section", sizeof(void *), &addr); else addr = symbol_value("mem_section"); readmem(addr, KVADDR, vt->mem_sec, mem_section_size, "memory section root table", FAULT_ON_ERROR); } char * read_mem_section(ulong addr) { if ((addr == 0) || !IS_KVADDR(addr)) return 0; readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section), "memory section", FAULT_ON_ERROR); return vt->mem_section; } ulong nr_to_section(ulong nr) { ulong addr; ulong *mem_sec = vt->mem_sec; if (IS_SPARSEMEM_EX()) { if (SECTION_NR_TO_ROOT(nr) >= NR_SECTION_ROOTS()) { if (!STREQ(pc->curcmd, "rd") && !STREQ(pc->curcmd, "search") && !STREQ(pc->curcmd, "kmem")) error(WARNING, "sparsemem: invalid section number: %ld\n", nr); return 0; } } if (IS_SPARSEMEM_EX()) { if 
((mem_sec[SECTION_NR_TO_ROOT(nr)] == 0) || !IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)])) return 0; addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); } else addr = symbol_value("mem_section") + (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) + (nr & SECTION_ROOT_MASK())) * SIZE(mem_section); if (!IS_KVADDR(addr)) return 0; return addr; } /* * We use the lower bits of the mem_map pointer to store * a little bit of information. The pointer is calculated * as mem_map - section_nr_to_pfn(pnum). The result is * aligned to the minimum alignment of the two values: * 1. All mem_map arrays are page-aligned. * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT * lowest bits. PFN_SECTION_SHIFT is arch-specific * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the * worst combination is powerpc with 256k pages, * which results in PFN_SECTION_SHIFT equal 6. * To sum it up, at least 6 bits are available. */ #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_IS_ONLINE (1UL<<2) #define SECTION_IS_EARLY (1UL<<3) #define SECTION_TAINT_ZONE_DEVICE (1UL<<4) #define SECTION_MAP_LAST_BIT (1UL<<5) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) int valid_section(ulong addr) { char *mem_section; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) & SECTION_MARKED_PRESENT); return 0; } int section_has_mem_map(ulong addr) { char *mem_section; ulong kernel_version_bit; if (THIS_KERNEL_VERSION >= LINUX(2,6,24)) kernel_version_bit = SECTION_HAS_MEM_MAP; else kernel_version_bit = SECTION_MARKED_PRESENT; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) & kernel_version_bit); return 0; } ulong section_mem_map_addr(ulong addr, int raw) { char *mem_section; ulong map; if ((mem_section = read_mem_section(addr))) { map = ULONG(mem_section + OFFSET(mem_section_section_mem_map)); if (!raw) map &= 
SECTION_MAP_MASK; return map; } return 0; } ulong valid_section_nr(ulong nr) { ulong addr = nr_to_section(nr); if (valid_section(addr)) return addr; return 0; } ulong pfn_to_map(ulong pfn) { ulong section, page_offset; ulong section_nr; ulong coded_mem_map, mem_map; section_nr = pfn_to_section_nr(pfn); if (!(section = valid_section_nr(section_nr))) return 0; if (section_has_mem_map(section)) { page_offset = pfn - section_nr_to_pfn(section_nr); coded_mem_map = section_mem_map_addr(section, 0); mem_map = sparse_decode_mem_map(coded_mem_map, section_nr) + (page_offset * SIZE(page)); return mem_map; } return 0; } static void fill_mem_section_state(ulong state, char *buf) { int bufidx = 0; memset(buf, 0, sizeof(*buf) * BUFSIZE); if (state & SECTION_MARKED_PRESENT) bufidx += sprintf(buf + bufidx, "%s", "P"); if (state & SECTION_HAS_MEM_MAP) bufidx += sprintf(buf + bufidx, "%s", "M"); if (state & SECTION_IS_ONLINE) bufidx += sprintf(buf + bufidx, "%s", "O"); if (state & SECTION_IS_EARLY) bufidx += sprintf(buf + bufidx, "%s", "E"); if (state & SECTION_TAINT_ZONE_DEVICE) bufidx += sprintf(buf + bufidx, "%s", "D"); } void dump_mem_sections(int initialize) { ulong nr, max, addr; ulong nr_mem_sections; ulong coded_mem_map, mem_map, pfn; char statebuf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; nr_mem_sections = NR_MEM_SECTIONS(); if (initialize) { for (nr = max = 0; nr < nr_mem_sections ; nr++) { if (valid_section_nr(nr)) max = nr; } vt->max_mem_section_nr = max; return; } fprintf(fp, "\n"); pad_line(fp, BITS32() ? 
59 : 67, '-');
	fprintf(fp, "\n\nNR %s %s %s %s PFN\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"),
		mkstring(buf2, MAX(VADDR_PRLEN,strlen("CODED_MEM_MAP")),
		CENTER|LJUST, "CODED_MEM_MAP"),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"),
		mkstring(buf4, strlen("STATE"), CENTER, "STATE"));

	for (nr = 0; nr < nr_mem_sections ; nr++) {
		if ((addr = valid_section_nr(nr))) {
			coded_mem_map = section_mem_map_addr(addr, 0);
			mem_map = sparse_decode_mem_map(coded_mem_map,nr);
			pfn = section_nr_to_pfn(nr);
			/* raw (arg 2 == 1) keeps the low flag bits for display */
			fill_mem_section_state(section_mem_map_addr(addr, 1),
				statebuf);
			fprintf(fp, "%2ld %s %s %s %s %s\n",
				nr,
				mkstring(buf1, VADDR_PRLEN,
				CENTER|LONG_HEX, MKSTR(addr)),
				mkstring(buf2, MAX(VADDR_PRLEN,
				strlen("CODED_MEM_MAP")),
				CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)),
				mkstring(buf3, VADDR_PRLEN,
				CENTER|LONG_HEX|RJUST, MKSTR(mem_map)),
				mkstring(buf4, strlen("STATE"), CENTER,
				statebuf),
				pc->output_radix == 10 ?
				mkstring(buf5, VADDR_PRLEN,
				LONG_DEC|LJUST, MKSTR(pfn)) :
				mkstring(buf5, VADDR_PRLEN,
				LONG_HEX|LJUST, MKSTR(pfn)));
		}
	}
}

/*
 *  memory_block.state values -- presumably mirroring the kernel's
 *  include/linux/memory.h definitions (TODO: confirm against target
 *  kernel version).
 */
#define MEM_ONLINE		(1<<0)
#define MEM_GOING_OFFLINE	(1<<1)
#define MEM_OFFLINE		(1<<2)
#define MEM_GOING_ONLINE	(1<<3)
#define MEM_CANCEL_ONLINE	(1<<4)
#define MEM_CANCEL_OFFLINE	(1<<5)

/*
 *  Read a memory_block's "state" member from the dump and render it
 *  as a human-readable string into buf (assumed BUFSIZE bytes).
 */
static void
fill_memory_block_state(ulong memblock, char *buf)
{
	ulong state;

	memset(buf, 0, sizeof(*buf) * BUFSIZE);

	readmem(memblock + OFFSET(memory_block_state), KVADDR, &state,
		sizeof(void *), "memory_block state", FAULT_ON_ERROR);

	switch (state) {
	case MEM_ONLINE:
		sprintf(buf, "%s", "ONLINE");
		break;
	case MEM_GOING_OFFLINE:
		sprintf(buf, "%s", "GOING_OFFLINE");
		break;
	case MEM_OFFLINE:
		sprintf(buf, "%s", "OFFLINE");
		break;
	case MEM_GOING_ONLINE:
		sprintf(buf, "%s", "GOING_ONLINE");
		break;
	case MEM_CANCEL_ONLINE:
		sprintf(buf, "%s", "CANCEL_ONLINE");
		break;
	case MEM_CANCEL_OFFLINE:
		sprintf(buf, "%s", "CANCEL_OFFLINE");
		break;
	default:
		sprintf(buf, "%s", "UNKNOWN");
	}
}

/* Convert a page frame number to its physical address. */
static ulong
pfn_to_phys(ulong pfn)
{
	return pfn << PAGESHIFT();
}

static void
fill_memory_block_name(ulong memblock, char *name) { ulong kobj, value; memset(name, 0, sizeof(*name) * BUFSIZE); kobj = memblock + OFFSET(memory_block_dev) + OFFSET(device_kobj); readmem(kobj + OFFSET(kobject_name), KVADDR, &value, sizeof(void *), "kobject name", FAULT_ON_ERROR); read_string(value, name, BUFSIZE-1); } static void fill_memory_block_parange(ulong saddr, ulong eaddr, char *parange) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; memset(parange, 0, sizeof(*parange) * BUFSIZE); if (eaddr == ULLONG_MAX) sprintf(parange, "%s", mkstring(buf1, PADDR_PRLEN*2 + 3, CENTER|LONG_HEX, MKSTR(saddr))); else sprintf(parange, "%s - %s", mkstring(buf1, PADDR_PRLEN, RJUST|LONG_HEX, MKSTR(saddr)), mkstring(buf2, PADDR_PRLEN, RJUST|LONG_HEX, MKSTR(eaddr))); } static void fill_memory_block_srange(ulong start_sec, char *srange) { memset(srange, 0, sizeof(*srange) * BUFSIZE); sprintf(srange, "%lu", start_sec); } static void print_memory_block(ulong memory_block) { ulong start_sec, end_sec, nid; ulong memblock_size, mbs, start_addr, end_addr = (ulong)ULLONG_MAX; char statebuf[BUFSIZE]; char srangebuf[BUFSIZE]; char parangebuf[BUFSIZE]; char name[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; char buf7[BUFSIZE]; readmem(memory_block + OFFSET(memory_block_start_section_nr), KVADDR, &start_sec, sizeof(void *), "memory_block start_section_nr", FAULT_ON_ERROR); start_addr = pfn_to_phys(section_nr_to_pfn(start_sec)); if (symbol_exists("memory_block_size_probed")) { memblock_size = symbol_value("memory_block_size_probed"); readmem(memblock_size, KVADDR, &mbs, sizeof(ulong), "memory_block_size_probed", FAULT_ON_ERROR); end_addr = start_addr + mbs - 1; } else if (MEMBER_EXISTS("memory_block", "end_section_nr")) { readmem(memory_block + OFFSET(memory_block_end_section_nr), KVADDR, &end_sec, sizeof(void *), "memory_block end_section_nr", FAULT_ON_ERROR); end_addr = pfn_to_phys(section_nr_to_pfn(end_sec + 1)) - 1; } 
fill_memory_block_state(memory_block, statebuf); fill_memory_block_name(memory_block, name); fill_memory_block_parange(start_addr, end_addr, parangebuf); fill_memory_block_srange(start_sec, srangebuf); if (MEMBER_EXISTS("memory_block", "nid")) { readmem(memory_block + OFFSET(memory_block_nid), KVADDR, &nid, sizeof(int), "memory_block nid", FAULT_ON_ERROR); fprintf(fp, " %s %s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(memory_block)), mkstring(buf2, 12, CENTER, name), parangebuf, mkstring(buf5, strlen("NODE"), CENTER|INT_DEC, MKSTR(nid)), mkstring(buf6, strlen("OFFLINE"), LJUST, statebuf), mkstring(buf7, 12, LJUST, srangebuf)); } else fprintf(fp, " %s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(memory_block)), mkstring(buf2, 10, CENTER, name), parangebuf, mkstring(buf5, strlen("OFFLINE"), LJUST, statebuf), mkstring(buf6, 12, LJUST, srangebuf)); } static void init_memory_block_offset(void) { MEMBER_OFFSET_INIT(bus_type_p, "bus_type", "p"); if (INVALID_MEMBER(bus_type_p)) { MEMBER_OFFSET_INIT(kset_list, "kset", "list"); MEMBER_OFFSET_INIT(kset_kobj, "kset", "kobj"); MEMBER_OFFSET_INIT(kobject_name, "kobject", "name"); MEMBER_OFFSET_INIT(kobject_entry, "kobject", "entry"); MEMBER_OFFSET_INIT(subsys_private_subsys, "subsys_private", "subsys"); } MEMBER_OFFSET_INIT(subsys_private_klist_devices, "subsys_private", "klist_devices"); MEMBER_OFFSET_INIT(klist_k_list, "klist", "k_list"); MEMBER_OFFSET_INIT(klist_node_n_node, "klist_node", "n_node"); MEMBER_OFFSET_INIT(device_kobj, "device", "kobj"); MEMBER_OFFSET_INIT(kobject_name, "kobject", "name"); MEMBER_OFFSET_INIT(device_private_knode_bus, "device_private", "knode_bus"); MEMBER_OFFSET_INIT(device_private_device, "device_private", "device"); MEMBER_OFFSET_INIT(memory_block_dev, "memory_block", "dev"); MEMBER_OFFSET_INIT(memory_block_start_section_nr, "memory_block", "start_section_nr"); MEMBER_OFFSET_INIT(memory_block_end_section_nr, "memory_block", "end_section_nr"); 
MEMBER_OFFSET_INIT(memory_block_state, "memory_block", "state"); if (MEMBER_EXISTS("memory_block", "nid")) MEMBER_OFFSET_INIT(memory_block_nid, "memory_block", "nid"); } static void init_memory_block(int *klistcnt, ulong **klistbuf) { ulong private, klist, start; struct list_data list_data, *ld; ld = &list_data; private = 0; init_memory_block_offset(); /* * v6.3-rc1 * d2bf38c088e0 driver core: remove private pointer from struct bus_type */ if (INVALID_MEMBER(bus_type_p)) private = get_subsys_private("bus_kset", "memory"); else { ulong memory_subsys = symbol_value("memory_subsys"); readmem(memory_subsys + OFFSET(bus_type_p), KVADDR, &private, sizeof(void *), "memory_subsys.private", FAULT_ON_ERROR); } if (!private) error(FATAL, "cannot determine subsys_private for memory.\n"); klist = private + OFFSET(subsys_private_klist_devices) + OFFSET(klist_k_list); BZERO(ld, sizeof(struct list_data)); readmem(klist, KVADDR, &start, sizeof(void *), "klist klist", FAULT_ON_ERROR); ld->start = start; ld->end = klist; ld->list_head_offset = OFFSET(klist_node_n_node) + OFFSET(device_private_knode_bus); hq_open(); *klistcnt = do_list(ld); *klistbuf = (ulong *)GETBUF(*klistcnt * sizeof(ulong)); *klistcnt = retrieve_list(*klistbuf, *klistcnt); hq_close(); } void dump_memory_blocks(int initialize) { ulong memory_block, device; ulong *klistbuf; int klistcnt, i; char mb_hdr[BUFSIZE]; char paddr_hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; if ((!STRUCT_EXISTS("memory_block")) || (!symbol_exists("memory_subsys"))) return; if (initialize) return; init_memory_block(&klistcnt, &klistbuf); if ((symbol_exists("memory_block_size_probed")) || (MEMBER_EXISTS("memory_block", "end_section_nr"))) sprintf(paddr_hdr, "%s", "PHYSICAL RANGE"); else sprintf(paddr_hdr, "%s", "PHYSICAL START"); if (MEMBER_EXISTS("memory_block", "nid")) sprintf(mb_hdr, "\n%s %s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, 
"MEM_BLOCK"), mkstring(buf2, 10, CENTER, "NAME"), mkstring(buf3, PADDR_PRLEN*2 + 2, CENTER, paddr_hdr), mkstring(buf4, strlen("NODE"), CENTER, "NODE"), mkstring(buf5, strlen("OFFLINE"), LJUST, "STATE"), mkstring(buf6, 12, LJUST, "START_SECTION_NO")); else sprintf(mb_hdr, "\n%s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_BLOCK"), mkstring(buf2, 10, CENTER, "NAME"), mkstring(buf3, PADDR_PRLEN*2, CENTER, paddr_hdr), mkstring(buf4, strlen("OFFLINE"), LJUST, "STATE"), mkstring(buf5, 12, LJUST, "START_SECTION_NO")); fprintf(fp, "%s", mb_hdr); for (i = 0; i < klistcnt; i++) { readmem(klistbuf[i] + OFFSET(device_private_device), KVADDR, &device, sizeof(void *), "device_private device", FAULT_ON_ERROR); memory_block = device - OFFSET(memory_block_dev); print_memory_block(memory_block); } FREEBUF(klistbuf); } void list_mem_sections(void) { ulong nr,addr; ulong nr_mem_sections = NR_MEM_SECTIONS(); ulong coded_mem_map; for (nr = 0; nr <= nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr, 0); fprintf(fp, "nr=%ld section = %lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n", nr, addr, coded_mem_map, section_nr_to_pfn(nr), sparse_decode_mem_map(coded_mem_map,nr)); } } } /* * For kernels containing the node_online_map or node_states[], * return the number of online node bits set. */ static int get_nodes_online(void) { int i, len, online; struct gnu_request req; ulong *maskptr; long N_ONLINE; ulong mapaddr; if (!symbol_exists("node_online_map") && !symbol_exists("node_states")) return 0; len = mapaddr = 0; if (symbol_exists("node_online_map")) { if (LKCD_KERNTYPES()) { if ((len = STRUCT_SIZE("nodemask_t")) < 0) error(FATAL, "cannot determine type nodemask_t\n"); mapaddr = symbol_value("node_online_map"); } else { len = get_symbol_type("node_online_map", NULL, &req) == TYPE_CODE_UNDEF ? 
sizeof(ulong) : req.length; mapaddr = symbol_value("node_online_map"); } } else if (symbol_exists("node_states")) { if ((get_symbol_type("node_states", NULL, &req) != TYPE_CODE_ARRAY) || !(len = get_array_length("node_states", NULL, 0)) || !enumerator_value("N_ONLINE", &N_ONLINE)) return 0; len = req.length / len; mapaddr = symbol_value("node_states") + (N_ONLINE * len); } if (!(vt->node_online_map = (ulong *)malloc(len))) error(FATAL, "cannot malloc node_online_map\n"); if (!readmem(mapaddr, KVADDR, (void *)&vt->node_online_map[0], len, "node_online_map", QUIET|RETURN_ON_ERROR)) error(FATAL, "cannot read node_online_map/node_states\n"); vt->node_online_map_len = len/sizeof(ulong); online = 0; maskptr = (ulong *)vt->node_online_map; for (i = 0; i < vt->node_online_map_len; i++, maskptr++) online += count_bits_long(*maskptr); if (CRASHDEBUG(1)) { fprintf(fp, "node_online_map: ["); for (i = 0; i < vt->node_online_map_len; i++) fprintf(fp, "%s%lx", i ? ", " : "", vt->node_online_map[i]); fprintf(fp, "] -> nodes online: %d\n", online); } if (online) vt->numnodes = online; return online; } /* * Return the next node index, with "first" being the first acceptable node. */ static int next_online_node(int first) { int i, j, node; ulong mask, *maskptr; if ((first/BITS_PER_LONG) >= vt->node_online_map_len) return -1; maskptr = (ulong *)vt->node_online_map; for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) { mask = *maskptr; for (j = 0; j < BITS_PER_LONG; j++, node++) { if (mask & 1) { if (node >= first) return node; } mask >>= 1; } } return -1; } /* * Modify appropriately for architecture/kernel nuances. */ static ulong next_online_pgdat(int node) { char buf[BUFSIZE]; ulong pgdat; /* * "__node_data" is used in the mips64 architecture, * and "node_data" is used in other architectures. 
*/ #ifndef __mips64 #define NODE_DATA_VAR "node_data" #else #define NODE_DATA_VAR "__node_data" #endif /* * Default -- look for type: node_data[]/__node_data[] */ if (LKCD_KERNTYPES()) { if (!kernel_symbol_exists(NODE_DATA_VAR)) goto pgdat2; /* * Just index into node_data[]/__node_data[] without checking that * it is an array; kerntypes have no such symbol information. */ } else { if (get_symbol_type(NODE_DATA_VAR, NULL, NULL) != TYPE_CODE_ARRAY) goto pgdat2; open_tmpfile(); sprintf(buf, "whatis " NODE_DATA_VAR); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); goto pgdat2; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); if ((!strstr(buf, "struct pglist_data *") && !strstr(buf, "pg_data_t *") && !strstr(buf, "struct node_data *")) || (count_chars(buf, '[') != 1) || (count_chars(buf, ']') != 1)) goto pgdat2; } if (!readmem(symbol_value(NODE_DATA_VAR) + (node * sizeof(void *)), KVADDR, &pgdat, sizeof(void *), NODE_DATA_VAR, RETURN_ON_ERROR) || !IS_KVADDR(pgdat)) goto pgdat2; return pgdat; pgdat2: if (LKCD_KERNTYPES()) { if (!kernel_symbol_exists("pgdat_list")) goto pgdat3; } else { if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY) goto pgdat3; open_tmpfile(); sprintf(buf, "whatis pgdat_list"); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); goto pgdat3; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); if ((!strstr(buf, "struct pglist_data *") && !strstr(buf, "pg_data_t *") && !strstr(buf, "struct node_data *")) || (count_chars(buf, '[') != 1) || (count_chars(buf, ']') != 1)) goto pgdat3; } if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) || !IS_KVADDR(pgdat)) goto pgdat3; return pgdat; pgdat3: if (symbol_exists("contig_page_data") && (node == 0)) return symbol_value("contig_page_data"); 
return 0; } /* * Make the vm_stat[] array contents easily accessible. */ static int vm_stat_init(void) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int i, count, stringlen, total; int c ATTRIBUTE_UNUSED; struct gnu_request *req; char *start; long enum_value, zone_cnt = -1, node_cnt = -1; int split_vmstat = 0, ni = 0; if (vt->flags & VM_STAT) return TRUE; if ((vt->nr_vm_stat_items == -1) || (!symbol_exists("vm_stat") && !symbol_exists("vm_zone_stat"))) goto bailout; /* * look for type: type = atomic_long_t [] */ if (LKCD_KERNTYPES()) { if ((!symbol_exists("vm_stat") && !symbol_exists("vm_zone_stat"))) goto bailout; /* * Just assume that vm_stat is an array; there is * no symbol info in a kerntypes file. */ } else { if (symbol_exists("vm_stat") && get_symbol_type("vm_stat", NULL, NULL) == TYPE_CODE_ARRAY) { vt->nr_vm_stat_items = get_array_length("vm_stat", NULL, 0); } else if (symbol_exists("vm_zone_stat") && get_symbol_type("vm_zone_stat", NULL, NULL) == TYPE_CODE_ARRAY) { if (symbol_exists("vm_numa_stat") && get_array_length("vm_numa_stat", NULL, 0)) { vt->nr_vm_stat_items = get_array_length("vm_zone_stat", NULL, 0) + get_array_length("vm_node_stat", NULL, 0) + ARRAY_LENGTH(vm_numa_stat); split_vmstat = 2; enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); enumerator_value("NR_VM_NODE_STAT_ITEMS", &node_cnt); } else { vt->nr_vm_stat_items = get_array_length("vm_zone_stat", NULL, 0) + get_array_length("vm_node_stat", NULL, 0); split_vmstat = 1; enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); } } else { goto bailout; } } open_tmpfile(); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_GET_DATATYPE; req->name = "zone_stat_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); if (split_vmstat >= 1) { req->command = GNU_GET_DATATYPE; req->name = "node_stat_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); } if (split_vmstat == 2) { req->command = GNU_GET_DATATYPE; req->name = "numa_stat_item"; req->flags 
= GNU_PRINT_ENUMERATORS; gdb_interface(req); } FREEBUF(req); stringlen = 1; count = -1; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; clean_line(buf); c = parse_line(buf, arglist); if ((!split_vmstat && STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) || ((split_vmstat == 1) && STREQ(arglist[0], "NR_VM_NODE_STAT_ITEMS")) || ((split_vmstat == 2) && STREQ(arglist[0], "NR_VM_NUMA_STAT_ITEMS"))) { if (LKCD_KERNTYPES()) vt->nr_vm_stat_items = MAX(atoi(arglist[2]), count); break; } else if ((split_vmstat == 1) && STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) { continue; } else if ((split_vmstat == 2) && STREQ(arglist[0], "NR_VM_NODE_STAT_ITEMS")) { continue; } else { stringlen += strlen(arglist[0]) + 1; count++; } } total = stringlen + (sizeof(void *) * vt->nr_vm_stat_items); if (!(vt->vm_stat_items = (char **)malloc(total))) { close_tmpfile(); error(FATAL, "cannot malloc vm_stat_items cache\n"); } BZERO(vt->vm_stat_items, total); start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items]; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; c = parse_line(buf, arglist); if (!enumerator_value(arglist[0], &enum_value)) { close_tmpfile(); goto bailout; } i = ni + enum_value; if (!ni && (enum_value == zone_cnt)) { ni = zone_cnt; continue; } else if ((ni == zone_cnt) && (enum_value == node_cnt)) { ni += node_cnt; continue; } if (i < vt->nr_vm_stat_items) { vt->vm_stat_items[i] = start; strcpy(start, arglist[0]); start += strlen(arglist[0]) + 1; } } close_tmpfile(); vt->flags |= VM_STAT; return TRUE; bailout: vt->nr_vm_stat_items = -1; return FALSE; } /* * Either dump all vm_stat entries, or return the value of * the specified vm_stat item. Use the global counter unless * a zone-specific address is passed. 
*/ static int dump_vm_stat(char *item, long *retval, ulong zone) { char *buf; ulong *vp; ulong location; int i, maxlen, len, node_start = -1, numa_start = 1; long total_cnt, zone_cnt = 0, node_cnt = 0, numa_cnt = 0; int split_vmstat = 0; if (!vm_stat_init()) { if (!item) if (CRASHDEBUG(1)) error(INFO, "vm_stat not available in this kernel\n"); return FALSE; } buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items); if (symbol_exists("vm_node_stat") && symbol_exists("vm_zone_stat") && symbol_exists("vm_numa_stat") && ARRAY_LENGTH(vm_numa_stat)) split_vmstat = 2; else if (symbol_exists("vm_node_stat") && symbol_exists("vm_zone_stat")) split_vmstat = 1; else location = zone ? zone : symbol_value("vm_stat"); if (split_vmstat == 1) { enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); location = zone ? zone : symbol_value("vm_zone_stat"); readmem(location, KVADDR, buf, sizeof(ulong) * zone_cnt, "vm_zone_stat", FAULT_ON_ERROR); if (!zone) { location = symbol_value("vm_node_stat"); enumerator_value("NR_VM_NODE_STAT_ITEMS", &node_cnt); readmem(location, KVADDR, buf + (sizeof(ulong) * zone_cnt), sizeof(ulong) * node_cnt, "vm_node_stat", FAULT_ON_ERROR); } node_start = zone_cnt; total_cnt = zone_cnt + node_cnt; } else if (split_vmstat == 2) { enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); location = zone ? 
zone : symbol_value("vm_zone_stat"); readmem(location, KVADDR, buf, sizeof(ulong) * zone_cnt, "vm_zone_stat", FAULT_ON_ERROR); if (!zone) { location = symbol_value("vm_node_stat"); enumerator_value("NR_VM_NODE_STAT_ITEMS", &node_cnt); readmem(location, KVADDR, buf + (sizeof(ulong) * zone_cnt), sizeof(ulong) * node_cnt, "vm_node_stat", FAULT_ON_ERROR); } node_start = zone_cnt; if (!zone) { location = symbol_value("vm_numa_stat"); enumerator_value("NR_VM_NUMA_STAT_ITEMS", &numa_cnt); readmem(location, KVADDR, buf + (sizeof(ulong) * (zone_cnt+node_cnt)), sizeof(ulong) * numa_cnt, "vm_numa_stat", FAULT_ON_ERROR); } numa_start = zone_cnt+node_cnt; total_cnt = zone_cnt + node_cnt + numa_cnt; } else { readmem(location, KVADDR, buf, sizeof(ulong) * vt->nr_vm_stat_items, "vm_stat", FAULT_ON_ERROR); total_cnt = vt->nr_vm_stat_items; } if (!item) { if (!zone) { if (symbol_exists("vm_zone_stat")) fprintf(fp, " VM_ZONE_STAT:\n"); else fprintf(fp, " VM_STAT:\n"); } for (i = maxlen = 0; i < total_cnt; i++) if ((len = strlen(vt->vm_stat_items[i])) > maxlen) maxlen = len; vp = (ulong *)buf; for (i = 0; i < total_cnt; i++) { if (!zone) { if ((i == node_start) && symbol_exists("vm_node_stat")) fprintf(fp, "\n VM_NODE_STAT:\n"); if ((i == numa_start) && symbol_exists("vm_numa_stat") && ARRAY_LENGTH(vm_numa_stat)) fprintf(fp, "\n VM_NUMA_STAT:\n"); } fprintf(fp, "%s%s: %ld\n", space(maxlen - strlen(vt->vm_stat_items[i])), vt->vm_stat_items[i], vp[i]); } return TRUE; } vp = (ulong *)buf; for (i = 0; i < total_cnt; i++) { if (STREQ(vt->vm_stat_items[i], item)) { *retval = vp[i]; return TRUE; } } return FALSE; } /* * Dump the cumulative totals of the per_cpu__page_states counters. 
*/ int dump_page_states(void) { struct syment *sp; ulong addr, value; int i, c, fd, len, instance, members; char buf[BUFSIZE]; char *arglist[MAXARGS]; struct entry { char *name; ulong value; } *entry_list; struct stat stat; char *namebuf, *nameptr; if (!(sp = per_cpu_symbol_search("per_cpu__page_states"))) { if (CRASHDEBUG(1)) error(INFO, "per_cpu__page_states" "not available in this kernel\n"); return FALSE; } instance = members = len = 0; sprintf(buf, "ptype struct page_state"); open_tmpfile(); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); return FALSE; } fflush(pc->tmpfile); fd = fileno(pc->tmpfile); fstat(fd, &stat); namebuf = GETBUF(stat.st_size); nameptr = namebuf; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "struct page_state") || strstr(buf, "}")) continue; members++; } entry_list = (struct entry *) GETBUF(sizeof(struct entry) * members); rewind(pc->tmpfile); i = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "struct page_state") || strstr(buf, "}")) continue; strip_ending_char(strip_linefeeds(buf), ';'); c = parse_line(buf, arglist); strcpy(nameptr, arglist[c-1]); entry_list[i].name = nameptr; if (strlen(nameptr) > len) len = strlen(nameptr); nameptr += strlen(nameptr)+2; i++; } close_tmpfile(); open_tmpfile(); for (c = 0; c < kt->cpus; c++) { addr = sp->value + kt->__per_cpu_offset[c]; dump_struct("page_state", addr, RADIX(16)); } i = 0; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "struct page_state")) { instance++; i = 0; continue; } if (strstr(buf, "}")) continue; strip_linefeeds(buf); extract_hex(buf, &value, ',', TRUE); entry_list[i].value += value; i++; } close_tmpfile(); fprintf(fp, " PAGE_STATES:\n"); for (i = 0; i < members; i++) { sprintf(buf, "%s", entry_list[i].name); fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0)); fprintf(fp, ": %ld\n", entry_list[i].value); } FREEBUF(namebuf); FREEBUF(entry_list); return TRUE; } /* * Dump the 
cumulative totals of the per_cpu__vm_event_state * counters. */ static int dump_vm_event_state(void) { int i, c, maxlen, len; struct syment *sp; ulong addr; ulong *events, *cumulative; if (!vm_event_state_init()) return FALSE; events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2); cumulative = &events[vt->nr_vm_event_items]; sp = per_cpu_symbol_search("per_cpu__vm_event_states"); for (c = 0; c < kt->cpus; c++) { addr = sp->value + kt->__per_cpu_offset[c]; if (CRASHDEBUG(1)) { fprintf(fp, "[%d]: %lx\n", c, addr); dump_struct("vm_event_state", addr, RADIX(16)); } readmem(addr, KVADDR, events, sizeof(ulong) * vt->nr_vm_event_items, "vm_event_states buffer", FAULT_ON_ERROR); for (i = 0; i < vt->nr_vm_event_items; i++) cumulative[i] += events[i]; } fprintf(fp, "\n VM_EVENT_STATES:\n"); for (i = maxlen = 0; i < vt->nr_vm_event_items; i++) if ((len = strlen(vt->vm_event_items[i])) > maxlen) maxlen = len; for (i = 0; i < vt->nr_vm_event_items; i++) fprintf(fp, "%s%s: %lu\n", space(maxlen - strlen(vt->vm_event_items[i])), vt->vm_event_items[i], cumulative[i]); FREEBUF(events); return TRUE; } static int vm_event_state_init(void) { int i, stringlen, total; int c ATTRIBUTE_UNUSED; long count, enum_value; struct gnu_request *req; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *start; if (vt->flags & VM_EVENT) return TRUE; if ((vt->nr_vm_event_items == -1) || !per_cpu_symbol_search("per_cpu__vm_event_states")) goto bailout; if (!enumerator_value("NR_VM_EVENT_ITEMS", &count)) return FALSE; vt->nr_vm_event_items = count; open_tmpfile(); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_GET_DATATYPE; req->name = "vm_event_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); FREEBUF(req); stringlen = 1; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; clean_line(buf); c = parse_line(buf, arglist); if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS")) break; else 
stringlen += strlen(arglist[0]); } total = stringlen + vt->nr_vm_event_items + (sizeof(void *) * vt->nr_vm_event_items); if (!(vt->vm_event_items = (char **)malloc(total))) { close_tmpfile(); error(FATAL, "cannot malloc vm_event_items cache\n"); } BZERO(vt->vm_event_items, total); start = (char *)&vt->vm_event_items[vt->nr_vm_event_items]; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; c = parse_line(buf, arglist); if (enumerator_value(arglist[0], &enum_value)) i = enum_value; else { close_tmpfile(); goto bailout; } if (i < vt->nr_vm_event_items) { vt->vm_event_items[i] = start; strcpy(start, arglist[0]); start += strlen(arglist[0]) + 1; } } close_tmpfile(); vt->flags |= VM_EVENT; return TRUE; bailout: vt->nr_vm_event_items = -1; return FALSE; } /* * Dump the per-cpu offset values that are used to * resolve per-cpu symbol values. */ static void dump_per_cpu_offsets(void) { int c; char buf[BUFSIZE]; fprintf(fp, "PER-CPU OFFSET VALUES:\n"); for (c = 0; c < kt->cpus; c++) { sprintf(buf, "CPU %d", c); fprintf(fp, "%7s: %lx", buf, kt->__per_cpu_offset[c]); if (hide_offline_cpu(c)) fprintf(fp, " [OFFLINE]\n"); else fprintf(fp, "\n"); } } /* * Dump the value(s) of a page->flags bitmap. 
*/ void dump_page_flags(ulonglong flags) { int c ATTRIBUTE_UNUSED; int sz, val, found, largest, longest, header_printed; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char header[BUFSIZE]; char *arglist[MAXARGS]; ulonglong tmpflag; found = longest = largest = header_printed = 0; open_tmpfile(); if (dump_enumerator_list("pageflags")) { rewind(pc->tmpfile); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (strstr(buf1, " = ")) { c = parse_line(buf1, arglist); if ((sz = strlen(arglist[0])) > longest) longest = sz; if (strstr(arglist[0], "PG_") && ((val = atoi(arglist[2])) > largest)) largest = val; } } } else error(FATAL, "enum pageflags does not exist in this kernel\n"); largest = (largest+1)/4 + 1; sprintf(header, "%s BIT VALUE\n", mkstring(buf1, longest, LJUST, "PAGE-FLAG")); rewind(pc->tmpfile); if (flags) fprintf(pc->saved_fp, "FLAGS: %llx\n", flags); fprintf(pc->saved_fp, "%s%s", flags ? " " : "", header); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (strstr(buf1, " = ") && strstr(buf1, "PG_")) { c = parse_line(buf1, arglist); val = atoi(arglist[2]); tmpflag = 1ULL << val; if (!flags || (flags & tmpflag)) { fprintf(pc->saved_fp, "%s%s %2d %.*lx\n", flags ? " " : "", mkstring(buf2, longest, LJUST, arglist[0]), val, largest, (ulong)(1ULL << val)); if (flags & tmpflag) found++; } } } if (flags && !found) fprintf(pc->saved_fp, " (none found)\n"); close_tmpfile(); } /* * Support for slub.c slab cache. 
*/ static void kmem_cache_init_slub(void) { if (vt->flags & KMEM_CACHE_INIT) return; if (CRASHDEBUG(1) && !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) error(WARNING, "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", vt->numnodes); if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); vt->flags |= KMEM_CACHE_INIT; } static void kmem_cache_list_common(struct meminfo *mi) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; if (mi->flags & GET_SLAB_ROOT_CACHES) cnt = get_kmem_cache_root_list(&cache_list); else cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { fprintf(fp, "%lx ", cache_list[i]); readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)\n"); fprintf(fp, "%s\n", buf); } FREEBUF(cache_list); } static void dump_kmem_cache_slub(struct meminfo *si) { int i; ulong name, oo; unsigned int size, objsize, objects, order, offset; char *reqname, *p1; char kbuf[BUFSIZE]; char buf[BUFSIZE]; if (INVALID_MEMBER(kmem_cache_node_nr_slabs)) { error(INFO, "option requires kmem_cache_node.nr_slabs member!\n" "(the kernel must be built with CONFIG_SLUB_DEBUG)\n"); return; } order = objects = 0; if (si->flags & GET_SLAB_ROOT_CACHES) si->cache_count = get_kmem_cache_root_list(&si->cache_list); else si->cache_count = get_kmem_cache_list(&si->cache_list); si->cache_buf = GETBUF(SIZE(kmem_cache)); si->list_offset = VALID_MEMBER(slab_slab_list) ? 
OFFSET(slab_slab_list) : OFFSET(page_lru); if (VALID_MEMBER(page_objects) && OFFSET(page_objects) == OFFSET(page_inuse)) si->flags |= SLAB_BITFIELD; if (!si->reqname && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_page(si, kbuf))) { si->flags |= VERBOSE; si->slab = (ulong)si->spec_addr; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); goto bailout; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; for (i = 0; i < si->cache_count; i++) { BZERO(si->cache_buf, SIZE(kmem_cache)); if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR|RETURN_PARTIAL)) goto next_cache; name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)"); if (reqname) { if (!STREQ(reqname, buf)) continue; fprintf(fp, "%s", kmem_cache_hdr); } if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache_list[i], buf, "[IGNORED]"); goto next_cache; } objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset)); if (VALID_MEMBER(kmem_cache_objects)) { objects = UINT(si->cache_buf + OFFSET(kmem_cache_objects)); order = UINT(si->cache_buf + OFFSET(kmem_cache_order)); } else if (VALID_MEMBER(kmem_cache_oo)) { oo = ULONG(si->cache_buf + OFFSET(kmem_cache_oo)); objects = oo_objects(oo); order = oo_order(oo); } else error(FATAL, "cannot determine " "kmem_cache objects/order values\n"); si->cache = si->cache_list[i]; si->curname = buf; si->objsize = objsize; si->size = size; si->objects = objects; si->slabsize = (PAGESIZE() << order); si->inuse = 
si->num_slabs = 0; si->slab_offset = offset; si->random = VALID_MEMBER(kmem_cache_random) ? ULONG(si->cache_buf + OFFSET(kmem_cache_random)) : 0; if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) || !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si)) si->flags |= SLAB_GATHER_FAILURE; /* accumulate children's slabinfo */ if (si->flags & GET_SLAB_ROOT_CACHES) { struct meminfo *mi; int j; char buf2[BUFSIZE]; mi = (struct meminfo *)GETBUF(sizeof(struct meminfo)); memcpy(mi, si, sizeof(struct meminfo)); mi->cache_count = get_kmem_cache_child_list(&mi->cache_list, si->cache_list[i]); if (!mi->cache_count) goto no_children; mi->cache_buf = GETBUF(SIZE(kmem_cache)); for (j = 0; j < mi->cache_count; j++) { BZERO(mi->cache_buf, SIZE(kmem_cache)); if (!readmem(mi->cache_list[j], KVADDR, mi->cache_buf, SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR|RETURN_PARTIAL)) continue; name = ULONG(mi->cache_buf + OFFSET(kmem_cache_name)); if (!read_string(name, buf2, BUFSIZE-1)) sprintf(buf2, "(unknown)"); objsize = UINT(mi->cache_buf + OFFSET(kmem_cache_objsize)); size = UINT(mi->cache_buf + OFFSET(kmem_cache_size)); offset = UINT(mi->cache_buf + OFFSET(kmem_cache_offset)); if (VALID_MEMBER(kmem_cache_objects)) { objects = UINT(mi->cache_buf + OFFSET(kmem_cache_objects)); order = UINT(mi->cache_buf + OFFSET(kmem_cache_order)); } else if (VALID_MEMBER(kmem_cache_oo)) { oo = ULONG(mi->cache_buf + OFFSET(kmem_cache_oo)); objects = oo_objects(oo); order = oo_order(oo); } else error(FATAL, "cannot determine " "kmem_cache objects/order values\n"); mi->cache = mi->cache_list[j]; mi->curname = buf2; mi->objsize = objsize; mi->size = size; mi->objects = objects; mi->slabsize = (PAGESIZE() << order); mi->inuse = mi->num_slabs = 0; mi->slab_offset = offset; mi->random = VALID_MEMBER(kmem_cache_random) ? 
ULONG(mi->cache_buf + OFFSET(kmem_cache_random)) : 0; if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, mi) || !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, mi)) { si->flags |= SLAB_GATHER_FAILURE; continue; } si->inuse += mi->inuse; si->free += mi->free; si->num_slabs += mi->num_slabs; if (CRASHDEBUG(1)) dump_kmem_cache_info(mi); } FREEBUF(mi->cache_buf); FREEBUF(mi->cache_list); no_children: FREEBUF(mi); } DUMP_KMEM_CACHE_INFO(); if (si->flags & SLAB_GATHER_FAILURE) { si->flags &= ~SLAB_GATHER_FAILURE; goto next_cache; } if (si->flags & ADDRESS_SPECIFIED) { if (!si->slab) si->slab = vaddr_to_slab(si->spec_addr); do_slab_slub(si, VERBOSE); } else if (si->flags & VERBOSE) { do_kmem_cache_slub(si); if (!reqname && ((i+1) < si->cache_count)) fprintf(fp, "%s", kmem_cache_hdr); } next_cache: if (reqname) break; } bailout: FREEBUF(si->cache_list); FREEBUF(si->cache_buf); } static ushort slub_page_objects(struct meminfo *si, ulong page) { ulong objects_vaddr; ushort objects; /* * Pre-2.6.27, the object count and order were fixed in the * kmem_cache structure. Now they may change, say if a high * order slab allocation fails, so the per-slab object count * is kept in the slab. */ if (VALID_MEMBER(page_objects)) { objects_vaddr = page + OFFSET(page_objects); if (si->flags & SLAB_BITFIELD) objects_vaddr += sizeof(ushort); if (!readmem(objects_vaddr, KVADDR, &objects, sizeof(ushort), "page.objects", RETURN_ON_ERROR)) return 0; /* * Strip page.frozen bit. 
*/ if (si->flags & SLAB_BITFIELD) { if (__BYTE_ORDER == __LITTLE_ENDIAN) { objects <<= 1; objects >>= 1; } if (__BYTE_ORDER == __BIG_ENDIAN) objects >>= 1; } if (CRASHDEBUG(1) && (objects != si->objects)) error(NOTE, "%s: slab: %lx oo objects: %ld " "slab objects: %d\n", si->curname, page, si->objects, objects); if (objects == (ushort)(-1)) { error(INFO, "%s: slab: %lx invalid page.objects: -1\n", si->curname, page); return 0; } } else objects = (ushort)si->objects; return objects; } static short count_cpu_partial(struct meminfo *si, int cpu) { short cpu_partial_inuse, cpu_partial_objects, free_objects; ulong cpu_partial; free_objects = 0; if (VALID_MEMBER(kmem_cache_cpu_partial) && VALID_MEMBER(page_objects)) { readmem(ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu] + OFFSET(kmem_cache_cpu_partial), KVADDR, &cpu_partial, sizeof(ulong), "kmem_cache_cpu.partial", RETURN_ON_ERROR); while (cpu_partial) { if (!is_page_ptr(cpu_partial, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, cpu_partial); return 0; } if (!readmem(cpu_partial + OFFSET(page_inuse), KVADDR, &cpu_partial_inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) return 0; if (cpu_partial_inuse == -1) return 0; cpu_partial_objects = slub_page_objects(si, cpu_partial); if (!cpu_partial_objects) return 0; free_objects += cpu_partial_objects - cpu_partial_inuse; readmem(cpu_partial + OFFSET(page_next), KVADDR, &cpu_partial, sizeof(ulong), "page.next", RETURN_ON_ERROR); } } return free_objects; } /* * Emulate the total count calculation done by the * slab_objects() sysfs function in slub.c. 
*/ static int get_kmem_cache_slub_data(long cmd, struct meminfo *si) { int i, n, node; ulong total_objects, total_slabs, free_objects; ulong cpu_slab_ptr, node_ptr, cpu_freelist, orig_slab; ulong node_nr_partial, node_nr_slabs, node_total_objects; int full_slabs, objects, node_total_avail; long p; short inuse; ulong *nodes, *per_cpu; struct node_table *nt; /* * nodes[n] is not being used (for now) * per_cpu[n] is a count of cpu_slab pages per node. */ nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes); per_cpu = nodes + vt->numnodes; total_slabs = total_objects = free_objects = cpu_freelist = 0; node_total_avail = VALID_MEMBER(kmem_cache_node_total_objects) ? TRUE : FALSE; for (i = 0; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); if (!cpu_slab_ptr) continue; if ((node = page_to_nid(cpu_slab_ptr)) < 0) goto bailout; switch (cmd) { case GET_SLUB_OBJECTS: { /* For better error report, set cur slab to si->slab. */ orig_slab = si->slab; si->slab = cpu_slab_ptr; if (!readmem(cpu_slab_ptr + OFFSET(page_inuse), KVADDR, &inuse, sizeof(short), "page inuse", RETURN_ON_ERROR)) { si->slab = orig_slab; return FALSE; } objects = slub_page_objects(si, cpu_slab_ptr); if (!objects) { si->slab = orig_slab; return FALSE; } free_objects += objects - inuse; free_objects += count_free_objects(si, cpu_freelist); free_objects += count_cpu_partial(si, i); if (!node_total_avail) total_objects += inuse; total_slabs++; si->slab = orig_slab; } break; case GET_SLUB_SLABS: total_slabs++; break; } per_cpu[node]++; } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *) * nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (!node_ptr) continue; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) goto bailout; if 
(!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) goto bailout; if (node_total_avail) { if (!readmem(node_ptr + OFFSET(kmem_cache_node_total_objects), KVADDR, &node_total_objects, sizeof(ulong), "kmem_cache_node total_objects", RETURN_ON_ERROR)) goto bailout; } switch (cmd) { case GET_SLUB_OBJECTS: if ((p = count_partial(node_ptr, si, &free_objects)) < 0) return FALSE; if (!node_total_avail) total_objects += p; total_slabs += node_nr_partial; break; case GET_SLUB_SLABS: total_slabs += node_nr_partial; break; } full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; objects = si->objects; switch (cmd) { case GET_SLUB_OBJECTS: if (node_total_avail) total_objects += node_total_objects; else total_objects += (full_slabs * objects); total_slabs += full_slabs; break; case GET_SLUB_SLABS: total_slabs += full_slabs; break; } if (!(vt->flags & CONFIG_NUMA)) break; } switch (cmd) { case GET_SLUB_OBJECTS: if (!node_total_avail) si->inuse = total_objects; else si->inuse = total_objects - free_objects; if (VALID_MEMBER(page_objects) && node_total_avail) si->free = free_objects; else si->free = (total_slabs * si->objects) - si->inuse; break; case GET_SLUB_SLABS: si->num_slabs = total_slabs; break; } FREEBUF(nodes); return TRUE; bailout: FREEBUF(nodes); return FALSE; } static void do_cpu_partial_slub(struct meminfo *si, int cpu) { ulong cpu_slab_ptr; void *partial; cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu]; readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_partial), KVADDR, &partial, sizeof(void *), "kmem_cache_cpu.partial", RETURN_ON_ERROR); fprintf(fp, "CPU %d PARTIAL:\n%s", cpu, partial ? "" : " (empty)\n"); /* * kmem_cache_cpu.partial points to the first page of per cpu partial * list. 
*/ while (partial) { si->slab = (ulong)partial; if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, si->slab); break; } if (!do_slab_slub(si, VERBOSE)) break; readmem((ulong)partial + OFFSET(page_next), KVADDR, &partial, sizeof(void *), "page.next", RETURN_ON_ERROR); } } static void do_kmem_cache_slub(struct meminfo *si) { int i, n; ulong cpu_slab_ptr, node_ptr; ulong node_nr_partial, node_nr_slabs; ulong *per_cpu; struct node_table *nt; per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes); for (i = 0; i < kt->cpus; i++) { if (si->spec_cpumask && !NUM_IN_BITMAP(si->spec_cpumask, i)) continue; if (hide_offline_cpu(i)) { fprintf(fp, "CPU %d [OFFLINE]\n", i); continue; } cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[i]; fprintf(fp, "CPU %d KMEM_CACHE_CPU:\n %lx\n", i, cpu_slab_ptr); cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL); fprintf(fp, "CPU %d SLAB:\n%s", i, cpu_slab_ptr ? "" : " (empty)\n"); if (cpu_slab_ptr) { if ((n = page_to_nid(cpu_slab_ptr)) >= 0) per_cpu[n]++; si->slab = cpu_slab_ptr; if (!do_slab_slub(si, VERBOSE)) continue; } if (VALID_MEMBER(kmem_cache_cpu_partial)) do_cpu_partial_slub(si, i); if (received_SIGINT()) restart(0); } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *)* nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (node_ptr) { if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) break; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) break; } else node_nr_partial = node_nr_slabs = 0; fprintf(fp, "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n"); fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 
2 : 10)); fprintf(fp, "%4d %5ld %7ld %7ld\n", n, node_nr_slabs, node_nr_partial, per_cpu[n]); do_node_lists_slub(si, node_ptr, n); if (!(vt->flags & CONFIG_NUMA)) break; } fprintf(fp, "\n"); FREEBUF(per_cpu); } #define DUMP_SLAB_INFO_SLUB() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ fprintf(fp, " %s %s %4d %5d %9d %4d\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \ node, objects, inuse, objects - inuse); \ } static int do_slab_slub(struct meminfo *si, int verbose) { physaddr_t paddr; ulong vaddr; ushort inuse, objects; ulong freelist, cpu_freelist, cpu_slab_ptr; int i, free_objects, cpu_slab, is_free, node; ulong p, q; #define SLAB_RED_ZONE 0x00000400UL ulong flags; uint red_left_pad; if (!si->slab) { if (CRASHDEBUG(1)) error(INFO, "-S option not supported for CONFIG_SLUB\n"); return FALSE; } if (!page_to_phys(si->slab, &paddr)) { error(INFO, "%s: invalid slab address: %lx\n", si->curname, si->slab); return FALSE; } node = page_to_nid(si->slab); vaddr = PTOV(paddr); if (verbose) fprintf(fp, " %s", slab_hdr); if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) return FALSE; if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist, sizeof(void *), "page.freelist", RETURN_ON_ERROR)) return FALSE; objects = slub_page_objects(si, si->slab); if (!objects) return FALSE; if (!verbose) { DUMP_SLAB_INFO_SLUB(); return TRUE; } cpu_freelist = 0; for (i = 0, cpu_slab = -1; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); if (!cpu_slab_ptr) continue; if (cpu_slab_ptr == si->slab) { cpu_slab = i; /* * Later slub scheme uses the per-cpu freelist * so count the free objects by hand. */ if ((free_objects = count_free_objects(si, cpu_freelist)) < 0) return FALSE; /* * If the object is freed on foreign cpu, the * object is liked to page->freelist. 
*/ if (freelist) free_objects += objects - inuse; inuse = objects - free_objects; break; } } DUMP_SLAB_INFO_SLUB(); fprintf(fp, " %s", free_inuse_hdr); #define PAGE_MAPPING_ANON 1 if (CRASHDEBUG(8)) { fprintf(fp, "< SLUB: free list START: >\n"); i = 0; for (q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q & PAGE_MAPPING_ANON) { fprintf(fp, "< SLUB: free list END: %lx (%d found) >\n", q, i); break; } fprintf(fp, " %lx\n", q); i++; } if (!q) fprintf(fp, "< SLUB: free list END (%d found) >\n", i); } red_left_pad = 0; if (VALID_MEMBER(kmem_cache_red_left_pad)) { flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags)); if (flags & SLAB_RED_ZONE) red_left_pad = UINT(si->cache_buf + OFFSET(kmem_cache_red_left_pad)); } for (p = vaddr; p < vaddr + objects * si->size; p += si->size) { hq_open(); is_free = FALSE; /* Search an object on both of freelist and cpu_freelist */ ulong lists[] = { freelist, cpu_freelist, }; for (i = 0; i < sizeof(lists) / sizeof(lists[0]); i++) { for (is_free = 0, q = lists[i]; q; q = get_freepointer(si, (void *)q)) { if (q == BADADDR) { hq_close(); return FALSE; } if (q & PAGE_MAPPING_ANON) break; if ((p + red_left_pad) == q) { is_free = TRUE; goto found_object; } if (!hq_enter(q)) { hq_close(); error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n", si->curname, si->slab, q); return FALSE; } } } found_object: hq_close(); if (si->flags & ADDRESS_SPECIFIED) { if ((si->spec_addr < p) || (si->spec_addr >= (p + si->size))) { if (!(si->flags & VERBOSE)) continue; } } fprintf(fp, " %s%lx%s", is_free ? " " : "[", pc->flags2 & REDZONE ? p : p + red_left_pad, is_free ? 
" " : "]"); if (is_free && (cpu_slab >= 0)) fprintf(fp, "(cpu %d cache)", cpu_slab); fprintf(fp, "\n"); } return TRUE; } static int count_free_objects(struct meminfo *si, ulong freelist) { int c; ulong q; hq_open(); c = 0; for (q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q & PAGE_MAPPING_ANON) break; if (!hq_enter(q)) { error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n", si->curname, si->slab, q); break; } c++; } hq_close(); return c; } static ulong freelist_ptr(struct meminfo *si, ulong ptr, ulong ptr_addr) { if (VALID_MEMBER(kmem_cache_random)) { /* CONFIG_SLAB_FREELIST_HARDENED */ ulong addr = (sizeof(long) == 8) ? bswap_64(ptr_addr) : bswap_32(ptr_addr); addr = ptr ^ si->random ^ addr; if (!addr || accessible(addr)) return addr; return (ptr ^ si->random ^ ptr_addr); } else return ptr; } static ulong get_freepointer(struct meminfo *si, void *object) { ulong vaddr, nextfree; vaddr = (ulong)(object + si->slab_offset); if (!readmem(vaddr, KVADDR, &nextfree, sizeof(void *), "get_freepointer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: slab: %lx invalid freepointer: %lx\n", si->curname, si->slab, vaddr); return BADADDR; } return (freelist_ptr(si, nextfree, vaddr)); } static void do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node) { ulong next, last, list_head, flags; int first; if (!node_ptr) return; list_head = node_ptr + OFFSET(kmem_cache_node_partial); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node partial", RETURN_ON_ERROR)) return; fprintf(fp, "NODE %d PARTIAL:\n%s", node, next == list_head ? 
" (empty)\n" : ""); first = 0; while (next != list_head) { si->slab = last = next - si->list_offset; if (first++ == 0) fprintf(fp, " %s", slab_hdr); if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, si->slab); return; } if (!do_slab_slub(si, !VERBOSE)) return; if (received_SIGINT()) restart(0); if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) return; if (!IS_KVADDR(next) || ((next != list_head) && !is_page_ptr(next - si->list_offset, NULL))) { error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", si->curname, last, next); return; } } #define SLAB_STORE_USER (0x00010000UL) flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags)); if (INVALID_MEMBER(kmem_cache_node_full) || !(flags & SLAB_STORE_USER)) { fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node); return; } list_head = node_ptr + OFFSET(kmem_cache_node_full); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node full", RETURN_ON_ERROR)) return; fprintf(fp, "NODE %d FULL:\n%s", node, next == list_head ? 
" (empty)\n" : ""); first = 0; while (next != list_head) { si->slab = next - si->list_offset; if (first++ == 0) fprintf(fp, " %s", slab_hdr); if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid full list slab pointer: %lx\n", si->curname, si->slab); return; } if (!do_slab_slub(si, !VERBOSE)) return; if (received_SIGINT()) restart(0); if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) return; if (!IS_KVADDR(next)) { error(INFO, "%s: full list slab: %lx page.lru.next: %lx\n", si->curname, si->slab, next); return; } } } static char * is_kmem_cache_addr_common(ulong vaddr, char *kbuf) { int i, cnt; ulong *cache_list; ulong name; int found; cnt = get_kmem_cache_list(&cache_list); for (i = 0, found = FALSE; i < cnt; i++) { if (cache_list[i] != vaddr) continue; if (!readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", RETURN_ON_ERROR)) break; if (!read_string(name, kbuf, BUFSIZE-1)) sprintf(kbuf, "(unknown)"); found = TRUE; break; } FREEBUF(cache_list); return (found ? kbuf : NULL); } /* * Kernel-config-neutral page-to-node evaluator. */ static int page_to_nid(ulong page) { int i; physaddr_t paddr; struct node_table *nt; physaddr_t end_paddr; if (!page_to_phys(page, &paddr)) { error(INFO, "page_to_nid: invalid page: %lx\n", page); return -1; } for (i = 0; i < vt->numnodes; i++) { nt = &vt->node_table[i]; end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((paddr >= nt->start_paddr) && (paddr < end_paddr)) return i; } error(INFO, "page_to_nid: cannot determine node for pages: %lx\n", page); return -1; } /* * Allocate and fill the passed-in buffer with a list of * the current kmem_cache structures. 
*/ static int get_kmem_cache_list(ulong **cache_buf) { int cnt; ulong vaddr; struct list_data list_data, *ld; get_symbol_data("slab_caches", sizeof(void *), &vaddr); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_list); ld->end = symbol_value("slab_caches"); if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } static int get_kmem_cache_root_list(ulong **cache_buf) { int cnt; ulong vaddr; struct list_data list_data, *ld; get_symbol_data("slab_root_caches", sizeof(void *), &vaddr); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_memcg_params) + OFFSET(memcg_cache_params___root_caches_node); ld->end = symbol_value("slab_root_caches"); if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } static int get_kmem_cache_child_list(ulong **cache_buf, ulong root) { int cnt; ulong vaddr, children; struct list_data list_data, *ld; children = root + OFFSET(kmem_cache_memcg_params) + OFFSET(memcg_cache_params_children); readmem(children, KVADDR, &vaddr, sizeof(ulong), "kmem_cache.memcg_params.children", FAULT_ON_ERROR); /* * When no children, since there is the difference of offset * of children list between root and child, do_list returns * an incorrect cache_buf[0]. So we determine wheather it has * children or not with the value of list_head.next. */ if (children == vaddr) return 0; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_memcg_params) + OFFSET(memcg_cache_params_children_node); ld->end = children; if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } /* * Get the address of the head page of a compound page. 
*/ static ulong compound_head(ulong page) { ulong flags, first_page, compound_head; first_page = page; if (VALID_MEMBER(page_compound_head)) { if (readmem(page+OFFSET(page_compound_head), KVADDR, &compound_head, sizeof(ulong), "page.compound_head", RETURN_ON_ERROR)) { if (compound_head & 1) first_page = compound_head - 1; } } else if (readmem(page+OFFSET(page_flags), KVADDR, &flags, sizeof(ulong), "page.flags", RETURN_ON_ERROR)) { if ((flags & vt->PG_head_tail_mask) == vt->PG_head_tail_mask) readmem(page+OFFSET(page_first_page), KVADDR, &first_page, sizeof(ulong), "page.first_page", RETURN_ON_ERROR); } return first_page; } long count_partial(ulong node, struct meminfo *si, ulong *free) { ulong list_head, next, last; short inuse, objects; ulong total_inuse; ulong count = 0; count = 0; total_inuse = 0; list_head = node + OFFSET(kmem_cache_node_partial); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node.partial", RETURN_ON_ERROR)) return -1; hq_open(); while (next != list_head) { if (!readmem(next - si->list_offset + OFFSET(page_inuse), KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) { hq_close(); return -1; } last = next - si->list_offset; if (inuse == -1) { error(INFO, "%s: partial list slab: %lx invalid page.inuse: -1\n", si->curname, last); break; } total_inuse += inuse; if (VALID_MEMBER(page_objects)) { objects = slub_page_objects(si, last); if (!objects) { hq_close(); return -1; } *free += objects - inuse; } if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) { hq_close(); return -1; } if (!IS_KVADDR(next) || ((next != list_head) && !is_page_ptr(next - si->list_offset, NULL))) { error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", si->curname, last, next); break; } /* * Keep track of the last 1000 entries, and check * whether the list has recursed back onto itself. 
*/ if ((++count % 1000) == 0) { hq_close(); hq_open(); } if (!hq_enter(next)) { error(INFO, "%s: partial list slab: %lx duplicate slab entry: %lx\n", si->curname, last, next); hq_close(); return -1; } } hq_close(); return total_inuse; } char * is_slab_page(struct meminfo *si, char *buf) { int i, cnt; ulong pg_slab, page_flags, name; ulong *cache_list; char *retval; if (!(vt->flags & KMALLOC_SLUB)) return NULL; if (!is_page_ptr((ulong)si->spec_addr, NULL)) return NULL; if (!readmem(si->spec_addr + OFFSET(page_flags), KVADDR, &page_flags, sizeof(ulong), "page.flags", RETURN_ON_ERROR|QUIET)) return NULL; if (!page_slab(si->spec_addr, page_flags)) return NULL; if (!readmem(si->spec_addr + OFFSET(page_slab), KVADDR, &pg_slab, sizeof(ulong), "page.slab", RETURN_ON_ERROR|QUIET)) return NULL; retval = NULL; cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { if (pg_slab == cache_list[i]) { if (!readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", QUIET|RETURN_ON_ERROR)) goto bailout; if (!read_string(name, buf, BUFSIZE-1)) goto bailout; retval = buf; break; } } bailout: FREEBUF(cache_list); return retval; } /* * Figure out which of the kmem_cache.cpu_slab declarations * is used by this kernel, and return a pointer to the slab * page being used. Return the kmem_cache_cpu.freelist pointer * if requested. 
*/ static ulong get_cpu_slab_ptr(struct meminfo *si, int cpu, ulong *cpu_freelist) { ulong cpu_slab_ptr, page, freelist; if (cpu_freelist) *cpu_freelist = 0; switch (vt->cpu_slab_type) { case TYPE_CODE_STRUCT: cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab) + OFFSET(kmem_cache_cpu_page)); if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) *cpu_freelist = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab) + OFFSET(kmem_cache_cpu_freelist)); break; case TYPE_CODE_ARRAY: cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu)); if (cpu_slab_ptr && cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) { if (readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_freelist), KVADDR, &freelist, sizeof(void *), "kmem_cache_cpu.freelist", RETURN_ON_ERROR)) *cpu_freelist = freelist; } if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) { if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page), KVADDR, &page, sizeof(void *), "kmem_cache_cpu.page", RETURN_ON_ERROR)) cpu_slab_ptr = 0; else cpu_slab_ptr = page; } break; case TYPE_CODE_PTR: cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu]; if (cpu_slab_ptr && cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) { if (readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_freelist), KVADDR, &freelist, sizeof(void *), "kmem_cache_cpu.freelist", RETURN_ON_ERROR)) *cpu_freelist = freelist; } if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) { if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page), KVADDR, &page, sizeof(void *), "kmem_cache_cpu.page", RETURN_ON_ERROR)) cpu_slab_ptr = 0; else cpu_slab_ptr = page; } break; default: cpu_slab_ptr = 0; error(FATAL, "cannot determine location of kmem_cache.cpu_slab page\n"); } return cpu_slab_ptr; } /* * In 2.6.27 kmem_cache.order and kmem_cache.objects were merged * into the kmem_cache.oo, a kmem_cache_order_objects structure. 
* oo_order() and oo_objects() emulate the kernel functions * of the same name. */ static unsigned int oo_order(ulong oo) { return (oo >> 16); } static unsigned int oo_objects(ulong oo) { return (oo & ((1 << 16) - 1)); } #ifdef NOT_USED ulong slab_to_kmem_cache_node(struct meminfo *si, ulong slab_page) { int node; ulong node_ptr; if (vt->flags & CONFIG_NUMA) { node = page_to_nid(slab_page); node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *)*node)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); return node_ptr; } ulong get_kmem_cache_by_name(char *request) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; ulong found; cnt = get_kmem_cache_list(&cache_list); cache_buf = GETBUF(SIZE(kmem_cache)); found = 0; for (i = 0; i < cnt; i++) { readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) continue; if (STREQ(buf, request)) { found = cache_list[i]; break; } } FREEBUF(cache_list); return found; } #endif /* NOT_USED */ crash-utility-crash-9cd43f5/qemu-load.c0000664000372000037200000006350115107550337017431 0ustar juerghjuergh/* * Qemu save VM loader * * Copyright (C) 2009, 2010, 2011 Red Hat, Inc. * Written by Paolo Bonzini. * * Portions Copyright (C) 2009 David Anderson * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #define _GNU_SOURCE #include "qemu-load.h" #include #include #include #include #include "kvmdump.h" struct qemu_device * device_alloc (struct qemu_device_list *dl, size_t sz, struct qemu_device_vtbl *vtbl, uint32_t section_id, uint32_t instance_id, uint32_t version_id) { struct qemu_device *d = calloc (1, sz); d->vtbl = vtbl; d->list = dl; d->section_id = section_id; d->instance_id = instance_id; d->version_id = version_id; if (!dl->head) dl->head = dl->tail = d; else { dl->tail->next = d; d->prev = dl->tail; dl->tail = d; } return d; } struct qemu_device * device_find (struct qemu_device_list *dl, uint32_t section_id) { struct qemu_device *d; d = dl->head; while (d && d->section_id != section_id) d = d->next; return d; } struct qemu_device * device_find_instance (struct qemu_device_list *dl, const char *name, uint32_t instance_id) { struct qemu_device *d; d = dl->head; while (d && (strcmp (d->vtbl->name, name) || d->instance_id != instance_id)) d = d->next; return d; } void device_free (struct qemu_device *d) { struct qemu_device_list *dl = d->list; if (d->prev) d->prev->next = d->next; else dl->head = d->next; if (d->next) d->next->prev = d->prev; else dl->tail = d->prev; d->prev = d->next = NULL; if (d->vtbl->free) d->vtbl->free (d, dl); } void device_list_free (struct qemu_device_list *l) { if (!l) return; while (l->head) device_free (l->head); } /* File access. 
*/ static inline uint16_t get_be16 (FILE *fp) { uint8_t a = getc (fp); uint8_t b = getc (fp); return (a << 8) | b; } static inline uint16_t get_le16 (FILE *fp) { uint8_t b = getc (fp); uint8_t a = getc (fp); return (a << 8) | b; } static inline uint32_t get_be32 (FILE *fp) { uint16_t a = get_be16 (fp); uint16_t b = get_be16 (fp); return (a << 16) | b; } static inline uint32_t get_le32 (FILE *fp) { uint16_t b = get_le16 (fp); uint16_t a = get_le16 (fp); return (a << 16) | b; } static inline uint64_t get_be64 (FILE *fp) { uint32_t a = get_be32 (fp); uint32_t b = get_be32 (fp); return ((uint64_t)a << 32) | b; } static inline uint64_t get_le64 (FILE *fp) { uint32_t b = get_le32 (fp); uint32_t a = get_le32 (fp); return ((uint64_t)a << 32) | b; } static inline void get_qemu128 (FILE *fp, union qemu_uint128_t *result) { result->i[1] = get_le32 (fp); result->i[0] = get_le32 (fp); result->i[3] = get_le32 (fp); result->i[2] = get_le32 (fp); } /* RAM loader. */ #define RAM_SAVE_FLAG_FULL 0x01 #define RAM_SAVE_FLAG_COMPRESS 0x02 #define RAM_SAVE_FLAG_MEM_SIZE 0x04 #define RAM_SAVE_FLAG_PAGE 0x08 #define RAM_SAVE_FLAG_EOS 0x10 #define RAM_SAVE_FLAG_CONTINUE 0x20 #define RAM_SAVE_ADDR_MASK (~4095LL) #define RAM_OFFSET_COMPRESSED (~(off_t)255) static void ram_alloc (struct qemu_device_ram *dram, uint64_t size) { // size_t old_npages = dram->offsets ? 
0 : dram->last_ram_offset / 4096; // size_t new_npages = size / 4096; // assert (size <= SIZE_MAX); // if (dram->offsets) // dram->offsets = realloc (dram->offsets, // new_npages * sizeof (off_t)); // else // dram->offsets = malloc (new_npages * sizeof (off_t)); // // assert (dram->offsets); // while (old_npages < new_npages) // dram->offsets[old_npages++] = RAM_OFFSET_COMPRESSED | 0; dram->last_ram_offset = size; } #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif static int get_string (FILE *fp, char *name) { size_t items ATTRIBUTE_UNUSED; int sz = (uint8_t) getc (fp); if (sz == EOF) return -1; items = fread (name, sz, 1, fp); name[sz] = 0; return sz; } static int get_string_len (FILE *fp, char *name, uint32_t sz) { size_t items ATTRIBUTE_UNUSED; if (sz == EOF) return -1; items = fread (name, sz, 1, fp); name[sz] = 0; return sz; } static void ram_read_blocks (FILE *fp, uint64_t size) { char name[257]; /* The RAM block table is a list of block names followed by their sizes. Read it until the sizes sum up to SIZE bytes. 
*/ while (size) { get_string (fp, name); size -= get_be64 (fp); } } static uint32_t ram_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { char name[257]; struct qemu_device_ram *dram = (struct qemu_device_ram *)d; uint64_t header; static int pc_ram = 0; for (;;) { uint64_t addr; off_t entry; header = get_be64 (fp); if (feof (fp) || ferror (fp)) return 0; if (header & RAM_SAVE_FLAG_EOS) break; assert (!(header & RAM_SAVE_FLAG_FULL)); addr = header & RAM_SAVE_ADDR_MASK; if (header & RAM_SAVE_FLAG_MEM_SIZE) { ram_alloc (dram, addr); if (d->version_id >= 4) ram_read_blocks(fp, addr); continue; } if (d->version_id >= 4 && !(header & RAM_SAVE_FLAG_CONTINUE)) { get_string(fp, name); if (strcmp(name, "pc.ram") == 0) pc_ram = 1; else pc_ram = 0; } if (header & RAM_SAVE_FLAG_COMPRESS) { entry = RAM_OFFSET_COMPRESSED | getc(fp); if ((d->version_id == 3) || (d->version_id >= 4 && pc_ram)) store_mapfile_offset(addr, &entry); } else if (header & RAM_SAVE_FLAG_PAGE) { entry = ftell(fp); if ((d->version_id == 3) || (d->version_id >= 4 && pc_ram)) store_mapfile_offset(addr, &entry); fseek (fp, 4096, SEEK_CUR); } } dram->fp = fp; return QEMU_FEATURE_RAM; } static void ram_free (struct qemu_device *d, struct qemu_device_list *dl) { struct qemu_device_ram *dram = (struct qemu_device_ram *)d; free (dram->offsets); } int ram_read_phys_page (struct qemu_device_ram *dram, void *buf, uint64_t addr) { off_t ofs; ssize_t bytes ATTRIBUTE_UNUSED; if (addr >= dram->last_ram_offset) return false; assert ((addr & 0xfff) == 0); // ofs = dram->offsets[addr / 4096]; if (load_mapfile_offset(addr, &ofs) < 0) return 0; if ((ofs & RAM_OFFSET_COMPRESSED) == RAM_OFFSET_COMPRESSED) memset (buf, ofs & 255, 4096); else bytes = pread (fileno (dram->fp), buf, 4096, ofs); return true; } static struct qemu_device * ram_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl ram = { "ram", 
ram_load, ram_free }; assert (version_id == 3 || version_id == 4); kvm->mapinfo.ram_version_id = version_id; return device_alloc (dl, sizeof (struct qemu_device_ram), &ram, section_id, instance_id, version_id); } #define BLK_MIG_FLAG_EOS 2 static uint32_t block_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { uint64_t header; header = get_be64 (fp); assert (header == BLK_MIG_FLAG_EOS); return 0; } static struct qemu_device * block_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl block = { "block", block_load, NULL }; return device_alloc (dl, sizeof (struct qemu_device), &block, section_id, instance_id, version_id); } /* RHEL5 marker. */ static uint32_t rhel5_marker_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { return 0; } static struct qemu_device * rhel5_marker_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl rhel5_marker = { "__rhel5", rhel5_marker_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &rhel5_marker, section_id, instance_id, version_id); } /* cpu_common loader. 
*/ struct qemu_device_cpu_common { struct qemu_device base; uint32_t halted; uint32_t irq; }; static uint32_t cpu_common_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { struct qemu_device_cpu_common *cpu = (struct qemu_device_cpu_common *)d; cpu->halted = get_be32 (fp); cpu->irq = get_be32 (fp); return 0; } static struct qemu_device * cpu_common_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl cpu_common = { "cpu_common", cpu_common_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device_cpu_common), &cpu_common, section_id, instance_id, version_id); } /* CPU loader. */ static inline uint64_t get_be_long (FILE *fp, int size) { uint32_t a = size == 32 ? 0 : get_be32 (fp); uint32_t b = get_be32 (fp); return ((uint64_t)a << 32) | b; } static inline void get_be_fp80 (FILE *fp, union qemu_fpu_reg *result) { result->mmx = get_be64 (fp); result->bytes[9] = getc (fp); result->bytes[8] = getc (fp); } static void cpu_load_seg (FILE *fp, struct qemu_x86_seg *seg, int size) { seg->selector = get_be32 (fp); seg->base = get_be_long (fp, size); seg->limit = get_be32 (fp); seg->flags = get_be32 (fp); } static bool v12_has_xsave_state(FILE *fp) { char name[257]; bool ret = true; long offset = ftell(fp); // save offset /* * peek into byte stream to check for APIC vmstate */ if (getc(fp) == QEMU_VM_SECTION_FULL) { get_be32(fp); // skip section id get_string(fp, name); if (strcmp(name, "apic") == 0) ret = false; } fseek(fp, offset, SEEK_SET); // restore offset return ret; } static uint32_t cpu_load (struct qemu_device *d, FILE *fp, int size) { struct qemu_device_x86 *dx86 = (struct qemu_device_x86 *)d; uint32_t qemu_hflags = 0, qemu_hflags2 = 0; int nregs; uint32_t version_id = dx86->dev_base.version_id; uint32_t rhel5_version_id; int i; off_t restart; struct qemu_device *drhel5; struct qemu_device_cpu_common *dcpu; if (kvm->flags 
& KVMHOST_32) size = 32; restart = ftello(fp); retry: nregs = size == 32 ? 8 : 16; drhel5 = device_find_instance (d->list, "__rhel5", 0); if (drhel5 || (version_id >= 7 && version_id <= 9)) { rhel5_version_id = version_id; version_id = 7; } else { rhel5_version_id = 0; version_id = dx86->dev_base.version_id; } dprintf("cpu_load: rhel5_version_id: %d (effective) version_id: %d\n", rhel5_version_id, version_id); dcpu = (struct qemu_device_cpu_common *) device_find_instance (d->list, "cpu_common", d->instance_id); if (dcpu) { dx86->halted = dcpu->halted; dx86->irq = dcpu->irq; // device_free ((struct qemu_device *) dcpu); } for (i = 0; i < nregs; i++) dx86->regs[i] = get_be_long (fp, size); dx86->eip = get_be_long (fp, size); dx86->eflags = get_be_long (fp, size); qemu_hflags = get_be32 (fp); dx86->fpucw = get_be16 (fp); dx86->fpusw = get_be16 (fp); dx86->fpu_free = get_be16 (fp); if (get_be16 (fp)) for (i = 0; i < 8; i++) dx86->st[i].mmx = get_be64 (fp); else for (i = 0; i < 8; i++) get_be_fp80 (fp, &dx86->st[i]); cpu_load_seg (fp, &dx86->es, size); cpu_load_seg (fp, &dx86->cs, size); cpu_load_seg (fp, &dx86->ss, size); cpu_load_seg (fp, &dx86->ds, size); cpu_load_seg (fp, &dx86->fs, size); cpu_load_seg (fp, &dx86->gs, size); cpu_load_seg (fp, &dx86->ldt, size); cpu_load_seg (fp, &dx86->tr, size); cpu_load_seg (fp, &dx86->gdt, size); cpu_load_seg (fp, &dx86->idt, size); dx86->sysenter.cs = get_be32 (fp); dx86->sysenter.esp = get_be_long (fp, version_id <= 6 ? 32 : size); dx86->sysenter.eip = get_be_long (fp, version_id <= 6 ? 
32 : size); dx86->cr0 = get_be_long (fp, size); dx86->cr2 = get_be_long (fp, size); dx86->cr3 = get_be_long (fp, size); dx86->cr4 = get_be_long (fp, size); for (i = 0; i < 8; i++) dx86->dr[i] = get_be_long (fp, size); dx86->a20_masked = get_be32 (fp) != 0xffffffff; dx86->mxcsr = get_be32 (fp); for (i = 0; i < nregs; i++) get_qemu128 (fp, &dx86->xmm[i]); if (size == 64) { dx86->efer = get_be64 (fp); dx86->star = get_be64 (fp); dx86->lstar = get_be64 (fp); dx86->cstar = get_be64 (fp); dx86->fmask = get_be64 (fp); dx86->kernel_gs_base = get_be64 (fp); } dx86->smbase = get_be32 (fp); dx86->soft_mmu = qemu_hflags & (1 << 2); dx86->smm = qemu_hflags & (1 << 19); if (version_id == 4) goto store; dx86->pat = get_be64 (fp); qemu_hflags2 = get_be32 (fp); dx86->global_if = qemu_hflags2 & (1 << 0); dx86->in_nmi = qemu_hflags2 & (1 << 2); if (version_id < 6) dx86->halted = get_be32 (fp); dx86->svm.hsave = get_be64 (fp); dx86->svm.vmcb = get_be64 (fp); dx86->svm.tsc_offset = get_be64 (fp); dx86->svm.in_vmm = qemu_hflags & (1 << 21); dx86->svm.guest_if_mask = qemu_hflags2 & (1 << 1); dx86->svm.guest_intr_masking = qemu_hflags2 & (1 << 3); dx86->svm.intercept_mask = get_be64 (fp); dx86->svm.cr_read_mask = get_be16 (fp); dx86->svm.cr_write_mask = get_be16 (fp); dx86->svm.dr_read_mask = get_be16 (fp); dx86->svm.dr_write_mask = get_be16 (fp); dx86->svm.exception_intercept_mask = get_be32 (fp); dx86->cr8 = getc (fp); if (version_id >= 8) { for (i = 0; i < 11; i++) dx86->fixed_mtrr[i] = get_be64 (fp); dx86->deftype_mtrr = get_be64 (fp); for (i = 0; i < 8; i++) { dx86->variable_mtrr[i].base = get_be64 (fp); dx86->variable_mtrr[i].mask = get_be64 (fp); } } /* This was present only when KVM was enabled up to v8. * Furthermore, it changed format in v9. 
*/ if (version_id >= 9) { int32_t pending_irq = (int32_t) get_be32 (fp); if (pending_irq >= 0 && pending_irq <= 255) dx86->kvm.int_bitmap[pending_irq / 64] |= (uint64_t)1 << (pending_irq & 63); dx86->kvm.mp_state = get_be32 (fp); dx86->kvm.tsc = get_be64 (fp); } else if (d->list->features & QEMU_FEATURE_KVM) { for (i = 0; i < 4; i++) dx86->kvm.int_bitmap[i] = get_be64 (fp); dx86->kvm.tsc = get_be64 (fp); if (version_id >= 5) dx86->kvm.mp_state = get_be32 (fp); } if (version_id >= 11) { dx86->kvm.exception_injected = get_be32 (fp); } if (rhel5_version_id >= 8) { dx86->kvm.system_time_msr = get_be64 (fp); dx86->kvm.wall_clock_msr = get_be64 (fp); } if (version_id >= 11 || rhel5_version_id >= 9) { dx86->kvm.soft_interrupt = getc (fp); dx86->kvm.nmi_injected = getc (fp); dx86->kvm.nmi_pending = getc (fp); dx86->kvm.has_error_code = getc (fp); dx86->kvm.sipi_vector = get_be32 (fp); } if (version_id >= 10) { dx86->mce.mcg_cap = get_be64 (fp); dx86->mce.mcg_status = get_be64 (fp); dx86->mce.mcg_ctl = get_be64 (fp); for (i = 0; i < 10 * 4; i++) dx86->mce.mce_banks[i] = get_be64 (fp); } if (version_id >= 11) { dx86->tsc_aux = get_be64 (fp); dx86->kvm.system_time_msr = get_be64 (fp); dx86->kvm.wall_clock_msr = get_be64 (fp); } if (version_id >= 12 && v12_has_xsave_state(fp)) { dx86->xcr0 = get_be64 (fp); dx86->xstate_bv = get_be64 (fp); for (i = 0; i < nregs; i++) get_qemu128 (fp, &dx86->ymmh_regs[i]); } store: if (!kvmdump_regs_store(d->instance_id, dx86)) { size = 32; kvm->flags |= KVMHOST_32; fseeko(fp, restart, SEEK_SET); dprintf("cpu_load: invalid registers: retry with 32-bit host\n"); goto retry; } if (dcpu) device_free ((struct qemu_device *) dcpu); return QEMU_FEATURE_CPU; } static uint32_t cpu_load_32 (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { return cpu_load (d, fp, 32); } static struct qemu_device * cpu_init_load_32 (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { struct 
qemu_device_x86 *dx86; static struct qemu_device_vtbl cpu = { "cpu", cpu_load_32, NULL }; assert (!live); // assert (version_id >= 4 && version_id <= 9); assert (version_id >= 4 && version_id <= 12); kvm->mapinfo.cpu_version_id = version_id; dx86 = (struct qemu_device_x86 *) device_alloc (dl, sizeof (struct qemu_device_x86), &cpu, section_id, instance_id, version_id); return (struct qemu_device *) dx86; } static uint32_t cpu_load_64 (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { return cpu_load (d, fp, 64); } static struct qemu_device * cpu_init_load_64 (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { struct qemu_device_x86 *dx86; static struct qemu_device_vtbl cpu = { "cpu", cpu_load_64, NULL }; assert (!live); // assert (version_id >= 4 && version_id <= 9); assert (version_id >= 4 && version_id <= 12); kvm->mapinfo.cpu_version_id = version_id; dx86 = (struct qemu_device_x86 *) device_alloc (dl, sizeof (struct qemu_device_x86), &cpu, section_id, instance_id, version_id); return (struct qemu_device *) dx86; } /* IOAPIC loader. */ static uint32_t apic_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { switch (d->version_id) { case 1: fseek (fp, 173, SEEK_CUR); break; case 2: case 3: fseek (fp, 181, SEEK_CUR); break; } return 0; } static struct qemu_device * apic_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl apic = { "apic", apic_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &apic, section_id, instance_id, version_id); } /* timer loader. 
*/ static uint32_t timer_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { fseek (fp, 24, SEEK_CUR); return QEMU_FEATURE_TIMER; } static struct qemu_device * timer_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl timer = { "timer", timer_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &timer, section_id, instance_id, version_id); } /* kvmclock loader. */ static uint32_t kvmclock_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { fseek (fp, 8, SEEK_CUR); return QEMU_FEATURE_KVM; } static struct qemu_device * kvmclock_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl kvmclock = { "kvmclock", kvmclock_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &kvmclock, section_id, instance_id, version_id); } /* kvm-tpr-opt loader. */ static uint32_t kvm_tpr_opt_load (struct qemu_device *d, FILE *fp, enum qemu_save_section sec) { fseek (fp, 144, SEEK_CUR); return QEMU_FEATURE_KVM; } static struct qemu_device * kvm_tpr_opt_init_load (struct qemu_device_list *dl, uint32_t section_id, uint32_t instance_id, uint32_t version_id, bool live, FILE *fp) { static struct qemu_device_vtbl kvm_tpr_opt = { "kvm-tpr-opt", kvm_tpr_opt_load, NULL }; assert (!live); return device_alloc (dl, sizeof (struct qemu_device), &kvm_tpr_opt, section_id, instance_id, version_id); } /* Putting it together. 
*/ const struct qemu_device_loader devices_x86_64[] = { { "__rhel5", rhel5_marker_init_load }, { "cpu_common", cpu_common_init_load }, { "kvm-tpr-opt", kvm_tpr_opt_init_load }, { "kvmclock", kvmclock_init_load }, { "cpu", cpu_init_load_64 }, { "apic", apic_init_load }, { "block", block_init_load }, { "ram", ram_init_load }, { "timer", timer_init_load }, { NULL, NULL } }; const struct qemu_device_loader devices_x86_32[] = { { "__rhel5", rhel5_marker_init_load }, { "cpu_common", cpu_common_init_load }, { "kvm-tpr-opt", kvm_tpr_opt_init_load }, { "kvmclock", kvmclock_init_load }, { "cpu", cpu_init_load_32 }, { "apic", apic_init_load }, { "block", block_init_load }, { "ram", ram_init_load }, { "timer", timer_init_load }, { NULL, NULL } }; #define QEMU_VM_FILE_MAGIC 0x5145564D #define LIBVIRT_QEMU_VM_FILE_MAGIC 0x4c696276 struct libvirt_header { char magic[16]; uint32_t version; uint32_t xml_length; uint32_t was_running; uint32_t padding[16]; }; static long device_search(const struct qemu_device_loader *, FILE *); static struct qemu_device * device_get (const struct qemu_device_loader *devices, struct qemu_device_list *dl, enum qemu_save_section sec, FILE *fp) { char name[257]; uint32_t section_id, instance_id, version_id; // bool live; const struct qemu_device_loader *devp; long next_device_offset; next_device: devp = devices; if (sec == QEMU_VM_SUBSECTION) { get_string(fp, name); goto search_device; } section_id = get_be32 (fp); if (sec != QEMU_VM_SECTION_START && sec != QEMU_VM_SECTION_FULL) return device_find (dl, section_id); get_string(fp, name); instance_id = get_be32 (fp); version_id = get_be32 (fp); while (devp->name && strcmp (devp->name, name)) devp++; if (!devp->name) { search_device: dprintf("device_get: unknown/unsupported: \"%s\"\n", name); if ((next_device_offset = device_search(devices, fp))) { fseek(fp, next_device_offset, SEEK_CUR); sec = getc(fp); if (sec == QEMU_VM_EOF) return NULL; goto next_device; } return NULL; } return devp->init_load (dl, 
section_id, instance_id, version_id, sec == QEMU_VM_SECTION_START, fp); } struct qemu_device_list * qemu_load (const struct qemu_device_loader *devices, uint32_t required_features, FILE *fp) { struct qemu_device_list *result = NULL; struct qemu_device *last = NULL;; size_t items ATTRIBUTE_UNUSED; uint32_t footerSecId ATTRIBUTE_UNUSED; char name[257]; switch (get_be32 (fp)) { case QEMU_VM_FILE_MAGIC: break; case LIBVIRT_QEMU_VM_FILE_MAGIC: { struct libvirt_header header; memcpy (header.magic, "Libv", 4); items = fread (&header.magic[4], sizeof (header) - 4, 1, fp); if (memcmp ("LibvirtQemudSave", header.magic, 16)) goto fail; fseek (fp, header.xml_length, SEEK_CUR); if (get_be32 (fp) != QEMU_VM_FILE_MAGIC) goto fail; break; } default: goto fail; } if (get_be32 (fp) != 3) return NULL; dprintf("\n"); result = calloc (1, sizeof (struct qemu_device_list)); for (;;) { struct qemu_device *d; uint32_t features; enum qemu_save_section sec = getc (fp); if (feof (fp) || ferror (fp)) break; if (sec == QEMU_VM_EOF) break; if (sec == QEMU_VM_SECTION_FOOTER) { footerSecId = get_be32 (fp); continue; } if (sec == QEMU_VM_CONFIGURATION) { uint32_t len = get_be32 (fp); get_string_len (fp, name, len); continue; } d = device_get (devices, result, sec, fp); if (!d) break; if (d != last) { dprintf("qemu_load: \"%s\"\n", d->vtbl->name); last = d; } features = d->vtbl->load (d, fp, sec); if (feof (fp) || ferror (fp)) break; if (sec == QEMU_VM_SECTION_END || sec == QEMU_VM_SECTION_FULL) result->features |= features; } if (ferror (fp) || (result->features & required_features) != required_features) goto fail; return result; fail: device_list_free (result); free (result); return NULL; } /* * crash utility adaptation. 
*/ #include "defs.h" int is_qemu_vm_file(char *filename) { struct libvirt_header header; FILE *vmp; int retval; size_t items ATTRIBUTE_UNUSED; char *xml; if ((vmp = fopen(filename, "r")) == NULL) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } retval = FALSE; xml = NULL; switch (get_be32(vmp)) { case QEMU_VM_FILE_MAGIC: retval = TRUE; break; case LIBVIRT_QEMU_VM_FILE_MAGIC: rewind(vmp); items = fread(&header.magic[0], sizeof(header), 1, vmp); if (STRNEQ(header.magic, "LibvirtQemudSave")) { if ((xml = (char *)malloc(header.xml_length))) { items = fread(xml, header.xml_length, 1, vmp); /* * Parse here if necessary or desirable. */ } else fseek(vmp, header.xml_length, SEEK_CUR); if (get_be32(vmp) == QEMU_VM_FILE_MAGIC) retval = TRUE; } break; default: retval = FALSE; } if (xml) free(xml); switch (retval) { case TRUE: kvm->vmp = vmp; kvm->vmfd = fileno(vmp); break; case FALSE: fclose(vmp); break; } return retval; } void dump_qemu_header(FILE *out) { int i; struct libvirt_header header; char magic[4]; uint8_t c; size_t items ATTRIBUTE_UNUSED; rewind(kvm->vmp); if (get_be32(kvm->vmp) == QEMU_VM_FILE_MAGIC) { fprintf(out, "%s: QEMU_VM_FILE_MAGIC\n", pc->dumpfile); return; } rewind(kvm->vmp); items = fread(&header, sizeof(header), 1, kvm->vmp); fprintf(out, "%s: libvirt_header:\n\n", pc->dumpfile); fprintf(out, " magic: "); for (i = 0; i < 16; i++) fprintf(out, "%c", header.magic[i]); fprintf(out, "\n"); fprintf(out, " version: %d\n", header.version); fprintf(out, " xml_length: %d\n", header.xml_length); fprintf(out, " was_running: %d\n", header.was_running); fprintf(out, " padding: (not shown)\n\n"); for (i = 0; i < header.xml_length; i++) { c = getc(kvm->vmp); if (c) fprintf(out, "%c", c); } fprintf(out, "\n"); items = fread(&magic, sizeof(char), 4, kvm->vmp); for (i = 0; i < 4; i++) fprintf(out, "%c", magic[i]); fprintf(out, "\n"); } static long device_search(const struct qemu_device_loader *devices, FILE *fp) { uint sz; char *p1, *p2; long 
next_device_offset; long remaining; char buf[4096]; off_t current; BZERO(buf, 4096); current = ftello(fp); if (fread(buf, sizeof(char), 4096, fp) != 4096) { fseeko(fp, current, SEEK_SET); return 0; } fseeko(fp, current, SEEK_SET); while (devices->name) { for (p1 = buf, remaining = 4096; (p2 = memchr(p1, devices->name[0], remaining)); p1 = p2+1, remaining = 4096 - (p1-buf)) { sz = *((unsigned char *)p2-1); if (STRNEQ(p2, devices->name) && (strlen(devices->name) == sz)) { *(p2+sz) = '\0'; dprintf("device_search: %s\n", p2); next_device_offset = (p2-buf) - 6; return next_device_offset; } } devices++; } return 0; } crash-utility-crash-9cd43f5/xendump.c0000664000372000037200000022770015107550337017230 0ustar juerghjuergh/* * xendump.c * * Copyright (C) 2006-2011, 2013-2014 David Anderson * Copyright (C) 2006-2011, 2013-2014 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include "xendump.h" static struct xendump_data xendump_data = { 0 }; struct xendump_data *xd = &xendump_data; static int xc_save_verify(char *); static int xc_core_verify(char *, char *); static int xc_save_read(void *, int, ulong, physaddr_t); static int xc_core_read(void *, int, ulong, physaddr_t); static int xc_core_mfns(ulong, FILE *); static void poc_store(ulong, off_t); static off_t poc_get(ulong, int *); static void xen_dump_vmconfig(FILE *); static void xc_core_create_pfn_tables(void); static ulong xc_core_pfn_to_page_index(ulong); static int xc_core_pfn_valid(ulong); static void xendump_print(char *fmt, ...); static int xc_core_elf_verify(char *, char *); static void xc_core_elf_dump(void); static char *xc_core_elf_mfn_to_page(ulong, char *); static int xc_core_elf_mfn_to_page_index(ulong); static ulong xc_core_elf_pfn_valid(ulong); static ulong xc_core_elf_pfn_to_page_index(ulong); static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *); static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *); static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int); static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int); static char *xc_core_strtab(uint32_t, char *); static void xc_core_dump_elfnote(off_t, size_t, int); static void xc_core_elf_pfn_init(void); #define ELFSTORE 1 #define ELFREAD 0 /* * Determine whether a file is a xendump creation, and if TRUE, * initialize the xendump_data structure. 
*/ int is_xendump(char *file) { int verified; char buf[BUFSIZE]; if ((xd->xfd = open(file, O_RDWR)) < 0) { if ((xd->xfd = open(file, O_RDONLY)) < 0) { sprintf(buf, "%s: open", file); perror(buf); return FALSE; } } if (read(xd->xfd, buf, BUFSIZE) != BUFSIZE) return FALSE; if (machine_type("X86") || machine_type("X86_64")) xd->page_size = 4096; else if (machine_type("IA64") && !machdep->pagesize) xd->page_size = 16384; else xd->page_size = machdep->pagesize; verified = xc_save_verify(buf) || xc_core_verify(file, buf); if (!verified) close(xd->xfd); return (verified); } /* * Verify whether the dump was created by the xc_domain_dumpcore() * library function in libxc/xc_core.c. */ static int xc_core_verify(char *file, char *buf) { struct xc_core_header *xcp; xcp = (struct xc_core_header *)buf; if (xc_core_elf_verify(file, buf)) return TRUE; if ((xcp->xch_magic != XC_CORE_MAGIC) && (xcp->xch_magic != XC_CORE_MAGIC_HVM)) return FALSE; if (!xcp->xch_nr_vcpus) { error(INFO, "faulty xc_core dump file header: xch_nr_vcpus is 0\n\n"); fprintf(stderr, " xch_magic: %x\n", xcp->xch_magic); fprintf(stderr, " xch_nr_vcpus: %d\n", xcp->xch_nr_vcpus); fprintf(stderr, " xch_nr_pages: %d\n", xcp->xch_nr_pages); fprintf(stderr, " xch_ctxt_offset: %d\n", xcp->xch_ctxt_offset); fprintf(stderr, " xch_index_offset: %d\n", xcp->xch_index_offset); fprintf(stderr, " xch_pages_offset: %d\n\n", xcp->xch_pages_offset); clean_exit(1); } xd->xc_core.header.xch_magic = xcp->xch_magic; xd->xc_core.header.xch_nr_vcpus = xcp->xch_nr_vcpus; xd->xc_core.header.xch_nr_pages = xcp->xch_nr_pages; xd->xc_core.header.xch_ctxt_offset = (off_t)xcp->xch_ctxt_offset; xd->xc_core.header.xch_index_offset = (off_t)xcp->xch_index_offset; xd->xc_core.header.xch_pages_offset = (off_t)xcp->xch_pages_offset; xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE); if (xc_core_mfns(XC_CORE_64BIT_HOST, stderr)) xd->flags |= XC_CORE_64BIT_HOST; if (!xd->page_size) error(FATAL, "unknown page size: use -p command 
line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; } /* * Do the work for read_xendump() for the XC_CORE dumpfile format. */ static int xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) { ulong pfn, page_index; off_t offset; int redundant; if (xd->flags & (XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE)) xc_core_create_pfn_tables(); pfn = (ulong)BTOP(paddr); if ((offset = poc_get(pfn, &redundant))) { if (!redundant) { if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; xd->last_pfn = pfn; } BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } if ((page_index = xc_core_pfn_to_page_index(pfn)) == PFN_NOT_FOUND) return READ_ERROR; offset = xd->xc_core.header.xch_pages_offset + ((off_t)(page_index) * (off_t)xd->page_size); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; poc_store(pfn, offset); BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } /* * Verify whether the dumpfile was created by the "xm save" facility. * This gets started by the "save" function in XendCheckpoint.py, and * then by xc_save.c, with the work done in the xc_linux_save() library * function in libxc/xc_linux_save.c. */ #define MAX_BATCH_SIZE 1024 /* * Number of P2M entries in a page. */ #define ULPP (xd->page_size/sizeof(unsigned long)) /* * Number of P2M entries in the pfn_to_mfn_frame_list. */ #define P2M_FL_ENTRIES (((xd->xc_save.nr_pfns)+ULPP-1)/ULPP) /* * Size in bytes of the pfn_to_mfn_frame_list. 
*/ #define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long)) #define XTAB (0xf<<28) /* invalid page */ #define LTAB_MASK XTAB static int xc_save_verify(char *buf) { int i, batch_count, done_batch, *intptr; ulong flags, *ulongptr; ulong batch_index, total_pages_read; ulong N; if (!STRNEQ(buf, XC_SAVE_SIGNATURE)) return FALSE; if (lseek(xd->xfd, strlen(XC_SAVE_SIGNATURE), SEEK_SET) == -1) return FALSE; flags = XC_SAVE; if (CRASHDEBUG(1)) { fprintf(stderr, "\"%s\"\n", buf); fprintf(stderr, "endian: %d %s\n", __BYTE_ORDER, __BYTE_ORDER == __BIG_ENDIAN ? "__BIG_ENDIAN" : (__BYTE_ORDER == __LITTLE_ENDIAN ? "__LITTLE_ENDIAN" : "???")); } /* * size of vmconfig data structure (big-endian) */ if (read(xd->xfd, buf, sizeof(int)) != sizeof(int)) return FALSE; intptr = (int *)buf; if (CRASHDEBUG(1) && BYTE_SWAP_REQUIRED(__BIG_ENDIAN)) { fprintf(stderr, "byte-swap required for this:\n"); for (i = 0; i < sizeof(int); i++) fprintf(stderr, "[%x]", buf[i] & 0xff); fprintf(stderr, ": %x -> ", *intptr); } xd->xc_save.vmconfig_size = swab32(*intptr); if (CRASHDEBUG(1)) fprintf(stderr, "%x\n", xd->xc_save.vmconfig_size); if (!(xd->xc_save.vmconfig_buf = (char *)malloc (xd->xc_save.vmconfig_size))) error(FATAL, "cannot malloc xc_save vmconfig space."); if (!xd->page_size) error(FATAL, "unknown page size: use -p command line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); if (!(xd->xc_save.region_pfn_type = (ulong *)calloc (MAX_BATCH_SIZE, sizeof(ulong)))) error(FATAL, "cannot malloc region_pfn_type\n"); if (read(xd->xfd, xd->xc_save.vmconfig_buf, xd->xc_save.vmconfig_size) != xd->xc_save.vmconfig_size) goto xc_save_bailout; /* * nr_pfns (native byte order) */ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) goto xc_save_bailout; ulongptr 
= (ulong *)buf; if (CRASHDEBUG(1)) { for (i = 0; i < sizeof(ulong); i++) fprintf(stderr, "[%x]", buf[i] & 0xff); fprintf(stderr, ": %lx (nr_pfns)\n", *ulongptr); } xd->xc_save.nr_pfns = *ulongptr; if (machine_type("IA64")) goto xc_save_ia64; /* * Get a local copy of the live_P2M_frame_list */ if (!(xd->xc_save.p2m_frame_list = (unsigned long *)malloc(P2M_FL_SIZE))) error(FATAL, "cannot allocate p2m_frame_list array"); if (!(xd->xc_save.batch_offsets = (off_t *)calloc((size_t)P2M_FL_ENTRIES, sizeof(off_t)))) error(FATAL, "cannot allocate batch_offsets array"); xd->xc_save.batch_count = P2M_FL_ENTRIES; if (read(xd->xfd, xd->xc_save.p2m_frame_list, P2M_FL_SIZE) != P2M_FL_SIZE) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "pre-batch file pointer: %lld\n", (ulonglong)lseek(xd->xfd, 0L, SEEK_CUR)); /* * ... * int batch_count * ulong region pfn_type[batch_count] * page 0 * page 1 * ... * page batch_count-1 * (repeat) */ total_pages_read = 0; batch_index = 0; done_batch = FALSE; while (!done_batch) { xd->xc_save.batch_offsets[batch_index] = (off_t) lseek(xd->xfd, 0L, SEEK_CUR); if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "batch[%ld]: %d ", batch_index, batch_count); batch_index++; if (batch_index >= P2M_FL_ENTRIES) { fprintf(stderr, "more than %ld batches encountered?\n", P2M_FL_ENTRIES); goto xc_save_bailout; } switch (batch_count) { case 0: if (CRASHDEBUG(1)) { fprintf(stderr, ": Batch work is done: %ld pages read (P2M_FL_ENTRIES: %ld)\n", total_pages_read, P2M_FL_ENTRIES); } done_batch = TRUE; continue; case -1: if (CRASHDEBUG(1)) fprintf(stderr, ": Entering page verify mode\n"); continue; default: if (batch_count > MAX_BATCH_SIZE) { if (CRASHDEBUG(1)) fprintf(stderr, ": Max batch size exceeded. 
Giving up.\n"); done_batch = TRUE; continue; } if (CRASHDEBUG(1)) fprintf(stderr, "\n"); break; } if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != batch_count * sizeof(ulong)) goto xc_save_bailout; for (i = 0; i < batch_count; i++) { unsigned long pagetype; unsigned long pfn; pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; if (pagetype == XTAB) /* a bogus/unmapped page: skip it */ continue; if (pfn > xd->xc_save.nr_pfns) { if (CRASHDEBUG(1)) fprintf(stderr, "batch_count: %d pfn %ld out of range", batch_count, pfn); } if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) goto xc_save_bailout; total_pages_read++; } } /* * Get the list of PFNs that are not in the psuedo-phys map */ if (read(xd->xfd, &xd->xc_save.pfns_not, sizeof(xd->xc_save.pfns_not)) != sizeof(xd->xc_save.pfns_not)) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "PFNs not in pseudo-phys map: %d\n", xd->xc_save.pfns_not); if ((total_pages_read + xd->xc_save.pfns_not) != xd->xc_save.nr_pfns) error(WARNING, "nr_pfns: %ld != (total pages: %ld + pages not saved: %d)\n", xd->xc_save.nr_pfns, total_pages_read, xd->xc_save.pfns_not); xd->xc_save.pfns_not_offset = lseek(xd->xfd, 0L, SEEK_CUR); if (lseek(xd->xfd, sizeof(ulong) * xd->xc_save.pfns_not, SEEK_CUR) == -1) goto xc_save_bailout; xd->xc_save.vcpu_ctxt_offset = lseek(xd->xfd, 0L, SEEK_CUR); lseek(xd->xfd, 0, SEEK_END); lseek(xd->xfd, -((off_t)(xd->page_size)), SEEK_CUR); xd->xc_save.shared_info_page_offset = lseek(xd->xfd, 0L, SEEK_CUR); xd->flags |= (XENDUMP_LOCAL | flags); kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; xc_save_ia64: /* * Completely different format for ia64: * * ... * pfn # * page data * pfn # * page data * ... 
*/ free(xd->poc); xd->poc = NULL; free(xd->xc_save.region_pfn_type); xd->xc_save.region_pfn_type = NULL; if (!(xd->xc_save.ia64_page_offsets = (ulong *)calloc(xd->xc_save.nr_pfns, sizeof(off_t)))) error(FATAL, "cannot allocate ia64_page_offsets array"); /* * version */ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) goto xc_save_bailout; xd->xc_save.ia64_version = *((ulong *)buf); if (CRASHDEBUG(1)) fprintf(stderr, "ia64 version: %lx\n", xd->xc_save.ia64_version); /* * xen_domctl_arch_setup structure */ if (read(xd->xfd, buf, sizeof(xen_domctl_arch_setup_t)) != sizeof(xen_domctl_arch_setup_t)) goto xc_save_bailout; if (CRASHDEBUG(1)) { xen_domctl_arch_setup_t *setup = (xen_domctl_arch_setup_t *)buf; fprintf(stderr, "xen_domctl_arch_setup:\n"); fprintf(stderr, " flags: %lx\n", (ulong)setup->flags); fprintf(stderr, " bp: %lx\n", (ulong)setup->bp); fprintf(stderr, " maxmem: %lx\n", (ulong)setup->maxmem); fprintf(stderr, " xsi_va: %lx\n", (ulong)setup->xsi_va); fprintf(stderr, "hypercall_imm: %x\n", setup->hypercall_imm); } for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { if (read(xd->xfd, &N, sizeof(N)) != sizeof(N)) goto xc_save_bailout; if (N < xd->xc_save.nr_pfns) xd->xc_save.ia64_page_offsets[N] = lseek(xd->xfd, 0, SEEK_CUR); else error(WARNING, "[%d]: pfn of %lx (0x%lx) in ia64 canonical page list exceeds %ld\n", i, N, N, xd->xc_save.nr_pfns); if (CRASHDEBUG(1)) { if ((i < 10) || (N >= (xd->xc_save.nr_pfns-10))) fprintf(stderr, "[%d]: %ld\n%s", i, N, i == 9 ? 
"...\n" : ""); } if ((N+1) >= xd->xc_save.nr_pfns) break; if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) goto xc_save_bailout; } if (CRASHDEBUG(1)) { for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { if (!xd->xc_save.ia64_page_offsets[i]) N++; } fprintf(stderr, "%ld out of %ld pfns not dumped\n", N, xd->xc_save.nr_pfns); } xd->flags |= (XENDUMP_LOCAL | flags | XC_SAVE_IA64); kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; xc_save_bailout: error(INFO, "xc_save_verify: \"LinuxGuestRecord\" file handling/format error\n"); if (xd->xc_save.p2m_frame_list) { free(xd->xc_save.p2m_frame_list); xd->xc_save.p2m_frame_list = NULL; } if (xd->xc_save.batch_offsets) { free(xd->xc_save.batch_offsets); xd->xc_save.batch_offsets = NULL; } if (xd->xc_save.vmconfig_buf) { free(xd->xc_save.vmconfig_buf); xd->xc_save.vmconfig_buf = NULL; } if (xd->page) { free(xd->page); xd->page = NULL; } return FALSE; } /* * Do the work for read_xendump() for the XC_SAVE dumpfile format. 
*/

/*
 * Satisfy a read request against an XC_SAVE-format dumpfile.
 *
 *  bufptr: caller's buffer receiving cnt bytes
 *  cnt:    number of bytes to copy out of the located page
 *  addr:   virtual address (used only for debug output here)
 *  paddr:  physical address; BTOP(paddr) yields the requested pfn
 *
 * Returns cnt on success, or SEEK_ERROR/READ_ERROR on failure.
 */
static int
xc_save_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	int b, i, redundant;
	ulong reqpfn;
	int batch_count;
	off_t file_offset;

	/* The requested pfn is derived from the physical address. */
	reqpfn = (ulong)BTOP(paddr);

	if (CRASHDEBUG(8))
		fprintf(xd->ofp,
		    "xc_save_read(bufptr: %lx cnt: %d addr: %lx paddr: %llx (%ld, 0x%lx)\n",
			(ulong)bufptr, cnt, addr, (ulonglong)paddr,
			reqpfn, reqpfn);

	/*
	 * IA64 XC_SAVE dumpfiles carry a per-pfn file offset table
	 * (ia64_page_offsets[]), so the page can be seeked to directly.
	 */
	if (xd->flags & XC_SAVE_IA64) {
		if (reqpfn >= xd->xc_save.nr_pfns) {
			if (CRASHDEBUG(1))
				fprintf(xd->ofp,
				    "xc_save_read: pfn %lx too large: nr_pfns: %lx\n",
					reqpfn, xd->xc_save.nr_pfns);
			return SEEK_ERROR;
		}

		file_offset = xd->xc_save.ia64_page_offsets[reqpfn];
		if (!file_offset) {
			/* A zero offset means the page was never dumped. */
			if (CRASHDEBUG(1))
				fprintf(xd->ofp,
				    "xc_save_read: pfn %lx not stored in xendump\n",
					reqpfn);
			return SEEK_ERROR;
		}

		/*
		 * xd->page still holds the most recently read page, so a
		 * repeat request for the same pfn skips the lseek/read.
		 */
		if (reqpfn != xd->last_pfn) {
			if (lseek(xd->xfd, file_offset, SEEK_SET) == -1)
				return SEEK_ERROR;
			if (read(xd->xfd, xd->page, xd->page_size) !=
			    xd->page_size)
				return READ_ERROR;
		} else {
			xd->redundant++;
			xd->cache_hits++;
		}
		xd->accesses++;
		xd->last_pfn = reqpfn;
		BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
		return cnt;
	}

	/*
	 * Fast path: the pfn's file offset may already be cached in the
	 * pfn_offset_cache; "redundant" means xd->page already holds it.
	 */
	if ((file_offset = poc_get(reqpfn, &redundant))) {
		if (!redundant) {
			if (lseek(xd->xfd, file_offset, SEEK_SET) == -1)
				return SEEK_ERROR;
			if (read(xd->xfd, xd->page, xd->page_size) !=
			    xd->page_size)
				return READ_ERROR;
			xd->last_pfn = reqpfn;
		} else if (CRASHDEBUG(1))
			console("READ %ld (0x%lx) skipped!\n", reqpfn, reqpfn);
		BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt);
		return cnt;
	}

	/*
	 * Slow path: linearly scan each batch.  The on-disk layout per
	 * batch is:
	 *
	 *    int batch_count
	 *    ulong region pfn_type[batch_count]
	 *    page 0
	 *    page 1
	 *    ...
	 *    page batch_count-1
	 *    (repeat)
	 */
	for (b = 0; b < xd->xc_save.batch_count; b++) {
		/* Seek to the start of this batch's header. */
		if (lseek(xd->xfd, xd->xc_save.batch_offsets[b],
		    SEEK_SET) == -1)
			return SEEK_ERROR;

		if (CRASHDEBUG(8))
			fprintf(xd->ofp,
			    "check batch[%d]: offset: %llx\n",
				b, (ulonglong)xd->xc_save.batch_offsets[b]);

		if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int))
			return READ_ERROR;

		switch (batch_count)
		{
		case 0:
			if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) {
				fprintf(xd->ofp,
				    "batch[%d]: has count of zero -- bailing out on pfn %ld\n",
					b, reqpfn);
			}
			return READ_ERROR;

		case -1:
			return READ_ERROR;

		default:
			if (CRASHDEBUG(8))
				fprintf(xd->ofp,
				    "batch[%d]: offset: %llx batch count: %d\n",
					b,
					(ulonglong)xd->xc_save.batch_offsets[b],
					batch_count);
			break;
		}

		/* Pull in the pfn/type words for the whole batch. */
		if (read(xd->xfd, xd->xc_save.region_pfn_type,
		    batch_count * sizeof(ulong)) !=
		    batch_count * sizeof(ulong))
			return READ_ERROR;

		for (i = 0; i < batch_count; i++) {
			unsigned long pagetype;
			unsigned long pfn;

			/* Each word encodes the pfn plus LTAB type bits. */
			pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK;
			pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK;

			if (pagetype == XTAB)
				/* a bogus/unmapped page: skip it */
				continue;

			/*
			 * NOTE(review): ">" admits pfn == nr_pfns; looks
			 * like an off-by-one in the range check, but it
			 * only gates a debug message.
			 */
			if (pfn > xd->xc_save.nr_pfns) {
				if (CRASHDEBUG(1))
					fprintf(stderr,
					    "batch_count: %d pfn %ld out of range",
						batch_count, pfn);
			}

			if (pfn == reqpfn) {
				/*
				 * Found it: cache this page's file offset
				 * for subsequent lookups, then read it.
				 */
				file_offset = lseek(xd->xfd, 0, SEEK_CUR);
				poc_store(pfn, file_offset);

				if (read(xd->xfd, xd->page, xd->page_size) !=
				    xd->page_size)
					return READ_ERROR;

				BCOPY(xd->page + PAGEOFFSET(paddr),
					bufptr, cnt);
				return cnt;
			}

			/* Not the one: skip over this member's page data. */
			if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1)
				return SEEK_ERROR;
		}
	}

	return READ_ERROR;
}

/*
 * Stash a pfn's offset.  If they're all in use, put it in the
 * least-used slot that's closest to the beginning of the array.
*/ static void poc_store(ulong pfn, off_t file_offset) { int i; struct pfn_offset_cache *poc, *plow; ulong curlow; curlow = ~(0UL); plow = NULL; poc = xd->poc; for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { if (poc->cnt == 0) { poc->cnt = 1; poc->pfn = pfn; poc->file_offset = file_offset; xd->last_pfn = pfn; return; } if (poc->cnt < curlow) { curlow = poc->cnt; plow = poc; } } plow->cnt = 1; plow->pfn = pfn; plow->file_offset = file_offset; xd->last_pfn = pfn; } /* * Check whether a pfn's offset has been cached. */ static off_t poc_get(ulong pfn, int *redundant) { int i; struct pfn_offset_cache *poc; xd->accesses++; if (pfn == xd->last_pfn) { xd->redundant++; *redundant = TRUE; return 1; } else *redundant = FALSE; poc = xd->poc; for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { if (poc->cnt && (poc->pfn == pfn)) { poc->cnt++; xd->cache_hits++; return poc->file_offset; } } return 0; } /* * Perform any post-dumpfile determination stuff here. */ int xendump_init(char *unused, FILE *fptr) { if (!XENDUMP_VALID()) return FALSE; xd->ofp = fptr; return TRUE; } int read_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (pc->curcmd_flags & XEN_MACHINE_ADDR) return READ_ERROR; switch (xd->flags & (XC_SAVE|XC_CORE_ORIG|XC_CORE_ELF)) { case XC_SAVE: return xc_save_read(bufptr, cnt, addr, paddr); case XC_CORE_ORIG: case XC_CORE_ELF: return xc_core_read(bufptr, cnt, addr, paddr); default: return READ_ERROR; } } int read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { ulong pfn, page_index; off_t offset; pfn = (ulong)BTOP(paddr); /* ODA: pfn == mfn !!! 
*/ if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND) return READ_ERROR; offset = xd->xc_core.header.xch_pages_offset + ((off_t)(page_index) * (off_t)xd->page_size); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } int write_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return WRITE_ERROR; } uint xendump_page_size(void) { if (!XENDUMP_VALID()) return 0; return xd->page_size; } /* * xendump_free_memory(), and xendump_memory_used() * are debug only, and typically unnecessary to implement. */ int xendump_free_memory(void) { return 0; } int xendump_memory_used(void) { return 0; } /* * This function is dump-type independent, used here to * to dump the xendump_data structure contents. */ int xendump_memory_dump(FILE *fp) { int i, linefeed, used, others; ulong *ulongptr; Elf32_Off offset32; Elf64_Off offset64; FILE *fpsave; fprintf(fp, " flags: %lx (", xd->flags); others = 0; if (xd->flags & XENDUMP_LOCAL) fprintf(fp, "%sXENDUMP_LOCAL", others++ ? "|" : ""); if (xd->flags & XC_SAVE) fprintf(fp, "%sXC_SAVE", others++ ? "|" : ""); if (xd->flags & XC_CORE_ORIG) fprintf(fp, "%sXC_CORE_ORIG", others++ ? "|" : ""); if (xd->flags & XC_CORE_ELF) fprintf(fp, "%sXC_CORE_ELF", others++ ? "|" : ""); if (xd->flags & XC_CORE_P2M_CREATE) fprintf(fp, "%sXC_CORE_P2M_CREATE", others++ ? "|" : ""); if (xd->flags & XC_CORE_PFN_CREATE) fprintf(fp, "%sXC_CORE_PFN_CREATE", others++ ? "|" : ""); if (xd->flags & XC_CORE_NO_P2M) fprintf(fp, "%sXC_CORE_NO_P2M", others++ ? "|" : ""); if (xd->flags & XC_SAVE_IA64) fprintf(fp, "%sXC_SAVE_IA64", others++ ? "|" : ""); if (xd->flags & XC_CORE_64BIT_HOST) fprintf(fp, "%sXC_CORE_64BIT_HOST", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " xfd: %d\n", xd->xfd); fprintf(fp, " page_size: %d\n", xd->page_size); fprintf(fp, " ofp: %lx\n", (ulong)xd->ofp); fprintf(fp, " page: %lx\n", (ulong)xd->page); fprintf(fp, " panic_pc: %lx\n", xd->panic_pc); fprintf(fp, " panic_sp: %lx\n", xd->panic_sp); fprintf(fp, " accesses: %ld\n", (ulong)xd->accesses); fprintf(fp, " cache_hits: %ld ", (ulong)xd->cache_hits); if (xd->accesses) fprintf(fp, "(%ld%%)\n", xd->cache_hits * 100 / xd->accesses); else fprintf(fp, "\n"); fprintf(fp, " last_pfn: %ld\n", xd->last_pfn); fprintf(fp, " redundant: %ld ", (ulong)xd->redundant); if (xd->accesses) fprintf(fp, "(%ld%%)\n", xd->redundant * 100 / xd->accesses); else fprintf(fp, "\n"); for (i = used = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) if (xd->poc && xd->poc[i].cnt) used++; if (xd->poc) fprintf(fp, " poc[%d]: %lx %s", PFN_TO_OFFSET_CACHE_ENTRIES, (ulong)xd->poc, xd->poc ? "" : "(none)"); else fprintf(fp, " poc[0]: (unused)\n"); for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) { if (!xd->poc) break; if (!xd->poc[i].cnt) { if (!i) fprintf(fp, "(none used)\n"); break; } else if (!i) fprintf(fp, "(%d used)\n", used); if (CRASHDEBUG(2)) fprintf(fp, " [%d]: pfn: %ld (0x%lx) count: %ld file_offset: %llx\n", i, xd->poc[i].pfn, xd->poc[i].pfn, xd->poc[i].cnt, (ulonglong)xd->poc[i].file_offset); } if (!xd->poc) fprintf(fp, "\n"); fprintf(fp, "\n xc_save:\n"); fprintf(fp, " nr_pfns: %ld (0x%lx)\n", xd->xc_save.nr_pfns, xd->xc_save.nr_pfns); fprintf(fp, " vmconfig_size: %d (0x%x)\n", xd->xc_save.vmconfig_size, xd->xc_save.vmconfig_size); fprintf(fp, " vmconfig_buf: %lx\n", (ulong)xd->xc_save.vmconfig_buf); if (xd->flags & XC_SAVE) xen_dump_vmconfig(fp); fprintf(fp, " p2m_frame_list: %lx ", (ulong)xd->xc_save.p2m_frame_list); if ((xd->flags & XC_SAVE) && xd->xc_save.p2m_frame_list) { fprintf(fp, "\n"); ulongptr = xd->xc_save.p2m_frame_list; for (i = 0; i < P2M_FL_ENTRIES; i++, ulongptr++) fprintf(fp, "%ld ", *ulongptr); fprintf(fp, "\n"); } else 
fprintf(fp, "(none)\n"); fprintf(fp, " pfns_not: %d\n", xd->xc_save.pfns_not); fprintf(fp, " pfns_not_offset: %lld\n", (ulonglong)xd->xc_save.pfns_not_offset); fprintf(fp, " vcpu_ctxt_offset: %lld\n", (ulonglong)xd->xc_save.vcpu_ctxt_offset); fprintf(fp, " shared_info_page_offset: %lld\n", (ulonglong)xd->xc_save.shared_info_page_offset); fprintf(fp, " region_pfn_type: %lx\n", (ulong)xd->xc_save.region_pfn_type); fprintf(fp, " batch_count: %ld\n", (ulong)xd->xc_save.batch_count); fprintf(fp, " batch_offsets: %lx %s\n", (ulong)xd->xc_save.batch_offsets, xd->xc_save.batch_offsets ? "" : "(none)"); for (i = linefeed = 0; i < xd->xc_save.batch_count; i++) { fprintf(fp, "[%d]: %llx ", i, (ulonglong)xd->xc_save.batch_offsets[i]); if (((i+1)%4) == 0) { fprintf(fp, "\n"); linefeed = FALSE; } else linefeed = TRUE; } if (linefeed) fprintf(fp, "\n"); fprintf(fp, " ia64_version: %ld\n", (ulong)xd->xc_save.ia64_version); fprintf(fp, " ia64_page_offsets: %lx ", (ulong)xd->xc_save.ia64_page_offsets); if (xd->xc_save.ia64_page_offsets) fprintf(fp, "(%ld entries)\n\n", xd->xc_save.nr_pfns); else fprintf(fp, "(none)\n\n"); fprintf(fp, " xc_core:\n"); fprintf(fp, " header:\n"); fprintf(fp, " xch_magic: %x ", xd->xc_core.header.xch_magic); if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC) fprintf(fp, "(XC_CORE_MAGIC)\n"); else if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM) fprintf(fp, "(XC_CORE_MAGIC_HVM)\n"); else fprintf(fp, "(unknown)\n"); fprintf(fp, " xch_nr_vcpus: %d\n", xd->xc_core.header.xch_nr_vcpus); fprintf(fp, " xch_nr_pages: %d (0x%x)\n", xd->xc_core.header.xch_nr_pages, xd->xc_core.header.xch_nr_pages); fprintf(fp, " xch_ctxt_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_ctxt_offset, (ulonglong)xd->xc_core.header.xch_ctxt_offset); fprintf(fp, " xch_index_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_index_offset, (ulonglong)xd->xc_core.header.xch_index_offset); fprintf(fp, " xch_pages_offset: %llu (0x%llx)\n", 
(ulonglong)xd->xc_core.header.xch_pages_offset, (ulonglong)xd->xc_core.header.xch_pages_offset); fprintf(fp, " elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" : xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a"); fprintf(fp, " elf_strtab_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.elf_strtab_offset, (ulonglong)xd->xc_core.elf_strtab_offset); fprintf(fp, " format_version: %016llx\n", (ulonglong)xd->xc_core.format_version); fprintf(fp, " shared_info_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.shared_info_offset, (ulonglong)xd->xc_core.shared_info_offset); if (machine_type("IA64")) fprintf(fp, " ia64_mapped_regs_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.ia64_mapped_regs_offset, (ulonglong)xd->xc_core.ia64_mapped_regs_offset); fprintf(fp, " elf_index_pfn[%d]: %s", INDEX_PFN_COUNT, xd->xc_core.elf_class ? "\n" : "(none used)\n"); if (xd->xc_core.elf_class) { for (i = 0; i < INDEX_PFN_COUNT; i++) { fprintf(fp, "%ld:%ld ", xd->xc_core.elf_index_pfn[i].index, xd->xc_core.elf_index_pfn[i].pfn); } fprintf(fp, "\n"); } fprintf(fp, " last_batch:\n"); fprintf(fp, " index: %ld (%ld - %ld)\n", xd->xc_core.last_batch.index, xd->xc_core.last_batch.start, xd->xc_core.last_batch.end); fprintf(fp, " accesses: %ld\n", xd->xc_core.last_batch.accesses); fprintf(fp, " duplicates: %ld ", xd->xc_core.last_batch.duplicates); if (xd->xc_core.last_batch.accesses) fprintf(fp, "(%ld%%)\n", xd->xc_core.last_batch.duplicates * 100 / xd->xc_core.last_batch.accesses); else fprintf(fp, "\n"); fprintf(fp, " elf32: %lx\n", (ulong)xd->xc_core.elf32); fprintf(fp, " elf64: %lx\n", (ulong)xd->xc_core.elf64); fprintf(fp, " p2m_frames: %d\n", xd->xc_core.p2m_frames); fprintf(fp, " p2m_frame_index_list: %s\n", (xd->flags & (XC_CORE_NO_P2M|XC_SAVE)) ? "(not used)" : ""); for (i = 0; i < xd->xc_core.p2m_frames; i++) { fprintf(fp, "%ld ", xd->xc_core.p2m_frame_index_list[i]); } fprintf(fp, xd->xc_core.p2m_frames ? 
"\n" : ""); if ((xd->flags & XC_CORE_ORIG) && CRASHDEBUG(8)) xc_core_mfns(XENDUMP_LOCAL, fp); switch (xd->xc_core.elf_class) { case ELFCLASS32: fpsave = xd->ofp; xd->ofp = fp; xc_core_elf_dump(); offset32 = xd->xc_core.elf32->e_shoff; for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { xc_core_dump_Elf32_Shdr(offset32, ELFREAD); offset32 += xd->xc_core.elf32->e_shentsize; } xendump_print("\n"); xd->ofp = fpsave; break; case ELFCLASS64: fpsave = xd->ofp; xd->ofp = fp; xc_core_elf_dump(); offset64 = xd->xc_core.elf64->e_shoff; for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { xc_core_dump_Elf64_Shdr(offset64, ELFREAD); offset64 += xd->xc_core.elf64->e_shentsize; } xendump_print("\n"); xd->ofp = fpsave; break; } return 0; } static void xen_dump_vmconfig(FILE *fp) { int i, opens, closes; char *p; opens = closes = 0; p = xd->xc_save.vmconfig_buf; for (i = 0; i < xd->xc_save.vmconfig_size; i++, p++) { if (ascii(*p)) fprintf(fp, "%c", *p); else fprintf(fp, "<%x>", *p); if (*p == '(') opens++; else if (*p == ')') closes++; } fprintf(fp, "\n"); if (opens != closes) error(WARNING, "invalid vmconfig contents?\n"); } /* * Looking at the active set, try to determine who panicked, * or who was the "suspend" kernel thread. */ ulong get_xendump_panic_task(void) { int i; ulong task; struct task_context *tc; switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) { case XC_CORE_ORIG: case XC_CORE_ELF: if (machdep->xendump_panic_task) return (machdep->xendump_panic_task((void *)xd)); break; case XC_SAVE: for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->active_set[i])) continue; tc = task_to_context(task); if (is_kernel_thread(task) && STREQ(tc->comm, "suspend")) return tc->task; } break; } return NO_TASK; } /* * Figure out the back trace hooks. 
*/ void get_xendump_regs(struct bt_info *bt, ulong *pc, ulong *sp) { int i; ulong *up; if ((tt->panic_task == bt->task) && (xd->panic_pc && xd->panic_sp)) { *pc = xd->panic_pc; *sp = xd->panic_sp; return; } switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) { case XC_CORE_ORIG: case XC_CORE_ELF: if (machdep->get_xendump_regs) return (machdep->get_xendump_regs(xd, bt, pc, sp)); break; case XC_SAVE: if (tt->panic_task != bt->task) break; for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++) { if (is_kernel_text(*up) && (STREQ(closest_symbol(*up), "__do_suspend"))) { *pc = *up; *sp = tt->flags & THREAD_INFO ? bt->tc->thread_info + (i * sizeof(long)) : bt->task + (i * sizeof(long)); xd->panic_pc = *pc; xd->panic_sp = *sp; return; } } } machdep->get_stack_frame(bt, pc, sp); } /* * Farm out most of the work to the proper architecture to create * the p2m table. For ELF core dumps, create the index;pfn table. */ static void xc_core_create_pfn_tables(void) { if (xd->flags & XC_CORE_P2M_CREATE) { if (!machdep->xendump_p2m_create) error(FATAL, "xen xc_core dumpfiles not supported on this architecture"); if (!machdep->xendump_p2m_create((void *)xd)) error(FATAL, "cannot create xen pfn-to-mfn mapping\n"); } if (xd->flags & XC_CORE_PFN_CREATE) xc_core_elf_pfn_init(); xd->flags &= ~(XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE); if (CRASHDEBUG(1)) xendump_memory_dump(xd->ofp); } /* * Find the page index containing the mfn, and read the * machine page into the buffer. 
*/ char * xc_core_mfn_to_page(ulong mfn, char *pgbuf) { int i, b, idx, done; ulong tmp[MAX_BATCH_SIZE]; off_t offset; size_t size; uint nr_pages; if (xd->flags & XC_CORE_ELF) return xc_core_elf_mfn_to_page(mfn, pgbuf); if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return NULL; } nr_pages = xd->xc_core.header.xch_nr_pages; if (xd->flags & XC_CORE_64BIT_HOST) nr_pages *= 2; for (b = 0, idx = -1, done = FALSE; !done && (b < nr_pages); b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return NULL; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) { done = TRUE; break; } if (tmp[i] == mfn) { idx = i+b; if (CRASHDEBUG(4)) fprintf(xd->ofp, "page: found mfn 0x%lx (%ld) at index %d\n", mfn, mfn, idx); done = TRUE; } } } if (idx == -1) { error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", mfn, mfn); return NULL; } if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to xch_pages_offset\n"); return NULL; } offset = (off_t)(idx) * (off_t)xd->page_size; if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { error(INFO, "cannot lseek to mfn-specified page\n"); return NULL; } if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { error(INFO, "cannot read mfn-specified page\n"); return NULL; } return pgbuf; } /* * Find the page index containing the mfn, and read the * machine page into the buffer. 
*/ static char * xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf) { int i, b, idx, done; off_t offset; size_t size; uint nr_pages; ulong tmp; struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = 0, idx = -1, done = FALSE; !done && (b < nr_pages); b += MAX_BATCH_SIZE) { size = sizeof(struct xen_dumpcore_p2m) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &p2m_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return NULL; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) { done = TRUE; break; } tmp = (ulong)p2m_batch[i].gmfn; if (tmp == mfn) { idx = i+b; if (CRASHDEBUG(4)) fprintf(xd->ofp, "page: found mfn 0x%lx (%ld) at index %d\n", mfn, mfn, idx); done = TRUE; } } } if (idx == -1) { error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", mfn, mfn); return NULL; } if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_pages_offset\n"); offset = (off_t)(idx) * (off_t)xd->page_size; if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { error(INFO, "cannot lseek to mfn-specified page\n"); return NULL; } if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { error(INFO, "cannot read mfn-specified page\n"); return NULL; } return pgbuf; } /* * Find and return the page index containing the mfn. 
*/ int xc_core_mfn_to_page_index(ulong mfn) { int i, b; ulong tmp[MAX_BATCH_SIZE]; uint nr_pages; size_t size; if (xd->flags & XC_CORE_ELF) return xc_core_elf_mfn_to_page_index(mfn); if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return MFN_NOT_FOUND; } nr_pages = xd->xc_core.header.xch_nr_pages; if (xd->flags & XC_CORE_64BIT_HOST) nr_pages *= 2; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return MFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if (tmp[i] == mfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found mfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, mfn, mfn, i+b); return (i+b); } } } return MFN_NOT_FOUND; } /* * Find and return the page index containing the mfn. */ static int xc_core_elf_mfn_to_page_index(ulong mfn) { int i, b; off_t offset; size_t size; uint nr_pages; ulong tmp; struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(struct xen_dumpcore_p2m) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &p2m_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return MFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; tmp = (ulong)p2m_batch[i].gmfn; if (tmp == mfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found mfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, mfn, mfn, i+b); return (i+b); } } } return MFN_NOT_FOUND; } /* * XC_CORE mfn-related utility function. 
*/ static int xc_core_mfns(ulong arg, FILE *ofp) { int i, b; uint nr_pages; ulong tmp[MAX_BATCH_SIZE]; ulonglong tmp64[MAX_BATCH_SIZE]; size_t size; if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return FALSE; } switch (arg) { case XC_CORE_64BIT_HOST: /* * Determine whether this is a 32-bit guest xendump that * was taken on a 64-bit xen host. */ if (machine_type("X86_64") || machine_type("IA64")) return FALSE; check_next_4: if (read(xd->xfd, tmp, sizeof(ulong) * 4) != (4 * sizeof(ulong))) { error(INFO, "cannot read index pages\n"); return FALSE; } if ((tmp[0] == 0xffffffff) || (tmp[1] == 0xffffffff) || (tmp[2] == 0xffffffff) || (tmp[3] == 0xffffffff) || (!tmp[0] && !tmp[1]) || (!tmp[2] && !tmp[3])) goto check_next_4; if (CRASHDEBUG(2)) fprintf(ofp, "mfns: %08lx %08lx %08lx %08lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); if (tmp[0] && !tmp[1] && tmp[2] && !tmp[3]) return TRUE; else return FALSE; case XENDUMP_LOCAL: if (BITS64() || (xd->flags & XC_CORE_64BIT_HOST)) goto show_64bit_mfns; fprintf(ofp, "xch_index_offset mfn list:\n"); nr_pages = xd->xc_core.header.xch_nr_pages; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return FALSE; } if (b) fprintf(ofp, "\n"); for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if ((i%8) == 0) fprintf(ofp, "%s[%d]:", i ? "\n" : "", b+i); if (tmp[i] == 0xffffffff) fprintf(ofp, " INVALID"); else fprintf(ofp, " %lx", tmp[i]); } } fprintf(ofp, "\nxch_nr_pages: %d\n", xd->xc_core.header.xch_nr_pages); return TRUE; show_64bit_mfns: fprintf(ofp, "xch_index_offset mfn list: %s\n", BITS32() ? 
"(64-bit mfns)" : ""); nr_pages = xd->xc_core.header.xch_nr_pages; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulonglong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp64, size) != size) { error(INFO, "cannot read index page %d\n", b); return FALSE; } if (b) fprintf(ofp, "\n"); for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if ((i%8) == 0) fprintf(ofp, "%s[%d]:", i ? "\n" : "", b+i); if (tmp64[i] == 0xffffffffffffffffULL) fprintf(ofp, " INVALID"); else fprintf(ofp, " %llx", tmp64[i]); } } fprintf(ofp, "\nxch_nr_pages: %d\n", nr_pages); return TRUE; default: return FALSE; } } /* * Given a normal kernel pfn, determine the page index in the dumpfile. * * - First determine which of the pages making up the * phys_to_machine_mapping[] array would contain the pfn. * - From the phys_to_machine_mapping page, determine the mfn. * - Find the mfn in the dumpfile page index. */ #define PFNS_PER_PAGE (xd->page_size/sizeof(unsigned long)) static ulong xc_core_pfn_to_page_index(ulong pfn) { ulong idx, p2m_idx, mfn_idx; ulong *up, mfn; off_t offset; /* * This function does not apply when there's no p2m * mapping and/or if this is an ELF format dumpfile. */ switch (xd->flags & (XC_CORE_NO_P2M|XC_CORE_ELF)) { case (XC_CORE_NO_P2M|XC_CORE_ELF): return xc_core_elf_pfn_valid(pfn); case XC_CORE_NO_P2M: return(xc_core_pfn_valid(pfn) ? 
pfn : PFN_NOT_FOUND); case XC_CORE_ELF: return xc_core_elf_pfn_to_page_index(pfn); } idx = pfn/PFNS_PER_PAGE; if (idx >= xd->xc_core.p2m_frames) { error(INFO, "pfn: %lx is too large for dumpfile\n", pfn); return PFN_NOT_FOUND; } p2m_idx = xd->xc_core.p2m_frame_index_list[idx]; if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to xch_pages_offset\n"); return PFN_NOT_FOUND; } offset = (off_t)(p2m_idx) * (off_t)xd->page_size; if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { error(INFO, "cannot lseek to pfn-specified page\n"); return PFN_NOT_FOUND; } if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) { error(INFO, "cannot read pfn-specified page\n"); return PFN_NOT_FOUND; } up = (ulong *)xd->page; up += (pfn%PFNS_PER_PAGE); mfn = *up; if ((mfn_idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) { if (!STREQ(pc->curcmd, "search")) error(INFO, "cannot find mfn in page index\n"); return PFN_NOT_FOUND; } return mfn_idx; } /* * Search the .xen_p2m array for the target pfn, starting at a * higher batch if appropriate. This presumes that the pfns * are laid out in ascending order. */ static ulong xc_core_elf_pfn_to_page_index(ulong pfn) { int i, b, start_index; off_t offset; size_t size; uint nr_pages; ulong tmp; struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; /* * Initialize the start_index. 
*/ xd->xc_core.last_batch.accesses++; start_index = 0; if ((pfn >= xd->xc_core.last_batch.start) && (pfn <= xd->xc_core.last_batch.end)) { xd->xc_core.last_batch.duplicates++; start_index = xd->xc_core.last_batch.index; } else { for (i = 0; i <= INDEX_PFN_COUNT; i++) { if ((i == INDEX_PFN_COUNT) || (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { if (--i < 0) i = 0; start_index = xd->xc_core.elf_index_pfn[i].index; break; } } } offset += (start_index * sizeof(struct xen_dumpcore_p2m)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(struct xen_dumpcore_p2m) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &p2m_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return PFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; tmp = (ulong)p2m_batch[i].pfn; if (tmp == pfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found pfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, pfn, pfn, i+b); if ((b+MAX_BATCH_SIZE) < nr_pages) { xd->xc_core.last_batch.index = b; xd->xc_core.last_batch.start = p2m_batch[0].pfn; xd->xc_core.last_batch.end = p2m_batch[MAX_BATCH_SIZE-1].pfn; } return (i+b); } } } return PFN_NOT_FOUND; } /* * In xendumps containing INVALID_MFN markers in the page index, * return the validity of the pfn. */ static int xc_core_pfn_valid(ulong pfn) { ulong mfn; off_t offset; if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages) return FALSE; offset = xd->xc_core.header.xch_index_offset; if (xd->flags & XC_CORE_64BIT_HOST) offset += (off_t)(pfn * sizeof(ulonglong)); else offset += (off_t)(pfn * sizeof(ulong)); /* * The lseek and read should never fail, so report * any errors unconditionally. 
*/ if (lseek(xd->xfd, offset, SEEK_SET) == -1) { error(INFO, "xendump: cannot lseek to page index for pfn %lx\n", pfn); return FALSE; } if (read(xd->xfd, &mfn, sizeof(ulong)) != sizeof(ulong)) { error(INFO, "xendump: cannot read index page for pfn %lx\n", pfn); return FALSE; } /* * If it's an invalid mfn, let the caller decide whether * to display an error message (unless debugging). */ if (mfn == INVALID_MFN) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "xendump: pfn %lx contains INVALID_MFN\n", pfn); return FALSE; } return TRUE; } /* * Return the index into the .xen_pfn array containing the pfn. * If not found, return PFN_NOT_FOUND. */ static ulong xc_core_elf_pfn_valid(ulong pfn) { int i, b, start_index; off_t offset; size_t size; uint nr_pages; ulong tmp; uint64_t pfn_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; /* * Initialize the start_index. */ xd->xc_core.last_batch.accesses++; start_index = 0; if ((pfn >= xd->xc_core.last_batch.start) && (pfn <= xd->xc_core.last_batch.end)) { xd->xc_core.last_batch.duplicates++; start_index = xd->xc_core.last_batch.index; } else { for (i = 0; i <= INDEX_PFN_COUNT; i++) { if ((i == INDEX_PFN_COUNT) || (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { if (--i < 0) i = 0; start_index = xd->xc_core.elf_index_pfn[i].index; break; } } } offset += (start_index * sizeof(uint64_t)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(uint64_t) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &pfn_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return PFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; tmp = (ulong)pfn_batch[i]; if (tmp == pfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found pfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, pfn, pfn, i+b); if 
((b+MAX_BATCH_SIZE) < nr_pages) { xd->xc_core.last_batch.index = b; xd->xc_core.last_batch.start = (ulong)pfn_batch[0]; xd->xc_core.last_batch.end = (ulong)pfn_batch[MAX_BATCH_SIZE-1]; } return (i+b); } } } return PFN_NOT_FOUND; } /* * Store the panic task's stack hooks from where it was found * in get_active_set_panic_task(). */ void xendump_panic_hook(char *stack) { int i, err, argc; char *arglist[MAXARGS]; char buf[BUFSIZE]; ulong value, *sp; if (machine_type("IA64")) /* needs switch_stack address */ return; strcpy(buf, stack); argc = parse_line(buf, arglist); if ((value = htol(strip_ending_char(arglist[0], ':'), RETURN_ON_ERROR, &err)) == BADADDR) return; for (sp = (ulong *)value, i = 1; i < argc; i++, sp++) { if (strstr(arglist[i], "xen_panic_event")) { if (!readmem((ulong)sp, KVADDR, &value, sizeof(ulong), "xen_panic_event address", RETURN_ON_ERROR)) return; xd->panic_sp = (ulong)sp; xd->panic_pc = value; } else if (strstr(arglist[i], "panic") && !xd->panic_sp) { if (!readmem((ulong)sp, KVADDR, &value, sizeof(ulong), "xen_panic_event address", RETURN_ON_ERROR)) return; xd->panic_sp = (ulong)sp; xd->panic_pc = value; } } } static void xendump_print(char *fmt, ...) { char buf[BUFSIZE]; va_list ap; if (!fmt || !strlen(fmt)) return; va_start(ap, fmt); (void)vsnprintf(buf, BUFSIZE, fmt, ap); va_end(ap); if (xd->ofp) fprintf(xd->ofp, "%s", buf); else if (!XENDUMP_VALID() && CRASHDEBUG(7)) fprintf(stderr, "%s", buf); } /* * Support for xc_core ELF dumpfile format. 
*/ static int xc_core_elf_verify(char *file, char *buf) { int i; Elf32_Ehdr *elf32; Elf64_Ehdr *elf64; Elf32_Off offset32; Elf64_Off offset64; char *eheader; int swap; eheader = buf; if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) goto bailout; swap = (((eheader[EI_DATA] == ELFDATA2LSB) && (__BYTE_ORDER == __BIG_ENDIAN)) || ((eheader[EI_DATA] == ELFDATA2MSB) && (__BYTE_ORDER == __LITTLE_ENDIAN))); elf32 = (Elf32_Ehdr *)buf; elf64 = (Elf64_Ehdr *)buf; if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && (swap16(elf32->e_type, swap) == ET_CORE) && (swap32(elf32->e_version, swap) == EV_CURRENT) && (swap16(elf32->e_shnum, swap) > 0)) { switch (swap16(elf32->e_machine, swap)) { case EM_386: if (machine_type_mismatch(file, "X86", NULL, 0)) goto bailout; break; default: if (machine_type_mismatch(file, "(unknown)", NULL, 0)) goto bailout; break; } if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0)) goto bailout; xd->xc_core.elf_class = ELFCLASS32; if ((xd->xc_core.elf32 = (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr))) == NULL) { fprintf(stderr, "cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(buf, xd->xc_core.elf32, sizeof(Elf32_Ehdr)); } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && (swap16(elf64->e_type, swap) == ET_CORE) && (swap32(elf64->e_version, swap) == EV_CURRENT) && (swap16(elf64->e_shnum, swap) > 0)) { switch (swap16(elf64->e_machine, swap)) { case EM_IA_64: if (machine_type_mismatch(file, "IA64", NULL, 0)) goto bailout; break; case EM_X86_64: if (machine_type_mismatch(file, "X86_64", "X86", 0)) goto bailout; break; case EM_386: if (machine_type_mismatch(file, "X86", NULL, 0)) goto bailout; break; case EM_ARM: if (machine_type_mismatch(file, "ARM", NULL, 0)) goto bailout; break; case EM_AARCH64: if (machine_type_mismatch(file, "ARM64", NULL, 0)) goto bailout; break; default: if (machine_type_mismatch(file, "(unknown)", NULL, 0)) goto bailout; } if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0)) goto bailout; 
xd->xc_core.elf_class = ELFCLASS64; if ((xd->xc_core.elf64 = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr))) == NULL) { fprintf(stderr, "cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(buf, xd->xc_core.elf64, sizeof(Elf64_Ehdr)); } else { if (CRASHDEBUG(1)) error(INFO, "%s: not a xen ELF core file\n", file); goto bailout; } xc_core_elf_dump(); switch (xd->xc_core.elf_class) { case ELFCLASS32: offset32 = xd->xc_core.elf32->e_shoff; for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { xc_core_dump_Elf32_Shdr(offset32, ELFSTORE); offset32 += xd->xc_core.elf32->e_shentsize; } xendump_print("\n"); break; case ELFCLASS64: offset64 = xd->xc_core.elf64->e_shoff; for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { xc_core_dump_Elf64_Shdr(offset64, ELFSTORE); offset64 += xd->xc_core.elf64->e_shentsize; } xendump_print("\n"); break; } xd->flags |= (XENDUMP_LOCAL | XC_CORE_ELF); if (!xd->page_size) error(FATAL, "unknown page size: use -p command line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); for (i = 0; i < INDEX_PFN_COUNT; i++) xd->xc_core.elf_index_pfn[i].pfn = ~0UL; if (CRASHDEBUG(1)) xendump_memory_dump(fp); return TRUE; bailout: return FALSE; } /* * Dump the relevant ELF header. */ static void xc_core_elf_dump(void) { switch (xd->xc_core.elf_class) { case ELFCLASS32: xc_core_dump_Elf32_Ehdr(xd->xc_core.elf32); break; case ELFCLASS64: xc_core_dump_Elf64_Ehdr(xd->xc_core.elf64); break; } } /* * Dump the 32-bit ELF header, and grab a pointer to the strtab section. 
*/

/*
 * Dump the 32-bit ELF header, decoding each e_ident/e_type/e_machine value,
 * and record the file offset of the section-header string table in
 * xd->xc_core.elf_strtab_offset for later name lookups.
 *
 * Fix: the ELFCLASSNONE case was missing its trailing newline, unlike
 * every sibling case in the same switch.
 */
static void
xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *elf)
{
	char buf[BUFSIZE];
	Elf32_Off offset32;
	Elf32_Shdr shdr;

	BZERO(buf, BUFSIZE);
	BCOPY(elf->e_ident, buf, SELFMAG);
	xendump_print("\nElf32_Ehdr:\n");
	xendump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]);
	xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
	switch (elf->e_ident[EI_CLASS])
	{
	case ELFCLASSNONE:
		xendump_print("(ELFCLASSNONE)\n"); /* was missing "\n" */
		break;
	case ELFCLASS32:
		xendump_print("(ELFCLASS32)\n");
		break;
	case ELFCLASS64:
		xendump_print("(ELFCLASS64)\n");
		break;
	case ELFCLASSNUM:
		xendump_print("(ELFCLASSNUM)\n");
		break;
	default:
		xendump_print("(?)\n");
		break;
	}
	xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
	switch (elf->e_ident[EI_DATA])
	{
	case ELFDATANONE:
		xendump_print("(ELFDATANONE)\n");
		break;
	case ELFDATA2LSB:
		xendump_print("(ELFDATA2LSB)\n");
		break;
	case ELFDATA2MSB:
		xendump_print("(ELFDATA2MSB)\n");
		break;
	case ELFDATANUM:
		xendump_print("(ELFDATANUM)\n");
		break;
	default:
		xendump_print("(?)\n");
	}
	xendump_print(" e_ident[EI_VERSION]: %d ",
	    elf->e_ident[EI_VERSION]);
	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
		xendump_print("(EV_CURRENT)\n");
	else
		xendump_print("(?)\n");
	xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
	switch (elf->e_ident[EI_OSABI])
	{
	case ELFOSABI_SYSV:
		xendump_print("(ELFOSABI_SYSV)\n");
		break;
	case ELFOSABI_HPUX:
		xendump_print("(ELFOSABI_HPUX)\n");
		break;
	case ELFOSABI_ARM:
		xendump_print("(ELFOSABI_ARM)\n");
		break;
	case ELFOSABI_STANDALONE:
		xendump_print("(ELFOSABI_STANDALONE)\n");
		break;
	default:
		xendump_print("(?)\n");
	}
	xendump_print(" e_ident[EI_ABIVERSION]: %d\n",
	    elf->e_ident[EI_ABIVERSION]);

	xendump_print(" e_type: %d ", elf->e_type);
	switch (elf->e_type)
	{
	case ET_NONE:
		xendump_print("(ET_NONE)\n");
		break;
	case ET_REL:
		xendump_print("(ET_REL)\n");
		break;
	case ET_EXEC:
		xendump_print("(ET_EXEC)\n");
		break;
	case ET_DYN:
		xendump_print("(ET_DYN)\n");
		break;
	case ET_CORE:
		xendump_print("(ET_CORE)\n");
		break;
	case ET_NUM:
		xendump_print("(ET_NUM)\n");
		break;
	case ET_LOOS:
		xendump_print("(ET_LOOS)\n");
		break;
	case ET_HIOS:
		xendump_print("(ET_HIOS)\n");
		break;
	case ET_LOPROC:
		xendump_print("(ET_LOPROC)\n");
		break;
	case ET_HIPROC:
		xendump_print("(ET_HIPROC)\n");
		break;
	default:
		xendump_print("(?)\n");
	}

	xendump_print(" e_machine: %d ", elf->e_machine);
	switch (elf->e_machine)
	{
	case EM_386:
		xendump_print("(EM_386)\n");
		break;
	default:
		xendump_print("(unsupported)\n");
		break;
	}

	xendump_print(" e_version: %ld ", (ulong)elf->e_version);
	xendump_print("%s\n",
	    elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : "");
	xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry);
	xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff);
	xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff);
	xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags);
	xendump_print(" e_ehsize: %x\n", elf->e_ehsize);
	xendump_print(" e_phentsize: %x\n", elf->e_phentsize);
	xendump_print(" e_phnum: %x\n", elf->e_phnum);
	xendump_print(" e_shentsize: %x\n", elf->e_shentsize);
	xendump_print(" e_shnum: %x\n", elf->e_shnum);
	xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx);

	/* Determine the strtab location. */
	offset32 = elf->e_shoff +
	    (elf->e_shstrndx * elf->e_shentsize);

	if (lseek(xd->xfd, offset32, SEEK_SET) != offset32)
		error(FATAL,
		    "xc_core_dump_Elf32_Ehdr: cannot seek to strtab Elf32_Shdr\n");
	if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf32_Ehdr: cannot read strtab Elf32_Shdr\n");

	xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset;
}

/*
 * Dump the 64-bit ELF header, and grab a pointer to the strtab section.
*/

/*
 * Dump the 64-bit ELF header, decoding each e_ident/e_type/e_machine value,
 * and record the file offset of the section-header string table in
 * xd->xc_core.elf_strtab_offset for later name lookups.
 *
 * Fixes:
 *  - the strtab section header was read with sizeof(Elf32_Shdr) (40 bytes)
 *    into an Elf64_Shdr (64 bytes); it only worked by accident because
 *    sh_offset lies within the first 40 bytes of Elf64_Shdr.  Read the
 *    full Elf64_Shdr.
 *  - both error messages said "Elf32_Shdr" (copy-paste from the 32-bit
 *    variant).
 *  - the ELFCLASSNONE case was missing its trailing newline.
 */
static void
xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *elf)
{
	char buf[BUFSIZE];
	Elf64_Off offset64;
	Elf64_Shdr shdr;

	BZERO(buf, BUFSIZE);
	BCOPY(elf->e_ident, buf, SELFMAG);
	xendump_print("\nElf64_Ehdr:\n");
	xendump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]);
	xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]);
	switch (elf->e_ident[EI_CLASS])
	{
	case ELFCLASSNONE:
		xendump_print("(ELFCLASSNONE)\n"); /* was missing "\n" */
		break;
	case ELFCLASS32:
		xendump_print("(ELFCLASS32)\n");
		break;
	case ELFCLASS64:
		xendump_print("(ELFCLASS64)\n");
		break;
	case ELFCLASSNUM:
		xendump_print("(ELFCLASSNUM)\n");
		break;
	default:
		xendump_print("(?)\n");
		break;
	}
	xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]);
	switch (elf->e_ident[EI_DATA])
	{
	case ELFDATANONE:
		xendump_print("(ELFDATANONE)\n");
		break;
	case ELFDATA2LSB:
		xendump_print("(ELFDATA2LSB)\n");
		break;
	case ELFDATA2MSB:
		xendump_print("(ELFDATA2MSB)\n");
		break;
	case ELFDATANUM:
		xendump_print("(ELFDATANUM)\n");
		break;
	default:
		xendump_print("(?)\n");
	}
	xendump_print(" e_ident[EI_VERSION]: %d ",
	    elf->e_ident[EI_VERSION]);
	if (elf->e_ident[EI_VERSION] == EV_CURRENT)
		xendump_print("(EV_CURRENT)\n");
	else
		xendump_print("(?)\n");
	xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]);
	switch (elf->e_ident[EI_OSABI])
	{
	case ELFOSABI_SYSV:
		xendump_print("(ELFOSABI_SYSV)\n");
		break;
	case ELFOSABI_HPUX:
		xendump_print("(ELFOSABI_HPUX)\n");
		break;
	case ELFOSABI_ARM:
		xendump_print("(ELFOSABI_ARM)\n");
		break;
	case ELFOSABI_STANDALONE:
		xendump_print("(ELFOSABI_STANDALONE)\n");
		break;
	default:
		xendump_print("(?)\n");
	}
	xendump_print(" e_ident[EI_ABIVERSION]: %d\n",
	    elf->e_ident[EI_ABIVERSION]);

	xendump_print(" e_type: %d ", elf->e_type);
	switch (elf->e_type)
	{
	case ET_NONE:
		xendump_print("(ET_NONE)\n");
		break;
	case ET_REL:
		xendump_print("(ET_REL)\n");
		break;
	case ET_EXEC:
		xendump_print("(ET_EXEC)\n");
		break;
	case ET_DYN:
		xendump_print("(ET_DYN)\n");
		break;
	case ET_CORE:
		xendump_print("(ET_CORE)\n");
		break;
	case ET_NUM:
		xendump_print("(ET_NUM)\n");
		break;
	case ET_LOOS:
		xendump_print("(ET_LOOS)\n");
		break;
	case ET_HIOS:
		xendump_print("(ET_HIOS)\n");
		break;
	case ET_LOPROC:
		xendump_print("(ET_LOPROC)\n");
		break;
	case ET_HIPROC:
		xendump_print("(ET_HIPROC)\n");
		break;
	default:
		xendump_print("(?)\n");
	}

	xendump_print(" e_machine: %d ", elf->e_machine);
	switch (elf->e_machine)
	{
	case EM_386:
		xendump_print("(EM_386)\n");
		break;
	case EM_IA_64:
		xendump_print("(EM_IA_64)\n");
		break;
	case EM_PPC64:
		xendump_print("(EM_PPC64)\n");
		break;
	case EM_X86_64:
		xendump_print("(EM_X86_64)\n");
		break;
	default:
		xendump_print("(unsupported)\n");
		break;
	}

	xendump_print(" e_version: %ld ", (ulong)elf->e_version);
	xendump_print("%s\n",
	    elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : "");
	xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry);
	xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff);
	xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff);
	xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags);
	xendump_print(" e_ehsize: %x\n", elf->e_ehsize);
	xendump_print(" e_phentsize: %x\n", elf->e_phentsize);
	xendump_print(" e_phnum: %x\n", elf->e_phnum);
	xendump_print(" e_shentsize: %x\n", elf->e_shentsize);
	xendump_print(" e_shnum: %x\n", elf->e_shnum);
	xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx);

	/* Determine the strtab location. */
	offset64 = elf->e_shoff +
	    (elf->e_shstrndx * elf->e_shentsize);

	if (lseek(xd->xfd, offset64, SEEK_SET) != offset64)
		error(FATAL,
		    "xc_core_dump_Elf64_Ehdr: cannot seek to strtab Elf64_Shdr\n");
	/* read the full 64-bit section header (was sizeof(Elf32_Shdr)) */
	if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf64_Ehdr: cannot read strtab Elf64_Shdr\n");

	xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset;
}

/*
 * Dump each 32-bit section header and the data that they reference.
*/

/*
 * Dump one 32-bit section header read from file offset 'offset'.  When
 * 'store' is set, record the file offsets of the sections this code needs
 * later (.xen_prstatus, .xen_p2m/.xen_pfn index, .xen_pages, ...) in the
 * xendump_data header, and set the corresponding xd->flags bits.
 */
static void
xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store)
{
	Elf32_Shdr shdr;
	char name[BUFSIZE];
	int i;
	char c;

	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
		error(FATAL,
		    "xc_core_dump_Elf32_Shdr: cannot seek to Elf32_Shdr\n");
	if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf32_Shdr: cannot read Elf32_Shdr\n");

	xendump_print("\nElf32_Shdr:\n");
	xendump_print(" sh_name: %lx ", shdr.sh_name);
	xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name));
	xendump_print(" sh_type: %lx ", shdr.sh_type);
	switch (shdr.sh_type)
	{
	case SHT_NULL:
		xendump_print("(SHT_NULL)\n");
		break;
	case SHT_PROGBITS:
		xendump_print("(SHT_PROGBITS)\n");
		break;
	case SHT_STRTAB:
		xendump_print("(SHT_STRTAB)\n");
		break;
	case SHT_NOTE:
		xendump_print("(SHT_NOTE)\n");
		break;
	default:
		xendump_print("\n");
		break;
	}
	xendump_print(" sh_flags: %lx\n", shdr.sh_flags);
	xendump_print(" sh_addr: %lx\n", shdr.sh_addr);
	xendump_print(" sh_offset: %lx\n", shdr.sh_offset);
	xendump_print(" sh_size: %lx\n", shdr.sh_size);
	xendump_print(" sh_link: %lx\n", shdr.sh_link);
	xendump_print(" sh_info: %lx\n", shdr.sh_info);
	xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign);
	xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize);

	/* Dump the string table itself, one name per line. */
	if (STREQ(name, ".shstrtab")) {
		if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset,
		    SEEK_SET) != xd->xc_core.elf_strtab_offset)
			error(FATAL,
			    "xc_core_dump_Elf32_Shdr: cannot seek to strtab data\n");

		xendump_print(" ");
		for (i = 0; i < shdr.sh_size; i++) {
			if (read(xd->xfd, &c, sizeof(char)) != sizeof(char))
				error(FATAL,
				    "xc_core_dump_Elf32_Shdr: cannot read strtab data\n");
			/* each NUL terminator starts a fresh output line */
			if (i && !c)
				xendump_print("\n ");
			else
				xendump_print("%c", c);
		}
	}

	if (STREQ(name, ".note.Xen"))
		xc_core_dump_elfnote((off_t)shdr.sh_offset,
		    (size_t)shdr.sh_size, store);

	if (!store)
		return;

	/* Remember the offsets of the sections used during analysis. */
	if (STREQ(name, ".xen_prstatus"))
		xd->xc_core.header.xch_ctxt_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_shared_info"))
		xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_pfn")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
	}

	if (STREQ(name, ".xen_p2m")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= XC_CORE_PFN_CREATE;
	}

	if (STREQ(name, ".xen_pages"))
		xd->xc_core.header.xch_pages_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_ia64_mapped_regs"))
		xd->xc_core.ia64_mapped_regs_offset =
		    (off_t)shdr.sh_offset;
}

/*
 * Dump each 64-bit section header and the data that they reference.
 */
static void
xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store)
{
	Elf64_Shdr shdr;
	char name[BUFSIZE];
	int i;
	char c;

	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
		error(FATAL,
		    "xc_core_dump_Elf64_Shdr: cannot seek to Elf64_Shdr\n");
	if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr))
		error(FATAL,
		    "xc_core_dump_Elf64_Shdr: cannot read Elf64_Shdr\n");

	xendump_print("\nElf64_Shdr:\n");
	xendump_print(" sh_name: %x ", shdr.sh_name);
	xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name));
	xendump_print(" sh_type: %x ", shdr.sh_type);
	switch (shdr.sh_type)
	{
	case SHT_NULL:
		xendump_print("(SHT_NULL)\n");
		break;
	case SHT_PROGBITS:
		xendump_print("(SHT_PROGBITS)\n");
		break;
	case SHT_STRTAB:
		xendump_print("(SHT_STRTAB)\n");
		break;
	case SHT_NOTE:
		xendump_print("(SHT_NOTE)\n");
		break;
	default:
		xendump_print("\n");
		break;
	}
	xendump_print(" sh_flags: %lx\n", shdr.sh_flags);
	xendump_print(" sh_addr: %lx\n", shdr.sh_addr);
	xendump_print(" sh_offset: %lx\n", shdr.sh_offset);
	xendump_print(" sh_size: %lx\n", shdr.sh_size);
	xendump_print(" sh_link: %x\n", shdr.sh_link);
	xendump_print(" sh_info: %x\n", shdr.sh_info);
	xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign);
	xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize);

	/* Dump the string table itself, one name per line. */
	if (STREQ(name, ".shstrtab")) {
		if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset,
		    SEEK_SET) != xd->xc_core.elf_strtab_offset)
			error(FATAL,
			    "xc_core_dump_Elf64_Shdr: cannot seek to strtab data\n");

		xendump_print(" ");
		for (i = 0; i < shdr.sh_size; i++) {
			if (read(xd->xfd, &c, sizeof(char)) != sizeof(char))
				error(FATAL,
				    "xc_core_dump_Elf64_Shdr: cannot read strtab data\n");
			/* each NUL terminator starts a fresh output line */
			if (i && !c)
				xendump_print("\n ");
			else
				xendump_print("%c", c);
		}
	}

	if (STREQ(name, ".note.Xen"))
		xc_core_dump_elfnote((off_t)shdr.sh_offset,
		    (size_t)shdr.sh_size, store);

	if (!store)
		return;

	/* Remember the offsets of the sections used during analysis. */
	if (STREQ(name, ".xen_prstatus"))
		xd->xc_core.header.xch_ctxt_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_shared_info"))
		xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_pfn")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE);
	}

	if (STREQ(name, ".xen_p2m")) {
		xd->xc_core.header.xch_index_offset =
		    (off_t)shdr.sh_offset;
		xd->flags |= XC_CORE_PFN_CREATE;
	}

	if (STREQ(name, ".xen_pages"))
		xd->xc_core.header.xch_pages_offset =
		    (off_t)shdr.sh_offset;

	if (STREQ(name, ".xen_ia64_mapped_regs"))
		xd->xc_core.ia64_mapped_regs_offset =
		    (off_t)shdr.sh_offset;
}

/*
 * Return the string found at the specified index into
 * the dumpfile's strtab.
 */
static char *
xc_core_strtab(uint32_t index, char *buf)
{
	off_t offset;
	int i;

	offset = xd->xc_core.elf_strtab_offset + index;

	if (lseek(xd->xfd, offset, SEEK_SET) != offset)
		error(FATAL,
		    "xc_core_strtab: cannot seek to Elf64_Shdr\n");

	/* copy bytes into the caller's buffer up to the NUL terminator */
	BZERO(buf, BUFSIZE);
	i = 0;

	while (read(xd->xfd, &buf[i], sizeof(char)) == sizeof(char)) {
		if (buf[i] == NULLCHAR)
			break;
		i++;
	}

	return buf;
}

/*
 * Dump the array of elfnote structures, storing relevant info
 * when requested during initialization.  This function is
 * common to both 32-bit and 64-bit ELF files.
*/ static void xc_core_dump_elfnote(off_t sh_offset, size_t sh_size, int store) { int i, lf, index; char *notes_buffer; struct elfnote *elfnote; ulonglong *data; struct xen_dumpcore_elfnote_header_desc *elfnote_header; struct xen_dumpcore_elfnote_format_version_desc *format_version; elfnote_header = NULL; format_version = NULL; if (!(notes_buffer = (char *)malloc(sh_size))) error(FATAL, "cannot malloc notes space."); if (lseek(xd->xfd, sh_offset, SEEK_SET) != sh_offset) error(FATAL, "xc_core_dump_elfnote: cannot seek to sh_offset\n"); if (read(xd->xfd, notes_buffer, sh_size) != sh_size) error(FATAL, "xc_core_dump_elfnote: cannot read elfnote data\n"); for (index = 0; index < sh_size; ) { elfnote = (struct elfnote *)¬es_buffer[index]; xendump_print(" namesz: %d\n", elfnote->namesz); xendump_print(" descz: %d\n", elfnote->descsz); xendump_print(" type: %x ", elfnote->type); switch (elfnote->type) { case XEN_ELFNOTE_DUMPCORE_NONE: xendump_print("(XEN_ELFNOTE_DUMPCORE_NONE)\n"); break; case XEN_ELFNOTE_DUMPCORE_HEADER: xendump_print("(XEN_ELFNOTE_DUMPCORE_HEADER)\n"); elfnote_header = (struct xen_dumpcore_elfnote_header_desc *) (elfnote+1); break; case XEN_ELFNOTE_DUMPCORE_XEN_VERSION: xendump_print("(XEN_ELFNOTE_DUMPCORE_XEN_VERSION)\n"); break; case XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION: xendump_print("(XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION)\n"); format_version = (struct xen_dumpcore_elfnote_format_version_desc *) (elfnote+1); break; default: xendump_print("(unknown)\n"); break; } xendump_print(" name: %s\n", elfnote->name); data = (ulonglong *)(elfnote+1); for (i = lf = 0; i < elfnote->descsz/sizeof(ulonglong); i++) { if (((i%2)==0)) { xendump_print("%s ", i ? 
"\n" : ""); lf++; } else lf = 0; xendump_print("%016llx ", *data++); } if (!elfnote->descsz) xendump_print(" (empty)"); xendump_print("\n"); index += sizeof(struct elfnote) + elfnote->descsz; } if (!store) { free(notes_buffer); return; } if (elfnote_header) { xd->xc_core.header.xch_magic = elfnote_header->xch_magic; xd->xc_core.header.xch_nr_vcpus = elfnote_header->xch_nr_vcpus; xd->xc_core.header.xch_nr_pages = elfnote_header->xch_nr_pages; xd->page_size = elfnote_header->xch_page_size; } if (format_version) { switch (format_version->version) { case FORMAT_VERSION_0000000000000001: break; default: error(WARNING, "unsupported xen dump-core format version: %016llx\n", format_version->version); } xd->xc_core.format_version = format_version->version; } free(notes_buffer); } /* * Initialize the batching list for the .xen_p2m or .xen_pfn * arrays. */ static void xc_core_elf_pfn_init(void) { int i, c, chunk; off_t offset; struct xen_dumpcore_p2m p2m; uint64_t pfn; switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) { case (XC_CORE_ELF|XC_CORE_NO_P2M): chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { offset = xd->xc_core.header.xch_index_offset + (off_t)(c * sizeof(uint64_t)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index %d\n", c); if (read(xd->xfd, &pfn, sizeof(uint64_t)) != sizeof(uint64_t)) error(FATAL, "cannot read page index %d\n", c); xd->xc_core.elf_index_pfn[i].index = c; xd->xc_core.elf_index_pfn[i].pfn = (ulong)pfn; } break; case XC_CORE_ELF: chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { offset = xd->xc_core.header.xch_index_offset + (off_t)(c * sizeof(struct xen_dumpcore_p2m)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index %d\n", c); if (read(xd->xfd, &p2m, sizeof(struct xen_dumpcore_p2m)) != sizeof(struct xen_dumpcore_p2m)) error(FATAL, "cannot read page 
index %d\n", c); xd->xc_core.elf_index_pfn[i].index = c; xd->xc_core.elf_index_pfn[i].pfn = (ulong)p2m.pfn; } break; } } struct xendump_data * get_xendump_data(void) { return (XENDUMP_VALID() ? xd : NULL); } crash-utility-crash-9cd43f5/arm.c0000664000372000037200000013606715107550337016334 0ustar juerghjuergh/* * arm.c - core analysis suite * * Authors: * Thomas Fänge * Jan Karlsson * Mika Westerberg * * Copyright (C) 2010-2011 Nokia Corporation * Copyright (C) 2010 Sony Ericsson. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef ARM #include #include "defs.h" static void arm_parse_cmdline_args(void); static void arm_get_crash_notes(void); static int arm_verify_symbol(const char *, ulong, char); static int arm_is_module_addr(ulong); static int arm_is_kvaddr(ulong); static int arm_is_uvaddr(ulong, struct task_context *); static int arm_in_exception_text(ulong); static int arm_in_ret_from_syscall(ulong, int *); static void arm_back_trace(struct bt_info *); static void arm_back_trace_cmd(struct bt_info *); static ulong arm_processor_speed(void); static int arm_translate_pte(ulong, void *, ulonglong); static int arm_vtop(ulong, ulong *, physaddr_t *, int); static int arm_kvtop(struct task_context *, ulong, physaddr_t *, int); static int arm_uvtop(struct task_context *, ulong, physaddr_t *, int); static int arm_get_frame(struct bt_info *, ulong *, ulong *); static int arm_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *); static void arm_get_stack_frame(struct bt_info *, ulong *, ulong *); 
static void arm_dump_exception_stack(ulong, ulong);
static void arm_display_full_frame(struct bt_info *, ulong);
static ulong arm_vmalloc_start(void);
static int arm_is_task_addr(ulong);
static int arm_dis_filter(ulong, char *, unsigned int);
static int arm_eframe_search(struct bt_info *);
static ulong arm_get_task_pgd(ulong);
static void arm_cmd_mach(void);
static void arm_display_machine_stats(void);
static int arm_get_smp_cpus(void);
static void arm_init_machspec(void);

static struct line_number_hook arm_line_number_hooks[];
static struct machine_specific arm_machine_specific;

/**
 * struct arm_cpu_context_save - idle task registers
 *
 * This structure holds idle task registers. Only FP, SP, and PC are needed for
 * unwinding the stack.
 */
struct arm_cpu_context_save {
	ulong	fp;
	ulong	sp;
	ulong	pc;
};

/*
 * Holds registers during the crash.
 */
static struct arm_pt_regs *panic_task_regs;

/* first-level page directory: 4 pages (16KB) on classic ARM */
#define PGDIR_SIZE()	(4 * PAGESIZE())
#define PGDIR_OFFSET(X)	(((ulong)(X)) & (PGDIR_SIZE() - 1))

#define _SECTION_PAGE_MASK	(~((MEGABYTES(1))-1))

/* hardware PMD descriptor type bits (bits [1:0]) */
#define PMD_TYPE_MASK		3
#define PMD_TYPE_SECT		2
#define PMD_TYPE_TABLE		1
#define PMD_TYPE_SECT_LPAE	1

/*
 * Return the virtual address of the page table referenced by the given
 * pmd entry.  With the v2 (>= 2.6.38) layout the hardware table base is
 * simply the page base; with the older layout the Linux copy of the PTEs
 * lives PTRS_PER_PTE entries past the hardware table.
 */
static inline ulong *
pmd_page_addr(ulong pmd)
{
	ulong ptr;

	if (machdep->flags & PGTABLE_V2) {
		ptr = PAGEBASE(pmd);
	} else {
		ptr = pmd & ~(PTRS_PER_PTE * sizeof(void *) - 1);
		ptr += PTRS_PER_PTE * sizeof(void *);
	}

	return (ulong *)ptr;
}

/*
 * "Linux" PTE definitions.
*/
/* software ("Linux") PTE bits, mirroring the kernel's arch/arm values */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_FILE		(1 << 2)
#define L_PTE_DIRTY		(1 << 6)
#define L_PTE_WRITE		(1 << 7)
#define L_PTE_RDONLY		L_PTE_WRITE	/* newer kernels renamed bit 7 */
#define L_PTE_USER		(1 << 8)
#define L_PTE_EXEC		(1 << 9)
#define L_PTE_XN		L_PTE_EXEC	/* newer kernels renamed bit 9 */
#define L_PTE_SHARED		(1 << 10)

#define pte_val(pte)		(pte)

#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_rdonly(pte)		(pte_val(pte) & L_PTE_RDONLY)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_xn(pte)		(pte_val(pte) & L_PTE_XN)

/*
 * Following stuff is taken directly from the kernel sources. These are used in
 * dump_exception_stack() to format an exception stack entry.
 */
#define USR26_MODE	0x00000000
#define FIQ26_MODE	0x00000001
#define IRQ26_MODE	0x00000002
#define SVC26_MODE	0x00000003
#define USR_MODE	0x00000010
#define FIQ_MODE	0x00000011
#define IRQ_MODE	0x00000012
#define SVC_MODE	0x00000013
#define ABT_MODE	0x00000017
#define UND_MODE	0x0000001b
#define SYSTEM_MODE	0x0000001f
#define MODE32_BIT	0x00000010
#define MODE_MASK	0x0000001f

/* CPSR status/control bits */
#define PSR_T_BIT	0x00000020
#define PSR_F_BIT	0x00000040
#define PSR_I_BIT	0x00000080
#define PSR_A_BIT	0x00000100
#define PSR_E_BIT	0x00000200
#define PSR_J_BIT	0x01000000
#define PSR_Q_BIT	0x08000000
#define PSR_V_BIT	0x10000000
#define PSR_C_BIT	0x20000000
#define PSR_Z_BIT	0x40000000
#define PSR_N_BIT	0x80000000

/* J:T bits of the CPSR select the instruction set -- index into isa_modes[] */
#define isa_mode(regs) \
	((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \
	 (((regs)->ARM_cpsr & PSR_T_BIT) >> 5))

#define processor_mode(regs) \
	((regs)->ARM_cpsr & MODE_MASK)

#define interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_I_BIT))

#define fast_interrupts_enabled(regs) \
	(!((regs)->ARM_cpsr & PSR_F_BIT))

/* indexed by processor_mode(); UKn_* entries are reserved/unknown modes */
static const char *processor_modes[] = {
	"USER_26", "FIQ_26", "IRQ_26", "SVC_26",
	"UK4_26", "UK5_26", "UK6_26", "UK7_26",
	"UK8_26", "UK9_26", "UK10_26",
	"UK11_26", "UK12_26", "UK13_26", "UK14_26",
	"UK15_26", "USER_32", "FIQ_32", "IRQ_32",
	"SVC_32", "UK4_32", "UK5_32", "UK6_32",
	"ABT_32", "UK8_32", "UK9_32", "UK10_32",
	"UND_32", "UK12_32", "UK13_32", "UK14_32",
	"SYS_32",
};

/* indexed by isa_mode() */
static const char *isa_modes[] = {
	"ARM", "Thumb", "Jazelle", "ThumbEE",
};

#define NOT_IMPLEMENTED() \
	error(FATAL, "%s: N/A\n", __func__)

/*
 * Do all necessary machine-specific setup here. This is called several times
 * during initialization.
 */
void
arm_init(int when)
{
	ulong vaddr;
	char *string;
	struct syment *sp;

#if defined(__i386__) || defined(__x86_64__)
	/* live analysis from a non-ARM host makes no sense */
	if (ACTIVE())
		error(FATAL, "compiled for the ARM architecture\n");
#endif

	switch (when) {
	case PRE_SYMTAB:
		machdep->verify_symbol = arm_verify_symbol;
		machdep->machspec = &arm_machine_specific;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 2;
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->verify_paddr = generic_verify_paddr;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		if (machdep->cmdline_args[0])
			arm_parse_cmdline_args();
		break;

	case PRE_GDB:
		if ((machdep->pgd = (char *)malloc(PGDIR_SIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pmd = (char *)malloc(PMDSIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");

		/*
		 * LPAE requires an additional page for the PGD,
		 * so PG_DIR_SIZE = 0x5000 for LPAE
		 */
		if ((string = pc->read_vmcoreinfo("CONFIG_ARM_LPAE"))) {
			machdep->flags |= PAE;
			free(string);
		} else if ((sp = next_symbol("swapper_pg_dir", NULL)) &&
		    (sp->value - symbol_value("swapper_pg_dir")) == 0x5000)
			machdep->flags |= PAE;
		machdep->kvbase = symbol_value("_stext") & ~KVBASE_MASK;
		machdep->identity_map_base = machdep->kvbase;
		machdep->is_kvaddr = arm_is_kvaddr;
		machdep->is_uvaddr = arm_is_uvaddr;
		machdep->eframe_search = arm_eframe_search;
		machdep->back_trace = arm_back_trace_cmd;
		machdep->processor_speed = arm_processor_speed;
		machdep->uvtop = arm_uvtop;
		machdep->kvtop = arm_kvtop;
		machdep->get_task_pgd = arm_get_task_pgd;
		machdep->get_stack_frame = arm_get_stack_frame;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = arm_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->vmalloc_start = arm_vmalloc_start;
		machdep->is_task_addr = arm_is_task_addr;
		machdep->dis_filter = arm_dis_filter;
		machdep->cmd_mach = arm_cmd_mach;
		machdep->get_smp_cpus = arm_get_smp_cpus;
		machdep->line_number_hooks = arm_line_number_hooks;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->init_kernel_pgd = NULL;
		machdep->dump_irq = generic_dump_irq;
		machdep->show_interrupts = generic_show_interrupts;
		machdep->get_irq_affinity = generic_get_irq_affinity;
		arm_init_machspec();
		break;

	case POST_GDB:
		/*
		 * Starting from 2.6.38 hardware and Linux page tables
		 * were reordered. See also mainline kernel commit
		 * d30e45eeabe (ARM: pgtable: switch order of Linux vs
		 * hardware page tables).
		 */
		if (THIS_KERNEL_VERSION > LINUX(2,6,37) ||
		    STRUCT_EXISTS("pteval_t"))
			machdep->flags |= PGTABLE_V2;

		if (THIS_KERNEL_VERSION >= LINUX(3,3,0) ||
		    symbol_exists("idmap_pgd"))
			machdep->flags |= IDMAP_PGD;

		if (machdep->flags & PAE) {
			machdep->section_size_bits = _SECTION_SIZE_BITS_LPAE;
			machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_LPAE;
		} else {
			machdep->section_size_bits = _SECTION_SIZE_BITS;
			machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
		}

		if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
			    "irq_desc", NULL, 0);
		else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
			    &machdep->nr_irqs);

		/*
		 * Registers for idle threads are saved in
		 * thread_info.cpu_context.
		 */
		STRUCT_SIZE_INIT(cpu_context_save, "cpu_context_save");
		MEMBER_OFFSET_INIT(cpu_context_save_r7,
		    "cpu_context_save", "r7");
		MEMBER_OFFSET_INIT(cpu_context_save_fp,
		    "cpu_context_save", "fp");
		MEMBER_OFFSET_INIT(cpu_context_save_sp,
		    "cpu_context_save", "sp");
		MEMBER_OFFSET_INIT(cpu_context_save_pc,
		    "cpu_context_save", "pc");
		MEMBER_OFFSET_INIT(thread_info_cpu_context,
		    "thread_info", "cpu_context");

		/*
		 * We need to have information about note_buf_t which is used to
		 * hold ELF note containing registers and status of the thread
		 * that panic'd.
		 */
		STRUCT_SIZE_INIT(note_buf, "note_buf_t");
		STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus");
		MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus",
		    "pr_pid");
		MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus",
		    "pr_reg");

		if (!machdep->hz)
			machdep->hz = 100;
		break;

	case POST_VM:
		machdep->machspec->vmalloc_start_addr = vt->high_memory;

		/*
		 * Modules are placed in first vmalloc'd area. This is 16MB
		 * below PAGE_OFFSET.
		 */
		machdep->machspec->modules_end = machdep->kvbase - 1;
		vaddr = first_vmalloc_address();
		if (vaddr > machdep->machspec->modules_end)
			machdep->machspec->modules_vaddr = DEFAULT_MODULES_VADDR;
		else
			machdep->machspec->modules_vaddr = vaddr;

		/*
		 * crash_notes contains machine specific information about the
		 * crash. In particular, it contains CPU registers at the time
		 * of the crash. We need this information to extract correct
		 * backtraces from the panic task.
		 */
		if (!ACTIVE())
			arm_get_crash_notes();

		/* prefer unwind tables; fall back to frame pointers */
		if (init_unwind_tables()) {
			if (CRASHDEBUG(1))
				fprintf(fp, "using unwind tables\n");
		} else {
			if (CRASHDEBUG(1))
				fprintf(fp, "using framepointers\n");
		}
		break;

	case LOG_ONLY:
		machdep->machspec = &arm_machine_specific;
		machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL & 0xffff0000UL;
		arm_init_machspec();
		break;
	}
}

/*
 * Dump the machdep table and ARM machine-specific data ("help -m").
 */
void
arm_dump_machdep_table(ulong arg)
{
	const struct machine_specific *ms;
	int others, i;

	others = 0;
	fprintf(fp, " flags: %lx (", machdep->flags);
	if (machdep->flags & KSYMS_START)
		fprintf(fp, "%sKSYMS_START", others++ ? "|" : "");
	if (machdep->flags & PHYS_BASE)
		fprintf(fp, "%sPHYS_BASE", others++ ? "|" : "");
	if (machdep->flags & PGTABLE_V2)
		fprintf(fp, "%sPGTABLE_V2", others++ ? "|" : "");
	if (machdep->flags & IDMAP_PGD)
		fprintf(fp, "%sIDMAP_PGD", others++ ? "|" : "");
	if (machdep->flags & PAE)
		fprintf(fp, "%sPAE", others++ ?
		    "|" : "");
	fprintf(fp, ")\n");

	fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
	/* NOTE(review): prints machdep->kvbase, not machdep->identity_map_base;
	 * the two are assigned the same value in arm_init() -- verify intent */
	fprintf(fp, " identity_map_base: %lx\n", machdep->kvbase);
	fprintf(fp, " pagesize: %d\n", machdep->pagesize);
	fprintf(fp, " pageshift: %d\n", machdep->pageshift);
	fprintf(fp, " pagemask: %lx\n", (ulong)machdep->pagemask);
	fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset);
	fprintf(fp, " stacksize: %ld\n", machdep->stacksize);
	fprintf(fp, " hz: %d\n", machdep->hz);
	fprintf(fp, " mhz: %ld\n", machdep->mhz);
	fprintf(fp, " memsize: %lld (0x%llx)\n",
	    machdep->memsize, machdep->memsize);
	fprintf(fp, " bits: %d\n", machdep->bits);
	fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs);
	fprintf(fp, " eframe_search: arm_eframe_search()\n");
	fprintf(fp, " back_trace: arm_back_trace_cmd()\n");
	fprintf(fp, " processor_speed: arm_processor_speed()\n");
	fprintf(fp, " uvtop: arm_uvtop()\n");
	fprintf(fp, " kvtop: arm_kvtop()\n");
	fprintf(fp, " get_task_pgd: arm_get_task_pgd()\n");
	fprintf(fp, " dump_irq: generic_dump_irq()\n");
	fprintf(fp, " get_stack_frame: arm_get_stack_frame()\n");
	fprintf(fp, " get_stackbase: generic_get_stackbase()\n");
	fprintf(fp, " get_stacktop: generic_get_stacktop()\n");
	fprintf(fp, " translate_pte: arm_translate_pte()\n");
	fprintf(fp, " memory_size: generic_memory_size()\n");
	fprintf(fp, " vmalloc_start: arm_vmalloc_start()\n");
	fprintf(fp, " is_task_addr: arm_is_task_addr()\n");
	fprintf(fp, " verify_symbol: arm_verify_symbol()\n");
	fprintf(fp, " dis_filter: arm_dis_filter()\n");
	fprintf(fp, " cmd_mach: arm_cmd_mach()\n");
	fprintf(fp, " get_smp_cpus: arm_get_smp_cpus()\n");
	fprintf(fp, " is_kvaddr: arm_is_kvaddr()\n");
	fprintf(fp, " is_uvaddr: arm_is_uvaddr()\n");
	fprintf(fp, " verify_paddr: generic_verify_paddr()\n");
	fprintf(fp, " show_interrupts: generic_show_interrupts()\n");
	fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n");
	fprintf(fp, " xendump_p2m_create: NULL\n");
	fprintf(fp, "xen_kdump_p2m_create: NULL\n");
	fprintf(fp, " line_number_hooks: arm_line_number_hooks\n");
	fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read);
	fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read);
	fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read);
	fprintf(fp, "clear_machdep_cache: NULL\n");
	fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd);
	fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd);
	fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl);
	fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
	fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits);
	fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits);
	fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root);

	for (i = 0; i < MAX_MACHDEP_ARGS; i++) {
		fprintf(fp, " cmdline_args[%d]: %s\n",
		    i, machdep->cmdline_args[i] ?
		    machdep->cmdline_args[i] : "(unused)");
	}

	/* machine-specific portion */
	ms = machdep->machspec;

	fprintf(fp, " machspec: %lx\n", (ulong)ms);
	fprintf(fp, " phys_base: %lx\n", ms->phys_base);
	fprintf(fp, " vmalloc_start_addr: %lx\n", ms->vmalloc_start_addr);
	fprintf(fp, " modules_vaddr: %lx\n", ms->modules_vaddr);
	fprintf(fp, " modules_end: %lx\n", ms->modules_end);
	fprintf(fp, " kernel_text_start: %lx\n", ms->kernel_text_start);
	fprintf(fp, " kernel_text_end: %lx\n", ms->kernel_text_end);
	fprintf(fp, "exception_text_start: %lx\n", ms->exception_text_start);
	fprintf(fp, " exception_text_end: %lx\n", ms->exception_text_end);
	fprintf(fp, " crash_task_regs: %lx\n", (ulong)ms->crash_task_regs);
	fprintf(fp, "unwind_index_prel31: %d\n", ms->unwind_index_prel31);
}

/*
 * Parse machine dependent command line arguments.
 *
 * Force the phys_base address via:
 *
 *	--machdep phys_base=<addr>
*/ static void arm_parse_cmdline_args(void) { int index, i, c, err; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *p; ulong value = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %x\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { err = 0; if (STRNEQ(arglist[i], "phys_base=")) { int megabytes = FALSE; int flags = RETURN_ON_ERROR | QUIET; if ((LASTCHAR(arglist[i]) == 'm') || (LASTCHAR(arglist[i]) == 'M')) { LASTCHAR(arglist[i]) = NULLCHAR; megabytes = TRUE; } p = arglist[i] + strlen("phys_base="); if (strlen(p)) { if (megabytes) value = dtol(p, flags, &err); else value = htol(p, flags, &err); } if (!err) { if (megabytes) value = MEGABYTES(value); machdep->machspec->phys_base = value; error(NOTE, "setting phys_base to: 0x%lx\n", machdep->machspec->phys_base); machdep->flags |= PHYS_BASE; continue; } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } } } /* * Retrieve task registers for the time of the crash. */ static void arm_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf32_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i, found; if (!symbol_exists("crash_notes")) return; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. 
*/ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return; } if (symbol_exists("__per_cpu_offset")) { /* Add __per_cpu_offset for each cpu to form the pointer to the notes */ for (i = 0; icpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = found = 0; icpus; i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", i); continue; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf32_Nhdr *)buf; p = buf + sizeof(Elf32_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. */ long notesz = SIZE(note_buf) - sizeof(Elf32_Nhdr); if (sizeof(Elf32_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i); continue; } } /* * Check the sanity of NT_PRSTATUS note only for each online cpu. * If this cpu has invalid note, continue to find the crash notes * for other online cpus. 
*/ if (note->n_type != NT_PRSTATUS) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", i); continue; } if (!STRNEQ(p, "CORE")) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", i); continue; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. */ offset = sizeof(Elf32_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); found++; } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); if (!found) { free(panic_task_regs); ms->crash_task_regs = NULL; } } /* * Accept or reject a symbol from the kernel namelist. */ static int arm_verify_symbol(const char *name, ulong value, char type) { if (STREQ(name, "swapper_pg_dir")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if (STREQ(name, "$a") || STREQ(name, "$n") || STREQ(name, "$d")) return FALSE; if (STREQ(name, "PRRR") || STREQ(name, "NMRR")) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); return TRUE; } static int arm_is_module_addr(ulong vaddr) { ulong modules_start; ulong modules_end = machdep->machspec->modules_end; if (!MODULES_VADDR) { /* * In case we are still initializing, and vm_init() has not been * called, we use defaults here which is 16MB below kernel start * address. 
*/ modules_start = DEFAULT_MODULES_VADDR; } else { modules_start = MODULES_VADDR; } return (vaddr >= modules_start && vaddr <= modules_end); } int arm_is_vmalloc_addr(ulong vaddr) { if (arm_is_module_addr(vaddr)) return TRUE; if (!VMALLOC_START) return FALSE; return (vaddr >= VMALLOC_START); } /* * Check whether given address falls inside kernel address space (including * modules). */ static int arm_is_kvaddr(ulong vaddr) { if (arm_is_module_addr(vaddr)) return TRUE; return (vaddr >= machdep->kvbase); } static int arm_is_uvaddr(ulong vaddr, struct task_context *unused) { if (arm_is_module_addr(vaddr)) return FALSE; return (vaddr < machdep->kvbase); } /* * Returns TRUE if given pc is in exception area. */ static int arm_in_exception_text(ulong pc) { ulong exception_start = machdep->machspec->exception_text_start; ulong exception_end = machdep->machspec->exception_text_end; if (exception_start && exception_end) return (pc >= exception_start && pc < exception_end); return FALSE; } /* * Returns TRUE if given pc points to a return from syscall * entrypoint. In case the function returns TRUE and if offset is given, * it is filled with the offset that should be added to the SP to get * address of the exception frame where the user registers are. */ static int arm_in_ret_from_syscall(ulong pc, int *offset) { /* * On fast syscall return path, the stack looks like: * * SP + 0 {r4, r5} * SP + 8 user pt_regs * * The asm syscall handler pushes fifth and sixth registers * onto the stack before calling the actual syscall handler. * * So in order to print out the user registers at the time * the syscall was made, we need to adjust SP for 8. */ if (pc == symbol_value("ret_fast_syscall")) { if (offset) *offset = 8; return TRUE; } /* * In case we are on the slow syscall path, the SP already * points to the start of the user registers hence no * adjustments needs to be done. 
*/ if (pc == symbol_value("ret_slow_syscall")) { if (offset) *offset = 0; return TRUE; } return FALSE; } /* * Unroll the kernel stack using a minimal amount of gdb services. */ static void arm_back_trace(struct bt_info *bt) { int n = 0; /* * In case bt->machdep contains pointer to a full register set, we take * FP from there. */ if (bt->machdep) { const struct arm_pt_regs *regs = bt->machdep; bt->frameptr = regs->ARM_fp; } /* * Stack frame layout: * optionally saved caller registers (r4 - r10) * saved fp * saved sp * saved lr * frame => saved pc * optionally saved arguments (r0 - r3) * saved sp => * * Functions start with the following code sequence: * mov ip, sp * stmfd sp!, {r0 - r3} (optional) * corrected pc => stmfd sp!, {..., fp, ip, lr, pc} */ while (bt->frameptr && INSTACK(bt->frameptr, bt)) { ulong from; ulong sp; /* * We correct the PC to point to the actual instruction (current * value is PC + 8). */ bt->instptr = GET_STACK_ULONG(bt->frameptr - 0); bt->instptr -= 8; /* * Now get LR, saved SP and FP from the frame as well. */ from = GET_STACK_ULONG(bt->frameptr - 4); sp = GET_STACK_ULONG(bt->frameptr - 8); bt->frameptr = GET_STACK_ULONG(bt->frameptr - 12); arm_dump_backtrace_entry(bt, n++, from, sp); bt->stkptr = sp; } } /* * Unroll a kernel stack. */ static void arm_back_trace_cmd(struct bt_info *bt) { if (bt->flags & BT_REGS_NOT_FOUND) return; if (kt->flags & DWARF_UNWIND) unwind_backtrace(bt); else arm_back_trace(bt); } /* * Calculate and return the speed of the processor. */ static ulong arm_processor_speed(void) { /* * For now, we don't support reading CPU speed. */ return 0; } /* * Translate a PTE, returning TRUE if the page is present. If a physaddr pointer * is passed in, don't print anything. 
*/ static int arm_translate_pte(ulong pte, void *physaddr, ulonglong lpae_pte) { char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char buf[BUFSIZE]; int page_present; ulonglong paddr; int len1, len2, others; if (machdep->flags & PAE) { paddr = LPAE_PAGEBASE(lpae_pte); sprintf(ptebuf, "%llx", lpae_pte); pte = (ulong)lpae_pte; } else { paddr = PAGEBASE(pte); sprintf(ptebuf, "%lx", pte); } page_present = pte_present(pte); if (physaddr) { if (machdep->flags & PAE) *((ulonglong *)physaddr) = paddr; else *((ulong *)physaddr) = (ulong)paddr; return page_present; } len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE")); if (!page_present && pte) { /* swap page, not handled yet */ return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER | RJUST, NULL), mkstring(physbuf, len2, CENTER | RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte_present(pte)) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte_dirty(pte)) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (pte_young(pte)) fprintf(fp, "%sYOUNG", others++ ? "|" : ""); if (machdep->flags & PGTABLE_V2) { if (!pte_rdonly(pte)) fprintf(fp, "%sWRITE", others++ ? "|" : ""); if (!pte_xn(pte)) fprintf(fp, "%sEXEC", others++ ? "|" : ""); } else { if (pte_write(pte)) fprintf(fp, "%sWRITE", others++ ? "|" : ""); if (pte_exec(pte)) fprintf(fp, "%sEXEC", others++ ? "|" : ""); } } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return 0; } /* * Virtual to physical memory translation. This function will be called by both * arm_kvtop() and arm_uvtop(). 
*/ static int arm_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose) { char buf[BUFSIZE]; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; /* * Page tables in ARM Linux * * In hardware PGD is 16k (having 4096 pointers to PTE) and PTE is 1k * (containing 256 translations). * * Linux, however, wants to have PTEs as page sized entities. This means * that in ARM Linux we have following setup (see also * arch/arm/include/asm/pgtable.h) * * Before 2.6.38 * * PGD PTE * +---------+ * | | 0 ----> +------------+ * +- - - - -+ | h/w pt 0 | * | | 4 ----> +------------+ +1024 * +- - - - -+ | h/w pt 1 | * . . +------------+ +2048 * . . | Linux pt 0 | * . . +------------+ +3072 * | | 4095 | Linux pt 1 | * +---------+ +------------+ +4096 * * Starting from 2.6.38 * * PGD PTE * +---------+ * | | 0 ----> +------------+ * +- - - - -+ | Linux pt 0 | * | | 4 ----> +------------+ +1024 * +- - - - -+ | Linux pt 1 | * . . +------------+ +2048 * . . | h/w pt 0 | * . . +------------+ +3072 * | | 4095 | h/w pt 1 | * +---------+ +------------+ +4096 * * So in Linux implementation we have two hardware pointers to second * level page tables. Depending on the kernel version, the "Linux" page * tables either follow or precede the hardware tables. * * Linux PT entries contain bits that are not supported on hardware, for * example "young" and "dirty" flags. * * Our translation scheme only uses Linux PTEs here. */ if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); /* * pgd_offset(pgd, vaddr) */ page_dir = pgd + PGD_OFFSET(vaddr) * 2; /* The unity-mapped region is mapped using 1MB pages, * hence 1-level translation if bit 20 is set; if we * are 1MB apart physically, we move the page_dir in * case bit 20 is set. 
*/ if (((vaddr) >> (20)) & 1) page_dir = page_dir + 1; FILL_PGD(PAGEBASE(pgd), KVADDR, PGDIR_SIZE()); pgd_pte = ULONG(machdep->pgd + PGDIR_OFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR((ulong)page_dir)), pgd_pte); if (!pgd_pte) return FALSE; /* * pmd_offset(pgd, vaddr) * * Here PMD is folded into a PGD. */ pmd_pte = pgd_pte; page_middle = page_dir; if (verbose) fprintf(fp, " PMD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR((ulong)page_middle)), pmd_pte); if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT) { ulong sectionbase = pmd_pte & _SECTION_PAGE_MASK; if (verbose) { fprintf(fp, " PAGE: %s (1MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR(sectionbase))); } *paddr = sectionbase + (vaddr & ~_SECTION_PAGE_MASK); return TRUE; } /* * pte_offset_map(pmd, vaddr) */ page_table = pmd_page_addr(pmd_pte) + PTE_OFFSET(vaddr); FILL_PTBL(PAGEBASE(page_table), PHYSADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) { fprintf(fp, " PTE: %s => %lx\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR((ulong)page_table)), pte); } if (!pte_present(pte)) { if (pte && verbose) { fprintf(fp, "\n"); arm_translate_pte(pte, 0, 0); } return FALSE; } *paddr = PAGEBASE(pte) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR(PAGEBASE(pte)))); arm_translate_pte(pte, 0, 0); } return TRUE; } /* * Virtual to physical memory translation when "CONFIG_ARM_LPAE=y". * This function will be called by both arm_kvtop() and arm_uvtop(). 
*/ static int arm_lpae_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose) { char buf[BUFSIZE]; physaddr_t page_dir; physaddr_t page_middle; physaddr_t page_table; pgd_t pgd_pmd; pmd_t pmd_pte; pte_t pte; if (IS_KVADDR(vaddr)) { if (!vt->vmalloc_start) { *paddr = LPAE_VTOP(vaddr); return TRUE; } if (!IS_VMALLOC_ADDR(vaddr)) { *paddr = LPAE_VTOP(vaddr); if (!verbose) return TRUE; } } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); /* * pgd_offset(pgd, vaddr) */ page_dir = LPAE_VTOP((ulong)pgd + LPAE_PGD_OFFSET(vaddr) * 8); FILL_PGD_LPAE(LPAE_VTOP(pgd), PHYSADDR, LPAE_PGDIR_SIZE()); pgd_pmd = ULONGLONG(machdep->pgd + LPAE_PGDIR_OFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %8llx => %llx\n", (ulonglong)page_dir, pgd_pmd); if (!pgd_pmd) return FALSE; /* * pmd_offset(pgd, vaddr) */ page_middle = LPAE_PAGEBASE(pgd_pmd) + LPAE_PMD_OFFSET(vaddr) * 8; FILL_PMD_LPAE(LPAE_PAGEBASE(pgd_pmd), PHYSADDR, LPAE_PMDIR_SIZE()); pmd_pte = ULONGLONG(machdep->pmd + LPAE_PMDIR_OFFSET(page_middle)); if (!pmd_pte) return FALSE; if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT_LPAE) { ulonglong sectionbase = LPAE_PAGEBASE(pmd_pte) & LPAE_SECTION_PAGE_MASK; if (verbose) fprintf(fp, " PAGE: %8llx (2MB)\n\n", (ulonglong)sectionbase); *paddr = sectionbase + (vaddr & ~LPAE_SECTION_PAGE_MASK); return TRUE; } /* * pte_offset_map(pmd, vaddr) */ page_table = LPAE_PAGEBASE(pmd_pte) + PTE_OFFSET(vaddr) * 8; FILL_PTBL_LPAE(LPAE_PAGEBASE(pmd_pte), PHYSADDR, LPAE_PTEDIR_SIZE()); pte = ULONGLONG(machdep->ptbl + LPAE_PTEDIR_OFFSET(page_table)); if (verbose) { fprintf(fp, " PTE: %8llx => %llx\n\n", (ulonglong)page_table, pte); } if (!pte_present(pte)) { if (pte && verbose) { fprintf(fp, "\n"); arm_translate_pte(0, 0, pte); } return FALSE; } *paddr = LPAE_PAGEBASE(pte) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR(PAGEBASE(pte)))); arm_translate_pte(0, 0, pte); } return TRUE; } /* * Translates a user virtual 
address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. */ static int arm_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); /* * Before idmap_pgd was introduced with upstream commit 2c8951ab0c * (ARM: idmap: use idmap_pgd when setting up mm for reboot), the * panic task pgd was overwritten by soft reboot code, so we can't do * any vtop translations. */ if (!(machdep->flags & IDMAP_PGD) && tc->task == tt->panic_task) error(FATAL, "panic task pgd is trashed by soft reboot code\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(uvaddr)) { ulong active_mm; readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } else { ulong mm; mm = task_mm(tc->task, TRUE); if (mm) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (machdep->flags & PAE) return arm_lpae_vtop(uvaddr, pgd, paddr, verbose); return arm_vtop(uvaddr, pgd, paddr, verbose); } /* * Translates a kernel virtual address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. 
*/ static int arm_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if (machdep->flags & PAE) return arm_lpae_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); if (!vt->vmalloc_start) { *paddr = VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); if (!verbose) return TRUE; } return arm_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose); } /* * Get SP and PC values for idle tasks. */ static int arm_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { const char *cpu_context; if (!bt->tc || !(tt->flags & THREAD_INFO)) return FALSE; /* * Update thread_info in tt. */ if (!fill_thread_info(bt->tc->thread_info)) return FALSE; cpu_context = tt->thread_info + OFFSET(thread_info_cpu_context); #define GET_REG(ptr, cp, off) ((*ptr) = (*((ulong *)((cp) + OFFSET(off))))) GET_REG(spp, cpu_context, cpu_context_save_sp); GET_REG(pcp, cpu_context, cpu_context_save_pc); /* * Unwinding code needs FP (R7 for Thumb code) value also so we pass it * with bt. */ if (*pcp & 1) GET_REG(&bt->frameptr, cpu_context, cpu_context_save_r7); else GET_REG(&bt->frameptr, cpu_context, cpu_context_save_fp); return TRUE; } /* * Get the starting point for the active cpu in a diskdump. */ static int arm_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { const struct machine_specific *ms = machdep->machspec; if (!ms->crash_task_regs || (!ms->crash_task_regs[bt->tc->processor].ARM_pc && !ms->crash_task_regs[bt->tc->processor].ARM_sp)) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } /* * We got registers for panic task from crash_notes. Just return them. */ *nip = ms->crash_task_regs[bt->tc->processor].ARM_pc; *ksp = ms->crash_task_regs[bt->tc->processor].ARM_sp; /* * Also store pointer to all registers in case unwinding code needs * to access LR. 
*/ bt->machdep = &(ms->crash_task_regs[bt->tc->processor]); return TRUE; } /* * Get a stack frame combination of PC and SP from the most relevant spot. */ static void arm_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { ulong ip, sp; int ret; ip = sp = 0; bt->machdep = NULL; if (DUMPFILE() && is_task_active(bt->task)) ret = arm_get_dumpfile_stack_frame(bt, &ip, &sp); else ret = arm_get_frame(bt, &ip, &sp); if (!ret) error(WARNING, "cannot determine starting stack frame for task %lx\n", bt->task); if (pcp) *pcp = ip; if (spp) *spp = sp; } /* * Prints out exception stack starting from start. */ void arm_dump_exception_stack(ulong start, ulong end) { struct arm_pt_regs regs; ulong flags; char buf[64]; if (!readmem(start, KVADDR, ®s, sizeof(regs), "exception regs", RETURN_ON_ERROR)) { error(WARNING, "failed to read exception registers\n"); return; } fprintf(fp, " pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n" " sp : %08lx ip : %08lx fp : %08lx\n", regs.ARM_pc, regs.ARM_lr, regs.ARM_cpsr, regs.ARM_sp, regs.ARM_ip, regs.ARM_fp); fprintf(fp, " r10: %08lx r9 : %08lx r8 : %08lx\n", regs.ARM_r10, regs.ARM_r9, regs.ARM_r8); fprintf(fp, " r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n", regs.ARM_r7, regs.ARM_r6, regs.ARM_r5, regs.ARM_r4); fprintf(fp, " r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n", regs.ARM_r3, regs.ARM_r2, regs.ARM_r1, regs.ARM_r0); flags = regs.ARM_cpsr; buf[0] = flags & PSR_N_BIT ? 'N' : 'n'; buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z'; buf[2] = flags & PSR_C_BIT ? 'C' : 'c'; buf[3] = flags & PSR_V_BIT ? 'V' : 'v'; buf[4] = '\0'; fprintf(fp, " Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s\n", buf, interrupts_enabled(®s) ? "n" : "ff", fast_interrupts_enabled(®s) ? 
"n" : "ff", processor_modes[processor_mode(®s)], isa_modes[isa_mode(®s)]); } static void arm_display_full_frame(struct bt_info *bt, ulong sp) { ulong words, addr; ulong *up; char buf[BUFSIZE]; int i, u_idx; if (!INSTACK(sp, bt) || !INSTACK(bt->stkptr, bt)) return; words = (sp - bt->stkptr) / sizeof(ulong); if (words == 0) { fprintf(fp, " (no frame)\n"); return; } addr = bt->stkptr; u_idx = (bt->stkptr - bt->stackbase) / sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if ((i % 4) == 0) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx * sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } /* * Prints out a single stack frame. What is printed depends on flags passed in * with bt. * * What is expected when calling this function: * bt->frameptr = current FP (or 0 if there is no such) * bt->stkptr = current SP * bt->instptr = current PC * * from = LR * sp = previous/saved SP */ void arm_dump_backtrace_entry(struct bt_info *bt, int level, ulong from, ulong sp) { struct load_module *lm; const char *name; int offset = 0; struct syment *symp; ulong symbol_offset; char *name_plus_offset; char buf[BUFSIZE]; name = closest_symbol(bt->instptr); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { symp = value_search(bt->instptr, &symbol_offset); if (symp && symbol_offset) name_plus_offset = value_to_symstr(bt->instptr, buf, bt->radix); } if (module_symbol(bt->instptr, NULL, &lm, NULL, 0)) { fprintf(fp, "%s#%d [<%08lx>] (%s [%s]) from [<%08lx>]\n", level < 10 ? " " : "", level, bt->instptr, name_plus_offset ? name_plus_offset : name, lm->mod_name, from); } else { fprintf(fp, "%s#%d [<%08lx>] (%s) from [<%08lx>]\n", level < 10 ? " " : "", level, bt->instptr, name_plus_offset ? 
name_plus_offset : name, from); } if (bt->flags & BT_LINE_NUMBERS) { char buf[BUFSIZE]; get_line_number(bt->instptr, buf, FALSE); if (strlen(buf)) fprintf(fp, " %s\n", buf); } if (arm_in_exception_text(bt->instptr)) { arm_dump_exception_stack(sp, sp + sizeof(struct arm_pt_regs)); } else if (arm_in_ret_from_syscall(from, &offset)) { ulong nsp = sp + offset; arm_dump_exception_stack(nsp, nsp + sizeof(struct arm_pt_regs)); } if (bt->flags & BT_FULL) { if (kt->flags & DWARF_UNWIND) { fprintf(fp, " " "[PC: %08lx LR: %08lx SP: %08lx SIZE: %ld]\n", bt->instptr, from, bt->stkptr, sp - bt->stkptr); } else { fprintf(fp, " " "[PC: %08lx LR: %08lx SP: %08lx FP: %08lx " "SIZE: %ld]\n", bt->instptr, from, bt->stkptr, bt->frameptr, sp - bt->stkptr); } arm_display_full_frame(bt, sp); } } /* * Determine where vmalloc'd memory starts. */ static ulong arm_vmalloc_start(void) { machdep->machspec->vmalloc_start_addr = vt->high_memory; return vt->high_memory; } /* * Checks whether given task is valid task address. */ static int arm_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } /* * Filter dissassembly output if the output radix is not gdb's default 10 */ static int arm_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on arm) so this routine both fixes the * references as well as imposing the current output radix on the translations. 
*/ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Look for likely exception frames in a stack. */ static int arm_eframe_search(struct bt_info *bt) { return (NOT_IMPLEMENTED()); } /* * Get the relevant page directory pointer from a task structure. */ static ulong arm_get_task_pgd(ulong task) { return (NOT_IMPLEMENTED()); } /* * Machine dependent command. */ static void arm_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != -1) { switch (c) { case 'c': case 'm': fprintf(fp, "ARM: '-%c' option is not supported\n", c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); arm_display_machine_stats(); } static void arm_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", 
STACKSIZE()); } static int arm_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_present())) return cpus; else return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Initialize ARM specific stuff. */ static void arm_init_machspec(void) { struct machine_specific *ms = machdep->machspec; ulong phys_base; if (symbol_exists("__exception_text_start") && symbol_exists("__exception_text_end")) { ms->exception_text_start = symbol_value("__exception_text_start"); ms->exception_text_end = symbol_value("__exception_text_end"); } if (symbol_exists("_stext") && symbol_exists("_etext")) { ms->kernel_text_start = symbol_value("_stext"); ms->kernel_text_end = symbol_value("_etext"); } if (CRASHDEBUG(1)) { fprintf(fp, "kernel text: [%lx - %lx]\n", ms->kernel_text_start, ms->kernel_text_end); fprintf(fp, "exception text: [%lx - %lx]\n", ms->exception_text_start, ms->exception_text_end); } if (machdep->flags & PHYS_BASE) /* --machdep override */ return; /* * Next determine suitable value for phys_base. User can override this * by passing valid '--machdep phys_base=' option. */ ms->phys_base = 0; if (ACTIVE()) { char buf[BUFSIZE]; char *p1; int errflag; FILE *fp; if ((fp = fopen("/proc/iomem", "r")) == NULL) return; /* * Memory regions are sorted in ascending order. We take the * first region which should be correct for most uses. */ errflag = 1; while (fgets(buf, BUFSIZE, fp)) { if (strstr(buf, ": System RAM")) { clean_line(buf); errflag = 0; break; } } fclose(fp); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; *p1 = NULLCHAR; phys_base = htol(buf, RETURN_ON_ERROR | QUIET, &errflag); if (errflag) return; ms->phys_base = phys_base; } else if (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_base)) { ms->phys_base = phys_base; } else if (KDUMP_DUMPFILE() && arm_kdump_phys_base(&phys_base)) { ms->phys_base = phys_base; } else { error(WARNING, "phys_base cannot be determined from the dumpfile.\n" "Using default value of 0. 
If this is not correct,\n" "consider using '--machdep phys_base='\n"); } if (CRASHDEBUG(1)) fprintf(fp, "using %lx as phys_base\n", ms->phys_base); } static const char *hook_files[] = { "arch/arm/kernel/entry-armv.S", "arch/arm/kernel/entry-common.S", }; #define ENTRY_ARMV_S ((char **)&hook_files[0]) #define ENTRY_COMMON_S ((char **)&hook_files[1]) static struct line_number_hook arm_line_number_hooks[] = { { "__dabt_svc", ENTRY_ARMV_S }, { "__irq_svc", ENTRY_ARMV_S }, { "__und_svc", ENTRY_ARMV_S }, { "__pabt_svc", ENTRY_ARMV_S }, { "__switch_to", ENTRY_ARMV_S }, { "ret_fast_syscall", ENTRY_COMMON_S }, { "ret_slow_syscall", ENTRY_COMMON_S }, { "ret_from_fork", ENTRY_COMMON_S }, { NULL, NULL }, }; #endif /* ARM */ crash-utility-crash-9cd43f5/lkcd_common.c0000664000372000037200000011044415107550337020031 0ustar juerghjuergh/* lkcd_common.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Free Software Foundation, Inc. * Copyright (C) 2002-2005, 2007, 2009, 2011, 2013 David Anderson * Copyright (C) 2002-2005, 2007, 2009, 2011, 2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * lkcd_uncompress_RLE() is essentially LKCD's __cmpuncompress_page() rountine, * adapted from ../cmd/lcrash/lib/libklib/arch/i386/kl_cmp.c: */ /* * arch/i386/cmp.c * * This file handles compression aspects of crash dump files * for i386 based systems. 
Most of this is taken from the * IRIX compression code, with exceptions to how the index * is created, because the file format is different with Linux. * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. */ /* * This file has no knowledge of the dump_header_t, dump_header_asm_t or * dump_page_t formats, so it gathers information from them via the version * specific "_v1" or "_v2_v3" type routines. */ #define LKCD_COMMON #include "defs.h" static void dump_dump_page(char *, void *); static int lkcd_uncompress_RLE(unsigned char *, unsigned char *,uint32_t,int *); static int lkcd_uncompress_gzip(unsigned char *, ulong, unsigned char *, ulong); static int hash_page(ulong); static int page_is_cached(void); static int page_is_hashed(long *); static int cache_page(void); struct lkcd_environment lkcd_environment = { 0 }; struct lkcd_environment *lkcd = &lkcd_environment; static int uncompress_errloc; static int uncompress_recover(unsigned char *, ulong, unsigned char *, ulong); ulonglong fix_lkcd_address(ulonglong addr) { int i; ulong offset; for (i = 0; i < lkcd->fix_addr_num; i++) { if ( (addr >=lkcd->fix_addr[i].task) && (addr < lkcd->fix_addr[i].task + STACKSIZE())){ offset = addr - lkcd->fix_addr[i].task; addr = lkcd->fix_addr[i].saddr + offset; } } return addr; } /* * Each version has its own dump initialization. */ int lkcd_dump_init(FILE *fp, int fd, char *dumpfile) { switch (lkcd->version) { case LKCD_DUMP_V1: return(lkcd_dump_init_v1(fp, fd)); case LKCD_DUMP_V2: case LKCD_DUMP_V3: return(lkcd_dump_init_v2_v3(fp, fd)); case LKCD_DUMP_V5: case LKCD_DUMP_V6: return(lkcd_dump_init_v5(fp, fd)); case LKCD_DUMP_V7: return(lkcd_dump_init_v7(fp, fd, dumpfile)); case LKCD_DUMP_V8: case LKCD_DUMP_V9: return(lkcd_dump_init_v8(fp, fd, dumpfile)); default: return FALSE; } } /* * Return the page size value recorded in the dump header. */ uint32_t lkcd_page_size(void) { return lkcd->page_size; } /* * Return the panic task and panic string. 
*/ unsigned long get_lkcd_panic_task(void) { return(lkcd->flags & (LKCD_VALID|LKCD_REMOTE) ? lkcd->panic_task : 0); } void get_lkcd_panicmsg(char *buf) { if (lkcd->flags & (LKCD_VALID|LKCD_REMOTE)) strcpy(buf, lkcd->panic_string); } /* * Called by remote_lkcd_dump_init() the local (!valid) lkcd_environment * is used to store the panic task and panic message for use by the * two routines above. */ void set_remote_lkcd_panic_data(ulong task, char *buf) { if (buf) { if (!(lkcd->panic_string = (char *)malloc(strlen(buf)+1))) { fprintf(stderr, "cannot malloc space for panic message!\n"); clean_exit(1); } strcpy(lkcd->panic_string, buf); } if (task) lkcd->panic_task = task; lkcd->flags |= LKCD_REMOTE; } /* * Does the magic number indicate an LKCD compressed dump? * If so, set the version number for all future forays into the * functions in this file. */ int is_lkcd_compressed_dump(char *s) { int tmpfd; uint64_t magic; uint32_t version; char errbuf[BUFSIZE]; if ((tmpfd = open(s, O_RDONLY)) < 0) { strcpy(errbuf, s); perror(errbuf); return FALSE; } if (read(tmpfd, &magic, sizeof(uint64_t)) != sizeof(uint64_t)) { close(tmpfd); return FALSE; } if (read(tmpfd, &version, sizeof(uint32_t)) != sizeof(uint32_t)) { close(tmpfd); return FALSE; } close(tmpfd); if (!((magic == LKCD_DUMP_MAGIC_NUMBER) || (magic == LKCD_DUMP_MAGIC_LIVE))) return FALSE; switch (version & ~(LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1)) { case LKCD_DUMP_V1: lkcd->version = LKCD_DUMP_V1; return TRUE; case LKCD_DUMP_V2: case LKCD_DUMP_V3: lkcd->version = LKCD_DUMP_V2; return TRUE; case LKCD_DUMP_V5: case LKCD_DUMP_V6: lkcd->version = LKCD_DUMP_V5; return TRUE; case LKCD_DUMP_V7: lkcd->version = LKCD_DUMP_V7; return TRUE; case LKCD_DUMP_V8: case LKCD_DUMP_V9: case LKCD_DUMP_V10: lkcd->version = LKCD_DUMP_V8; return TRUE; default: lkcd_print("unsupported LKCD dump version: %ld (%lx)\n", version & ~(LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1), version); return FALSE; } } /* * console-only output for info regarding current 
page. */ static void dump_dump_page(char *s, void *dp) { switch (lkcd->version) { case LKCD_DUMP_V1: dump_dump_page_v1(s, dp); break; case LKCD_DUMP_V2: case LKCD_DUMP_V3: dump_dump_page_v2_v3(s, dp); break; case LKCD_DUMP_V5: dump_dump_page_v5(s, dp); break; case LKCD_DUMP_V7: dump_dump_page_v7(s, dp); break; case LKCD_DUMP_V8: case LKCD_DUMP_V9: dump_dump_page_v8(s, dp); break; } } /* * help -S output, or as specified by arg. */ void dump_lkcd_environment(ulong arg) { int others; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; lkcd_print(" fd: %d\n", lkcd->fd); lkcd_print(" fp: %lx\n", lkcd->fp); lkcd_print(" debug: %ld\n", lkcd->debug); lkcd_print(" flags: %lx (", lkcd->flags); others = 0; if (lkcd->flags & LKCD_VALID) lkcd_print("%sLKCD_VALID", others++ ? "|" : ""); if (lkcd->flags & LKCD_REMOTE) lkcd_print("%sLKCD_REMOTE", others++ ? "|" : ""); if (lkcd->flags & LKCD_NOHASH) lkcd_print("%sLKCD_NOHASH", others++ ? "|" : ""); if (lkcd->flags & LKCD_MCLX) lkcd_print("%sLKCD_MCLX", others++ ? "|" : ""); if (lkcd->flags & LKCD_BAD_DUMP) lkcd_print("%sLKCD_BAD_DUMP", others++ ? 
"|" : ""); lkcd_print(")\n"); dump_header_only: switch (lkcd->version) { case LKCD_DUMP_V1: dump_lkcd_environment_v1(LKCD_DUMP_HEADER_ONLY); break; case LKCD_DUMP_V2: case LKCD_DUMP_V3: dump_lkcd_environment_v2_v3(LKCD_DUMP_HEADER_ONLY); break; case LKCD_DUMP_V5: dump_lkcd_environment_v5(LKCD_DUMP_HEADER_ONLY); break; case LKCD_DUMP_V7: dump_lkcd_environment_v7(LKCD_DUMP_HEADER_ONLY); break; case LKCD_DUMP_V8: case LKCD_DUMP_V9: dump_lkcd_environment_v8(LKCD_DUMP_HEADER_ONLY); break; } if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: switch (lkcd->version) { case LKCD_DUMP_V1: dump_lkcd_environment_v1(LKCD_DUMP_PAGE_ONLY); break; case LKCD_DUMP_V2: case LKCD_DUMP_V3: dump_lkcd_environment_v2_v3(LKCD_DUMP_PAGE_ONLY); break; case LKCD_DUMP_V5: dump_lkcd_environment_v5(LKCD_DUMP_PAGE_ONLY); break; case LKCD_DUMP_V7: dump_lkcd_environment_v7(LKCD_DUMP_PAGE_ONLY); break; case LKCD_DUMP_V8: dump_lkcd_environment_v8(LKCD_DUMP_PAGE_ONLY); break; } if (arg == LKCD_DUMP_PAGE_ONLY) return; lkcd_print(" version: %ld\n", lkcd->version); lkcd_print(" page_size: %ld\n", lkcd->page_size); lkcd_print(" page_shift: %d\n", lkcd->page_shift); lkcd_print(" bits: %d\n", lkcd->bits); lkcd_print(" panic_task: %lx\n", lkcd->panic_task); lkcd_print(" panic_string: %s%s", lkcd->panic_string, lkcd->panic_string && strstr(lkcd->panic_string, "\n") ? 
"" : "\n"); lkcd_print(" get_dp_size: "); if (lkcd->get_dp_size == get_dp_size_v1) lkcd_print("get_dp_size_v1()\n"); else if (lkcd->get_dp_size == get_dp_size_v2_v3) lkcd_print("get_dp_size_v2_v3()\n"); else if (lkcd->get_dp_size == get_dp_size_v5) lkcd_print("get_dp_size_v5()\n"); else lkcd_print("%lx\n", lkcd->get_dp_size); lkcd_print(" get_dp_flags: "); if (lkcd->get_dp_flags == get_dp_flags_v1) lkcd_print("get_dp_flags_v1()\n"); else if (lkcd->get_dp_flags == get_dp_flags_v2_v3) lkcd_print("get_dp_flags_v2_v3()\n"); else if (lkcd->get_dp_flags == get_dp_flags_v5) lkcd_print("get_dp_flags_v5()\n"); else lkcd_print("%lx\n", lkcd->get_dp_flags); lkcd_print(" get_dp_address: "); if (lkcd->get_dp_address == get_dp_address_v1) lkcd_print("get_dp_address_v1()\n"); else if (lkcd->get_dp_address == get_dp_address_v2_v3) lkcd_print("get_dp_address_v2_v3()\n"); else if (lkcd->get_dp_address == get_dp_address_v5) lkcd_print("get_dp_address_v5()\n"); else lkcd_print("%lx\n", lkcd->get_dp_address); lkcd_print(" compression: "); lkcd_print(BITS32() ? "%lx " : "%x ", lkcd->compression); switch (lkcd->compression) { case LKCD_DUMP_COMPRESS_NONE: lkcd_print("(LKCD_DUMP_COMPRESS_NONE)\n"); break; case LKCD_DUMP_COMPRESS_RLE: lkcd_print("(LKCD_DUMP_COMPRESS_RLE)\n"); break; case LKCD_DUMP_COMPRESS_GZIP: lkcd_print("(LKCD_DUMP_COMPRESS_GZIP)\n"); break; default: lkcd_print("(unknown)\n"); break; } lkcd_print("page_header_size: %ld\n", lkcd->page_header_size); lkcd_print(" curpos: %ld\n", lkcd->curpos); lkcd_print(" curpaddr: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", lkcd->curpaddr); lkcd_print(" curbufptr: %lx\n", lkcd->curbufptr); lkcd_print(" curhdroffs: %ld\n", lkcd->curhdroffs); lkcd_print(" kvbase: "); lkcd_print(BITS32() ? 
"%llx\n" : "%lx\n", lkcd->kvbase); lkcd_print(" page_cache_buf: %lx\n", lkcd->page_cache_buf); lkcd_print(" compressed_page: %lx\n", lkcd->compressed_page); lkcd_print(" evict_index: %d\n", lkcd->evict_index); lkcd_print(" evictions: %ld\n", lkcd->evictions); lkcd_print(" benchmark_pages: %ld\n", lkcd->benchmark_pages); lkcd_print(" benchmarks_done: %ld\n", lkcd->benchmarks_done); lkcd_memory_dump(lkcd->fp); } /* * Set the shadow debug flag. */ void set_lkcd_debug(ulong debug) { lkcd->debug = debug; } /* * Set no-hash flag bit. */ void set_lkcd_nohash(void) { lkcd->flags |= LKCD_NOHASH; } /* * Set the file pointer for debug output. */ FILE * set_lkcd_fp(FILE *fp) { lkcd->fp = fp; return fp; } /* * Return the number of pages cached. */ int lkcd_memory_used(void) { int i, pages; struct page_cache_hdr *sp; sp = &lkcd->page_cache_hdr[0]; for (i = pages = 0; i < LKCD_CACHED_PAGES; i++, sp++) { if (LKCD_VALID_PAGE(sp->pg_flags)) pages++; } return pages; } /* * Since the dumpfile pages are temporary tenants of a fixed page cache, * this command doesn't do anything except clear the references. */ int lkcd_free_memory(void) { int i, pages; struct page_cache_hdr *sp; sp = &lkcd->page_cache_hdr[0]; for (i = pages = 0; i < LKCD_CACHED_PAGES; i++, sp++) { if (LKCD_VALID_PAGE(sp->pg_flags)) { sp->pg_addr = 0; sp->pg_hit_count = 0; pages++; } sp->pg_flags = 0; } return pages; } /* * Dump the page cache; */ int lkcd_memory_dump(FILE *fp) { int i, c, pages; struct page_cache_hdr *sp; struct page_hash_entry *phe; ulong pct_cached, pct_hashed; ulong pct_compressed, pct_raw; FILE *fpsave; char buf[BUFSIZE]; int wrap; fpsave = lkcd->fp; lkcd->fp = fp; lkcd_print(" total_pages: %ld\n", lkcd->total_pages); pct_compressed = (lkcd->compressed*100) / (lkcd->hashed ? lkcd->hashed : 1); pct_raw = (lkcd->raw*100) / (lkcd->hashed ? 
lkcd->hashed : 1); lkcd_print(" hashed: %ld\n", lkcd->hashed); lkcd_print(" compressed: %ld (%ld%%)\n", lkcd->compressed, pct_compressed); lkcd_print(" raw: %ld (%ld%%)\n", lkcd->raw, pct_raw); pct_cached = (lkcd->cached_reads*100) / (lkcd->total_reads ? lkcd->total_reads : 1); pct_hashed = (lkcd->hashed_reads*100) / (lkcd->total_reads ? lkcd->total_reads : 1); lkcd_print(" cached_reads: %ld (%ld%%)\n", lkcd->cached_reads, pct_cached); lkcd_print(" hashed_reads: %ld (%ld%%)\n", lkcd->hashed_reads, pct_hashed); lkcd_print(" total_reads: %ld (hashed or cached: %ld%%) \n", lkcd->total_reads, pct_cached+pct_hashed); lkcd_print("page_hash[%2d]:\n", LKCD_PAGE_HASH); if (LKCD_DEBUG(1)) { for (i = 0; i < LKCD_PAGE_HASH; i++) { phe = &lkcd->page_hash[i]; if (!LKCD_VALID_PAGE(phe->pg_flags)) continue; lkcd_print(" [%2d]: ", i); wrap = 0; while (phe && LKCD_VALID_PAGE(phe->pg_flags)) { sprintf(buf, "%llx@", (ulonglong)phe->pg_addr); sprintf(&buf[strlen(buf)], "%llx,", (ulonglong)phe->pg_hdr_offset); lkcd_print("%18s", buf); phe = phe->next; if (phe && (++wrap == 3)) { lkcd_print("\n "); wrap = 0; } } lkcd_print("\n"); } } else { for (i = 0; i < LKCD_PAGE_HASH; i++) { phe = &lkcd->page_hash[i]; if (!LKCD_VALID_PAGE(phe->pg_flags)) continue; lkcd_print(" [%2d]: ", i); wrap = 0; while (phe && LKCD_VALID_PAGE(phe->pg_flags)) { lkcd_print(BITS32() ? "%9llx," : "%9lx,", phe->pg_addr); phe = phe->next; if (phe && (++wrap == 7)) { lkcd_print("\n "); wrap = 0; } } lkcd_print("\n"); } } lkcd_print("page_cache_hdr[%2d]:\n", LKCD_CACHED_PAGES); lkcd_print(" INDEX PG_ADDR PG_BUFPTR"); lkcd_print(BITS32() ? 
" PG_HIT_COUNT\n" : " PG_HIT_COUNT\n"); sp = &lkcd->page_cache_hdr[0]; for (i = pages = 0; i < LKCD_CACHED_PAGES; i++, sp++) { if (LKCD_VALID_PAGE(sp->pg_flags)) pages++; if (BITS32()) lkcd_print(" [%2d] %9llx %lx %ld\n", i, sp->pg_addr, sp->pg_bufptr, sp->pg_hit_count); else lkcd_print(" [%2d] %9lx %lx %ld\n", i, sp->pg_addr, sp->pg_bufptr, sp->pg_hit_count); } if (lkcd->mb_hdr_offsets) { lkcd_print("mb_hdr_offsets[%3ld]: \n", lkcd->benchmark_pages); for (i = 0; i < lkcd->benchmark_pages; i += 8) { lkcd_print(" [%3d]", i); c = 0; while ((c < 8) && ((i+c) < lkcd->benchmark_pages)) { lkcd_print(" %8lx", lkcd->mb_hdr_offsets[i+c]); c++; } lkcd_print("\n"); } } else { lkcd_print(" mb_hdr_offsets: NA\n"); } if (lkcd->zones) { lkcd_print(" num_zones: %d / %d\n", lkcd->num_zones, lkcd->max_zones); lkcd_print(" zoned_offsets: %ld\n", lkcd->zoned_offsets); } lkcd_print(" dumpfile_index: %s\n", lkcd->dumpfile_index); lkcd_print(" ifd: %d\n", lkcd->ifd); lkcd_print(" memory_pages: %ld\n", lkcd->memory_pages); lkcd_print(" page_offset_max: %ld\n", lkcd->page_offset_max); lkcd_print(" page_index_max: %ld\n", lkcd->page_index_max); lkcd_print(" page_offsets: %lx\n", lkcd->page_offsets); lkcd->fp = fpsave; return pages; } /* * The lkcd_lseek() routine does the bulk of the work setting things up * so that the subsequent lkcd_read() simply has to do a bcopy(). * Given a physical address, first determine: * * (1) its page offset (lkcd->curpos). * (2) its page address as specified in the dumpfile (lkcd->curpaddr). * * If the page data is already cached, everything will be set up for the * subsequent read when page_is_cached() returns. * * If the page data is not cached, either of the following occurs: * * (1) page_is_hashed() will check whether the page header offset is cached, * and if so, will set up the page variable, and lseek to the header. * * In either case above, the starting point for the page search is set up. * Lastly, cache_page() stores the requested page's data. 
 */

/*
 * Record the dumpfile offset of the page containing paddr in the
 * per-zone page-offset table, allocating the zone array on first use
 * and doubling it when full.  Returns 1 if the page is new, 0 if its
 * offset was already recorded, and -1 on allocation failure or on a
 * conflicting (different) previously-stored offset.
 */
static int
save_offset(uint64_t paddr, off_t off)
{
	uint64_t zone, page;
	int ii, ret;
	int max_zones;
	struct physmem_zone *zones;

	ret = -1;
	zone = paddr & lkcd->zone_mask;
	/* page index within its zone */
	page = (paddr & ~lkcd->zone_mask) >> lkcd->page_shift;

	if (lkcd->num_zones == 0) {
		/* First call: set up the zone array and the first zone. */
		lkcd->zones = malloc(ZONE_ALLOC * sizeof(struct physmem_zone));
		if (!lkcd->zones) {
			return -1; /* This should be fatal */
		}
		BZERO(lkcd->zones, ZONE_ALLOC * sizeof(struct physmem_zone));
		lkcd->max_zones = ZONE_ALLOC;

		lkcd->zones[0].start = zone;
		lkcd->zones[0].pages = malloc((ZONE_SIZE >> lkcd->page_shift)
					* sizeof(struct page_desc));
		if (!lkcd->zones[0].pages) {
			return -1; /* this should be fatal */
		}
		BZERO(lkcd->zones[0].pages,
			(ZONE_SIZE >> lkcd->page_shift) *
			sizeof(struct page_desc));

		lkcd->num_zones++;
	}

retry:
	/* find the zone */
	for (ii=0; ii < lkcd->num_zones; ii++) {
		if (lkcd->zones[ii].start == zone) {
			if (lkcd->zones[ii].pages[page].offset != 0) {
				if (lkcd->zones[ii].pages[page].offset != off) {
					/* Same page seen at a different file
					 * offset: dumpfile inconsistency. */
					if (CRASHDEBUG(1) &&
					    !STREQ(pc->curcmd, "search"))
						error(INFO,
					"LKCD: conflicting page: zone %lld, "
					"page %lld: %lld, %lld != %lld\n",
						(unsigned long long)zone,
						(unsigned long long)page,
						(unsigned long long)paddr,
						(unsigned long long)off,
						(unsigned long long)lkcd->zones[ii].pages[page].offset);
					return -1;
				}
				ret = 0;
			} else {
				lkcd->zones[ii].pages[page].offset = off;
				ret = 1;
			}
			break;
		}
	}

	if (ii == lkcd->num_zones) {	/* This is a new zone */
		if (lkcd->num_zones < lkcd->max_zones) {
			/* We have room for another one */
			lkcd->zones[ii].start = zone;
			lkcd->zones[ii].pages = malloc(
				(ZONE_SIZE >> lkcd->page_shift) *
				sizeof(struct page_desc));
			if (!lkcd->zones[ii].pages) {
				return -1; /* this should be fatal */
			}
			BZERO(lkcd->zones[ii].pages,
				(ZONE_SIZE >> lkcd->page_shift) *
				sizeof(struct page_desc));
			lkcd->zones[ii].pages[page].offset = off;
			ret = 1;
			lkcd->num_zones++;
		} else { /* need to expand zone */
			max_zones = lkcd->max_zones * 2;
			zones = malloc(max_zones *
				sizeof(struct physmem_zone));
			if (!zones) {
				return -1; /* This should be fatal */
			}
			BZERO(zones, max_zones * sizeof(struct physmem_zone));
			memcpy(zones, lkcd->zones,
				lkcd->max_zones * sizeof(struct physmem_zone));
			free(lkcd->zones);
			lkcd->zones = zones;
			lkcd->max_zones = max_zones;
			goto retry;
		}
	}

	return ret;	/* 1 if the page is new */
}

/*
 * Look up the previously recorded dumpfile offset of the page containing
 * paddr; returns 0 when unknown.
 */
static off_t
get_offset(uint64_t paddr)
{
	uint64_t zone, page;
	int ii;

	zone = paddr & lkcd->zone_mask;
	page = (paddr % ZONE_SIZE) >> lkcd->page_shift;

	if (lkcd->zones == 0) {
		return 0;
	}

	/* find the zone */
	for (ii=0; ii < lkcd->num_zones; ii++) {
		if (lkcd->zones[ii].start == zone) {
			return (lkcd->zones[ii].pages[page].offset);
		}
	}
	return 0;
}

#ifdef IA64
/*
 * Return the kernel start address from a v8/v9 dump header; returns 0
 * for other versions or a NULL addr pointer.
 */
int
lkcd_get_kernel_start(ulong *addr)
{
	if (!addr)
		return 0;

	switch (lkcd->version)
	{
	case LKCD_DUMP_V8:
	case LKCD_DUMP_V9:
		return lkcd_get_kernel_start_v8(addr);
	default:
		return 0;
	}
}
#endif

int
lkcd_lseek(physaddr_t paddr)
{
	int err;
	int eof;
	void *dp;
	long page = 0;
	physaddr_t physaddr;
	int seeked_to_page = 0;
	off_t page_offset;

	dp = lkcd->dump_page;
	/* Split the request into intra-page offset and page base address. */
	lkcd->curpos = paddr & ((physaddr_t)(lkcd->page_size-1));
	lkcd->curpaddr = paddr & ~((physaddr_t)(lkcd->page_size-1));

	if (page_is_cached())
		return TRUE;

	/* Faster than paging in lkcd->page_offsets[page] */
	if(page_is_hashed(&page)) {
		seeked_to_page = 1;
	}

	/* Find the offset for this page, if known */
	if ((page_offset = get_offset(paddr)) > 0) {
		off_t seek_offset;

		seek_offset = lseek(lkcd->fd, page_offset, SEEK_SET);
		if (seek_offset == page_offset) {
			seeked_to_page = 1;
			page = 0; /* page doesn't make any sense */
		}
	}

	if (seeked_to_page) {
		err = lkcd_load_dump_page_header(dp, page);
		if (err == LKCD_DUMPFILE_OK) {
			return(cache_page());
		}
	}

	/* We have to grind through some more of the dump file */
	lseek(lkcd->fd, lkcd->page_offset_max, SEEK_SET);
	eof = FALSE;
	while (!eof) {
		switch (lkcd_load_dump_page_header(dp, page))
		{
		case LKCD_DUMPFILE_OK:
			break;

		case LKCD_DUMPFILE_EOF:
			eof = TRUE;
			continue;
		}

		/* MCLX headers record a page number rather than a raw
		 * address, hence the shift in that case. */
		physaddr = lkcd->get_dp_flags() &
			(LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1) ?
			(lkcd->get_dp_address() - lkcd->kvbase)
				<< lkcd->page_shift :
			lkcd->get_dp_address() - lkcd->kvbase;

		if (physaddr == lkcd->curpaddr) {
			return(cache_page());
		}
		lseek(lkcd->fd, lkcd->get_dp_size(), SEEK_CUR);
	}

	return FALSE;
}

/*
 * Everything's been set up by the previous lkcd_lseek(), so all that has
 * to be done is to read the uncompressed data into the user buffer:
 *
 *   lkcd->curbufptr points to the uncompressed page base.
 *   lkcd->curpos is the offset into the buffer.
 */
long
lkcd_read(void *buf, long count)
{
	char *p;

	lkcd->total_reads++;
	p = lkcd->curbufptr + lkcd->curpos;
	BCOPY(p, buf, count);
	return count;
}

/*
 * Check whether lkcd->curpaddr is already cached.  If it is, update
 * lkcd->curbufptr to point to the page's uncompressed data.
 */
static int
page_is_cached(void)
{
	int i;

	for (i = 0; i < LKCD_CACHED_PAGES; i++) {
		if (!LKCD_VALID_PAGE(lkcd->page_cache_hdr[i].pg_flags))
			continue;

		if (lkcd->page_cache_hdr[i].pg_addr == lkcd->curpaddr) {
			lkcd->page_cache_hdr[i].pg_hit_count++;
			lkcd->curbufptr = lkcd->page_cache_hdr[i].pg_bufptr;
			lkcd->cached_reads++;
			return TRUE;
		}
	}
	return FALSE;
}

/*
 * For an incoming page:
 *
 *  (1) If it's already hashed just return TRUE.
 *  (2) If the base page_hash_entry is unused, fill it up and return TRUE;
 *  (3) Otherwise, find the last page_hash_entry on the list, allocate and
 *      fill a new one, link it on the list, and return TRUE.
 *  (4) If the malloc fails, quietly return FALSE (with no harm done).
 */
static int
hash_page(ulong type)
{
	struct page_hash_entry *phe;
	int index;

	/* A one-shot request to skip hashing this page. */
	if (lkcd->flags & LKCD_NOHASH) {
		lkcd->flags &= ~LKCD_NOHASH;
		return FALSE;
	}

	index = LKCD_PAGE_HASH_INDEX(lkcd->curpaddr);

	/* Walk the chain; stop on a match or at the last valid entry. */
	for (phe = &lkcd->page_hash[index];
	     LKCD_VALID_PAGE(phe->pg_flags); phe = phe->next) {
		if (phe->pg_addr == lkcd->curpaddr)
			return TRUE;
		if (!phe->next)
			break;
	}

	/* Chain tail already in use: extend it with a new entry. */
	if (LKCD_VALID_PAGE(phe->pg_flags)) {
		if ((phe->next = malloc
		    (sizeof(struct page_hash_entry))) == NULL)
			return FALSE;
		phe = phe->next;
	}

	phe->pg_flags |= LKCD_VALID;
	phe->pg_addr = lkcd->curpaddr;
	phe->pg_hdr_offset = lkcd->curhdroffs;
	phe->next = NULL;
	lkcd->hashed++;

	/* Statistics only, reported by dump_lkcd_environment(). */
	switch (type)
	{
	case LKCD_DUMP_COMPRESSED:
		lkcd->compressed++;
		break;
	case LKCD_DUMP_RAW:
		lkcd->raw++;
		break;
	}

	return TRUE;
}

/*
 * Check whether a page is currently hashed, and if so, return the page
 * number so that the subsequent search loop will find it immediately.
 */
static int
page_is_hashed(long *pp)
{
	struct page_hash_entry *phe;
	int index;

	index = LKCD_PAGE_HASH_INDEX(lkcd->curpaddr);

	for (phe = &lkcd->page_hash[index];
	     LKCD_VALID_PAGE(phe->pg_flags); phe = phe->next) {
		if (phe->pg_addr == lkcd->curpaddr) {
			/* Found: seek straight to the page header. */
			*pp = (long)(lkcd->curpaddr >> lkcd->page_shift);
			lseek(lkcd->fd, phe->pg_hdr_offset, SEEK_SET);
			lkcd->hashed_reads++;
			return TRUE;
		}
		if (!phe->next)
			break;
	}
	return FALSE;
}

/*
 * The caller stores the incoming page's page header offset in
 * lkcd->curhdroffs.
 */
int
set_mb_benchmark(ulong page)
{
	long mb;

	/* Record the header offset of the first page in each megabyte. */
	if ((mb = LKCD_PAGE_MEGABYTE(page)) >= lkcd->benchmark_pages)
		return FALSE;

	if (!lkcd->mb_hdr_offsets[mb]) {
		lkcd->mb_hdr_offsets[mb] = lkcd->curhdroffs;
		lkcd->benchmarks_done++;
	}

	return TRUE;
}

/*
 * Coming into this routine:
 *
 *  (1) lkcd->curpaddr points to the page address as specified in the dumpfile.
 *  (2) the dump_page header has been copied into lkcd->dump_page.
 *  (3) the file pointer is sitting at the beginning of the page data,
 *      be it compressed or otherwise.
* (4) lkcd->curhdroffs contains the file pointer to the incoming page's * header offset. * * If an empty page cache location is available, take it. Otherwise, evict * the entry indexed by evict_index, and then bump evict index. The hit_count * is only gathered for dump_lkcd_environment(). * * If the page is compressed, uncompress it into the selected page cache entry. * If the page is raw, just copy it into the selected page cache entry. * If all works OK, update lkcd->curbufptr to point to the page's uncompressed * data. * */ static int cache_page(void) { int i; ulong type; int found, newsz; uint32_t rawsz; ssize_t bytes ATTRIBUTE_UNUSED; for (i = found = 0; i < LKCD_CACHED_PAGES; i++) { if (LKCD_VALID_PAGE(lkcd->page_cache_hdr[i].pg_flags)) continue; found = TRUE; break; } if (!found) { i = lkcd->evict_index; lkcd->page_cache_hdr[i].pg_hit_count = 0; lkcd->evict_index = (lkcd->evict_index+1) % LKCD_CACHED_PAGES; lkcd->evictions++; } lkcd->page_cache_hdr[i].pg_flags = 0; lkcd->page_cache_hdr[i].pg_addr = lkcd->curpaddr; lkcd->page_cache_hdr[i].pg_hit_count++; type = lkcd->get_dp_flags() & (LKCD_DUMP_COMPRESSED|LKCD_DUMP_RAW); switch (type) { case LKCD_DUMP_COMPRESSED: if (LKCD_DEBUG(2)) dump_dump_page("cmp: ", lkcd->dump_page); newsz = 0; BZERO(lkcd->compressed_page, lkcd->page_size); bytes = read(lkcd->fd, lkcd->compressed_page, lkcd->get_dp_size()); switch (lkcd->compression) { case LKCD_DUMP_COMPRESS_NONE: lkcd_print("dump_header: DUMP_COMPRESS_NONE and " "dump_page: DUMP_COMPRESSED (?)\n"); return FALSE; case LKCD_DUMP_COMPRESS_RLE: if (!lkcd_uncompress_RLE((unsigned char *) lkcd->compressed_page, (unsigned char *)lkcd->page_cache_hdr[i].pg_bufptr, lkcd->get_dp_size(), &newsz) || (newsz != lkcd->page_size)) { lkcd_print("uncompress of page "); lkcd_print(BITS32() ? 
"%llx failed!\n" : "%lx failed!\n", lkcd->get_dp_address()); lkcd_print("newsz returned: %d\n", newsz); return FALSE; } break; case LKCD_DUMP_COMPRESS_GZIP: if (!lkcd_uncompress_gzip((unsigned char *) lkcd->page_cache_hdr[i].pg_bufptr, lkcd->page_size, (unsigned char *)lkcd->compressed_page, lkcd->get_dp_size())) { lkcd_print("uncompress of page "); lkcd_print(BITS32() ? "%llx failed!\n" : "%lx failed!\n", lkcd->get_dp_address()); return FALSE; } break; } break; case LKCD_DUMP_RAW: if (LKCD_DEBUG(2)) dump_dump_page("raw: ", lkcd->dump_page); if ((rawsz = lkcd->get_dp_size()) == 0) BZERO(lkcd->page_cache_hdr[i].pg_bufptr, lkcd->page_size); else if (rawsz == lkcd->page_size) bytes = read(lkcd->fd, lkcd->page_cache_hdr[i].pg_bufptr, lkcd->page_size); else { lkcd_print("cache_page: " "invalid LKCD_DUMP_RAW dp_size\n"); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); return FALSE; } break; default: lkcd_print("cache_page: bogus page:\n"); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); return FALSE; } lkcd->page_cache_hdr[i].pg_flags |= LKCD_VALID; lkcd->curbufptr = lkcd->page_cache_hdr[i].pg_bufptr; hash_page(type); return TRUE; } /* * Uncompress an RLE-encoded buffer. 
*/ static int lkcd_uncompress_RLE(unsigned char *cbuf, unsigned char *ucbuf, uint32_t blk_size, int *new_size) { int i; unsigned char value, count, cur_byte; uint32_t ri, wi; /* initialize the read / write indices */ ri = wi = 0; /* otherwise decompress using run length encoding */ while(ri < blk_size) { cur_byte = cbuf[ri++]; if (cur_byte == 0) { count = cbuf[ri++]; if (count == 0) { ucbuf[wi++] = 0; } else { value = cbuf[ri++]; for (i = 0; i <= count; i++) { ucbuf[wi++] = value; } } } else { ucbuf[wi++] = cur_byte; } /* if our write index is beyond the page size, exit out */ if (wi > /* PAGE_SIZE */ lkcd->page_size) { lkcd_print( "Attempted to decompress beyond page boundaries: file corrupted!\n"); return (0); } } /* set return size to be equal to uncompressed size (in bytes) */ *new_size = wi; return 1; } /* Returns the bit offset if it's able to correct, or negative if not */ static int uncompress_recover(unsigned char *dest, ulong destlen, unsigned char *source, ulong sourcelen) { int byte, bit; ulong retlen = destlen; int good_decomp = 0, good_rv = -1; /* Generate all single bit errors */ if (sourcelen > 16384) { lkcd_print("uncompress_recover: sourcelen %ld too long\n", sourcelen); return(-1); } for (byte = 0; byte < sourcelen; byte++) { for (bit = 0; bit < 8; bit++) { source[byte] ^= (1 << bit); if (uncompress(dest, &retlen, source, sourcelen) == Z_OK && retlen == destlen) { good_decomp++; lkcd_print("good for flipping byte %d bit %d\n", byte, bit); good_rv = bit + byte * 8; } /* Put it back */ source[byte] ^= (1 << bit); } } if (good_decomp == 0) { lkcd_print("Could not correct gzip errors.\n"); return -2; } else if (good_decomp > 1) { lkcd_print("Too many valid gzip decompressions: %d.\n", good_decomp); return -3; } else { source[good_rv >> 8] ^= 1 << (good_rv % 8); uncompress(dest, &retlen, source, sourcelen); source[good_rv >> 8] ^= 1 << (good_rv % 8); return good_rv; } } /* * Uncompress a gzip'd buffer. * * Returns FALSE on error. 
If set, then
 * a non-negative value of uncompress_errloc indicates the location of
 * a single-bit error, and the data may be used.
 */
static int
lkcd_uncompress_gzip(unsigned char *dest, ulong destlen,
		     unsigned char *source, ulong sourcelen)
{
	ulong retlen = destlen;
	int rc = FALSE;

	switch (uncompress(dest, &retlen, source, sourcelen))
	{
	case Z_OK:
		if (retlen == destlen) {
			rc = TRUE;
			break;
		}
		/* Decompressed cleanly but to the wrong size. */
		lkcd_print("uncompress: returned length not page size: %ld\n",
			retlen);
		rc = FALSE;
		break;

	case Z_MEM_ERROR:
		lkcd_print("uncompress: Z_MEM_ERROR (not enough memory)\n");
		rc = FALSE;
		break;

	case Z_BUF_ERROR:
		lkcd_print("uncompress: "
			"Z_BUF_ERROR (not enough room in output buffer)\n");
		rc = FALSE;
		break;

	case Z_DATA_ERROR:
		lkcd_print("uncompress: Z_DATA_ERROR (input data corrupted)\n");
		rc = FALSE;
		break;

	default:
		rc = FALSE;
		break;
	}

	/* On failure, try to recover from a single-bit error; the result
	 * location (or a negative code) lands in uncompress_errloc. */
	if (rc == FALSE) {
		uncompress_errloc =
			uncompress_recover(dest, destlen, source, sourcelen);
	}

	return rc;
}

/*
 * Generic print routine to handle integral and remote daemon usage of
 */
void
lkcd_print(char *fmt, ...)
{
	char buf[BUFSIZE];
	va_list ap;

	if (!fmt || !strlen(fmt))
		return;

	va_start(ap, fmt);
	(void)vsnprintf(buf, BUFSIZE, fmt, ap);
	va_end(ap);

	/* Route to the registered stream, or the console fallback. */
	if (lkcd->fp)
		fprintf(lkcd->fp, "%s", buf);
	else
		console(buf);
}

/*
 * Try to read the current dump page header, reporting back either
 * LKCD_DUMPFILE_EOF, LKCD_DUMPFILE_END or LKCD_DUMPFILE_OK.  The header's
 * file pointer position is saved in lkcd->curhdroffs.  If the page is
 * an even megabyte, save its offset.
*/ int lkcd_load_dump_page_header(void *dp, ulong page) { uint32_t dp_flags; uint64_t dp_address, physaddr; off_t page_offset; int ret; /* This is wasted effort */ page_offset = lkcd->curhdroffs = lseek(lkcd->fd, 0, SEEK_CUR); if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) { if (page > lkcd->total_pages) lkcd_dumpfile_complaint(page, lkcd->total_pages, LKCD_DUMPFILE_EOF); return LKCD_DUMPFILE_EOF; } dp_flags = lkcd->get_dp_flags(); dp_address = lkcd->get_dp_address(); if (dp_flags & LKCD_DUMP_END) { return LKCD_DUMPFILE_END; } if ((lkcd->flags & LKCD_VALID) && (page > lkcd->total_pages)) lkcd->total_pages = page; #ifdef X86 /* * Ugly leftover from very early x86 LKCD versions which used * the kernel unity-mapped virtual address as the dp_address. */ if ((page == 0) && !(lkcd->flags & LKCD_VALID) && (lkcd->version == LKCD_DUMP_V1) && (dp_address == 0xc0000000)) lkcd->kvbase = dp_address; #endif physaddr = dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1) ? (dp_address - lkcd->kvbase) << lkcd->page_shift : dp_address - lkcd->kvbase; if ((ret = save_offset(physaddr, page_offset)) < 0) { return LKCD_DUMPFILE_EOF; /* really an error */ } lkcd->zoned_offsets += ret; /* return = 0 if already known */ if (page_offset > lkcd->page_offset_max) { /* doesn't this mean I have to re-read this dp? */ lkcd->page_offset_max = page_offset; } return LKCD_DUMPFILE_OK; } /* * Register a complaint one time, if appropriate. */ void lkcd_dumpfile_complaint(uint32_t realpages, uint32_t dh_num_pages, int retval) { if (lkcd->flags & LKCD_BAD_DUMP) return; lkcd->flags |= LKCD_BAD_DUMP; if (realpages > dh_num_pages) { lkcd_print( "\n\nWARNING: This dumpfile contains more pages than the amount indicated\n" " in the dumpfile header. 
This is indicative of a failure during\n" " the post-panic creation of the dumpfile on the dump device.\n\n"); } if (realpages < dh_num_pages) { lkcd_print( "\n\nWARNING: This dumpfile contains fewer pages than the amount indicated\n" " in the dumpfile header. This is indicative of a failure during\n" " the creation of the dumpfile during boot.\n\n"); } } int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp) { switch (lkcd->version) { case LKCD_DUMP_V8: case LKCD_DUMP_V9: return get_lkcd_regs_for_cpu_v8(bt, eip, esp); default: return -1; } } crash-utility-crash-9cd43f5/mips.c0000664000372000037200000010032315107550337016507 0ustar juerghjuergh/* * mips.c - core analysis suite * * Copyright (C) 2015 Rabin Vincent * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef MIPS #include #include "defs.h" /* From arch/mips/asm/include/pgtable{,-32}.h */ typedef ulong pgd_t; typedef ulong pte_t; #define PTE_ORDER 0 #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1) #define __PGD_ORDER (32 - 3 * (int)PAGESHIFT() + PGD_T_LOG2 + PTE_T_LOG2) #define PGD_ORDER (__PGD_ORDER >= 0 ? 
__PGD_ORDER : 0) #define PGD_SIZE (PAGESIZE() << PGD_ORDER) #define PGDIR_SHIFT (2 * PAGESHIFT() + PTE_ORDER - PTE_T_LOG2) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2) #define PTRS_PER_PTE ((PAGESIZE() << PTE_ORDER) / sizeof(pte_t)) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pte_offset(address) \ (((address) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) #define MIPS_CPU_RIXI 0x00800000llu #define MIPS32_EF_R0 6 #define MIPS32_EF_R29 35 #define MIPS32_EF_R31 37 #define MIPS32_EF_LO 38 #define MIPS32_EF_HI 39 #define MIPS32_EF_CP0_EPC 40 #define MIPS32_EF_CP0_BADVADDR 41 #define MIPS32_EF_CP0_STATUS 42 #define MIPS32_EF_CP0_CAUSE 43 static struct machine_specific mips_machine_specific = { 0 }; /* * Holds registers during the crash. */ static struct mips_regset *panic_task_regs; static void mips_display_machine_stats(void) { fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, "\n"); #define PRINT_PAGE_FLAG(flag) \ if (flag) \ fprintf(fp, " %14s: %08lx\n", #flag, flag) PRINT_PAGE_FLAG(_PAGE_PRESENT); PRINT_PAGE_FLAG(_PAGE_READ); PRINT_PAGE_FLAG(_PAGE_WRITE); PRINT_PAGE_FLAG(_PAGE_ACCESSED); PRINT_PAGE_FLAG(_PAGE_MODIFIED); PRINT_PAGE_FLAG(_PAGE_GLOBAL); PRINT_PAGE_FLAG(_PAGE_VALID); PRINT_PAGE_FLAG(_PAGE_NO_READ); PRINT_PAGE_FLAG(_PAGE_NO_EXEC); PRINT_PAGE_FLAG(_PAGE_DIRTY); } static void mips_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); mips_display_machine_stats(); } #define PGDIR_OFFSET(X) (((ulong)(X)) & (PGD_SIZE - 1)) static void mips_init_page_flags(void) { ulong shift = 0; _PAGE_PRESENT = 1UL << shift++; if (THIS_KERNEL_VERSION >= LINUX(4,1,0)) { _PAGE_WRITE = 1UL << shift++; _PAGE_ACCESSED = 1UL << shift++; _PAGE_MODIFIED = 1UL << shift++; 
_PAGE_NO_EXEC = 1UL << shift++; _PAGE_READ = _PAGE_NO_READ = 1UL << shift++; } else { ulonglong cpu_options; int rixi; ulong addr; addr = symbol_value("cpu_data") + MEMBER_OFFSET("cpuinfo_mips", "options"); readmem(addr, KVADDR, &cpu_options, sizeof(cpu_options), "cpu_data[0].options", FAULT_ON_ERROR); rixi = cpu_options & MIPS_CPU_RIXI; if (!rixi) _PAGE_READ = 1UL << shift++; _PAGE_WRITE = 1UL << shift++; _PAGE_ACCESSED = 1UL << shift++; _PAGE_MODIFIED = 1UL << shift++; if (rixi) { _PAGE_NO_EXEC = 1UL << shift++; _PAGE_NO_READ = 1UL << shift++; } } _PAGE_GLOBAL = 1UL << shift++; _PAGE_VALID = 1UL << shift++; _PAGE_DIRTY = 1UL << shift++; _PFN_SHIFT = PAGESHIFT() - 12 + shift + 3; } static int mips_translate_pte(ulong pte, void *physaddr, ulonglong pte64) { char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char buf[BUFSIZE]; int present; ulong paddr; int len1, len2, others; present = pte & _PAGE_PRESENT; paddr = (pte >> _PFN_SHIFT) << PAGESHIFT(); if (physaddr) { *(ulong *)physaddr = PAGEBASE(pte); return !!present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE")); if (!present) return !!present; sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER | RJUST, NULL), mkstring(physbuf, len2, CENTER | RJUST, NULL)); fprintf(fp, "("); others = 0; #define CHECK_PAGE_FLAG(flag) \ if ((_PAGE_##flag) && (pte & _PAGE_##flag)) \ fprintf(fp, "%s" #flag, others++ ? 
"|" : "") if (pte) { CHECK_PAGE_FLAG(PRESENT); CHECK_PAGE_FLAG(READ); CHECK_PAGE_FLAG(WRITE); CHECK_PAGE_FLAG(ACCESSED); CHECK_PAGE_FLAG(MODIFIED); CHECK_PAGE_FLAG(GLOBAL); CHECK_PAGE_FLAG(VALID); CHECK_PAGE_FLAG(NO_READ); CHECK_PAGE_FLAG(NO_EXEC); CHECK_PAGE_FLAG(DIRTY); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return !!present; } static int mips_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong invalid_pte_table = symbol_value("invalid_pte_table"); ulong *page_dir; ulong pgd_pte, page_table; ulong pte; ulong pbase; if (verbose) { const char *segment; if (vaddr < 0x80000000lu) segment = "useg"; else if (vaddr < 0xa0000000lu) segment = "kseg0"; else if (vaddr < 0xc0000000lu) segment = "kseg1"; else if (vaddr < 0xe0000000lu) segment = "ksseg"; else segment = "kseg3"; fprintf(fp, "SEGMENT: %s\n", segment); } if (vaddr >= 0x80000000lu && vaddr < 0xc0000000lu) { *paddr = VTOP(vaddr); return TRUE; } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + pgd_index(vaddr); FILL_PGD(PAGEBASE(pgd), KVADDR, PGD_SIZE); pgd_pte = ULONG(machdep->pgd + PGDIR_OFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %08lx => %lx\n", (ulong)page_dir, pgd_pte); if (pgd_pte == invalid_pte_table) { fprintf(fp, "invalid\n"); return FALSE; } page_table = VTOP(pgd_pte) + sizeof(pte_t) * pte_offset(vaddr); FILL_PTBL(PAGEBASE(page_table), PHYSADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %08lx => %08lx\n", page_table, pte); if (!(pte & _PAGE_PRESENT)) { if (verbose) { fprintf(fp, "\n"); mips_translate_pte((ulong)pte, 0, pte); } return FALSE; } pbase = (pte >> _PFN_SHIFT) << PAGESHIFT(); *paddr = pbase + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %08lx\n\n", pbase); mips_translate_pte(pte, 0, 0); } return TRUE; } static int mips_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd; if (!tc) error(FATAL, "current context 
invalid\n"); if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { ulong active_mm; readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } else { ulong mm; mm = task_mm(tc->task, TRUE); if (mm) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } return mips_pgd_vtop(pgd, vaddr, paddr, verbose); } static int mips_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if (!verbose && !IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); return TRUE; } return mips_pgd_vtop((ulong *)vt->kernel_pgd[0], kvaddr, paddr, verbose); } static void mips_dump_exception_stack(struct bt_info *bt, char *pt_regs) { struct mips_pt_regs_main *mains; struct mips_pt_regs_cp0 *cp0; int i; char buf[BUFSIZE]; mains = (struct mips_pt_regs_main *) (pt_regs + OFFSET(pt_regs_regs)); cp0 = (struct mips_pt_regs_cp0 *) \ (pt_regs + OFFSET(pt_regs_cp0_badvaddr)); for (i = 0; i < 32; i += 4) { fprintf(fp, " $%2d : %08lx %08lx %08lx %08lx\n", i, mains->regs[i], mains->regs[i+1], mains->regs[i+2], mains->regs[i+3]); } fprintf(fp, " Hi : %08lx\n", mains->hi); fprintf(fp, " Lo : %08lx\n", mains->lo); value_to_symstr(cp0->cp0_epc, buf, 16); fprintf(fp, " epc : %08lx %s\n", cp0->cp0_epc, buf); value_to_symstr(mains->regs[31], buf, 16); fprintf(fp, " ra : %08lx %s\n", mains->regs[31], buf); fprintf(fp, " Status: %08lx\n", mains->cp0_status); fprintf(fp, " Cause : %08lx\n", cp0->cp0_cause); fprintf(fp, " BadVA : %08lx\n", cp0->cp0_badvaddr); } struct mips_unwind_frame { ulong sp; ulong pc; ulong ra; }; static void mips_display_full_frame(struct bt_info *bt, struct mips_unwind_frame 
*current, struct mips_unwind_frame *previous) { ulong words, addr; ulong *up; char buf[BUFSIZE]; int i, u_idx; if (!INSTACK(previous->sp, bt) || !INSTACK(current->sp, bt)) return; words = (previous->sp - current->sp) / sizeof(ulong); if (words == 0) { fprintf(fp, " (no frame)\n"); return; } addr = current->sp; u_idx = (current->sp - bt->stackbase) / sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if ((i % 4) == 0) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx * sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } static int mips_is_exception_entry(struct syment *sym) { return STREQ(sym->name, "ret_from_exception") || STREQ(sym->name, "ret_from_irq") || STREQ(sym->name, "work_resched") || STREQ(sym->name, "handle_sys"); } static void mips_dump_backtrace_entry(struct bt_info *bt, struct syment *sym, struct mips_unwind_frame *current, struct mips_unwind_frame *previous, int level) { const char *name = sym ? sym->name : "(invalid)"; struct load_module *lm; char *name_plus_offset; char buf[BUFSIZE]; name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { struct syment *symp; ulong symbol_offset; symp = value_search(current->pc, &symbol_offset); if (symp && symbol_offset) name_plus_offset = value_to_symstr(current->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, current->sp, name_plus_offset ? 
name_plus_offset : name, current->pc); if (module_symbol(current->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); if (bt->flags & BT_LINE_NUMBERS) { char buf[BUFSIZE]; get_line_number(current->pc, buf, FALSE); if (strlen(buf)) fprintf(fp, " %s\n", buf); } if (sym && mips_is_exception_entry(sym)) { char pt_regs[SIZE(pt_regs)]; GET_STACK_DATA(current->sp, &pt_regs, SIZE(pt_regs)); mips_dump_exception_stack(bt, pt_regs); } if (bt->flags & BT_FULL) { fprintf(fp, " " "[PC: %08lx RA: %08lx SP: %08lx SIZE: %ld]\n", current->pc, current->ra, current->sp, previous->sp - current->sp); mips_display_full_frame(bt, current, previous); } } static void mips_analyze_function(ulong start, ulong offset, struct mips_unwind_frame *current, struct mips_unwind_frame *previous) { ulong rapos = 0; ulong spadjust = 0; ulong *funcbuf, *ip; ulong i; if (CRASHDEBUG(8)) fprintf(fp, "%s: start %#lx offset %#lx\n", __func__, start, offset); if (!offset) { previous->sp = current->sp; return; } ip = funcbuf = (ulong *)GETBUF(offset); if (!readmem(start, KVADDR, funcbuf, offset, "mips_analyze_function", RETURN_ON_ERROR)) { FREEBUF(funcbuf); error(FATAL, "Cannot read function at %8x", start); return; } for (i = 0; i < offset; i += 4) { ulong insn = *ip; ulong high = (insn >> 16) & 0xffff; ulong low = insn & 0xffff; if (CRASHDEBUG(8)) fprintf(fp, "insn @ %#lx = %#lx\n", start + i, insn); if (high == 0x27bd) { /* ADDIU sp, sp, imm */ if (!(low & 0x8000)) break; spadjust += 0x10000 - low; if (CRASHDEBUG(8)) fprintf(fp, "spadjust = %lu\n", spadjust); } else if (high == 0xafbf) { /* SW RA, imm(SP) */ rapos = current->sp + low; if (CRASHDEBUG(8)) fprintf(fp, "rapos %lx\n", rapos); break; } ip++; } FREEBUF(funcbuf); previous->sp = current->sp + spadjust; if (rapos && !readmem(rapos, KVADDR, ¤t->ra, sizeof(current->ra), "RA from stack", RETURN_ON_ERROR)) { error(FATAL, "Cannot read RA from stack %lx", rapos); return; } } static void mips_back_trace_cmd(struct bt_info *bt) { 
struct mips_unwind_frame current, previous; int level = 0; int invalid_ok = 1; if (bt->flags & BT_REGS_NOT_FOUND) return; previous.sp = previous.pc = previous.ra = 0; current.pc = bt->instptr; current.sp = bt->stkptr; current.ra = 0; if (bt->machdep) { struct mips_regset *regs = bt->machdep; previous.pc = current.ra = regs->regs[MIPS32_EF_R31]; } while (INSTACK(current.sp, bt)) { struct syment *symbol = NULL; ulong offset; if (CRASHDEBUG(8)) fprintf(fp, "level %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); if (!IS_KVADDR(current.pc) && !invalid_ok) return; symbol = value_search(current.pc, &offset); if (!symbol && !invalid_ok) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } invalid_ok = 0; /* * If we get an address which points to the start of a * function, then it could one of the following: * * - we are dealing with a noreturn function. The last call * from a noreturn function has an an ra which points to the * start of the function after it. This is common in the * oops callchain because of die() which is annotated as * noreturn. * * - we have taken an exception at the start of this function. * In this case we already have the RA in current.ra. 
* * - we are in one of these routines which appear with zero * offset in manually-constructed stack frames: * * * ret_from_exception * * ret_from_irq * * ret_from_fork * * ret_from_kernel_thread */ if (!current.ra && !offset && symbol && !STRNEQ(symbol->name, "ret_from")) { if (CRASHDEBUG(8)) fprintf(fp, "zero offset at %s, try previous symbol\n", symbol->name); symbol = value_search(current.pc - 4, &offset); if (!symbol) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } } if (symbol && mips_is_exception_entry(symbol)) { struct mips_pt_regs_main *mains; struct mips_pt_regs_cp0 *cp0; char pt_regs[SIZE(pt_regs)]; mains = (struct mips_pt_regs_main *) \ (pt_regs + OFFSET(pt_regs_regs)); cp0 = (struct mips_pt_regs_cp0 *) \ (pt_regs + OFFSET(pt_regs_cp0_badvaddr)); GET_STACK_DATA(current.sp, pt_regs, sizeof(pt_regs)); previous.ra = mains->regs[31]; previous.sp = mains->regs[29]; current.ra = cp0->cp0_epc; if (CRASHDEBUG(8)) fprintf(fp, "exception pc %#lx ra %#lx sp %lx\n", previous.pc, previous.ra, previous.sp); /* The PC causing the exception may have been invalid */ invalid_ok = 1; } else if (symbol) { mips_analyze_function(symbol->value, offset, ¤t, &previous); } else { /* * The current PC is invalid. Assume that the code * jumped through a invalid pointer and that the SP has * not been adjusted. 
*/ previous.sp = current.sp; } mips_dump_backtrace_entry(bt, symbol, ¤t, &previous, level++); current.pc = current.ra; current.sp = previous.sp; current.ra = previous.ra; if (CRASHDEBUG(8)) fprintf(fp, "next %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); previous.sp = previous.pc = previous.ra = 0; } } static int mips_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { const struct machine_specific *ms = machdep->machspec; struct mips_regset *regs; ulong epc, r29; if (!ms->crash_task_regs) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } regs = &ms->crash_task_regs[bt->tc->processor]; epc = regs->regs[MIPS32_EF_CP0_EPC]; r29 = regs->regs[MIPS32_EF_R29]; if (!epc && !r29) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } if (nip) *nip = epc; if (ksp) *ksp = r29; bt->machdep = regs; return TRUE; } static int mips_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (!bt->tc || !(tt->flags & THREAD_INFO)) return FALSE; if (!readmem(bt->task + OFFSET(task_struct_thread_reg31), KVADDR, pcp, sizeof(*pcp), "thread_struct.regs31", RETURN_ON_ERROR)) { return FALSE; } if (!readmem(bt->task + OFFSET(task_struct_thread_reg29), KVADDR, spp, sizeof(*spp), "thread_struct.regs29", RETURN_ON_ERROR)) { return FALSE; } return TRUE; } static void mips_stackframe_init(void) { long task_struct_thread = MEMBER_OFFSET("task_struct", "thread"); long thread_reg29 = MEMBER_OFFSET("thread_struct", "reg29"); long thread_reg31 = MEMBER_OFFSET("thread_struct", "reg31"); if ((task_struct_thread == INVALID_OFFSET) || (thread_reg29 == INVALID_OFFSET) || (thread_reg31 == INVALID_OFFSET)) { error(FATAL, "cannot determine thread_struct offsets\n"); return; } ASSIGN_OFFSET(task_struct_thread_reg29) = task_struct_thread + thread_reg29; ASSIGN_OFFSET(task_struct_thread_reg31) = task_struct_thread + thread_reg31; STRUCT_SIZE_INIT(pt_regs, "pt_regs"); MEMBER_OFFSET_INIT(pt_regs_regs, "pt_regs", "regs"); MEMBER_OFFSET_INIT(pt_regs_cp0_badvaddr, "pt_regs", 
"cp0_badvaddr"); } static void mips_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { int ret; *pcp = 0; *spp = 0; bt->machdep = NULL; if (DUMPFILE() && is_task_active(bt->task)) ret = mips_dumpfile_stack_frame(bt, pcp, spp); else ret = mips_get_frame(bt, pcp, spp); if (!ret) error(WARNING, "cannot determine starting stack frame for task %lx\n", bt->task); } static int mips_eframe_search(struct bt_info *bt) { return error(FATAL, "%s: not implemented\n", __func__); } static ulong mips_get_task_pgd(ulong task) { return error(FATAL, "%s: not implemented\n", __func__); } static int mips_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } static ulong mips_processor_speed(void) { return 0; } static int mips_get_smp_cpus(void) { return (get_cpus_online() > 0) ? get_cpus_online() : kt->cpus; } static ulong mips_vmalloc_start(void) { return first_vmalloc_address(); } /* * Retrieve task registers for the time of the crash. */ static int mips_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf32_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i; if (!symbol_exists("crash_notes")) return FALSE; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. 
*/ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return FALSE; } if (symbol_exists("__per_cpu_offset")) { /* Add __per_cpu_offset for each cpu to form the pointer to the notes */ for (i = 0; icpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i=0;icpus;i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "failed to read note_buf_t\n"); goto fail; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf32_Nhdr *)buf; p = buf + sizeof(Elf32_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. */ long notesz = SIZE(note_buf) - sizeof(Elf32_Nhdr); if (sizeof(Elf32_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } if (note->n_type != NT_PRSTATUS) { error(WARNING, "invalid note (n_type != NT_PRSTATUS)\n"); goto fail; } if (p[0] != 'C' || p[1] != 'O' || p[2] != 'R' || p[3] != 'E') { error(WARNING, "invalid note (name != \"CORE\"\n"); goto fail; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. 
*/ offset = sizeof(Elf32_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); return TRUE; fail: FREEBUF(buf); FREEBUF(notes_ptrs); free(panic_task_regs); return FALSE; } static int mips_get_elf_notes(void) { struct machine_specific *ms = machdep->machspec; int i; if (!DISKDUMP_DUMPFILE() && !KDUMP_DUMPFILE()) return FALSE; panic_task_regs = calloc(kt->cpus, sizeof(*panic_task_regs)); if (!panic_task_regs) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { Elf32_Nhdr *note = NULL; size_t len; if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (!note) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); BCOPY((char *)note + len + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } ms->crash_task_regs = panic_task_regs; return TRUE; } static int mips_init_active_task_regs(void) { int retval; retval = mips_get_crash_notes(); if (retval == TRUE) return retval; return mips_get_elf_notes(); } static int mips_verify_symbol(const char *name, ulong value, char type) { if (STREQ(name, "_text")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STRNEQ(name, "__func__.") && !STRNEQ(name, "__crc_")); } void mips_dump_machdep_table(ulong arg) { int others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " pgdir_shift: %d\n", PGDIR_SHIFT); fprintf(fp, " ptrs_per_pgd: %lu\n", PTRS_PER_PGD); fprintf(fp, " ptrs_per_pte: %d\n", PTRS_PER_PTE); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: mips_eframe_search()\n"); fprintf(fp, " back_trace: mips_back_trace_cmd()\n"); fprintf(fp, " processor_speed: mips_processor_speed()\n"); fprintf(fp, " uvtop: mips_uvtop()\n"); fprintf(fp, " kvtop: mips_kvtop()\n"); fprintf(fp, " get_task_pgd: mips_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " get_stack_frame: mips_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: mips_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: mips_vmalloc_start()\n"); fprintf(fp, " is_task_addr: mips_is_task_addr()\n"); fprintf(fp, " verify_symbol: mips_verify_symbol()\n"); fprintf(fp, " dis_filter: generic_dis_filter()\n"); fprintf(fp, " cmd_mach: mips_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: mips_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); 
fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: NULL\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } static ulong mips_get_page_size(void) { struct syment *spd, *next = NULL; spd = symbol_search("swapper_pg_dir"); if (spd) next = next_symbol(NULL, spd); if (!spd || !next) return memory_page_size(); return next->value - spd->value; } void mips_init(int when) { #if defined(__i386__) || defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the MIPS architecture\n"); #endif switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf32_notes; break; case PRE_SYMTAB: machdep->verify_symbol = mips_verify_symbol; machdep->machspec = &mips_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: machdep->pagesize = mips_get_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); if (machdep->pagesize >= 16384) machdep->stacksize = machdep->pagesize; else machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = malloc(PGD_SIZE)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->ptbl = 
malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->kvbase = 0x80000000; machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->uvtop = mips_uvtop; machdep->kvtop = mips_kvtop; machdep->vmalloc_start = mips_vmalloc_start; machdep->eframe_search = mips_eframe_search; machdep->back_trace = mips_back_trace_cmd; machdep->processor_speed = mips_processor_speed; machdep->get_task_pgd = mips_get_task_pgd; machdep->get_stack_frame = mips_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = mips_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = mips_is_task_addr; machdep->dis_filter = generic_dis_filter; machdep->cmd_mach = mips_cmd_mach; machdep->get_smp_cpus = mips_get_smp_cpus; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: mips_init_page_flags(); machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); mips_stackframe_init(); if (!machdep->hz) machdep->hz = 100; MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); STRUCT_SIZE_INIT(note_buf, "note_buf_t"); break; case POST_VM: /* * crash_notes contains machine specific information about the * crash. In particular, it contains CPU registers at the time * of the crash. We need this information to extract correct * backtraces from the panic task. 
*/ if (!ACTIVE() && !mips_init_active_task_regs()) error(WARNING, "cannot retrieve registers for active task%s\n\n", kt->cpus > 1 ? "s" : ""); } } void mips_display_regs_from_elf_notes(int cpu, FILE *ofp) { const struct machine_specific *ms = machdep->machspec; struct mips_regset *regs; if (!ms->crash_task_regs) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } regs = &ms->crash_task_regs[cpu]; if (!regs->regs[MIPS32_EF_R29] && !regs->regs[MIPS32_EF_CP0_EPC]) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } fprintf(ofp, " R0: %08lx R1: %08lx R2: %08lx\n" " R3: %08lx R4: %08lx R5: %08lx\n" " R6: %08lx R7: %08lx R8: %08lx\n" " R9: %08lx R10: %08lx R11: %08lx\n" " R12: %08lx R13: %08lx R14: %08lx\n" " R15: %08lx R16: %08lx R17: %08lx\n" " R18: %08lx R19: %08lx R20: %08lx\n" " R21: %08lx R22: %08lx R23: %08lx\n" " R24: %08lx R25: %08lx R26: %08lx\n" " R27: %08lx R28: %08lx R29: %08lx\n" " R30: %08lx R31: %08lx\n" " LO: %08lx HI: %08lx\n" " EPC: %08lx BADVADDR: %08lx\n" " STATUS: %08lx CAUSE: %08lx\n", regs->regs[MIPS32_EF_R0], regs->regs[MIPS32_EF_R0 + 1], regs->regs[MIPS32_EF_R0 + 2], regs->regs[MIPS32_EF_R0 + 3], regs->regs[MIPS32_EF_R0 + 4], regs->regs[MIPS32_EF_R0 + 5], regs->regs[MIPS32_EF_R0 + 6], regs->regs[MIPS32_EF_R0 + 7], regs->regs[MIPS32_EF_R0 + 8], regs->regs[MIPS32_EF_R0 + 9], regs->regs[MIPS32_EF_R0 + 10], regs->regs[MIPS32_EF_R0 + 11], regs->regs[MIPS32_EF_R0 + 12], regs->regs[MIPS32_EF_R0 + 13], regs->regs[MIPS32_EF_R0 + 14], regs->regs[MIPS32_EF_R0 + 15], regs->regs[MIPS32_EF_R0 + 16], regs->regs[MIPS32_EF_R0 + 17], regs->regs[MIPS32_EF_R0 + 18], regs->regs[MIPS32_EF_R0 + 19], regs->regs[MIPS32_EF_R0 + 20], regs->regs[MIPS32_EF_R0 + 21], regs->regs[MIPS32_EF_R0 + 22], regs->regs[MIPS32_EF_R0 + 23], regs->regs[MIPS32_EF_R0 + 24], regs->regs[MIPS32_EF_R0 + 25], regs->regs[MIPS32_EF_R0 + 26], regs->regs[MIPS32_EF_R0 + 27], regs->regs[MIPS32_EF_R0 + 28], regs->regs[MIPS32_EF_R0 + 29], regs->regs[MIPS32_EF_R0 + 
30], regs->regs[MIPS32_EF_R0 + 31], regs->regs[MIPS32_EF_LO], regs->regs[MIPS32_EF_HI], regs->regs[MIPS32_EF_CP0_EPC], regs->regs[MIPS32_EF_CP0_BADVADDR], regs->regs[MIPS32_EF_CP0_STATUS], regs->regs[MIPS32_EF_CP0_CAUSE]); } #else #include "defs.h" void mips_display_regs_from_elf_notes(int cpu, FILE *ofp) { return; } #endif /* !MIPS */ crash-utility-crash-9cd43f5/maple_tree.h0000664000372000037200000000313015107550337017657 0ustar juerghjuergh/* SPDX-License-Identifier: GPL-2.0+ */ #ifndef _MAPLE_TREE_H #define _MAPLE_TREE_H /* * Maple Tree - An RCU-safe adaptive tree for storing ranges * Copyright (c) 2018-2022 Oracle * Authors: Liam R. Howlett * Matthew Wilcox * * eXtensible Arrays * Copyright (c) 2017 Microsoft Corporation * Author: Matthew Wilcox * * See Documentation/core-api/xarray.rst for how to use the XArray. */ #include #include #include /* * The following are copied and modified from include/linux/maple_tree.h */ enum maple_type { maple_dense, maple_leaf_64, maple_range_64, maple_arange_64, }; #define MAPLE_NODE_MASK 255UL #define MT_FLAGS_HEIGHT_OFFSET 0x02 #define MT_FLAGS_HEIGHT_MASK 0x7C #define MAPLE_NODE_TYPE_MASK 0x0F #define MAPLE_NODE_TYPE_SHIFT 0x03 #define MAPLE_RESERVED_RANGE 4096 /* * The following are copied and modified from include/linux/xarray.h */ #define XA_ZERO_ENTRY xa_mk_internal(257) static inline ulong xa_mk_internal(ulong v) { return (v << 2) | 2; } static inline bool xa_is_internal(ulong entry) { return (entry & 3) == 2; } static inline bool xa_is_node(ulong entry) { return xa_is_internal(entry) && entry > 4096; } static inline bool xa_is_value(ulong entry) { return entry & 1; } static inline bool xa_is_zero(ulong entry) { return entry == XA_ZERO_ENTRY; } static inline unsigned long xa_to_internal(ulong entry) { return entry >> 2; } static inline unsigned long xa_to_value(ulong entry) { return entry >> 1; } #endif /* _MAPLE_TREE_H */ crash-utility-crash-9cd43f5/.gitignore0000664000372000037200000000047215107550337017367 0ustar 
juerghjuerghconfigure build_data.c cscope.out crashlib.a *.o crash CFLAGS.extra LDFLAGS.extra crash.spec *.gz *.rpm gdb.files gdb-7.6/ gdb-10.2/ gdb-16.2/ extensions/defs.h extensions/*.so extensions/eppic # cscope files cscope.* ncscope.* # ctags files tags TAGS # Clang's compilation database file /compile_commands.json crash-utility-crash-9cd43f5/vmware_guestdump.c0000664000372000037200000003434315107550337021145 0ustar juerghjuergh/* * vmware_guestdump.c * * Copyright (c) 2020 VMware, Inc. * Copyright (c) 2024 Broadcom. All Rights Reserved. The term "Broadcom" * refers to Broadcom Inc. and/or its subsidiaries. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Alexey Makhalov */ #include "defs.h" #include "vmware_vmss.h" #define LOGPRX "vmw: " /* * debug.guest file layout * 00000000: guest dump header, it includes: * 1. Version (4 bytes) \ * 2. Number of Virtual CPUs (4 bytes) } - struct guestdumpheader * 3. Reserved gap * 4. Main Memory information - struct mainmeminfo{,_old} * 5. Reserved gap #2. Only in v7+ * (use get_vcpus_offset() to get total size of guestdumpheader) * vcpus_offset: ---------\ * 1. struct vcpu_state1 \ * 2. reserved gap } num_vcpus times * 3. struct vcpu_state2 / * 4. 
4KB of reserved data / * --------/ * */ struct guestdumpheader { uint32_t version; uint32_t num_vcpus; } __attribute__((packed)) hdr; struct mainmeminfo { uint64_t last_addr; uint64_t memsize_in_pages; uint32_t reserved1; uint32_t mem_holes; struct memhole { uint64_t ppn; uint64_t pages; } holes[2]; } __attribute__((packed)); /* Used by version 1 only */ struct mainmeminfo_old { uint64_t last_addr; uint32_t memsize_in_pages; uint32_t reserved1; uint32_t mem_holes; struct memhole1 { uint32_t ppn; uint32_t pages; } holes[2]; /* There are additional fields, see get_vcpus_offset() calculation. */ } __attribute__((packed)); /* First half of vcpu_state */ struct vcpu_state1 { uint32_t cr0; uint64_t cr2; uint64_t cr3; uint64_t cr4; uint64_t reserved1[10]; uint64_t idt_base; } __attribute__((packed)); /* * Unused fields between vcpu_state1 and vcpu_state2 swill be skipped. * See get_vcpu_gapsize() calculation. */ /* Second half of vcpu_state */ struct vcpu_state2 { struct x86_64_pt_regs { uint64_t r15; uint64_t r14; uint64_t r13; uint64_t r12; uint64_t rbp; uint64_t rbx; uint64_t r11; uint64_t r10; uint64_t r9; uint64_t r8; uint64_t rax; uint64_t rcx; uint64_t rdx; uint64_t rsi; uint64_t rdi; uint64_t orig_rax; uint64_t rip; uint64_t cs; uint64_t eflags; uint64_t rsp; uint64_t ss; uint64_t fs_base; uint64_t gs_base; uint64_t ds; uint64_t es; uint64_t fs; uint64_t gs; } regs64; uint8_t reserved3[17]; } __attribute__((packed)); typedef enum { CPU_ARCH_AARCH64, CPU_ARCH_X86, } cpu_arch; /* * Returns the size of reserved gap #2 in the header right after the Main Mem. */ static inline long get_gap2_size(uint32_t version) { if (version == 7) return 11; return 0; } /* * Returns the size of the guest dump header. */ static inline long get_vcpus_offset(uint32_t version, int mem_holes) { switch (version) { case 1: /* ESXi 6.7 and older */ return sizeof(struct guestdumpheader) + 13 + sizeof(struct mainmeminfo_old) + (mem_holes == -1 ? 
0 : 8 * mem_holes + 4); case 3: /* ESXi 6.8 */ return sizeof(struct guestdumpheader) + 14 + sizeof(struct mainmeminfo); case 4: /* ESXi 7.0 */ case 5: /* ESXi 8.0 */ return sizeof(struct guestdumpheader) + 14 + sizeof(struct mainmeminfo); case 6: /* ESXi 8.0u2 */ return sizeof(struct guestdumpheader) + 15 + sizeof(struct mainmeminfo); case 7: /* ESXi 9.0 */ return sizeof(struct guestdumpheader) + 8 + sizeof(struct mainmeminfo) + get_gap2_size(version); } return 0; } /* * Returns the size of reserved (unused) fields in the middle of vcpu_state structure. */ static inline long get_vcpu_gapsize(uint32_t version) { if (version < 4) return 45; return 42; } /* * vmware_guestdump is an extension to the vmware_vmss with ability to debug * debug.guest and debug.vmem files. * * debug.guest.gz and debug.vmem.gz can be obtained using following * .vmx options from VM running in debug mode: * monitor.mini-suspend_on_panic = TRUE * monitor.suspend_on_triplefault = TRUE * * guestdump (debug.guest) is a simplified version of the *.vmss which does * not contain a full VM state, but minimal guest state, such as a memory * layout and CPUs state, needed for the debugger. is_vmware_guestdump() * and vmware_guestdump_init() functions parse guestdump header and * populate vmss data structure (from vmware_vmss.c). In result, all * handlers (except memory_dump) from vmware_vmss.c can be reused. * * debug.guest does not have a dedicated header magic or file format signature * To probe debug.guest we need to perform series of validations. In addition, * we check for the filename extension, which must be ".guest". 
*/ int is_vmware_guestdump(char *filename) { struct mainmeminfo mmi; long vcpus_offset; FILE *fp; uint64_t filesize, expected_filesize, holes_sum = 0; int i; if (strcmp(filename + strlen(filename) - 6, ".guest")) return FALSE; if ((fp = fopen(filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': [Error %d] %s\n", filename, errno, strerror(errno)); return FALSE; } if (fread(&hdr, sizeof(struct guestdumpheader), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "guestdumpheader", filename, errno, strerror(errno)); fclose(fp); return FALSE; } vcpus_offset = get_vcpus_offset(hdr.version, -1 /* Unknown yet, adjust it later */); if (!vcpus_offset) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Not supported version %d\n", hdr.version); fclose(fp); return FALSE; } if (hdr.version == 1) { struct mainmeminfo_old tmp; if (fseek(fp, vcpus_offset - sizeof(struct mainmeminfo_old), SEEK_SET) == -1) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n", filename, errno, strerror(errno)); fclose(fp); return FALSE; } if (fread(&tmp, sizeof(struct mainmeminfo_old), 1, fp) != 1) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "mainmeminfo_old", filename, errno, strerror(errno)); fclose(fp); return FALSE; } mmi.last_addr = tmp.last_addr; mmi.memsize_in_pages = tmp.memsize_in_pages; mmi.mem_holes = tmp.mem_holes; mmi.holes[0].ppn = tmp.holes[0].ppn; mmi.holes[0].pages = tmp.holes[0].pages; mmi.holes[1].ppn = tmp.holes[1].ppn; mmi.holes[1].pages = tmp.holes[1].pages; /* vcpu_offset adjustment for mem_holes is required only for version 1. 
*/ vcpus_offset = get_vcpus_offset(hdr.version, mmi.mem_holes); } else { if (fseek(fp, vcpus_offset - sizeof(struct mainmeminfo) - get_gap2_size(hdr.version), SEEK_SET) == -1) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n", filename, errno, strerror(errno)); fclose(fp); return FALSE; } if (fread(&mmi, sizeof(struct mainmeminfo), 1, fp) != 1) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "mainmeminfo", filename, errno, strerror(errno)); fclose(fp); return FALSE; } /* Check CPU architecture field. Next 4 bytes after the Main Mem */ if (hdr.version >= 7) { cpu_arch arch; if (fread(&arch, sizeof(cpu_arch), 1, fp) != 1) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "CPU arch", filename, errno, strerror(errno)); fclose(fp); return FALSE; } if (arch != CPU_ARCH_X86) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Invalid or unsupported CPU architecture: %d\n", arch); fclose(fp); return FALSE; } } } if (fseek(fp, 0L, SEEK_END) == -1) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n", filename, errno, strerror(errno)); fclose(fp); return FALSE; } filesize = ftell(fp); fclose(fp); if (mmi.mem_holes > 2) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Unexpected mmi.mem_holes value %d\n", mmi.mem_holes); return FALSE; } for (i = 0; i < mmi.mem_holes; i++) { /* hole start page */ vmss.regions[i].startpagenum = mmi.holes[i].ppn; /* hole end page */ vmss.regions[i].startppn = mmi.holes[i].ppn + mmi.holes[i].pages; holes_sum += mmi.holes[i].pages; } if ((mmi.last_addr + 1) != ((mmi.memsize_in_pages + holes_sum) << VMW_PAGE_SHIFT)) { if (CRASHDEBUG(1)) error(INFO, LOGPRX"Memory size check failed\n"); return FALSE; } expected_filesize = vcpus_offset + hdr.num_vcpus * (sizeof(struct vcpu_state1) + get_vcpu_gapsize(hdr.version) + sizeof(struct vcpu_state2) + VMW_PAGE_SIZE); if (filesize != expected_filesize) { if (CRASHDEBUG(1)) error(INFO, 
LOGPRX"Incorrect file size: %d != %d\n", filesize, expected_filesize); return FALSE; } vmss.memsize = mmi.memsize_in_pages << VMW_PAGE_SHIFT; vmss.regionscount = mmi.mem_holes + 1; vmss.memoffset = 0; vmss.num_vcpus = hdr.num_vcpus; return TRUE; } int vmware_guestdump_init(char *filename, FILE *ofp) { FILE *fp = NULL; int i, result = TRUE; char *vmem_filename = NULL; struct vcpu_state1 vs1; struct vcpu_state2 vs2; char *p; if (!machine_type("X86") && !machine_type("X86_64")) { error(INFO, LOGPRX"Invalid or unsupported host architecture for .guest file: %s\n", MACHINE_TYPE); result = FALSE; goto exit; } if ((fp = fopen(filename, "r")) == NULL) { error(INFO, LOGPRX"Failed to open '%s': [Error %d] %s\n", filename, errno, strerror(errno)); result = FALSE; goto exit; } if (fseek(fp, get_vcpus_offset(hdr.version, vmss.regionscount - 1), SEEK_SET) == -1) { error(INFO, LOGPRX"Failed to fseek '%s': [Error %d] %s\n", filename, errno, strerror(errno)); result = FALSE; goto exit; } vmss.vcpu_regs = malloc(vmss.num_vcpus * sizeof(uint64_t)); vmss.regs64 = calloc(vmss.num_vcpus, sizeof(void *)); if (!vmss.vcpu_regs || !vmss.regs64) { error(INFO, LOGPRX"Failed to allocate memory\n"); result = FALSE; goto exit; } for (i = 0; i < vmss.num_vcpus; i++) { if (fread(&vs1, sizeof(struct vcpu_state1), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "vcpu_state", filename, errno, strerror(errno)); result = FALSE; goto exit; } if (fseek(fp, get_vcpu_gapsize(hdr.version), SEEK_CUR) == -1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "vcpu_state", filename, errno, strerror(errno)); result = FALSE; goto exit; } if (fread(&vs2, sizeof(struct vcpu_state2), 1, fp) != 1) { error(INFO, LOGPRX"Failed to read '%s' from file '%s': [Error %d] %s\n", "vcpu_state", filename, errno, strerror(errno)); result = FALSE; goto exit; } vmss.regs64[i] = calloc(1, sizeof(vmssregs64)); if (!vmss.regs64[i]) { error(INFO, LOGPRX"Failed to allocate 
memory\n"); result = FALSE; goto exit; } vmss.vcpu_regs[i] = 0; vmss.regs64[i]->rax = vs2.regs64.rax; vmss.regs64[i]->rcx = vs2.regs64.rcx; vmss.regs64[i]->rdx = vs2.regs64.rdx; vmss.regs64[i]->rbx = vs2.regs64.rbx; vmss.regs64[i]->rbp = vs2.regs64.rbp; vmss.regs64[i]->rsp = vs2.regs64.rsp; vmss.regs64[i]->rsi = vs2.regs64.rsi; vmss.regs64[i]->rdi = vs2.regs64.rdi; vmss.regs64[i]->r8 = vs2.regs64.r8; vmss.regs64[i]->r9 = vs2.regs64.r9; vmss.regs64[i]->r10 = vs2.regs64.r10; vmss.regs64[i]->r11 = vs2.regs64.r11; vmss.regs64[i]->r12 = vs2.regs64.r12; vmss.regs64[i]->r13 = vs2.regs64.r13; vmss.regs64[i]->r14 = vs2.regs64.r14; vmss.regs64[i]->r15 = vs2.regs64.r15; vmss.regs64[i]->idtr = vs1.idt_base; vmss.regs64[i]->cr[0] = vs1.cr0; vmss.regs64[i]->cr[2] = vs1.cr2; vmss.regs64[i]->cr[3] = vs1.cr3; vmss.regs64[i]->cr[4] = vs1.cr4; vmss.regs64[i]->rip = vs2.regs64.rip; vmss.regs64[i]->rflags = vs2.regs64.eflags; vmss.regs64[i]->es = vs2.regs64.es; vmss.regs64[i]->cs = vs2.regs64.cs; vmss.regs64[i]->ss = vs2.regs64.ss; vmss.regs64[i]->ds = vs2.regs64.ds; vmss.regs64[i]->fs = vs2.regs64.fs; vmss.regs64[i]->gs = vs2.regs64.gs; vmss.regs64[i]->fs_base = vs2.regs64.fs_base; vmss.regs64[i]->gs_base = vs2.regs64.gs_base; vmss.vcpu_regs[i] = REGS_PRESENT_ALL; } vmem_filename = strdup(filename); p = vmem_filename + strlen(vmem_filename) - 5; if (strcmp(p, "guest") != 0) { result = FALSE; goto exit; } strcpy(p, "vmem"); fprintf(ofp, LOGPRX"Open the companion vmem file: %s\n", vmem_filename); if ((vmss.dfp = fopen(vmem_filename, "r")) == NULL) { error(INFO, LOGPRX"%s: %s\n", vmem_filename, strerror(errno)); result = FALSE; goto exit; } fseek(vmss.dfp, 0L, SEEK_END); if (vmss.memsize != ftell(vmss.dfp)) { error(INFO, LOGPRX"%s: unexpected size\n", vmem_filename); result = FALSE; goto exit; } fseek(vmss.dfp, 0L, SEEK_SET); fprintf(ofp, LOGPRX"vmem file: %s\n\n", vmem_filename); if (CRASHDEBUG(1)) { vmware_guestdump_memory_dump(ofp); dump_registers_for_vmss_dump(); } exit: if (fp) 
fclose(fp); if (vmem_filename) free(vmem_filename); if (result == FALSE) { if (vmss.dfp) fclose(vmss.dfp); if (vmss.regs64) { for (i = 0; i < vmss.num_vcpus; i++) { if (vmss.regs64[i]) free(vmss.regs64[i]); } free(vmss.regs64); } if (vmss.vcpu_regs) free(vmss.vcpu_regs); } return result; } int vmware_guestdump_memory_dump(FILE *ofp) { uint64_t holes_sum = 0; unsigned i; fprintf(ofp, "vmware_guestdump:\n"); fprintf(ofp, " Header: version=%d num_vcpus=%llu\n", hdr.version, (ulonglong)vmss.num_vcpus); fprintf(ofp, "Total memory: %llu\n", (ulonglong)vmss.memsize); fprintf(ofp, "Memory regions[%d]:\n", vmss.regionscount); fprintf(ofp, " [0x%016x-", 0); for (i = 0; i < vmss.regionscount - 1; i++) { fprintf(ofp, "0x%016llx]\n", (ulonglong)vmss.regions[i].startpagenum << VMW_PAGE_SHIFT); fprintf(ofp, " [0x%016llx-", (ulonglong)vmss.regions[i].startppn << VMW_PAGE_SHIFT); holes_sum += vmss.regions[i].startppn - vmss.regions[i].startpagenum; } fprintf(ofp, "0x%016llx]\n", (ulonglong)vmss.memsize + (holes_sum << VMW_PAGE_SHIFT)); return TRUE; } crash-utility-crash-9cd43f5/unwind_decoder.c0000664000372000037200000003156115107550337020537 0ustar juerghjuergh/* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang */ /* * unwind_decoder.c * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Adapted from: * * arch/ia64/kernel/unwind_decoder.c (kernel-2.4.18-6.23) */ /* * Copyright (C) 2000 Hewlett-Packard Co * Copyright (C) 2000 David Mosberger-Tang * * Generic IA-64 unwind info decoder. * * This file is used both by the Linux kernel and objdump. Please keep * the two copies of this file in sync. * * You need to customize the decoder by defining the following * macros/constants before including this file: * * Types: * unw_word Unsigned integer type with at least 64 bits * * Register names: * UNW_REG_BSP * UNW_REG_BSPSTORE * UNW_REG_FPSR * UNW_REG_LC * UNW_REG_PFS * UNW_REG_PR * UNW_REG_RNAT * UNW_REG_PSP * UNW_REG_RP * UNW_REG_UNAT * * Decoder action macros: * UNW_DEC_BAD_CODE(code) * UNW_DEC_ABI(fmt,abi,context,arg) * UNW_DEC_BR_GR(fmt,brmask,gr,arg) * UNW_DEC_BR_MEM(fmt,brmask,arg) * UNW_DEC_COPY_STATE(fmt,label,arg) * UNW_DEC_EPILOGUE(fmt,t,ecount,arg) * UNW_DEC_FRGR_MEM(fmt,grmask,frmask,arg) * UNW_DEC_FR_MEM(fmt,frmask,arg) * UNW_DEC_GR_GR(fmt,grmask,gr,arg) * UNW_DEC_GR_MEM(fmt,grmask,arg) * UNW_DEC_LABEL_STATE(fmt,label,arg) * UNW_DEC_MEM_STACK_F(fmt,t,size,arg) * UNW_DEC_MEM_STACK_V(fmt,t,arg) * UNW_DEC_PRIUNAT_GR(fmt,r,arg) * UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) * UNW_DEC_PRIUNAT_WHEN_PSPREL(fmt,pspoff,arg) * UNW_DEC_PRIUNAT_WHEN_SPREL(fmt,spoff,arg) * UNW_DEC_PROLOGUE(fmt,body,rlen,arg) * UNW_DEC_PROLOGUE_GR(fmt,rlen,mask,grsave,arg) * UNW_DEC_REG_PSPREL(fmt,reg,pspoff,arg) * UNW_DEC_REG_REG(fmt,src,dst,arg) * UNW_DEC_REG_SPREL(fmt,reg,spoff,arg) * UNW_DEC_REG_WHEN(fmt,reg,t,arg) * UNW_DEC_RESTORE(fmt,t,abreg,arg) * UNW_DEC_RESTORE_P(fmt,qp,t,abreg,arg) * UNW_DEC_SPILL_BASE(fmt,pspoff,arg) * UNW_DEC_SPILL_MASK(fmt,imaskp,arg) * UNW_DEC_SPILL_PSPREL(fmt,t,abreg,pspoff,arg) * UNW_DEC_SPILL_PSPREL_P(fmt,qp,t,abreg,pspoff,arg) * UNW_DEC_SPILL_REG(fmt,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_REG_P(fmt,qp,t,abreg,x,ytreg,arg) * UNW_DEC_SPILL_SPREL(fmt,t,abreg,spoff,arg) * 
UNW_DEC_SPILL_SPREL_P(fmt,qp,t,abreg,pspoff,arg)
 */

/*
 * Decode a ULEB128-encoded value starting at *dpp; advances *dpp past
 * the encoded bytes and returns the decoded value.
 */
static unw_word
unw_decode_uleb128 (unsigned char **dpp)
{
	unsigned shift = 0;
	unw_word byte, result = 0;
	unsigned char *bp = *dpp;

	while (1) {
		byte = *bp++;
		result |= (byte & 0x7f) << shift;	/* low 7 bits are payload */
		if ((byte & 0x80) == 0)			/* high bit clear: final byte */
			break;
		shift += 7;
	}
	*dpp = bp;
	return result;
}

/*
 * X1 record: spill of a register to an sp-relative (bit 7 of byte1 set)
 * or psp-relative location; abreg is the low 7 bits of byte1.
 */
static unsigned char *
unw_decode_x1 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, abreg;
	unw_word t, off;

	byte1 = *dp++;
	t = unw_decode_uleb128 (&dp);
	off = unw_decode_uleb128 (&dp);
	abreg = (byte1 & 0x7f);
	if (byte1 & 0x80)
		UNW_DEC_SPILL_SPREL(X1, t, abreg, off, arg);
	else
		UNW_DEC_SPILL_PSPREL(X1, t, abreg, off, arg);
	return dp;
}

/*
 * X2 record: restore (byte1 bit 7 clear and ytreg == 0) or spill of a
 * register to another register.
 */
static unsigned char *
unw_decode_x2 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, byte2, abreg, x, ytreg;
	unw_word t;

	byte1 = *dp++;
	byte2 = *dp++;
	t = unw_decode_uleb128 (&dp);
	abreg = (byte1 & 0x7f);
	ytreg = byte2;
	x = (byte1 >> 7) & 1;
	if ((byte1 & 0x80) == 0 && ytreg == 0)
		UNW_DEC_RESTORE(X2, t, abreg, arg);
	else
		UNW_DEC_SPILL_REG(X2, t, abreg, x, ytreg, arg);
	return dp;
}

/* X3 record: predicated (qp) variant of X1. */
static unsigned char *
unw_decode_x3 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, byte2, abreg, qp;
	unw_word t, off;

	byte1 = *dp++;
	byte2 = *dp++;
	t = unw_decode_uleb128 (&dp);
	off = unw_decode_uleb128 (&dp);
	qp = (byte1 & 0x3f);
	abreg = (byte2 & 0x7f);
	if (byte1 & 0x80)
		UNW_DEC_SPILL_SPREL_P(X3, qp, t, abreg, off, arg);
	else
		UNW_DEC_SPILL_PSPREL_P(X3, qp, t, abreg, off, arg);
	return dp;
}

/* X4 record: predicated (qp) variant of X2. */
static unsigned char *
unw_decode_x4 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, byte2, byte3, qp, abreg, x, ytreg;
	unw_word t;

	byte1 = *dp++;
	byte2 = *dp++;
	byte3 = *dp++;
	t = unw_decode_uleb128 (&dp);
	qp = (byte1 & 0x3f);
	abreg = (byte2 & 0x7f);
	x = (byte2 >> 7) & 1;
	ytreg = byte3;
	if ((byte2 & 0x80) == 0 && byte3 == 0)
		UNW_DEC_RESTORE_P(X4, qp, t, abreg, arg);
	else
		UNW_DEC_SPILL_REG_P(X4, qp, t, abreg, x, ytreg, arg);
	return dp;
}

static unsigned char
* unw_decode_r1 (unsigned char *dp, unsigned char code, void *arg)
{
	/* R1: short region; bit 5 selects body vs. prologue, low 5 bits = length. */
	int body = (code & 0x20) != 0;
	unw_word rlen;

	rlen = (code & 0x1f);
	UNW_DEC_PROLOGUE(R1, body, rlen, arg);
	return dp;
}

/* R2: prologue region with a mask of saved registers and a save GR. */
static unsigned char *
unw_decode_r2 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char byte1, mask, grsave;
	unw_word rlen;

	byte1 = *dp++;
	mask = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
	grsave = (byte1 & 0x7f);
	rlen = unw_decode_uleb128 (&dp);
	UNW_DEC_PROLOGUE_GR(R2, rlen, mask, grsave, arg);
	return dp;
}

/* R3: long region; region length is a ULEB128 value. */
static unsigned char *
unw_decode_r3 (unsigned char *dp, unsigned char code, void *arg)
{
	unw_word rlen;

	rlen = unw_decode_uleb128 (&dp);
	UNW_DEC_PROLOGUE(R3, ((code & 0x3) == 1), rlen, arg);
	return dp;
}

/* P1: branch registers saved to memory; low 5 bits of code form the mask. */
static unsigned char *
unw_decode_p1 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char brmask = (code & 0x1f);

	UNW_DEC_BR_MEM(P1, brmask, arg);
	return dp;
}

/* P2-P5: dispatch on the sub-format bits of the descriptor code byte. */
static unsigned char *
unw_decode_p2_p5 (unsigned char *dp, unsigned char code, void *arg)
{
	if ((code & 0x10) == 0) {
		/* P2: branch registers saved to general registers. */
		unsigned char byte1 = *dp++;

		UNW_DEC_BR_GR(P2, ((code & 0xf) << 1) | ((byte1 >> 7) & 1),
			      (byte1 & 0x7f), arg);
	} else if ((code & 0x08) == 0) {
		/* P3: a special register saved to a general register. */
		unsigned char byte1 = *dp++, r, dst;

		r = ((code & 0x7) << 1) | ((byte1 >> 7) & 1);
		dst = (byte1 & 0x7f);
		switch (r) {
		case 0: UNW_DEC_REG_GR(P3, UNW_REG_PSP, dst, arg); break;
		case 1: UNW_DEC_REG_GR(P3, UNW_REG_RP, dst, arg); break;
		case 2: UNW_DEC_REG_GR(P3, UNW_REG_PFS, dst, arg); break;
		case 3: UNW_DEC_REG_GR(P3, UNW_REG_PR, dst, arg); break;
		case 4: UNW_DEC_REG_GR(P3, UNW_REG_UNAT, dst, arg); break;
		case 5: UNW_DEC_REG_GR(P3, UNW_REG_LC, dst, arg); break;
		case 6: UNW_DEC_RP_BR(P3, dst, arg); break;
		case 7: UNW_DEC_REG_GR(P3, UNW_REG_RNAT, dst, arg); break;
		case 8: UNW_DEC_REG_GR(P3, UNW_REG_BSP, dst, arg); break;
		case 9: UNW_DEC_REG_GR(P3, UNW_REG_BSPSTORE, dst, arg); break;
		case 10: UNW_DEC_REG_GR(P3, UNW_REG_FPSR, dst, arg); break;
		case 11: UNW_DEC_PRIUNAT_GR(P3, dst, arg); break;
		default: UNW_DEC_BAD_CODE(r); break;
		}
	} else
if ((code & 0x7) == 0)
		/* P4: explicit spill mask; dp points at the imask bytes. */
		UNW_DEC_SPILL_MASK(P4, dp, arg);
	else if ((code & 0x7) == 1) {
		/* P5: combined GR/FR save mask packed into three bytes. */
		unw_word grmask, frmask, byte1, byte2, byte3;

		byte1 = *dp++;
		byte2 = *dp++;
		byte3 = *dp++;
		grmask = ((byte1 >> 4) & 0xf);
		frmask = ((byte1 & 0xf) << 16) | (byte2 << 8) | byte3;
		UNW_DEC_FRGR_MEM(P5, grmask, frmask, arg);
	} else
		UNW_DEC_BAD_CODE(code);
	return dp;
}

/* P6: bit 4 of code selects general vs. floating-point register mask. */
static unsigned char *
unw_decode_p6 (unsigned char *dp, unsigned char code, void *arg)
{
	int gregs = (code & 0x10) != 0;
	unsigned char mask = (code & 0x0f);

	if (gregs)
		UNW_DEC_GR_MEM(P6, mask, arg);
	else
		UNW_DEC_FR_MEM(P6, mask, arg);
	return dp;
}

/* P7-P10: dispatch on the low nibble of the descriptor code byte. */
static unsigned char *
unw_decode_p7_p10 (unsigned char *dp, unsigned char code, void *arg)
{
	unsigned char r, byte1, byte2;
	unw_word t, size;

	if ((code & 0x10) == 0) {
		/* P7: record type in low nibble, time t as ULEB128. */
		r = (code & 0xf);
		t = unw_decode_uleb128 (&dp);
		switch (r) {
		case 0:
			size = unw_decode_uleb128 (&dp);
			UNW_DEC_MEM_STACK_F(P7, t, size, arg);
			break;
		case 1: UNW_DEC_MEM_STACK_V(P7, t, arg); break;
		case 2: UNW_DEC_SPILL_BASE(P7, t, arg); break;
		case 3: UNW_DEC_REG_SPREL(P7, UNW_REG_PSP, t, arg); break;
		case 4: UNW_DEC_REG_WHEN(P7, UNW_REG_RP, t, arg); break;
		case 5: UNW_DEC_REG_PSPREL(P7, UNW_REG_RP, t, arg); break;
		case 6: UNW_DEC_REG_WHEN(P7, UNW_REG_PFS, t, arg); break;
		case 7: UNW_DEC_REG_PSPREL(P7, UNW_REG_PFS, t, arg); break;
		case 8: UNW_DEC_REG_WHEN(P7, UNW_REG_PR, t, arg); break;
		case 9: UNW_DEC_REG_PSPREL(P7, UNW_REG_PR, t, arg); break;
		case 10: UNW_DEC_REG_WHEN(P7, UNW_REG_LC, t, arg); break;
		case 11: UNW_DEC_REG_PSPREL(P7, UNW_REG_LC, t, arg); break;
		case 12: UNW_DEC_REG_WHEN(P7, UNW_REG_UNAT, t, arg); break;
		case 13: UNW_DEC_REG_PSPREL(P7, UNW_REG_UNAT, t, arg); break;
		case 14: UNW_DEC_REG_WHEN(P7, UNW_REG_FPSR, t, arg); break;
		case 15: UNW_DEC_REG_PSPREL(P7, UNW_REG_FPSR, t, arg); break;
		default: UNW_DEC_BAD_CODE(r); break;
		}
	} else {
		switch (code & 0xf) {
		case 0x0: /* p8 */
		{
			r = *dp++;
			t = unw_decode_uleb128 (&dp);
			switch (r) {
			case 1: UNW_DEC_REG_SPREL(P8, UNW_REG_RP, t, arg); break;
			case 2:
UNW_DEC_REG_SPREL(P8, UNW_REG_PFS, t, arg); break;
			case 3: UNW_DEC_REG_SPREL(P8, UNW_REG_PR, t, arg); break;
			case 4: UNW_DEC_REG_SPREL(P8, UNW_REG_LC, t, arg); break;
			case 5: UNW_DEC_REG_SPREL(P8, UNW_REG_UNAT, t, arg); break;
			case 6: UNW_DEC_REG_SPREL(P8, UNW_REG_FPSR, t, arg); break;
			case 7: UNW_DEC_REG_WHEN(P8, UNW_REG_BSP, t, arg); break;
			case 8: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSP, t, arg); break;
			case 9: UNW_DEC_REG_SPREL(P8, UNW_REG_BSP, t, arg); break;
			case 10: UNW_DEC_REG_WHEN(P8, UNW_REG_BSPSTORE, t, arg); break;
			case 11: UNW_DEC_REG_PSPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
			case 12: UNW_DEC_REG_SPREL(P8, UNW_REG_BSPSTORE, t, arg); break;
			case 13: UNW_DEC_REG_WHEN(P8, UNW_REG_RNAT, t, arg); break;
			case 14: UNW_DEC_REG_PSPREL(P8, UNW_REG_RNAT, t, arg); break;
			case 15: UNW_DEC_REG_SPREL(P8, UNW_REG_RNAT, t, arg); break;
			case 16: UNW_DEC_PRIUNAT_WHEN_GR(P8, t, arg); break;
			case 17: UNW_DEC_PRIUNAT_PSPREL(P8, t, arg); break;
			case 18: UNW_DEC_PRIUNAT_SPREL(P8, t, arg); break;
			case 19: UNW_DEC_PRIUNAT_WHEN_MEM(P8, t, arg); break;
			default: UNW_DEC_BAD_CODE(r); break;
			}
		}
		break;
		case 0x1:
			/* P9: general register saved to general register. */
			byte1 = *dp++; byte2 = *dp++;
			UNW_DEC_GR_GR(P9, (byte1 & 0xf), (byte2 & 0x7f), arg);
			break;
		case 0xf: /* p10 */
			byte1 = *dp++; byte2 = *dp++;
			UNW_DEC_ABI(P10, byte1, byte2, arg);
			break;
		case 0x9:
			return unw_decode_x1 (dp, code, arg);
		case 0xa:
			return unw_decode_x2 (dp, code, arg);
		case 0xb:
			return unw_decode_x3 (dp, code, arg);
		case 0xc:
			return unw_decode_x4 (dp, code, arg);
		default: UNW_DEC_BAD_CODE(code); break;
		}
	}
	return dp;
}

/* B1: copy_state (bit 5 set) or label_state with a 5-bit label. */
static unsigned char *
unw_decode_b1 (unsigned char *dp, unsigned char code, void *arg)
{
	unw_word label = (code & 0x1f);

	if ((code & 0x20) != 0)
		UNW_DEC_COPY_STATE(B1, label, arg);
	else
		UNW_DEC_LABEL_STATE(B1, label, arg);
	return dp;
}

/* B2: epilogue; t is ULEB128, ecount is the low 5 bits of the code byte. */
static unsigned char *
unw_decode_b2 (unsigned char *dp, unsigned char code, void *arg)
{
	unw_word t;

	t = unw_decode_uleb128 (&dp);
	UNW_DEC_EPILOGUE(B2, t, (code & 0x1f), arg);
	return dp;
}

static unsigned char *
unw_decode_b3_x4 (unsigned char *dp, unsigned char code, void *arg) { unw_word t, ecount, label; if ((code & 0x10) == 0) { t = unw_decode_uleb128 (&dp); ecount = unw_decode_uleb128 (&dp); UNW_DEC_EPILOGUE(B3, t, ecount, arg); } else if ((code & 0x07) == 0) { label = unw_decode_uleb128 (&dp); if ((code & 0x08) != 0) UNW_DEC_COPY_STATE(B4, label, arg); else UNW_DEC_LABEL_STATE(B4, label, arg); } else switch (code & 0x7) { case 1: return unw_decode_x1 (dp, code, arg); case 2: return unw_decode_x2 (dp, code, arg); case 3: return unw_decode_x3 (dp, code, arg); case 4: return unw_decode_x4 (dp, code, arg); default: UNW_DEC_BAD_CODE(code); break; } return dp; } typedef unsigned char *(*unw_decoder) (unsigned char *, unsigned char, void *); static unw_decoder unw_decode_table[2][8] = { /* prologue table: */ { unw_decode_r1, /* 0 */ unw_decode_r1, unw_decode_r2, unw_decode_r3, unw_decode_p1, /* 4 */ unw_decode_p2_p5, unw_decode_p6, unw_decode_p7_p10 }, { unw_decode_r1, /* 0 */ unw_decode_r1, unw_decode_r2, unw_decode_r3, unw_decode_b1, /* 4 */ unw_decode_b1, unw_decode_b2, unw_decode_b3_x4 } }; /* * Decode one descriptor and return address of next descriptor. */ static inline unsigned char * unw_decode (unsigned char *dp, int inside_body, void *arg) { unw_decoder decoder; unsigned char code; code = *dp++; decoder = unw_decode_table[inside_body][code >> 5]; dp = (*decoder) (dp, code, arg); return dp; } crash-utility-crash-9cd43f5/net.c0000664000372000037200000014450615107550337016340 0ustar juerghjuergh/* net.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2016 David Anderson * Copyright (C) 2002-2016 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include #include /* * Cache values we need that can change based on OS version, or any other * variables static to this file. These are setup in net_init(). Dump * the table during runtime via "help -n". */ struct net_table { ulong flags; char *netdevice; /* name of net device */ char *dev_name_t; /* readmem ID's */ char *dev_type_t; char *dev_addr_t; long dev_name; long dev_next; long dev_type; long dev_addr_len; long dev_ip_ptr; long in_device_ifa_list; long in_ifaddr_ifa_next; long in_ifaddr_ifa_address; int net_device_name_index; } net_table = { 0 }; struct net_table *net = &net_table; #define NETDEV_INIT (0x1) #define STRUCT_DEVICE (0x2) #define STRUCT_NET_DEVICE (0x4) #define SOCK_V1 (0x8) #define SOCK_V2 (0x10) #define NO_INET_SOCK (0x20) #define DEV_NAME_MAX 100 struct devinfo { char dev_name[DEV_NAME_MAX]; unsigned char dev_addr_len; short dev_type; }; #define BYTES_IP_ADDR 15 /* bytes to print IP addr (xxx.xxx.xxx.xxx) */ #define BYTES_PORT_NUM 5 /* bytes to print port number */ /* bytes needed for : notation */ #define BYTES_IP_TUPLE (BYTES_IP_ADDR + BYTES_PORT_NUM + 1) static void show_net_devices(ulong); static void show_net_devices_v2(ulong); static void show_net_devices_v3(ulong); static void print_neighbour_q(ulong, int); static void get_netdev_info(ulong, struct devinfo *); static void get_device_name(ulong, char *); static long get_device_address(ulong, char **, long); static void get_device_ip6_address(ulong, char **, long); static void get_sock_info(ulong, char *); static void dump_arp(void); static void arp_state_to_flags(unsigned char); static void dump_ether_hw(unsigned char *, int); static void dump_sockets(ulong, struct reference *); static int 
sym_socket_dump(ulong, int, int, ulong, struct reference *); static void dump_hw_addr(unsigned char *, int); static char *dump_in6_addr_port(uint16_t *, uint16_t, char *, int *); #define MK_TYPE_T(f,s,m) \ do { \ (f) = malloc(strlen(s) + strlen(m) + 2); \ if ((f) == NULL) { \ error(WARNING, "malloc fail for type %s.%s", (s), (m)); \ } else { \ sprintf((f), "%s %s", (s), (m)); \ } \ } while(0) void net_init(void) { /* * Note the order of the following checks. The device struct was * renamed to net_device in 2.3, but there may be another struct * called 'device' so we check for the new one first. */ STRUCT_SIZE_INIT(net_device, "net_device"); if (VALID_STRUCT(net_device)) { net->netdevice = "net_device"; net->dev_next = MEMBER_OFFSET_INIT(net_device_next, "net_device", "next"); net->dev_name = MEMBER_OFFSET_INIT(net_device_name, "net_device", "name"); net->dev_type = MEMBER_OFFSET_INIT(net_device_type, "net_device", "type"); net->dev_addr_len = MEMBER_OFFSET_INIT(net_device_addr_len, "net_device", "addr_len"); net->dev_ip_ptr = MEMBER_OFFSET_INIT(net_device_ip_ptr, "net_device", "ip_ptr"); MEMBER_OFFSET_INIT(net_device_dev_list, "net_device", "dev_list"); MEMBER_OFFSET_INIT(net_device_ip6_ptr, "net_device", "ip6_ptr"); MEMBER_OFFSET_INIT(inet6_dev_addr_list, "inet6_dev", "addr_list"); MEMBER_OFFSET_INIT(inet6_ifaddr_addr, "inet6_ifaddr", "addr"); MEMBER_OFFSET_INIT(inet6_ifaddr_if_list, "inet6_ifaddr", "if_list"); MEMBER_OFFSET_INIT(inet6_ifaddr_if_next, "inet6_ifaddr", "if_next"); MEMBER_OFFSET_INIT(in6_addr_in6_u, "in6_addr", "in6_u"); MEMBER_OFFSET_INIT(net_dev_base_head, "net", "dev_base_head"); ARRAY_LENGTH_INIT(net->net_device_name_index, net_device_name, "net_device.name", NULL, sizeof(char)); net->flags |= (NETDEV_INIT|STRUCT_NET_DEVICE); } else { STRUCT_SIZE_INIT(device, "device"); if (VALID_STRUCT(device)) { net->netdevice = "device"; net->dev_next = MEMBER_OFFSET_INIT(device_next, "device", "next"); net->dev_name = MEMBER_OFFSET_INIT(device_name, "device", 
"name"); net->dev_type = MEMBER_OFFSET_INIT(device_type, "device", "type"); net->dev_ip_ptr = MEMBER_OFFSET_INIT(device_ip_ptr, "device", "ip_ptr"); net->dev_addr_len = MEMBER_OFFSET_INIT(device_addr_len, "device", "addr_len"); net->flags |= (NETDEV_INIT|STRUCT_DEVICE); } else error(WARNING, "net_init: unknown device type for net device"); } if (VALID_MEMBER(task_struct_nsproxy)) MEMBER_OFFSET_INIT(nsproxy_net_ns, "nsproxy", "net_ns"); if (net->flags & NETDEV_INIT) { MK_TYPE_T(net->dev_name_t, net->netdevice, "name"); MK_TYPE_T(net->dev_type_t, net->netdevice, "type"); MK_TYPE_T(net->dev_addr_t, net->netdevice, "addr_len"); MEMBER_OFFSET_INIT(socket_sk, "socket", "sk"); MEMBER_OFFSET_INIT(neighbour_next, "neighbour", "next"); MEMBER_OFFSET_INIT(neighbour_primary_key, "neighbour", "primary_key"); MEMBER_OFFSET_INIT(neighbour_ha, "neighbour", "ha"); MEMBER_OFFSET_INIT(neighbour_dev, "neighbour", "dev"); MEMBER_OFFSET_INIT(neighbour_nud_state, "neighbour", "nud_state"); MEMBER_OFFSET_INIT(neigh_table_nht_ptr, "neigh_table", "nht"); if (VALID_MEMBER(neigh_table_nht_ptr)) { MEMBER_OFFSET_INIT(neigh_table_hash_mask, "neigh_hash_table", "hash_mask"); MEMBER_OFFSET_INIT(neigh_table_hash_shift, "neigh_hash_table", "hash_shift"); MEMBER_OFFSET_INIT(neigh_table_hash_buckets, "neigh_hash_table", "hash_buckets"); /* Linux 6.13 and later */ if (INVALID_MEMBER(neigh_table_hash_buckets)) { MEMBER_OFFSET_INIT(neigh_table_hash_heads, "neigh_hash_table", "hash_heads"); MEMBER_OFFSET_INIT(neighbour_hash, "neighbour", "hash"); } } else { MEMBER_OFFSET_INIT(neigh_table_hash_buckets, "neigh_table", "hash_buckets"); MEMBER_OFFSET_INIT(neigh_table_hash_mask, "neigh_table", "hash_mask"); } MEMBER_OFFSET_INIT(neigh_table_key_len, "neigh_table", "key_len"); MEMBER_OFFSET_INIT(in_device_ifa_list, "in_device", "ifa_list"); MEMBER_OFFSET_INIT(in_ifaddr_ifa_next, "in_ifaddr", "ifa_next"); MEMBER_OFFSET_INIT(in_ifaddr_ifa_address, "in_ifaddr", "ifa_address"); STRUCT_SIZE_INIT(sock, "sock"); 
MEMBER_OFFSET_INIT(sock_family, "sock", "family"); if (VALID_MEMBER(sock_family)) { MEMBER_OFFSET_INIT(sock_daddr, "sock", "daddr"); MEMBER_OFFSET_INIT(sock_rcv_saddr, "sock", "rcv_saddr"); MEMBER_OFFSET_INIT(sock_dport, "sock", "dport"); MEMBER_OFFSET_INIT(sock_sport, "sock", "sport"); MEMBER_OFFSET_INIT(sock_num, "sock", "num"); MEMBER_OFFSET_INIT(sock_type, "sock", "type"); net->flags |= SOCK_V1; } else { /* * struct sock { * struct sock_common __sk_common; * #define sk_family __sk_common.skc_family * ... */ MEMBER_OFFSET_INIT(sock_common_skc_family, "sock_common", "skc_family"); MEMBER_OFFSET_INIT(sock_sk_type, "sock", "sk_type"); MEMBER_OFFSET_INIT(sock_sk_common, "sock", "__sk_common"); MEMBER_OFFSET_INIT(sock_common_skc_v6_daddr, "sock_common", "skc_v6_daddr"); MEMBER_OFFSET_INIT(sock_common_skc_v6_rcv_saddr, "sock_common", "skc_v6_rcv_saddr"); /* * struct inet_sock { * struct sock sk; * struct ipv6_pinfo *pinet6; * struct inet_opt inet; * }; */ STRUCT_SIZE_INIT(inet_sock, "inet_sock"); STRUCT_SIZE_INIT(socket, "socket"); if (STRUCT_EXISTS("inet_opt")) { MEMBER_OFFSET_INIT(inet_sock_inet, "inet_sock", "inet"); MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_opt", "daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_opt", "rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_opt", "dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_opt", "sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_opt", "num"); } else { /* inet_opt moved to inet_sock */ ASSIGN_OFFSET(inet_sock_inet) = 0; if (MEMBER_EXISTS("inet_sock", "daddr")) { MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "inet_sock", "rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "num"); } else if (MEMBER_EXISTS("inet_sock", "inet_daddr")) { MEMBER_OFFSET_INIT(inet_opt_daddr, "inet_sock", "inet_daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, 
"inet_sock", "inet_rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "inet_dport"); MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "inet_sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "inet_num"); } else if ((MEMBER_OFFSET("inet_sock", "sk") == 0) && (MEMBER_OFFSET("sock", "__sk_common") == 0)) { MEMBER_OFFSET_INIT(inet_opt_daddr, "sock_common", "skc_daddr"); if (INVALID_MEMBER(inet_opt_daddr)) ANON_MEMBER_OFFSET_INIT(inet_opt_daddr, "sock_common", "skc_daddr"); MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "sock_common", "skc_rcv_saddr"); if (INVALID_MEMBER(inet_opt_rcv_saddr)) ANON_MEMBER_OFFSET_INIT(inet_opt_rcv_saddr, "sock_common", "skc_rcv_saddr"); MEMBER_OFFSET_INIT(inet_opt_dport, "inet_sock", "inet_dport"); if (INVALID_MEMBER(inet_opt_dport)) { MEMBER_OFFSET_INIT(inet_opt_dport, "sock_common", "skc_dport"); if (INVALID_MEMBER(inet_opt_dport)) ANON_MEMBER_OFFSET_INIT(inet_opt_dport, "sock_common", "skc_dport"); } MEMBER_OFFSET_INIT(inet_opt_sport, "inet_sock", "inet_sport"); MEMBER_OFFSET_INIT(inet_opt_num, "inet_sock", "inet_num"); if (INVALID_MEMBER(inet_opt_num)) { MEMBER_OFFSET_INIT(inet_opt_num, "sock_common", "skc_num"); if (INVALID_MEMBER(inet_opt_num)) ANON_MEMBER_OFFSET_INIT(inet_opt_num, "sock_common", "skc_num"); } } } if (VALID_STRUCT(inet_sock) && INVALID_MEMBER(inet_sock_inet)) { /* * gdb can't seem to figure out the inet_sock * in later 2.6 kernels, returning this: * * struct inet_sock { * * } * * It does know the struct size, so kludge it * to subtract the size of the inet_opt struct * from the size of the containing inet_sock. */ net->flags |= NO_INET_SOCK; ASSIGN_OFFSET(inet_sock_inet) = SIZE(inet_sock) - STRUCT_SIZE("inet_opt"); } /* * If necessary, set inet_sock size and inet_sock_inet offset, * accounting for the configuration-dependent, intervening, * struct ipv6_pinfo pointer located in between the sock and * inet_opt members of the inet_sock. 
*/ if (!VALID_STRUCT(inet_sock)) { if (symbol_exists("tcpv6_protocol") && symbol_exists("udpv6_protocol")) { ASSIGN_SIZE(inet_sock) = SIZE(sock) + sizeof(void *) + STRUCT_SIZE("inet_opt"); ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock) + sizeof(void *); } else { ASSIGN_SIZE(inet_sock) = SIZE(sock) + STRUCT_SIZE("inet_opt"); ASSIGN_OFFSET(inet_sock_inet) = SIZE(sock); } } MEMBER_OFFSET_INIT(ipv6_pinfo_rcv_saddr, "ipv6_pinfo", "rcv_saddr"); MEMBER_OFFSET_INIT(ipv6_pinfo_daddr, "ipv6_pinfo", "daddr"); STRUCT_SIZE_INIT(in6_addr, "in6_addr"); MEMBER_OFFSET_INIT(socket_alloc_vfs_inode, "socket_alloc", "vfs_inode"); net->flags |= SOCK_V2; } } } /* * The net command... */ #define NETOPTS "N:asSR:xdn" #define s_FLAG FOREACH_s_FLAG #define S_FLAG FOREACH_S_FLAG #define x_FLAG FOREACH_x_FLAG #define d_FLAG FOREACH_d_FLAG #define NET_REF_FOUND (0x1) #define NET_REF_HEXNUM (0x2) #define NET_REF_DECNUM (0x4) #define NET_TASK_HEADER_PRINTED (0x8) #define NET_SOCK_HEADER_PRINTED (0x10) #define NET_REF_FOUND_ITEM (0x20) #define NET_REFERENCE_CHECK(X) (X) #define NET_REFERENCE_FOUND(X) ((X) && ((X)->cmdflags & NET_REF_FOUND)) void cmd_net(void) { int c; ulong sflag, nflag, aflag; ulong value; ulong task; struct task_context *tc = NULL; struct in_addr in_addr; struct reference reference, *ref; if (!(net->flags & NETDEV_INIT)) error(FATAL, "net subsystem not initialized!"); ref = NULL; sflag = nflag = aflag = 0; task = pid_to_task(0); while ((c = getopt(argcnt, args, NETOPTS)) != EOF) { switch (c) { case 'R': if (ref) error(INFO, "only one -R option allowed\n"); else { ref = &reference; BZERO(ref, sizeof(struct reference)); ref->str = optarg; } break; case 'a': dump_arp(); aflag++; break; case 'N': value = stol(optarg, FAULT_ON_ERROR, NULL); in_addr.s_addr = (in_addr_t)value; fprintf(fp, "%s\n", inet_ntoa(in_addr)); return; case 's': if (sflag & S_FLAG) error(INFO, "only one -s or -S option allowed\n"); else sflag |= s_FLAG; break; case 'S': if (sflag & s_FLAG) error(INFO, "only one -s or 
-S option allowed\n"); else sflag |= S_FLAG; break; case 'x': if (sflag & d_FLAG) error(FATAL, "-d and -x are mutually exclusive\n"); sflag |= x_FLAG; break; case 'd': if (sflag & x_FLAG) error(FATAL, "-d and -x are mutually exclusive\n"); sflag |= d_FLAG; break; case 'n': nflag = 1; task = CURRENT_TASK(); if (args[optind]) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: case STR_TASK: task = tc->task; break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", args[optind]); } } break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (sflag & (s_FLAG|S_FLAG)) dump_sockets(sflag, ref); else { if ((argcnt == 1) || nflag) show_net_devices(task); else if (!aflag) cmd_usage(pc->curcmd, SYNOPSIS); } } /* * Just display the address and name of each net device. */ static void show_net_devices(ulong task) { ulong next; long flen; char *buf; long buflen = BUFSIZE; if (symbol_exists("dev_base_head")) { show_net_devices_v2(task); return; } else if (symbol_exists("init_net")) { show_net_devices_v3(task); return; } if (!symbol_exists("dev_base")) error(FATAL, "dev_base, dev_base_head or init_net do not exist!\n"); get_symbol_data("dev_base", sizeof(void *), &next); if (!net->netdevice || !next) return; buf = GETBUF(buflen); flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); fprintf(fp, "%s NAME IP ADDRESS(ES)\n", mkstring(upper_case(net->netdevice, buf), flen, CENTER|LJUST, NULL)); do { fprintf(fp, "%s ", mkstring(buf, flen, CENTER|RJUST|LONG_HEX, MKSTR(next))); get_device_name(next, buf); fprintf(fp, "%-10s ", buf); get_device_address(next, &buf, buflen); get_device_ip6_address(next, &buf, buflen); fprintf(fp, "%s\n", buf); readmem(next+net->dev_next, KVADDR, &next, sizeof(void *), "(net_)device.next", FAULT_ON_ERROR); } while (next); FREEBUF(buf); } static void show_net_devices_v2(ulong task) { struct list_data list_data, *ld; char *net_device_buf; char *buf; long buflen = BUFSIZE; int ndevcnt, i; long flen; if 
(!net->netdevice) /* initialized in net_init() */ return; buf = GETBUF(buflen); flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); fprintf(fp, "%s NAME IP ADDRESS(ES)\n", mkstring(upper_case(net->netdevice, buf), flen, CENTER|LJUST, NULL)); net_device_buf = GETBUF(SIZE(net_device)); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; get_symbol_data("dev_base_head", sizeof(void *), &ld->start); ld->end = symbol_value("dev_base_head"); ld->list_head_offset = OFFSET(net_device_dev_list); ndevcnt = do_list(ld); for (i = 0; i < ndevcnt; ++i) { readmem(ld->list_ptr[i], KVADDR, net_device_buf, SIZE(net_device), "net_device buffer", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|RJUST|LONG_HEX, MKSTR(ld->list_ptr[i]))); get_device_name(ld->list_ptr[i], buf); fprintf(fp, "%-10s ", buf); get_device_address(ld->list_ptr[i], &buf, buflen); get_device_ip6_address(ld->list_ptr[i], &buf, buflen); fprintf(fp, "%s\n", buf); } FREEBUF(ld->list_ptr); FREEBUF(net_device_buf); FREEBUF(buf); } static void show_net_devices_v3(ulong task) { ulong nsproxy_p, net_ns_p; struct list_data list_data, *ld; char *net_device_buf; char *buf; long buflen = BUFSIZE; int ndevcnt, i; long flen; if (!net->netdevice) /* initialized in net_init() */ return; buf = GETBUF(buflen); flen = MAX(VADDR_PRLEN, strlen(net->netdevice)); fprintf(fp, "%s NAME IP ADDRESS(ES)\n", mkstring(upper_case(net->netdevice, buf), flen, CENTER|LJUST, NULL)); net_device_buf = GETBUF(SIZE(net_device)); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; if (VALID_MEMBER(nsproxy_net_ns)) { readmem(task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy_p, sizeof(ulong), "task_struct.nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy_p + OFFSET(nsproxy_net_ns), KVADDR, &net_ns_p, sizeof(ulong), "nsproxy.net_ns", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine net_namespace location!\n"); } else net_ns_p = symbol_value("init_net"); ld->start = ld->end = 
net_ns_p + OFFSET(net_dev_base_head); ld->list_head_offset = OFFSET(net_device_dev_list); ndevcnt = do_list(ld); /* * Skip the first entry (init_net). */ for (i = 1; i < ndevcnt; ++i) { readmem(ld->list_ptr[i], KVADDR, net_device_buf, SIZE(net_device), "net_device buffer", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|RJUST|LONG_HEX, MKSTR(ld->list_ptr[i]))); get_device_name(ld->list_ptr[i], buf); fprintf(fp, "%-10s ", buf); get_device_address(ld->list_ptr[i], &buf, buflen); get_device_ip6_address(ld->list_ptr[i], &buf, buflen); fprintf(fp, "%s\n", buf); } FREEBUF(ld->list_ptr); FREEBUF(net_device_buf); FREEBUF(buf); } /* * Perform the actual work of dumping the ARP table... */ #define ARP_HEADING \ "NEIGHBOUR IP ADDRESS HW TYPE HW ADDRESS DEVICE STATE" static void dump_arp(void) { ulong arp_tbl; /* address of arp_tbl */ ulong *hash_buckets; ulong hash; long hash_bytes; int nhash_buckets = 0; int key_len; int i; int header_printed = 0; int hash_mask = 0; ulong nht; if (!symbol_exists("arp_tbl")) error(FATAL, "arp_tbl does not exist in this kernel\n"); arp_tbl = symbol_value("arp_tbl"); /* * NOTE: 2.6.8 -> 2.6.9 neigh_table struct changed from: * * struct neighbour *hash_buckets[32]; * to * struct neighbour **hash_buckets; * * Use 'hash_mask' as indicator to decide if we're dealing * with an array or a pointer. * * Around 2.6.37 neigh_hash_table struct has been introduced * and pointer to it has been added to neigh_table. */ if (VALID_MEMBER(neigh_table_nht_ptr)) { readmem(arp_tbl + OFFSET(neigh_table_nht_ptr), KVADDR, &nht, sizeof(nht), "neigh_table nht", FAULT_ON_ERROR); /* NB! 
Re-use of offsets like neigh_table_hash_mask * with neigh_hash_table structure */ if (VALID_MEMBER(neigh_table_hash_mask)) { readmem(nht + OFFSET(neigh_table_hash_mask), KVADDR, &hash_mask, sizeof(hash_mask), "neigh_hash_table hash_mask", FAULT_ON_ERROR); nhash_buckets = hash_mask + 1; } else if (VALID_MEMBER(neigh_table_hash_shift)) { readmem(nht + OFFSET(neigh_table_hash_shift), KVADDR, &hash_mask, sizeof(hash_mask), "neigh_hash_table hash_shift", FAULT_ON_ERROR); nhash_buckets = 1U << hash_mask; } } else if (VALID_MEMBER(neigh_table_hash_mask)) { readmem(arp_tbl + OFFSET(neigh_table_hash_mask), KVADDR, &hash_mask, sizeof(hash_mask), "neigh_table hash_mask", FAULT_ON_ERROR); nhash_buckets = hash_mask + 1; } else nhash_buckets = (i = ARRAY_LENGTH(neigh_table_hash_buckets)) ? i : get_array_length("neigh_table.hash_buckets", NULL, sizeof(void *)); if (nhash_buckets == 0) { option_not_supported('a'); return; } hash_bytes = nhash_buckets * sizeof(*hash_buckets); hash_buckets = (ulong *)GETBUF(hash_bytes); readmem(arp_tbl + OFFSET(neigh_table_key_len), KVADDR, &key_len, sizeof(key_len), "neigh_table key_len", FAULT_ON_ERROR); if (VALID_MEMBER(neigh_table_nht_ptr)) { /* Linux 6.13 and later */ if (VALID_MEMBER(neigh_table_hash_heads)) readmem(nht + OFFSET(neigh_table_hash_heads), KVADDR, &hash, sizeof(hash), "neigh_hash_table hash_heads ptr", FAULT_ON_ERROR); else readmem(nht + OFFSET(neigh_table_hash_buckets), KVADDR, &hash, sizeof(hash), "neigh_hash_table hash_buckets ptr", FAULT_ON_ERROR); readmem(hash, KVADDR, hash_buckets, hash_bytes, "neigh_hash_table hash_buckets", FAULT_ON_ERROR); } else if (hash_mask) { readmem(arp_tbl + OFFSET(neigh_table_hash_buckets), KVADDR, &hash, sizeof(hash), "neigh_table hash_buckets pointer", FAULT_ON_ERROR); readmem(hash, KVADDR, hash_buckets, hash_bytes, "neigh_table hash_buckets", FAULT_ON_ERROR); } else readmem(arp_tbl + OFFSET(neigh_table_hash_buckets), KVADDR, hash_buckets, hash_bytes, "neigh_table hash_buckets", FAULT_ON_ERROR); 
for (i = 0; i < nhash_buckets; i++) { if (hash_buckets[i] != (ulong)NULL) { if (!header_printed) { fprintf(fp, "%s\n", ARP_HEADING); header_printed = 1; } print_neighbour_q(hash_buckets[i], key_len); } } fflush(fp); FREEBUF(hash_buckets); } /* * Dump out the relevant information of a neighbour structure for the * ARP table. */ static void print_neighbour_q(ulong addr, int key_len) { int i; ulong dev; /* dev address of this struct */ unsigned char *ha_buf; /* buffer for hardware address */ uint ha_size; /* size of HW address */ uint ipaddr; /* hold ipaddr (aka primary_key) */ struct devinfo dinfo; unsigned char state; /* state of ARP entry */ struct in_addr in_addr; ha_size = (i = ARRAY_LENGTH(neighbour_ha)) ? i : get_array_length("neighbour.ha", NULL, sizeof(char)); ha_buf = (unsigned char *)GETBUF(ha_size); while (addr) { readmem(addr + OFFSET(neighbour_primary_key), KVADDR, &ipaddr, sizeof(ipaddr), "neighbour primary_key", FAULT_ON_ERROR); readmem(addr + OFFSET(neighbour_ha), KVADDR, ha_buf, ha_size, "neighbour ha", FAULT_ON_ERROR); readmem(addr + OFFSET(neighbour_dev), KVADDR, &dev, sizeof(dev), "neighbour dev", FAULT_ON_ERROR); get_netdev_info(dev, &dinfo); readmem(addr + OFFSET(neighbour_nud_state), KVADDR, &state, sizeof(state), "neighbour nud_state", FAULT_ON_ERROR); in_addr.s_addr = ipaddr; fprintf(fp, "%-16lx %-16s", addr, inet_ntoa(in_addr)); switch (dinfo.dev_type) { case ARPHRD_ETHER: /* * Use the actual HW address size in the device struct * rather than the max size of the array (as was done * during the readmem() call above.... 
*/ fprintf(fp, "%-10s ", "ETHER"); dump_ether_hw(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_NETROM: fprintf(fp, "%-10s ", "NETROM"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_EETHER: fprintf(fp, "%-10s ", "EETHER"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_AX25: fprintf(fp, "%-10s ", "AX25"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_PRONET: fprintf(fp, "%-10s ", "PRONET"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_CHAOS: fprintf(fp, "%-10s ", "CHAOS"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_IEEE802: fprintf(fp, "%-10s ", "IEEE802"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_ARCNET: fprintf(fp, "%-10s ", "ARCNET"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_APPLETLK: fprintf(fp, "%-10s ", "APPLETLK"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_DLCI: fprintf(fp, "%-10s ", "DLCI"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; case ARPHRD_METRICOM: fprintf(fp, "%-10s ", "METRICOM"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; default: fprintf(fp, "%-10s ", "UNKNOWN"); dump_hw_addr(ha_buf, dinfo.dev_addr_len); break; } fprintf(fp, " %-6s ", dinfo.dev_name); arp_state_to_flags(state); /* Linux 6.13 and later kernels use hlist. */ if (VALID_MEMBER(neighbour_hash)) { readmem(addr + OFFSET(neighbour_hash), KVADDR, &addr, sizeof(addr), "neighbour hash", FAULT_ON_ERROR); if (addr) addr -= OFFSET(neighbour_hash); } else readmem(addr + OFFSET(neighbour_next), KVADDR, &addr, sizeof(addr), "neighbour next", FAULT_ON_ERROR); } FREEBUF(ha_buf); } /* * read netdevice info.... 
*/ static void get_netdev_info(ulong devaddr, struct devinfo *dip) { short dev_type; get_device_name(devaddr, dip->dev_name); readmem(devaddr + net->dev_type, KVADDR, &dev_type, sizeof(dev_type), net->dev_type_t, FAULT_ON_ERROR); dip->dev_type = dev_type; readmem(devaddr + net->dev_addr_len, KVADDR, &dip->dev_addr_len, sizeof(dip->dev_addr_len), net->dev_addr_t, FAULT_ON_ERROR); } /* * Get the device name. */ static void get_device_name(ulong devaddr, char *buf) { ulong name_addr; switch (net->flags & (STRUCT_DEVICE|STRUCT_NET_DEVICE)) { case STRUCT_NET_DEVICE: if (net->net_device_name_index > 0) { readmem(devaddr + net->dev_name, KVADDR, buf, net->net_device_name_index, net->dev_name_t, FAULT_ON_ERROR); return; } /* fallthrough */ case STRUCT_DEVICE: readmem(devaddr + net->dev_name, KVADDR, &name_addr, sizeof(name_addr), net->dev_name_t, FAULT_ON_ERROR); read_string(name_addr, buf, DEV_NAME_MAX); break; } } /* * Get the device address. * * {net_}device->ip_ptr points to in_device. * in_device->in_ifaddr points to in_ifaddr list. * in_ifaddr->ifa_address contains the address. * in_ifaddr->ifa_next points to the next in_ifaddr in the list (if any). * */ static long get_device_address(ulong devaddr, char **bufp, long buflen) { ulong ip_ptr, ifa_list; struct in_addr ifa_address; char *buf; char buf2[BUFSIZE]; long pos = 0; buf = *bufp; BZERO(buf, buflen); BZERO(buf2, BUFSIZE); readmem(devaddr + net->dev_ip_ptr, KVADDR, &ip_ptr, sizeof(ulong), "ip_ptr", FAULT_ON_ERROR); if (!ip_ptr) return buflen; readmem(ip_ptr + OFFSET(in_device_ifa_list), KVADDR, &ifa_list, sizeof(ulong), "ifa_list", FAULT_ON_ERROR); while (ifa_list) { readmem(ifa_list + OFFSET(in_ifaddr_ifa_address), KVADDR, &ifa_address, sizeof(struct in_addr), "ifa_address", FAULT_ON_ERROR); sprintf(buf2, "%s%s", pos ? 
", " : "", inet_ntoa(ifa_address)); if (pos + strlen(buf2) >= buflen) { RESIZEBUF(*bufp, buflen, buflen * 2); buf = *bufp; BZERO(buf + buflen, buflen); buflen *= 2; } BCOPY(buf2, &buf[pos], strlen(buf2)); pos += strlen(buf2); readmem(ifa_list + OFFSET(in_ifaddr_ifa_next), KVADDR, &ifa_list, sizeof(ulong), "ifa_next", FAULT_ON_ERROR); } return buflen; } static void get_device_ip6_address(ulong devaddr, char **bufp, long buflen) { ulong ip6_ptr = 0, pos = 0, bufsize = buflen, addr = 0; struct in6_addr ip6_addr; char *buf; char str[INET6_ADDRSTRLEN] = {0}; char buffer[INET6_ADDRSTRLEN + 2] = {0}; uint len = 0; buf = *bufp; pos = strlen(buf); readmem(devaddr + OFFSET(net_device_ip6_ptr), KVADDR, &ip6_ptr, sizeof(ulong), "ip6_ptr", FAULT_ON_ERROR); if (!ip6_ptr) return; /* * 502a2ffd7376 ("ipv6: convert idev_list to list macros") * v2.6.35-rc1~473^2~733 */ if (VALID_MEMBER(inet6_ifaddr_if_list)) { struct list_data list_data, *ld; ulong cnt = 0, i; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = ip6_ptr + OFFSET(inet6_dev_addr_list); ld->list_head_offset = OFFSET(inet6_ifaddr_if_list); cnt = do_list(ld); for (i = 1; i < cnt; i++) { addr = ld->list_ptr[i] + OFFSET(inet6_ifaddr_addr); readmem(addr + OFFSET(in6_addr_in6_u), KVADDR, &ip6_addr, sizeof(struct in6_addr), "in6_addr.in6_u", FAULT_ON_ERROR); inet_ntop(AF_INET6, (void*)&ip6_addr, str, INET6_ADDRSTRLEN); sprintf(buffer, "%s%s", pos ? 
", " : "", str); len = strlen(buffer); if (pos + len >= bufsize) { RESIZEBUF(*bufp, bufsize, bufsize + buflen); buf = *bufp; BZERO(buf + bufsize, buflen); bufsize += buflen; } BCOPY(buffer, &buf[pos], len); pos += len; } FREEBUF(ld->list_ptr); return; } if (INVALID_MEMBER(inet6_ifaddr_if_next)) return; readmem(ip6_ptr + OFFSET(inet6_dev_addr_list), KVADDR, &addr, sizeof(void *), "inet6_dev.addr_list", FAULT_ON_ERROR); while (addr) { readmem(addr + OFFSET(in6_addr_in6_u), KVADDR, &ip6_addr, sizeof(struct in6_addr), "in6_addr.in6_u", FAULT_ON_ERROR); inet_ntop(AF_INET6, (void*)&ip6_addr, str, INET6_ADDRSTRLEN); sprintf(buffer, "%s%s", pos ? ", " : "", str); len = strlen(buffer); if (pos + len >= bufsize) { RESIZEBUF(*bufp, bufsize, bufsize + buflen); buf = *bufp; BZERO(buf + bufsize, buflen); bufsize += buflen; } BCOPY(buffer, &buf[pos], len); pos += len; readmem(addr + OFFSET(inet6_ifaddr_if_next), KVADDR, &addr, sizeof(void *), "inet6_ifaddr.if_next", FAULT_ON_ERROR); } } /* * Get the family, type, local and destination address/port pairs. 
*/ static void get_sock_info(ulong sock, char *buf) { uint32_t daddr, rcv_saddr; uint16_t dport, sport; ushort family, type; ushort num ATTRIBUTE_UNUSED; char *sockbuf, *inet_sockbuf; ulong ipv6_pinfo, ipv6_rcv_saddr, ipv6_daddr; uint16_t u6_addr16_src[8]; uint16_t u6_addr16_dest[8]; char buf2[BUFSIZE]; struct in_addr in_addr; int len; BZERO(buf, BUFSIZE); BZERO(buf2, BUFSIZE); sockbuf = inet_sockbuf = NULL; rcv_saddr = daddr = 0; dport = sport = 0; family = type = 0; ipv6_pinfo = 0; switch (net->flags & (SOCK_V1|SOCK_V2)) { case SOCK_V1: sockbuf = GETBUF(SIZE(sock)); readmem(sock, KVADDR, sockbuf, SIZE(sock), "sock buffer", FAULT_ON_ERROR); daddr = UINT(sockbuf + OFFSET(sock_daddr)); rcv_saddr = UINT(sockbuf + OFFSET(sock_rcv_saddr)); dport = USHORT(sockbuf + OFFSET(sock_dport)); sport = USHORT(sockbuf + OFFSET(sock_sport)); num = USHORT(sockbuf + OFFSET(sock_num)); family = USHORT(sockbuf + OFFSET(sock_family)); type = USHORT(sockbuf + OFFSET(sock_type)); break; case SOCK_V2: inet_sockbuf = GETBUF(SIZE(inet_sock)); readmem(sock, KVADDR, inet_sockbuf, SIZE(inet_sock), "inet_sock buffer", FAULT_ON_ERROR); daddr = UINT(inet_sockbuf + OFFSET(inet_sock_inet) + OFFSET(inet_opt_daddr)); rcv_saddr = UINT(inet_sockbuf + OFFSET(inet_sock_inet) + OFFSET(inet_opt_rcv_saddr)); dport = USHORT(inet_sockbuf + OFFSET(inet_sock_inet) + OFFSET(inet_opt_dport)); sport = USHORT(inet_sockbuf + OFFSET(inet_sock_inet) + OFFSET(inet_opt_sport)); num = USHORT(inet_sockbuf + OFFSET(inet_sock_inet) + OFFSET(inet_opt_num)); family = USHORT(inet_sockbuf + OFFSET(sock_common_skc_family)); type = USHORT(inet_sockbuf + OFFSET(sock_sk_type)); ipv6_pinfo = ULONG(inet_sockbuf + SIZE(sock)); break; } switch (family) { case AF_UNSPEC: sprintf(buf, "UNSPEC:"); break; case AF_UNIX: sprintf(buf, "UNIX:"); break; case AF_INET: sprintf(buf, "INET:"); break; case AF_AX25: sprintf(buf, "AX25:"); break; case AF_IPX: sprintf(buf, "IPX:"); break; case AF_APPLETALK: sprintf(buf, "APPLETALK:"); break; case 
AF_NETROM: sprintf(buf, "NETROM:"); break; case AF_BRIDGE: sprintf(buf, "BRIDGE:"); break; case AF_ATMPVC: sprintf(buf, "ATMPVC:"); break; case AF_X25: sprintf(buf, "X25:"); break; case AF_INET6: sprintf(buf, "INET6:"); break; case AF_ROSE: sprintf(buf, "ROSE:"); break; case AF_DECnet: sprintf(buf, "DECnet:"); break; case AF_NETBEUI: sprintf(buf, "NETBEUI:"); break; case AF_SECURITY: sprintf(buf, "SECURITY/KEY:"); break; case AF_NETLINK: sprintf(buf, "NETLINK/ROUTE:"); break; case AF_PACKET: sprintf(buf, "PACKET:"); break; case AF_ASH: sprintf(buf, "ASH:"); break; case AF_ECONET: sprintf(buf, "ECONET:"); break; case AF_ATMSVC: sprintf(buf, "ATMSVC:"); break; case AF_SNA: sprintf(buf, "SNA:"); break; case AF_IRDA: sprintf(buf, "IRDA:"); break; #ifndef AF_PPPOX #define AF_PPPOX 24 #endif case AF_PPPOX: sprintf(buf, "PPPOX:"); break; default: sprintf(buf, "%d:", family); break; } switch (type) { case SOCK_STREAM: sprintf(&buf[strlen(buf)], "STREAM"); break; case SOCK_DGRAM: sprintf(&buf[strlen(buf)], "DGRAM "); break; case SOCK_RAW: sprintf(&buf[strlen(buf)], "RAW"); break; case SOCK_RDM: sprintf(&buf[strlen(buf)], "RDM"); break; case SOCK_SEQPACKET: sprintf(&buf[strlen(buf)], "SEQPACKET"); break; case SOCK_PACKET: sprintf(&buf[strlen(buf)], "PACKET"); break; default: sprintf(&buf[strlen(buf)], "%d", type); break; } /* make sure we have room at the end... 
*/ // sprintf(&buf[strlen(buf)], "%s", space(MINSPACE-1)); sprintf(&buf[strlen(buf)], " "); if (family == AF_INET) { if (BITS32()) { in_addr.s_addr = rcv_saddr; sprintf(&buf[strlen(buf)], "%*s-%-*d%s", BYTES_IP_ADDR, inet_ntoa(in_addr), BYTES_PORT_NUM, ntohs(sport), space(1)); in_addr.s_addr = daddr; sprintf(&buf[strlen(buf)], "%*s-%-*d%s", BYTES_IP_ADDR, inet_ntoa(in_addr), BYTES_PORT_NUM, ntohs(dport), space(1)); } else { in_addr.s_addr = rcv_saddr; sprintf(&buf[strlen(buf)], " %s-%d ", inet_ntoa(in_addr), ntohs(sport)); in_addr.s_addr = daddr; sprintf(&buf[strlen(buf)], "%s-%d", inet_ntoa(in_addr), ntohs(dport)); } } if (sockbuf) FREEBUF(sockbuf); if (inet_sockbuf) FREEBUF(inet_sockbuf); if (family != AF_INET6) return; switch (net->flags & (SOCK_V1|SOCK_V2)) { case SOCK_V1: break; case SOCK_V2: if (VALID_MEMBER(ipv6_pinfo_rcv_saddr) && VALID_MEMBER(ipv6_pinfo_daddr)) { ipv6_rcv_saddr = ipv6_pinfo + OFFSET(ipv6_pinfo_rcv_saddr); ipv6_daddr = ipv6_pinfo + OFFSET(ipv6_pinfo_daddr); } else if (VALID_MEMBER(sock_sk_common) && VALID_MEMBER(sock_common_skc_v6_daddr) && VALID_MEMBER(sock_common_skc_v6_rcv_saddr)) { ipv6_rcv_saddr = sock + OFFSET(sock_sk_common) + OFFSET(sock_common_skc_v6_rcv_saddr); ipv6_daddr = sock + OFFSET(sock_sk_common) + OFFSET(sock_common_skc_v6_daddr); } else { sprintf(&buf[strlen(buf)], "%s", "(cannot get IPv6 addresses)"); break; } if (!readmem(ipv6_rcv_saddr, KVADDR, u6_addr16_src, SIZE(in6_addr), "ipv6_rcv_saddr buffer", QUIET|RETURN_ON_ERROR)) break; if (!readmem(ipv6_daddr, KVADDR, u6_addr16_dest, SIZE(in6_addr), "ipv6_daddr buffer", QUIET|RETURN_ON_ERROR)) break; sprintf(&buf[strlen(buf)], "%*s ", BITS32() ? 
22 : 12, dump_in6_addr_port(u6_addr16_src, sport, buf2, &len)); if (BITS32() && (len > 22)) len = 1; mkstring(dump_in6_addr_port(u6_addr16_dest, dport, buf2, NULL), len, CENTER, NULL); sprintf(&buf[strlen(buf)], "%s", buf2); break; } } static char * dump_in6_addr_port(uint16_t *addr, uint16_t port, char *buf, int *len) { sprintf(buf, "%x:%x:%x:%x:%x:%x:%x:%x-%d", ntohs(addr[0]), ntohs(addr[1]), ntohs(addr[2]), ntohs(addr[3]), ntohs(addr[4]), ntohs(addr[5]), ntohs(addr[6]), ntohs(addr[7]), ntohs(port)); if (len) *len = strlen(buf); return buf; } /* * XXX - copied from neighbour.h !!!!!! * * Neighbor Cache Entry States. */ #define NUD_INCOMPLETE 0x01 #define NUD_REACHABLE 0x02 #define NUD_STALE 0x04 #define NUD_DELAY 0x08 #define NUD_PROBE 0x10 #define NUD_FAILED 0x20 #define NUD_NOARP 0x40 #define NUD_PERMANENT 0x80 #define FLAGBUF_SIZE 100 #define FILLBUF(s) \ do { \ char *bp; \ int blen; \ blen=strlen(flag_buffer); \ if ((blen + strlen(s)) < FLAGBUF_SIZE-2) { \ bp = &flag_buffer[blen]; \ if (blen != 0) { \ sprintf(bp, "|%s", (s)); \ } else { \ sprintf(bp, "%s", (s)); \ } \ } \ } while(0) /* * Take the state of the ARP entry and print it out the flag associated * with the binary state... 
*/ static void arp_state_to_flags(unsigned char state) { char flag_buffer[FLAGBUF_SIZE]; int had_flags = 0; if (!state) { fprintf(fp, "\n"); return; } bzero(flag_buffer, FLAGBUF_SIZE); if (state & NUD_INCOMPLETE) { FILLBUF("INCOMPLETE"); had_flags = 1; } if (state & NUD_REACHABLE) { FILLBUF("REACHABLE"); had_flags = 1; } if (state & NUD_STALE) { FILLBUF("STALE"); had_flags = 1; } if (state & NUD_DELAY) { FILLBUF("DELAY"); had_flags = 1; } if (state & NUD_PROBE) { FILLBUF("PROBE"); had_flags = 1; } if (state & NUD_FAILED) { FILLBUF("FAILED"); had_flags = 1; } if (state & NUD_NOARP) { FILLBUF("NOARP"); had_flags = 1; } if (state & NUD_PERMANENT) { FILLBUF("PERMANENT"); had_flags = 1; } if (had_flags) { fprintf(fp, "%s\n", flag_buffer); /* fprintf(fp, "%29.29s%s)\n", " ", flag_buffer); */ } } #undef FILLBUF /* * Print out a formatted ethernet HW address.... */ static void dump_ether_hw(unsigned char *ha, int len) { int i; for (i = 0; i < len; i++) { char sep = ':'; if (i == (len - 1)) { sep = ' '; } fprintf(fp, "%02x%c", ha[i], sep); } } /* * Catchall routine for dumping out a HA address whose format we * don't know about... */ static void dump_hw_addr(unsigned char *ha, int len) { int i; for (i = 0; i < len; i++) { fprintf(fp, "%02x ", ha[i]); } } /* * help -N output */ void dump_net_table(void) { int others; others = 0; fprintf(fp, " flags: %lx (", net->flags); if (net->flags & NETDEV_INIT) fprintf(fp, "%sNETDEV_INIT", others++ ? "|" : ""); if (net->flags & STRUCT_DEVICE) fprintf(fp, "%sSTRUCT_DEVICE", others++ ? "|" : ""); if (net->flags & STRUCT_NET_DEVICE) fprintf(fp, "%sSTRUCT_NET_DEVICE", others++ ? "|" : ""); if (net->flags & NO_INET_SOCK) fprintf(fp, "%sNO_INET_SOCK", others++ ? "|" : ""); if (net->flags & SOCK_V1) fprintf(fp, "%sSOCK_V1", others++ ? "|" : ""); if (net->flags & SOCK_V2) fprintf(fp, "%sSOCK_V2", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " netdevice: \"%s\"\n", net->netdevice); fprintf(fp, " dev_name_t: \"%s\"\n", net->dev_name_t); fprintf(fp, " dev_type_t: \"%s\"\n", net->dev_type_t); fprintf(fp, " dev_addr_t: \"%s\"\n", net->dev_addr_t); fprintf(fp, " dev_name: %ld\n", net->dev_name); fprintf(fp, " dev_next: %ld\n", net->dev_next); fprintf(fp, " dev_type: %ld\n", net->dev_type); fprintf(fp, " dev_ip_ptr: %ld\n", net->dev_ip_ptr); fprintf(fp, " dev_addr_len: %ld\n", net->dev_addr_len); fprintf(fp, "net_device_name_index: %d\n", net->net_device_name_index); } /* * Dump the open sockets for a given PID. */ static void dump_sockets(ulong flag, struct reference *ref) { struct task_context *tc; ulong value; int subsequent; if (!args[optind]) { if (!NET_REFERENCE_CHECK(ref)) print_task_header(fp, CURRENT_CONTEXT(), 0); dump_sockets_workhorse(CURRENT_TASK(), flag, ref); return; } subsequent = 0; while (args[optind]) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) { if (!NET_REFERENCE_CHECK(ref)) print_task_header(fp, tc, subsequent++); dump_sockets_workhorse(tc->task, flag, ref); } break; case STR_TASK: if (!NET_REFERENCE_CHECK(ref)) print_task_header(fp, tc, subsequent++); dump_sockets_workhorse(tc->task, flag, ref); break; case STR_INVALID: error(INFO, "%sinvalid task or pid value: %s\n", subsequent++ ? "\n" : "", args[optind]); break; } optind++; } } /* * Find all sockets in the designated task and call sym_socket_dump() * to display them. 
*/ void dump_sockets_workhorse(ulong task, ulong flag, struct reference *ref) { ulong files_struct_addr = 0, fdtable_addr = 0; int max_fdset = 0; int max_fds = 0; ulong open_fds_addr = 0; ulong *open_fds; int open_fds_size; ulong fd; ulong file; int i, j; int sockets_found = 0; ulong value; /* * Steps to getting open sockets: * * 1) task->files (struct files_struct) * 2) files->fd (struct file **) * 3) cycle through from 0 to files->open_fds offset from *fd * i.e. fd[0], fd[1], fd[2] are pointers to the first three * open file descriptors. Thus, we have: * struct file *fd[0], *fd[1], *fd[2],... * * 4) file->f_dentry (struct dentry) * 5) dentry->d_inode (struct inode) * 6) S_ISSOCK(inode.mode) * Assuming it _is_ a socket: * 7) inode.u (struct socket) -- offset 0xdc from inode pointer */ readmem(task + OFFSET(task_struct_files), KVADDR, &files_struct_addr, sizeof(void *), "task files contents", FAULT_ON_ERROR); if (files_struct_addr) { if (VALID_MEMBER(files_struct_max_fdset)) { readmem(files_struct_addr + OFFSET(files_struct_max_fdset), KVADDR, &max_fdset, sizeof(int), "files_struct max_fdset", FAULT_ON_ERROR); readmem(files_struct_addr + OFFSET(files_struct_max_fds), KVADDR, &max_fds, sizeof(int), "files_struct max_fds", FAULT_ON_ERROR); } else if (VALID_MEMBER(files_struct_fdt)) { readmem(files_struct_addr + OFFSET(files_struct_fdt), KVADDR, &fdtable_addr, sizeof(void *), "fdtable buffer", FAULT_ON_ERROR); if (VALID_MEMBER(fdtable_max_fdset)) readmem(fdtable_addr + OFFSET(fdtable_max_fdset), KVADDR, &max_fdset, sizeof(int), "fdtable_struct max_fdset", FAULT_ON_ERROR); else max_fdset = -1; readmem(fdtable_addr + OFFSET(fdtable_max_fds), KVADDR, &max_fds, sizeof(int), "fdtable_struct max_fds", FAULT_ON_ERROR); } } if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || !files_struct_addr || (max_fdset == 0) || (max_fds == 0)) { if (!NET_REFERENCE_CHECK(ref)) fprintf(fp, "No open sockets.\n"); return; } if (VALID_MEMBER(fdtable_open_fds)){ readmem(fdtable_addr + 
OFFSET(fdtable_open_fds), KVADDR, &open_fds_addr, sizeof(void *), "files_struct open_fds addr", FAULT_ON_ERROR); readmem(fdtable_addr + OFFSET(fdtable_fd), KVADDR, &fd, sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); } else { readmem(files_struct_addr + OFFSET(files_struct_open_fds), KVADDR, &open_fds_addr, sizeof(void *), "files_struct open_fds addr", FAULT_ON_ERROR); readmem(files_struct_addr + OFFSET(files_struct_fd), KVADDR, &fd, sizeof(void *), "files_struct fd addr", FAULT_ON_ERROR); } open_fds_size = MAX(max_fdset, max_fds) / BITS_PER_BYTE; open_fds = (ulong *)GETBUF(open_fds_size); if (!open_fds) return; if (open_fds_addr) readmem(open_fds_addr, KVADDR, open_fds, open_fds_size, "files_struct open_fds", FAULT_ON_ERROR); if (!open_fds_addr || !fd) { if (!NET_REFERENCE_CHECK(ref)) fprintf(fp, "No open sockets.\n"); FREEBUF(open_fds); return; } if (NET_REFERENCE_CHECK(ref)) { if (IS_A_NUMBER(ref->str)) { if (hexadecimal_only(ref->str, 0)) { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= NET_REF_HEXNUM; } else { value = dtol(ref->str, FAULT_ON_ERROR, NULL); if (value <= MAX(max_fdset, max_fds)) { ref->decval = value; ref->cmdflags |= NET_REF_DECNUM; } else { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= NET_REF_HEXNUM; } } } ref->ref1 = task; } j = 0; for (;;) { unsigned long set; i = j * BITS_PER_LONG; if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds)) break; set = open_fds[j++]; while (set) { if (set & 1) { readmem(fd + i*sizeof(struct file *), KVADDR, &file, sizeof(struct file *), "fd file", FAULT_ON_ERROR); if (file) { if (sym_socket_dump(file, i, sockets_found, flag, ref)) { sockets_found++; } } } i++; set >>= 1; } } if (!sockets_found && !NET_REFERENCE_CHECK(ref)) fprintf(fp, "No open sockets.\n"); if (NET_REFERENCE_FOUND(ref)) fprintf(fp, "\n"); FREEBUF(open_fds); } /* * Dump a struct socket symbolically. Dave makes this _very_ easy. * * Return TRUE if we found a socket, FALSE otherwise. 
*/ static char *socket_hdr_32 = "FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT"; static char *socket_hdr_64 = "FD SOCKET SOCK FAMILY:TYPE SOURCE-PORT DESTINATION-PORT"; static int sym_socket_dump(ulong file, int fd, int sockets_found, ulong flag, struct reference *ref) { uint16_t umode16 = 0; uint32_t umode32 = 0; uint mode = 0; ulong dentry = 0, inode = 0, struct_socket = 0; ulong sock = 0; char *file_buf, *dentry_buf, *inode_buf, *socket_buf; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *socket_hdr = BITS32() ? socket_hdr_32 : socket_hdr_64; unsigned int radix; file_buf = fill_file_cache(file); dentry = ULONG(file_buf + OFFSET(file_f_dentry)); if (flag & d_FLAG) radix = 10; else if (flag & x_FLAG) radix = 16; else radix = 0; if (!dentry) return FALSE; dentry_buf = fill_dentry_cache(dentry); inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); if (!inode) return FALSE; inode_buf = fill_inode_cache(inode); switch (SIZE(umode_t)) { case SIZEOF_32BIT: umode32 = UINT(inode_buf + OFFSET(inode_i_mode)); break; case SIZEOF_16BIT: umode16 = USHORT(inode_buf + OFFSET(inode_i_mode)); break; } if (SIZE(umode_t) == SIZEOF_32BIT) mode = umode32; else mode = (uint)umode16; if (!S_ISSOCK(mode)) return FALSE; /* * 2.6 (SOCK_V2) -- socket is inode addr minus sizeof(struct socket) */ switch (net->flags & (SOCK_V1|SOCK_V2)) { case SOCK_V1: struct_socket = inode + OFFSET(inode_u); sock = ULONG(inode_buf + OFFSET(inode_u) + OFFSET(socket_sk)); break; case SOCK_V2: if (!VALID_SIZE(inet_sock)) error(FATAL, "cannot determine what an inet_sock structure is\n"); struct_socket = inode - OFFSET(socket_alloc_vfs_inode); socket_buf = GETBUF(SIZE(socket)); readmem(struct_socket, KVADDR, socket_buf, SIZE(socket), "socket buffer", FAULT_ON_ERROR); sock = ULONG(socket_buf + OFFSET(socket_sk)); FREEBUF(socket_buf); break; } if (NET_REFERENCE_CHECK(ref)) { if ((ref->cmdflags & NET_REF_HEXNUM) && ((ref->hexval == sock) || (ref->hexval == struct_socket))) ref->cmdflags |= NET_REF_FOUND_ITEM; 
else if ((ref->cmdflags & NET_REF_DECNUM) && (ref->decval == (ulong)fd)) ref->cmdflags |= NET_REF_FOUND_ITEM; else if ((ref->cmdflags & NET_REF_HEXNUM) && (ref->hexval == (ulong)fd)) ref->cmdflags |= NET_REF_FOUND_ITEM; if (!(ref->cmdflags & NET_REF_FOUND_ITEM)) return FALSE; ref->cmdflags &= ~NET_REF_FOUND_ITEM; ref->cmdflags |= NET_REF_FOUND; if (!(ref->cmdflags & NET_TASK_HEADER_PRINTED)) { print_task_header(fp, task_to_context(ref->ref1), 0); ref->cmdflags |= NET_TASK_HEADER_PRINTED; } if (!(ref->cmdflags & NET_SOCK_HEADER_PRINTED)) { sockets_found = 0; ref->cmdflags |= NET_SOCK_HEADER_PRINTED; } } switch (flag & (S_FLAG|s_FLAG)) { case S_FLAG: fprintf(fp, "%sFD %s %s\n", sockets_found ? "\n" : "", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SOCKET"), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "SOCK")); fprintf(fp, "%2d %s %s\n\n", fd, mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(struct_socket)), mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(sock))); dump_struct("socket", struct_socket, radix); switch (net->flags & (SOCK_V1|SOCK_V2)) { case SOCK_V1: dump_struct("sock", sock, radix); break; case SOCK_V2: if (STRUCT_EXISTS("inet_sock") && !(net->flags & NO_INET_SOCK)) dump_struct("inet_sock", sock, radix); else if (STRUCT_EXISTS("sock")) dump_struct("sock", sock, radix); else fprintf(fp, "\nunable to display inet_sock structure\n"); break; } break; case s_FLAG: if (!sockets_found) { fprintf(fp, "%s\n", socket_hdr); } fprintf(fp, "%2d%s%s%s%s%s", fd, space(MINSPACE), mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(struct_socket)), space(MINSPACE), mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(sock)), space(MINSPACE)); buf1[0] = NULLCHAR; get_sock_info(sock, buf1); fprintf(fp, "%s\n", buf1); return TRUE; default: error(FATAL, "illegal flag: %lx\n", flag); } return TRUE; } crash-utility-crash-9cd43f5/configure.c0000664000372000037200000015127315107550337017532 0ustar juerghjuergh/* configure.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 
2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * define, clear and undef dynamically update the top-level Makefile: * * -b define: TARGET, GDB, GDB_FILES, GDB_OFILES, GDB_PATCH_FILES, * TARGET_CFLAGS, LDFLAGS, GDB_CONF_FLAGS and GPL_FILES * create: build_data.c * * -d define: TARGET, GDB, GDB_FILES, GDB_OFILES, GDB_PATCH_FILES, * TARGET_CFLAGS, LDFLAGS, GDB_CONF_FLAGS and PROGRAM (for daemon) * create: build_data.c * * -u clear: TARGET, GDB, GDB_FILES, GDB_OFILES, VERSION, GDB_PATCH_FILES, * TARGET_CFLAGS, LDFLAGS, GDB_CONF_FLAGS and GPL_FILES * undef: WARNING_ERROR, WARNING_OPTIONS * * -r define: GDB_FILES, VERSION, GDB_PATCH_FILES GPL_FILES * * -w define: WARNING_OPTIONS * undef: WARNING_ERROR * * -W define: WARNING_ERROR, WARNING_OPTIONS * * -n undef: WARNING_ERROR, WARNING_OPTIONS * * -g define: GDB * * -p Create or remove .rh_rpm_package file * * -q Don't print configuration * * -s Create crash.spec file * * -x Add extra libraries/flags to build */ #include #include #include #include #include #include struct supported_gdb_version; void build_configure(struct supported_gdb_version *); void release_configure(char *, struct supported_gdb_version *); void make_rh_rpm_package(char *, int); void unconfigure(void); void set_warnings(int); void show_configuration(void); void target_rebuild_instructions(struct supported_gdb_version *, char *); void arch_mismatch(struct supported_gdb_version 
*); void get_current_configuration(struct supported_gdb_version *); void makefile_setup(FILE **, FILE **); void makefile_create(FILE **, FILE **); char *strip_linefeeds(char *); char *upper_case(char *, char *); char *lower_case(char *, char *); char *shift_string_left(char *, int); char *shift_string_right(char *, int); char *strip_beginning_whitespace(char *); char *strip_ending_whitespace(char *); char *strip_linefeeds(char *); int file_exists(char *); int count_chars(char *, char); void make_build_data(char *); void gdb_configure(struct supported_gdb_version *); int parse_line(char *, char **); struct supported_gdb_version *setup_gdb_defaults(void); struct supported_gdb_version *store_gdb_defaults(struct supported_gdb_version *); void make_spec_file(struct supported_gdb_version *); void set_initial_target(struct supported_gdb_version *); char *target_to_name(int); int name_to_target(char *); char *get_extra_flags(char *, char *); void add_extra_lib(char *); #define TRUE 1 #define FALSE 0 #undef X86 #undef ALPHA #undef PPC #undef IA64 #undef S390 #undef S390X #undef PPC64 #undef X86_64 #undef ARM #undef ARM64 #undef MIPS #undef SPARC64 #undef MIPS64 #undef RISCV64 #undef LOONGARCH64 #define UNKNOWN 0 #define X86 1 #define ALPHA 2 #define PPC 3 #define IA64 4 #define S390 5 #define S390X 6 #define PPC64 7 #define X86_64 8 #define ARM 9 #define ARM64 10 #define MIPS 11 #define SPARC64 12 #define MIPS64 13 #define RISCV64 14 #define LOONGARCH64 15 #define TARGET_X86 "TARGET=X86" #define TARGET_ALPHA "TARGET=ALPHA" #define TARGET_PPC "TARGET=PPC" #define TARGET_IA64 "TARGET=IA64" #define TARGET_S390 "TARGET=S390" #define TARGET_S390X "TARGET=S390X" #define TARGET_PPC64 "TARGET=PPC64" #define TARGET_X86_64 "TARGET=X86_64" #define TARGET_ARM "TARGET=ARM" #define TARGET_ARM64 "TARGET=ARM64" #define TARGET_MIPS "TARGET=MIPS" #define TARGET_MIPS64 "TARGET=MIPS64" #define TARGET_SPARC64 "TARGET=SPARC64" #define TARGET_RISCV64 "TARGET=RISCV64" #define TARGET_LOONGARCH64 
"TARGET=LOONGARCH64" #define TARGET_CFLAGS_X86 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_ALPHA "TARGET_CFLAGS=" #define TARGET_CFLAGS_PPC "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_IA64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_S390 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_S390X "TARGET_CFLAGS=" #define TARGET_CFLAGS_PPC64 "TARGET_CFLAGS=-m64" #define TARGET_CFLAGS_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_ARM "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_ARM_ON_X86 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_ARM_ON_X86_64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_X86_ON_X86_64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_PPC_ON_PPC64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64 -fPIC" #define TARGET_CFLAGS_ARM64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_ARM64_ON_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_PPC64_ON_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_MIPS "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_MIPS_ON_X86 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_MIPS_ON_X86_64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_MIPS64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_SPARC64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_RISCV64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_RISCV64_ON_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_LOONGARCH64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_LOONGARCH64_ON_X86_64 "TARGET_CFLAGS=" #ifndef GDB_TARGET_DEFAULT #define GDB_TARGET_DEFAULT "GDB_CONF_FLAGS=" #endif #define GDB_TARGET_ARM_ON_X86 "GDB_CONF_FLAGS=--target=arm-elf-linux" #define GDB_TARGET_ARM_ON_X86_64 "GDB_CONF_FLAGS=--target=arm-elf-linux CFLAGS=-m32 CXXFLAGS=-m32" #define GDB_TARGET_X86_ON_X86_64 "GDB_CONF_FLAGS=--target=i686-pc-linux-gnu CFLAGS=-m32 CXXFLAGS=-m32" #define GDB_TARGET_PPC_ON_PPC64 "GDB_CONF_FLAGS=--target=ppc-elf-linux CFLAGS=-m32 CXXFLAGS=-m32" #define GDB_TARGET_ARM64_ON_X86_64 
"GDB_CONF_FLAGS=--target=aarch64-elf-linux" /* TBD */ #define GDB_TARGET_PPC64_ON_X86_64 "GDB_CONF_FLAGS=--target=powerpc64le-unknown-linux-gnu" #define GDB_TARGET_MIPS_ON_X86 "GDB_CONF_FLAGS=--target=mipsel-elf-linux" #define GDB_TARGET_MIPS_ON_X86_64 "GDB_CONF_FLAGS=--target=mipsel-elf-linux CFLAGS=-m32 CXXFLAGS=-m32" #define GDB_TARGET_RISCV64_ON_X86_64 "GDB_CONF_FLAGS=--target=riscv64-unknown-linux-gnu" #define GDB_TARGET_LOONGARCH64_ON_X86_64 "GDB_CONF_FLAGS=--target=loongarch64-unknown-linux-gnu" /* * The original plan was to allow the use of a particular version * of gdb for a given architecture. But for practical purposes, * it's a one-size-fits-all scheme, and they all use the default * unless overridden. */ #define GDB_5_3 (0) #define GDB_6_0 (1) #define GDB_6_1 (2) #define GDB_7_0 (3) #define GDB_7_3_1 (4) #define GDB_7_6 (5) #define GDB_10_2 (6) #define GDB_16_2 (7) #define SUPPORTED_GDB_VERSIONS (GDB_16_2 + 1) int default_gdb = GDB_16_2; struct supported_gdb_version { char *GDB; char *GDB_VERSION_IN; char *GDB_FILES; char *GDB_OFILES; char *GDB_PATCH_FILES; char *GDB_FLAGS; char *GPL; } supported_gdb_versions[SUPPORTED_GDB_VERSIONS] = { { "GDB=gdb-5.3post-0.20021129.36rh", "Red Hat Linux (5.3post-0.20021129.36rh)", "GDB_FILES=${GDB_5.3post-0.20021129.36rh_FILES}", "GDB_OFILES=${GDB_5.3post-0.20021129.36rh_OFILES}", "GDB_PATCH_FILES=", "GDB_FLAGS=-DGDB_5_3", "GPLv2" }, { "GDB=gdb-6.0", "6.0", "GDB_FILES=${GDB_6.0_FILES}", "GDB_OFILES=${GDB_6.0_OFILES}", "GDB_PATCH_FILES=", "GDB_FLAGS=-DGDB_6_0", "GPLv2" }, { "GDB=gdb-6.1", "6.1", "GDB_FILES=${GDB_6.1_FILES}", "GDB_OFILES=${GDB_6.1_OFILES}", "GDB_PATCH_FILES=gdb-6.1.patch", "GDB_FLAGS=-DGDB_6_1", "GPLv2" }, { "GDB=gdb-7.0", "7.0", "GDB_FILES=${GDB_7.0_FILES}", "GDB_OFILES=${GDB_7.0_OFILES}", "GDB_PATCH_FILES=gdb-7.0.patch", "GDB_FLAGS=-DGDB_7_0", "GPLv3" }, { "GDB=gdb-7.3.1", "7.3.1", "GDB_FILES=${GDB_7.3.1_FILES}", "GDB_OFILES=${GDB_7.3.1_OFILES}", "GDB_PATCH_FILES=gdb-7.3.1.patch", 
"GDB_FLAGS=-DGDB_7_3_1", "GPLv3" },
	{ "GDB=gdb-7.6", "7.6",
	  "GDB_FILES=${GDB_7.6_FILES}",
	  "GDB_OFILES=${GDB_7.6_OFILES}",
	  "GDB_PATCH_FILES=gdb-7.6.patch gdb-7.6-ppc64le-support.patch gdb-7.6-proc_service.h.patch",
	  "GDB_FLAGS=-DGDB_7_6", "GPLv3" },
	{ "GDB=gdb-10.2", "10.2",
	  "GDB_FILES=${GDB_10.2_FILES}",
	  "GDB_OFILES=${GDB_10.2_OFILES}",
	  "GDB_PATCH_FILES=gdb-10.2.patch",
	  "GDB_FLAGS=-DGDB_10_2", "GPLv3" },
	{ "GDB=gdb-16.2", "16.2",
	  "GDB_FILES=${GDB_16.2_FILES}",
	  "GDB_OFILES=${GDB_16.2_OFILES}",
	  "GDB_PATCH_FILES=gdb-16.2.patch",
	  "GDB_FLAGS=-DGDB_16_2", "GPLv3" },
};

/* target_data.flags bits */
#define DAEMON 0x1
#define QUIET 0x2

#define MAXSTRLEN 256
#define MIN(a,b) (((a)<(b))?(a):(b))

/*
 * Global configuration state gathered by get_current_configuration() and
 * consumed by the Makefile-rewriting functions below.
 */
struct target_data {
	int target;			/* architecture being built for */
	int host;			/* architecture we are building on */
	int initial_gdb_target;		/* arch of a prior build in this tree */
	int flags;			/* DAEMON and/or QUIET */
	char program[MAXSTRLEN];	/* upper-cased PROGRAM= from Makefile */
	char gdb_version[MAXSTRLEN];	/* "GDB=gdb-x.y" string in use */
	char release[MAXSTRLEN];	/* crash version/release string */
	struct stat statbuf;		/* original Makefile ownership info */
	const char *target_as_param;	/* -t override from the command line */
} target_data = { 0 };

/*
 * Dispatch each command-line option to its configuration action.
 * See the option summary in the file header comment.
 */
int main(int argc, char **argv)
{
	int c;
	struct supported_gdb_version *sp;

	sp = setup_gdb_defaults();

	while ((c = getopt(argc, argv, "gsqnWwubdr:p:P:t:x:")) > 0) {
		switch (c) {
		case 'q':
			target_data.flags |= QUIET;
			break;
		case 'u':
			unconfigure();
			break;
		case 'd':
			target_data.flags |= DAEMON;
			/* fallthrough: a daemon build also runs build_configure() */
		case 'b':
			build_configure(sp);
			break;
		case 'r':
			release_configure(optarg, sp);
			break;
		case 'p':
			make_rh_rpm_package(optarg, 0);
			break;
		case 'P':
			make_rh_rpm_package(optarg, 1);
			break;
		case 'W':
		case 'w':
		case 'n':
			set_warnings(c);
			break;
		case 's':
			make_spec_file(sp);
			break;
		case 'g':
			gdb_configure(sp);
			break;
		case 't':
			target_data.target_as_param = optarg;
			break;
		case 'x':
			add_extra_lib(optarg);
			break;
		}
	}

	exit(0);
}

/*
 * Tell the user how to rebuild for a different target architecture.
 */
void
target_rebuild_instructions(struct supported_gdb_version *sp, char *target)
{
	fprintf(stderr, "\nIn order to build a crash binary for the %s architecture:\n", target);
	fprintf(stderr, " 1. remove the %s subdirectory\n", &sp->GDB[strlen("GDB=")]);
	fprintf(stderr, " 2. perform a \"make clean\"\n");
	fprintf(stderr, " 3. retry the build\n\n");
}

/*
 * The requested target conflicts with the architecture of a previous build
 * in this source tree: explain, print rebuild instructions, and exit.
 */
void
arch_mismatch(struct supported_gdb_version *sp)
{
	fprintf(stderr, "\nThe initial build in this source tree was for the %s architecture.\n",
		target_to_name(target_data.initial_gdb_target));
	target_rebuild_instructions(sp, target_to_name(target_data.target));
	exit(1);
}

/*
 * Determine the host/target architectures, apply any -t command-line
 * override, enforce consistency with a prior build in this tree, and
 * read the program name and release string from the Makefile,
 * .rh_rpm_package, or defs.h.
 */
void
get_current_configuration(struct supported_gdb_version *sp)
{
	FILE *fp;
	static char buf[512];
	char *p;

	/* Default target: the architecture of the compiler building us. */
#ifndef CONF_TARGET_ARCH
#ifdef __alpha__
	target_data.target = ALPHA;
#endif
#ifdef __i386__
	target_data.target = X86;
#endif
#ifdef __powerpc__
	target_data.target = PPC;
#endif
#ifdef __ia64__
	target_data.target = IA64;
#endif
#ifdef __s390__
	target_data.target = S390;
#endif
#ifdef __s390x__
	target_data.target = S390X;
#endif
#ifdef __powerpc64__
	target_data.target = PPC64;
#endif
#ifdef __x86_64__
	target_data.target = X86_64;
#endif
#ifdef __arm__
	target_data.target = ARM;
#endif
#ifdef __aarch64__
	target_data.target = ARM64;
#endif
#ifdef __mips__
#ifndef __mips64
	target_data.target = MIPS;
#else
	target_data.target = MIPS64;
#endif
#endif
#ifdef __sparc_v9__
	target_data.target = SPARC64;
#endif
#if defined(__riscv) && (__riscv_xlen == 64)
	target_data.target = RISCV64;
#endif
#ifdef __loongarch64
	target_data.target = LOONGARCH64;
#endif
#else
	target_data.target = CONF_TARGET_ARCH;
#endif

	set_initial_target(sp);

	/*
	 * Override target if specified on command line.
	 */
	target_data.host = target_data.target;

	if (target_data.target_as_param) {
		if ((target_data.target == X86 || target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == ARM)) {
			/*
			 * Debugging of ARM core files supported on X86, and on
			 * X86_64 when built as a 32-bit executable.
			 */
			target_data.target = ARM;
		} else if ((target_data.target == X86 || target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == MIPS)) {
			/*
			 * Debugging of MIPS little-endian core files
			 * supported on X86, and on X86_64 when built as a
			 * 32-bit executable.
*/
			target_data.target = MIPS;
		} else if ((target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == X86)) {
			/*
			 * Build an X86 crash binary on an X86_64 host.
			 */
			target_data.target = X86;
		} else if ((target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == ARM64)) {
			/*
			 * Build an ARM64 crash binary on an X86_64 host.
			 */
			target_data.target = ARM64;
		} else if ((target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == PPC64)) {
			/*
			 * Build a PPC64 little-endian crash binary on an X86_64 host.
			 */
			target_data.target = PPC64;
		} else if ((target_data.target == PPC64) &&
		    (name_to_target((char *)target_data.target_as_param) == PPC)) {
			/*
			 * Build an PPC crash binary on an PPC64 host.
			 */
			target_data.target = PPC;
		} else if (name_to_target((char *)target_data.target_as_param) == target_data.host) {
			/* -t names the host arch itself: only valid if no prior
			 * build in this tree was for a different arch. */
			if ((target_data.initial_gdb_target != UNKNOWN) &&
			    (target_data.host != target_data.initial_gdb_target))
				arch_mismatch(sp);
		} else if ((target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == RISCV64)) {
			/*
			 * Build an RISCV64 crash binary on an X86_64 host.
			 */
			target_data.target = RISCV64;
		} else if ((target_data.target == X86_64) &&
		    (name_to_target((char *)target_data.target_as_param) == LOONGARCH64)) {
			/*
			 * Build an LOONGARCH64 crash binary on an X86_64 host.
			 */
			target_data.target = LOONGARCH64;
		} else {
			fprintf(stderr, "\ntarget=%s is not supported on the %s host architecture\n\n",
				target_data.target_as_param, target_to_name(target_data.host));
			exit(1);
		}
	}

	/*
	 * Impose implied (sticky) target if an initial build has been
	 * done in the source tree.
	 */
	if (target_data.initial_gdb_target &&
	    (target_data.target != target_data.initial_gdb_target)) {
		if ((target_data.initial_gdb_target == ARM) && (target_data.target != ARM)) {
			if ((target_data.target == X86) || (target_data.target == X86_64))
				target_data.target = ARM;
			else
				arch_mismatch(sp);
		}
		if ((target_data.target == ARM) && (target_data.initial_gdb_target != ARM))
			arch_mismatch(sp);

		if ((target_data.initial_gdb_target == MIPS) && (target_data.target != MIPS)) {
			if ((target_data.target == X86) || (target_data.target == X86_64))
				target_data.target = MIPS;
			else
				arch_mismatch(sp);
		}
		if ((target_data.initial_gdb_target == MIPS64) && (target_data.target != MIPS64))
			arch_mismatch(sp);

		if ((target_data.initial_gdb_target == LOONGARCH64) && (target_data.target != LOONGARCH64)) {
			if (target_data.target == X86_64)
				target_data.target = LOONGARCH64;
			else
				arch_mismatch(sp);
		}

		if ((target_data.initial_gdb_target == RISCV64) && (target_data.target != RISCV64)) {
			if (target_data.target == X86_64)
				target_data.target = RISCV64;
			else
				arch_mismatch(sp);
		}

		if ((target_data.initial_gdb_target == X86) && (target_data.target != X86)) {
			if (target_data.target == X86_64)
				target_data.target = X86;
			else
				arch_mismatch(sp);
		}
		if ((target_data.target == X86) && (target_data.initial_gdb_target != X86))
			arch_mismatch(sp);

		if ((target_data.initial_gdb_target == ARM64) && (target_data.target != ARM64)) {
			if (target_data.target == X86_64)
				target_data.target = ARM64;
			else
				arch_mismatch(sp);
		}
		if ((target_data.target == ARM64) && (target_data.initial_gdb_target != ARM64))
			arch_mismatch(sp);

		if ((target_data.initial_gdb_target == PPC64) && (target_data.target != PPC64)) {
			if (target_data.target == X86_64)
				target_data.target = PPC64;
			else
				arch_mismatch(sp);
		}
		if ((target_data.target == PPC64) && (target_data.initial_gdb_target != PPC64))
			arch_mismatch(sp);

		if ((target_data.initial_gdb_target == PPC) && (target_data.target != PPC)) {
			if (target_data.target == PPC64)
				target_data.target = PPC;
			else
				arch_mismatch(sp);
		}
		if ((target_data.target == PPC) && (target_data.initial_gdb_target != PPC))
			arch_mismatch(sp);

		if ((target_data.target == SPARC64) && (target_data.initial_gdb_target != SPARC64))
			arch_mismatch(sp);
	}

	/* Read the PROGRAM= name from the top-level Makefile. */
	if ((fp = fopen("Makefile", "r")) == NULL) {
		perror("Makefile");
		goto get_release;
	}

	while (fgets(buf, 512, fp)) {
		if (strncmp(buf, "PROGRAM=", strlen("PROGRAM=")) == 0) {
			p = strstr(buf, "=") + 1;
			strip_linefeeds(p);
			upper_case(p, target_data.program);
			if (target_data.flags & DAEMON)
				strcat(target_data.program, "D");
			continue;
		}
	}
	fclose(fp);

get_release:
	/* Release string: prefer .rh_rpm_package, fall back to defs.h. */
	target_data.release[0] = '\0';

	if (file_exists(".rh_rpm_package")) {
		if ((fp = fopen(".rh_rpm_package", "r")) == NULL) {
			perror(".rh_rpm_package");
		} else {
			if (fgets(buf, 512, fp)) {
				strip_linefeeds(buf);
				if (strlen(buf)) {
					buf[MAXSTRLEN-1] = '\0';
					strcpy(target_data.release, buf);
				} else
					fprintf(stderr, "WARNING: .rh_rpm_package file is empty!\n");
			} else
				fprintf(stderr, "WARNING: .rh_rpm_package file is empty!\n");
			fclose(fp);
			if (strlen(target_data.release))
				return;
		}
	} else
		fprintf(stderr, "WARNING: .rh_rpm_package file does not exist!\n");

	/* Fall back to the BASELEVEL_REVISION string in defs.h. */
	if ((fp = fopen("defs.h", "r")) == NULL) {
		perror("defs.h");
		return;
	}

	while (fgets(buf, 512, fp)) {
		if (strncmp(buf, "#define BASELEVEL_REVISION", strlen("#define BASELEVEL_REVISION")) == 0) {
			p = strstr(buf, "\"") + 1;
			strip_linefeeds(p);
			p[strlen(p)-1] = '\0';
			strcpy(target_data.release, p);
			break;
		}
	}
	fclose(fp);
}

/*
 * Print the selected target, program name/release, and gdb version,
 * unless -q (QUIET) was given.
 */
void
show_configuration(void)
{
	int i;

	if (target_data.flags & QUIET)
		return;

	switch (target_data.target)
	{
	case X86:
		printf("TARGET: X86\n");
		break;
	case ALPHA:
		printf("TARGET: ALPHA\n");
		break;
	case PPC:
		printf("TARGET: PPC\n");
		break;
	case IA64:
		printf("TARGET: IA64\n");
		break;
	case S390:
		printf("TARGET: S390\n");
		break;
	case S390X:
		printf("TARGET: S390X\n");
		break;
	case PPC64:
		printf("TARGET: PPC64\n");
		break;
	case X86_64:
		printf("TARGET: X86_64\n");
		break;
	case ARM:
		printf("TARGET: ARM\n");
		break;
	case ARM64:
		printf("TARGET: ARM64\n");
break;
	case MIPS:
		printf("TARGET: MIPS\n");
		break;
	case MIPS64:
		printf("TARGET: MIPS64\n");
		break;
	case SPARC64:
		printf("TARGET: SPARC64\n");
		break;
	case RISCV64:
		printf("TARGET: RISCV64\n");
		break;
	case LOONGARCH64:
		printf("TARGET: LOONGARCH64\n");
		break;
	}

	if (strlen(target_data.program)) {
		/* Right-align the program name under the "TARGET" label. */
		for (i = 0; i < (strlen("TARGET")-strlen(target_data.program)); i++)
			printf(" ");
		printf("%s: ", target_data.program);
		if (strlen(target_data.release))
			printf("%s\n", target_data.release);
		else
			printf("???\n");
	}

	if (strlen(target_data.gdb_version))
		printf(" GDB: %s\n\n", &target_data.gdb_version[4]);
}

/*
 * "-b"/"-d": select the per-target Makefile variables (TARGET, TARGET_CFLAGS,
 * GDB_CONF_FLAGS, etc.), merge in any *.extra flag files, rewrite the
 * Makefile, and generate build_data.c.
 */
void
build_configure(struct supported_gdb_version *sp)
{
	FILE *fp1, *fp2;
	char buf[512];
	char *target;
	char *target_CFLAGS;
	char *gdb_conf_flags;
	char *ldflags;
	char *cflags;

	get_current_configuration(sp);

	target = target_CFLAGS = NULL;
	gdb_conf_flags = GDB_TARGET_DEFAULT;

	/* Cross-build cases select special CFLAGS and gdb --target flags. */
	switch (target_data.target)
	{
	case X86:
		target = TARGET_X86;
		if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_X86_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_X86_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_X86;
		break;
	case ALPHA:
		target = TARGET_ALPHA;
		target_CFLAGS = TARGET_CFLAGS_ALPHA;
		break;
	case PPC:
		target = TARGET_PPC;
		if (target_data.host == PPC64) {
			target_CFLAGS = TARGET_CFLAGS_PPC_ON_PPC64;
			gdb_conf_flags = GDB_TARGET_PPC_ON_PPC64;
		} else
			target_CFLAGS = TARGET_CFLAGS_PPC;
		break;
	case IA64:
		target = TARGET_IA64;
		target_CFLAGS = TARGET_CFLAGS_IA64;
		break;
	case S390:
		target = TARGET_S390;
		target_CFLAGS = TARGET_CFLAGS_S390;
		break;
	case S390X:
		target = TARGET_S390X;
		target_CFLAGS = TARGET_CFLAGS_S390X;
		break;
	case PPC64:
		target = TARGET_PPC64;
		if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_PPC64_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_PPC64_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_PPC64;
		break;
	case X86_64:
		target = TARGET_X86_64;
		target_CFLAGS = TARGET_CFLAGS_X86_64;
		break;
	case ARM:
		target = TARGET_ARM;
		if (target_data.host == X86) {
			target_CFLAGS = TARGET_CFLAGS_ARM_ON_X86;
			gdb_conf_flags = GDB_TARGET_ARM_ON_X86;
		} else if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_ARM_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_ARM_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_ARM;
		break;
	case ARM64:
		target = TARGET_ARM64;
		if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_ARM64_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_ARM64_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_ARM64;
		break;
	case MIPS:
		target = TARGET_MIPS;
		if (target_data.host == X86) {
			target_CFLAGS = TARGET_CFLAGS_MIPS_ON_X86;
			gdb_conf_flags = GDB_TARGET_MIPS_ON_X86;
		} else if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_MIPS_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_MIPS_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_MIPS;
		break;
	case MIPS64:
		target = TARGET_MIPS64;
		target_CFLAGS = TARGET_CFLAGS_MIPS64;
		break;
	case SPARC64:
		target = TARGET_SPARC64;
		target_CFLAGS = TARGET_CFLAGS_SPARC64;
		break;
	case RISCV64:
		target = TARGET_RISCV64;
		if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_RISCV64_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_RISCV64_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_RISCV64;
		break;
	case LOONGARCH64:
		target = TARGET_LOONGARCH64;
		if (target_data.host == X86_64) {
			target_CFLAGS = TARGET_CFLAGS_LOONGARCH64_ON_X86_64;
			gdb_conf_flags = GDB_TARGET_LOONGARCH64_ON_X86_64;
		} else
			target_CFLAGS = TARGET_CFLAGS_LOONGARCH64;
		break;
	}

	/* Optional user-supplied extras merged from *.extra files. */
	ldflags = get_extra_flags("LDFLAGS.extra", NULL);
	cflags = get_extra_flags("CFLAGS.extra", NULL);
	gdb_conf_flags = get_extra_flags("GDBFLAGS.extra", gdb_conf_flags);

	makefile_setup(&fp1, &fp2);

	/* Rewrite the Makefile line-by-line, substituting the settings above. */
	while (fgets(buf, 512, fp1)) {
		if (strncmp(buf, "TARGET=", strlen("TARGET=")) == 0)
			fprintf(fp2, "%s\n", target);
		else if (strncmp(buf, "TARGET_CFLAGS=", strlen("TARGET_CFLAGS=")) == 0)
			fprintf(fp2, "%s%s%s\n", target_CFLAGS, cflags ? " " : "", cflags ? cflags : "");
		else if (strncmp(buf, "GDB_CONF_FLAGS=", strlen("GDB_CONF_FLAGS=")) == 0)
			fprintf(fp2, "%s\n", gdb_conf_flags);
		else if (strncmp(buf, "GDB_FILES=",strlen("GDB_FILES=")) == 0)
			fprintf(fp2, "%s\n", sp->GDB_FILES);
		else if (strncmp(buf, "GDB_OFILES=",strlen("GDB_OFILES=")) == 0)
			fprintf(fp2, "%s\n", sp->GDB_OFILES);
		else if (strncmp(buf, "GDB_PATCH_FILES=",strlen("GDB_PATCH_FILES=")) == 0)
			fprintf(fp2, "%s\n", sp->GDB_PATCH_FILES);
		else if (strncmp(buf, "GDB_FLAGS=",strlen("GDB_FLAGS=")) == 0)
			fprintf(fp2, "%s\n", sp->GDB_FLAGS);
		else if (strncmp(buf, "GPL_FILES=", strlen("GPL_FILES=")) == 0)
			fprintf(fp2, "GPL_FILES=%s\n", strcmp(sp->GPL, "GPLv2") == 0 ? "COPYING" : "COPYING3");
		else if (strncmp(buf, "GDB=", strlen("GDB=")) == 0) {
			fprintf(fp2, "%s\n", sp->GDB);
			sprintf(target_data.gdb_version, "%s", &sp->GDB[4]);
		} else if (strncmp(buf, "LDFLAGS=", strlen("LDFLAGS=")) == 0) {
			fprintf(fp2, "LDFLAGS=%s\n", ldflags ? ldflags : "");
		} else
			fprintf(fp2, "%s", buf);
	}

	makefile_create(&fp1, &fp2);
	show_configuration();
	make_build_data(&target[strlen("TARGET=")]);
}

/*
 * "-r": point the Makefile's GDB_FILES/VERSION/GDB_PATCH_FILES/GPL_FILES
 * at the named gdb version for a release build.
 */
void
release_configure(char *gdb_version, struct supported_gdb_version *sp)
{
	FILE *fp1, *fp2;
	int found;
	char buf[512];
	char gdb_files[MAXSTRLEN];

	get_current_configuration(sp);

	/* The gdb source subdirectory must already exist. */
	sprintf(buf, "%s/gdb", gdb_version);
	if (!file_exists(buf)) {
		fprintf(stderr, "make release: no such directory: %s\n", buf);
		exit(1);
	}
	sprintf(gdb_files, "GDB_%s_FILES", &gdb_version[strlen("gdb-")]);

	makefile_setup(&fp1, &fp2);

	found = 0;
	while (fgets(buf, 512, fp1)) {
		if (strncmp(buf, gdb_files, strlen(gdb_files)) == 0)
			found++;
		if (strncmp(buf, "GDB_FILES=", strlen("GDB_FILES=")) == 0)
			fprintf(fp2, "GDB_FILES=${%s}\n", gdb_files);
		else if (strncmp(buf, "VERSION=", strlen("VERSION=")) == 0)
			fprintf(fp2, "VERSION=%s\n", target_data.release);
		else if (strncmp(buf, "GDB_PATCH_FILES=", strlen("GDB_PATCH_FILES=")) == 0)
			fprintf(fp2, "%s\n", sp->GDB_PATCH_FILES);
		else if (strncmp(buf, "GPL_FILES=", strlen("GPL_FILES=")) == 0)
fprintf(fp2, "GPL_FILES=%s\n", strcmp(sp->GPL, "GPLv2") == 0 ? "COPYING" : "COPYING3");
		else
			fprintf(fp2, "%s", buf);
	}

	if (!found) {
		fprintf(stderr, "make release: cannot find %s\n", gdb_files);
		exit(1);
	}

	makefile_create(&fp1, &fp2);
}

/*
 * Create an .rh_rpm_package file if the passed-in variable is set.
 */
void
make_rh_rpm_package(char *package, int release)
{
	char *p, *cur;
	FILE *fp;
	char buf[256];

	/* "remove" deletes any existing .rh_rpm_package file. */
	if ((strcmp(package, "remove") == 0)) {
		if (file_exists(".rh_rpm_package")) {
			if (unlink(".rh_rpm_package")) {
				perror("unlink");
				fprintf(stderr, "cannot remove .rh_rpm_package\n");
				exit(1);
			}
		}
		return;
	}

	/* Expect "name=value"; use the text after the '='. */
	if (!(p = strstr(package, "=")))
		return;

	if (!strlen(++p))
		return;

	if (release) {
		/* Verify the requested release matches the built ./crash binary. */
		if (!(fp = popen("./crash -v", "r"))) {
			fprintf(stderr, "cannot execute \"crash -v\"\n");
			exit(1);
		}
		cur = NULL;
		while (fgets(buf, 256, fp)) {
			if (strncmp(buf, "crash ", 6) == 0) {
				cur = &buf[6];
				break;
			}
		}
		pclose(fp);
		if (!cur) {
			fprintf(stderr, "cannot get version from \"crash -v\"\n");
			exit(1);
		}
		strip_linefeeds(cur);
		if (strcmp(cur, p) != 0) {
			fprintf(stderr, "./crash version: %s\n", cur);
			fprintf(stderr, "release version: %s\n", p);
			exit(1);
		}
	}

	if ((fp = fopen(".rh_rpm_package", "w")) == NULL) {
		perror("fopen");
		fprintf(stderr, "cannot open .rh_rpm_package\n");
		exit(1);
	}

	fprintf(fp, "%s\n", strip_linefeeds(p));
	fclose(fp);
}

/*
 * "-g": rewrite only the GDB= line of the Makefile.
 */
void
gdb_configure(struct supported_gdb_version *sp)
{
	FILE *fp1, *fp2;
	char buf[512];

	get_current_configuration(sp);
	makefile_setup(&fp1, &fp2);

	while (fgets(buf, 512, fp1)) {
		if (strncmp(buf, "GDB=", strlen("GDB=")) == 0)
			fprintf(fp2, "%s\n", sp->GDB);
		else
			fprintf(fp2, "%s", buf);
	}

	makefile_create(&fp1, &fp2);
}

/*
 * "-u": clear all configured Makefile variables and comment out the
 * warning-related lines, returning the Makefile to its pristine state.
 */
void
unconfigure(void)
{
	FILE *fp1, *fp2;
	char buf[512];

	makefile_setup(&fp1, &fp2);

	while (fgets(buf, 512, fp1)) {
		if (strncmp(buf, "TARGET=", strlen("TARGET=")) == 0)
			fprintf(fp2, "TARGET=\n");
		else if (strncmp(buf, "TARGET_CFLAGS=", strlen("TARGET_CFLAGS=")) == 0)
			fprintf(fp2, "TARGET_CFLAGS=\n");
		else if (strncmp(buf, "GDB_CONF_FLAGS=", strlen("GDB_CONF_FLAGS=")) == 0)
			fprintf(fp2, "GDB_CONF_FLAGS=\n");
		else if (strncmp(buf, "GDB_FILES=",strlen("GDB_FILES=")) == 0)
			fprintf(fp2, "GDB_FILES=\n");
		else if (strncmp(buf, "GDB_OFILES=",strlen("GDB_OFILES=")) == 0)
			fprintf(fp2, "GDB_OFILES=\n");
		else if (strncmp(buf, "GDB_PATCH_FILES=",strlen("GDB_PATCH_FILES=")) == 0)
			fprintf(fp2, "GDB_PATCH_FILES=\n");
		else if (strncmp(buf, "GDB_FLAGS=",strlen("GDB_FLAGS=")) == 0)
			fprintf(fp2, "GDB_FLAGS=\n");
		else if (strncmp(buf, "GDB=", strlen("GDB=")) == 0)
			fprintf(fp2, "GDB=\n");
		else if (strncmp(buf, "VERSION=", strlen("VERSION=")) == 0)
			fprintf(fp2, "VERSION=\n");
		else if (strncmp(buf, "GPL_FILES=", strlen("GPL_FILES=")) == 0)
			fprintf(fp2, "GPL_FILES=\n");
		else if (strncmp(buf, "LDFLAGS=", strlen("LDFLAGS=")) == 0)
			fprintf(fp2, "LDFLAGS=\n");
		else if (strncmp(buf, "WARNING_ERROR=", strlen("WARNING_ERROR=")) == 0) {
			/* Comment the line out by prefixing '#'. */
			shift_string_right(buf, 1);
			buf[0] = '#';
			fprintf(fp2, "%s", buf);
		} else if (strncmp(buf, "WARNING_OPTIONS=", strlen("WARNING_OPTIONS=")) == 0) {
			shift_string_right(buf, 1);
			buf[0] = '#';
			fprintf(fp2, "%s", buf);
		} else
			fprintf(fp2, "%s", buf);
	}

	makefile_create(&fp1, &fp2);
}

/*
 * "-W"/"-w"/"-n": toggle WARNING_ERROR and WARNING_OPTIONS in the Makefile
 * by commenting ('#' prefix) or uncommenting the respective lines:
 *   -W  enable both; -w  options only; -n  disable both.
 */
void
set_warnings(int w)
{
	FILE *fp1, *fp2;
	char buf[512];

	makefile_setup(&fp1, &fp2);

	while (fgets(buf, 512, fp1)) {
		if (strncmp(buf, "#WARNING_ERROR=", strlen("#WARNING_ERROR=")) == 0) {
			switch (w) {
			case 'W':
				shift_string_left(buf, 1);
				break;
			case 'w':
			case 'n':
				break;
			}
		}

		if (strncmp(buf, "WARNING_ERROR=", strlen("WARNING_ERROR=")) == 0) {
			switch (w) {
			case 'n':
			case 'w':
				shift_string_right(buf, 1);
				buf[0] = '#';
				break;
			case 'W':
				break;
			}
		}

		if (strncmp(buf, "#WARNING_OPTIONS=", strlen("#WARNING_OPTIONS=")) == 0) {
			switch (w) {
			case 'W':
			case 'w':
				shift_string_left(buf, 1);
				break;
			case 'n':
				break;
			}
		}

		if (strncmp(buf, "WARNING_OPTIONS=", strlen("WARNING_OPTIONS=")) == 0) {
			switch (w) {
			case 'w':
			case 'W':
				break;
			case 'n':
				shift_string_right(buf, 1);
				buf[0] = '#';
				break;
			}
		}

		fprintf(fp2, "%s", buf);
	}

	makefile_create(&fp1,
&fp2);
}

/*
 * Open the existing Makefile for reading (recording its ownership in
 * target_data.statbuf) and create Makefile.new for writing.
 */
void
makefile_setup(FILE **fp1, FILE **fp2)
{
	if (stat("Makefile", &target_data.statbuf) == -1) {
		perror("Makefile");
		exit(1);
	}

	if ((*fp1 = fopen("Makefile", "r")) == NULL) {
		perror("fopen");
		fprintf(stderr, "cannot open existing Makefile\n");
		exit(1);
	}

	unlink("Makefile.new");
	if ((*fp2 = fopen("Makefile.new", "w+")) == NULL) {
		perror("fopen");
		fprintf(stderr, "cannot create new Makefile\n");
		exit(1);
	}
}

/*
 * Close both files, move Makefile.new into place, and restore the
 * original Makefile's owner/group saved by makefile_setup().
 */
void
makefile_create(FILE **fp1, FILE **fp2)
{
	fclose(*fp1);
	fclose(*fp2);

	if (system("mv Makefile.new Makefile") != 0) {
		fprintf(stderr, "Makefile: cannot create new Makefile\n");
		fprintf(stderr, "please copy Makefile.new to Makefile\n");
		exit(1);
	}

	if (chown("Makefile", target_data.statbuf.st_uid, target_data.statbuf.st_gid) == -1) {
		fprintf(stderr, "Makefile: cannot restore original owner/group\n");
	}
}

#define LASTCHAR(s) (s[strlen(s)-1])

/*
 * Strip a trailing linefeed (as left by fgets) from "line" in place.
 */
char *
strip_linefeeds(char *line)
{
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	p = &LASTCHAR(line);

	while (*p == '\n')
		*p = '\0';

	return(line);
}

/*
 * Turn a string into upper-case.
 */
char *
upper_case(char *s, char *buf)
{
	char *p1, *p2;

	p1 = s;
	p2 = buf;

	while (*p1) {
		*p2 = toupper(*p1);
		p1++, p2++;
	}
	*p2 = '\0';

	return(buf);
}

/*
 * Turn a string into lower-case.
 */
char *
lower_case(char *s, char *buf)
{
	char *p1, *p2;

	p1 = s;
	p2 = buf;

	while (*p1) {
		*p2 = tolower(*p1);
		p1++, p2++;
	}
	*p2 = '\0';

	return(buf);
}

/*
 * Shift the contents of "s" left by "cnt" characters, in place.
 */
char *
shift_string_left(char *s, int cnt)
{
	int origlen;

	if (!cnt)
		return(s);

	origlen = strlen(s);
	memmove(s, s+cnt, (origlen-cnt));
	*(s+(origlen-cnt)) = '\0';
	return(s);
}

/*
 * Shift the contents of "s" right by "cnt" characters, padding the front
 * with spaces; the buffer must have room for the extra characters.
 */
char *
shift_string_right(char *s, int cnt)
{
	int i;
	int origlen;

	if (!cnt)
		return(s);

	origlen = strlen(s);
	memmove(s+cnt, s, origlen);
	*(s+(origlen+cnt)) = '\0';

	for (i = 0; i < cnt; i++)
		s[i] = ' ';

	return(s);
}

/*
 * Remove leading spaces/tabs from "line" in place.
 */
char *
strip_beginning_whitespace(char *line)
{
	char buf[MAXSTRLEN];
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	strcpy(buf, line);
	p = &buf[0];
	while (*p == ' ' || *p == '\t')
		p++;
	strcpy(line, p);

	return(line);
}

/*
 * Remove trailing spaces/tabs from "line" in place.
 */
char *
strip_ending_whitespace(char *line)
{
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	p = &line[strlen(line)-1];

	while (*p == ' ' || *p == '\t') {
		*p = '\0';
		if (p == line)
			break;
		p--;
	}

	return(line);
}

/*
 * Return TRUE if "file" exists (stat succeeds), FALSE otherwise.
 */
int
file_exists(char *file)
{
	struct stat sbuf;

	if (stat(file, &sbuf) == 0)
		return TRUE;

	return FALSE;
}

/*
 * Count the occurrences of character "c" in string "s".
 */
int
count_chars(char *s, char c)
{
	char *p;
	int count;

	if (!s)
		return 0;

	count = 0;

	for (p = s; *p; p++) {
		if (*p == c)
			count++;
	}

	return count;
}

/*
 * Generate build_data.c, embedding the build command, date/user/host,
 * target, version, and compiler version as C string variables.
 */
void
make_build_data(char *target)
{
	char *p;
	char hostname[MAXSTRLEN];
	char progname[MAXSTRLEN];
	char inbuf1[MAXSTRLEN];
	char inbuf2[MAXSTRLEN];
	char inbuf3[MAXSTRLEN];
	FILE *fp1, *fp2, *fp3, *fp4;

	unlink("build_data.c");

	/* Gather the build date, user id, and compiler version. */
	fp1 = popen("date", "r");
	fp2 = popen("id", "r");
	fp3 = popen("gcc --version", "r");

	if ((fp4 = fopen("build_data.c", "w")) == NULL) {
		perror("build_data.c");
		exit(1);
	}

	if (gethostname(hostname, MAXSTRLEN) != 0)
		hostname[0] = '\0';

	p = fgets(inbuf1, 79, fp1);
	p = fgets(inbuf2, 79, fp2);
	p = strstr(inbuf2, " ");
	*p = '\0';
	p = fgets(inbuf3, 79, fp3);

	lower_case(target_data.program, progname);
	fprintf(fp4, "char *build_command = \"%s\";\n", progname);
	/* SOURCE_DATE_EPOCH set: omit date/host for reproducible builds. */
	if (getenv("SOURCE_DATE_EPOCH"))
		fprintf(fp4, "char *build_data = 
\"reproducible build\";\n"); else if (strlen(hostname)) fprintf(fp4, "char *build_data = \"%s by %s on %s\";\n", strip_linefeeds(inbuf1), inbuf2, hostname); else fprintf(fp4, "char *build_data = \"%s by %s\";\n", strip_linefeeds(inbuf1), inbuf2); bzero(inbuf1, MAXSTRLEN); sprintf(inbuf1, "%s", target_data.release); fprintf(fp4, "char *build_target = \"%s\";\n", target); fprintf(fp4, "char *build_version = \"%s\";\n", inbuf1); fprintf(fp4, "char *compiler_version = \"%s\";\n", strip_linefeeds(inbuf3)); pclose(fp1); pclose(fp2); pclose(fp3); fclose(fp4); } void make_spec_file(struct supported_gdb_version *sp) { char *Version, *Release; char buf[512]; get_current_configuration(sp); Release = strstr(target_data.release, "-"); if (!Release) { Version = target_data.release; Release = "0"; } else { fprintf(stderr, "crash.spec: obsolete src.rpm build manner -- no dashes allowed: %s\n", target_data.release); return; } printf("#\n"); printf("# crash core analysis suite\n"); printf("#\n"); printf("Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles\n"); printf("Name: %s\n", lower_case(target_data.program, buf)); printf("Version: %s\n", Version); printf("Release: %s\n", Release); printf("License: %s\n", sp->GPL); printf("Group: Development/Debuggers\n"); printf("Source: %%{name}-%%{version}.tar.gz\n"); printf("URL: https://github.com/crash-utility\n"); printf("Distribution: Linux 2.2 or greater\n"); printf("Vendor: Red Hat, Inc.\n"); printf("Packager: Dave Anderson \n"); printf("ExclusiveOS: Linux\n"); printf("ExclusiveArch: %%{ix86} alpha ia64 ppc ppc64 ppc64pseries ppc64iseries x86_64 s390 s390x arm aarch64 ppc64le mips mipsel mips64el sparc64 riscv64 loongarch64\n"); printf("Buildroot: %%{_tmppath}/%%{name}-root\n"); printf("BuildRequires: ncurses-devel zlib-devel bison\n"); printf("Requires: binutils\n"); printf("# Patch0: crash-3.3-20.installfix.patch (patch example)\n"); printf("\n"); printf("%%description\n"); printf("The core 
analysis suite is a self-contained tool that can be used to\n"); printf("investigate either live systems, kernel core dumps created from the\n"); printf("netdump, diskdump and kdump facilities from Red Hat Linux, the mcore kernel patch\n"); printf("offered by Mission Critical Linux, or the LKCD kernel patch.\n"); printf("\n"); printf("%%package devel\n"); printf("Requires: %%{name} = %%{version}, zlib-devel\n"); printf("Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles\n"); printf("Group: Development/Debuggers\n"); printf("\n"); printf("%%description devel\n"); printf("The core analysis suite is a self-contained tool that can be used to\n"); printf("investigate either live systems, kernel core dumps created from the\n"); printf("netdump, diskdump and kdump packages from Red Hat Linux, the mcore kernel patch\n"); printf("offered by Mission Critical Linux, or the LKCD kernel patch.\n"); printf("\n"); printf("%%package extensions\n"); printf("Summary: Additional commands for the crash dump analysis tool\n"); printf("Group: Development/Debuggers\n"); printf("\n"); printf("%%description extensions\n"); printf("The extensions package contains plugins that provide additional crash\n"); printf("commands. 
The extensions can be loaded in crash via the \"extend\" command.\n"); printf("\n"); printf("The following extensions are provided:\n"); printf("* eppic: Provides C-like language for writing dump analysis scripts\n"); printf("* dminfo: Device-mapper target analyzer\n"); printf("* snap: Takes a snapshot of live memory and creates a kdump dumpfile\n"); printf("* trace: Displays kernel tracing data and traced events that occurred prior to a panic.\n"); printf("\n"); printf("%%prep\n"); printf("%%setup -n %%{name}-%%{version}\n"); printf("# %%patch0 -p1 -b .install (patch example)\n"); printf("\n"); printf("%%build\n"); printf("make RPMPKG=\"%%{version}\"\n"); printf("# make RPMPKG=\"%%{version}-%%{release}\"\n"); printf("make extensions\n"); /* printf("make crashd\n"); */ printf("\n"); printf("%%install\n"); printf("rm -rf %%{buildroot}\n"); printf("mkdir -p %%{buildroot}/usr/bin\n"); printf("make DESTDIR=%%{buildroot} install\n"); printf("mkdir -p %%{buildroot}%%{_mandir}/man8\n"); printf("cp crash.8 %%{buildroot}%%{_mandir}/man8/crash.8\n"); printf("mkdir -p %%{buildroot}%%{_includedir}/crash\n"); printf("cp defs.h %%{buildroot}%%{_includedir}/crash\n"); printf("mkdir -p %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("if [ -f extensions/eppic.so ]\n"); printf("then\n"); printf("cp extensions/eppic.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("fi\n"); printf("cp extensions/dminfo.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("cp extensions/snap.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("cp extensions/trace.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("\n"); printf("%%clean\n"); printf("rm -rf %%{buildroot}\n"); printf("\n"); printf("%%files\n"); printf("%%defattr(-,root,root)\n"); printf("/usr/bin/crash\n"); printf("%%{_mandir}/man8/crash.8*\n"); /* printf("/usr/bin/crashd\n"); */ printf("%%doc README\n"); printf("\n"); printf("%%files devel\n"); printf("%%defattr(-,root,root)\n"); 
printf("%%{_includedir}/*\n"); printf("\n"); printf("%%files extensions\n"); printf("%%defattr(-,root,root)\n"); printf("%%{_libdir}/crash/extensions/*\n"); } /* * Use the default gdb #defines unless there's a .gdb file. */ struct supported_gdb_version * setup_gdb_defaults(void) { FILE *fp; char inbuf[512]; char buf[512]; struct supported_gdb_version *sp; /* * Use the default, allowing for an override in .gdb */ if (!file_exists(".gdb")) return store_gdb_defaults(NULL); if ((fp = fopen(".gdb", "r")) == NULL) { perror(".gdb"); return store_gdb_defaults(NULL); } while (fgets(inbuf, 512, fp)) { strip_linefeeds(inbuf); strip_beginning_whitespace(inbuf); strcpy(buf, inbuf); /* * Simple override. */ if (strcmp(buf, "5.3") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_5_3]; fprintf(stderr, ".gdb configuration: %s\n\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "6.0") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_6_0]; fprintf(stderr, ".gdb configuration: %s\n\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "6.1") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_6_1]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "7.0") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_7_0]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "7.3.1") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_7_3_1]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "7.6") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_7_6]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "10.2") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_10_2]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "16.2") == 
0) { fclose(fp); sp = &supported_gdb_versions[GDB_16_2]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } } fclose(fp); fprintf(stderr, ".gdb: rejected -- using default gdb\n\n"); return store_gdb_defaults(NULL); } struct supported_gdb_version * store_gdb_defaults(struct supported_gdb_version *sp) { if (!sp) sp = &supported_gdb_versions[default_gdb]; else fprintf(stderr, "WARNING: \"make clean\" may be required before rebuilding\n\n"); return sp; } void set_initial_target(struct supported_gdb_version *sp) { FILE *fp; char crash_target[512]; char buf[512]; target_data.initial_gdb_target = UNKNOWN; sprintf(crash_target, "%s/crash.target", &sp->GDB[strlen("GDB=")]); if (!file_exists(crash_target)) { if (target_data.target_as_param && file_exists(&sp->GDB[strlen("GDB=")])) { fprintf(stderr, "\nThe \"%s\" file does not exist.\n", crash_target); target_rebuild_instructions(sp, (char *)target_data.target_as_param); exit(1); } return; } if ((fp = fopen(crash_target, "r")) == NULL) { perror(crash_target); return; } if (!fgets(buf, 512, fp)) { perror(crash_target); fclose(fp); return; } fclose(fp); if (strncmp(buf, "X86_64", strlen("X86_64")) == 0) target_data.initial_gdb_target = X86_64; else if (strncmp(buf, "X86", strlen("X86")) == 0) target_data.initial_gdb_target = X86; else if (strncmp(buf, "ALPHA", strlen("ALPHA")) == 0) target_data.initial_gdb_target = ALPHA; else if (strncmp(buf, "PPC64", strlen("PPC64")) == 0) target_data.initial_gdb_target = PPC64; else if (strncmp(buf, "PPC", strlen("PPC")) == 0) target_data.initial_gdb_target = PPC; else if (strncmp(buf, "IA64", strlen("IA64")) == 0) target_data.initial_gdb_target = IA64; else if (strncmp(buf, "S390X", strlen("S390X")) == 0) target_data.initial_gdb_target = S390X; else if (strncmp(buf, "S390", strlen("S390")) == 0) target_data.initial_gdb_target = S390; else if (strncmp(buf, "ARM64", strlen("ARM64")) == 0) target_data.initial_gdb_target = ARM64; else if 
(strncmp(buf, "ARM", strlen("ARM")) == 0) target_data.initial_gdb_target = ARM; else if (strncmp(buf, "MIPS64", strlen("MIPS64")) == 0) target_data.initial_gdb_target = MIPS64; else if (strncmp(buf, "MIPS", strlen("MIPS")) == 0) target_data.initial_gdb_target = MIPS; else if (strncmp(buf, "SPARC64", strlen("SPARC64")) == 0) target_data.initial_gdb_target = SPARC64; else if (strncmp(buf, "RISCV64", strlen("RISCV64")) == 0) target_data.initial_gdb_target = RISCV64; else if (strncmp(buf, "LOONGARCH64", strlen("LOONGARCH64")) == 0) target_data.initial_gdb_target = LOONGARCH64; } char * target_to_name(int target) { switch (target) { case X86: return("X86"); case ALPHA: return("ALPHA"); case PPC: return("PPC"); case IA64: return("IA64"); case S390: return("S390"); case S390X: return("S390X"); case PPC64: return("PPC64"); case X86_64: return("X86_64"); case ARM: return("ARM"); case ARM64: return("ARM64"); case MIPS: return("MIPS"); case MIPS64: return("MIPS64"); case SPARC64: return("SPARC64"); case RISCV64: return("RISCV64"); case LOONGARCH64: return("LOONGARCH64"); } return "UNKNOWN"; } int name_to_target(char *name) { if (strncmp(name, "X86_64", strlen("X86_64")) == 0) return X86_64; else if (strncmp(name, "x86_64", strlen("x86_64")) == 0) return X86_64; else if (strncmp(name, "X86", strlen("X86")) == 0) return X86; else if (strncmp(name, "x86", strlen("x86")) == 0) return X86; else if (strncmp(name, "ALPHA", strlen("ALPHA")) == 0) return ALPHA; else if (strncmp(name, "alpha", strlen("alpha")) == 0) return ALPHA; else if (strncmp(name, "PPC64", strlen("PPC64")) == 0) return PPC64; else if (strncmp(name, "ppc64", strlen("ppc64")) == 0) return PPC64; else if (strncmp(name, "ppc64le", strlen("ppc64le")) == 0) return PPC64; else if (strncmp(name, "PPC64LE", strlen("PPC64LE")) == 0) return PPC64; else if (strncmp(name, "PPC", strlen("PPC")) == 0) return PPC; else if (strncmp(name, "ppc", strlen("ppc")) == 0) return PPC; else if (strncmp(name, "IA64", strlen("IA64")) == 0) 
return IA64; else if (strncmp(name, "ia64", strlen("ia64")) == 0) return IA64; else if (strncmp(name, "S390X", strlen("S390X")) == 0) return S390X; else if (strncmp(name, "s390x", strlen("s390x")) == 0) return S390X; else if (strncmp(name, "S390", strlen("S390")) == 0) return S390; else if (strncmp(name, "s390", strlen("s390")) == 0) return S390; else if (strncmp(name, "ARM64", strlen("ARM64")) == 0) return ARM64; else if (strncmp(name, "arm64", strlen("arm64")) == 0) return ARM64; else if (strncmp(name, "aarch64", strlen("aarch64")) == 0) return ARM64; else if (strncmp(name, "ARM", strlen("ARM")) == 0) return ARM; else if (strncmp(name, "arm", strlen("arm")) == 0) return ARM; else if (strncmp(name, "mips", strlen("mips")) == 0) return MIPS; else if (strncmp(name, "MIPS", strlen("MIPS")) == 0) return MIPS; else if (strncmp(name, "mips64", strlen("mips64")) == 0) return MIPS64; else if (strncmp(name, "MIPS64", strlen("MIPS64")) == 0) return MIPS64; else if (strncmp(name, "sparc64", strlen("sparc64")) == 0) return SPARC64; else if (strncmp(name, "RISCV64", strlen("RISCV64")) == 0) return RISCV64; else if (strncmp(name, "riscv64", strlen("riscv64")) == 0) return RISCV64; else if (strncmp(name, "loongarch64", strlen("loongarch64")) == 0) return LOONGARCH64; else if (strncmp(name, "LOONGARCH64", strlen("LOONGARCH64")) == 0) return LOONGARCH64; return UNKNOWN; } char * get_extra_flags(char *filename, char *initial) { FILE *fp; char inbuf[512]; char buf[512]; if (!file_exists(filename)) return (initial ? initial : NULL); if ((fp = fopen(filename, "r")) == NULL) { perror(filename); return (initial ? 
initial : NULL); } if (initial) strcpy(buf, initial); else buf[0] = '\0'; while (fgets(inbuf, 512, fp)) { strip_linefeeds(inbuf); strip_beginning_whitespace(inbuf); strip_ending_whitespace(inbuf); if (inbuf[0] == '#') continue; if (strlen(inbuf)) { if (strlen(buf)) strcat(buf, " "); strcat(buf, inbuf); } } fclose(fp); if (strlen(buf)) return strdup(buf); else return NULL; } /* * Add extra compression libraries. If not already there, create * a CFLAGS.extra file and an LDFLAGS.extra file. * For lzo: * - enter -DLZO in the CFLAGS.extra file * - enter -llzo2 in the LDFLAGS.extra file * * For snappy: * - enter -DSNAPPY in the CFLAGS.extra file * - enter -lsnappy in the LDFLAGS.extra file * * For zstd: * - enter -DZSTD in the CFLAGS.extra file * - enter -lzstd in the LDFLAGS.extra file * * For valgrind: * - enter -DVALGRIND in the CFLAGS.extra file */ void add_extra_lib(char *option) { int lzo, add_DLZO, add_llzo2; int snappy, add_DSNAPPY, add_lsnappy; int zstd, add_DZSTD, add_lzstd; int valgrind, add_DVALGRIND; char *cflags, *ldflags; FILE *fp_cflags, *fp_ldflags; char *mode; char inbuf[512]; lzo = add_DLZO = add_llzo2 = 0; snappy = add_DSNAPPY = add_lsnappy = 0; zstd = add_DZSTD = add_lzstd = 0; valgrind = add_DVALGRIND = 0; ldflags = get_extra_flags("LDFLAGS.extra", NULL); cflags = get_extra_flags("CFLAGS.extra", NULL); if (strcmp(option, "lzo") == 0) { lzo++; if (!cflags || !strstr(cflags, "-DLZO")) add_DLZO++; if (!ldflags || !strstr(ldflags, "-llzo2")) add_llzo2++; } if (strcmp(option, "snappy") == 0) { snappy++; if (!cflags || !strstr(cflags, "-DSNAPPY")) add_DSNAPPY++; if (!ldflags || !strstr(ldflags, "-lsnappy")) add_lsnappy++; } if (strcmp(option, "zstd") == 0) { zstd++; if (!cflags || !strstr(cflags, "-DZSTD")) add_DZSTD++; if (!ldflags || !strstr(ldflags, "-lzstd")) add_lzstd++; } if (strcmp(option, "valgrind") == 0) { valgrind++; if (!cflags || !strstr(cflags, "-DVALGRIND")) add_DVALGRIND++; } if ((lzo || snappy || zstd) && file_exists("diskdump.o") && 
(unlink("diskdump.o") < 0)) { perror("diskdump.o"); return; } if (valgrind && file_exists("tools.o") && (unlink("tools.o") < 0)) { perror("tools.o"); return; } mode = file_exists("CFLAGS.extra") ? "r+" : "w+"; if ((fp_cflags = fopen("CFLAGS.extra", mode)) == NULL) { perror("CFLAGS.extra"); return; } mode = file_exists("LDFLAGS.extra") ? "r+" : "w+"; if ((fp_ldflags = fopen("LDFLAGS.extra", mode)) == NULL) { perror("LDFLAGS.extra"); fclose(fp_cflags); return; } if (add_DLZO || add_DSNAPPY || add_DZSTD || add_DVALGRIND) { while (fgets(inbuf, 512, fp_cflags)) ; if (add_DLZO) fputs("-DLZO\n", fp_cflags); if (add_DSNAPPY) fputs("-DSNAPPY\n", fp_cflags); if (add_DZSTD) fputs("-DZSTD\n", fp_cflags); if (add_DVALGRIND) fputs("-DVALGRIND\n", fp_cflags); } if (add_llzo2 || add_lsnappy || add_lzstd) { while (fgets(inbuf, 512, fp_ldflags)) ; if (add_llzo2) fputs("-llzo2\n", fp_ldflags); if (add_lsnappy) fputs("-lsnappy\n", fp_ldflags); if (add_lzstd) fputs("-lzstd\n", fp_ldflags); } fclose(fp_cflags); fclose(fp_ldflags); } crash-utility-crash-9cd43f5/crash_target.c0000664000372000037200000001371415107550337020214 0ustar juerghjuergh/* * crash_target.c * * Copyright (c) 2021 VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Author: Alexey Makhalov */ #include #include "top.h" #include "target.h" #include "inferior.h" #include "regcache.h" #include "gdbarch.h" void crash_target_init (void); extern "C" int gdb_readmem_callback(unsigned long, void *, int, int); extern "C" int crash_get_current_task_reg (int regno, const char *regname, int regsize, void *val, int sid); extern "C" int gdb_change_thread_context (void); extern "C" int gdb_add_substack (int); extern "C" void crash_get_current_task_info(unsigned long *pid, char **comm); #if defined (X86_64) || defined (ARM64) || defined (PPC64) extern "C" void silent_call_bt(void); #endif /* The crash target. */ static const target_info crash_target_info = { "crash", N_("Local core dump file"), N_("Use a built-in crash instance as a target.") }; class crash_target final : public process_stratum_target { public: const target_info &info () const override { return crash_target_info; } void fetch_registers (struct regcache *, int) override; enum target_xfer_status xfer_partial (enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) override; bool has_all_memory () override { return true; } bool has_memory () override { return true; } bool has_stack () override { return true; } bool has_registers () override { return true; } bool thread_alive (ptid_t ptid) override { return true; } std::string pid_to_str (ptid_t ptid) override { unsigned long pid; char *comm; crash_get_current_task_info(&pid, &comm); return string_printf ("%ld %s", pid, comm); } const char *extra_thread_info (thread_info *tp) override { static char buf[16] = {0}; snprintf(buf, sizeof(buf), "stack %ld", tp->ptid.tid()); return buf; } }; static void supply_registers(struct regcache *regcache, int regno) { gdb_byte regval[32]; struct gdbarch *arch = regcache->arch (); const char *regname = gdbarch_register_name(arch, regno); int regsize = register_size(arch, regno); if (regsize > sizeof 
(regval)) error (_("fatal error: buffer size is not enough to fit register value")); if (crash_get_current_task_reg (regno, regname, regsize, (void *)®val, inferior_thread()->ptid.tid())) regcache->raw_supply (regno, regval); else regcache->raw_supply (regno, NULL); } void crash_target::fetch_registers (struct regcache *regcache, int regno) { if (regno >= 0) { supply_registers(regcache, regno); } else if (regno == -1) { for (int r = 0; r < gdbarch_num_regs (regcache->arch ()); r++) supply_registers(regcache, r); } } enum target_xfer_status crash_target::xfer_partial (enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) { if (object != TARGET_OBJECT_MEMORY && object != TARGET_OBJECT_STACK_MEMORY && object != TARGET_OBJECT_CODE_MEMORY) return TARGET_XFER_E_IO; if (gdb_readmem_callback(offset, (void *)(readbuf ? readbuf : writebuf), len, !readbuf)) { *xfered_len = len; return TARGET_XFER_OK; } return TARGET_XFER_E_IO; } #define CRASH_INFERIOR_PID 1 void crash_target_init (void) { crash_target *target = new crash_target (); /* Own the target until it is successfully pushed. */ target_ops_up target_holder (target); current_inferior ()->push_target (std::move (target_holder)); inferior_appeared (current_inferior (), CRASH_INFERIOR_PID); /*Only create 1 gdb threads to view tasks' stack unwinding*/ thread_info *thread = add_thread_silent (target, ptid_t(CRASH_INFERIOR_PID, 0, 0)); switch_to_thread (thread); /* Fetch all registers from core file. */ target_fetch_registers (get_thread_regcache(thread), -1); /* Now, set up the frame cache. 
*/ reinit_frame_cache (); } extern "C" int gdb_change_thread_context (void) { /* 1st, switch to tid 0 if we are not */ if (inferior_thread()->ptid.tid()) { switch_to_thread (&(current_inferior()->thread_list.front())); } /* 2nd, delete threads whose tid is not 0 */ for (thread_info *tp : current_inferior()->threads_safe()) { if (tp->ptid.tid() && tp->deletable()) { delete_thread_silent(tp); current_inferior()->highest_thread_num--; } } /* 3rd, refresh regcache for tid 0 */ target_fetch_registers(get_thread_regcache(inferior_thread()), -1); reinit_frame_cache(); #if defined (X86_64) || defined (ARM64) || defined (PPC64) /* 4th, invoke bt silently to refresh the additional stacks */ silent_call_bt(); #endif return TRUE; } /* Add a thread for each additional stack. Use stack ID as a thread ID */ extern "C" int gdb_add_substack (int sid) { thread_info *tp; thread_info *current_thread = inferior_thread(); ptid_t ptid = ptid_t(CRASH_INFERIOR_PID, 0, sid + 1); tp = current_inferior()->find_thread(ptid); if (tp == nullptr) { tp = add_thread_silent(current_inferior()->process_target(), ptid); } switch_to_thread (tp); target_fetch_registers(get_thread_regcache(tp), -1); switch_to_thread (current_thread); return TRUE; }crash-utility-crash-9cd43f5/vmware_vmss.h0000664000372000037200000001256215107550337020124 0ustar juerghjuergh/* * vmware_vmss.h * * Copyright (c) 2015 VMware, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Author: Dyno Hongjun Fu */ #define CPTDUMP_OLD_MAGIC_NUMBER 0xbed0bed0 #define CPTDUMP_MAGIC_NUMBER 0xbed2bed2 #define CPTDUMP_PARTIAL_MAGIC_NUMBER 0xbed3bed3 #define CPTDUMP_RESTORED_MAGIC_NUMBER 0xbad1bad1 #define CPTDUMP_NORESTORE_MAGIC_NUMBER 0xbad2bad2 /* * Poor man's bit fields * TAG: | NAMELEN | NINDX | VALSIZE | * bits |15 8|7 6|5 0| * size | 8 | 2 | 6 | */ #define TAG_NAMELEN_MASK 0xFF #define TAG_NAMELEN_OFFSET 8 #define TAG_NINDX_MASK 0x3 #define TAG_NINDX_OFFSET 6 #define TAG_VALSIZE_MASK 0x3F #define TAG_VALSIZE_OFFSET 0 #define TAG_SIZE 2 /* * The value size has two special values to indicate blocks and compressed * blocks. */ #define TAG_ISBLOCK TAG_VALSIZE_MASK #define TAG_ISBLOCK_COMPRESSED (TAG_VALSIZE_MASK-1) #define MAKE_TAG(_nl, _nidx, _nb) \ (((_nl) & TAG_NAMELEN_MASK) << TAG_NAMELEN_OFFSET | \ ((_nidx) & TAG_NINDX_MASK) << TAG_NINDX_OFFSET | \ ((_nb) & TAG_VALSIZE_MASK) << TAG_VALSIZE_OFFSET) #define TAG_NAMELEN(_tag) (((_tag) >> TAG_NAMELEN_OFFSET) & TAG_NAMELEN_MASK) #define TAG_NINDX(_tag) (((_tag) >> TAG_NINDX_OFFSET) & TAG_NINDX_MASK) #define TAG_VALSIZE(_tag) (((_tag) >> TAG_VALSIZE_OFFSET) & TAG_VALSIZE_MASK) #define NULL_TAG MAKE_TAG(0, 0, 0) #define NO_INDEX (-1) /* * TRUE iff it's a (optionally compressed) block */ #define IS_BLOCK_TAG(_tag) (TAG_VALSIZE(_tag) == TAG_ISBLOCK || \ TAG_VALSIZE(_tag) == TAG_ISBLOCK_COMPRESSED) /* * TRUE iff it's a compressed block. 
*/ #define IS_BLOCK_COMPRESSED_TAG(_tag) (TAG_VALSIZE(_tag) == TAG_ISBLOCK_COMPRESSED) struct cptdumpheader { uint32_t id; uint32_t version; uint32_t numgroups; }; typedef struct cptdumpheader cptdumpheader; #define MAX_LENGTH 64 struct cptgroupdesc { char name[MAX_LENGTH]; uint64_t position; uint64_t size; }; typedef struct cptgroupdesc cptgroupdesc; struct memregion { uint32_t startpagenum; uint32_t startppn; uint32_t size; }; typedef struct memregion memregion; #define VMW_GPREGS_SIZE (128) #define VMW_CR64_SIZE (72) #define VMW_IDTR_SIZE (10) struct vmssregs64 { /* read from vmss */ uint64_t rax; uint64_t rcx; uint64_t rdx; uint64_t rbx; uint64_t rbp; uint64_t rsp; uint64_t rsi; uint64_t rdi; uint64_t r8; uint64_t r9; uint64_t r10; uint64_t r11; uint64_t r12; uint64_t r13; uint64_t r14; uint64_t r15; uint64_t es; uint64_t cs; uint64_t ss; uint64_t ds; uint64_t fs; uint64_t gs; uint64_t ldtr; uint64_t tr; /* manually managed */ uint64_t idtr; uint64_t cr[VMW_CR64_SIZE / 8]; uint64_t rip; uint64_t rflags; uint64_t fs_base; uint64_t gs_base; }; typedef struct vmssregs64 vmssregs64; typedef enum SegmentName { SEG_ES, SEG_CS, SEG_SS, SEG_DS, SEG_FS, SEG_GS, SEG_LDTR, SEG_TR, NUM_SEGS } SegmentName; #define REGS_PRESENT_RAX 1L<<0 #define REGS_PRESENT_RCX 1L<<1 #define REGS_PRESENT_RDX 1L<<2 #define REGS_PRESENT_RBX 1L<<3 #define REGS_PRESENT_RBP 1L<<4 #define REGS_PRESENT_RSP 1L<<5 #define REGS_PRESENT_RSI 1L<<6 #define REGS_PRESENT_RDI 1L<<7 #define REGS_PRESENT_R8 1L<<8 #define REGS_PRESENT_R9 1L<<9 #define REGS_PRESENT_R10 1L<<10 #define REGS_PRESENT_R11 1L<<11 #define REGS_PRESENT_R12 1L<<12 #define REGS_PRESENT_R13 1L<<13 #define REGS_PRESENT_R14 1L<<14 #define REGS_PRESENT_R15 1L<<15 #define REGS_PRESENT_IDTR 1L<<16 #define REGS_PRESENT_CR0 1L<<17 #define REGS_PRESENT_CR1 1L<<18 #define REGS_PRESENT_CR2 1L<<19 #define REGS_PRESENT_CR3 1L<<20 #define REGS_PRESENT_CR4 1L<<21 #define REGS_PRESENT_RIP 1L<<22 #define REGS_PRESENT_RFLAGS 1L<<23 #define 
REGS_PRESENT_ES 1L<<24 #define REGS_PRESENT_CS 1L<<25 #define REGS_PRESENT_SS 1L<<26 #define REGS_PRESENT_DS 1L<<27 #define REGS_PRESENT_FS 1L<<28 #define REGS_PRESENT_GS 1L<<29 #define REGS_PRESENT_LDTR 1L<<30 #define REGS_PRESENT_TR 1L<<31 #define REGS_PRESENT_FS_BASE 1L<<32 #define REGS_PRESENT_GS_BASE 1L<<33 #define REGS_PRESENT_GPREGS 0x000000000000FFFF #define REGS_PRESENT_CRS 0x00000000003E0000 #define REGS_PRESENT_SEG 0x00000003FF000000 #define REGS_PRESENT_ALL 0x00000003FFFFFFFF #define MAX_REGIONS 3 struct vmssdata { int32_t cpt64bit; FILE *dfp; char *filename; /* about the memory */ uint32_t alignmask; uint32_t regionscount; memregion regions[MAX_REGIONS]; uint64_t memoffset; uint64_t memsize; ulong phys_base; int separate_vmem; uint64_t *vcpu_regs; uint64_t num_vcpus; vmssregs64 **regs64; }; typedef struct vmssdata vmssdata; /* VMware only supports X86/X86_64 virtual machines. */ #define VMW_PAGE_SIZE (4096) #define VMW_PAGE_SHIFT (12) #define MAX_BLOCK_DUMP (128) extern vmssdata vmss; #define DEBUG_PARSE_PRINT(x) \ do { \ if (CRASHDEBUG(1)) { \ fprintf x; \ } \ } while(0) crash-utility-crash-9cd43f5/unwind_i.h0000664000372000037200000001476615107550337017377 0ustar juerghjuergh/* * Copyright (C) 2000, 2002 Hewlett-Packard Co * David Mosberger-Tang */ /* * unwind_i.h * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Adapted from: * * arch/ia64/kernel/unwind_i.h (kernel-2.4.18-6.23) */ /* * Kernel unwind support. */ #define UNW_VER(x) ((x) >> 48) #define UNW_FLAG_MASK 0x0000ffff00000000 #define UNW_FLAG_OSMASK 0x0000f00000000000 #define UNW_FLAG_EHANDLER(x) ((x) & 0x0000000100000000L) #define UNW_FLAG_UHANDLER(x) ((x) & 0x0000000200000000L) #define UNW_LENGTH(x) ((x) & 0x00000000ffffffffL) enum unw_register_index { /* primary unat: */ UNW_REG_PRI_UNAT_GR, UNW_REG_PRI_UNAT_MEM, /* register stack */ UNW_REG_BSP, /* register stack pointer */ UNW_REG_BSPSTORE, UNW_REG_PFS, /* previous function state */ UNW_REG_RNAT, /* memory stack */ UNW_REG_PSP, /* previous memory stack pointer */ /* return pointer: */ UNW_REG_RP, /* preserved registers: */ UNW_REG_R4, UNW_REG_R5, UNW_REG_R6, UNW_REG_R7, UNW_REG_UNAT, UNW_REG_PR, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_B1, UNW_REG_B2, UNW_REG_B3, UNW_REG_B4, UNW_REG_B5, UNW_REG_F2, UNW_REG_F3, UNW_REG_F4, UNW_REG_F5, UNW_REG_F16, UNW_REG_F17, UNW_REG_F18, UNW_REG_F19, UNW_REG_F20, UNW_REG_F21, UNW_REG_F22, UNW_REG_F23, UNW_REG_F24, UNW_REG_F25, UNW_REG_F26, UNW_REG_F27, UNW_REG_F28, UNW_REG_F29, UNW_REG_F30, UNW_REG_F31, UNW_NUM_REGS }; struct unw_info_block { u64 header; u64 desc[0]; /* unwind descriptors */ /* personality routine and language-specific data follow behind descriptors */ }; struct unw_table_entry { u64 start_offset; u64 end_offset; u64 info_offset; }; struct unw_table { struct unw_table *next; /* must be first member! 
*/ const char *name; unsigned long gp; /* global pointer for this load-module */ unsigned long segment_base; /* base for offsets in the unwind table entries */ unsigned long start; unsigned long end; const struct unw_table_entry *array; unsigned long length; }; enum unw_where { UNW_WHERE_NONE, /* register isn't saved at all */ UNW_WHERE_GR, /* register is saved in a general register */ UNW_WHERE_FR, /* register is saved in a floating-point register */ UNW_WHERE_BR, /* register is saved in a branch register */ UNW_WHERE_SPREL, /* register is saved on memstack (sp-relative) */ UNW_WHERE_PSPREL, /* register is saved on memstack (psp-relative) */ /* * At the end of each prologue these locations get resolved to * UNW_WHERE_PSPREL and UNW_WHERE_GR, respectively: */ UNW_WHERE_SPILL_HOME, /* register is saved in its spill home */ UNW_WHERE_GR_SAVE /* register is saved in next general register */ }; #define UNW_WHEN_NEVER 0x7fffffff struct unw_reg_info { unsigned long val; /* save location: register number or offset */ enum unw_where where; /* where the register gets saved */ int when; /* when the register gets saved */ }; struct unw_reg_state { struct unw_reg_state *next; /* next (outer) element on state stack */ struct unw_reg_info reg[UNW_NUM_REGS]; /* register save locations */ }; struct unw_labeled_state { struct unw_labeled_state *next; /* next labeled state (or NULL) */ unsigned long label; /* label for this state */ struct unw_reg_state saved_state; }; struct unw_state_record { unsigned int first_region : 1; /* is this the first region? */ unsigned int done : 1; /* are we done scanning descriptors? */ unsigned int any_spills : 1; /* got any register spills? */ unsigned int in_body : 1; /* are we inside a body (as opposed to a prologue)? 
*/ unsigned long flags; /* see UNW_FLAG_* in unwind.h */ u8 *imask; /* imask of spill_mask record or NULL */ unsigned long pr_val; /* predicate values */ unsigned long pr_mask; /* predicate mask */ long spill_offset; /* psp-relative offset for spill base */ int region_start; int region_len; int epilogue_start; int epilogue_count; int when_target; u8 gr_save_loc; /* next general register to use for saving a register */ u8 return_link_reg; /* branch register in which the return link is passed */ struct unw_labeled_state *labeled_states; /* list of all labeled states */ struct unw_reg_state curr; /* current state */ }; enum unw_nat_type { UNW_NAT_NONE, /* NaT not represented */ UNW_NAT_VAL, /* NaT represented by NaT value (fp reg) */ UNW_NAT_MEMSTK, /* NaT value is in unat word at offset OFF */ UNW_NAT_REGSTK /* NaT is in rnat */ }; enum unw_insn_opcode { UNW_INSN_ADD, /* s[dst] += val */ UNW_INSN_ADD_PSP, /* s[dst] = (s.psp + val) */ UNW_INSN_ADD_SP, /* s[dst] = (s.sp + val) */ UNW_INSN_MOVE, /* s[dst] = s[val] */ UNW_INSN_MOVE2, /* s[dst] = s[val]; s[dst+1] = s[val+1] */ UNW_INSN_MOVE_STACKED, /* s[dst] = ia64_rse_skip(*s.bsp, val) */ UNW_INSN_SETNAT_MEMSTK, /* s[dst+1].nat.type = MEMSTK; s[dst+1].nat.off = *s.pri_unat - s[dst] */ UNW_INSN_SETNAT_TYPE, /* s[dst+1].nat.type = val */ UNW_INSN_LOAD, /* s[dst] = *s[val] */ UNW_INSN_MOVE_SCRATCH, /* s[dst] = scratch reg "val" */ }; struct unw_insn { unsigned int opc : 4; unsigned int dst : 9; signed int val : 19; }; /* * Preserved general static registers (r2-r5) give rise to two script * instructions; everything else yields at most one instruction; at * the end of the script, the psp gets popped, accounting for one more * instruction. 
*/ #define UNW_MAX_SCRIPT_LEN (UNW_NUM_REGS + 5) struct unw_script { unsigned long ip; /* ip this script is for */ unsigned long pr_mask; /* mask of predicates script depends on */ unsigned long pr_val; /* predicate values this script is for */ #ifndef REDHAT rwlock_t lock; #endif /* !REDHAT */ unsigned int flags; /* see UNW_FLAG_* in unwind.h */ #ifndef REDHAT unsigned short lru_chain; /* used for least-recently-used chain */ unsigned short coll_chain; /* used for hash collisions */ unsigned short hint; /* hint for next script to try (or -1) */ #endif /* !REDHAT */ unsigned short count; /* number of instructions in script */ struct unw_insn insn[UNW_MAX_SCRIPT_LEN]; }; crash-utility-crash-9cd43f5/lkcd_dump_v8.h0000664000372000037200000004075315107550337020135 0ustar juerghjuergh/* lkcd_dump_v8.h - core analysis suite * * Forward ported from lkcd_dump_v5.h by Corey Mineyard * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net) * Copyright 2001 Matt D. Robinson. All rights reserved. 
* * Most of this is the same old stuff from vmdump.h, except now we're * actually a stand-alone driver plugged into the block layer interface, * with the exception that we now allow for compression modes externally * loaded (e.g., someone can come up with their own). */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _DUMP_H #define _DUMP_H //#include /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #ifndef MCLX /* * MCLX NOTE: the architecture-specific headers are being ignored until * deemed necessary; crash has never used them functionally, and only * referencing them in the dump_sgi_environment() helper routines. */ /* necessary header files */ #include /* for architecture-specific header */ #endif #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress 
this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. 
*/ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) */ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* timeval depends on architecture, two long values */ struct { uint64_t tv_sec; uint64_t tv_usec; } dh_time; /* the time of the system crash */ /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ uint64_t dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; /* size of dump buffer -- only in v9 dumps so we don't declare it here */ /* uint64_t dh_dump_buffer_size; */ } __attribute__((packed)) dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. 
The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } __attribute__((packed)) dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. */ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; /* * * machine specific dump headers * */ /* * IA64 --------------------------------------------------------- */ #if defined(IA64) #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit 
each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. */ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
* */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs, (OLD: (struct pt_regs *, NEW: (uint64_t)) */ uint64_t dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; uint32_t dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; uint64_t dha_stack[NR_CPUS]; uint64_t dha_stack_ptr[NR_CPUS]; /* load address of kernel */ uint64_t dha_kernel_addr; } __attribute__((packed)) dump_header_asm_t; struct dump_CPU_info_ia64 { struct pt_regs dha_smp_regs; uint64_t dha_smp_current_task; uint64_t dha_stack; uint64_t dha_stack_ptr; } __attribute__((packed)) dump_CPU_info_ia64_t; typedef struct dump_CPU_info_ia64 dump_CPU_info_t; /* * i386 --------------------------------------------------------- */ #elif defined(X86) #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x5 /* version number */ struct pt_regs { long ebx; long ecx; long edx; long esi; long edi; long ebp; long eax; int xds; int xes; long orig_eax; long eip; int xcs; long eflags; long esp; int xss; }; /* * Structure: __dump_header_asm * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the esp for i386 systems */ uint32_t dha_esp; /* the eip for i386 systems */ uint32_t dha_eip; /* the dump registers */ struct pt_regs dha_regs; /* smp specific */ uint32_t dha_smp_num_cpus; uint32_t dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint32_t dha_smp_current_task[NR_CPUS]; uint32_t dha_stack[NR_CPUS]; uint32_t dha_stack_ptr[NR_CPUS]; } __attribute__((packed)) dump_header_asm_t; /* * CPU specific part of dump_header_asm_t */ typedef struct dump_CPU_info_s { struct pt_regs dha_smp_regs; uint32_t dha_smp_current_task; uint32_t dha_stack; uint32_t dha_stack_ptr; } __attribute__ ((packed)) dump_CPU_info_t; /* * x86-64 --------------------------------------------------------- */ #elif defined(X86_64) /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x2 /* version number */ struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long rbp; unsigned long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rax; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ unsigned long rip; unsigned long cs; unsigned long eflags; unsigned long rsp; unsigned long ss; /* top of stack page */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the dump registers */ struct pt_regs dha_regs; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; uint64_t dha_stack[NR_CPUS]; uint64_t dha_stack_ptr[NR_CPUS]; } __attribute__((packed)) dump_header_asm_t; /* * CPU specific part of dump_header_asm_t */ typedef struct dump_CPU_info_s { struct pt_regs dha_smp_regs; uint64_t dha_smp_current_task; uint64_t dha_stack; uint64_t dha_stack_ptr; } __attribute__ ((packed)) dump_CPU_info_t; #else #define HAVE_NO_DUMP_HEADER_ASM 1 #endif #endif /* _DUMP_H */ crash-utility-crash-9cd43f5/memory_driver/0000775000372000037200000000000015107550337020257 5ustar juerghjuerghcrash-utility-crash-9cd43f5/memory_driver/Makefile0000664000372000037200000000144615107550337021724 0ustar juerghjuergh# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# ifneq ($(KERNELRELEASE),) obj-m := crash.o else PWD := $(shell pwd) KVER ?= $(shell uname -r) KDIR ?= /lib/modules/${KVER}/build all: ${MAKE} -C ${KDIR} M=${PWD} SUBDIRS=${PWD} modules clean: test -e ${KDIR}/Makefile && ${MAKE} -C ${KDIR} M=${PWD} SUBDIRS=${PWD} clean || ${RM} *.mod.c *.ko *.o Module.* endif crash-utility-crash-9cd43f5/memory_driver/README0000664000372000037200000000134715107550337021144 0ustar juerghjuerghFor live system analysis, the physical memory source must be one of the following devices: /dev/mem /proc/kcore /dev/crash If the live system kernel was configured with CONFIG_STRICT_DEVMEM or CONFIG_HARDENED_USERCOPY, then /dev/mem cannot be used. If the live system kernel was configured without CONFIG_PROC_KCORE, or if /proc/kcore is non-functional, then /proc/kcore cannot be used. The third alternative is this /dev/crash driver. Presuming that /lib/modules/`uname -r`/build points to a kernel build tree or kernel "devel" package tree, the module can simply be built and installed like so: # make ... # insmod crash.ko Once installed, the /dev/crash driver will be used by default for live system crash sessions. crash-utility-crash-9cd43f5/memory_driver/crash.c0000664000372000037200000001732615107550337021534 0ustar juerghjuergh/* * linux/drivers/char/crash.c * * Copyright (C) 2004, 2011, 2016 Dave Anderson * Copyright (C) 2004, 2011, 2016 Red Hat, Inc. * Copyright (C) 2019 Serapheim Dimitropoulos */ /****************************************************************************** * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * *****************************************************************************/ #include #include #include #include #include #include #include #include #include #include #include extern int page_is_ram(unsigned long); #if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) #define CAN_WRITE_KERNEL 1 static inline long copy_from_kernel_nofault(void *dst, const void *src, size_t size) { return probe_kernel_read(dst, src, size); } static inline long copy_to_kernel_nofault(void *dst, const void *src, size_t size) { return probe_kernel_write(dst, src, size); } #endif #ifdef CONFIG_S390 /* * For swapped prefix pages get bounce buffer using xlate_dev_mem_ptr() */ static inline void *map_virtual(u64 offset, struct page **pp) { struct page *page; unsigned long pfn; void *vaddr; vaddr = xlate_dev_mem_ptr(offset); pfn = ((unsigned long) vaddr) >> PAGE_SHIFT; if ((unsigned long) vaddr != offset) page = pfn_to_page(pfn); else page = NULL; if (!page_is_ram(pfn)) { printk(KERN_INFO "crash memory driver: !page_is_ram(pfn: %lx)\n", pfn); return NULL; } if (!pfn_valid(pfn)) { printk(KERN_INFO "crash memory driver: invalid pfn: %lx )\n", pfn); return NULL; } *pp = page; return vaddr; } /* * Free bounce buffer if necessary */ static inline void unmap_virtual(struct page *page) { void *vaddr; if (page) { /* * Because for bounce buffers vaddr will never be 0 * unxlate_dev_mem_ptr() will always free the bounce buffer. 
*/ vaddr = (void *)(page_to_pfn(page) << PAGE_SHIFT); unxlate_dev_mem_ptr(0, vaddr); } } #else /* all architectures except s390x */ static inline void * map_virtual(u64 offset, struct page **pp) { struct page *page; unsigned long pfn; void *vaddr; pfn = (unsigned long)(offset >> PAGE_SHIFT); #ifdef NOTDEF /* * page_is_ram() is typically not exported, but there may * be another architecture, kernel version, or distribution * specific mechanism that can be plugged in here if desired. */ if (!page_is_ram(pfn)) { printk(KERN_INFO "crash memory driver: !page_is_ram(pfn: %lx)\n", pfn); return NULL; } #endif if (!pfn_valid(pfn)) { printk(KERN_INFO "crash memory driver: invalid pfn: %lx\n", pfn); return NULL; } page = pfn_to_page(pfn); vaddr = kmap(page); if (!vaddr) { printk(KERN_INFO "crash memory driver: pfn: %lx kmap(page: %lx) failed\n", pfn, (unsigned long)page); return NULL; } *pp = page; return (vaddr + (offset & (PAGE_SIZE-1))); } static inline void unmap_virtual(struct page *page) { kunmap(page); } #endif #define CRASH_VERSION "1.5" /* * These are the file operation functions that allow crash utility * access to physical memory. */ static loff_t crash_llseek(struct file * file, loff_t offset, int orig) { switch (orig) { case 0: file->f_pos = offset; return file->f_pos; case 1: file->f_pos += offset; return file->f_pos; default: return -EINVAL; } } #ifdef CAN_WRITE_KERNEL static ssize_t crash_write(struct file *file, const char *buf, size_t count, loff_t *poff) { void *vaddr; struct page *page; u64 offset; ssize_t written; char *buffer = file->private_data; offset = *poff; if (offset >> PAGE_SHIFT != (offset+count-1) >> PAGE_SHIFT) return -EINVAL; vaddr = map_virtual(offset, &page); if (!vaddr) return -EFAULT; /* * Use bounce buffer to bypass the CONFIG_HARDENED_USERCOPY * kernel text restriction. 
*/ if (copy_from_user(buffer, buf, count)) { unmap_virtual(page); return -EFAULT; } if (copy_to_kernel_nofault(vaddr, buffer, count)) { unmap_virtual(page); return -EFAULT; } unmap_virtual(page); written = count; *poff += written; return written; } #endif /* * Determine the page address for an address offset value, * get a virtual address for it, and copy it out. * Accesses must fit within a page. */ static ssize_t crash_read(struct file *file, char *buf, size_t count, loff_t *poff) { void *vaddr; struct page *page; u64 offset; ssize_t read; char *buffer = file->private_data; offset = *poff; if (offset >> PAGE_SHIFT != (offset+count-1) >> PAGE_SHIFT) return -EINVAL; vaddr = map_virtual(offset, &page); if (!vaddr) return -EFAULT; /* * Use bounce buffer to bypass the CONFIG_HARDENED_USERCOPY * kernel text restriction. */ if (copy_from_kernel_nofault(buffer, vaddr, count)) { unmap_virtual(page); return -EFAULT; } if (copy_to_user(buf, buffer, count)) { unmap_virtual(page); return -EFAULT; } unmap_virtual(page); read = count; *poff += read; return read; } static int crash_open(struct inode * inode, struct file * filp) { if (!capable(CAP_SYS_RAWIO)) return -EPERM; filp->private_data = (void *)__get_free_page(GFP_KERNEL); if (!filp->private_data) return -ENOMEM; return 0; } static int crash_release(struct inode *inode, struct file *filp) { free_pages((unsigned long)filp->private_data, 0); return 0; } /* * Note: This function is required for Linux 4.6 and later ARM64 kernels. * For earler kernel versions, remove this CONFIG_ARM64 section. 
*/ #ifdef CONFIG_ARM64 #define DEV_CRASH_ARCH_DATA _IOR('c', 1, long) static long crash_arch_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { extern u64 kimage_voffset; switch (cmd) { case DEV_CRASH_ARCH_DATA: return put_user(kimage_voffset, (unsigned long __user *)arg); default: return -EINVAL; } } #endif static long crash_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { #ifdef DEV_CRASH_ARCH_DATA return crash_arch_ioctl(file, cmd, arg); #else return -EINVAL; #endif } static struct file_operations crash_fops = { .owner = THIS_MODULE, .llseek = crash_llseek, .read = crash_read, #ifdef CAN_WRITE_KERNEL .write = crash_write, #endif .unlocked_ioctl = crash_ioctl, .open = crash_open, .release = crash_release, }; static struct miscdevice crash_dev = { .minor = MISC_DYNAMIC_MINOR, .name = "crash", .fops = &crash_fops }; static int __init crash_init(void) { int ret; ret = misc_register(&crash_dev); if (ret) { printk(KERN_ERR "crash memory driver: cannot misc_register (MISC_DYNAMIC_MINOR)\n"); goto out; } ret = 0; printk(KERN_INFO "crash memory driver: version %s\n", CRASH_VERSION); out: return ret; } static void __exit crash_cleanup_module(void) { misc_deregister(&crash_dev); } module_init(crash_init); module_exit(crash_cleanup_module); MODULE_LICENSE("GPL"); crash-utility-crash-9cd43f5/ia64.c0000664000372000037200000037270715107550337016323 0ustar juerghjuergh/* ia64.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef IA64 #include "defs.h" #include "xen_hyper_defs.h" #include static int ia64_verify_symbol(const char *, ulong, char); static int ia64_eframe_search(struct bt_info *); static void ia64_back_trace_cmd(struct bt_info *); static void ia64_old_unwind(struct bt_info *); static void ia64_old_unwind_init(void); static void try_old_unwind(struct bt_info *); static void ia64_dump_irq(int); static ulong ia64_processor_speed(void); static int ia64_vtop_4l(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_vtop(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_uvtop(struct task_context *, ulong, physaddr_t *, int); static int ia64_kvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ia64_get_task_pgd(ulong); static ulong ia64_get_pc(struct bt_info *); static ulong ia64_get_sp(struct bt_info *); static ulong ia64_get_thread_ksp(ulong); static void ia64_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ia64_translate_pte(ulong, void *, ulonglong); static ulong ia64_vmalloc_start(void); static int ia64_is_task_addr(ulong); static int ia64_dis_filter(ulong, char *, unsigned int); static void ia64_dump_switch_stack(ulong, ulong); static void ia64_cmd_mach(void); static int ia64_get_smp_cpus(void); static void ia64_display_machine_stats(void); static void ia64_display_cpu_data(unsigned int); static void ia64_display_memmap(void); static void ia64_create_memmap(void); static ulong check_mem_limit(void); static int ia64_verify_paddr(uint64_t); static int ia64_available_memory(struct efi_memory_desc_t *); static void ia64_post_init(void); static ulong ia64_in_per_cpu_mca_stack(void); static struct line_number_hook ia64_line_number_hooks[]; static ulong ia64_get_stackbase(ulong); 
static ulong ia64_get_stacktop(ulong); static void parse_cmdline_args(void); static void ia64_calc_phys_start(void); static int ia64_get_kvaddr_ranges(struct vaddr_range *); struct unw_frame_info; static void dump_unw_frame_info(struct unw_frame_info *); static int old_unw_unwind(struct unw_frame_info *); static void unw_init_from_blocked_task(struct unw_frame_info *, ulong); static ulong ia64_rse_slot_num(ulong *); static ulong *ia64_rse_skip_regs(ulong *, long); static ulong *ia64_rse_rnat_addr(ulong *); static ulong rse_read_reg(struct unw_frame_info *, int, int *); static void rse_function_params(struct unw_frame_info *, char *); static int ia64_vtop_4l_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_vtop_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *); static int ia64_xendump_p2m_create(struct xendump_data *); static void ia64_debug_dump_page(FILE *, char *, char *); static char *ia64_xendump_load_page(ulong, struct xendump_data *); static int ia64_xendump_page_index(ulong, struct xendump_data *); static ulong ia64_xendump_panic_task(struct xendump_data *); static void ia64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); static void ia64_init_hyper(int); struct machine_specific ia64_machine_specific = { 0 }; void ia64_init(int when) { struct syment *sp, *spn; if (XEN_HYPER_MODE()) { ia64_init_hyper(when); return; } switch (when) { case SETUP_ENV: #if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); #endif #if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); #endif break; case PRE_SYMTAB: machdep->verify_symbol = ia64_verify_symbol; machdep->machspec = &ia64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = 
machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); switch (machdep->pagesize) { case 4096: machdep->stacksize = (power(2, 3) * PAGESIZE()); break; case 8192: machdep->stacksize = (power(2, 2) * PAGESIZE()); break; case 16384: machdep->stacksize = (power(2, 1) * PAGESIZE()); break; case 65536: machdep->stacksize = (power(2, 0) * PAGESIZE()); break; default: machdep->stacksize = 32*1024; break; } if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = ia64_verify_paddr; machdep->get_kvaddr_ranges = ia64_get_kvaddr_ranges; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->machspec->phys_start = UNKNOWN_PHYS_START; if (machdep->cmdline_args[0]) parse_cmdline_args(); if (ACTIVE()) machdep->flags |= DEVMEMRD; break; case PRE_GDB: if (pc->flags & KERNEL_DEBUG_QUERY) return; /* * Until the kernel core dump and va_server library code * do the right thing with respect to the configured page size, * try to recognize a fatal inequity between the compiled-in * page size and the page size used by the kernel. 
*/ if ((sp = symbol_search("empty_zero_page")) && (spn = next_symbol(NULL, sp)) && ((spn->value - sp->value) != PAGESIZE())) error(FATAL, "compiled-in page size: %d (apparent) kernel page size: %ld\n", PAGESIZE(), spn->value - sp->value); machdep->kvbase = KERNEL_VMALLOC_BASE; machdep->identity_map_base = KERNEL_CACHED_BASE; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ia64_eframe_search; machdep->back_trace = ia64_back_trace_cmd; machdep->processor_speed = ia64_processor_speed; machdep->uvtop = ia64_uvtop; machdep->kvtop = ia64_kvtop; machdep->get_task_pgd = ia64_get_task_pgd; machdep->dump_irq = ia64_dump_irq; machdep->get_stack_frame = ia64_get_stack_frame; machdep->get_stackbase = ia64_get_stackbase; machdep->get_stacktop = ia64_get_stacktop; machdep->translate_pte = ia64_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = ia64_vmalloc_start; machdep->is_task_addr = ia64_is_task_addr; machdep->dis_filter = ia64_dis_filter; machdep->cmd_mach = ia64_cmd_mach; machdep->get_smp_cpus = ia64_get_smp_cpus; machdep->line_number_hooks = ia64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->show_interrupts = generic_show_interrupts; if ((sp = symbol_search("_stext"))) { machdep->machspec->kernel_region = VADDR_REGION(sp->value); machdep->machspec->kernel_start = sp->value; } else { machdep->machspec->kernel_region = KERNEL_CACHED_REGION; machdep->machspec->kernel_start = KERNEL_CACHED_BASE; } if (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION) { machdep->machspec->vmalloc_start = machdep->machspec->kernel_start + GIGABYTES((ulong)(4)); if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) ia64_calc_phys_start(); } else machdep->machspec->vmalloc_start = KERNEL_VMALLOC_BASE; machdep->xen_kdump_p2m_create = ia64_xen_kdump_p2m_create; 
machdep->xendump_p2m_create = ia64_xendump_p2m_create; machdep->xendump_panic_task = ia64_xendump_panic_task; machdep->get_xendump_regs = ia64_get_xendump_regs; break; case POST_GDB: STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); STRUCT_SIZE_INIT(switch_stack, "switch_stack"); MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, "switch_stack", "ar_bspstore"); MEMBER_OFFSET_INIT(switch_stack_ar_pfs, "switch_stack", "ar_pfs"); MEMBER_OFFSET_INIT(switch_stack_ar_rnat, "switch_stack", "ar_rnat"); MEMBER_OFFSET_INIT(switch_stack_pr, "switch_stack", "pr"); MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); MEMBER_OFFSET_INIT(cpuinfo_ia64_unimpl_va_mask, "cpuinfo_ia64", "unimpl_va_mask"); MEMBER_OFFSET_INIT(cpuinfo_ia64_unimpl_pa_mask, "cpuinfo_ia64", "unimpl_pa_mask"); if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (symbol_exists("_irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "_irq_desc", NULL, 0); if (!machdep->hz) machdep->hz = 1024; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ia64_create_memmap(); break; case POST_INIT: ia64_post_init(); break; case LOG_ONLY: machdep->machspec = &ia64_machine_specific; machdep->machspec->kernel_start = kt->vmcoreinfo._stext_SYMBOL; machdep->machspec->kernel_region = VADDR_REGION(kt->vmcoreinfo._stext_SYMBOL); if (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION) { machdep->machspec->vmalloc_start = machdep->machspec->kernel_start + GIGABYTES((ulong)(4)); ia64_calc_phys_start(); } break; } } /* * --machdep defaults to the physical start location. * * Otherwise, it's got to be a "item=value" string, separated * by commas if more than one is passed in. 
*/ void parse_cmdline_args(void) { int index, i, c, errflag; char *p; char buf[BUFSIZE]; char *arglist[MAXARGS]; ulong value; struct machine_specific *ms; int vm_flag; ms = &ia64_machine_specific; vm_flag = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { errflag = 0; value = htol(machdep->cmdline_args[index], RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->phys_start = value; error(NOTE, "setting phys_start to: 0x%lx\n", ms->phys_start); } else error(WARNING, "ignoring --machdep option: %s\n\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { errflag = 0; if (STRNEQ(arglist[i], "phys_start=")) { p = arglist[i] + strlen("phys_start="); if (strlen(p)) { value = htol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->phys_start = value; error(NOTE, "setting phys_start to: 0x%lx\n", ms->phys_start); continue; } } } else if (STRNEQ(arglist[i], "init_stack_size=")) { p = arglist[i] + strlen("init_stack_size="); if (strlen(p)) { value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->ia64_init_stack_size = (int)value; error(NOTE, "setting init_stack_size to: 0x%x (%d)\n", ms->ia64_init_stack_size, ms->ia64_init_stack_size); continue; } } } else if (STRNEQ(arglist[i], "vm=")) { vm_flag++; p = arglist[i] + strlen("vm="); if (strlen(p)) { if (STREQ(p, "4l")) { machdep->flags |= VM_4_LEVEL; continue; } } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } if (vm_flag) { switch (machdep->flags & (VM_4_LEVEL)) { case VM_4_LEVEL: error(NOTE, "using 4-level pagetable\n"); c++; break; default: error(WARNING, "invalid vm= option\n"); c++; machdep->flags &= ~(VM_4_LEVEL); break; } } if (c) fprintf(fp, "\n"); } } int ia64_in_init_stack(ulong addr) { ulong init_stack_addr; if 
(!symbol_exists("ia64_init_stack")) return FALSE; /* * ia64_init_stack could be aliased to region 5 */ init_stack_addr = ia64_VTOP(symbol_value("ia64_init_stack")); addr = ia64_VTOP(addr); if ((addr < init_stack_addr) || (addr >= (init_stack_addr+machdep->machspec->ia64_init_stack_size))) return FALSE; return TRUE; } static ulong ia64_in_per_cpu_mca_stack(void) { int plen, i; ulong flag; ulong vaddr, paddr, stackbase, stacktop; ulong *__per_cpu_mca; struct task_context *tc; tc = CURRENT_CONTEXT(); if (STRNEQ(CURRENT_COMM(), "INIT")) flag = INIT; else if (STRNEQ(CURRENT_COMM(), "MCA")) flag = MCA; else return 0; if (!symbol_exists("__per_cpu_mca") || !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || (plen < kt->cpus)) return 0; vaddr = SWITCH_STACK_ADDR(CURRENT_TASK()); if (VADDR_REGION(vaddr) != KERNEL_CACHED_REGION) return 0; paddr = ia64_VTOP(vaddr); __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * kt->cpus); if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, sizeof(ulong) * kt->cpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) return 0; if (CRASHDEBUG(1)) { for (i = 0; i < kt->cpus; i++) { fprintf(fp, "__per_cpu_mca[%d]: %lx\n", i, __per_cpu_mca[i]); } } stackbase = __per_cpu_mca[tc->processor]; stacktop = stackbase + (STACKSIZE() * 2); FREEBUF(__per_cpu_mca); if ((paddr >= stackbase) && (paddr < stacktop)) return flag; else return 0; } void ia64_dump_machdep_table(ulong arg) { int i, others, verbose; struct machine_specific *ms; verbose = FALSE; ms = &ia64_machine_specific; if (arg) { switch (arg) { default: case 1: verbose = TRUE; break; case 2: if (machdep->flags & NEW_UNWIND) { machdep->flags &= ~(NEW_UNWIND|NEW_UNW_V1|NEW_UNW_V2|NEW_UNW_V3); machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; ms->dump_unwind_stats = NULL; ms->unwind_debug = NULL; } else { machdep->flags &= ~OLD_UNWIND; machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("pt_regs", 
"ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } ms->unwind_init(); return; case 3: if (machdep->flags & NEW_UNWIND) ms->unwind_debug(arg); return; } } others = 0; fprintf(fp, " flags: %lx (", machdep->flags); /* future flags tests here */ if (machdep->flags & NEW_UNWIND) fprintf(fp, "%sNEW_UNWIND", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V1) fprintf(fp, "%sNEW_UNW_V1", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V2) fprintf(fp, "%sNEW_UNW_V2", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V3) fprintf(fp, "%sNEW_UNW_V3", others++ ? "|" : ""); if (machdep->flags & OLD_UNWIND) fprintf(fp, "%sOLD_UNWIND", others++ ? "|" : ""); if (machdep->flags & UNW_OUT_OF_SYNC) fprintf(fp, "%sUNW_OUT_OF_SYNC", others++ ? "|" : ""); if (machdep->flags & UNW_READ) fprintf(fp, "%sUNW_READ", others++ ? "|" : ""); if (machdep->flags & UNW_PTREGS) fprintf(fp, "%sUNW_PTREGS", others++ ? "|" : ""); if (machdep->flags & UNW_R0) fprintf(fp, "%sUNW_R0", others++ ? "|" : ""); if (machdep->flags & MEM_LIMIT) fprintf(fp, "%sMEM_LIMIT", others++ ? "|" : ""); if (machdep->flags & DEVMEMRD) fprintf(fp, "%sDEVMEMRD", others++ ? "|" : ""); if (machdep->flags & INIT) fprintf(fp, "%sINIT", others++ ? "|" : ""); if (machdep->flags & MCA) fprintf(fp, "%sMCA", others++ ? "|" : ""); if (machdep->flags & VM_4_LEVEL) fprintf(fp, "%sVM_4_LEVEL", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %d\n", machdep->hz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ia64_eframe_search()\n"); fprintf(fp, " back_trace: ia64_back_trace_cmd()\n"); fprintf(fp, "get_processor_speed: ia64_processor_speed()\n"); fprintf(fp, " uvtop: ia64_uvtop()\n"); fprintf(fp, " kvtop: ia64_kvtop()\n"); fprintf(fp, " get_task_pgd: ia64_get_task_pgd()\n"); fprintf(fp, " dump_irq: ia64_dump_irq()\n"); fprintf(fp, " get_stack_frame: ia64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: ia64_get_stackbase()\n"); fprintf(fp, " get_stacktop: ia64_get_stacktop()\n"); fprintf(fp, " translate_pte: ia64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ia64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ia64_is_task_addr()\n"); fprintf(fp, " verify_symbol: ia64_verify_symbol()\n"); fprintf(fp, " dis_filter: ia64_dis_filter()\n"); fprintf(fp, " cmd_mach: ia64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ia64_get_smp_cpus()\n"); fprintf(fp, " get_kvaddr_ranges: ia64_get_kvaddr_ranges()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: %s()\n", (machdep->verify_paddr == ia64_verify_paddr) ? 
"ia64_verify_paddr" : "generic_verify_paddr"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: ia64_xen_kdump_p2m_create()\n"); fprintf(fp, " xendump_p2m_create: ia64_xendump_p2m_create()\n"); fprintf(fp, " xendump_panic_task: ia64_xendump_panic_task()\n"); fprintf(fp, " get_xendump_regs: ia64_get_xendump_regs()\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: ia64_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? 
machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: ia64_machine_specific\n"); fprintf(fp, " cpu_data_address: %lx\n", machdep->machspec->cpu_data_address); fprintf(fp, " unimpl_va_mask: %lx\n", machdep->machspec->unimpl_va_mask); fprintf(fp, " unimpl_pa_mask: %lx\n", machdep->machspec->unimpl_pa_mask); fprintf(fp, " unw: %lx\n", (ulong)machdep->machspec->unw); fprintf(fp, " unw_tables_offset: %ld\n", machdep->machspec->unw_tables_offset); fprintf(fp, " unw_kernel_table_offset: %ld %s\n", machdep->machspec->unw_kernel_table_offset, machdep->machspec->unw_kernel_table_offset ? "" : "(unused)"); fprintf(fp, " unw_pt_regs_offsets: %ld %s\n", machdep->machspec->unw_pt_regs_offsets, machdep->machspec->unw_pt_regs_offsets ? "" : "(unused)"); fprintf(fp, " script_index: %d\n", machdep->machspec->script_index); fprintf(fp, " script_cache: %lx%s", (ulong)machdep->machspec->script_cache, machdep->flags & OLD_UNWIND ? 
"\n" : " "); if (machdep->flags & NEW_UNWIND) ms->dump_unwind_stats(); if (!(machdep->flags & (NEW_UNWIND|OLD_UNWIND))) fprintf(fp, "\n"); fprintf(fp, " mem_limit: %lx\n", machdep->machspec->mem_limit); fprintf(fp, " kernel_region: %ld\n", machdep->machspec->kernel_region); fprintf(fp, " kernel_start: %lx\n", machdep->machspec->kernel_start); fprintf(fp, " phys_start: %lx (%lx)\n", machdep->machspec->phys_start, machdep->machspec->phys_start & KERNEL_TR_PAGE_MASK); fprintf(fp, " vmalloc_start: %lx\n", machdep->machspec->vmalloc_start); fprintf(fp, " ia64_memmap: %lx\n", (ulong)machdep->machspec->ia64_memmap); fprintf(fp, " efi_memmap_size: %ld\n", (ulong)machdep->machspec->efi_memmap_size); fprintf(fp, " efi_memdesc_size: %ld\n", (ulong)machdep->machspec->efi_memdesc_size); fprintf(fp, " unwind_init: "); if (ms->unwind_init == unwind_init_v1) fprintf(fp, "unwind_init_v1()\n"); else if (ms->unwind_init == unwind_init_v2) fprintf(fp, "unwind_init_v2()\n"); else if (ms->unwind_init == unwind_init_v3) fprintf(fp, "unwind_init_v3()\n"); else if (ms->unwind_init == ia64_old_unwind_init) fprintf(fp, "ia64_old_unwind_init()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind_init); fprintf(fp, " unwind: "); if (ms->unwind == unwind_v1) fprintf(fp, "unwind_v1()\n"); else if (ms->unwind == unwind_v2) fprintf(fp, "unwind_v2()\n"); else if (ms->unwind == unwind_v3) fprintf(fp, "unwind_v3()\n"); else if (ms->unwind == ia64_old_unwind) fprintf(fp, "ia64_old_unwind()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind); fprintf(fp, " dump_unwind_stats: "); if (ms->dump_unwind_stats == dump_unwind_stats_v1) fprintf(fp, "dump_unwind_stats_v1()\n"); else if (ms->dump_unwind_stats == dump_unwind_stats_v2) fprintf(fp, "dump_unwind_stats_v2()\n"); else if (ms->dump_unwind_stats == dump_unwind_stats_v3) fprintf(fp, "dump_unwind_stats_v3()\n"); else fprintf(fp, "%lx\n", (ulong)ms->dump_unwind_stats); fprintf(fp, " unwind_debug: "); if (ms->unwind_debug == unwind_debug_v1) fprintf(fp, 
"unwind_debug_v1()\n"); else if (ms->unwind_debug == unwind_debug_v2) fprintf(fp, "unwind_debug_v2()\n"); else if (ms->unwind_debug == unwind_debug_v3) fprintf(fp, "unwind_debug_v3()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind_debug); fprintf(fp, " ia64_init_stack_size: %d\n", ms->ia64_init_stack_size); if (verbose) ia64_display_memmap(); } /* * Keep or reject a symbol from the namelist. */ static int ia64_verify_symbol(const char *name, ulong value, char type) { ulong region; if (!name || !strlen(name)) return FALSE; if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift")) return TRUE; if (CRASHDEBUG(8)) fprintf(fp, "%016lx %s\n", value, name); // if (STREQ(name, "phys_start") && type == 'A') // if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) // machdep->machspec->phys_start = value; region = VADDR_REGION(value); return (((region == KERNEL_CACHED_REGION) || (region == KERNEL_VMALLOC_REGION))); } /* * Look for likely exception frames in a stack. */ static int ia64_eframe_search(struct bt_info *bt) { return(error(FATAL, "ia64_eframe_search: not available for this architecture\n")); } /* * Unroll a kernel stack. */ #define BT_SWITCH_STACK BT_SYMBOLIC_ARGS static void ia64_back_trace_cmd(struct bt_info *bt) { struct machine_specific *ms = &ia64_machine_specific; if (bt->flags & BT_SWITCH_STACK) ia64_dump_switch_stack(bt->task, 0); if (machdep->flags & UNW_OUT_OF_SYNC) error(FATAL, "kernel and %s unwind data structures are out of sync\n", pc->program_name); ms->unwind(bt); if (bt->flags & BT_UNWIND_ERROR) try_old_unwind(bt); } /* * Dump the IRQ table. */ static void ia64_dump_irq(int irq) { if (kernel_symbol_exists("sparse_irqs") || symbol_exists("irq_desc") || symbol_exists("_irq_desc") || kernel_symbol_exists("irq_desc_ptrs")) { machdep->dump_irq = generic_dump_irq; return(generic_dump_irq(irq)); } error(FATAL, "ia64_dump_irq: neither irq_desc or _irq_desc exist\n"); } /* * Calculate and return the speed of the processor. 
 */
static ulong
ia64_processor_speed(void)
{
	ulong mhz, proc_freq;
	int bootstrap_processor;

	/* Return the cached value if it has been computed before. */
	if (machdep->mhz)
		return(machdep->mhz);

	mhz = 0;
	bootstrap_processor = 0;

	/* Without a cpuinfo_ia64.proc_freq to read, cache and return 0. */
	if (!machdep->machspec->cpu_data_address ||
	    !VALID_STRUCT(cpuinfo_ia64) ||
	    !VALID_MEMBER(cpuinfo_ia64_proc_freq))
		return (machdep->mhz = mhz);

	if (symbol_exists("bootstrap_processor"))
		get_symbol_data("bootstrap_processor", sizeof(int),
			&bootstrap_processor);
	/* NOTE(review): bootstrap_processor is normalized here but not
	 * used below -- presumably retained from an older code path. */
	if (bootstrap_processor == -1)
		bootstrap_processor = 0;

	readmem(machdep->machspec->cpu_data_address +
		OFFSET(cpuinfo_ia64_proc_freq), KVADDR, &proc_freq,
		sizeof(ulong), "cpuinfo_ia64 proc_freq", FAULT_ON_ERROR);

	/* proc_freq is in Hz; convert to MHz and cache it. */
	mhz = proc_freq/1000000;

	return (machdep->mhz = mhz);
}

/* Generic abstraction to translate user or kernel virtual
 * addresses to physical using a 4 level page table.
 */
static int
ia64_vtop_4l(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
{
	ulong *page_dir;
	ulong *page_upper;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte;
	ulong pud_pte;
	ulong pmd_pte;
	ulong pte;
	ulong region, offset;

	if (usr) {
		/*
		 * User PGD index: the region bits (vaddr bits 61-63) select
		 * one eighth of the pgd; the remaining bits index within it.
		 */
		region = VADDR_REGION(vaddr);
		offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);
		offset |= (region << (PAGESHIFT() - 6));
		page_dir = pgd + offset;
	} else {
		/* Kernel translations always go through kernel_pgd[0]. */
		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
			error(FATAL, "cannot determine kernel pgd pointer\n");
		page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/* Read (and cache) the PGD page, then pick up this entry. */
	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);

	if (!(pgd_pte))
		return FALSE;

	/* Walk the PUD level. */
	offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
	page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset;

	FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE());
	pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper));

	if (verbose)
		fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte);

	if (!(pud_pte))
		return FALSE;

	/* Walk the PMD level. */
	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset;

	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);

	if (!(pmd_pte))
		return FALSE;

	/* Finally, the PTE level. */
	offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
	page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;

	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));

	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte);

	/*
	 * A pte that is neither present nor PROT_NONE: for user pages the
	 * raw pte (possibly a swap entry) is handed back via *paddr.
	 */
	if (!(pte & (_PAGE_P | _PAGE_PROTNONE))) {
		if (usr)
			*paddr = pte;
		if (pte && verbose) {
			fprintf(fp, "\n");
			ia64_translate_pte(pte, 0, 0);
		}
		return FALSE;
	}

	*paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
		ia64_translate_pte(pte, 0, 0);
	}

	return TRUE;
}

/* Generic abstraction to translate user or kernel virtual
 * addresses to physical using a 3 level page table.
 */
static int
ia64_vtop(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr)
{
	ulong *page_dir;
	ulong *page_middle;
	ulong *page_table;
	ulong pgd_pte;
	ulong pmd_pte;
	ulong pte;
	ulong region, offset;

	if (usr) {
		/*
		 * User PGD index: region bits (vaddr bits 61-63) select one
		 * eighth of the pgd; remaining bits index within it.  Uses
		 * the 3-level PGDIR_SHIFT_3L shift.
		 */
		region = VADDR_REGION(vaddr);
		offset = (vaddr >> PGDIR_SHIFT_3L) & ((PTRS_PER_PGD >> 3) - 1);
		offset |= (region << (PAGESHIFT() - 6));
		page_dir = pgd + offset;
	} else {
		/* Kernel translations always go through kernel_pgd[0]. */
		if (!(pgd = (ulong *)vt->kernel_pgd[0]))
			error(FATAL, "cannot determine kernel pgd pointer\n");
		page_dir = pgd + ((vaddr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1));
	}

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/* Read (and cache) the PGD page, then pick up this entry. */
	FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
	pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);

	if (!(pgd_pte))
		return FALSE;

	/* Walk the PMD level. */
	offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
	page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset;

	FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
	pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);

	if (!(pmd_pte))
		return FALSE;

	/* Finally, the PTE level. */
	offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1);
	page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset;

	FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
	pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));

	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte);

	/*
	 * A pte that is neither present nor PROT_NONE: for user pages the
	 * raw pte (possibly a swap entry) is handed back via *paddr.
	 */
	if (!(pte & (_PAGE_P | _PAGE_PROTNONE))) {
		if (usr)
			*paddr = pte;
		if (pte && verbose) {
			fprintf(fp, "\n");
			ia64_translate_pte(pte, 0, 0);
		}
		return FALSE;
	}

	*paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
		ia64_translate_pte(pte, 0, 0);
	}

	return TRUE;
}

/*
 * Translates a user virtual address to its physical address.  cmd_vtop()
 * sets the verbose flag so that the pte translation gets displayed; all
 * other callers quietly accept the translation.
* * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * swapper_pg_dir, making it irrelevant in this processor's case. */ static int ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return ia64_kvtop(tc, uvaddr, paddr, verbose); if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l_xen_wpt(uvaddr, paddr, pgd, verbose, 1); else return ia64_vtop_xen_wpt(uvaddr, paddr, pgd, verbose, 1); } else { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l(uvaddr, paddr, pgd, verbose, 1); else return ia64_vtop(uvaddr, paddr, pgd, verbose, 1); } } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
 */
static int
ia64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd;

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	/* Before VM initialization, only identity mapping is possible. */
	if (!vt->vmalloc_start) {
		*paddr = ia64_VTOP(kvaddr);
		return TRUE;
	}

	/* Dispatch on the ia64 region encoded in the top address bits. */
	switch (VADDR_REGION(kvaddr))
	{
	case KERNEL_UNCACHED_REGION:
		*paddr = kvaddr - KERNEL_UNCACHED_BASE;
		if (verbose)
			fprintf(fp, "[UNCACHED MEMORY]\n");
		return TRUE;

	case KERNEL_CACHED_REGION:
		*paddr = ia64_VTOP(kvaddr);
		if (verbose)
			fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n");
		return TRUE;

	case KERNEL_VMALLOC_REGION:
		/* True vmalloc addresses fall through to the pagetable walk. */
		if (ia64_IS_VMALLOC_ADDR(kvaddr))
			break;
		/*
		 * Region-5 kernels: addresses below the kernel start have no
		 * physical equivalent available.
		 */
		if ((kvaddr < machdep->machspec->kernel_start) &&
		    (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION)) {
			*paddr = PADDR_NOT_AVAILABLE;
			return FALSE;
		}
		*paddr = ia64_VTOP(kvaddr);
		if (verbose)
			fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n");
		return TRUE;
	}

	/* vmalloc address: walk the kernel page tables. */
	if (!(pgd = (ulong *)vt->kernel_pgd[0]))
		error(FATAL, "cannot determine kernel pgd pointer\n");

	if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) {
		if (machdep->flags & VM_4_LEVEL)
			return ia64_vtop_4l_xen_wpt(kvaddr, paddr, pgd, verbose, 0);
		else
			return ia64_vtop_xen_wpt(kvaddr, paddr, pgd, verbose, 0);
	} else {
		if (machdep->flags & VM_4_LEVEL)
			return ia64_vtop_4l(kvaddr, paddr, pgd, verbose, 0);
		else
			return ia64_vtop(kvaddr, paddr, pgd, verbose, 0);
	}
}

/*
 * Even though thread_info structs are used in 2.6, they
 * are not the stack base.  (until further notice...)
 */
static ulong
ia64_get_stackbase(ulong task)
{
	/* On ia64 the task_struct address is also the stack base. */
	return (task);
}

static ulong
ia64_get_stacktop(ulong task)
{
	return (ia64_get_stackbase(task) + STACKSIZE());
}

/*
 * Get the relevant page directory pointer from a task structure.
 */
static ulong
ia64_get_task_pgd(ulong task)
{
	/* Not applicable on ia64. */
	return (error(FATAL, "ia64_get_task_pgd: N/A\n"));
}

/*
 * Fill in the starting pc (switch_stack b0) and sp (ar_bspstore) for a
 * backtrace; either pointer may be NULL.
 */
static void
ia64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	if (pcp)
		*pcp = ia64_get_pc(bt);
	if (spp)
		*spp = ia64_get_sp(bt);
}

/*
 * Return the kernel switch_stack b0 value.
*/ static ulong ia64_get_pc(struct bt_info *bt) { ulong b0; readmem(SWITCH_STACK_ADDR(bt->task) + OFFSET(switch_stack_b0), KVADDR, &b0, sizeof(void *), "switch_stack b0", FAULT_ON_ERROR); return b0; } /* * Return the kernel switch_stack ar_bspstore value. * If it's "bt -t" request, calculate the register backing store offset. */ static ulong ia64_get_sp(struct bt_info *bt) { ulong bspstore; readmem(SWITCH_STACK_ADDR(bt->task) + OFFSET(switch_stack_ar_bspstore), KVADDR, &bspstore, sizeof(void *), "switch_stack ar_bspstore", FAULT_ON_ERROR); if (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { bspstore = bt->task + SIZE(task_struct); if (tt->flags & THREAD_INFO) bspstore += SIZE(thread_info); bspstore = roundup(bspstore, sizeof(ulong)); } return bspstore; } /* * Get the ksp out of the task's thread_struct */ static ulong ia64_get_thread_ksp(ulong task) { ulong ksp; if (XEN_HYPER_MODE()) { readmem(task + XEN_HYPER_OFFSET(vcpu_thread_ksp), KVADDR, &ksp, sizeof(void *), "vcpu thread ksp", FAULT_ON_ERROR); } else { readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, &ksp, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } return ksp; } /* * Return the switch_stack structure address of a task. */ ulong ia64_get_switch_stack(ulong task) { ulong sw; if (LKCD_DUMPFILE() && (sw = get_lkcd_switch_stack(task))) return sw; /* * debug only: get panic switch_stack from the ELF header. */ if (CRASHDEBUG(3) && NETDUMP_DUMPFILE() && (sw = get_netdump_switch_stack(task))) return sw; if (DISKDUMP_DUMPFILE() && (sw = get_diskdump_switch_stack(task))) return sw; return (ia64_get_thread_ksp((ulong)(task)) + 16); } /* * Translate a PTE, returning TRUE if the page is _PAGE_P. * If a physaddr pointer is passed in, don't print anything. 
 */
static int
ia64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	int c, len1, len2, len3, others, page_present;
	char buf[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char *arglist[MAXARGS];
	char *ptr;
	ulong paddr;

	paddr = pte & _PFN_MASK;
	page_present = !!(pte & (_PAGE_P | _PAGE_PROTNONE));

	/* Quiet mode: just hand back the physical address. */
	if (physaddr) {
		*((ulong *)physaddr) = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE"));

	/* Non-present, non-zero pte: display it as a swap entry. */
	if (!page_present && pte) {
		swap_location(pte, buf);
		if ((c = parse_line(buf, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s\n",
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));

		return page_present;
	}

	sprintf(physbuf, "%lx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER|RJUST, NULL),
		mkstring(physbuf, len2, CENTER|RJUST, NULL));
	fprintf(fp, "(");
	others = 0;

	if (pte) {
		if (pte & _PAGE_P)
			fprintf(fp, "%sP", others++ ? "|" : "");
		/* Memory attribute field. */
		switch (pte & _PAGE_MA_MASK) {
		case _PAGE_MA_WB:
			ptr = "MA_WB";
			break;
		case _PAGE_MA_UC:
			ptr = "MA_UC";
			break;
		case _PAGE_MA_UCE:
			ptr = "MA_UCE";
			break;
		case _PAGE_MA_WC:
			ptr = "MA_WC";
			break;
		case _PAGE_MA_NAT:
			ptr = "MA_NAT";
			break;
		case (0x1 << 2):
			/* NOTE(review): presumably a reserved MA encoding
			 * displayed as uncached -- confirm against the
			 * Itanium PTE format. */
			ptr = "MA_UC";
			break;
		default:
			ptr = "MA_RSV";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		/* Privilege level field. */
		switch (pte & _PAGE_PL_MASK) {
		case _PAGE_PL_0:
			ptr = "PL_0";
			break;
		case _PAGE_PL_1:
			ptr = "PL_1";
			break;
		case _PAGE_PL_2:
			ptr = "PL_2";
			break;
		case _PAGE_PL_3:
			ptr = "PL_3";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		/* Access rights field. */
		switch (pte & _PAGE_AR_MASK) {
		case _PAGE_AR_R:
			ptr = "AR_R";
			break;
		case _PAGE_AR_RX:
			/* NOTE(review): "AT_RX" looks like a typo for
			 * "AR_RX", but it is runtime output -- left as is. */
			ptr = "AT_RX";
			break;
		case _PAGE_AR_RW:
			ptr = "AR_RW";
			break;
		case _PAGE_AR_RWX:
			ptr = "AR_RWX";
			break;
		case _PAGE_AR_R_RW:
			ptr = "AR_R_RW";
			break;
		case _PAGE_AR_RX_RWX:
			ptr = "AR_RX_RWX";
			break;
		case _PAGE_AR_RWX_RW:
			ptr = "AR_RWX_RW";
			break;
		case _PAGE_AR_X_RX:
			ptr = "AR_X_RX";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		if (pte & _PAGE_A)
			fprintf(fp, "%sA", others++ ? "|" : "");
		if (pte & _PAGE_D)
			fprintf(fp, "%sD", others++ ? "|" : "");
		if (pte & _PAGE_ED)
			fprintf(fp, "%sED", others++ ? "|" : "");
		if (pte & _PAGE_PROTNONE)
			fprintf(fp, "%sPROTNONE", others++ ? "|" : "");
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return page_present;
}

/*
 * Determine where vmalloc'd memory starts.
 */
static ulong
ia64_vmalloc_start(void)
{
	return machdep->machspec->vmalloc_start;
}

/*
 * Verify that an address is a task_struct address.
 */
static int
ia64_is_task_addr(ulong task)
{
	int i;

	/* Stack-aligned kernel addresses qualify directly ... */
	if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0))
		return TRUE;

	/* ... as do the per-cpu idle tasks. */
	for (i = 0; i < kt->cpus; i++)
		if (task == tt->idle_threads[i])
			return TRUE;

	return FALSE;
}

/*
 * Filter disassembly output if the output radix is not gdb's default 10
 */
static int
ia64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1, *p2;
	int argc;
	int revise_bracket, stop_bit;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;
/*
 *  For some reason gdb can go off into the weeds translating text addresses,
 *  (on alpha -- not necessarily seen on ia64) so this routine both fixes the
 *  references as well as imposing the current output radix on the translations.
 */
	console("IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	/* Re-render the "address <symbol>:" prefix in the current radix. */
	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	/* NOTE(review): argv[argc-1] / argv[argc-2] are read without an
	 * explicit argc >= 2 guard; presumably gdb output always yields
	 * at least two tokens here -- verify. */
	revise_bracket = stop_bit = 0;
	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		revise_bracket = TRUE;
		stop_bit = FALSE;
	} else if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    strstr(argv[argc-1], ">;;")) {
		revise_bracket = TRUE;
		stop_bit = TRUE;
	}

	if (revise_bracket) {
		/* Re-render a trailing "<symbol>" target in the new radix. */
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) && !STRNEQ(p1, "0x"))
			p1--;

		if (!STRNEQ(p1, "0x"))
			return FALSE;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>%s\n", value,
			value_to_symstr(value, buf2, output_radix),
			stop_bit ? ";;" : "");

		sprintf(p1, "%s", buf1);
	} else if (STRNEQ(argv[argc-2], "br.call.") &&
	    STRNEQ(argv[argc-1], "b0=0x")) {
		/*
		 *  Update module function calls of these formats:
		 *
		 *   br.call.sptk.many b0=0xa0000000003d5e40;;
		 *   br.call.sptk.many b0=0xa00000000001dfc0
		 *
		 *  to show a bracketed function name if the destination
		 *  address is a known symbol with no offset.
		 */
		if ((p1 = strstr(argv[argc-1], ";;")) &&
		    (p2 = strstr(inbuf, ";;\n"))) {
			*p1 = NULLCHAR;
			p1 = &argv[argc-1][3];
			if (extract_hex(p1, &value, NULLCHAR, TRUE)) {
				sprintf(buf1, " <%s>;;\n",
					value_to_symstr(value, buf2,
					output_radix));
				if (IS_MODULE_VADDR(value) &&
				    !strstr(buf2, "+"))
					sprintf(p2, "%s", buf1);
			}
		} else {
			p1 = &argv[argc-1][3];
			p2 = &LASTCHAR(inbuf);
			if (extract_hex(p1, &value, '\n', TRUE)) {
				sprintf(buf1, " <%s>\n",
					value_to_symstr(value, buf2,
					output_radix));
				if (IS_MODULE_VADDR(value) &&
				    !strstr(buf2, "+"))
					sprintf(p2, "%s", buf1);
			}
		}
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 * Format the pt_regs structure.
 */
/* Index names for the scraped pt_regs values stored in eframe[] below. */
enum pt_reg_names {
	P_cr_ipsr, P_cr_iip, P_cr_ifs,
	P_ar_unat, P_ar_pfs, P_ar_rsc, P_ar_rnat, P_ar_bspstore,
	P_ar_ccv, P_ar_fpsr, P_pr, P_loadrs,
	P_b0, P_b6, P_b7,
	P_r1, P_r2, P_r3, P_r8, P_r9, P_r10, P_r11, P_r12, P_r13,
	P_r14, P_r15, P_r16, P_r17, P_r18, P_r19, P_r20, P_r21,
	P_r22, P_r23, P_r24, P_r25, P_r26, P_r27, P_r28, P_r29,
	P_r30, P_r31,
	P_f6_lo, P_f6_hi, P_f7_lo, P_f7_hi,
	P_f8_lo, P_f8_hi, P_f9_lo, P_f9_hi,
	P_f10_lo, P_f10_hi, P_f11_lo, P_f11_hi,
	NUM_PT_REGS};

/*
 * Display an exception frame: dump the pt_regs (or xen cpu_user_regs)
 * structure into a tmpfile via gdb, scrape the printed member values back
 * out of the text, and reformat them.
 */
void
ia64_exception_frame(ulong addr, struct bt_info *bt)
{
	char buf[BUFSIZE], *p, *p1;
	int fval;
	ulong value1, value2;
	ulong eframe[NUM_PT_REGS];

	console("ia64_exception_frame: pt_regs: %lx\n", addr);

	/* Silence debug output while gdb dumps the structure. */
	if (bt->debug)
		CRASHDEBUG_RESTORE();

	CRASHDEBUG_SUSPEND(0);

	BZERO(&eframe, sizeof(ulong) * NUM_PT_REGS);

	open_tmpfile();

	if (XEN_HYPER_MODE())
		dump_struct("cpu_user_regs", addr, RADIX(16));
	else
		dump_struct("pt_regs", addr, RADIX(16));

	rewind(pc->tmpfile);

	/*
	 * Scrape the dump_struct() output line by line.  A non-zero fval
	 * means the previous line named a floating-point register whose
	 * two-element value array appears on the current line.
	 */
	fval = 0;
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "f6 = ")) {
			fval = 6;
			continue;
		}
		if (strstr(buf, "f7 = ")) {
			fval = 7;
			continue;
		}
		if (strstr(buf, "f8 = ")) {
			fval = 8;
			continue;
		}
		if (strstr(buf, "f9 = ")) {
			fval = 9;
			continue;
		}
		if (strstr(buf, "f10 = ")) {
			fval = 10;
			continue;
		}
		if (strstr(buf, "f11 = ")) {
			fval = 11;
			continue;
		}

		if (!strstr(buf, "0x"))
			continue;

		if (fval) {
			/* "{ 0xlo, 0xhi }" pair for the pending register. */
			p = strstr(buf, "0x");
			if ((p1 = strstr(p, "}")))
				*p1 = NULLCHAR;
			extract_hex(p, &value1, ',', TRUE);
			p = strstr(buf, ",");
			extract_hex(p, &value2, NULLCHAR, FALSE);
			switch (fval) {
			case 6:
				eframe[P_f6_lo] = value1;
				eframe[P_f6_hi] = value2;
				break;
			case 7:
				eframe[P_f7_lo] = value1;
				eframe[P_f7_hi] = value2;
				break;
			case 8:
				eframe[P_f8_lo] = value1;
				eframe[P_f8_hi] = value2;
				break;
			case 9:
				eframe[P_f9_lo] = value1;
				eframe[P_f9_hi] = value2;
				break;
			case 10:
				eframe[P_f10_lo] = value1;
				eframe[P_f10_hi] = value2;
				break;
			case 11:
				eframe[P_f11_lo] = value1;
				eframe[P_f11_hi] = value2;
				break;
			}
			fval = 0;
			continue;
		}

		/* Scalar member: "name = 0xvalue". */
		strip_comma(clean_line(buf));
		p = strstr(buf, " = ");
		extract_hex(p, &value1, NULLCHAR, FALSE);

		if (strstr(buf, "cr_ipsr = ")) {
			eframe[P_cr_ipsr] = value1;
		}
		if (strstr(buf, "cr_iip = ")) {
			eframe[P_cr_iip] = value1;
		}
		if (strstr(buf, "cr_ifs = ")) {
			eframe[P_cr_ifs] = value1;
		}
		if (strstr(buf, "ar_unat = ")) {
			eframe[P_ar_unat] = value1;
		}
		if (strstr(buf, "ar_pfs = ")) {
			eframe[P_ar_pfs] = value1;
		}
		if (strstr(buf, "ar_rsc = ")) {
			eframe[P_ar_rsc] = value1;
		}
		if (strstr(buf, "ar_rnat = ")) {
			eframe[P_ar_rnat] = value1;
		}
		if (strstr(buf, "ar_bspstore = ")) {
			eframe[P_ar_bspstore] = value1;
		}
		if (strstr(buf, "ar_ccv = ")) {
			eframe[P_ar_ccv] = value1;
		}
		if (strstr(buf, "ar_fpsr = ")) {
			eframe[P_ar_fpsr] = value1;
		}
		if (strstr(buf, "pr = ")) {
			eframe[P_pr] = value1;
		}
		if (strstr(buf, "loadrs = ")) {
			eframe[P_loadrs] = value1;
		}
		if (strstr(buf, "b0 = ")) {
			eframe[P_b0] = value1;
		}
		if (strstr(buf, "b6 = ")) {
			eframe[P_b6] = value1;
		}
		if (strstr(buf, "b7 = ")) {
			eframe[P_b7] = value1;
		}
		if (strstr(buf, "r1 = ")) {
			eframe[P_r1] = value1;
		}
		if (strstr(buf, "r2 = ")) {
			eframe[P_r2] = value1;
		}
		if (strstr(buf, "r3 = ")) {
			eframe[P_r3] = value1;
		}
		if (strstr(buf, "r8 = ")) {
			eframe[P_r8] = value1;
		}
		if (strstr(buf, "r9 = ")) {
			eframe[P_r9] = value1;
		}
		if (strstr(buf, "r10 = ")) {
			eframe[P_r10] = value1;
		}
		if (strstr(buf, "r11 = ")) {
			eframe[P_r11] = value1;
		}
		if (strstr(buf, "r12 = ")) {
			eframe[P_r12] = value1;
		}
		if (strstr(buf, "r13 = ")) {
			eframe[P_r13] = value1;
		}
		if (strstr(buf, "r14 = ")) {
			eframe[P_r14] = value1;
		}
		if (strstr(buf, "r15 = ")) {
			eframe[P_r15] = value1;
		}
		if (strstr(buf, "r16 = ")) {
			eframe[P_r16] = value1;
		}
		if (strstr(buf, "r17 = ")) {
			eframe[P_r17] = value1;
		}
		if (strstr(buf, "r18 = ")) {
			eframe[P_r18] = value1;
		}
		if (strstr(buf, "r19 = ")) {
			eframe[P_r19] = value1;
		}
		if (strstr(buf, "r20 = ")) {
			eframe[P_r20] = value1;
		}
		if (strstr(buf, "r21 = ")) {
			eframe[P_r21] = value1;
		}
		if (strstr(buf, "r22 = ")) {
			eframe[P_r22] = value1;
		}
		if (strstr(buf, "r23 = ")) {
			eframe[P_r23] = value1;
		}
		if (strstr(buf, "r24 = ")) {
			eframe[P_r24] = value1;
		}
		if (strstr(buf, "r25 = ")) {
			eframe[P_r25] = value1;
		}
		if (strstr(buf, "r26 = ")) {
			eframe[P_r26] = value1;
		}
		if (strstr(buf, "r27 = ")) {
			eframe[P_r27] = value1;
		}
		if (strstr(buf, "r28 = ")) {
			eframe[P_r28] = value1;
		}
		if (strstr(buf, "r29 = ")) {
			eframe[P_r29] = value1;
		}
		if (strstr(buf, "r30 = ")) {
			eframe[P_r30] = value1;
		}
		if (strstr(buf, "r31 = ")) {
			eframe[P_r31] = value1;
		}
	}

	close_tmpfile();

	fprintf(fp, " EFRAME: %lx\n", addr);

	if (bt->flags & BT_INCOMPLETE_USER_EFRAME) {
		fprintf(fp,
		    " [exception frame incomplete -- check salinfo for complete context]\n");
		bt->flags &= ~BT_INCOMPLETE_USER_EFRAME;
	}

	fprintf(fp, " B0: %016lx CR_IIP: %016lx\n",
		eframe[P_b0], eframe[P_cr_iip]);
/**
	if (is_kernel_text(eframe[P_cr_iip]))
		fprintf(fp, "<%s>",
			value_to_symstr(eframe[P_cr_iip], buf, 0));
	fprintf(fp, "\n");
**/
	fprintf(fp, " CR_IPSR: %016lx CR_IFS: %016lx\n",
		eframe[P_cr_ipsr], eframe[P_cr_ifs]);
	fprintf(fp, " AR_PFS: %016lx AR_RSC: %016lx\n",
		eframe[P_ar_pfs], eframe[P_ar_rsc]);
	fprintf(fp, " AR_UNAT: %016lx AR_RNAT: %016lx\n",
		eframe[P_ar_unat], eframe[P_ar_rnat]);
	fprintf(fp, " AR_CCV: %016lx AR_FPSR: %016lx\n",
		eframe[P_ar_ccv], eframe[P_ar_fpsr]);
	fprintf(fp, " LOADRS: %016lx AR_BSPSTORE: %016lx\n",
		eframe[P_loadrs], eframe[P_ar_bspstore]);
	fprintf(fp, " B6: %016lx B7: %016lx\n",
		eframe[P_b6], eframe[P_b7]);
	fprintf(fp, " PR: %016lx R1: %016lx\n",
		eframe[P_pr], eframe[P_r1]);
	fprintf(fp, " R2: %016lx R3: %016lx\n",
		eframe[P_r2], eframe[P_r3]);
	fprintf(fp, " R8: %016lx R9: %016lx\n",
		eframe[P_r8], eframe[P_r9]);
	fprintf(fp, " R10: %016lx R11: %016lx\n",
		eframe[P_r10], eframe[P_r11]);
	fprintf(fp, " R12: %016lx R13: %016lx\n",
		eframe[P_r12], eframe[P_r13]);
	fprintf(fp, " R14: %016lx R15: %016lx\n",
		eframe[P_r14], eframe[P_r15]);
	fprintf(fp, " R16: %016lx R17: %016lx\n",
		eframe[P_r16], eframe[P_r17]);
	fprintf(fp, " R18: %016lx R19: %016lx\n",
		eframe[P_r18], eframe[P_r19]);
	fprintf(fp, " R20: %016lx R21: %016lx\n",
		eframe[P_r20], eframe[P_r21]);
	fprintf(fp, " R22: %016lx R23: %016lx\n",
		eframe[P_r22], eframe[P_r23]);
	fprintf(fp, " R24: %016lx R25: %016lx\n",
		eframe[P_r24], eframe[P_r25]);
	fprintf(fp, " R26: %016lx R27: %016lx\n",
		eframe[P_r26], eframe[P_r27]);
	fprintf(fp, " R28: %016lx R29: %016lx\n",
		eframe[P_r28], eframe[P_r29]);
	fprintf(fp, " R30: %016lx R31: %016lx\n",
		eframe[P_r30], eframe[P_r31]);
	/* fp registers: 82-bit values shown as hi(18 bits)/lo(64 bits). */
	fprintf(fp, " F6: %05lx%016lx ",
		eframe[P_f6_hi], eframe[P_f6_lo]);
	fprintf(fp, " F7: %05lx%016lx\n",
		eframe[P_f7_hi], eframe[P_f7_lo]);
	fprintf(fp, " F8: %05lx%016lx ",
		eframe[P_f8_hi], eframe[P_f8_lo]);
	fprintf(fp, " F9: %05lx%016lx\n",
		eframe[P_f9_hi], eframe[P_f9_lo]);
	if (machdep->flags & NEW_UNW_V3) {
		fprintf(fp, " F10: %05lx%016lx ",
			eframe[P_f10_hi], eframe[P_f10_lo]);
		fprintf(fp, " F11: %05lx%016lx\n",
			eframe[P_f11_hi], eframe[P_f11_lo]);
	}

	/* Restore the debug level that was suspended above. */
	CRASHDEBUG_RESTORE();

	if (bt->debug)
		CRASHDEBUG_SUSPEND(bt->debug);
}

/* Index names for the scraped switch_stack values. */
enum ss_reg_names {
	S_caller_unat, S_ar_fpsr,
	S_f2_lo, S_f2_hi, S_f3_lo, S_f3_hi,
	S_f4_lo, S_f4_hi, S_f5_lo, S_f5_hi,
	S_f10_lo, S_f10_hi, S_f11_lo, S_f11_hi,
	S_f12_lo, S_f12_hi, S_f13_lo, S_f13_hi,
	S_f14_lo, S_f14_hi, S_f15_lo, S_f15_hi,
	S_f16_lo, S_f16_hi, S_f17_lo, S_f17_hi,
	S_f18_lo, S_f18_hi, S_f19_lo, S_f19_hi,
	S_f20_lo, S_f20_hi, S_f21_lo, S_f21_hi,
	S_f22_lo, S_f22_hi, S_f23_lo, S_f23_hi,
	S_f24_lo, S_f24_hi, S_f25_lo, S_f25_hi,
	S_f26_lo, S_f26_hi, S_f27_lo, S_f27_hi,
	S_f28_lo, S_f28_hi, S_f29_lo, S_f29_hi,
	S_f30_lo, S_f30_hi, S_f31_lo, S_f31_hi,
	S_r4, S_r5, S_r6, S_r7,
	S_b0, S_b1, S_b2, S_b3, S_b4, S_b5,
	S_ar_pfs, S_ar_lc, S_ar_unat, S_ar_rnat, S_ar_bspstore, S_pr,
	NUM_SS_REGS
};

/*
 * Format the switch_stack structure.
*/
static void
ia64_dump_switch_stack(ulong task, ulong flag)
{
	ulong addr;
	char buf[BUFSIZE], *p;
	int fval;			/* FP register number pending a value line, or 0 */
	ulong value1, value2;
	ulong ss[NUM_SS_REGS];		/* parsed register values, indexed by S_* enum */

	/* NOTE(review): "flag" is unused in this function -- confirm callers */

	addr = SWITCH_STACK_ADDR(task);

	BZERO(&ss, sizeof(ulong) * NUM_SS_REGS);

	/*
	 * Have gdb format the switch_stack structure into a tmpfile,
	 * then parse its text output back into the ss[] array.
	 */
	open_tmpfile();
	dump_struct("switch_stack", addr, RADIX(16));
	rewind(pc->tmpfile);
	fval = 0;
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		/*
		 * A line naming an FP register is followed by a line with
		 * its {lo, hi} value pair; remember which register was seen
		 * and pick the pair up on the next iteration.
		 */
		if (strstr(buf, "f2 = ")) { fval = 2; continue; }
		if (strstr(buf, "f3 = ")) { fval = 3; continue; }
		if (strstr(buf, "f4 = ")) { fval = 4; continue; }
		if (strstr(buf, "f5 = ")) { fval = 5; continue; }
		if (strstr(buf, "f10 = ")) { fval = 10; continue; }
		if (strstr(buf, "f11 = ")) { fval = 11; continue; }
		if (strstr(buf, "f12 = ")) { fval = 12; continue; }
		if (strstr(buf, "f13 = ")) { fval = 13; continue; }
		if (strstr(buf, "f14 = ")) { fval = 14; continue; }
		if (strstr(buf, "f15 = ")) { fval = 15; continue; }
		if (strstr(buf, "f16 = ")) { fval = 16; continue; }
		if (strstr(buf, "f17 = ")) { fval = 17; continue; }
		if (strstr(buf, "f18 = ")) { fval = 18; continue; }
		if (strstr(buf, "f19 = ")) { fval = 19; continue; }
		if (strstr(buf, "f20 = ")) { fval = 20; continue; }
		if (strstr(buf, "f21 = ")) { fval = 21; continue; }
		if (strstr(buf, "f22 = ")) { fval = 22; continue; }
		if (strstr(buf, "f23 = ")) { fval = 23; continue; }
		if (strstr(buf, "f24 = ")) { fval = 24; continue; }
		if (strstr(buf, "f25 = ")) { fval = 25; continue; }
		if (strstr(buf, "f26 = ")) { fval = 26; continue; }
		if (strstr(buf, "f27 = ")) { fval = 27; continue; }
		if (strstr(buf, "f28 = ")) { fval = 28; continue; }
		if (strstr(buf, "f29 = ")) { fval = 29; continue; }
		if (strstr(buf, "f30 = ")) { fval = 30; continue; }
		if (strstr(buf, "f31 = ")) { fval = 31; continue; }

		/* skip lines carrying no hex value at all */
		if (!strstr(buf, "0x"))
			continue;

		if (fval) {
			/* "{lo, hi}" pair for the pending FP register */
			p = strstr(buf, "0x");
			extract_hex(p, &value1, ',', TRUE);
			p = strstr(buf, ",");
			extract_hex(p, &value2, '}', FALSE);
			switch (fval) {
			case 2: ss[S_f2_lo] = value1; ss[S_f2_hi] = value2; break;
			case 3: ss[S_f3_lo] = value1; ss[S_f3_hi] = value2; break;
			case 4: ss[S_f4_lo] = value1; ss[S_f4_hi] = value2; break;
			case 5: ss[S_f5_lo] = value1; ss[S_f5_hi] = value2; break;
			case 10: ss[S_f10_lo] = value1; ss[S_f10_hi] = value2; break;
			case 11: ss[S_f11_lo] = value1; ss[S_f11_hi] = value2; break;
			case 12: ss[S_f12_lo] = value1; ss[S_f12_hi] = value2; break;
			case 13: ss[S_f13_lo] = value1; ss[S_f13_hi] = value2; break;
			case 14: ss[S_f14_lo] = value1; ss[S_f14_hi] = value2; break;
			case 15: ss[S_f15_lo] = value1; ss[S_f15_hi] = value2; break;
			case 16: ss[S_f16_lo] = value1; ss[S_f16_hi] = value2; break;
			case 17: ss[S_f17_lo] = value1; ss[S_f17_hi] = value2; break;
			case 18: ss[S_f18_lo] = value1; ss[S_f18_hi] = value2; break;
			case 19: ss[S_f19_lo] = value1; ss[S_f19_hi] = value2; break;
			case 20: ss[S_f20_lo] = value1; ss[S_f20_hi] = value2; break;
			case 21: ss[S_f21_lo] = value1; ss[S_f21_hi] = value2; break;
			case 22: ss[S_f22_lo] = value1; ss[S_f22_hi] = value2; break;
			case 23: ss[S_f23_lo] = value1; ss[S_f23_hi] = value2; break;
			case 24: ss[S_f24_lo] = value1; ss[S_f24_hi] = value2; break;
			case 25: ss[S_f25_lo] = value1; ss[S_f25_hi] = value2; break;
			case 26: ss[S_f26_lo] = value1; ss[S_f26_hi] = value2; break;
			case 27: ss[S_f27_lo] = value1; ss[S_f27_hi] = value2; break;
			case 28: ss[S_f28_lo] = value1; ss[S_f28_hi] = value2; break;
			case 29: ss[S_f29_lo] = value1; ss[S_f29_hi] = value2; break;
			case 30: ss[S_f30_lo] = value1; ss[S_f30_hi] = value2; break;
			case 31: ss[S_f31_lo] = value1; ss[S_f31_hi] = value2; break;
			}
			fval = 0;
			continue;
		}

		/* scalar "name = 0x..." line */
		strip_comma(clean_line(buf));
		p = strstr(buf, " = ");
		extract_hex(p, &value1, NULLCHAR, FALSE);
		if (strstr(buf, "caller_unat = ")) { ss[S_caller_unat] = value1; }
		if (strstr(buf, "ar_fpsr = ")) { ss[S_ar_fpsr] = value1; }
		if (strstr(buf, "r4 = ")) { ss[S_r4] = value1; }
		if (strstr(buf, "r5 = ")) { ss[S_r5] = value1; }
		if (strstr(buf, "r6 = ")) { ss[S_r6] = value1; }
		if (strstr(buf, "r7 = ")) { ss[S_r7] = value1; }
		if (strstr(buf, "b0 = ")) { ss[S_b0] = value1; }
		if (strstr(buf, "b1 = ")) { ss[S_b1] = value1; }
		if (strstr(buf, "b2 = ")) { ss[S_b2] = value1; }
		if (strstr(buf, "b3 = ")) { ss[S_b3] = value1; }
		if (strstr(buf, "b4 = ")) { ss[S_b4] = value1; }
		if (strstr(buf, "b5 = ")) { ss[S_b5] = value1; }
		if (strstr(buf, "ar_pfs = ")) { ss[S_ar_pfs] = value1; }
		if (strstr(buf, "ar_lc = ")) { ss[S_ar_lc] = value1; }
		if (strstr(buf, "ar_unat = ")) { ss[S_ar_unat] = value1; }
		if (strstr(buf, "ar_rnat = ")) { ss[S_ar_rnat] = value1; }
		if (strstr(buf, "ar_bspstore = ")) { ss[S_ar_bspstore] = value1; }
		if (strstr(buf, "pr = ")) { ss[S_pr] = value1; }
	}
	close_tmpfile();

	/* formatted display; FP registers print as hi(82-bit tail):lo */
	fprintf(fp, "SWITCH_STACK: %lx\n", addr);
	fprintf(fp, " B0: %016lx B1: %016lx\n", ss[S_b0], ss[S_b1]);
	fprintf(fp, " B2: %016lx B3: %016lx\n", ss[S_b2], ss[S_b3]);
	fprintf(fp, " B4: %016lx B5: %016lx\n", ss[S_b4], ss[S_b5]);
	fprintf(fp, " AR_PFS: %016lx AR_LC: %016lx\n", ss[S_ar_pfs], ss[S_ar_lc]);
	fprintf(fp, " AR_UNAT: %016lx AR_RNAT: %016lx\n", ss[S_ar_unat], ss[S_ar_rnat]);
	fprintf(fp, " PR: %016lx AR_BSPSTORE: %016lx\n", ss[S_pr], ss[S_ar_bspstore]);
	fprintf(fp, " AR_FPSR: %016lx CALLER_UNAT: %016lx\n", ss[S_ar_fpsr], ss[S_caller_unat]);
	fprintf(fp, " R4: %016lx R5: %016lx\n", ss[S_r4], ss[S_r5]);
	fprintf(fp, " R6: %016lx R7: %016lx\n", ss[S_r6], ss[S_r7]);
	fprintf(fp, " F2: %05lx%016lx ", ss[S_f2_hi], ss[S_f2_lo]);
	fprintf(fp, " F3: %05lx%016lx\n", ss[S_f3_hi], ss[S_f3_lo]);
	fprintf(fp, " F4: %05lx%016lx ", ss[S_f4_hi], ss[S_f4_lo]);
	fprintf(fp, " F5: %05lx%016lx\n", ss[S_f5_hi], ss[S_f5_lo]);
	fprintf(fp, " F10: %05lx%016lx ", ss[S_f10_hi], ss[S_f10_lo]);
	fprintf(fp, " F11: %05lx%016lx\n", ss[S_f11_hi], ss[S_f11_lo]);
	fprintf(fp, " F12: %05lx%016lx ", ss[S_f12_hi], ss[S_f12_lo]);
	fprintf(fp, " F13: %05lx%016lx\n", ss[S_f13_hi], ss[S_f13_lo]);
	fprintf(fp, " F14: %05lx%016lx ", ss[S_f14_hi], ss[S_f14_lo]);
	fprintf(fp, " F15: %05lx%016lx\n", ss[S_f15_hi], ss[S_f15_lo]);
	fprintf(fp, " F16: %05lx%016lx ", ss[S_f16_hi], ss[S_f16_lo]);
	fprintf(fp, " F17: %05lx%016lx\n", ss[S_f17_hi], ss[S_f17_lo]);
	fprintf(fp, " F18: %05lx%016lx ", ss[S_f18_hi], ss[S_f18_lo]);
	fprintf(fp, " F19: %05lx%016lx\n", ss[S_f19_hi], ss[S_f19_lo]);
	fprintf(fp, " F20: %05lx%016lx ", ss[S_f20_hi], ss[S_f20_lo]);
	fprintf(fp, " F21: %05lx%016lx\n", ss[S_f21_hi], ss[S_f21_lo]);
	fprintf(fp, " F22: %05lx%016lx ", ss[S_f22_hi], ss[S_f22_lo]);
	fprintf(fp, " F23: %05lx%016lx\n", ss[S_f23_hi], ss[S_f23_lo]);
	fprintf(fp, " F24: %05lx%016lx ", ss[S_f24_hi], ss[S_f24_lo]);
	fprintf(fp, " F25: %05lx%016lx\n", ss[S_f25_hi], ss[S_f25_lo]);
	fprintf(fp, " F26: %05lx%016lx ", ss[S_f26_hi], ss[S_f26_lo]);
	fprintf(fp, " F27: %05lx%016lx\n", ss[S_f27_hi], ss[S_f27_lo]);
	fprintf(fp, " F28: %05lx%016lx ", ss[S_f28_hi], ss[S_f28_lo]);
	fprintf(fp, " F29: %05lx%016lx\n", ss[S_f29_hi], ss[S_f29_lo]);
	fprintf(fp, " F30: %05lx%016lx ", ss[S_f30_hi], ss[S_f30_lo]);
	fprintf(fp, " F31: %05lx%016lx\n", ss[S_f31_hi], ss[S_f31_lo]);
}

/*
 * Override smp_num_cpus if possible and necessary.
 */
int
ia64_get_smp_cpus(void)
{
	int cpus;

	/* prefer the online-cpu count; highest online cpu number may exceed it */
	if ((cpus = get_cpus_online()))
		return MAX(cpus, get_highest_cpu_online()+1);
	else
		return kt->cpus;
}

/*
 * Machine dependent command.
 */
void
ia64_cmd_mach(void)
{
	int c, cflag, mflag;
	unsigned int radix;

	cflag = mflag = radix = 0;

	while ((c = getopt(argcnt, args, "cmxd")) != EOF) {
		switch(c)
		{
		case 'c':		/* dump per-cpu cpuinfo_ia64 data */
			cflag++;
			break;
		case 'm':		/* dump the EFI memory map */
			mflag++;
			ia64_display_memmap();
			break;
		case 'x':		/* hexadecimal output radix */
			if (radix == 10)
				error(FATAL, "-d and -x are mutually exclusive\n");
			radix = 16;
			break;
		case 'd':		/* decimal output radix */
			if (radix == 16)
				error(FATAL, "-d and -x are mutually exclusive\n");
			radix = 10;
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (cflag)
		ia64_display_cpu_data(radix);

	/* default output when neither -c nor -m was given */
	if (!cflag && !mflag)
		ia64_display_machine_stats();
}

/*
 * "mach" command output.
*/
static void
ia64_display_machine_stats(void)
{
	struct new_utsname *uts;
	char buf[BUFSIZE];
	ulong mhz;

	uts = &kt->utsname;

	fprintf(fp, " MACHINE TYPE: %s\n", uts->machine);
	fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf));
	fprintf(fp, " CPUS: %d\n", kt->cpus);
	/* only show the hypervisor line when one was actually detected */
	if (!STREQ(kt->hypervisor, "(undetermined)") &&
	    !STREQ(kt->hypervisor, "bare hardware"))
		fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor);
	fprintf(fp, " PROCESSOR SPEED: ");
	if ((mhz = machdep->processor_speed()))
		fprintf(fp, "%ld Mhz\n", mhz);
	else
		fprintf(fp, "(unknown)\n");
	fprintf(fp, " HZ: %d\n", machdep->hz);
	fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE());
//	fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size());
	fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE());
	/* ia64 virtual address regions, derived from the 3-bit region number */
	fprintf(fp, " KERNEL CACHED REGION: %lx\n",
		(ulong)KERNEL_CACHED_REGION << REGION_SHIFT);
	fprintf(fp, " KERNEL UNCACHED REGION: %lx\n",
		(ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT);
	fprintf(fp, " KERNEL VMALLOC REGION: %lx\n",
		(ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT);
	/* the DATA/STACK label is deliberately printed for both user regions */
	fprintf(fp, " USER DATA/STACK REGION: %lx\n",
		(ulong)USER_STACK_REGION << REGION_SHIFT);
	fprintf(fp, " USER DATA/STACK REGION: %lx\n",
		(ulong)USER_DATA_REGION << REGION_SHIFT);
	fprintf(fp, " USER TEXT REGION: %lx\n",
		(ulong)USER_TEXT_REGION << REGION_SHIFT);
	fprintf(fp, " USER SHARED MEMORY REGION: %lx\n",
		(ulong)USER_SHMEM_REGION << REGION_SHIFT);
	fprintf(fp, "USER IA32 EMULATION REGION: %016lx\n",
		(ulong)USER_IA32_EMUL_REGION << REGION_SHIFT);
}

/*
 * Dump the cpuinfo_ia64 structure for each cpu ("mach -c").
 */
static void
ia64_display_cpu_data(unsigned int radix)
{
	int cpu;
	ulong cpu_data;			/* address of the current cpu's cpuinfo_ia64 */
	int array_location_known;	/* FALSE: only the boot cpu's data is known */
	struct syment *sp;

	if (!(cpu_data = machdep->machspec->cpu_data_address)) {
		error(FATAL, "cannot find cpuinfo_ia64 location\n");
		return;
	}

	array_location_known = per_cpu_symbol_search("per_cpu__cpu_info") ||
		symbol_exists("cpu_data") || symbol_exists("_cpu_data");

	for (cpu = 0; cpu < kt->cpus; cpu++) {
		fprintf(fp, "%sCPU %d: %s\n", cpu ? "\n" : "", cpu,
			array_location_known ? "" : "(boot)");
		dump_struct("cpuinfo_ia64", cpu_data, radix);

		if (!array_location_known)
			break;

		if ((sp = per_cpu_symbol_search("per_cpu__cpu_info"))) {
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				/* compute the address for the NEXT iteration (cpu+1) */
				cpu_data = sp->value +
					kt->__per_cpu_offset[cpu+1];
			else
				break;  /* we've already done cpu 0 */
		} else
			/* static array layout: just step to the next element */
			cpu_data += SIZE(cpuinfo_ia64);
	}
}

/*
 * Dump the EFI memory map.
 */
static void
ia64_display_memmap(void)
{
	int i, others;
	struct efi_memory_desc_t *desc;
	struct machine_specific *ms;
	char *map;			/* cursor into the cached memmap copy */

	ms = &ia64_machine_specific;
	map = ms->ia64_memmap;

	if (!map) {
		check_mem_limit();
		error(FATAL, "efi_mmap not accessible\n");
	}

	fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE / ATTRIBUTE / [ACCESS]\n");

	for (i = 0; i < ms->efi_memmap_size/ms->efi_memdesc_size; i++) {
		desc = (struct efi_memory_desc_t *)map;
		fprintf(fp, "%016lx - %016lx ",
			desc->phys_addr, desc->phys_addr +
			(desc->num_pages * (1 << EFI_PAGE_SHIFT)));
		switch (desc->type)
		{
		case EFI_RESERVED_TYPE:
			fprintf(fp, "%s", "RESERVED_TYPE"); break;
		case EFI_LOADER_CODE:
			fprintf(fp, "%s", "LOADER_CODE"); break;
		case EFI_LOADER_DATA:
			fprintf(fp, "%s", "LOADER_DATA"); break;
		case EFI_BOOT_SERVICES_CODE:
			fprintf(fp, "%s", "BOOT_SERVICES_CODE"); break;
		case EFI_BOOT_SERVICES_DATA:
			fprintf(fp, "%s", "BOOT_SERVICES_DATA"); break;
		case EFI_RUNTIME_SERVICES_CODE:
			fprintf(fp, "%s", "RUNTIME_SERVICES_CODE"); break;
		case EFI_RUNTIME_SERVICES_DATA:
			fprintf(fp, "%s", "RUNTIME_SERVICES_DATA"); break;
		case EFI_CONVENTIONAL_MEMORY:
			fprintf(fp, "%s", "CONVENTIONAL_MEMORY"); break;
		case EFI_UNUSABLE_MEMORY:
			fprintf(fp, "%s", "UNUSABLE_MEMORY"); break;
		case EFI_ACPI_RECLAIM_MEMORY:
			fprintf(fp, "%s", "ACPI_RECLAIM_MEMORY"); break;
		case EFI_ACPI_MEMORY_NVS:
			fprintf(fp, "%s", "ACPI_MEMORY_NVS"); break;
		case EFI_MEMORY_MAPPED_IO:
			fprintf(fp, "%s", "MEMORY_MAPPED_IO"); break;
		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
			fprintf(fp, "%s", "MEMORY_MAPPED_IO_PORT_SPACE"); break;
		case EFI_PAL_CODE:
			fprintf(fp, "%s", "PAL_CODE"); break;
		default:
			fprintf(fp, "%s", "(unknown type)"); break;
		}
		fprintf(fp, " ");
		/* "|"-separated attribute list */
		others = 0;
		if (desc->attribute & EFI_MEMORY_UC)
			fprintf(fp, "%sUC", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WC)
			fprintf(fp, "%sWC", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WT)
			fprintf(fp, "%sWT", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WB)
			fprintf(fp, "%sWB", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WP)
			fprintf(fp, "%sWP", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_RP)
			fprintf(fp, "%sRP", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_XP)
			fprintf(fp, "%sXP", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_RUNTIME)
			fprintf(fp, "%sRUNTIME", others++ ? "|" : "");
		fprintf(fp, " %s",
			ia64_available_memory(desc) ?  "[available]" : "");
		switch (VADDR_REGION(desc->virt_addr))
		{
		case KERNEL_UNCACHED_REGION:
			fprintf(fp, "[R6]\n");
			break;
		case KERNEL_CACHED_REGION:
			fprintf(fp, "[R7]\n");
			break;
		default:
			fprintf(fp, "\n");
		}

		/* verbose per-descriptor dump only at debug level 1+ */
		if (!CRASHDEBUG(1))
			goto next_desc;

		fprintf(fp, "physical: %016lx %dk pages: %ld virtual: %016lx\n",
			desc->phys_addr, (1 << EFI_PAGE_SHIFT)/1024,
			desc->num_pages, desc->virt_addr);
		fprintf(fp, "type: ");
		switch (desc->type)
		{
		case EFI_RESERVED_TYPE:
			fprintf(fp, "%-27s", "RESERVED_TYPE"); break;
		case EFI_LOADER_CODE:
			fprintf(fp, "%-27s", "LOADER_CODE"); break;
		case EFI_LOADER_DATA:
			fprintf(fp, "%-27s", "LOADER_DATA"); break;
		case EFI_BOOT_SERVICES_CODE:
			fprintf(fp, "%-27s", "BOOT_SERVICES_CODE"); break;
		case EFI_BOOT_SERVICES_DATA:
			fprintf(fp, "%-27s", "BOOT_SERVICES_DATA"); break;
		case EFI_RUNTIME_SERVICES_CODE:
			fprintf(fp, "%-27s", "RUNTIME_SERVICES_CODE"); break;
		case EFI_RUNTIME_SERVICES_DATA:
			fprintf(fp, "%-27s", "RUNTIME_SERVICES_DATA"); break;
		case EFI_CONVENTIONAL_MEMORY:
			fprintf(fp, "%-27s", "CONVENTIONAL_MEMORY"); break;
		case EFI_UNUSABLE_MEMORY:
			fprintf(fp, "%-27s", "UNUSABLE_MEMORY"); break;
		case EFI_ACPI_RECLAIM_MEMORY:
			fprintf(fp, "%-27s", "ACPI_RECLAIM_MEMORY"); break;
		case EFI_ACPI_MEMORY_NVS:
			fprintf(fp, "%-27s", "ACPI_MEMORY_NVS"); break;
		case EFI_MEMORY_MAPPED_IO:
			fprintf(fp, "%-27s", "MEMORY_MAPPED_IO"); break;
		case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
			fprintf(fp, "%-27s", "MEMORY_MAPPED_IO_PORT_SPACE"); break;
		case EFI_PAL_CODE:
			fprintf(fp, "%-27s", "PAL_CODE"); break;
		default:
			fprintf(fp, "%-27s", "(unknown type)"); break;
		}
		fprintf(fp, " attribute: (");
		others = 0;
		if (desc->attribute & EFI_MEMORY_UC)
			fprintf(fp, "%sUC", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WC)
			fprintf(fp, "%sWC", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WT)
			fprintf(fp, "%sWT", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WB)
			fprintf(fp, "%sWB", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_WP)
			fprintf(fp, "%sWP", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_RP)
			fprintf(fp, "%sRP", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_XP)
			fprintf(fp, "%sXP", others++ ? "|" : "");
		if (desc->attribute & EFI_MEMORY_RUNTIME)
			fprintf(fp, "%sRUNTIME", others++ ? "|" : "");
		fprintf(fp, ") %s\n",
			ia64_available_memory(desc) ?  "[available]" : "");
next_desc:
		map += ms->efi_memdesc_size;
	}
}

/*
 * A descriptor counts as "available" RAM when it is write-back cacheable
 * and of one of the usable EFI memory types.
 */
static int
ia64_available_memory(struct efi_memory_desc_t *desc)
{
	if (desc->attribute & EFI_MEMORY_WB) {
		switch (desc->type)
		{
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
			return TRUE;
		}
	}
	return FALSE;
}

/*
 * Make a copy of the memmap descriptor array.
*/
static void
ia64_create_memmap(void)
{
	struct machine_specific *ms;
	uint64_t ia64_boot_param, efi_memmap;
	ulong num_physpages;
	char *memmap;			/* malloc'd copy of the EFI memmap */

	ms = &ia64_machine_specific;
	ms->ia64_memmap = NULL;		/* NULL => no memory verification */

	if (symbol_exists("num_physpages")) {
		get_symbol_data("num_physpages", sizeof(ulong),
			&num_physpages);
		machdep->memsize = num_physpages * PAGESIZE();
	}

	if (!symbol_exists("ia64_boot_param"))
		return;

	/* honor a "mem=X" boot command line restriction */
	if ((ms->mem_limit = check_mem_limit()))
		machdep->flags |= MEM_LIMIT;

	get_symbol_data("ia64_boot_param", sizeof(void *), &ia64_boot_param);

	/* the boot param block itself may lie above the mem limit */
	if ((ms->mem_limit && (ia64_VTOP(ia64_boot_param) >= ms->mem_limit)) ||
	    !readmem(ia64_boot_param+
	    MEMBER_OFFSET("ia64_boot_param", "efi_memmap"), KVADDR,
	    &efi_memmap, sizeof(uint64_t), "efi_memmap",
	    QUIET|RETURN_ON_ERROR)) {
		if (!XEN() || CRASHDEBUG(1))
			error(WARNING, "cannot read ia64_boot_param: "
			    "memory verification will not be performed\n\n");
		return;
	}

	readmem(ia64_boot_param+MEMBER_OFFSET("ia64_boot_param",
	    "efi_memmap_size"), KVADDR, &ms->efi_memmap_size,
	    sizeof(uint64_t), "efi_memmap_size", FAULT_ON_ERROR);
	readmem(ia64_boot_param+MEMBER_OFFSET("ia64_boot_param",
	    "efi_memdesc_size"), KVADDR, &ms->efi_memdesc_size,
	    sizeof(uint64_t), "efi_memdesc_size", FAULT_ON_ERROR);

	if (!(memmap = (char *) malloc(ms->efi_memmap_size))) {
		error(WARNING, "cannot malloc ia64_memmap\n");
		return;
	}

	/* efi_memmap is a physical address; read via the kernel mapping */
	if ((ms->mem_limit && (efi_memmap >= ms->mem_limit)) ||
	    !readmem(PTOV(efi_memmap), KVADDR, memmap,
	    ms->efi_memmap_size, "efi_mmap contents", QUIET|RETURN_ON_ERROR)) {
		if (!XEN() || (XEN() && CRASHDEBUG(1)))
			error(WARNING, "cannot read efi_mmap: "
			    "EFI memory verification will not be performed\n\n");
		free(memmap);
		return;
	}

	/* ownership of the buffer transfers to machine_specific */
	ms->ia64_memmap = memmap;
}

/*
 * Kernel pages may cross EFI memmap boundaries, so the system page is
 * broken into EFI pages, and then each of them is verified.
*/
static int
ia64_verify_paddr(uint64_t paddr)
{
	int i, j, cnt, found, desc_count, desc_size;
	struct efi_memory_desc_t *desc;
	struct machine_specific *ms;
	uint64_t phys_end;
	char *map;
	int efi_pages;			/* EFI pages per system page */
	ulong efi_pagesize;

	/*
	 * When kernel text and data are mapped in region 5,
	 * and we're using the crash memory device driver,
	 * then the driver will gracefully fail the read attempt
	 * if the address is bogus.
	 */
	if ((VADDR_REGION(paddr) == KERNEL_VMALLOC_REGION) &&
	    (pc->flags & MEMMOD))
		return TRUE;

	ms = &ia64_machine_specific;
	/* no cached memmap => verification disabled; accept everything */
	if (ms->ia64_memmap == NULL)
		return TRUE;

	desc_count = ms->efi_memmap_size/ms->efi_memdesc_size;
	desc_size = ms->efi_memdesc_size;
	efi_pagesize = (1 << EFI_PAGE_SHIFT);
	efi_pages = PAGESIZE() / efi_pagesize;
	paddr = PAGEBASE(paddr);

	/*
	 * Each EFI-sized sub-page must fall entirely within some
	 * "available" descriptor for the whole system page to verify.
	 */
	for (i = cnt = 0; i < efi_pages; i++, paddr += efi_pagesize) {
		map = ms->ia64_memmap;
		for (j = found = 0; j < desc_count; j++) {
			desc = (struct efi_memory_desc_t *)map;
			if (ia64_available_memory(desc)) {
				phys_end = desc->phys_addr +
				    (desc->num_pages * efi_pagesize);
				if ((paddr >= desc->phys_addr) &&
				    ((paddr + efi_pagesize) <= phys_end)) {
					cnt++;
					found = TRUE;
				}
			}
			if (found)
				break;
			map += desc_size;
		}
	}

	return (cnt == efi_pages);
}

/*
 * Check whether a "mem=X" argument was entered on the boot command line.
 * Note that the default setting of the kernel mem_limit is ~0UL.
*/
static ulong
check_mem_limit(void)
{
	ulong mem_limit;
	char *saved_command_line, *p1, *p2;
	int len;

	if (!symbol_exists("mem_limit"))
		return 0;

	get_symbol_data("mem_limit", sizeof(ulong), &mem_limit);

	/* ~0UL is the kernel's "no limit" default */
	if (mem_limit == ~0UL)
		return 0;

	/* convert inclusive last-byte address to an exclusive size */
	mem_limit += 1;

	if (!symbol_exists("saved_command_line"))
		goto no_command_line;

	len = get_array_length("saved_command_line", 0, sizeof(char));
	if (!len)
		goto no_command_line;

	/*
	 * NOTE(review): this GETBUF() buffer is never FREEBUF()'d on any
	 * path below -- TODO confirm whether that matters for the callers.
	 */
	saved_command_line = GETBUF(len+1);

	if (!readmem(symbol_value("saved_command_line"), KVADDR,
	    saved_command_line, len, "saved_command_line", RETURN_ON_ERROR))
		goto no_command_line;

	if (!(p1 = strstr(saved_command_line, "mem=")))
		goto no_command_line;

	/* NUL-terminate the "mem=..." token for display */
	p2 = p1;
	while (*p2 && !whitespace(*p2))
		p2++;
	*p2 = NULLCHAR;

	error(pc->flags & RUNTIME ? INFO : WARNING,
		"boot command line argument: %s\n", p1);

	return mem_limit;

no_command_line:
	error(pc->flags & RUNTIME ? INFO : WARNING,
		"boot command line memory limit: %lx\n", mem_limit);
	return mem_limit;
}

#ifndef _ASM_IA64_UNWIND_H
#define _ASM_IA64_UNWIND_H

/*
 * Copyright (C) 1999-2000 Hewlett-Packard Co
 * Copyright (C) 1999-2000 David Mosberger-Tang
 *
 * A simple API for unwinding kernel stacks. This is used for
 * debugging and error reporting purposes. The kernel doesn't need
 * full-blown stack unwinding with all the bells and whistles, so there
 * is not much point in implementing the full IA-64 unwind API (though
 * it would of course be possible to implement the kernel API on top
 * of it).
 */

struct task_struct;	/* forward declaration */
struct switch_stack;	/* forward declaration */

enum unw_application_register {
	UNW_AR_BSP,
	UNW_AR_BSPSTORE,
	UNW_AR_PFS,
	UNW_AR_RNAT,
	UNW_AR_UNAT,
	UNW_AR_LC,
	UNW_AR_EC,
	UNW_AR_FPSR,
	UNW_AR_RSC,
	UNW_AR_CCV
};

/*
 * The following declarations are private to the unwind
 * implementation:
 */
struct unw_stack {
	unsigned long limit;
	unsigned long top;
};

#define UNW_FLAG_INTERRUPT_FRAME	(1UL << 0)

/*
 * No user of this module should ever access this structure directly
 * as it is subject to change.
It is declared here solely so we can
 * use automatic variables.
 */
struct unw_frame_info {
	struct unw_stack regstk;	/* register backing store bounds */
	struct unw_stack memstk;
	unsigned int flags;
	short hint;
	short prev_script;
	unsigned long bsp;
	unsigned long sp;		/* stack pointer */
	unsigned long psp;		/* previous sp */
	unsigned long ip;		/* instruction pointer */
	unsigned long pr_val;		/* current predicates */
	unsigned long *cfm;

	struct task_struct *task;
	struct switch_stack *sw;

	/* preserved state: */
	unsigned long *pbsp;		/* previous bsp */
	unsigned long *bspstore;
	unsigned long *pfs;
	unsigned long *rnat;
	unsigned long *rp;
	unsigned long *pri_unat;
	unsigned long *unat;
	unsigned long *pr;
	unsigned long *lc;
	unsigned long *fpsr;
	struct unw_ireg {
		unsigned long *loc;
		struct unw_ireg_nat {
			int type : 3;		/* enum unw_nat_type */
			signed int off;		/* NaT word is at loc+nat.off */
		} nat;
	} r4, r5, r6, r7;
	unsigned long *b1, *b2, *b3, *b4, *b5;
	struct ia64_fpreg *f2, *f3, *f4, *f5, *fr[16];
};

#endif /* _ASM_UNWIND_H */

/*
 * Perform any leftover pre-prompt machine-specific initialization tasks here.
*/ static void ia64_post_init(void) { struct machine_specific *ms; struct gnu_request req; struct syment *sp; ulong flag; ms = &ia64_machine_specific; if (symbol_exists("unw_init_frame_info")) { machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("pt_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } else { machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; } ms->unwind_init(); if (!VALID_STRUCT(cpuinfo_ia64)) error(WARNING, "cpuinfo_ia64 structure does not exist\n"); else { if (symbol_exists("_cpu_data")) ms->cpu_data_address = symbol_value("_cpu_data"); else if (symbol_exists("boot_cpu_data")) get_symbol_data("boot_cpu_data", sizeof(ulong), &ms->cpu_data_address); else if (symbol_exists("cpu_data")) ms->cpu_data_address = symbol_value("cpu_data"); else if ((sp = per_cpu_symbol_search("per_cpu__cpu_info")) || (sp = per_cpu_symbol_search("per_cpu__ia64_cpu_info"))) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) ms->cpu_data_address = sp->value + kt->__per_cpu_offset[0]; else ms->cpu_data_address = sp->value; } else { error(WARNING, "cannot find cpuinfo_ia64 location\n"); ms->cpu_data_address = 0; } if (ms->cpu_data_address) { if (VALID_MEMBER(cpuinfo_ia64_unimpl_va_mask)) readmem(ms->cpu_data_address + OFFSET(cpuinfo_ia64_unimpl_va_mask), KVADDR, &ms->unimpl_va_mask, sizeof(ulong), "unimpl_va_mask", FAULT_ON_ERROR); if (VALID_MEMBER(cpuinfo_ia64_unimpl_pa_mask)) 
readmem(ms->cpu_data_address + OFFSET(cpuinfo_ia64_unimpl_pa_mask), KVADDR, &ms->unimpl_pa_mask, sizeof(ulong), "unimpl_pa_mask", FAULT_ON_ERROR); } } if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) { get_symbol_type("ia64_init_stack", NULL, &req); ms->ia64_init_stack_size = req.length; } if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK()))) machdep->flags |= INIT; if (DUMPFILE() && (flag = ia64_in_per_cpu_mca_stack())) machdep->flags |= flag; } /* * Try using the old unwind scheme if the new one fails, * that is as long as the unw_frame_info structs are the * same size. */ static void try_old_unwind(struct bt_info *bt) { if ((machdep->flags & NEW_UNWIND) && (STRUCT_SIZE("unw_frame_info") == sizeof(struct unw_frame_info))) { error(INFO, "unwind: trying old unwind mechanism\n"); ia64_old_unwind(bt); } } /* * Unwind the stack using the basic method used when CONFIG_IA64_NEW_UNWIND * is not configured into the kernel. * * NOTE: see kernel source: show_stack() and/or kdba_bt_stack() */ static void ia64_old_unwind_init(void) { long len; len = STRUCT_SIZE("unw_frame_info"); if (len < 0) { error(WARNING, "cannot determine size of unw_frame_info\n"); machdep->flags |= UNW_OUT_OF_SYNC; } else if (len != sizeof(struct unw_frame_info)) { error(WARNING, "unw_frame_info size differs: %ld (local: %d)\n", len, sizeof(struct unw_frame_info)); machdep->flags |= UNW_OUT_OF_SYNC; } } static int unw_debug; /* debug fprintf indent */ static void ia64_old_unwind(struct bt_info *bt) { struct unw_frame_info unw_frame_info, *info; struct syment *sm; int frame; char *name; if (bt->debug) CRASHDEBUG_SUSPEND(bt->debug); if (CRASHDEBUG(1)) unw_debug = 0; info = &unw_frame_info; unw_init_from_blocked_task(info, bt->task); frame = 0; do { if (info->ip == 0) break; if (!IS_KVADDR(info->ip)) break; if ((sm = value_search(info->ip, NULL))) name = sm->name; else name = "(unknown)"; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & 
(BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str)) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == info->ip) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; } } else { fprintf(fp, "%s#%d [BSP:%lx] %s at %lx\n", frame >= 10 ? "" : " ", frame, info->bsp, name, info->ip); if (bt->flags & BT_FULL) rse_function_params(info, name); if (bt->flags & BT_LINE_NUMBERS) ia64_dump_line_number(info->ip); } frame++; if (CRASHDEBUG(1)) unw_debug = 0; if (STREQ(name, "start_kernel")) break; } while (old_unw_unwind(info) >= 0); unwind_return: if (!BT_REFERENCE_CHECK(bt) && !is_kernel_thread(bt->task)) ia64_exception_frame(bt->stacktop - SIZE(pt_regs), bt); if (bt->debug) CRASHDEBUG_RESTORE(); } static unsigned long ia64_rse_slot_num (unsigned long *addr) { return (((unsigned long) addr) >> 3) & 0x3f; } /* * Given a bsp address and a number of register locations, calculate a new * bsp address, accounting for any intervening RNAT stores. */ static unsigned long * ia64_rse_skip_regs (unsigned long *addr, long num_regs) { long delta = ia64_rse_slot_num(addr) + num_regs; if (CRASHDEBUG(1)) { fprintf(fp, "%sia64_rse_skip_regs: ia64_rse_slot_num(%lx): %ld num_regs: %ld\n", space(unw_debug), (ulong)addr, ia64_rse_slot_num(addr), num_regs); } if (num_regs < 0) delta -= 0x3e; if (CRASHDEBUG(1)) { fprintf(fp, "%sia64_rse_skip_regs: delta: %ld return(%lx)", space(unw_debug), delta, (ulong)(addr + num_regs + delta/0x3f)); if (addr > (addr + num_regs + delta/0x3f)) fprintf(fp, "(-%ld)\n", addr - (addr + num_regs + delta/0x3f)); else fprintf(fp, "(+%ld)\n", (addr + num_regs + delta/0x3f) - addr); } return(addr + num_regs + delta/0x3f); } /* * Returns the address of the RNAT slot that covers the slot at * address SLOT_ADDR. 
*/
static unsigned long *
ia64_rse_rnat_addr (unsigned long *slot_addr)
{
	/* setting bits 3..8 yields the 64th (RNAT collection) slot address */
	return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3));
}

/*
 * Initialize the key fields in the unw_frame_info structure.
 *
 * NOTE: see kernel source: unw_init_from_blocked_task()
 */
static void
unw_init_from_blocked_task(struct unw_frame_info *info, ulong task)
{
	ulong sw;			/* address of the task's switch_stack */
	ulong sol, limit, top;
	ulong ar_pfs, ar_bspstore, b0;

	sw = SWITCH_STACK_ADDR(task);

	BZERO(info, sizeof(struct unw_frame_info));

	readmem(sw + OFFSET(switch_stack_b0), KVADDR,
		&b0, sizeof(ulong), "switch_stack b0", FAULT_ON_ERROR);
	readmem(sw + OFFSET(switch_stack_ar_pfs), KVADDR,
		&ar_pfs, sizeof(ulong), "switch_stack ar_pfs",
		FAULT_ON_ERROR);
	readmem(sw + OFFSET(switch_stack_ar_bspstore), KVADDR,
		&ar_bspstore, sizeof(ulong), "switch_stack ar_bspstore",
		FAULT_ON_ERROR);

	sol = (ar_pfs >> 7) & 0x7f;	/* size of locals */
	limit = task + IA64_RBS_OFFSET;
	top = ar_bspstore;
	/* clamp a bspstore that points outside the task's stack pages */
	if ((top - task) >= IA64_STK_OFFSET)
		top = limit;

	if (CRASHDEBUG(1)) {
		unw_debug++;
		fprintf(fp,
		    "unw_init_from_blocked_task: stack top: %lx sol: %ld\n",
			top, sol);
	}

	info->regstk.limit = limit;
	info->regstk.top = top;
	info->sw = (struct switch_stack *)sw;
	/*
	 * Step back "sol" register slots from the top to find this
	 * frame's bsp.  NOTE(review): sol is ulong, so -sol relies on
	 * the unsigned-to-long conversion producing the intended
	 * negative num_regs argument.
	 */
	info->bsp = (ulong)ia64_rse_skip_regs((ulong *)info->regstk.top,
		-sol);
	info->cfm = (ulong *)(sw + OFFSET(switch_stack_ar_pfs));
	info->ip = b0;

	if (CRASHDEBUG(1))
		dump_unw_frame_info(info);
}

/*
 * Update the unw_frame_info structure based upon its current state.
 * This routine works without enabling CONFIG_IA64_NEW_UNWIND because
 * gdb allocates two additional "local" register locations for each
 * function, found at the end of the stored locals:
 *
 *    register "sol-1" (last local) = ar.pfs (gives us previous sol)
 *    register "sol-2" (2nd to last local) = b0 to previous address
 *
 * NOTE: see kernel source: unw_unwind() (#ifndef CONFIG_IA64_NEW_UNWIND)
 * On entry, info->regstk.top should point to the register backing
 * store for r32.
*/ static int old_unw_unwind (struct unw_frame_info *info) { unsigned long sol, cfm; int is_nat; if (!readmem((ulong)info->cfm, KVADDR, &cfm, sizeof(long), "info->cfm", QUIET|RETURN_ON_ERROR)) return -1; sol = (cfm >> 7) & 0x7f; /* size of locals */ if (CRASHDEBUG(1)) { fprintf(fp, "old_unw_unwind: cfm: %lx sol: %ld\n", cfm, sol); unw_debug++; } /* * In general, we would have to make use of unwind info to * unwind an IA-64 stack, but for now gcc uses a special * convention that makes this possible without full-fledged * unwind info. Specifically, we expect "rp" in the second * last, and "ar.pfs" in the last local register, so the * number of locals in a frame must be at least two. If it's * less than that, we reached the end of the C call stack. */ if (sol < 2) return -1; info->ip = rse_read_reg(info, sol - 2, &is_nat); if (CRASHDEBUG(1)) fprintf(fp, "old_unw_unwind: ip: %lx\n", info->ip); if (is_nat || (info->ip & (machdep->machspec->unimpl_va_mask | 0xf))) return -1; info->cfm = ia64_rse_skip_regs((ulong *)info->bsp, sol - 1); cfm = rse_read_reg(info, sol - 1, &is_nat); if (CRASHDEBUG(1)) fprintf(fp, "old_unw_unwind: info->cfm: %lx => %lx\n", (ulong)info->cfm, cfm); if (is_nat) return -1; sol = (cfm >> 7) & 0x7f; info->bsp = (ulong)ia64_rse_skip_regs((ulong *)info->bsp, -sol); if (CRASHDEBUG(1)) { fprintf(fp, "old_unw_unwind: next sol: %ld\n", sol); fprintf(fp, "old_unw_unwind: next bsp: %lx\n", info->bsp); } return 0; #ifdef KERNEL_SOURCE unsigned long sol, cfm = *info->cfm; int is_nat; sol = (cfm >> 7) & 0x7f; /* size of locals */ /* * In general, we would have to make use of unwind info to * unwind an IA-64 stack, but for now gcc uses a special * convention that makes this possible without full-fledged * unwind info. Specifically, we expect "rp" in the second * last, and "ar.pfs" in the last local register, so the * number of locals in a frame must be at least two. If it's * less than that, we reached the end of the C call stack. 
*/ if (sol < 2) return -1; info->ip = rse_read_reg(info, sol - 2, &is_nat); if (is_nat || (info->ip & (my_cpu_data.unimpl_va_mask | 0xf))) /* reject let obviously bad addresses */ return -1; info->cfm = ia64_rse_skip_regs((unsigned long *) info->bsp, sol - 1); cfm = rse_read_reg(info, sol - 1, &is_nat); if (is_nat) return -1; sol = (cfm >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -sol); return 0; #endif /* KERNEL_SOURCE */ } /* * Retrieve a register value from the stack, returning its NAT attribute * as well. * * NOTE: see kernel source: read_reg() */ static ulong rse_read_reg (struct unw_frame_info *info, int regnum, int *is_nat) { ulong *addr, *rnat_addr, rnat; ulong regcontent; if (CRASHDEBUG(1)) { fprintf(fp, "%srse_read_reg: bsp: %lx\n", space(unw_debug), info->bsp); unw_debug++; } addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum); if (CRASHDEBUG(1)) { unw_debug--; fprintf(fp, "%srse_read_reg: addr: %lx\n", space(unw_debug), (ulong)addr); } if (((ulong)addr < info->regstk.limit) || ((ulong)addr >= info->regstk.top) || (((long)addr & 0x7) != 0)) { *is_nat = 1; if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: is_nat: %d -- return 0xdeadbeefdeadbeef\n", space(unw_debug), *is_nat); return 0xdeadbeefdeadbeef; } rnat_addr = ia64_rse_rnat_addr(addr); if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: rnat_addr: %lx\n", space(unw_debug), (ulong)rnat_addr); if ((unsigned long) rnat_addr >= info->regstk.top) readmem((ulong)(info->sw) + OFFSET(switch_stack_ar_rnat), KVADDR, &rnat, sizeof(long), "info->sw->ar_rnat", FAULT_ON_ERROR); else readmem((ulong)rnat_addr, KVADDR, &rnat, sizeof(long), "rnat_addr", FAULT_ON_ERROR); *is_nat = (rnat & (1UL << ia64_rse_slot_num(addr))) != 0; if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: rnat: %lx is_nat: %d\n", space(unw_debug), rnat, *is_nat); readmem((ulong)addr, KVADDR, ®content, sizeof(long), "rse_read_reg addr", FAULT_ON_ERROR); if (CRASHDEBUG(1)) { char buf[BUFSIZE]; 
fprintf(fp, "%srse_read_reg: addr: %lx => %lx ", space(unw_debug), (ulong)addr, regcontent); if (is_kernel_text(regcontent)) fprintf(fp, "(%s)", value_to_symstr(regcontent, buf, pc->output_radix)); fprintf(fp, "\n"); } return regcontent; } /* * Display the arguments to a function, presuming that they are found at * the beginning of the sol section. */ #define MAX_REGISTER_PARAMS (8) static void rse_function_params(struct unw_frame_info *info, char *name) { int i; int numargs, is_nat[MAX_REGISTER_PARAMS]; char buf1[BUFSIZE], buf2[BUFSIZE], *p1, *p2; ulong arglist[MAX_REGISTER_PARAMS]; numargs = MIN(get_function_numargs(info->ip), MAX_REGISTER_PARAMS); if (CRASHDEBUG(1)) fprintf(fp, "rse_function_params: %s: %d args\n", name, numargs); switch (numargs) { case 0: fprintf(fp, " (void)\n"); return; case -1: return; default: break; } for (i = 0; i < numargs; i++) arglist[i] = rse_read_reg(info, i, &is_nat[i]); sprintf(buf1, " ("); for (i = 0; i < numargs; i++) { p1 = &buf1[strlen(buf1)]; if (is_nat[i]) sprintf(buf2, "[NAT]"); else { if ((p2 = value_symbol(arglist[i]))) sprintf(buf2, "%s", p2); else sprintf(buf2, "%lx", arglist[i]); } sprintf(p1, "%s%s", i ? 
", " : "", buf2); if (strlen(buf1) >= 80) sprintf(p1, ",\n %s", buf2); } strcat(buf1, ")\n"); fprintf(fp, "%s", buf1); } static void dump_unw_frame_info(struct unw_frame_info *info) { unw_debug++; fprintf(fp, "%sregstk.limit: %lx\n", space(unw_debug), info->regstk.limit); fprintf(fp, "%s regstk.top: %lx\n", space(unw_debug), info->regstk.top); fprintf(fp, "%s sw: %lx\n", space(unw_debug), (ulong)info->sw); fprintf(fp, "%s bsp: %lx\n", space(unw_debug), info->bsp); fprintf(fp, "%s cfm: %lx\n", space(unw_debug), (ulong)info->cfm); fprintf(fp, "%s ip: %lx\n", space(unw_debug), info->ip); unw_debug--; } static const char *hook_files[] = { "arch/ia64/kernel/entry.S", "arch/ia64/kernel/head.S", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook ia64_line_number_hooks[] = { {"ia64_execve", ENTRY_S}, {"sys_clone2", ENTRY_S}, {"sys_clone", ENTRY_S}, {"ia64_switch_to", ENTRY_S}, {"save_switch_stack", ENTRY_S}, {"load_switch_stack", ENTRY_S}, {"__ia64_syscall", ENTRY_S}, {"invoke_syscall_trace", ENTRY_S}, {"ia64_trace_syscall", ENTRY_S}, {"ia64_ret_from_clone", ENTRY_S}, {"ia64_ret_from_syscall", ENTRY_S}, {"ia64_leave_kernel", ENTRY_S}, {"handle_syscall_error", ENTRY_S}, {"invoke_schedule_tail", ENTRY_S}, {"invoke_schedule", ENTRY_S}, {"handle_signal_delivery", ENTRY_S}, {"sys_rt_sigsuspend", ENTRY_S}, {"sys_rt_sigreturn", ENTRY_S}, {"ia64_prepare_handle_unaligned", ENTRY_S}, {"unw_init_running", ENTRY_S}, {"_start", HEAD_S}, {"ia64_save_debug_regs", HEAD_S}, {"ia64_load_debug_regs", HEAD_S}, {"__ia64_save_fpu", HEAD_S}, {"__ia64_load_fpu", HEAD_S}, {"__ia64_init_fpu", HEAD_S}, {"ia64_switch_mode", HEAD_S}, {"ia64_set_b1", HEAD_S}, {"ia64_set_b2", HEAD_S}, {"ia64_set_b3", HEAD_S}, {"ia64_set_b4", HEAD_S}, {"ia64_set_b5", HEAD_S}, {"ia64_spinlock_contention", HEAD_S}, {NULL, NULL} /* list must be NULL-terminated */ }; void ia64_dump_line_number(ulong ip) { int retries; char buf[BUFSIZE], *p; retries = 0; 
try_closest: get_line_number(ip, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? "" : " (cannot determine file and line number)\n"); else { retries++; ip = closest_symbol_value(ip); goto try_closest; } } } /* * For now, just make it a region 7 address for all cases, ignoring the * fact that it might be in a 2.6 kernel's non-unity mapped region. XXX */ ulong ia64_PTOV(ulong paddr) { ulong vaddr; switch (machdep->machspec->kernel_region) { case KERNEL_VMALLOC_REGION: // error(FATAL, "ia64_PTOV: TBD for kernels loaded in region 5\n"); default: case KERNEL_CACHED_REGION: vaddr = paddr + (ulong)(KERNEL_CACHED_BASE); } return vaddr; } /* * Account for 2.6 kernel mapping in region 5. */ ulong ia64_VTOP(ulong vaddr) { struct machine_specific *ms; ulong paddr; ms = &ia64_machine_specific; switch (VADDR_REGION(vaddr)) { case KERNEL_CACHED_REGION: paddr = vaddr - (ulong)(KERNEL_CACHED_BASE); break; case KERNEL_UNCACHED_REGION: paddr = vaddr - (ulong)(KERNEL_UNCACHED_BASE); break; /* * Differentiate between a 2.6 kernel address in region 5 and * a real vmalloc() address. */ case KERNEL_VMALLOC_REGION: /* * Real vmalloc() addresses should never be the subject * of a VTOP() translation. */ if (ia64_IS_VMALLOC_ADDR(vaddr) || (ms->kernel_region != KERNEL_VMALLOC_REGION)) return(error(FATAL, "ia64_VTOP(%lx): unexpected region 5 address\n", vaddr)); /* * If it's a region 5 kernel address, subtract the starting * kernel virtual address, and then add the base physical page. */ paddr = vaddr - ms->kernel_start + (ms->phys_start & KERNEL_TR_PAGE_MASK); break; default: return(error(FATAL, "ia64_VTOP(%lx): invalid kernel address\n", vaddr)); } return paddr; } /* * vmalloc() starting address is either the traditional 0xa000000000000000 or * bumped up in 2.6 to 0xa000000200000000. 
*/ int ia64_IS_VMALLOC_ADDR(ulong vaddr) { return ((vaddr >= machdep->machspec->vmalloc_start) && (vaddr < (ulong)KERNEL_UNCACHED_BASE)); } static int compare_kvaddr(const void *v1, const void *v2) { struct vaddr_range *r1, *r2; r1 = (struct vaddr_range *)v1; r2 = (struct vaddr_range *)v2; return (r1->start < r2->start ? -1 : r1->start == r2->start ? 0 : 1); } static int ia64_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->identity_map_base; vrp[cnt++].end = vt->high_memory; if (machdep->machspec->kernel_start != machdep->identity_map_base) { vrp[cnt].type = KVADDR_START_MAP; vrp[cnt].start = machdep->machspec->kernel_start; vrp[cnt++].end = kt->end; } vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = machdep->machspec->vmalloc_start; vrp[cnt++].end = (ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT; if (VADDR_REGION(vt->node_table[0].mem_map) == KERNEL_VMALLOC_REGION) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = vt->node_table[0].mem_map; vrp[cnt].end = vt->node_table[vt->numnodes-1].mem_map + (vt->node_table[vt->numnodes-1].size * SIZE(page)); /* * Prevent overlap with KVADDR_VMALLOC range. */ if (vrp[cnt].start > vrp[cnt-1].start) vrp[cnt-1].end = vrp[cnt].start; cnt++; } qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); return cnt; } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 4 level page table. 
*/ static int ia64_vtop_4l_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { error(FATAL, "ia64_vtop_4l_xen_wpt: TBD\n"); return FALSE; #ifdef TBD ulong *page_dir; ulong *page_upper; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pud_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); if (verbose) fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); if (!(pud_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); 
} return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; #endif } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 3 level page table. */ static int ia64_vtop_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { error(FATAL, "ia64_vtop_xen_wpt: TBD\n"); return FALSE; #ifdef TBD ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if 
(verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; #endif } #include "netdump.h" #include "xen_dom0.h" /* * Determine the relocatable physical address base. */ static void ia64_calc_phys_start(void) { FILE *iomem; int i, found, errflag; char buf[BUFSIZE]; char *p1; ulong kernel_code_start; struct vmcore_data *vd; ulong phys_start, text_start; Elf64_Phdr *phdr = NULL; /* * Default to 64MB. */ machdep->machspec->phys_start = DEFAULT_PHYS_START; text_start = symbol_exists("_text") ? symbol_value("_text") : BADADDR; if (ACTIVE()) { if ((iomem = fopen("/proc/iomem", "r")) == NULL) return; errflag = 1; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, ": Kernel code")) { clean_line(buf); errflag = 0; break; } } fclose(iomem); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; else *p1 = NULLCHAR; errflag = 0; kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) return; machdep->machspec->phys_start = kernel_code_start; if (CRASHDEBUG(1)) { if (text_start == BADADDR) fprintf(fp, "_text: (unknown) "); else fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); fprintf(fp, "phys_start: %lx\n\n", machdep->machspec->phys_start); } return; } /* * Get relocation value from whatever dumpfile format is being used. */ if (DISKDUMP_DUMPFILE()) { if (diskdump_phys_base(&phys_start)) { machdep->machspec->phys_start = phys_start; if (CRASHDEBUG(1)) fprintf(fp, "compressed kdump: phys_start: %lx\n", phys_start); } return; } else if (LKCD_DUMPFILE()) { if (lkcd_get_kernel_start(&phys_start)) { machdep->machspec->phys_start = phys_start; if (CRASHDEBUG(1)) fprintf(fp, "LKCD dump: phys_start: %lx\n", phys_start); } } if ((vd = get_kdump_vmcore_data())) { /* * There should be at most one region 5 region, and it * should be equal to "_text". If not, take whatever * region 5 address comes first and hope for the best. 
*/ for (i = found = 0; i < vd->num_pt_load_segments; i++) { phdr = vd->load64 + i; if (phdr->p_vaddr == text_start) { machdep->machspec->phys_start = phdr->p_paddr; found++; break; } } for (i = 0; !found && (i < vd->num_pt_load_segments); i++) { phdr = vd->load64 + i; if (VADDR_REGION(phdr->p_vaddr) == KERNEL_VMALLOC_REGION) { machdep->machspec->phys_start = phdr->p_paddr; found++; break; } } if (found && CRASHDEBUG(1)) { if (text_start == BADADDR) fprintf(fp, "_text: (unknown) "); else fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "p_vaddr: %lx p_paddr: %lx\n", phdr->p_vaddr, phdr->p_paddr); } return; } } /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. */ static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) { /* * Temporarily read physical (machine) addresses from vmcore. */ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) { fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); fprintf(fp, "ia64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); } if ((xkd->p2m_mfn_frame_list = (ulong *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc p2m_frame_list"); if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->p2m_mfn_frame_list, PAGESIZE(), "xen kdump p2m mfn page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn page\n"); xkd->p2m_frames = PAGESIZE()/sizeof(ulong); pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; } physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo) { ulong pgd_idx, pte_idx; ulong pmd, pte; physaddr_t paddr; /* * Temporarily read physical (machine) addresses from vmcore. 
*/ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); xkd->accesses += 2; pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1); pmd = xkd->p2m_mfn_frame_list[pgd_idx] & _PFN_MASK; if (!pmd) { paddr = P2M_FAILURE; goto out; } pmd += ((pseudo >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); if (pmd != xkd->last_pmd_read) { if (!readmem(pmd, PHYSADDR, &pte, sizeof(ulong), "ia64_xen_kdump_p2m pmd", RETURN_ON_ERROR)) { xkd->last_pmd_read = BADADDR; xkd->last_mfn_read = BADADDR; paddr = P2M_FAILURE; goto out; } xkd->last_pmd_read = pmd; } else { pte = xkd->last_mfn_read; xkd->cache_hits++; } pte = pte & _PFN_MASK; if (!pte) { paddr = P2M_FAILURE; goto out; } if (pte != xkd->last_mfn_read) { if (!readmem(pte, PHYSADDR, xkd->page, PAGESIZE(), "ia64_xen_kdump_p2m pte page", RETURN_ON_ERROR)) { xkd->last_pmd_read = BADADDR; xkd->last_mfn_read = BADADDR; paddr = P2M_FAILURE; goto out; } xkd->last_mfn_read = pte; } else xkd->cache_hits++; pte_idx = (pseudo >> PAGESHIFT()) & (PTRS_PER_PTE - 1); paddr = *(((ulong *)xkd->page) + pte_idx); if (!(paddr & _PAGE_P)) { paddr = P2M_FAILURE; goto out; } paddr = (paddr & _PFN_MASK) | PAGEOFFSET(pseudo); out: pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return paddr; } #include "xendump.h" /* * Create an index of mfns for each page that makes up the * kernel's complete phys_to_machine_mapping[max_pfn] array. 
*/ static int ia64_xendump_p2m_create(struct xendump_data *xd) { if (!symbol_exists("phys_to_machine_mapping")) { xd->flags |= XC_CORE_NO_P2M; return TRUE; } error(FATAL, "ia64_xendump_p2m_create: TBD\n"); /* dummy calls for clean "make [wW]arn" */ ia64_debug_dump_page(NULL, NULL, NULL); ia64_xendump_load_page(0, xd); ia64_xendump_page_index(0, xd); ia64_xendump_panic_task(xd); /* externally called */ ia64_get_xendump_regs(xd, NULL, NULL, NULL); /* externally called */ return FALSE; } static void ia64_debug_dump_page(FILE *ofp, char *page, char *name) { int i; ulong *up; fprintf(ofp, "%s\n", name); up = (ulong *)page; for (i = 0; i < 1024; i++) { fprintf(ofp, "%016lx: %016lx %016lx\n", (ulong)((i * 2) * sizeof(ulong)), *up, *(up+1)); up += 2; } } /* * Find the page associate with the kvaddr, and read its contents * into the passed-in buffer. */ static char * ia64_xendump_load_page(ulong kvaddr, struct xendump_data *xd) { error(FATAL, "ia64_xendump_load_page: TBD\n"); return NULL; } /* * Find the dumpfile page index associated with the kvaddr. 
*/ static int ia64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) { error(FATAL, "ia64_xendump_page_index: TBD\n"); return 0; } static ulong ia64_xendump_panic_task(struct xendump_data *xd) { if (CRASHDEBUG(1)) error(INFO, "ia64_xendump_panic_task: TBD\n"); return NO_TASK; } static void ia64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) { machdep->get_stack_frame(bt, rip, rsp); if (is_task_active(bt->task) && !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)) && STREQ(closest_symbol(*rip), "schedule")) error(INFO, "xendump: switch_stack possibly not saved -- try \"bt -t\"\n"); } /* for XEN Hypervisor analysis */ static int ia64_is_kvaddr_hyper(ulong addr) { return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); } static int ia64_kvtop_hyper(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong virt_percpu_start, phys_percpu_start; ulong addr, dirp, entry; if (!IS_KVADDR(kvaddr)) return FALSE; if (PERCPU_VIRT_ADDR(kvaddr)) { virt_percpu_start = symbol_value("__phys_per_cpu_start"); phys_percpu_start = virt_percpu_start - DIRECTMAP_VIRT_START; *paddr = kvaddr - PERCPU_ADDR + phys_percpu_start; return TRUE; } else if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } else if (!FRAME_TABLE_VIRT_ADDR(kvaddr)) { return FALSE; } /* frametable virtual address */ addr = kvaddr - xhmachdep->frame_table; dirp = symbol_value("frametable_pg_dir"); dirp += ((addr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)) * sizeof(ulong); readmem(dirp, KVADDR, &entry, sizeof(ulong), "frametable_pg_dir", FAULT_ON_ERROR); dirp = entry & _PFN_MASK; if (!dirp) return FALSE; dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); readmem(dirp, PHYSADDR, &entry, sizeof(ulong), "frametable pmd", FAULT_ON_ERROR); dirp = entry & _PFN_MASK; if (!dirp) return FALSE; dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(ulong); readmem(dirp, PHYSADDR, &entry, 
sizeof(ulong), "frametable pte", FAULT_ON_ERROR); if (!(entry & _PAGE_P)) return FALSE; *paddr = (entry & _PFN_MASK) + (kvaddr & (PAGESIZE() - 1)); return TRUE; } static void ia64_post_init_hyper(void) { struct machine_specific *ms; ulong frame_table; ms = &ia64_machine_specific; if (symbol_exists("unw_init_frame_info")) { machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("cpu_user_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } else { machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; } ms->unwind_init(); if (symbol_exists("frame_table")) { frame_table = symbol_value("frame_table"); readmem(frame_table, KVADDR, &xhmachdep->frame_table, sizeof(ulong), "frame_table virtual address", FAULT_ON_ERROR); } else { error(FATAL, "cannot find frame_table virtual address."); } } int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt) { int plen, i; ulong paddr, stackbase, stacktop; ulong *__per_cpu_mca; struct xen_hyper_vcpu_context *vcc; vcc = xen_hyper_vcpu_to_vcpu_context(bt->task); if (!vcc) return 0; if (!symbol_exists("__per_cpu_mca") || !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || (plen < xht->pcpus)) return 0; if (!machdep->kvtop(NULL, addr, &paddr, 0)) return 0; __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * plen); if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, sizeof(ulong) * plen, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) return 0; if 
(CRASHDEBUG(1)) { for (i = 0; i < plen; i++) { fprintf(fp, "__per_cpu_mca[%d]: %lx\n", i, __per_cpu_mca[i]); } } stackbase = __per_cpu_mca[vcc->processor]; stacktop = stackbase + (STACKSIZE() * 2); FREEBUF(__per_cpu_mca); if ((paddr >= stackbase) && (paddr < stacktop)) return 1; else return 0; } static void ia64_init_hyper(int when) { struct syment *sp; switch (when) { case SETUP_ENV: #if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); #endif #if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); #endif break; case PRE_SYMTAB: machdep->verify_symbol = ia64_verify_symbol; machdep->machspec = &ia64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); switch (machdep->pagesize) { case 4096: machdep->stacksize = (power(2, 3) * PAGESIZE()); break; case 8192: machdep->stacksize = (power(2, 2) * PAGESIZE()); break; case 16384: machdep->stacksize = (power(2, 1) * PAGESIZE()); break; case 65536: machdep->stacksize = (power(2, 0) * PAGESIZE()); break; default: machdep->stacksize = 32*1024; break; } if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = ia64_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->machspec->phys_start = UNKNOWN_PHYS_START; /* ODA: if need make hyper version if 
(machdep->cmdline_args[0]) parse_cmdline_args(); */ break; case PRE_GDB: if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->kvbase = HYPERVISOR_VIRT_START; machdep->identity_map_base = HYPERVISOR_VIRT_START; machdep->is_kvaddr = ia64_is_kvaddr_hyper; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ia64_eframe_search; machdep->back_trace = ia64_back_trace_cmd; machdep->processor_speed = xen_hyper_ia64_processor_speed; machdep->uvtop = ia64_uvtop; machdep->kvtop = ia64_kvtop_hyper; machdep->get_stack_frame = ia64_get_stack_frame; machdep->get_stackbase = ia64_get_stackbase; machdep->get_stacktop = ia64_get_stacktop; machdep->translate_pte = ia64_translate_pte; machdep->memory_size = xen_hyper_ia64_memory_size; machdep->dis_filter = ia64_dis_filter; machdep->cmd_mach = ia64_cmd_mach; machdep->get_smp_cpus = xen_hyper_ia64_get_smp_cpus; machdep->line_number_hooks = ia64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; if ((sp = symbol_search("_stext"))) { machdep->machspec->kernel_region = VADDR_REGION(sp->value); machdep->machspec->kernel_start = sp->value; } else { // machdep->machspec->kernel_region = KERNEL_CACHED_REGION; // machdep->machspec->kernel_start = KERNEL_CACHED_BASE; } /* machdep table for Xen Hypervisor */ xhmachdep->pcpu_init = xen_hyper_ia64_pcpu_init; break; case POST_GDB: STRUCT_SIZE_INIT(switch_stack, "switch_stack"); MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, "switch_stack", "ar_bspstore"); MEMBER_OFFSET_INIT(switch_stack_ar_pfs, "switch_stack", "ar_pfs"); MEMBER_OFFSET_INIT(switch_stack_ar_rnat, "switch_stack", "ar_rnat"); MEMBER_OFFSET_INIT(switch_stack_pr, "switch_stack", "pr"); XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); 
XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_vendor, "cpuinfo_ia64", "vendor"); if (symbol_exists("per_cpu__cpu_info")) { xht->cpu_data_address = symbol_value("per_cpu__cpu_info"); } /* kakuma Can this be calculated? */ if (!machdep->hz) { machdep->hz = XEN_HYPER_HZ; } break; case POST_INIT: ia64_post_init_hyper(); break; } } #endif crash-utility-crash-9cd43f5/filesys.c0000664000372000037200000035005315107550337017224 0ustar juerghjuergh/* filesys.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include #include #include #include static void show_mounts(ulong, int, struct task_context *); static int find_booted_kernel(void); static int find_booted_system_map(void); static int verify_utsname(char *); static char **build_searchdirs(int, int *); static int build_kernel_directory(char *); static int redhat_kernel_directory_v1(char *); static int redhat_kernel_directory_v2(char *); static int redhat_debug_directory(char *); static ulong *create_dentry_array(ulong, int *); static ulong *create_dentry_array_percpu(ulong, int *); static void show_fuser(char *, char *); static int mount_point(char *); static int open_file_reference(struct reference *); static void memory_source_init(void); static int get_pathname_component(ulong, ulong, int, char *, char *); char *inode_type(char *, char *); static void match_proc_version(void); static void get_live_memory_source(void); static int memory_driver_module_loaded(int *); static int insmod_memory_driver_module(void); static int get_memory_driver_dev(dev_t *); static int memory_driver_init(void); static int create_memory_device(dev_t); static int match_file_string(char *, char *, char *); static ulong get_root_vfsmount(char *); static void check_live_arch_mismatch(void); static long get_inode_nrpages(ulong); static void dump_inode_page_cache_info(ulong); #define DENTRY_CACHE (20) #define INODE_CACHE (20) #define FILE_CACHE (20) static struct filesys_table { char *dentry_cache; ulong cached_dentry[DENTRY_CACHE]; ulong cached_dentry_hits[DENTRY_CACHE]; int dentry_cache_index; ulong dentry_cache_fills; char *inode_cache; ulong cached_inode[INODE_CACHE]; ulong cached_inode_hits[INODE_CACHE]; int inode_cache_index; ulong inode_cache_fills; char *file_cache; ulong cached_file[FILE_CACHE]; ulong cached_file_hits[FILE_CACHE]; int file_cache_index; ulong file_cache_fills; } filesys_table = { 0 }; static struct filesys_table *ft = &filesys_table; /* * Open the namelist, dumpfile and output devices. 
*/ void fd_init(void) { pc->nfd = pc->kfd = pc->mfd = pc->dfd = -1; if ((pc->nullfp = fopen("/dev/null", "w+")) == NULL) error(INFO, "cannot open /dev/null (for extraneous output)"); if (REMOTE()) remote_fd_init(); else { if (pc->namelist && pc->namelist_debug && pc->system_map) { error(INFO, "too many namelist options:\n %s\n %s\n %s\n", pc->namelist, pc->namelist_debug, pc->system_map); program_usage(SHORT_FORM); } if (pc->namelist) { if (XEN_HYPER_MODE() && !pc->dumpfile) error(FATAL, "Xen hypervisor mode requires a dumpfile\n"); if (!pc->dumpfile && !get_proc_version()) error(INFO, "/proc/version: %s\n", strerror(errno)); } else { if (pc->dumpfile) { error(INFO, "namelist argument required\n"); program_usage(SHORT_FORM); } if (!pc->dumpfile) check_live_arch_mismatch(); if (!find_booted_kernel()) program_usage(SHORT_FORM); } if (!pc->dumpfile) { pc->flags |= LIVE_SYSTEM; get_live_memory_source(); } if ((pc->nfd = open(pc->namelist, O_RDONLY)) < 0) error(FATAL, "%s: %s\n", pc->namelist, strerror(errno)); else { close(pc->nfd); pc->nfd = -1; } if (LOCAL_ACTIVE() && !(pc->namelist_debug || pc->system_map)) { memory_source_init(); match_proc_version(); } } memory_source_init(); if (ACTIVE()) proc_kcore_init(fp, UNUSED); if (CRASHDEBUG(1)) { fprintf(fp, "readmem: %s() ", readmem_function_name()); if (ACTIVE()) { fprintf(fp, "-> %s ", pc->live_memsrc); if (pc->flags & MEMMOD) fprintf(fp, "(module)"); else if (pc->flags & CRASHBUILTIN) fprintf(fp, "(built-in)"); } fprintf(fp, "\n"); } } /* * Do whatever's necessary to handle the memory source. 
*/
static void
memory_source_init(void)
{
	/* Remote sessions handle memory elsewhere unless MEMSRC_LOCAL is set. */
	if (REMOTE() && !(pc->flags2 & MEMSRC_LOCAL))
		return;

	/* Pure debug-data queries need no memory source at all. */
	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	if (LOCAL_ACTIVE()) {
		if (pc->mfd != -1)  /* already been here */
			return;

		/*
		 * The crash memory driver was selected; if it cannot be
		 * initialized, fall back to /dev/mem.
		 */
		if (!STREQ(pc->live_memsrc, "/dev/mem") &&
		     STREQ(pc->live_memsrc, pc->memory_device)) {
			if (memory_driver_init())
				return;

			error(INFO, "cannot initialize crash memory driver\n");
			error(INFO, "using /dev/mem\n\n");
			pc->flags &= ~MEMMOD;
			pc->flags |= DEVMEM;
			pc->readmem = read_dev_mem;
			pc->writemem = write_dev_mem;
			pc->live_memsrc = "/dev/mem";
		}

		if (STREQ(pc->live_memsrc, "/dev/mem")) {
			/* Prefer read-write access, fall back to read-only. */
			if ((pc->mfd = open("/dev/mem", O_RDWR)) < 0) {
				if ((pc->mfd = open("/dev/mem", O_RDONLY)) < 0)
					error(FATAL, "/dev/mem: %s\n",
						strerror(errno));
			} else
				pc->flags |= MFD_RDWR;
		} else if (STREQ(pc->live_memsrc, "/proc/kcore")) {
			if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0)
				error(FATAL, "/proc/kcore: %s\n",
					strerror(errno));
			if (!proc_kcore_init(fp, pc->mfd))
				error(FATAL,
				    "/proc/kcore: initialization failed\n");
		} else {
			if (!pc->live_memsrc)
				error(FATAL,
				    "cannot find a live memory device\n");
			else
				error(FATAL, "unknown memory device: %s\n",
					pc->live_memsrc);
		}

		return;
	}

	if (pc->dumpfile) {
		if (!file_exists(pc->dumpfile, NULL))
			error(FATAL, "%s: %s\n", pc->dumpfile,
				strerror(ENOENT));

		if (!(pc->flags & DUMPFILE_TYPES))
			error(FATAL, "%s: dump format not supported!\n",
				pc->dumpfile);

		/* Dispatch to the initializer for the detected dump format. */
		if (pc->flags & NETDUMP) {
			if (!netdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & KDUMP) {
			if (!kdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & XENDUMP) {
			if (!xendump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & KVMDUMP) {
			if (!kvmdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & DISKDUMP) {
			if (!diskdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & LKCD) {
			/* LKCD keeps its own open descriptor in pc->dfd. */
			if ((pc->dfd = open(pc->dumpfile, O_RDONLY)) < 0)
				error(FATAL, "%s: %s\n", pc->dumpfile,
					strerror(errno));
			if (!lkcd_dump_init(fp, pc->dfd, pc->dumpfile))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & S390D) {
			if (!s390_dump_init(pc->dumpfile))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & VMWARE_VMSS) {
			if (pc->flags2 & VMWARE_VMSS_GUESTDUMP) {
				if (!vmware_guestdump_init(pc->dumpfile, fp))
					error(FATAL,
					    "%s: initialization failed\n",
						pc->dumpfile);
			} else {
				if (!vmware_vmss_init(pc->dumpfile, fp))
					error(FATAL,
					    "%s: initialization failed\n",
						pc->dumpfile);
			}
		}
	}
}

/*
 * If only a namelist argument is entered for a live system, and the
 * version string doesn't match /proc/version, try to avert a failure
 * by assigning it to a matching System.map.
 */
static void
match_proc_version(void)
{
	char buffer[BUFSIZE], *p1, *p2;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	/* Nothing to compare against if /proc/version was never read. */
	if (!strlen(kt->proc_version))
		return;

	/* The namelist embeds its version string; look for an exact match. */
	if (match_file_string(pc->namelist, kt->proc_version, buffer)) {
		if (CRASHDEBUG(1)) {
			fprintf(fp, "/proc/version:\n%s\n", kt->proc_version);
			fprintf(fp, "%s:\n%s", pc->namelist, buffer);
		}
		return;
	}

	error(WARNING, "%s%sand /proc/version do not match!\n\n",
		pc->namelist,
		strlen(pc->namelist) > 39 ? "\n " : " ");

	/*
	 * find_booted_system_map() requires VTOP(), which used to be a
	 * hardwired masking of the kernel address.  But some architectures
	 * may not know what their physical base address is at this point,
	 * and others may have different machdep->kvbase values, so for all
	 * but the 0-based kernel virtual address architectures, bail out
	 * here with a relevant error message.
	 */
	if (!machine_type("S390") && !machine_type("S390X")) {
		p1 = &kt->proc_version[strlen("Linux version ")];
		p2 = strstr(p1, " ");
		*p2 = NULLCHAR;
		error(WARNING,
		    "/proc/version indicates kernel version: %s\n", p1);
		/*
		 * NOTE(review): the FATAL format below has no conversion for
		 * the trailing p1 argument, and error(FATAL, ...) presumably
		 * does not return, leaving clean_exit() unreachable -- confirm.
		 */
		error(FATAL,
		    "please use the vmlinux file for that kernel version, or try using\n"
		    " the System.map for that kernel version as an additional argument.\n", p1);
		clean_exit(1);
	}

	if (find_booted_system_map())
		pc->flags |= SYSMAP;
}

#define CREATE 1
#define DESTROY 0
#define DEFAULT_SEARCHDIRS 6
#define EXTRA_SEARCHDIRS 5

/*
 * Build (create == CREATE) or free (create == DESTROY) the NULL-terminated
 * list of directories searched for kernels and System.map files.  If
 * "preferred" is non-NULL it receives the index of the debug directory
 * entry, which the caller searches first.
 */
static char **
build_searchdirs(int create, int *preferred)
{
	int i;
	int cnt, start;
	DIR *dirp;
	struct dirent *dp;
	char dirbuf[BUFSIZE];
	static char **searchdirs = { 0 };
	static char *default_searchdirs[DEFAULT_SEARCHDIRS+1] = {
		"/usr/src/linux/",
		"/boot/",
		"/boot/efi/redhat",
		"/boot/efi/EFI/redhat",
		"/usr/lib/debug/boot/",
		"/",
		NULL
	};

	if (!create) {
		/*
		 * Only the entries appended past the defaults were
		 * malloc'd; the defaults are static strings.
		 */
		if (searchdirs) {
			for (i = DEFAULT_SEARCHDIRS; searchdirs[i]; i++)
				free(searchdirs[i]);
			free(searchdirs);
		}
		return NULL;
	}

	if (preferred)
		*preferred = 0;

	/*
	 * Allow, at a minimum, the defaults plus extra directories such as:
	 *
	 *   /lib/modules/<version>/build
	 *   /usr/src/redhat/BUILD/kernel-<version>/linux
	 *   /usr/src/redhat/BUILD/kernel-<version>/linux-<version>
	 *   /usr/lib/debug/lib/modules/<version>
	 */
	cnt = DEFAULT_SEARCHDIRS + EXTRA_SEARCHDIRS;

	if ((dirp = opendir("/usr/src"))) {
		/* One extra slot per /usr/src entry, counted first. */
		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp))
			cnt++;

		if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) {
			error(INFO,
			    "/usr/src/ directory list malloc: %s\n",
				strerror(errno));
			closedir(dirp);
			return default_searchdirs;
		}

		for (i = 0; i < DEFAULT_SEARCHDIRS; i++)
			searchdirs[i] = default_searchdirs[i];
		cnt = DEFAULT_SEARCHDIRS;

		rewinddir(dirp);

		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) {
			if (STREQ(dp->d_name, "linux") ||
			    STREQ(dp->d_name, "redhat") ||
			    STREQ(dp->d_name, ".") ||
			    STREQ(dp->d_name, ".."))
				continue;

			sprintf(dirbuf, "/usr/src/%s", dp->d_name);
			if (mount_point(dirbuf))
				continue;
			if (!is_directory(dirbuf))
				continue;

			if ((searchdirs[cnt] = (char *)
			    malloc(strlen(dirbuf)+2)) == NULL) {
				error(INFO,
				    "/usr/src/ directory entry malloc: %s\n",
					strerror(errno));
				break;
			}
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
		closedir(dirp);

		searchdirs[cnt] = NULL;
	} else {
		if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) {
			error(INFO,
			    "search directory list malloc: %s\n",
				strerror(errno));
			return default_searchdirs;
		}
		for (i = 0; i < DEFAULT_SEARCHDIRS; i++)
			searchdirs[i] = default_searchdirs[i];
		cnt = DEFAULT_SEARCHDIRS;
	}

	if (build_kernel_directory(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO,
			    "/lib/modules/ directory entry malloc: %s\n",
				strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
	}

	if (redhat_kernel_directory_v1(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO,
			    "/usr/src/redhat directory entry malloc: %s\n",
				strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
	}

	if (redhat_kernel_directory_v2(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO,
			    "/usr/src/redhat directory entry malloc: %s\n",
				strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
	}

	if (redhat_debug_directory(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO, "%s directory entry malloc: %s\n",
				dirbuf, strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			/* The debug directory is searched first. */
			if (preferred)
				*preferred = cnt;
			cnt++;
		}
	}

	searchdirs[cnt] = NULL;

	if (CRASHDEBUG(1)) {
		/* Dump the list starting from the preferred entry. */
		i = start = preferred ? *preferred : 0;
		do {
			fprintf(fp, "searchdirs[%d]: %s\n", i, searchdirs[i]);
			if (++i == cnt) {
				if (start != 0)
					i = 0;
				else
					break;
			}
		} while (i != start);
	}

	return searchdirs;
}

/*
 * Derive /lib/modules/<version>/build from the /proc/version string.
 * Returns TRUE with the pathname in buf, FALSE if /proc/version was
 * never gathered.
 */
static int
build_kernel_directory(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "/lib/modules/");

	/* Copy the release string up to the first space. */
	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];
	while (*p1 != ' ')
		*p2++ = *p1++;

	strcat(buf, "/build");
	return TRUE;
}

/*
 * Derive /usr/src/redhat/BUILD/kernel-<maj.min.rev>/linux from
 * /proc/version.
 */
static int
redhat_kernel_directory_v1(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "/usr/src/redhat/BUILD/kernel-");

	/* Copy only the numeric dotted prefix of the release. */
	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];
	while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.'))
		*p2++ = *p1++;

	strcat(buf, "/linux");
	return TRUE;
}

/*
 * Derive /usr/src/redhat/BUILD/kernel-<ver>/linux-<ver> from
 * /proc/version.
 */
static int
redhat_kernel_directory_v2(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "/usr/src/redhat/BUILD/kernel-");

	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];
	while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.'))
		*p2++ = *p1++;

	strcat(buf, "/linux-");

	/* Append the same numeric version a second time. */
	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];
	while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.'))
		*p2++ = *p1++;

	return TRUE;
}

/*
 * Derive <redhat_debug_loc>/<release> (e.g. the debuginfo vmlinux
 * location) from /proc/version.
 */
static int
redhat_debug_directory(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "%s/", pc->redhat_debug_loc);

	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];
	while (*p1 != ' ')
		*p2++ = *p1++;

	return TRUE;
}

/*
 * If a namelist was not entered, presume we're using the currently-running
 * kernel.  Read its version string from /proc/version, and then look in
 * the search directories for a kernel with the same version string embedded
 * in it.
*/
static int
find_booted_kernel(void)
{
	char kernel[BUFSIZE];
	char buffer[BUFSIZE];
	char **searchdirs;
	int i, preferred, wrapped;
	DIR *dirp;
	struct dirent *dp;
	int found;

	pc->flags |= FINDKERNEL;
	fflush(fp);

	if (!file_exists("/proc/version", NULL)) {
		error(INFO,
		    "/proc/version: %s: cannot determine booted kernel\n",
			strerror(ENOENT));
		return FALSE;
	}

	if (!get_proc_version()) {
		error(INFO, "/proc/version: %s\n", strerror(errno));
		return FALSE;
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "\nfind_booted_kernel: search for [%s]\n",
			kt->proc_version);

	searchdirs = build_searchdirs(CREATE, &preferred);

	/*
	 * Start the scan at the preferred (debug) directory; upon hitting
	 * the NULL terminator, wrap around to index 0 and continue until
	 * the preferred index is reached again.
	 */
	for (i = preferred, wrapped = found = FALSE; !found; i++) {
		if (!searchdirs[i]) {
			if (preferred && !wrapped) {
				wrapped = TRUE;
				i = 0;
			} else
				break;
		} else if (wrapped && (preferred == i))
			break;

		dirp = opendir(searchdirs[i]);
		if (!dirp)
			continue;
		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) {
			if (dp->d_name[0] == '.')
				continue;

			sprintf(kernel, "%s%s", searchdirs[i], dp->d_name);

			/* Skip mount points and unreadable/non-kernel files. */
			if (mount_point(kernel) ||
			    !file_readable(kernel) ||
			    !is_kernel(kernel))
				continue;

			if (CRASHDEBUG(1))
				fprintf(fp,
				    "find_booted_kernel: check: %s\n",
					kernel);

			/* A kernel matches if it embeds /proc/version. */
			found = match_file_string(kernel, kt->proc_version,
				buffer);

			if (found)
				break;
		}
		closedir(dirp);
	}

	/* DESTROY (0) acts as a NULL-name sentinel to free cached state. */
	mount_point(DESTROY);
	build_searchdirs(DESTROY, NULL);

	if (found) {
		if ((pc->namelist = (char *)malloc
		    (strlen(kernel)+1)) == NULL)
			error(FATAL, "booted kernel name malloc: %s\n",
				strerror(errno));
		else {
			strcpy(pc->namelist, kernel);
			if (CRASHDEBUG(1))
				fprintf(fp,
				    "find_booted_kernel: found: %s\n",
					pc->namelist);
			return TRUE;
		}
	}

	error(INFO,
	    "cannot find booted kernel -- please enter namelist argument\n\n");

	return FALSE;
}

/*
 * Determine whether a file is a mount point, without the benefit of stat().
 * This horrendous kludge is necessary to avoid uninterruptible stat() or
 * fstat() calls on nfs mount-points where the remote directory is no longer
 * available.
*/ static int mount_point(char *name) { int i; static int mount_points_gathered = -1; static char **mount_points; char *arglist[MAXARGS]; char buf[BUFSIZE]; char mntfile[BUFSIZE]; int argc, found; FILE *mp; /* * The first time through, stash a list of mount points. */ if (mount_points_gathered < 0) { found = mount_points_gathered = 0; if (file_exists("/proc/mounts", NULL)) sprintf(mntfile, "/proc/mounts"); else if (file_exists("/etc/mtab", NULL)) sprintf(mntfile, "/etc/mtab"); else return FALSE; if ((mp = fopen(mntfile, "r")) == NULL) return FALSE; while (fgets(buf, BUFSIZE, mp)) { argc = parse_line(buf, arglist); if (argc < 2) continue; found++; } fclose(mp); if (!(mount_points = (char **)malloc(sizeof(char *) * found))) return FALSE; if ((mp = fopen(mntfile, "r")) == NULL) return FALSE; i = 0; while (fgets(buf, BUFSIZE, mp) && (mount_points_gathered < found)) { argc = parse_line(buf, arglist); if (argc < 2) continue; if ((mount_points[i] = (char *) malloc(strlen(arglist[1])*2))) { strcpy(mount_points[i], arglist[1]); mount_points_gathered++, i++; } } fclose(mp); if (CRASHDEBUG(2)) for (i = 0; i < mount_points_gathered; i++) fprintf(fp, "mount_points[%d]: %s (%lx)\n", i, mount_points[i], (ulong)mount_points[i]); } /* * A null name string means we're done with this routine forever, * so the malloc'd memory can be freed. */ if (!name) { for (i = 0; i < mount_points_gathered; i++) free(mount_points[i]); free(mount_points); return FALSE; } for (i = 0; i < mount_points_gathered; i++) { if (STREQ(name, mount_points[i])) return TRUE; } return FALSE; } /* * If /proc/version exists, get it for verification purposes later. */ int get_proc_version(void) { FILE *version; if (strlen(kt->proc_version)) /* been here, done that... 
*/ return TRUE; if (!file_exists("/proc/version", NULL)) return FALSE; if ((version = fopen("/proc/version", "r")) == NULL) return FALSE; if (fread(&kt->proc_version, sizeof(char), BUFSIZE-1, version) <= 0) { fclose(version); return FALSE; } fclose(version); strip_linefeeds(kt->proc_version); return TRUE; } /* * Given a non-matching kernel namelist, try to find a System.map file * that has a system_utsname whose contents match /proc/version. */ static int find_booted_system_map(void) { char system_map[BUFSIZE]; char **searchdirs; int i; DIR *dirp; struct dirent *dp; int found; fflush(fp); if (!file_exists("/proc/version", NULL)) { error(INFO, "/proc/version: %s: cannot determine booted System.map\n", strerror(ENOENT)); return FALSE; } if (!get_proc_version()) { error(INFO, "/proc/version: %s\n", strerror(errno)); return FALSE; } found = FALSE; /* * To avoid a search, try the obvious first. */ sprintf(system_map, "/boot/System.map"); if (file_readable(system_map) && verify_utsname(system_map)) { found = TRUE; } else { searchdirs = build_searchdirs(CREATE, NULL); for (i = 0; !found && searchdirs[i]; i++) { dirp = opendir(searchdirs[i]); if (!dirp) continue; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (!strstr(dp->d_name, "System.map")) continue; sprintf(system_map, "%s%s", searchdirs[i], dp->d_name); if (mount_point(system_map) || !file_readable(system_map) || !is_system_map(system_map)) continue; if (verify_utsname(system_map)) { found = TRUE; break; } } closedir(dirp); } mount_point(DESTROY); build_searchdirs(DESTROY, NULL); } if (found) { if ((pc->system_map = (char *)malloc (strlen(system_map)+1)) == NULL) error(FATAL, "booted system map name malloc: %s\n", strerror(errno)); strcpy(pc->system_map, system_map); if (CRASHDEBUG(1)) fprintf(fp, "find_booted_system_map: found: %s\n", pc->system_map); return TRUE; } error(INFO, "cannot find booted system map -- please enter namelist or system map\n\n"); return FALSE; } /* * Read the system_utsname 
from /dev/mem, based upon the address found * in the passed-in System.map file, and compare it to /proc/version. */ static int verify_utsname(char *system_map) { char buffer[BUFSIZE]; ulong value; struct new_utsname new_utsname; if (CRASHDEBUG(1)) fprintf(fp, "verify_utsname: check: %s\n", system_map); if (!match_file_string(system_map, "D system_utsname", buffer)) return FALSE; if (extract_hex(buffer, &value, NULLCHAR, TRUE) && (READMEM(pc->mfd, &new_utsname, sizeof(struct new_utsname), value, VTOP(value)) > 0) && ascii_string(new_utsname.release) && ascii_string(new_utsname.version) && STRNEQ(new_utsname.release, "2.") && (strlen(new_utsname.release) > 4) && (strlen(new_utsname.version) > 27)) { if (CRASHDEBUG(1)) { fprintf(fp, "release: [%s]\n", new_utsname.release); fprintf(fp, "version: [%s]\n", new_utsname.version); } if (strstr(kt->proc_version, new_utsname.release) && strstr(kt->proc_version, new_utsname.version)) { return TRUE; } } return FALSE; } /* * Determine whether a file exists, using the caller's stat structure if * one was passed in. */ int file_exists(char *file, struct stat *sp) { struct stat sbuf; if (stat(file, sp ? sp : &sbuf) == 0) return TRUE; return FALSE; } /* * Determine whether a file exists, and if so, if it's readable. */ int file_readable(char *file) { char tmp; int fd; if (!file_exists(file, NULL)) return FALSE; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; if (read(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) { close(fd); return FALSE; } close(fd); return TRUE; } /* * Quick file checksummer. 
*/
int
file_checksum(char *file, long *retsum)
{
	int i;
	int fd;
	ssize_t cnt;
	char buf[MIN_PAGE_SIZE];
	long csum;

	if ((fd = open(file, O_RDONLY)) < 0)
		return FALSE;

	/* Simple additive checksum over every byte of the file. */
	csum = 0;
	BZERO(buf, MIN_PAGE_SIZE);
	while ((cnt = read(fd, buf, MIN_PAGE_SIZE)) > 0) {
		for (i = 0; i < cnt; i++)
			csum += buf[i];
		BZERO(buf, MIN_PAGE_SIZE);
	}
	close(fd);

	*retsum = csum;

	return TRUE;
}

/*
 * Return TRUE if the pathname refers to an existing directory.
 */
int
is_directory(char *file)
{
	struct stat sbuf;

	if (!file || !strlen(file))
		return(FALSE);

	if (stat(file, &sbuf) == -1)
		return(FALSE);  /* This file doesn't exist. */

	return((sbuf.st_mode & S_IFMT) == S_IFDIR ? TRUE : FALSE);
}

/*
 * Search a directory tree for filename, and if found, return a temporarily
 * allocated buffer containing the full pathname.  The "done" business is
 * protection against fgets() prematurely returning NULL before the find
 * command completes.  (I thought this was impossible until I saw it happen...)
 * When time permits, rewrite this doing the search by hand.
 */
char *
search_directory_tree(char *directory, char *file, int follow_links)
{
	char command[BUFSIZE];
	char buf[BUFSIZE];
	char *retbuf, *start, *end, *module;
	FILE *pipe;
	regex_t regex;
	int regex_used, done;

	if (!file_exists("/usr/bin/find", NULL) ||
	    !file_exists("/bin/echo", NULL) ||
	    !is_directory(directory) ||
	    (*file == '('))
		return NULL;

	/* The trailing echo marks the end of the find output. */
	sprintf(command,
	    "/usr/bin/find %s %s -name %s -print; /bin/echo search done",
		follow_links ? "-L" : "", directory, file);

	if ((pipe = popen(command, "r")) == NULL) {
		error(INFO, "%s: %s\n", command, strerror(errno));
		return NULL;
	}

	done = FALSE;
	retbuf = NULL;

	/*
	 * A bracket expression in the filename switches matching from
	 * exact string comparison to POSIX basic regex.
	 */
	regex_used = ((start = strstr(file, "[")) &&
	    (end = strstr(file, "]")) &&
	    (start < end) &&
	    (regcomp(&regex, file, 0) == 0));

	/*
	 * NOTE(review): if the pipe hits EOF before "search done" appears,
	 * this loop re-calls fgets() indefinitely -- presumably relying on
	 * the echo always arriving; confirm before reworking.
	 */
	while (fgets(buf, BUFSIZE-1, pipe) || !done) {
		if (STREQ(buf, "search done\n")) {
			done = TRUE;
			break;
		}

		if (!retbuf && !regex_used &&
		    STREQ((char *)basename(strip_linefeeds(buf)), file)) {
			retbuf = GETBUF(strlen(buf)+1);
			strcpy(retbuf, buf);
		}

		if (!retbuf && regex_used) {
			module = basename(strip_linefeeds(buf));
			if (regexec(&regex, module, 0, NULL, 0) == 0) {
				retbuf = GETBUF(strlen(buf)+1);
				strcpy(retbuf, buf);
			}
		}
	}

	if (regex_used)
		regfree(&regex);

	pclose(pipe);

	return retbuf;
}

/*
 * Determine whether a file exists, and if so, if it's a tty.
 */
int
is_a_tty(char *filename)
{
	int fd;

	if ((fd = open(filename, O_RDONLY)) < 0)
		return FALSE;

	if (isatty(fd)) {
		close(fd);
		return TRUE;
	}

	close(fd);
	return FALSE;
}

/*
 * Open a tmpfile for command output.  fp is stashed in pc->saved_fp, and
 * temporarily set to the new FILE pointer.  This allows a command to still
 * print to the original output while the tmpfile is still open.
 */
#define OPEN_ONLY_ONCE

#ifdef OPEN_ONLY_ONCE
void
open_tmpfile(void)
{
	int ret ATTRIBUTE_UNUSED;

	if (pc->tmpfile)
		error(FATAL, "recursive temporary file usage\n");

	/* The single permanent tmpfile is created lazily and reused. */
	if (!pc->tmp_fp) {
		if ((pc->tmp_fp = tmpfile()) == NULL)
			error(FATAL, "cannot open temporary file\n");
	}

	/*
	 * NOTE(review): pc->tmpfile is NULL here, so this fflush(NULL)
	 * flushes all open output streams -- confirm that is intended.
	 */
	fflush(pc->tmpfile);
	ret = ftruncate(fileno(pc->tmp_fp), 0);
	rewind(pc->tmp_fp);

	pc->tmpfile = pc->tmp_fp;
	pc->saved_fp = fp;
	fp = pc->tmpfile;
}
#else
void
open_tmpfile(void)
{
	if (pc->tmpfile)
		error(FATAL, "recursive temporary file usage\n");

	if ((pc->tmpfile = tmpfile()) == NULL) {
		error(FATAL, "cannot open temporary file\n");
	} else {
		pc->saved_fp = fp;
		fp = pc->tmpfile;
	}
}
#endif

/*
 * Destroy the reference to the tmpfile, and restore fp to the state
 * it had when open_tmpfile() was called.
*/
#ifdef OPEN_ONLY_ONCE
void
close_tmpfile(void)
{
	int ret ATTRIBUTE_UNUSED;

	/* Truncate and rewind rather than close: the FILE is reused. */
	if (pc->tmpfile) {
		fflush(pc->tmpfile);
		ret = ftruncate(fileno(pc->tmpfile), 0);
		rewind(pc->tmpfile);
		pc->tmpfile = NULL;
		fp = pc->saved_fp;
	} else
		error(FATAL, "trying to close an unopened temporary file\n");
}
#else
void
close_tmpfile(void)
{
	if (pc->tmpfile) {
		fp = pc->saved_fp;
		fclose(pc->tmpfile);
		pc->tmpfile = NULL;
	} else
		error(FATAL, "trying to close an unopened temporary file\n");
}
#endif

/*
 * open_tmpfile2(), set_tmpfile2() and close_tmpfile2() do not use a
 * permanent tmpfile, and do NOT modify the global fp pointer or pc->saved_fp.
 * That being the case, all wrapped functions must be aware of it, or the
 * global fp pointer has to explicitly manipulated by the calling function.
 * The secondary tmpfile should only be used by common functions that might
 * be called by a higher-level function using the primary permanent tmpfile,
 * or alternatively a caller may pass in a FILE pointer to set_tmpfile2().
 */
void
open_tmpfile2(void)
{
	if (pc->tmpfile2)
		error(FATAL, "recursive secondary temporary file usage\n");

	if ((pc->tmpfile2 = tmpfile()) == NULL)
		error(FATAL, "cannot open secondary temporary file\n");

	rewind(pc->tmpfile2);
}

void
close_tmpfile2(void)
{
	/* Unlike close_tmpfile(), silently tolerates no-open-file. */
	if (pc->tmpfile2) {
		fflush(pc->tmpfile2);
		fclose(pc->tmpfile2);
		pc->tmpfile2 = NULL;
	}
}

void
set_tmpfile2(FILE *fptr)
{
	/* Adopt a caller-supplied FILE as the secondary tmpfile. */
	if (pc->tmpfile2)
		error(FATAL, "secondary temporary file already in use\n");

	pc->tmpfile2 = fptr;
}

#define MOUNT_PRINT_INODES 0x1
#define MOUNT_PRINT_FILES 0x2

/*
 * Display basic information about the currently mounted filesystems.
 * The -f option lists the open files for the filesystem(s).
 * The -i option dumps the dirty inodes of the filesystem(s).
 * If an inode address, mount, vfsmount, superblock, device name or
 * directory name is also entered, just show the data for the
 * filesystem indicated by the argument.
*/ static char mount_hdr[BUFSIZE] = { 0 }; void cmd_mount(void) { int i; int c, found; struct task_context *tc, *namespace_context; ulong value1, value2; char *spec_string; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS*2]; ulong vfsmount = 0; int flags = 0; int save_next; ulong pid; /* find a context */ pid = 1; while ((namespace_context = pid_to_context(pid)) == NULL) pid++; while ((c = getopt(argcnt, args, "ifn:")) != EOF) { switch(c) { case 'i': if (INVALID_MEMBER(super_block_s_dirty)) { error(INFO, "the super_block.s_dirty linked list does " "not exist in this kernel\n"); option_not_supported(c); } flags |= MOUNT_PRINT_INODES; break; case 'f': flags |= MOUNT_PRINT_FILES; break; case 'n': switch (str_to_context(optarg, &value1, &tc)) { case STR_PID: case STR_TASK: namespace_context = tc; break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); break; } break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind] == 0) { show_mounts(0, flags, namespace_context); return; } /* * Dump everything into a tmpfile, and then walk * through it for each search argument entered. */ open_tmpfile(); show_mounts(0, (VALID_MEMBER(super_block_s_files) ? MOUNT_PRINT_FILES : 0) | (VALID_MEMBER(super_block_s_dirty) ? MOUNT_PRINT_INODES : 0), namespace_context); pc->curcmd_flags &= ~HEADER_PRINTED; do { spec_string = args[optind]; if (STRNEQ(spec_string, "0x") && hexadecimal(spec_string, 0)) shift_string_left(spec_string, 2); found = FALSE; rewind(pc->tmpfile); save_next = 0; while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf1, mount_hdr)) { save_next = TRUE; continue; } if (save_next) { strcpy(buf2, buf1); save_next = FALSE; } if (!(c = parse_line(buf1, arglist))) continue; for (i = 0; i < c; i++) { if (PATHEQ(arglist[i], spec_string)) found = TRUE; /* * Check for a vfsmount address * embedded in a struct mount. 
*/ if ((i == 0) && (c == 5) && VALID_MEMBER(mount_mnt) && hexadecimal(spec_string, 0) && hexadecimal(arglist[i], 0)) { value1 = htol(spec_string, FAULT_ON_ERROR, NULL); value2 = htol(arglist[i], FAULT_ON_ERROR, NULL) + OFFSET(mount_mnt); if (value1 == value2) found = TRUE; } } if (found) { fp = pc->saved_fp; if (flags) { sscanf(buf2,"%lx", &vfsmount); show_mounts(vfsmount, flags, namespace_context); } else { if (!(pc->curcmd_flags & HEADER_PRINTED)) { fprintf(fp, "%s", mount_hdr); pc->curcmd_flags |= HEADER_PRINTED; } fprintf(fp, "%s", buf2); } found = FALSE; fp = pc->tmpfile; } } } while (args[++optind]); close_tmpfile(); } /* For kernels 5.8-6.7, we're skipping show mount cursor entries. */ #define MNT_CURSOR 0x10000000 /* * Do the work for cmd_mount(); */ static void show_mounts(ulong one_vfsmount, int flags, struct task_context *namespace_context) { ulong one_vfsmount_list; long sb_s_files; long s_dirty; ulong devp, dirp, sbp, dirty, type, name; struct list_data list_data, *ld; char buf1[BUFSIZE*2]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE/2]; ulong *dentry_list, *dp, *mntlist; ulong *vfsmnt; char *vfsmount_buf, *super_block_buf, *mount_buf; ulong dentry, inode, inode_sb, mnt_parent; char *dentry_buf, *inode_buf; int cnt, i, m, files_header_printed; int mount_cnt; int devlen; char mount_files_header[BUFSIZE]; long per_cpu_s_files; sprintf(mount_files_header, "%s%s%s%sTYPE%sPATH\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "DENTRY"), space(MINSPACE), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), space(MINSPACE)); dirp = dentry = mnt_parent = sb_s_files = s_dirty = 0; if (VALID_MEMBER(super_block_s_dirty)) s_dirty = OFFSET(super_block_s_dirty); per_cpu_s_files = MEMBER_EXISTS("file", "f_sb_list_cpu"); dentry_list = NULL; mntlist = 0; ld = &list_data; if (one_vfsmount) { one_vfsmount_list = one_vfsmount; mount_cnt = 1; mntlist = &one_vfsmount_list; } else mntlist = get_mount_list(&mount_cnt, namespace_context); devlen = 
strlen("DEVNAME")+2; if (!strlen(mount_hdr)) { snprintf(mount_hdr, sizeof(mount_hdr), "%s %s %s %s DIRNAME\n", mkstring(buf1, VADDR_PRLEN, CENTER, VALID_STRUCT(mount) ? "MOUNT" : "VFSMOUNT"), mkstring(buf2, VADDR_PRLEN, CENTER, "SUPERBLK"), mkstring(buf3, strlen("rootfs"), LJUST, "TYPE"), mkstring(buf4, devlen, LJUST, "DEVNAME")); } if (flags == 0) fprintf(fp, "%s", mount_hdr); sb_s_files = VALID_MEMBER(super_block_s_files) ? OFFSET(super_block_s_files) : INVALID_OFFSET; if ((flags & MOUNT_PRINT_FILES) && (sb_s_files == INVALID_OFFSET)) { /* * super_block.s_files deprecated */ if (!kernel_symbol_exists("inuse_filps")) { error(INFO, "the super_block.s_files linked list does " "not exist in this kernel\n"); option_not_supported('f'); } /* * No open files list in super_block (2.2). * Use inuse_filps list instead. */ dentry_list = create_dentry_array(symbol_value("inuse_filps"), &cnt); } if (VALID_STRUCT(mount)) { mount_buf = GETBUF(SIZE(mount)); vfsmount_buf = mount_buf + OFFSET(mount_mnt); } else { mount_buf = NULL; vfsmount_buf = GETBUF(SIZE(vfsmount)); } super_block_buf = GETBUF(SIZE(super_block)); for (m = 0, vfsmnt = mntlist; m < mount_cnt; m++, vfsmnt++) { if (VALID_STRUCT(mount)) { readmem(*vfsmnt, KVADDR, mount_buf, SIZE(mount), "mount buffer", FAULT_ON_ERROR); devp = ULONG(mount_buf + OFFSET(mount_mnt_devname)); } else { readmem(*vfsmnt, KVADDR, vfsmount_buf, SIZE(vfsmount), "vfsmount buffer", FAULT_ON_ERROR); devp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_devname)); } if (VALID_MEMBER(vfsmount_mnt_dirname)) { dirp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_dirname)); } else { if (VALID_STRUCT(mount)) { mnt_parent = ULONG(mount_buf + OFFSET(mount_mnt_parent)); dentry = ULONG(mount_buf + OFFSET(mount_mnt_mountpoint)); } else { mnt_parent = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_parent)); dentry = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_mountpoint)); } } sbp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb)); if (!IS_KVADDR(sbp)) { if (sbp == 0 && 
VALID_MEMBER(proc_mounts_cursor) && VALID_MEMBER(vfsmount_mnt_flags)) { int mnt_flags = INT(vfsmount_buf + OFFSET(vfsmount_mnt_flags)); if (mnt_flags == MNT_CURSOR) { if (CRASHDEBUG(1)) fprintf(stderr,"skipped cursor vfsmnt: 0x%lx\n", *vfsmnt); continue; } } error(WARNING, "cannot get super_block from vfsmnt: 0x%lx\n", *vfsmnt); continue; } if (flags) fprintf(fp, "%s", mount_hdr); fprintf(fp, "%s %s ", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(*vfsmnt)), mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(sbp))); readmem(sbp, KVADDR, super_block_buf, SIZE(super_block), "super_block buffer", FAULT_ON_ERROR); type = ULONG(super_block_buf + OFFSET(super_block_s_type)); readmem(type + OFFSET(file_system_type_name), KVADDR, &name, sizeof(void *), "file_system_type name", FAULT_ON_ERROR); if (read_string(name, buf4, (BUFSIZE/2)-1)) sprintf(buf3, "%-6s ", buf4); else sprintf(buf3, "unknown "); if (read_string(devp, buf1, BUFSIZE-1)) sprintf(buf4, "%s ", mkstring(buf2, devlen, LJUST, buf1)); else sprintf(buf4, "%s ", mkstring(buf2, devlen, LJUST, "(unknown)")); sprintf(buf1, "%s%s", buf3, buf4); while ((strlen(buf1) > 17) && (buf1[strlen(buf1)-2] == ' ')) strip_ending_char(buf1, ' '); fprintf(fp, "%s", buf1); if (VALID_MEMBER(vfsmount_mnt_dirname)) { if (read_string(dirp, buf1, BUFSIZE-1)) fprintf(fp, "%-10s\n", buf1); else fprintf(fp, "%-10s\n", "(unknown)"); } else { get_pathname(dentry, buf1, BUFSIZE, 1, VALID_STRUCT(mount) ? mnt_parent + OFFSET(mount_mnt) : mnt_parent); fprintf(fp, "%-10s\n", buf1); } if (flags & MOUNT_PRINT_FILES) { if (sb_s_files != INVALID_OFFSET) { dentry_list = per_cpu_s_files ? 
create_dentry_array_percpu(sbp+ sb_s_files, &cnt) : create_dentry_array(sbp+sb_s_files, &cnt); } files_header_printed = 0; for (i=0, dp = dentry_list; iflags = VERBOSE; ld->start = dirty; ld->end = (sbp+s_dirty); ld->header = "DIRTY INODES\n"; hq_open(); do_list(ld); hq_close(); } else { fprintf(fp, "DIRTY INODES\nNo dirty inodes found\n"); } } if (flags && !one_vfsmount) fprintf(fp, "\n"); } if (!one_vfsmount) FREEBUF(mntlist); if (VALID_STRUCT(mount)) FREEBUF(mount_buf); else FREEBUF(vfsmount_buf); FREEBUF(super_block_buf); } /* * Allocate and fill a list of the currently-mounted vfsmount pointers. */ ulong * get_mount_list(int *cntptr, struct task_context *namespace_context) { struct list_data list_data, *ld; ulong namespace, root, nsproxy, mnt_ns; struct task_context *tc; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; if (symbol_exists("vfsmntlist")) { get_symbol_data("vfsmntlist", sizeof(void *), &ld->start); ld->end = symbol_value("vfsmntlist"); } else if (VALID_MEMBER(task_struct_nsproxy)) { tc = namespace_context; readmem(tc->task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy, sizeof(void *), "task nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy + OFFSET(nsproxy_mnt_ns), KVADDR, &mnt_ns, sizeof(void *), "nsproxy mnt_ns", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine mount list location!\n"); /* Linux 6.8 and later keep list of mounts in an rbtree. 
*/ if (VALID_MEMBER(mnt_namespace_nr_mounts)) { uint nr_mounts; ulong *mntlist, *l; struct rb_root *mounts; struct rb_node *node; readmem(mnt_ns + OFFSET(mnt_namespace_nr_mounts), KVADDR, &nr_mounts, sizeof(uint), "mnt_namespace.nr_mounts", FAULT_ON_ERROR); if (!nr_mounts) return NULL; mounts = (struct rb_root *)(mnt_ns + OFFSET(mnt_namespace_mounts)); mntlist = (ulong *)GETBUF(sizeof(ulong) * nr_mounts); l = mntlist; for (node = rb_first(mounts); node; l++, node = rb_next(node)) *l = (ulong)node - OFFSET(mount_mnt_node); *cntptr = nr_mounts; return mntlist; } if (!readmem(mnt_ns + OFFSET(mnt_namespace_root), KVADDR, &root, sizeof(void *), "mnt_namespace root", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine mount list location!\n"); ld->start = root + OFFSET_OPTION(vfsmount_mnt_list, mount_mnt_list); ld->end = mnt_ns + OFFSET(mnt_namespace_list); } else if (VALID_MEMBER(namespace_root)) { tc = namespace_context; readmem(tc->task + OFFSET(task_struct_namespace), KVADDR, &namespace, sizeof(void *), "task namespace", FAULT_ON_ERROR); if (!readmem(namespace + OFFSET(namespace_root), KVADDR, &root, sizeof(void *), "namespace root", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine mount list location!\n"); if (CRASHDEBUG(1)) console("namespace: %lx => root: %lx\n", namespace, root); ld->start = root + OFFSET_OPTION(vfsmount_mnt_list, mount_mnt_list); ld->end = namespace + OFFSET(namespace_list); } else error(FATAL, "cannot determine mount list location!\n"); if (VALID_MEMBER(vfsmount_mnt_list)) ld->list_head_offset = OFFSET(vfsmount_mnt_list); else if (VALID_STRUCT(mount)) ld->list_head_offset = OFFSET(mount_mnt_list); else ld->member_offset = OFFSET(vfsmount_mnt_next); *cntptr = do_list(ld); return(ld->list_ptr); } /* * Given a dentry, display its address, inode, super_block, pathname. 
*/

/*
 * Display one line of dentry information: the dentry address, its inode,
 * its superblock, the inode type and -- when it can be resolved -- the
 * full pathname.  The pathname is resolved by matching the dentry's
 * superblock against each mounted filesystem (plus the kernel-internal
 * pipe_mnt and sock_mnt mounts) so get_pathname() can be handed the
 * proper vfsmount.
 */
static void
display_dentry_info(ulong dentry)
{
	int m, found;
	char *dentry_buf, *inode_buf, *vfsmount_buf, *mount_buf;
	ulong inode, superblock, sb, vfs;
	ulong *mntlist, *vfsmnt;
	char pathname[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	int mount_cnt;

	fprintf(fp, "%s%s%s%s%s%sTYPE%sPATH\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "DENTRY"),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "SUPERBLK"),
		space(MINSPACE),
		space(MINSPACE));

	dentry_buf = fill_dentry_cache(dentry);
	inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
	pathname[0] = NULLCHAR;

	/* Prefer the inode's superblock; fall back to the dentry's. */
	if (inode) {
		inode_buf = fill_inode_cache(inode);
		superblock = ULONG(inode_buf + OFFSET(inode_i_sb));
	} else {
		inode_buf = NULL;
		superblock = ULONG(dentry_buf + OFFSET(dentry_d_sb));
	}

	/* With no superblock there is no way to pick a vfsmount. */
	if (!superblock)
		goto nopath;

	if (VALID_MEMBER(file_f_vfsmnt)) {
		mntlist = get_mount_list(&mount_cnt, pid_to_context(1));
		/*
		 * Post-2.6.38 kernels embed the vfsmount inside a mount
		 * structure; in that case read the whole mount and point
		 * vfsmount_buf at the embedded member.
		 */
		if (VALID_STRUCT(mount)) {
			mount_buf = GETBUF(SIZE(mount));
			vfsmount_buf = mount_buf + OFFSET(mount_mnt);
		} else {
			mount_buf = NULL;
			vfsmount_buf = GETBUF(SIZE(vfsmount));
		}

		/* Search each mounted filesystem for a superblock match. */
		for (m = found = 0, vfsmnt = mntlist;
		     m < mount_cnt; m++, vfsmnt++) {
			if (VALID_STRUCT(mount))
				readmem(*vfsmnt, KVADDR, mount_buf,
					SIZE(mount), "mount buffer",
					FAULT_ON_ERROR);
			else
				readmem(*vfsmnt, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1,
					VALID_STRUCT(mount) ?
					*vfsmnt+OFFSET(mount_mnt) : *vfsmnt);
				found = TRUE;
			}
		}

		/*
		 * Not on the mount list: try the kernel-internal pipe
		 * mount, if this kernel has one.
		 */
		if (!found && symbol_exists("pipe_mnt")) {
			get_symbol_data("pipe_mnt", sizeof(long), &vfs);
			if (VALID_STRUCT(mount))
				readmem(vfs - OFFSET(mount_mnt), KVADDR,
					mount_buf, SIZE(mount),
					"mount buffer", FAULT_ON_ERROR);
			else
				readmem(vfs, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1, vfs);
				found = TRUE;
			}
		}

		/* And finally the kernel-internal socket mount. */
		if (!found && symbol_exists("sock_mnt")) {
			get_symbol_data("sock_mnt", sizeof(long), &vfs);
			if (VALID_STRUCT(mount))
				readmem(vfs - OFFSET(mount_mnt), KVADDR,
					mount_buf, SIZE(mount),
					"mount buffer", FAULT_ON_ERROR);
			else
				readmem(vfs, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1, vfs);
				found = TRUE;
			}
		}
	} else {
		/* No per-file vfsmount on this kernel: no mount search. */
		mntlist = 0;
		get_pathname(dentry, pathname, BUFSIZE, 1, 0);
	}

	if (mntlist) {
		FREEBUF(mntlist);
		if (VALID_STRUCT(mount))
			FREEBUF(mount_buf);
		else
			FREEBUF(vfsmount_buf);
	}

nopath:
	fprintf(fp, "%s%s%s%s%s%s%s%s%s\n",
		mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(dentry)),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(inode)),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(superblock)),
		space(MINSPACE),
		inode ? inode_type(inode_buf, pathname) : "N/A",
		space(MINSPACE),
		pathname);
}

/*
 * Return a 4-character type string of an inode, modifying a previously
 * gathered pathname if necessary.
*/
char *
inode_type(char *inode_buf, char *pathname)
{
	char *type;
	uint32_t umode32;
	uint16_t umode16;
	uint mode;
	ulong inode_i_op;
	ulong inode_i_fop;
	long i_fop_off;

	mode = umode16 = umode32 = 0;

	/* umode_t is 16 or 32 bits wide depending upon the kernel. */
	switch (SIZE(umode_t))
	{
	case SIZEOF_32BIT:
		umode32 = UINT(inode_buf + OFFSET(inode_i_mode));
		mode = umode32;
		break;

	case SIZEOF_16BIT:
		umode16 = USHORT(inode_buf + OFFSET(inode_i_mode));
		mode = (uint)umode16;
		break;
	}

	type = "UNKN";
	if (S_ISREG(mode))
		type = "REG ";
	if (S_ISLNK(mode))
		type = "LNK ";
	if (S_ISDIR(mode))
		type = "DIR ";
	if (S_ISCHR(mode))
		type = "CHR ";
	if (S_ISBLK(mode))
		type = "BLK ";
	if (S_ISFIFO(mode)) {
		type = "FIFO";
		/*
		 * Distinguish an anonymous kernel pipe from a named FIFO
		 * by comparing its inode (or file) operations against the
		 * kernel's pipe operations; anonymous pipes carry a
		 * meaningless pathname, so clear it.
		 */
		if (symbol_exists("pipe_inode_operations")) {
			inode_i_op = ULONG(inode_buf + OFFSET(inode_i_op));
			if (inode_i_op == symbol_value("pipe_inode_operations")) {
				type = "PIPE";
				pathname[0] = NULLCHAR;
			}
		} else {
			/* Older kernels: check the file operations instead. */
			if (symbol_exists("rdwr_pipe_fops") &&
			    (i_fop_off = OFFSET(inode_i_fop)) > 0) {
				inode_i_fop = ULONG(inode_buf + i_fop_off);
				if (inode_i_fop == symbol_value("rdwr_pipe_fops")) {
					type = "PIPE";
					pathname[0] = NULLCHAR;
				}
			}
		}
	}
	if (S_ISSOCK(mode)) {
		type = "SOCK";
		/* A socket's "/" pathname is meaningless -- drop it. */
		if (STREQ(pathname, "/"))
			pathname[0] = NULLCHAR;
	}

	return type;
}

/*
 * Walk an open file list and return an array of open dentries.
 */
static ulong *
create_dentry_array(ulong list_addr, int *count)
{
	struct list_data list_data, *ld;
	ulong *file, *files_list, *dentry_list;
	ulong dentry, inode;
	char *file_buf, *dentry_buf;
	int cnt, f_count, i;
	int dentry_cnt = 0;

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));

	/* The list head contains the address of the first file. */
	readmem(list_addr, KVADDR, &ld->start,
		sizeof(void *), "file list head", FAULT_ON_ERROR);
	if (list_addr == ld->start) {  /* empty list?
*/ *count = 0; return NULL; } ld->end = list_addr; hq_open(); cnt = do_list(ld); if (cnt == 0) { hq_close(); *count = 0; return NULL; } files_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(files_list, cnt); hq_close(); hq_open(); for (i=0, file = files_list; i__per_cpu_offset[c]; percpu_list[c].dentry_list = create_dentry_array(list_addr, &percpu_list[c].count); total += percpu_list[c].count; } if (total) { dentry_list = (ulong *)GETBUF(total * sizeof(ulong)); for (c = i = 0; c < (cpu+1); c++) { if (percpu_list[c].count == 0) continue; for (j = 0; j < percpu_list[c].count; j++) dentry_list[i++] = percpu_list[c].dentry_list[j]; FREEBUF(percpu_list[c].dentry_list); } } else dentry_list = NULL; FREEBUF(percpu_list); *count = total; return dentry_list; } /* * Stash vfs structure offsets */ void vfs_init(void) { MEMBER_OFFSET_INIT(nlm_file_f_file, "nlm_file", "f_file"); MEMBER_OFFSET_INIT(task_struct_files, "task_struct", "files"); MEMBER_OFFSET_INIT(task_struct_fs, "task_struct", "fs"); MEMBER_OFFSET_INIT(fs_struct_root, "fs_struct", "root"); MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd"); MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt"); MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt"); MEMBER_OFFSET_INIT(files_struct_open_fds_init, "files_struct", "open_fds_init"); MEMBER_OFFSET_INIT(files_struct_fdt, "files_struct", "fdt"); if (VALID_MEMBER(files_struct_fdt)) { MEMBER_OFFSET_INIT(fdtable_max_fds, "fdtable", "max_fds"); MEMBER_OFFSET_INIT(fdtable_max_fdset, "fdtable", "max_fdset"); MEMBER_OFFSET_INIT(fdtable_open_fds, "fdtable", "open_fds"); MEMBER_OFFSET_INIT(fdtable_fd, "fdtable", "fd"); } else { MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); } MEMBER_OFFSET_INIT(file_f_dentry, "file", 
"f_dentry"); MEMBER_OFFSET_INIT(file_f_vfsmnt, "file", "f_vfsmnt"); MEMBER_OFFSET_INIT(file_f_count, "file", "f_count"); MEMBER_OFFSET_INIT(file_f_inode, "file", "f_inode"); MEMBER_OFFSET_INIT(path_mnt, "path", "mnt"); MEMBER_OFFSET_INIT(path_dentry, "path", "dentry"); if (INVALID_MEMBER(file_f_dentry)) { MEMBER_OFFSET_INIT(file_f_path, "file", "f_path"); ASSIGN_OFFSET(file_f_dentry) = OFFSET(file_f_path) + OFFSET(path_dentry); ASSIGN_OFFSET(file_f_vfsmnt) = OFFSET(file_f_path) + OFFSET(path_mnt); } MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode"); MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent"); MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers"); MEMBER_OFFSET_INIT(dentry_d_name, "dentry", "d_name"); MEMBER_OFFSET_INIT(dentry_d_iname, "dentry", "d_iname"); if (INVALID_MEMBER(dentry_d_iname)) { MEMBER_OFFSET_INIT(dentry_d_iname, "dentry", "d_shortname"); } MEMBER_OFFSET_INIT(dentry_d_sb, "dentry", "d_sb"); MEMBER_OFFSET_INIT(inode_i_mode, "inode", "i_mode"); MEMBER_OFFSET_INIT(inode_i_op, "inode", "i_op"); MEMBER_OFFSET_INIT(inode_i_sb, "inode", "i_sb"); MEMBER_OFFSET_INIT(inode_u, "inode", "u"); MEMBER_OFFSET_INIT(qstr_name, "qstr", "name"); MEMBER_OFFSET_INIT(qstr_len, "qstr", "len"); if (INVALID_MEMBER(qstr_len)) ANON_MEMBER_OFFSET_INIT(qstr_len, "qstr", "len"); MEMBER_OFFSET_INIT(vfsmount_mnt_next, "vfsmount", "mnt_next"); MEMBER_OFFSET_INIT(vfsmount_mnt_devname, "vfsmount", "mnt_devname"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_devname, "mount", "mnt_devname"); MEMBER_OFFSET_INIT(vfsmount_mnt_dirname, "vfsmount", "mnt_dirname"); MEMBER_OFFSET_INIT(vfsmount_mnt_sb, "vfsmount", "mnt_sb"); MEMBER_OFFSET_INIT(vfsmount_mnt_list, "vfsmount", "mnt_list"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_list, "mount", "mnt_list"); MEMBER_OFFSET_INIT(vfsmount_mnt_parent, "vfsmount", "mnt_parent"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_parent, "mount", 
"mnt_parent"); MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, "vfsmount", "mnt_mountpoint"); if (INVALID_MEMBER(vfsmount_mnt_devname)) MEMBER_OFFSET_INIT(mount_mnt_mountpoint, "mount", "mnt_mountpoint"); MEMBER_OFFSET_INIT(vfsmount_mnt_flags, "vfsmount", "mnt_flags"); MEMBER_OFFSET_INIT(proc_mounts_cursor, "proc_mounts", "cursor"); MEMBER_OFFSET_INIT(mount_mnt, "mount", "mnt"); MEMBER_OFFSET_INIT(namespace_root, "namespace", "root"); MEMBER_OFFSET_INIT(task_struct_nsproxy, "task_struct", "nsproxy"); if (VALID_MEMBER(namespace_root)) { MEMBER_OFFSET_INIT(namespace_list, "namespace", "list"); MEMBER_OFFSET_INIT(task_struct_namespace, "task_struct", "namespace"); } else if (VALID_MEMBER(task_struct_nsproxy)) { MEMBER_OFFSET_INIT(nsproxy_mnt_ns, "nsproxy", "mnt_ns"); MEMBER_OFFSET_INIT(mnt_namespace_root, "mnt_namespace", "root"); MEMBER_OFFSET_INIT(mnt_namespace_list, "mnt_namespace", "list"); /* Linux 6.8 and later */ MEMBER_OFFSET_INIT(mnt_namespace_mounts, "mnt_namespace", "mounts"); MEMBER_OFFSET_INIT(mnt_namespace_nr_mounts, "mnt_namespace", "nr_mounts"); MEMBER_OFFSET_INIT(mount_mnt_node, "mount", "mnt_node"); } else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) { if (CRASHDEBUG(2)) fprintf(fp, "hardwiring namespace stuff\n"); ASSIGN_OFFSET(task_struct_namespace) = OFFSET(task_struct_files) + sizeof(void *); ASSIGN_OFFSET(namespace_root) = sizeof(void *); ASSIGN_OFFSET(namespace_list) = sizeof(void *) * 2; } MEMBER_OFFSET_INIT(super_block_s_dirty, "super_block", "s_dirty"); MEMBER_OFFSET_INIT(super_block_s_type, "super_block", "s_type"); MEMBER_OFFSET_INIT(file_system_type_name, "file_system_type", "name"); MEMBER_OFFSET_INIT(super_block_s_files, "super_block", "s_files"); MEMBER_OFFSET_INIT(inode_i_flock, "inode", "i_flock"); MEMBER_OFFSET_INIT(file_lock_fl_owner, "file_lock", "fl_owner"); MEMBER_OFFSET_INIT(nlm_host_h_exportent, "nlm_host", "h_exportent"); MEMBER_OFFSET_INIT(svc_client_cl_ident, "svc_client", "cl_ident"); MEMBER_OFFSET_INIT(inode_i_fop, 
"inode","i_fop"); STRUCT_SIZE_INIT(umode_t, "umode_t"); STRUCT_SIZE_INIT(dentry, "dentry"); STRUCT_SIZE_INIT(files_struct, "files_struct"); if (VALID_MEMBER(files_struct_fdt)) STRUCT_SIZE_INIT(fdtable, "fdtable"); STRUCT_SIZE_INIT(file, "file"); STRUCT_SIZE_INIT(inode, "inode"); STRUCT_SIZE_INIT(mount, "mount"); STRUCT_SIZE_INIT(vfsmount, "vfsmount"); STRUCT_SIZE_INIT(fs_struct, "fs_struct"); STRUCT_SIZE_INIT(super_block, "super_block"); if (!(ft->file_cache = (char *)malloc(SIZE(file)*FILE_CACHE))) error(FATAL, "cannot malloc file cache\n"); if (!(ft->dentry_cache = (char *)malloc(SIZE(dentry)*DENTRY_CACHE))) error(FATAL, "cannot malloc dentry cache\n"); if (!(ft->inode_cache = (char *)malloc(SIZE(inode)*INODE_CACHE))) error(FATAL, "cannot malloc inode cache\n"); MEMBER_OFFSET_INIT(rb_root_rb_node, "rb_root","rb_node"); MEMBER_OFFSET_INIT(rb_node_rb_left, "rb_node","rb_left"); MEMBER_OFFSET_INIT(rb_node_rb_right, "rb_node","rb_right"); } void dump_filesys_table(int verbose) { int i; ulong fhits, dhits, ihits; if (!verbose) goto show_hit_rates; for (i = 0; i < FILE_CACHE; i++) fprintf(fp, " cached_file[%2d]: %lx (%ld)\n", i, ft->cached_file[i], ft->cached_file_hits[i]); fprintf(fp, " file_cache: %lx\n", (ulong)ft->file_cache); fprintf(fp, " file_cache_index: %d\n", ft->file_cache_index); fprintf(fp, " file_cache_fills: %ld\n", ft->file_cache_fills); for (i = 0; i < DENTRY_CACHE; i++) fprintf(fp, " cached_dentry[%2d]: %lx (%ld)\n", i, ft->cached_dentry[i], ft->cached_dentry_hits[i]); fprintf(fp, " dentry_cache: %lx\n", (ulong)ft->dentry_cache); fprintf(fp, "dentry_cache_index: %d\n", ft->dentry_cache_index); fprintf(fp, "dentry_cache_fills: %ld\n", ft->dentry_cache_fills); for (i = 0; i < INODE_CACHE; i++) fprintf(fp, " cached_inode[%2d]: %lx (%ld)\n", i, ft->cached_inode[i], ft->cached_inode_hits[i]); fprintf(fp, " inode_cache: %lx\n", (ulong)ft->inode_cache); fprintf(fp, " inode_cache_index: %d\n", ft->inode_cache_index); fprintf(fp, " inode_cache_fills: %ld\n", 
ft->inode_cache_fills); show_hit_rates: if (ft->file_cache_fills) { for (i = fhits = 0; i < FILE_CACHE; i++) fhits += ft->cached_file_hits[i]; fprintf(fp, " file hit rate: %2ld%% (%ld of %ld)\n", (fhits * 100)/ft->file_cache_fills, fhits, ft->file_cache_fills); } if (ft->dentry_cache_fills) { for (i = dhits = 0; i < DENTRY_CACHE; i++) dhits += ft->cached_dentry_hits[i]; fprintf(fp, " dentry hit rate: %2ld%% (%ld of %ld)\n", (dhits * 100)/ft->dentry_cache_fills, dhits, ft->dentry_cache_fills); } if (ft->inode_cache_fills) { for (i = ihits = 0; i < INODE_CACHE; i++) ihits += ft->cached_inode_hits[i]; fprintf(fp, " inode hit rate: %2ld%% (%ld of %ld)\n", (ihits * 100)/ft->inode_cache_fills, ihits, ft->inode_cache_fills); } } /* * Get the page count for the specific mapping */ static long get_inode_nrpages(ulong i_mapping) { char *address_space_buf; ulong nrpages; address_space_buf = GETBUF(SIZE(address_space)); readmem(i_mapping, KVADDR, address_space_buf, SIZE(address_space), "address_space buffer", FAULT_ON_ERROR); nrpages = ULONG(address_space_buf + OFFSET(address_space_nrpages)); FREEBUF(address_space_buf); return nrpages; } static void dump_inode_page_cache_info(ulong inode) { char *inode_buf; ulong i_mapping, nrpages, root_rnode, xarray, count; struct list_pair lp; char header[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; inode_buf = GETBUF(SIZE(inode)); readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", FAULT_ON_ERROR); i_mapping = ULONG(inode_buf + OFFSET(inode_i_mapping)); nrpages = get_inode_nrpages(i_mapping); sprintf(header, "%s NRPAGES\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "INODE")); fprintf(fp, "%s", header); fprintf(fp, "%s %s\n\n", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST|LONG_HEX, MKSTR(inode)), mkstring(buf2, strlen("NRPAGES"), RJUST|LONG_DEC, MKSTR(nrpages))); FREEBUF(inode_buf); if (!nrpages) return; xarray = root_rnode = count = 0; if (MEMBER_EXISTS("address_space", "i_pages") && (STREQ(MEMBER_TYPE_NAME("address_space", 
"i_pages"), "xarray") || (STREQ(MEMBER_TYPE_NAME("address_space", "i_pages"), "radix_tree_root") && MEMBER_EXISTS("radix_tree_root", "xa_head")))) xarray = i_mapping + OFFSET(address_space_page_tree); else root_rnode = i_mapping + OFFSET(address_space_page_tree); lp.index = 0; lp.value = (void *)&dump_inode_page; if (root_rnode) count = do_radix_tree(root_rnode, RADIX_TREE_DUMP_CB, &lp); else if (xarray) count = do_xarray(xarray, XARRAY_DUMP_CB, &lp); if (count != nrpages) error(INFO, "%s page count: %ld nrpages: %ld\n", root_rnode ? "radix tree" : "xarray", count, nrpages); return; } /* * This command displays information about the open files of a context. * For each open file descriptor the file descriptor number, a pointer * to the file struct, pointer to the dentry struct, pointer to the inode * struct, indication of file type and pathname are printed. * The argument can be a task address or a PID number; if no args, the * current context is used. * If the flag -l is passed, any files held open in the kernel by the * lockd server on behalf of an NFS client are displayed. 
*/ void cmd_files(void) { int c; ulong value; struct task_context *tc; int subsequent; struct reference reference, *ref; char *refarg; int open_flags = 0; ref = NULL; refarg = NULL; while ((c = getopt(argcnt, args, "d:R:p:c")) != EOF) { switch(c) { case 'R': if (ref) { error(INFO, "only one -R option allowed\n"); argerrs++; } else { ref = &reference; BZERO(ref, sizeof(struct reference)); ref->str = refarg = optarg; } break; case 'd': value = htol(optarg, FAULT_ON_ERROR, NULL); display_dentry_info(value); return; case 'p': if (VALID_MEMBER(address_space_page_tree) && VALID_MEMBER(inode_i_mapping)) { value = htol(optarg, FAULT_ON_ERROR, NULL); dump_inode_page_cache_info(value); } else option_not_supported('p'); return; case 'c': if (VALID_MEMBER(address_space_nrpages) && VALID_MEMBER(inode_i_mapping)) open_flags |= PRINT_NRPAGES; else option_not_supported('c'); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!args[optind]) { if (!ref) print_task_header(fp, CURRENT_CONTEXT(), 0); open_files_dump(CURRENT_TASK(), open_flags, ref); return; } subsequent = 0; while (args[optind]) { if (ref && subsequent) { BZERO(ref, sizeof(struct reference)); ref->str = refarg; } switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) { if (!ref) print_task_header(fp, tc, subsequent); open_files_dump(tc->task, open_flags, ref); fprintf(fp, "\n"); } break; case STR_TASK: if (!ref) print_task_header(fp, tc, subsequent); open_files_dump(tc->task, open_flags, ref); break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n", args[optind]); break; } subsequent++; optind++; } } #define FILES_REF_HEXNUM (0x1) #define FILES_REF_DECNUM (0x2) #define FILES_REF_FOUND (0x4) #define PRINT_FILE_REFERENCE() \ if (!root_pwd_printed) { \ print_task_header(fp, tc, 0); \ fprintf(fp, "%s", root_pwd); \ root_pwd_printed = TRUE; \ } \ if (!header_printed) { \ fprintf(fp, "%s", files_header);\ 
header_printed = TRUE; \ } \ fprintf(fp, "%s", buf4); \ ref->cmdflags |= FILES_REF_FOUND; #define FILENAME_COMPONENT(P,C) \ ((STREQ((P), "/") && STREQ((C), "/")) || \ (!STREQ((C), "/") && strstr((P),(C)))) /* * open_files_dump() does the work for cmd_files(). */ void open_files_dump(ulong task, int flags, struct reference *ref) { struct task_context *tc; ulong files_struct_addr; ulong fdtable_addr = 0; char *files_struct_buf, *fdtable_buf = NULL; ulong fs_struct_addr; char *dentry_buf, *fs_struct_buf; char *ret ATTRIBUTE_UNUSED; ulong root_dentry, pwd_dentry; ulong root_inode, pwd_inode; ulong vfsmnt; int max_fdset = 0; int max_fds = 0; ulong open_fds_addr; int open_fds_size; ulong *open_fds; ulong fd; ulong file; ulong value; int i, j, use_path; int header_printed = 0; char root_pathname[BUFSIZE]; char pwd_pathname[BUFSIZE]; char files_header[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char root_pwd[BUFSIZE*4]; int root_pwd_printed = 0; int file_dump_flags = 0; BZERO(root_pathname, BUFSIZE); BZERO(pwd_pathname, BUFSIZE); files_struct_buf = GETBUF(SIZE(files_struct)); if (VALID_STRUCT(fdtable)) fdtable_buf = GETBUF(SIZE(fdtable)); fill_task_struct(task); if (flags & PRINT_NRPAGES) { sprintf(files_header, " FD%s%s%s%s%sNRPAGES%sTYPE%sPATH\n", space(MINSPACE), mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), mkstring(buf2, MAX(VADDR_PRLEN, strlen("I_MAPPING")), BITS32() ? 
(CENTER|RJUST) : (CENTER|LJUST), "I_MAPPING"), space(MINSPACE), space(MINSPACE), space(MINSPACE)); } else { sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n", space(MINSPACE), mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "FILE"), space(MINSPACE), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "DENTRY"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), space(MINSPACE)); } tc = task_to_context(task); if (ref) ref->cmdflags = 0; fs_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_fs)); if (fs_struct_addr) { fs_struct_buf = GETBUF(SIZE(fs_struct)); readmem(fs_struct_addr, KVADDR, fs_struct_buf, SIZE(fs_struct), "fs_struct buffer", FAULT_ON_ERROR); use_path = (MEMBER_TYPE("fs_struct", "root") == TYPE_CODE_STRUCT); if (use_path) root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root) + OFFSET(path_dentry)); else root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root)); if (root_dentry) { if (VALID_MEMBER(fs_struct_rootmnt)) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_rootmnt)); get_pathname(root_dentry, root_pathname, BUFSIZE, 1, vfsmnt); } else if (use_path) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_root) + OFFSET(path_mnt)); get_pathname(root_dentry, root_pathname, BUFSIZE, 1, vfsmnt); } else { get_pathname(root_dentry, root_pathname, BUFSIZE, 1, 0); } } if (use_path) pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd) + OFFSET(path_dentry)); else pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd)); if (pwd_dentry) { if (VALID_MEMBER(fs_struct_pwdmnt)) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_pwdmnt)); get_pathname(pwd_dentry, pwd_pathname, BUFSIZE, 1, vfsmnt); } else if (use_path) { vfsmnt = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd) + OFFSET(path_mnt)); get_pathname(pwd_dentry, pwd_pathname, BUFSIZE, 1, vfsmnt); } else { get_pathname(pwd_dentry, pwd_pathname, BUFSIZE, 1, 0); } } if ((flags & PRINT_INODES) && root_dentry && pwd_dentry) { dentry_buf = fill_dentry_cache(root_dentry); 
root_inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); dentry_buf = fill_dentry_cache(pwd_dentry); pwd_inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); fprintf(fp, "ROOT: %lx %s CWD: %lx %s\n", root_inode, root_pathname, pwd_inode, pwd_pathname); } else if (ref) { snprintf(root_pwd, sizeof(root_pwd), "ROOT: %s CWD: %s \n", root_pathname, pwd_pathname); if (FILENAME_COMPONENT(root_pathname, ref->str) || FILENAME_COMPONENT(pwd_pathname, ref->str)) { print_task_header(fp, tc, 0); fprintf(fp, "%s", root_pwd); root_pwd_printed = TRUE; ref->cmdflags |= FILES_REF_FOUND; } } else fprintf(fp, "ROOT: %s CWD: %s\n", root_pathname, pwd_pathname); FREEBUF(fs_struct_buf); } files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files)); if (files_struct_addr) { readmem(files_struct_addr, KVADDR, files_struct_buf, SIZE(files_struct), "files_struct buffer", FAULT_ON_ERROR); if (VALID_MEMBER(files_struct_max_fdset)) { max_fdset = INT(files_struct_buf + OFFSET(files_struct_max_fdset)); max_fds = INT(files_struct_buf + OFFSET(files_struct_max_fds)); } } if (VALID_MEMBER(files_struct_fdt)) { fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt)); if (fdtable_addr) { readmem(fdtable_addr, KVADDR, fdtable_buf, SIZE(fdtable), "fdtable buffer", FAULT_ON_ERROR); if (VALID_MEMBER(fdtable_max_fdset)) max_fdset = INT(fdtable_buf + OFFSET(fdtable_max_fdset)); else max_fdset = -1; max_fds = INT(fdtable_buf + OFFSET(fdtable_max_fds)); } } if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) || !files_struct_addr || max_fdset == 0 || max_fds == 0) { if (ref) { if (ref->cmdflags & FILES_REF_FOUND) fprintf(fp, "\n"); } else fprintf(fp, "No open files\n"); if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); return; } if (ref && IS_A_NUMBER(ref->str)) { if (hexadecimal_only(ref->str, 0)) { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= FILES_REF_HEXNUM; } else { value = dtol(ref->str, FAULT_ON_ERROR, NULL); if (value <= MAX(max_fdset, 
max_fds)) { ref->decval = value; ref->cmdflags |= FILES_REF_DECNUM; } else { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= FILES_REF_HEXNUM; } } } if (VALID_MEMBER(fdtable_open_fds)) open_fds_addr = ULONG(fdtable_buf + OFFSET(fdtable_open_fds)); else open_fds_addr = ULONG(files_struct_buf + OFFSET(files_struct_open_fds)); open_fds_size = MAX(max_fdset, max_fds) / BITS_PER_BYTE; open_fds = (ulong *)GETBUF(open_fds_size); if (!open_fds) { if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); return; } if (open_fds_addr) { if (VALID_MEMBER(files_struct_open_fds_init) && (open_fds_addr == (files_struct_addr + OFFSET(files_struct_open_fds_init)))) BCOPY(files_struct_buf + OFFSET(files_struct_open_fds_init), open_fds, open_fds_size); else readmem(open_fds_addr, KVADDR, open_fds, open_fds_size, "fdtable open_fds", FAULT_ON_ERROR); } if (VALID_MEMBER(fdtable_fd)) fd = ULONG(fdtable_buf + OFFSET(fdtable_fd)); else fd = ULONG(files_struct_buf + OFFSET(files_struct_fd)); if (!open_fds_addr || !fd) { if (ref && (ref->cmdflags & FILES_REF_FOUND)) fprintf(fp, "\n"); if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); FREEBUF(open_fds); return; } file_dump_flags = DUMP_FULL_NAME | DUMP_EMPTY_FILE; if (flags & PRINT_NRPAGES) file_dump_flags |= DUMP_FILE_NRPAGES; j = 0; for (;;) { unsigned long set; i = j * BITS_PER_LONG; if (((max_fdset >= 0) && (i >= max_fdset)) || (i >= max_fds)) break; set = open_fds[j++]; while (set) { if (set & 1) { readmem(fd + i*sizeof(struct file *), KVADDR, &file, sizeof(struct file *), "fd file", FAULT_ON_ERROR); if (ref && file) { open_tmpfile(); if (file_dump(file, 0, 0, i, file_dump_flags)) { BZERO(buf4, BUFSIZE); rewind(pc->tmpfile); ret = fgets(buf4, BUFSIZE, pc->tmpfile); close_tmpfile(); ref->refp = buf4; if (open_file_reference(ref)) { PRINT_FILE_REFERENCE(); } } else close_tmpfile(); } else if (file) { if (!header_printed) { fprintf(fp, "%s", files_header); header_printed = 1; } file_dump(file, 0, 
0, i, file_dump_flags); } } i++; set >>= 1; } } if (!header_printed && !ref) fprintf(fp, "No open files\n"); if (ref && (ref->cmdflags & FILES_REF_FOUND)) fprintf(fp, "\n"); if (fdtable_buf) FREEBUF(fdtable_buf); FREEBUF(files_struct_buf); FREEBUF(open_fds); } /* * Check an open file string for references. */ static int open_file_reference(struct reference *ref) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int i, fd, argcnt; ulong vaddr; strcpy(buf, ref->refp); if ((argcnt = parse_line(buf, arglist)) < 5) return FALSE; if (ref->cmdflags & (FILES_REF_HEXNUM|FILES_REF_DECNUM)) { fd = dtol(arglist[0], FAULT_ON_ERROR, NULL); if (((ref->cmdflags & FILES_REF_HEXNUM) && (fd == ref->hexval)) || ((ref->cmdflags & FILES_REF_DECNUM) && (fd == ref->decval))) { return TRUE; } for (i = 1; i < 4; i++) { if (STREQ(arglist[i], "?")) continue; vaddr = htol(arglist[i], FAULT_ON_ERROR, NULL); if (vaddr == ref->hexval) return TRUE; } } if (STREQ(ref->str, arglist[4])) { return TRUE; } if ((argcnt == 6) && FILENAME_COMPONENT(arglist[5], ref->str)) { return TRUE; } return FALSE; } #ifdef DEPRECATED /* * nlm_files_dump() prints files held open by lockd server on behalf * of NFS clients */ #define FILE_NRHASH 32 char nlm_files_header[BUFSIZE] = { 0 }; char *nlm_header = \ "Files open by lockd for client discretionary file locks:\n"; void nlm_files_dump(void) { int header_printed = 0; int i, j, cnt; ulong nlmsvc_ops, nlm_files; struct syment *nsp; ulong nlm_files_array[FILE_NRHASH]; struct list_data list_data, *ld; ulong *file, *files_list; ulong dentry, inode, flock, host, client; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (!strlen(nlm_files_header)) { sprintf(nlm_files_header, "CLIENT %s %s%sTYPE%sPATH\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "NLM_FILE"), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"), space(MINSPACE), space(MINSPACE)); } if (!symbol_exists("nlm_files") || !symbol_exists("nlmsvc_ops") || !symbol_exists("nfsd_nlm_ops")) { goto out; } get_symbol_data("nlmsvc_ops", 
sizeof(void *), &nlmsvc_ops); if (nlmsvc_ops != symbol_value("nfsd_nlm_ops")) { goto out; } if ((nsp = next_symbol("nlm_files", NULL)) == NULL) { error(WARNING, "cannot find next symbol after nlm_files\n"); goto out; } nlm_files = symbol_value("nlm_files"); if (((nsp->value - nlm_files) / sizeof(void *)) != FILE_NRHASH ) { error(WARNING, "FILE_NRHASH has changed from %d\n", FILE_NRHASH); if (((nsp->value - nlm_files) / sizeof(void *)) < FILE_NRHASH ) goto out; } readmem(nlm_files, KVADDR, nlm_files_array, sizeof(ulong) * FILE_NRHASH, "nlm_files array", FAULT_ON_ERROR); for (i = 0; i < FILE_NRHASH; i++) { if (nlm_files_array[i] == 0) { continue; } ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->start = nlm_files_array[i]; hq_open(); cnt = do_list(ld); files_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(files_list, cnt); hq_close(); for (j=0, file = files_list; j 1 || !STREQ(buf, "/")) && !STRNEQ(tmpname, "/")) { sprintf(pathname, "%s%s%s", buf, "/", tmpname); } else { sprintf(pathname, "%s%s", buf, tmpname); } } } else { strncpy(pathname, buf, BUFSIZE); } parent = ULONG(dentry_buf + OFFSET(dentry_d_parent)); if (tmp_dentry == parent && full) { if (VALID_MEMBER(vfsmount_mnt_mountpoint)) { if (tmp_vfsmnt) { if (strncmp(pathname, "//", 2) == 0) shift_string_left(pathname, 1); readmem(tmp_vfsmnt, KVADDR, vfsmnt_buf, SIZE(vfsmount), "vfsmount buffer", FAULT_ON_ERROR); parent = ULONG(vfsmnt_buf + OFFSET(vfsmount_mnt_mountpoint)); mnt_parent = ULONG(vfsmnt_buf + OFFSET(vfsmount_mnt_parent)); if (tmp_vfsmnt == mnt_parent) break; else tmp_vfsmnt = mnt_parent; goto more_vfsmnt; } } else if (VALID_STRUCT(mount)) { if (tmp_vfsmnt) { if (strncmp(pathname, "//", 2) == 0) shift_string_left(pathname, 1); readmem(tmp_vfsmnt - OFFSET(mount_mnt), KVADDR, mnt_buf, SIZE(mount), "mount buffer", FAULT_ON_ERROR); parent = ULONG(mnt_buf + OFFSET(mount_mnt_mountpoint)); mnt_parent = ULONG(mnt_buf + OFFSET(mount_mnt_parent)); if ((tmp_vfsmnt - OFFSET(mount_mnt)) 
== mnt_parent) break; else tmp_vfsmnt = mnt_parent + OFFSET(mount_mnt); goto more_vfsmnt; } } else { parent = ULONG(dentry_buf + OFFSET(dentry_d_covers)); } } } while (tmp_dentry != parent && parent); if (!STREQ(pathname, "/") && LASTCHAR(pathname) == '/') { LASTCHAR(pathname) = '\0'; } if (mnt_buf) FREEBUF(mnt_buf); else if (vfsmnt_buf) FREEBUF(vfsmnt_buf); } /* * If the pathname component, which may be internal or external to the * dentry, has string length equal to what's expected, copy it into the * passed-in buffer, and return its length. If it doesn't match, return 0. */ static int get_pathname_component(ulong dentry, ulong d_name_name, int d_name_len, char *dentry_buf, char *pathbuf) { int len = d_name_len; /* presume success */ if (d_name_name == (dentry + OFFSET(dentry_d_iname))) { if (strlen(dentry_buf + OFFSET(dentry_d_iname)) == d_name_len) strcpy(pathbuf, dentry_buf + OFFSET(dentry_d_iname)); else len = 0; } else if ((read_string(d_name_name, pathbuf, BUFSIZE)) != d_name_len) len = 0; return len; } /* * Cache the passed-in file structure. */ char * fill_file_cache(ulong file) { int i; char *cache; ft->file_cache_fills++; for (i = 0; i < DENTRY_CACHE; i++) { if (ft->cached_file[i] == file) { ft->cached_file_hits[i]++; cache = ft->file_cache + (SIZE(file)*i); return(cache); } } cache = ft->file_cache + (SIZE(file)*ft->file_cache_index); readmem(file, KVADDR, cache, SIZE(file), "fill_file_cache", FAULT_ON_ERROR); ft->cached_file[ft->file_cache_index] = file; ft->file_cache_index = (ft->file_cache_index+1) % DENTRY_CACHE; return(cache); } /* * If active, clear the file references. */ void clear_file_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_file[i] = 0; ft->cached_file_hits[i] = 0; } ft->file_cache_fills = 0; ft->file_cache_index = 0; } /* * Cache the passed-in dentry structure. 
*/ char * fill_dentry_cache(ulong dentry) { int i; char *cache; ft->dentry_cache_fills++; for (i = 0; i < DENTRY_CACHE; i++) { if (ft->cached_dentry[i] == dentry) { ft->cached_dentry_hits[i]++; cache = ft->dentry_cache + (SIZE(dentry)*i); return(cache); } } cache = ft->dentry_cache + (SIZE(dentry)*ft->dentry_cache_index); readmem(dentry, KVADDR, cache, SIZE(dentry), "fill_dentry_cache", FAULT_ON_ERROR); ft->cached_dentry[ft->dentry_cache_index] = dentry; ft->dentry_cache_index = (ft->dentry_cache_index+1) % DENTRY_CACHE; return(cache); } /* * If active, clear the dentry references. */ void clear_dentry_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_dentry[i] = 0; ft->cached_dentry_hits[i] = 0; } ft->dentry_cache_fills = 0; ft->dentry_cache_index = 0; } /* * Cache the passed-in inode structure. */ char * fill_inode_cache(ulong inode) { int i; char *cache; ft->inode_cache_fills++; for (i = 0; i < INODE_CACHE; i++) { if (ft->cached_inode[i] == inode) { ft->cached_inode_hits[i]++; cache = ft->inode_cache + (SIZE(inode)*i); return(cache); } } cache = ft->inode_cache + (SIZE(inode)*ft->inode_cache_index); readmem(inode, KVADDR, cache, SIZE(inode), "fill_inode_cache", FAULT_ON_ERROR); ft->cached_inode[ft->inode_cache_index] = inode; ft->inode_cache_index = (ft->inode_cache_index+1) % INODE_CACHE; return(cache); } /* * If active, clear the inode references. */ void clear_inode_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_inode[i] = 0; ft->cached_inode_hits[i] = 0; } ft->inode_cache_fills = 0; ft->inode_cache_index = 0; } /* * This command displays the tasks using specified files or sockets. * Tasks will be listed that reference the file as the current working * directory, root directory, an open file descriptor, or that mmap the * file. * The argument can be a full pathname without symbolic links, or inode * address. 
*/ void cmd_fuser(void) { int c; char *spec_string, *tmp; struct foreach_data foreach_data, *fd; char task_buf[BUFSIZE]; char buf[BUFSIZE]; char uses[20]; char fuser_header[BUFSIZE]; int doing_fds, doing_mmap, len; int fuser_header_printed, lockd_header_printed; ulong spec_addr; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!args[optind]) { cmd_usage(pc->curcmd, SYNOPSIS); return; } sprintf(fuser_header, " PID %s COMM USAGE\n", mkstring(buf, VADDR_PRLEN, CENTER, "TASK")); doing_fds = doing_mmap = 0; while (args[optind]) { spec_string = args[optind]; spec_addr = htol(spec_string, RETURN_ON_ERROR|QUIET, NULL); if ((spec_addr == BADADDR || !IS_KVADDR(spec_addr)) && spec_string[0] != '/') error(FATAL, "invalid argument: %s\n", args[optind]); if (STRNEQ(spec_string, "0x") && hexadecimal(spec_string, 0)) shift_string_left(spec_string, 2); len = strlen(spec_string); fuser_header_printed = 0; lockd_header_printed = 0; open_tmpfile(); BZERO(&foreach_data, sizeof(struct foreach_data)); fd = &foreach_data; fd->keyword_array[0] = FOREACH_FILES; fd->keyword_array[1] = FOREACH_VM; fd->keys = 2; fd->flags |= FOREACH_i_FLAG; foreach(fd); rewind(pc->tmpfile); BZERO(uses, 20); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "PID:")) { if (!STREQ(uses, "")) { if (!fuser_header_printed) { fprintf(pc->saved_fp, "%s", fuser_header); fuser_header_printed = 1; } show_fuser(task_buf, uses); BZERO(uses, 20); } BZERO(task_buf, BUFSIZE); strcpy(task_buf, buf); doing_fds = doing_mmap = 0; continue; } if (STRNEQ(buf, "ROOT:")) { if ((tmp = strstr(buf, spec_string)) && (tmp[len] == ' ' || tmp[len] == '\n')) { if (strstr(tmp, "CWD:")) { strcat(uses, "root "); if ((tmp = strstr(tmp+len, spec_string)) && (tmp[len] == ' ' || tmp[len] == '\n')) { strcat(uses, "cwd "); } } else { strcat(uses, "cwd "); } } continue; } if (strstr(buf, "DENTRY")) { doing_fds = 1; continue; } if (strstr(buf, 
"TOTAL_VM")) { doing_fds = 0; continue; } if (strstr(buf, " VMA ")) { doing_mmap = 1; doing_fds = 0; continue; } if ((tmp = strstr(buf, spec_string)) && (tmp[len] == ' ' || tmp[len] == '\n')) { if (doing_fds) { strcat(uses, "fd "); doing_fds = 0; } if (doing_mmap) { strcat(uses, "mmap "); doing_mmap = 0; } } } if (!STREQ(uses, "")) { if (!fuser_header_printed) { fprintf(pc->saved_fp, "%s", fuser_header); fuser_header_printed = 1; } show_fuser(task_buf, uses); BZERO(uses, 20); } close_tmpfile(); optind++; if (!fuser_header_printed && !lockd_header_printed) { fprintf(fp, "No users of %s found\n", spec_string); } } } static void show_fuser(char *buf, char *uses) { char pid[10]; char task[20]; char command[20]; char *p; int i; BZERO(pid, 10); BZERO(task, 20); BZERO(command, 20); p = strstr(buf, "PID: ") + strlen("PID: "); i = 0; while (*p != ' ' && i < 10) { pid[i++] = *p++; } pid[i] = NULLCHAR; p = strstr(buf, "TASK: ") + strlen("TASK: "); while (*p == ' ') p++; i = 0; while (*p != ' ' && i < 20) { task[i++] = *p++; } task[i] = NULLCHAR; mkstring(task, VADDR_PRLEN, RJUST, task); p = strstr(buf, "COMMAND: ") + strlen("COMMAND: "); strncpy(command, p, 16); i = strlen(command) - 1; while (i < 16) { command[i++] = ' '; } command[16] = NULLCHAR; fprintf(pc->saved_fp, "%5s %s %s %s\n", pid, task, command, uses); } /* * Gather some host memory/swap statistics, passing back whatever the * caller requires. 
*/ int monitor_memory(long *freemem_pages, long *freeswap_pages, long *mem_usage, long *swap_usage) { FILE *mp; char buf[BUFSIZE]; char *arglist[MAXARGS]; int argc ATTRIBUTE_UNUSED; int params; ulong freemem, memtotal, freeswap, swaptotal; if (!file_exists("/proc/meminfo", NULL)) return FALSE; if ((mp = fopen("/proc/meminfo", "r")) == NULL) return FALSE; params = 0; freemem = memtotal = freeswap = swaptotal = 0; while (fgets(buf, BUFSIZE, mp)) { if (strstr(buf, "SwapFree")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) freeswap = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "MemFree")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) freemem = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "MemTotal")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) memtotal = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "SwapTotal")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) swaptotal = (atol(arglist[1]) * 1024)/PAGESIZE(); } } fclose(mp); if (params != 4) return FALSE; if (freemem_pages) *freemem_pages = freemem; if (freeswap_pages) *freeswap_pages = freeswap; if (mem_usage) *mem_usage = ((memtotal-freemem)*100) / memtotal; if (swap_usage) *swap_usage = ((swaptotal-freeswap)*100) / swaptotal; return TRUE; } /* * Determine whether two filenames reference the same file. */ int same_file(char *f1, char *f2) { struct stat stat1, stat2; if ((stat(f1, &stat1) != 0) || (stat(f2, &stat2) != 0)) return FALSE; if ((stat1.st_dev == stat2.st_dev) && (stat1.st_ino == stat2.st_ino)) return TRUE; return FALSE; } /* * Determine which live memory source to use. 
*/ #define MODPROBE_CMD "/sbin/modprobe -l --type drivers/char 2>&1" static void get_live_memory_source(void) { FILE *pipe; char buf[BUFSIZE]; char modname1[BUFSIZE/2]; char modname2[BUFSIZE/2]; char *name; int use_module, crashbuiltin; struct stat stat1, stat2; struct utsname utsname; if (!(pc->flags & PROC_KCORE)) pc->flags |= DEVMEM; if (pc->live_memsrc) goto live_report; if (file_readable("/dev/mem")) pc->live_memsrc = "/dev/mem"; else if (file_exists("/proc/kcore", NULL)) { pc->flags &= ~DEVMEM; pc->flags |= PROC_KCORE; pc->live_memsrc = "/proc/kcore"; } use_module = crashbuiltin = FALSE; if (file_exists("/dev/mem", &stat1) && file_exists(pc->memory_device, &stat2) && S_ISCHR(stat1.st_mode) && S_ISCHR(stat2.st_mode) && (stat1.st_rdev == stat2.st_rdev)) { if (!STREQ(pc->memory_device, "/dev/mem")) error(INFO, "%s: same device as /dev/mem\n%s", pc->memory_device, pc->memory_module ? "" : "\n"); if (pc->memory_module) error(INFO, "ignoring --memory_module %s request\n\n", pc->memory_module); } else if (pc->memory_module && memory_driver_module_loaded(NULL)) { error(INFO, "using pre-loaded \"%s\" module\n\n", pc->memory_module); pc->flags |= MODPRELOAD; use_module = TRUE; } else { pc->memory_module = MEMORY_DRIVER_MODULE; if ((pipe = popen(MODPROBE_CMD, "r")) == NULL) { error(INFO, "%s: %s\n", MODPROBE_CMD, strerror(errno)); return; } sprintf(modname1, "%s.o", pc->memory_module); sprintf(modname2, "%s.ko", pc->memory_module); while (fgets(buf, BUFSIZE, pipe)) { if (strstr(buf, "invalid option") && (uname(&utsname) == 0)) { sprintf(buf, "/lib/modules/%s/kernel/drivers/char/%s", utsname.release, modname2); if (file_exists(buf, &stat1)) use_module = TRUE; else { strcat(buf, ".xz"); if (file_exists(buf, &stat1)) use_module = TRUE; } break; } name = basename(strip_linefeeds(buf)); if (STREQ(name, modname1) || STREQ(name, modname2)) { use_module = TRUE; break; } } pclose(pipe); if (!use_module && file_exists("/dev/crash", &stat1) && S_ISCHR(stat1.st_mode)) crashbuiltin 
= TRUE; } if (use_module) { pc->flags &= ~(DEVMEM|PROC_KCORE); pc->flags |= MEMMOD; pc->readmem = read_memory_device; pc->writemem = write_memory_device; pc->live_memsrc = pc->memory_device; } if (crashbuiltin) { pc->flags &= ~(DEVMEM|PROC_KCORE); pc->flags |= CRASHBUILTIN; pc->readmem = read_memory_device; pc->writemem = write_memory_device; pc->live_memsrc = pc->memory_device; pc->memory_module = NULL; } live_report: if (CRASHDEBUG(1)) fprintf(fp, "get_live_memory_source: %s\n", pc->live_memsrc); } /* * Read /proc/modules to determine whether the crash driver module * has been loaded. */ static int memory_driver_module_loaded(int *count) { FILE *modules; int argcnt, module_loaded; char *arglist[MAXARGS]; char buf[BUFSIZE]; if ((modules = fopen("/proc/modules", "r")) == NULL) { error(INFO, "/proc/modules: %s\n", strerror(errno)); return FALSE; } module_loaded = FALSE; while (fgets(buf, BUFSIZE, modules)) { console("%s", buf); argcnt = parse_line(buf, arglist); if (argcnt < 3) continue; if (STREQ(arglist[0], pc->memory_module)) { module_loaded = TRUE; if (CRASHDEBUG(1)) fprintf(stderr, "\"%s\" module loaded: [%s][%s][%s]\n", arglist[0], arglist[0], arglist[1], arglist[2]); if (count) *count = atoi(arglist[2]); break; } } fclose(modules); return module_loaded; } /* * Insmod the memory driver module. */ static int insmod_memory_driver_module(void) { FILE *pipe; char buf[BUFSIZE]; char command[BUFSIZE]; sprintf(command, "/sbin/modprobe %s", pc->memory_module); if (CRASHDEBUG(1)) fprintf(fp, "%s\n", command); if ((pipe = popen(command, "r")) == NULL) { error(INFO, "%s: %s", command, strerror(errno)); return FALSE; } while (fgets(buf, BUFSIZE, pipe)) fprintf(fp, "%s\n", buf); pclose(pipe); if (!memory_driver_module_loaded(NULL)) { error(INFO, "cannot insmod \"%s\" module\n", pc->memory_module); return FALSE; } return TRUE; } /* * Return the dev_t for the memory device driver. 
The major number will * be that of the kernel's misc driver; the minor is dynamically created * when the module at inmod time, and found in /proc/misc. */ static int get_memory_driver_dev(dev_t *devp) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int argcnt; FILE *misc; int minor; dev_t dev; dev = 0; if ((misc = fopen("/proc/misc", "r")) == NULL) { error(INFO, "/proc/misc: %s", strerror(errno)); } else { while (fgets(buf, BUFSIZE, misc)) { argcnt = parse_line(buf, arglist); if ((argcnt == 2) && STREQ(arglist[1], pc->memory_module)) { minor = atoi(arglist[0]); dev = makedev(MISC_MAJOR, minor); if (CRASHDEBUG(1)) fprintf(fp, "/proc/misc: %s %s => %d/%d\n", arglist[0], arglist[1], major(dev), minor(dev)); break; } } fclose(misc); } if (!dev) { error(INFO, "cannot determine minor number of %s driver\n", pc->memory_module); return FALSE; } *devp = dev; return TRUE; } /* * Deal with the creation or verification of the memory device file: * * 1. If the device exists, and has the correct major/minor device numbers, * nothing needs to be done. * 2. If the filename exists, but it's not a device file, has the wrong * major/minor device numbers, or the wrong permissions, advise the * user to delete it. * 3. Otherwise, create it. */ static int create_memory_device(dev_t dev) { struct stat stat; if (file_exists(pc->live_memsrc, &stat)) { /* * It already exists -- just use it. */ if ((stat.st_mode == MEMORY_DRIVER_DEVICE_MODE) && (stat.st_rdev == dev)) return TRUE; /* * Either it's not a device special file, or it's got * the wrong major/minor numbers, or the wrong permissions. * Unlink the file -- it shouldn't be there. */ if (!S_ISCHR(stat.st_mode)) error(FATAL, "%s: not a character device -- please delete it!\n", pc->live_memsrc); else if (dev != stat.st_rdev) error(FATAL, "%s: invalid device: %d/%d -- please delete it!\n", pc->live_memsrc, major(stat.st_rdev), minor(stat.st_rdev)); else unlink(pc->live_memsrc); } /* * Either it doesn't exist or it was just unlinked. 
* In either case, try to create it. */ if (mknod(pc->live_memsrc, MEMORY_DRIVER_DEVICE_MODE, dev)) { error(INFO, "%s: mknod: %s\n", pc->live_memsrc, strerror(errno)); return FALSE; } return TRUE; } /* * If we're here, the memory driver module is being requested: * * 1. If /dev/crash is built into the kernel, just open it. * 2. If the module is not already loaded, insmod it. * 3. Determine the misc driver minor device number that it was assigned. * 4. Create (or verify) the device file. * 5. Then just open it. */ static int memory_driver_init(void) { dev_t dev; if (pc->flags & CRASHBUILTIN) goto open_device; if (!memory_driver_module_loaded(NULL)) { if (!insmod_memory_driver_module()) return FALSE; } else pc->flags |= MODPRELOAD; if (!get_memory_driver_dev(&dev)) return FALSE; if (!create_memory_device(dev)) return FALSE; open_device: if ((pc->mfd = open(pc->memory_device, O_RDONLY)) < 0) { error(INFO, "%s: open: %s\n", pc->memory_device, strerror(errno)); return FALSE; } return TRUE; } /* * Remove the memory driver module and associated file. */ int cleanup_memory_driver(void) { int errors, count; char command[BUFSIZE]; count = errors = 0; if (pc->flags & KERNEL_DEBUG_QUERY) return TRUE; close(pc->mfd); if (file_exists(pc->memory_device, NULL) && unlink(pc->memory_device)) { error(INFO, "%s: %s\n", pc->memory_device, strerror(errno)); errors++; } if (!(pc->flags & MODPRELOAD) && memory_driver_module_loaded(&count) && !count) { sprintf(command, "/sbin/rmmod %s", pc->memory_module); if (CRASHDEBUG(1)) fprintf(fp, "%s\n", command); errors += system(command); } if (errors) error(NOTE, "cleanup_memory_driver failed\n"); return errors ? 
FALSE : TRUE; } struct do_radix_tree_info { ulong maxcount; ulong count; void *data; }; static void do_radix_tree_count(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_radix_tree_info *info = private; info->count++; } static void do_radix_tree_search(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_radix_tree_info *info = private; struct list_pair *rtp = info->data; if (rtp->index == index) { rtp->value = (void *)slot; info->count = 1; } } static void do_radix_tree_dump(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_radix_tree_info *info = private; fprintf(fp, "[%ld] %lx\n", index, slot); info->count++; } static void do_radix_tree_gather(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_radix_tree_info *info = private; struct list_pair *rtp = info->data; if (info->maxcount) { rtp[info->count].index = index; rtp[info->count].value = (void *)slot; info->count++; info->maxcount--; } } static void do_radix_tree_dump_cb(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_radix_tree_info *info = private; struct list_pair *rtp = info->data; int (*cb)(ulong) = rtp->value; /* Caller defined operation */ if (!cb(slot)) { if ((slot & RADIX_TREE_ENTRY_MASK) == RADIX_TREE_EXCEPTIONAL_ENTRY) { if (CRASHDEBUG(1)) error(INFO, "RADIX_TREE_EXCEPTIONAL_ENTRY: %lx\n", slot); return; } error(FATAL, "do_radix_tree: callback " "operation failed: entry: %ld item: %lx\n", info->count, slot); } info->count++; } /* * do_radix_tree argument usage: * * root: Address of a radix_tree_root structure * * flag: RADIX_TREE_COUNT - Return the number of entries in the tree. * RADIX_TREE_SEARCH - Search for an entry at rtp->index; if found, * store the entry in rtp->value and return a count of 1; otherwise * return a count of 0. * RADIX_TREE_DUMP - Dump all existing index/value pairs. 
* RADIX_TREE_GATHER - Store all existing index/value pairs in the * passed-in array of list_pair structs starting at rtp, * returning the count of entries stored; the caller can/should * limit the number of returned entries by putting the array size * (max count) in the rtp->index field of the first structure * in the passed-in array. * RADIX_TREE_DUMP_CB - Similar with RADIX_TREE_DUMP, but for each * radix tree entry, a user defined callback at rtp->value will * be invoked. * * rtp: Unused by RADIX_TREE_COUNT and RADIX_TREE_DUMP. * A pointer to a list_pair structure for RADIX_TREE_SEARCH. * A pointer to an array of list_pair structures for * RADIX_TREE_GATHER; the dimension (max count) of the array may * be stored in the index field of the first structure to avoid * any chance of an overrun. * For RADIX_TREE_DUMP_CB, the rtp->value must be initialized as a * callback function. The callback prototype must be: int (*)(ulong); */ ulong do_radix_tree(ulong root, int flag, struct list_pair *rtp) { struct do_radix_tree_info info = { .count = 0, .data = rtp, }; struct radix_tree_ops ops = { .radix = 16, .private = &info, }; switch (flag) { case RADIX_TREE_COUNT: ops.entry = do_radix_tree_count; break; case RADIX_TREE_SEARCH: /* * FIXME: do_radix_tree_traverse() traverses whole * radix tree, not binary search. So this search is * not efficient. 
*/ ops.entry = do_radix_tree_search; break; case RADIX_TREE_DUMP: ops.entry = do_radix_tree_dump; break; case RADIX_TREE_GATHER: if (!(info.maxcount = rtp->index)) info.maxcount = (ulong)(-1); /* caller beware */ ops.entry = do_radix_tree_gather; break; case RADIX_TREE_DUMP_CB: if (rtp->value == NULL) { error(FATAL, "do_radix_tree: need set callback function"); return -EINVAL; } ops.entry = do_radix_tree_dump_cb; break; default: error(FATAL, "do_radix_tree: invalid flag: %lx\n", flag); } do_radix_tree_traverse(root, 1, &ops); return info.count; } struct do_xarray_info { ulong maxcount; ulong count; void *data; }; static void do_xarray_count(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_xarray_info *info = private; info->count++; } static void do_xarray_search(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_xarray_info *info = private; struct list_pair *xp = info->data; if (xp->index == index) { xp->value = (void *)slot; info->count = 1; } } static void do_xarray_dump(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_xarray_info *info = private; fprintf(fp, "[%ld] %lx\n", index, slot); info->count++; } static void do_xarray_gather(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_xarray_info *info = private; struct list_pair *xp = info->data; if (info->maxcount) { xp[info->count].index = index; xp[info->count].value = (void *)slot; info->count++; info->maxcount--; } } static void do_xarray_dump_cb(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_xarray_info *info = private; struct list_pair *xp = info->data; int (*cb)(ulong) = xp->value; /* Caller defined operation */ if (!cb(slot)) { if (slot & XARRAY_TAG_MASK) { if (CRASHDEBUG(1)) error(INFO, "entry has XARRAY_TAG_MASK bits set: %lx\n", slot); return; } error(FATAL, "do_xarray: callback " "operation failed: entry: %ld item: %lx\n", info->count, 
slot); } info->count++; } /* * do_xarray argument usage: * * root: Address of a xarray structure * * flag: XARRAY_COUNT - Return the number of entries in the tree. * XARRAY_SEARCH - Search for an entry at xp->index; if found, * store the entry in xp->value and return a count of 1; otherwise * return a count of 0. * XARRY_DUMP - Dump all existing index/value pairs. * XARRAY_GATHER - Store all existing index/value pairs in the * passed-in array of list_pair structs starting at xp, * returning the count of entries stored; the caller can/should * limit the number of returned entries by putting the array size * (max count) in the xp->index field of the first structure * in the passed-in array. * XARRAY_DUMP_CB - Similar with XARRAY_DUMP, but for each * xarray entry, a user defined callback at xp->value will * be invoked. * * xp: Unused by XARRAY_COUNT and XARRAY_DUMP. * A pointer to a list_pair structure for XARRAY_SEARCH. * A pointer to an array of list_pair structures for * XARRAY_GATHER; the dimension (max count) of the array may * be stored in the index field of the first structure to avoid * any chance of an overrun. * For XARRAY_DUMP_CB, the rtp->value must be initialized as a * callback function. 
The callback prototype must be: int (*)(ulong); */ ulong do_xarray(ulong root, int flag, struct list_pair *xp) { struct do_xarray_info info = { .count = 0, .data = xp, }; struct xarray_ops ops = { .radix = 16, .private = &info, }; switch (flag) { case XARRAY_COUNT: ops.entry = do_xarray_count; break; case XARRAY_SEARCH: ops.entry = do_xarray_search; break; case XARRAY_DUMP: ops.entry = do_xarray_dump; break; case XARRAY_GATHER: if (!(info.maxcount = xp->index)) info.maxcount = (ulong)(-1); /* caller beware */ ops.entry = do_xarray_gather; break; case XARRAY_DUMP_CB: if (xp->value == NULL) { error(FATAL, "do_xarray: no callback function specified"); return -EINVAL; } ops.entry = do_xarray_dump_cb; break; default: error(FATAL, "do_xarray: invalid flag: %lx\n", flag); } do_xarray_traverse(root, 1, &ops); return info.count; } int is_readable(char *filename) { int fd; if ((fd = open(filename, O_RDONLY)) < 0) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } else close(fd); return TRUE; } static int match_file_string(char *filename, char *string, char *buffer) { int found; char command[BUFSIZE]; FILE *pipe; sprintf(command, "/usr/bin/strings %s", filename); if ((pipe = popen(command, "r")) == NULL) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } found = FALSE; while (fgets(buffer, BUFSIZE-1, pipe)) { if (strstr(buffer, string)) { found = TRUE; break; } } pclose(pipe); return found; } char * vfsmount_devname(ulong vfsmnt, char *buf, int maxlen) { ulong devp; BZERO(buf, maxlen); if (VALID_STRUCT(mount)) { if (!readmem(vfsmnt - OFFSET(mount_mnt) + OFFSET(mount_mnt_devname), KVADDR, &devp, sizeof(void *), "mount mnt_devname", QUIET|RETURN_ON_ERROR)) return buf; } else { if (!readmem(vfsmnt + OFFSET(vfsmount_mnt_devname), KVADDR, &devp, sizeof(void *), "vfsmount mnt_devname", QUIET|RETURN_ON_ERROR)) return buf; } if (read_string(devp, buf, BUFSIZE-1)) return buf; return buf; } static ulong get_root_vfsmount(char *file_buf) { char 
buf1[BUFSIZE]; char buf2[BUFSIZE]; ulong vfsmnt; ulong mnt_parent; vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); if (!strlen(vfsmount_devname(vfsmnt, buf1, BUFSIZE))) return vfsmnt; if (STREQ(buf1, "udev") || STREQ(buf1, "devtmpfs")) { if (VALID_STRUCT(mount)) { if (!readmem(vfsmnt - OFFSET(mount_mnt) + OFFSET(mount_mnt_parent), KVADDR, &mnt_parent, sizeof(void *), "mount mnt_parent", QUIET|RETURN_ON_ERROR)) return vfsmnt; } else { if (!readmem(vfsmnt + OFFSET(vfsmount_mnt_parent), KVADDR, &mnt_parent, sizeof(void *), "vfsmount mnt_parent", QUIET|RETURN_ON_ERROR)) return vfsmnt; } if (!strlen(vfsmount_devname(mnt_parent, buf2, BUFSIZE))) return vfsmnt; if (STREQ(buf1, "udev") && STREQ(buf2, "udev")) return mnt_parent; if (STREQ(buf1, "devtmpfs") && STREQ(buf2, "devtmpfs")) return mnt_parent; } return vfsmnt; } void check_live_arch_mismatch(void) { struct utsname utsname; if (machine_type("X86") && (uname(&utsname) == 0) && STRNEQ(utsname.machine, "x86_64")) error(FATAL, "compiled for the X86 architecture\n"); #if defined(__i386__) || defined(__x86_64__) if (machine_type("ARM")) error(FATAL, "compiled for the ARM architecture\n"); #endif #ifdef __x86_64__ if (machine_type("ARM64")) error(FATAL, "compiled for the ARM64 architecture\n"); #endif #ifdef __x86_64__ if (machine_type("PPC64")) error(FATAL, "compiled for the PPC64 architecture\n"); #endif #ifdef __powerpc64__ if (machine_type("PPC")) error(FATAL, "compiled for the PPC architecture\n"); #endif } crash-utility-crash-9cd43f5/xen_hyper_defs.h0000664000372000037200000007343315107550337020561 0ustar juerghjuergh/* * xen_hyper_defs.h * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. 
* * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifdef XEN_HYPERVISOR_ARCH #include #include #ifdef X86 /* Xen Hypervisor address space layout */ #define IOREMAP_VIRT_END (0UL) #define IOREMAP_VIRT_START (0xFFC00000UL) #define DIRECTMAP_VIRT_END IOREMAP_VIRT_START #define DIRECTMAP_VIRT_START (0xFF000000UL) #define MAPCACHE_VIRT_END DIRECTMAP_VIRT_START #define MAPCACHE_VIRT_START (0xFFC00000UL) #define PERDOMAIN_VIRT_END DIRECTMAP_VIRT_START #define PERDOMAIN_VIRT_START (0xFE800000UL) #define SH_LINEAR_PT_VIRT_END PERDOMAIN_VIRT_START #define SH_LINEAR_PT_VIRT_START (0xFE400000UL) #define SH_LINEAR_PT_VIRT_START_PAE (0xFE000000UL) #define LINEAR_PT_VIRT_END SH_LINEAR_PT_VIRT_START #define LINEAR_PT_VIRT_START (0xFE000000UL) #define LINEAR_PT_VIRT_START_PAE (0xFD800000UL) #define RDWR_MPT_VIRT_END LINEAR_PT_VIRT_START #define RDWR_MPT_VIRT_START (0xFDC00000UL) #define RDWR_MPT_VIRT_START_PAE (0xFC800000UL) #define FRAMETABLE_VIRT_END RDWR_MPT_VIRT_START #define FRAMETABLE_VIRT_START (0xFC400000UL) #define FRAMETABLE_VIRT_START_PAE (0xF6800000UL) #define RO_MPT_VIRT_END FRAMETABLE_VIRT_START #define RO_MPT_VIRT_START (0xFC000000UL) #define RO_MPT_VIRT_START_PAE (0xF5800000UL) #define HYPERVISOR_VIRT_START RO_MPT_VIRT_START #define HYPERVISOR_VIRT_START_PAE RO_MPT_VIRT_START_PAE #endif #ifdef X86_64 #define HYPERVISOR_VIRT_START 
(0xffff800000000000) #define HYPERVISOR_VIRT_END (0xffff880000000000) #define DIRECTMAP_VIRT_START (0xffff830000000000) #define DIRECTMAP_VIRT_END (0xffff880000000000) #define PAGE_OFFSET_XEN_HYPER DIRECTMAP_VIRT_START #define XEN_VIRT_START (xht->xen_virt_start) #define XEN_VIRT_ADDR(vaddr) \ (((vaddr) >= XEN_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_START)) #endif #ifdef IA64 #define HYPERVISOR_VIRT_START (0xe800000000000000) #define HYPERVISOR_VIRT_END (0xf800000000000000) #define DEFAULT_SHAREDINFO_ADDR (0xf100000000000000) #define PERCPU_PAGE_SIZE 65536 #define PERCPU_ADDR (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE) #define DIRECTMAP_VIRT_START (0xf000000000000000) #define DIRECTMAP_VIRT_END PERCPU_ADDR #define VIRT_FRAME_TABLE_SIZE (0x0100000000000000) #define PERCPU_VIRT_ADDR(vaddr) \ (((vaddr) >= PERCPU_ADDR) && ((vaddr) < PERCPU_ADDR + PERCPU_PAGE_SIZE)) #define FRAME_TABLE_VIRT_ADDR(vaddr) \ ((vaddr) >= xhmachdep->frame_table && (vaddr) < xhmachdep->frame_table + VIRT_FRAME_TABLE_SIZE) #undef IA64_RBS_OFFSET #define IA64_RBS_OFFSET ((XEN_HYPER_SIZE(vcpu) + 15) & ~15) #endif /* IA64 */ #define DIRECTMAP_VIRT_ADDR(vaddr) \ (((vaddr) >= DIRECTMAP_VIRT_START) && ((vaddr) < DIRECTMAP_VIRT_END)) typedef uint16_t domid_t; typedef uint32_t Elf_Word; /* * NOTE kakuma: The following defines are temporary version for * elf note format which is used only in crash. 
*/ #define XEN_HYPER_ELF_NOTE_V1 1 #define XEN_HYPER_ELF_NOTE_V2 2 #define XEN_HYPER_ELF_NOTE_V3 3 #define XEN_HYPER_ELF_NOTE_V4 4 #ifdef X86 #define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x100 #endif #if defined(X86_64) || defined(IA64) #define XEN_HYPER_ELF_NOTE_V4_NOTE_SIZE 0x200 #endif /* * Xen Hyper */ #define XEN_HYPER_SMP (0x400) #ifdef X86 #define XEN_HYPER_MAX_VIRT_CPUS (32) #define XEN_HYPER_HZ 100 #endif #ifdef X86_64 #define XEN_HYPER_MAX_VIRT_CPUS (32) #define XEN_HYPER_HZ 100 #endif #ifdef IA64 #define XEN_HYPER_MAX_VIRT_CPUS (64) #define XEN_HYPER_HZ 100 #endif #ifndef XEN_HYPER_MAX_VIRT_CPUS #define XEN_HYPER_MAX_VIRT_CPUS (1) #endif #if defined(X86) || defined(X86_64) #define xen_hyper_per_cpu(var, cpu) \ ({ ulong __var_addr; \ if (xht->__per_cpu_offset) \ __var_addr = (xht->flags & XEN_HYPER_SMP) ? \ ((ulong)(var) + xht->__per_cpu_offset[cpu]) : (ulong)(var); \ else \ __var_addr = (ulong)(var) + ((ulong)(cpu) << xht->percpu_shift); \ __var_addr; }) #elif defined(IA64) #define xen_hyper_per_cpu(var, cpu) \ ((xht->flags & XEN_HYPER_SMP) ? \ (ulong)(var) + (xht->__per_cpu_offset[cpu]) : \ (ulong)(var)) #endif #if defined(X86) || defined(X86_64) #define XEN_HYPER_STACK_ORDER 2 #if 0 #define XEN_HYPER_STACK_SIZE (machdep->pagesize << XEN_HYPER_STACK_ORDER) #endif #define XEN_HYPER_GET_CPU_INFO(sp) \ ((sp & ~(STACKSIZE()-1)) | \ (STACKSIZE() - XEN_HYPER_SIZE(cpu_info))) #endif #define XEN_HYPER_CONRING_SIZE 16384 /* system time */ #define XEN_HYPER_NANO_TO_SEC(ns) ((ulonglong)((ns) / 1000000000ULL)) #define XEN_HYPER_MICR_TO_SEC(us) ((ulonglong)((us) / 1000000ULL)) #define XEN_HYPER_MILI_TO_SEC(ms) ((ulonglong)((ms) / 1000ULL)) /* * Domain */ /* Prepared domain ID. */ #define XEN_HYPER_DOMID_IO (0x7FF1U) #define XEN_HYPER_DOMID_XEN (0x7FF2U) #define XEN_HYPER_DOMID_IDLE (0x7FFFU) /* Domain flags (domain_flags). */ /* Is this domain privileged? 
*/ #define XEN_HYPER__DOMF_privileged 0 #define XEN_HYPER_DOMF_privileged (1UL<= 0) #define XEN_HYPER_VALID_STRUCT(X) (xen_hyper_size_table.X >= 0) #define XEN_HYPER_VALID_MEMBER(X) (xen_hyper_offset_table.X >= 0) #define XEN_HYPER_ASSIGN_SIZE(X) (xen_hyper_size_table.X) #define XEN_HYPER_ASSIGN_OFFSET(X) (xen_hyper_offset_table.X) #define XEN_HYPER_STRUCT_SIZE_INIT(X, Y) (XEN_HYPER_ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) #define XEN_HYPER_MEMBER_SIZE_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) #define XEN_HYPER_MEMBER_OFFSET_INIT(X, Y, Z) (XEN_HYPER_ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) /* * System */ #define XEN_HYPER_MAX_CPUS() (xht->max_cpus) #define XEN_HYPER_CRASHING_CPU() (xht->crashing_cpu) /* * Dump information */ #define XEN_HYPER_X86_NOTE_EIP(regs) (regs[12]) #define XEN_HYPER_X86_NOTE_ESP(regs) (regs[15]) #define XEN_HYPER_X86_64_NOTE_RIP(regs) (regs[16]) #define XEN_HYPER_X86_64_NOTE_RSP(regs) (regs[19]) /* * Domain */ #define XEN_HYPER_DOMAIN_F_INIT 0x1 #define XEN_HYPER_NR_DOMAINS() (xht->domains) #define XEN_HYPER_RUNNING_DOMAINS() (xhdt->running_domains) /* * Phisycal CPU */ #define XEN_HYPER_NR_PCPUS() (xht->pcpus) #define for_cpu_indexes(i, cpuid) \ for (i = 0, cpuid = xht->cpu_idxs[i]; \ i < XEN_HYPER_NR_PCPUS(); \ cpuid = xht->cpu_idxs[++i]) #define XEN_HYPER_CURR_VCPU(pcpuid) \ (xen_hyper_get_active_vcpu_from_pcpuid(pcpuid)) /* * VCPU */ #define XEN_HYPER_VCPU_F_INIT 0x1 #define XEN_HYPER_NR_VCPUS_IN_DOM(domain_context) (domain_context->vcpu_cnt) #define XEN_HYPER_VCPU_LAST_CONTEXT() (xhvct->last) /* * tools */ #define XEN_HYPER_PRI(fp, len, str, buf, flag, args) \ sprintf args; \ xen_hyper_fpr_indent(fp, len, str, buf, flag); #define XEN_HYPER_PRI_CONST(fp, len, str, flag) \ xen_hyper_fpr_indent(fp, len, str, NULL, flag); #define XEN_HYPER_PRI_L (0x0) #define XEN_HYPER_PRI_R (0x1) #define XEN_HYPER_PRI_LF (0x2) /* * Global data */ extern struct xen_hyper_machdep_table *xhmachdep; extern struct xen_hyper_table *xht; extern struct 
xen_hyper_dumpinfo_table *xhdit; extern struct xen_hyper_domain_table *xhdt; extern struct xen_hyper_vcpu_table *xhvct; extern struct xen_hyper_pcpu_table *xhpct; extern struct xen_hyper_sched_table *xhscht; extern struct xen_hyper_symbol_table_data *xhsymt; extern struct xen_hyper_offset_table xen_hyper_offset_table; extern struct xen_hyper_size_table xen_hyper_size_table; extern struct command_table_entry xen_hyper_command_table[]; extern struct task_context fake_tc; /* * Xen Hyper command help */ extern char *xen_hyper_help_domain[]; extern char *xen_hyper_help_doms[]; extern char *xen_hyper_help_dumpinfo[]; extern char *xen_hyper_help_log[]; extern char *xen_hyper_help_pcpus[]; extern char *xen_hyper_help_sched[]; extern char *xen_hyper_help_sys[]; extern char *xen_hyper_help_vcpu[]; extern char *xen_hyper_help_vcpus[]; /* * Prototype */ ulonglong xen_hyper_get_uptime_hyper(void); /* * x86 */ int xen_hyper_x86_get_smp_cpus(void); uint64_t xen_hyper_x86_memory_size(void); /* * IA64 */ int xen_hyper_ia64_get_smp_cpus(void); uint64_t xen_hyper_ia64_memory_size(void); ulong xen_hyper_ia64_processor_speed(void); /* * Xen Hyper */ void xen_hyper_init(void); void xen_hyper_domain_init(void); void xen_hyper_vcpu_init(void); void xen_hyper_dumpinfo_init(void); void xen_hyper_misc_init(void); void xen_hyper_post_init(void); struct xen_hyper_dumpinfo_context *xen_hyper_id_to_dumpinfo_context(uint id); struct xen_hyper_dumpinfo_context *xen_hyper_note_to_dumpinfo_context(ulong note); char *xen_hyper_fill_elf_notes(ulong note, char *note_buf, int type); /* domain */ void xen_hyper_refresh_domain_context_space(void); int xen_hyper_get_domains(void); char *xen_hyper_get_domain_next(int mod, ulong *next); domid_t xen_hyper_domain_to_id(ulong domain); char *xen_hyper_id_to_domain_struct(domid_t id); struct xen_hyper_domain_context * xen_hyper_domain_to_domain_context(ulong domain); struct xen_hyper_domain_context * xen_hyper_id_to_domain_context(domid_t id); struct 
xen_hyper_domain_context * xen_hyper_store_domain_context(struct xen_hyper_domain_context *dc, ulong domain, char *dp); char *xen_hyper_read_domain_from_context(struct xen_hyper_domain_context *dc); char *xen_hyper_read_domain(ulong domain); char *xen_hyper_read_domain_verify(ulong domain); char *xen_hyper_fill_domain_struct(ulong domain, char *domain_struct); void xen_hyper_alloc_domain_context_space(int domains); ulong xen_hyper_domain_state(struct xen_hyper_domain_context *dc); /* vcpu */ void xen_hyper_refresh_vcpu_context_space(void); struct xen_hyper_vcpu_context * xen_hyper_vcpu_to_vcpu_context(ulong vcpu); struct xen_hyper_vcpu_context * xen_hyper_id_to_vcpu_context(ulong domain, domid_t did, int vcid); struct xen_hyper_vcpu_context_array * xen_hyper_domain_to_vcpu_context_array(ulong domain); struct xen_hyper_vcpu_context_array * xen_hyper_domid_to_vcpu_context_array(domid_t id); struct xen_hyper_vcpu_context * xen_hyper_store_vcpu_context(struct xen_hyper_vcpu_context *vcc, ulong vcpu, char *vcp); char * xen_hyper_read_vcpu_from_context(struct xen_hyper_vcpu_context *vcc); char *xen_hyper_read_vcpu(ulong vcpu); char *xen_hyper_read_vcpu_verify(ulong vcpu); char *xen_hyper_fill_vcpu_struct(ulong vcpu, char *vcpu_struct); void xen_hyper_alloc_vcpu_context_arrays_space(int domains); void xen_hyper_alloc_vcpu_context_space(struct xen_hyper_vcpu_context_array *vcca, int vcpus); int xen_hyper_vcpu_state(struct xen_hyper_vcpu_context *vcc); /* pcpu */ #if defined(X86) || defined(X86_64) void xen_hyper_x86_pcpu_init(void); #elif defined(IA64) void xen_hyper_ia64_pcpu_init(void); #endif struct xen_hyper_pcpu_context *xen_hyper_id_to_pcpu_context(uint id); struct xen_hyper_pcpu_context *xen_hyper_pcpu_to_pcpu_context(ulong pcpu); struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context(struct xen_hyper_pcpu_context *pcc, ulong pcpu, char *pcp); struct xen_hyper_pcpu_context *xen_hyper_store_pcpu_context_tss(struct xen_hyper_pcpu_context *pcc, ulong init_tss, 
char *tss); char *xen_hyper_read_pcpu(ulong pcpu); char *xen_hyper_fill_pcpu_struct(ulong pcpu, char *pcpu_struct); void xen_hyper_alloc_pcpu_context_space(int pcpus); /* others */ char *xen_hyper_x86_fill_cpu_data(int idx, char *cpuinfo_x86); char *xen_hyper_ia64_fill_cpu_data(int idx, char *cpuinfo_ia64); int xen_hyper_is_vcpu_crash(struct xen_hyper_vcpu_context *vcc); void xen_hyper_print_bt_header(FILE *out, ulong pcpu, int newline); ulong xen_hyper_get_active_vcpu_from_pcpuid(ulong pcpu); ulong xen_hyper_pcpu_to_active_vcpu(ulong pcpu); void xen_hyper_get_cpu_info(void); int xen_hyper_test_pcpu_id(uint pcpu_id); /* * Xen Hyper command */ void xen_hyper_cmd_help(void); void xen_hyper_cmd_domain(void); void xen_hyper_cmd_doms(void); void xen_hyper_cmd_dumpinfo(void); void xen_hyper_cmd_log(void); void xen_hyper_dump_log(void); void xen_hyper_cmd_pcpus(void); void xen_hyper_cmd_sched(void); void xen_hyper_cmd_sys(void); void xen_hyper_cmd_vcpu(void); void xen_hyper_cmd_vcpus(void); void xen_hyper_display_sys_stats(void); void xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc); char *xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc, char *buf, int verbose); char *xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc, char *buf, int verbose); /* tools */ void xen_hyper_fpr_indent(FILE *fp, int len, char *str1, char *str2, int flag); #else #define XEN_HYPERVISOR_NOT_SUPPORTED \ "Xen hypervisor mode not supported on this architecture\n" #endif crash-utility-crash-9cd43f5/maple_tree.c0000664000372000037200000004262415107550337017665 0ustar juerghjuergh// SPDX-License-Identifier: GPL-2.0+ /* * Maple Tree implementation * Copyright (c) 2018-2022 Oracle Corporation * Authors: Liam R. 
Howlett * Matthew Wilcox * * The following are copied and modified from lib/maple_tree.c */ #include "maple_tree.h" #include "defs.h" unsigned char *mt_slots = NULL; unsigned char *mt_pivots = NULL; ulong mt_max[4] = {0}; #define MAPLE_BUFSIZE 512 static inline ulong mte_to_node(ulong maple_enode_entry) { return maple_enode_entry & ~MAPLE_NODE_MASK; } static inline enum maple_type mte_node_type(ulong maple_enode_entry) { return (maple_enode_entry >> MAPLE_NODE_TYPE_SHIFT) & MAPLE_NODE_TYPE_MASK; } static inline ulong mt_slot(void **slots, unsigned char offset) { return (ulong)slots[offset]; } static inline bool ma_is_leaf(const enum maple_type type) { return type < maple_range_64; } /*************** For cmd_tree ********************/ struct do_maple_tree_info { ulong maxcount; ulong count; void *data; }; struct maple_tree_ops { void (*entry)(ulong node, ulong slot, const char *path, ulong index, void *private); void *private; bool is_td; }; static const char spaces[] = " "; static void do_mt_range64(ulong, ulong, ulong, uint, char *, ulong *, struct maple_tree_ops *); static void do_mt_arange64(ulong, ulong, ulong, uint, char *, ulong *, struct maple_tree_ops *); static void do_mt_entry(ulong, ulong, ulong, uint, uint, char *, ulong *, struct maple_tree_ops *); static void do_mt_node(ulong, ulong, ulong, uint, char *, ulong *, struct maple_tree_ops *); struct req_entry *fill_member_offsets(char *); void dump_struct_members_fast(struct req_entry *, int, ulong); void dump_struct_members_for_tree(struct tree_data *, int, ulong); static void mt_dump_range(ulong min, ulong max, uint depth) { if (min == max) fprintf(fp, "%.*s%lu: ", depth * 2, spaces, min); else fprintf(fp, "%.*s%lu-%lu: ", depth * 2, spaces, min, max); } static inline bool mt_is_reserved(ulong entry) { return (entry < MAPLE_RESERVED_RANGE) && xa_is_internal(entry); } static inline bool mte_is_leaf(ulong maple_enode_entry) { return ma_is_leaf(mte_node_type(maple_enode_entry)); } static uint 
mt_height(char *mt_buf) { return (UINT(mt_buf + OFFSET(maple_tree_ma_flags)) & MT_FLAGS_HEIGHT_MASK) >> MT_FLAGS_HEIGHT_OFFSET; } static void dump_mt_range64(char *mr64_buf) { int i; fprintf(fp, " contents: "); for (i = 0; i < mt_slots[maple_range_64] - 1; i++) fprintf(fp, "%p %lu ", VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) + sizeof(void *) * i), ULONG(mr64_buf + OFFSET(maple_range_64_pivot) + sizeof(ulong) * i)); fprintf(fp, "%p\n", VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) + sizeof(void *) * i)); } static void dump_mt_arange64(char *ma64_buf) { int i; fprintf(fp, " contents: "); for (i = 0; i < mt_slots[maple_arange_64]; i++) fprintf(fp, "%lu ", ULONG(ma64_buf + OFFSET(maple_arange_64_gap) + sizeof(ulong) * i)); fprintf(fp, "| %02X %02X| ", UCHAR(ma64_buf + OFFSET(maple_arange_64_meta) + OFFSET(maple_metadata_end)), UCHAR(ma64_buf + OFFSET(maple_arange_64_meta) + OFFSET(maple_metadata_gap))); for (i = 0; i < mt_slots[maple_arange_64] - 1; i++) fprintf(fp, "%p %lu ", VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + sizeof(void *) * i), ULONG(ma64_buf + OFFSET(maple_arange_64_pivot) + sizeof(ulong) * i)); fprintf(fp, "%p\n", VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + sizeof(void *) * i)); } static void dump_mt_entry(ulong entry, ulong min, ulong max, uint depth) { mt_dump_range(min, max, depth); if (xa_is_value(entry)) fprintf(fp, "value %ld (0x%lx) [0x%lx]\n", xa_to_value(entry), xa_to_value(entry), entry); else if (xa_is_zero(entry)) fprintf(fp, "zero (%ld)\n", xa_to_internal(entry)); else if (mt_is_reserved(entry)) fprintf(fp, "UNKNOWN ENTRY (0x%lx)\n", entry); else fprintf(fp, "0x%lx\n", entry); } static void dump_mt_node(ulong maple_node, char *node_data, uint type, ulong min, ulong max, uint depth) { mt_dump_range(min, max, depth); fprintf(fp, "node 0x%lx depth %d type %d parent %p", maple_node, depth, type, maple_node ? 
VOID_PTR(node_data + OFFSET(maple_node_parent)) : NULL); } static void do_mt_range64(ulong entry, ulong min, ulong max, uint depth, char *path, ulong *global_index, struct maple_tree_ops *ops) { ulong maple_node_m_node = mte_to_node(entry); char node_buf[MAPLE_BUFSIZE]; bool leaf = mte_is_leaf(entry); ulong first = min, last; int i; int len = strlen(path); struct tree_data *td = ops->is_td ? (struct tree_data *)ops->private : NULL; char *mr64_buf; if (SIZE(maple_node) > MAPLE_BUFSIZE) error(FATAL, "MAPLE_BUFSIZE should be larger than maple_node struct"); readmem(maple_node_m_node, KVADDR, node_buf, SIZE(maple_node), "mt_dump_range64 read maple_node", FAULT_ON_ERROR); mr64_buf = node_buf + OFFSET(maple_node_mr64); if (td && td->flags & TREE_STRUCT_VERBOSE) { dump_mt_range64(mr64_buf); } for (i = 0; i < mt_slots[maple_range_64]; i++) { last = max; if (i < (mt_slots[maple_range_64] - 1)) last = ULONG(mr64_buf + OFFSET(maple_range_64_pivot) + sizeof(ulong) * i); else if (!VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) + sizeof(void *) * i) && max != mt_max[mte_node_type(entry)]) break; if (last == 0 && i > 0) break; if (leaf) do_mt_entry(mt_slot((void **)(mr64_buf + OFFSET(maple_range_64_slot)), i), first, last, depth + 1, i, path, global_index, ops); else if (VOID_PTR(mr64_buf + OFFSET(maple_range_64_slot) + sizeof(void *) * i)) { sprintf(path + len, "/%d", i); do_mt_node(mt_slot((void **)(mr64_buf + OFFSET(maple_range_64_slot)), i), first, last, depth + 1, path, global_index, ops); } if (last == max) break; if (last > max) { fprintf(fp, "node %p last (%lu) > max (%lu) at pivot %d!\n", mr64_buf, last, max, i); break; } first = last + 1; } } static void do_mt_arange64(ulong entry, ulong min, ulong max, uint depth, char *path, ulong *global_index, struct maple_tree_ops *ops) { ulong maple_node_m_node = mte_to_node(entry); char node_buf[MAPLE_BUFSIZE]; bool leaf = mte_is_leaf(entry); ulong first = min, last; int i; int len = strlen(path); struct tree_data *td = 
ops->is_td ? (struct tree_data *)ops->private : NULL; char *ma64_buf; if (SIZE(maple_node) > MAPLE_BUFSIZE) error(FATAL, "MAPLE_BUFSIZE should be larger than maple_node struct"); readmem(maple_node_m_node, KVADDR, node_buf, SIZE(maple_node), "mt_dump_arange64 read maple_node", FAULT_ON_ERROR); ma64_buf = node_buf + OFFSET(maple_node_ma64); if (td && td->flags & TREE_STRUCT_VERBOSE) { dump_mt_arange64(ma64_buf); } for (i = 0; i < mt_slots[maple_arange_64]; i++) { last = max; if (i < (mt_slots[maple_arange_64] - 1)) last = ULONG(ma64_buf + OFFSET(maple_arange_64_pivot) + sizeof(ulong) * i); else if (!VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + sizeof(void *) * i)) break; if (last == 0 && i > 0) break; if (leaf) do_mt_entry(mt_slot((void **)(ma64_buf + OFFSET(maple_arange_64_slot)), i), first, last, depth + 1, i, path, global_index, ops); else if (VOID_PTR(ma64_buf + OFFSET(maple_arange_64_slot) + sizeof(void *) * i)) { sprintf(path + len, "/%d", i); do_mt_node(mt_slot((void **)(ma64_buf + OFFSET(maple_arange_64_slot)), i), first, last, depth + 1, path, global_index, ops); } if (last == max) break; if (last > max) { fprintf(fp, "node %p last (%lu) > max (%lu) at pivot %d!\n", ma64_buf, last, max, i); break; } first = last + 1; } } static void do_mt_entry(ulong entry, ulong min, ulong max, uint depth, uint index, char *path, ulong *global_index, struct maple_tree_ops *ops) { int print_radix = 0, i; static struct req_entry **e = NULL; struct tree_data *td = ops->is_td ? 
(struct tree_data *)ops->private : NULL; if (ops->entry && entry) ops->entry(entry, entry, path, max, ops->private); if (!td) return; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } td->count++; if (td->flags & TREE_STRUCT_VERBOSE) { dump_mt_entry(entry, min, max, depth); } else if (td->flags & VERBOSE && entry) fprintf(fp, "%lx\n", entry); if (td->flags & TREE_POSITION_DISPLAY && entry) fprintf(fp, " index: %ld position: %s/%u\n", ++(*global_index), path, index); if (td->structname && entry) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch (count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], entry, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, entry); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, entry); } } } if (e) FREEBUF(e); } static void do_mt_node(ulong entry, ulong min, ulong max, uint depth, char *path, ulong *global_index, struct maple_tree_ops *ops) { ulong maple_node = mte_to_node(entry); uint type = mte_node_type(entry); uint i; char node_buf[MAPLE_BUFSIZE]; struct tree_data *td = ops->is_td ? 
(struct tree_data *)ops->private : NULL; if (SIZE(maple_node) > MAPLE_BUFSIZE) error(FATAL, "MAPLE_BUFSIZE should be larger than maple_node struct"); readmem(maple_node, KVADDR, node_buf, SIZE(maple_node), "mt_dump_node read maple_node", FAULT_ON_ERROR); if (td && td->flags & TREE_STRUCT_VERBOSE) { dump_mt_node(maple_node, node_buf, type, min, max, depth); } switch (type) { case maple_dense: for (i = 0; i < mt_slots[maple_dense]; i++) { if (min + i > max) fprintf(fp, "OUT OF RANGE: "); do_mt_entry(mt_slot((void **)(node_buf + OFFSET(maple_node_slot)), i), min + i, min + i, depth, i, path, global_index, ops); } break; case maple_leaf_64: case maple_range_64: do_mt_range64(entry, min, max, depth, path, global_index, ops); break; case maple_arange_64: do_mt_arange64(entry, min, max, depth, path, global_index, ops); break; default: fprintf(fp, " UNKNOWN TYPE\n"); } } static int do_maple_tree_traverse(ulong ptr, int is_root, struct maple_tree_ops *ops) { char path[BUFSIZE] = {0}; char tree_buf[MAPLE_BUFSIZE]; ulong entry; struct tree_data *td = ops->is_td ? 
(struct tree_data *)ops->private : NULL; ulong global_index = 0; if (SIZE(maple_tree) > MAPLE_BUFSIZE) error(FATAL, "MAPLE_BUFSIZE should be larger than maple_tree struct"); if (!is_root) { strcpy(path, "direct"); do_mt_node(ptr, 0, mt_max[mte_node_type(ptr)], 0, path, &global_index, ops); } else { readmem(ptr, KVADDR, tree_buf, SIZE(maple_tree), "mt_dump read maple_tree", FAULT_ON_ERROR); entry = ULONG(tree_buf + OFFSET(maple_tree_ma_root)); if (td && td->flags & TREE_STRUCT_VERBOSE) { fprintf(fp, "maple_tree(%lx) flags %X, height %u root 0x%lx\n\n", ptr, UINT(tree_buf + OFFSET(maple_tree_ma_flags)), mt_height(tree_buf), entry); } if (!xa_is_node(entry)) do_mt_entry(entry, 0, 0, 0, 0, path, &global_index, ops); else if (entry) { strcpy(path, "root"); do_mt_node(entry, 0, mt_max[mte_node_type(entry)], 0, path, &global_index, ops); } } return 0; } int do_mptree(struct tree_data *td) { struct maple_tree_ops ops = { .entry = NULL, .private = td, .is_td = true, }; int is_root = !(td->flags & TREE_NODE_POINTER); do_maple_tree_traverse(td->start, is_root, &ops); return 0; } /************* For do_maple_tree *****************/ static void do_maple_tree_count(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_maple_tree_info *info = private; info->count++; } static void do_maple_tree_search(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_maple_tree_info *info = private; struct list_pair *lp = info->data; if (lp->index == index) { lp->value = (void *)slot; info->count = 1; } } static void do_maple_tree_dump(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_maple_tree_info *info = private; fprintf(fp, "[%lu] %lx\n", index, slot); info->count++; } static void do_maple_tree_gather(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_maple_tree_info *info = private; struct list_pair *lp = info->data; if (info->maxcount) { lp[info->count].index = index; 
lp[info->count].value = (void *)slot; info->count++; info->maxcount--; } } static void do_maple_tree_dump_cb(ulong node, ulong slot, const char *path, ulong index, void *private) { struct do_maple_tree_info *info = private; struct list_pair *lp = info->data; int (*cb)(ulong) = lp->value; /* Caller defined operation */ if (!cb(slot)) { error(FATAL, "do_maple_tree: callback " "operation failed: entry: %ld item: %lx\n", info->count, slot); } info->count++; } /* * do_maple_tree argument usage: * * root: Address of a maple_tree_root structure * * flag: MAPLE_TREE_COUNT - Return the number of entries in the tree. * MAPLE_TREE_SEARCH - Search for an entry at lp->index; if found, * store the entry in lp->value and return a count of 1; otherwise * return a count of 0. * MAPLE_TREE_DUMP - Dump all existing index/value pairs. * MAPLE_TREE_GATHER - Store all existing index/value pairs in the * passed-in array of list_pair structs starting at lp, * returning the count of entries stored; the caller can/should * limit the number of returned entries by putting the array size * (max count) in the lp->index field of the first structure * in the passed-in array. * MAPLE_TREE_DUMP_CB - Similar with MAPLE_TREE_DUMP, but for each * maple tree entry, a user defined callback at lp->value will * be invoked. * * lp: Unused by MAPLE_TREE_COUNT and MAPLE_TREE_DUMP. * A pointer to a list_pair structure for MAPLE_TREE_SEARCH. * A pointer to an array of list_pair structures for * MAPLE_TREE_GATHER; the dimension (max count) of the array may * be stored in the index field of the first structure to avoid * any chance of an overrun. * For MAPLE_TREE_DUMP_CB, the lp->value must be initialized as a * callback function. 
The callback prototype must be: int (*)(ulong); */ ulong do_maple_tree(ulong root, int flag, struct list_pair *lp) { struct do_maple_tree_info info = { .count = 0, .data = lp, }; struct maple_tree_ops ops = { .private = &info, .is_td = false, }; switch (flag) { case MAPLE_TREE_COUNT: ops.entry = do_maple_tree_count; break; case MAPLE_TREE_SEARCH: ops.entry = do_maple_tree_search; break; case MAPLE_TREE_DUMP: ops.entry = do_maple_tree_dump; break; case MAPLE_TREE_GATHER: if (!(info.maxcount = lp->index)) info.maxcount = (ulong)(-1); /* caller beware */ ops.entry = do_maple_tree_gather; break; case MAPLE_TREE_DUMP_CB: if (lp->value == NULL) { error(FATAL, "do_maple_tree: need set callback function"); } ops.entry = do_maple_tree_dump_cb; break; default: error(FATAL, "do_maple_tree: invalid flag: %lx\n", flag); } do_maple_tree_traverse(root, true, &ops); return info.count; } /***********************************************/ void maple_init(void) { int array_len; STRUCT_SIZE_INIT(maple_tree, "maple_tree"); STRUCT_SIZE_INIT(maple_node, "maple_node"); MEMBER_OFFSET_INIT(maple_tree_ma_root, "maple_tree", "ma_root"); MEMBER_OFFSET_INIT(maple_tree_ma_flags, "maple_tree", "ma_flags"); MEMBER_OFFSET_INIT(maple_node_parent, "maple_node", "parent"); MEMBER_OFFSET_INIT(maple_node_ma64, "maple_node", "ma64"); MEMBER_OFFSET_INIT(maple_node_mr64, "maple_node", "mr64"); MEMBER_OFFSET_INIT(maple_node_slot, "maple_node", "slot"); MEMBER_OFFSET_INIT(maple_arange_64_pivot, "maple_arange_64", "pivot"); MEMBER_OFFSET_INIT(maple_arange_64_slot, "maple_arange_64", "slot"); MEMBER_OFFSET_INIT(maple_arange_64_gap, "maple_arange_64", "gap"); MEMBER_OFFSET_INIT(maple_arange_64_meta, "maple_arange_64", "meta"); MEMBER_OFFSET_INIT(maple_range_64_pivot, "maple_range_64", "pivot"); MEMBER_OFFSET_INIT(maple_range_64_slot, "maple_range_64", "slot"); MEMBER_OFFSET_INIT(maple_metadata_end, "maple_metadata", "end"); MEMBER_OFFSET_INIT(maple_metadata_gap, "maple_metadata", "gap"); array_len = 
get_array_length("mt_slots", NULL, sizeof(char)); mt_slots = calloc(array_len, sizeof(char)); readmem(symbol_value("mt_slots"), KVADDR, mt_slots, array_len * sizeof(char), "maple_init read mt_slots", RETURN_ON_ERROR); array_len = get_array_length("mt_pivots", NULL, sizeof(char)); mt_pivots = calloc(array_len, sizeof(char)); readmem(symbol_value("mt_pivots"), KVADDR, mt_pivots, array_len * sizeof(char), "maple_init read mt_pivots", RETURN_ON_ERROR); mt_max[maple_dense] = mt_slots[maple_dense]; mt_max[maple_leaf_64] = ULONG_MAX; mt_max[maple_range_64] = ULONG_MAX; mt_max[maple_arange_64] = ULONG_MAX; } crash-utility-crash-9cd43f5/gdb_interface.c0000664000372000037200000007131315107550337020321 0ustar juerghjuergh/* gdb_interface.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2015,2018-2019 David Anderson * Copyright (C) 2002-2015,2018-2019 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #if !defined(GDB_10_2) && !defined(GDB_16_2) static void exit_after_gdb_info(void); #endif static int is_restricted_command(char *, ulong); static void strip_redirection(char *); int get_frame_offset(ulong); int *gdb_output_format; unsigned int *gdb_print_max; unsigned char *gdb_prettyprint_structs; unsigned char *gdb_prettyprint_arrays; unsigned int *gdb_repeat_count_threshold; unsigned char *gdb_stop_print_at_null; unsigned int *gdb_output_radix; static void gdb_error_debug(void); static ulong gdb_user_print_option_address(char *); /* * Called from main() this routine sets up the call-back hook such that * gdb's main() routine -- renamed gdb_main() -- will call back to * our main_loop() after gdb initializes. */ void gdb_main_loop(int argc, char **argv) { argc = 1; if (pc->flags & SILENT) { if (pc->flags & READNOW) argv[argc++] = "--readnow"; argv[argc++] = "--quiet"; argv[argc++] = pc->namelist_debug ? pc->namelist_debug : (pc->debuginfo_file && (st->flags & CRC_MATCHES) ? pc->debuginfo_file : pc->namelist); } else { if (pc->flags & READNOW) argv[argc++] = "--readnow"; argv[argc++] = pc->namelist_debug ? pc->namelist_debug : (pc->debuginfo_file && (st->flags & CRC_MATCHES) ? pc->debuginfo_file : pc->namelist); } if (CRASHDEBUG(1)) { int i; fprintf(fp, "gdb "); for (i = 1; i < argc; i++) fprintf(fp, "%s ", argv[i]); fprintf(fp, "\n"); } optind = 0; #if !defined(GDB_10_2) && !defined(GDB_16_2) #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) command_loop_hook = main_loop; #else deprecated_command_loop_hook = main_loop; #endif #endif gdb_main_entry(argc, argv); } /* * Update any hooks that gdb has set. */ void update_gdb_hooks(void) { #if defined(GDB_6_0) || defined(GDB_6_1) command_loop_hook = pc->flags & VERSION_QUERY ? exit_after_gdb_info : main_loop; target_new_objfile_hook = NULL; #endif #if defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6) deprecated_command_loop_hook = pc->flags & VERSION_QUERY ? 
exit_after_gdb_info : main_loop; #endif } void gdb_readnow_warning(void) { if ((THIS_GCC_VERSION >= GCC(3,4,0)) && (THIS_GCC_VERSION < GCC(4,0,0)) && !(pc->flags & READNOW)) { fprintf(stderr, "WARNING: Because this kernel was compiled with gcc version %d.%d.%d, certain\n" " commands or command options may fail unless crash is invoked with\n" " the \"--readnow\" command line option.\n\n", kt->gcc_version[0], kt->gcc_version[1], kt->gcc_version[2]); } } /* * Used only by the -v command line option, get gdb to initialize itself * with no arguments, print its version and GPL paragraph, and then call * back to exit_after_gdb_info(). */ void display_gdb_banner(void) { optind = 0; #if !defined(GDB_10_2) && !defined(GDB_16_2) #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) command_loop_hook = exit_after_gdb_info; #else deprecated_command_loop_hook = exit_after_gdb_info; #endif #endif args[0] = "gdb"; args[1] = "-version"; gdb_main_entry(2, args); } #if !defined(GDB_10_2) && !defined(GDB_16_2) static void exit_after_gdb_info(void) { fprintf(fp, "\n"); clean_exit(0); } #endif /* * Stash a copy of the gdb version locally. This can be called before * gdb gets initialized, so bypass gdb_interface(). */ void get_gdb_version(void) { struct gnu_request request; if (!pc->gdb_version) { request.command = GNU_VERSION; gdb_command_funnel(&request); /* bypass gdb_interface() */ pc->gdb_version = request.buf; } } extern void *current_program_space; void gdb_session_init(void) { struct gnu_request *req; int debug_data_pulled_in; if (!have_partial_symbols(current_program_space) && !have_full_symbols(current_program_space)) no_debugging_data(FATAL); /* * Restore the SIGINT and SIGPIPE handlers, which got temporarily * re-assigned by gdb. The SIGINT call also initializes GDB's * SIGINT sigaction. 
*/ SIGACTION(SIGINT, restart, &pc->sigaction, &pc->gdb_sigaction); SIGACTION(SIGPIPE, SIG_IGN, &pc->sigaction, NULL); if (!(pc->flags & DROP_CORE)) SIGACTION(SIGSEGV, restart, &pc->sigaction, NULL); /* * Set up pointers to gdb variables. */ #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) gdb_output_format = &output_format; gdb_print_max = &print_max; gdb_prettyprint_structs = &prettyprint_structs; gdb_prettyprint_arrays = &prettyprint_arrays; gdb_repeat_count_threshold = &repeat_count_threshold; gdb_stop_print_at_null = &stop_print_at_null; gdb_output_radix = &output_radix; #else gdb_output_format = (int *) gdb_user_print_option_address("output_format"); gdb_print_max = (unsigned int *) gdb_user_print_option_address("print_max"); gdb_prettyprint_structs = (unsigned char *) gdb_user_print_option_address("prettyprint_structs"); gdb_prettyprint_arrays = (unsigned char *) gdb_user_print_option_address("prettyprint_arrays"); gdb_repeat_count_threshold = (unsigned int *) gdb_user_print_option_address("repeat_count_threshold"); gdb_stop_print_at_null = (unsigned char *) gdb_user_print_option_address("stop_print_at_null"); gdb_output_radix = (unsigned int *) gdb_user_print_option_address("output_radix"); #endif /* * If the output radix is set via the --hex or --dec command line * option, then pc->output_radix will be non-zero; otherwise use * the gdb default. */ if (pc->output_radix) { *gdb_output_radix = pc->output_radix; *gdb_output_format = (*gdb_output_radix == 10) ? 
0 : 'x'; } switch (*gdb_output_radix) { case 10: case 16: pc->output_radix = *gdb_output_radix; break; default: pc->output_radix = *gdb_output_radix = 10; *gdb_output_format = 0; } *gdb_prettyprint_structs = 1; *gdb_repeat_count_threshold = 0x7fffffff; *gdb_print_max = 256; #ifdef GDB_5_3 gdb_disassemble_from_exec = 0; #endif pc->flags |= GDB_INIT; /* set here so gdb_interface will work */ req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->buf = GETBUF(BUFSIZE); /* * Make sure the namelist has symbolic data. Later versions of * gcc may require that debug data be pulled in by printing a * static kernel data structure. */ debug_data_pulled_in = FALSE; retry: BZERO(req->buf, BUFSIZE); req->command = GNU_GET_DATATYPE; req->name = XEN_HYPER_MODE() ? "page_info" : "task_struct"; req->flags = GNU_RETURN_ON_ERROR; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) { if (XEN_HYPER_MODE()) no_debugging_data(WARNING); /* just bail out */ if (!debug_data_pulled_in) { if (CRASHDEBUG(1)) error(INFO, "gdb_session_init: pulling in debug data by accessing init_mm.mmap %s\n", symbol_exists("sysfs_mount") ? "and syfs_mount" : ""); debug_data_pulled_in = TRUE; req->command = GNU_PASS_THROUGH; req->flags = GNU_RETURN_ON_ERROR|GNU_NO_READMEM; req->name = NULL; if (symbol_exists("sysfs_mount")) sprintf(req->buf, "print sysfs_mount, init_mm.mmap"); else sprintf(req->buf, "print init_mm.mmap"); gdb_interface(req); if (!(req->flags & GNU_COMMAND_FAILED)) goto retry; } no_debugging_data(WARNING); } if (pc->flags & KERNEL_DEBUG_QUERY) { fprintf(fp, "\n%s: %s: contains debugging data\n\n", pc->program_name, pc->namelist); if (REMOTE()) remote_exit(); clean_exit(0); } /* * Set up any pre-ordained gdb settings here that can't be * accessed directly. 
*/ req->command = GNU_PASS_THROUGH; req->name = NULL, req->flags = 0; sprintf(req->buf, "set height 0"); gdb_interface(req); req->command = GNU_PASS_THROUGH; req->name = NULL, req->flags = 0; sprintf(req->buf, "set width 0"); gdb_interface(req); #if defined(GDB_10_2) || defined(GDB_16_2) req->command = GNU_PASS_THROUGH; req->name = NULL, req->flags = 0; sprintf(req->buf, "set max-value-size unlimited"); gdb_interface(req); req->command = GNU_PASS_THROUGH; req->name = NULL, req->flags = 0; sprintf(req->buf, "set max-completions unlimited"); gdb_interface(req); #endif #if 0 /* * Patch gdb's symbol values with the correct values from either * the System.map or non-debug vmlinux, whichever is in effect. */ if ((pc->flags & SYSMAP) || (kt->flags & (RELOC_SET|RELOC_FORCE)) || (pc->namelist_debug && !pc->debuginfo_file)) { req->command = GNU_PATCH_SYMBOL_VALUES; req->flags = GNU_RETURN_ON_ERROR; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) error(FATAL, "patching of gdb symbol values failed\n"); } else if (!(pc->flags & SILENT)) #else if (!(pc->flags & SILENT)) #endif fprintf(fp, "\n"); FREEBUF(req->buf); FREEBUF(req); } /* * Quickest way to gdb -- just pass a command string to pass through. */ int gdb_pass_through(char *cmd, FILE *fptr, ulong flags) { struct gnu_request *req; int retval; if (CRASHDEBUG(1)) console("gdb_pass_through: [%s]\n", cmd); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->buf = cmd; if (fptr) req->fp = fptr; req->command = GNU_PASS_THROUGH; req->flags = flags; gdb_interface(req); if ((req->flags & (GNU_RETURN_ON_ERROR|GNU_COMMAND_FAILED)) == (GNU_RETURN_ON_ERROR|GNU_COMMAND_FAILED)) retval = FALSE; else retval = TRUE; FREEBUF(req); return retval; } /* * General purpose routine for passing commands to gdb. All gdb commands * come through here, where they are passed to gdb_command_funnel(). 
*/ void gdb_interface(struct gnu_request *req) { if (!(pc->flags & GDB_INIT)) error(FATAL, "gdb_interface: gdb not initialized?\n"); if (output_closed()) restart(0); if (!req->fp) { req->fp = ((pc->flags & RUNTIME) || (pc->flags2 & ALLOW_FP)) ? fp : CRASHDEBUG(1) ? fp : pc->nullfp; } pc->cur_req = req; pc->cur_gdb_cmd = req->command; if (CRASHDEBUG(2)) dump_gnu_request(req, IN_GDB); if (!(pc->flags & DROP_CORE)) SIGACTION(SIGSEGV, restart, &pc->sigaction, NULL); else SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL); if (interruptible()) { SIGACTION(SIGINT, pc->gdb_sigaction.sa_handler, &pc->gdb_sigaction, NULL); } else { SIGACTION(SIGINT, SIG_IGN, &pc->sigaction, NULL); SIGACTION(SIGPIPE, SIG_IGN, &pc->sigaction, NULL); } pc->flags |= IN_GDB; gdb_command_funnel(req); pc->flags &= ~IN_GDB; SIGACTION(SIGINT, restart, &pc->sigaction, NULL); SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL); if (req->flags & GNU_COMMAND_FAILED) gdb_error_debug(); if (CRASHDEBUG(2)) dump_gnu_request(req, !IN_GDB); pc->last_gdb_cmd = pc->cur_gdb_cmd; pc->cur_gdb_cmd = 0; pc->cur_req = NULL; } /* * help -g output */ void dump_gdb_data(void) { fprintf(fp, " prettyprint_arrays: %d\n", *gdb_prettyprint_arrays); fprintf(fp, " prettyprint_structs: %d\n", *gdb_prettyprint_structs); fprintf(fp, "repeat_count_threshold: %x\n", *gdb_repeat_count_threshold); fprintf(fp, " stop_print_at_null: %d\n", *gdb_stop_print_at_null); fprintf(fp, " print_max: %d\n", *gdb_print_max); fprintf(fp, " output_radix: %d\n", *gdb_output_radix); fprintf(fp, " output_format: "); switch (*gdb_output_format) { case 'x': fprintf(fp, "hex\n"); break; case 'o': fprintf(fp, "octal\n"); break; case 0: fprintf(fp, "decimal\n"); break; } } void dump_gnu_request(struct gnu_request *req, int in_gdb) { int others; char buf[BUFSIZE]; if (pc->flags & KERNEL_DEBUG_QUERY) return; console("%scommand: %d (%s)\n", in_gdb ? 
"GDB IN: " : "GDB OUT: ", req->command, gdb_command_string(req->command, buf, TRUE)); console("buf: %lx ", req->buf); if (req->buf && ascii_string(req->buf)) console(" \"%s\"", req->buf); console("\n"); console("fp: %lx ", req->fp); if (req->fp == pc->nullfp) console("(pc->nullfp) "); if (req->fp == pc->stdpipe) console("(pc->stdpipe) "); if (req->fp == pc->pipe) console("(pc->pipe) "); if (req->fp == pc->ofile) console("(pc->ofile) "); if (req->fp == pc->ifile) console("(pc->ifile) "); if (req->fp == pc->ifile_pipe) console("(pc->ifile_pipe) "); if (req->fp == pc->ifile_ofile) console("(pc->ifile_ofile) "); if (req->fp == pc->tmpfile) console("(pc->tmpfile) "); if (req->fp == pc->saved_fp) console("(pc->saved_fp) "); if (req->fp == pc->tmp_fp) console("(pc->tmp_fp) "); console("flags: %lx (", req->flags); others = 0; if (req->flags & GNU_PRINT_LINE_NUMBERS) console("%sGNU_PRINT_LINE_NUMBERS", others++ ? "|" : ""); if (req->flags & GNU_FUNCTION_ONLY) console("%sGNU_FUNCTION_ONLY", others++ ? "|" : ""); if (req->flags & GNU_PRINT_ENUMERATORS) console("%sGNU_PRINT_ENUMERATORS", others++ ? "|" : ""); if (req->flags & GNU_RETURN_ON_ERROR) console("%sGNU_RETURN_ON_ERROR", others++ ? "|" : ""); if (req->flags & GNU_FROM_TTY_OFF) console("%sGNU_FROM_TTY_OFF", others++ ? "|" : ""); if (req->flags & GNU_NO_READMEM) console("%sGNU_NO_READMEM", others++ ? "|" : ""); if (req->flags & GNU_VAR_LENGTH_TYPECODE) console("%sGNU_VAR_LENGTH_TYPECODE", others++ ? 
"|" : ""); console(")\n"); console("addr: %lx ", req->addr); console("addr2: %lx ", req->addr2); console("count: %ld\n", req->count); if ((ulong)req->name > (ulong)PATCH_KERNEL_SYMBOLS_STOP) console("name: \"%s\" ", req->name); else console("name: %lx ", (ulong)req->name); console("length: %ld ", req->length); console("typecode: %d\n", req->typecode); #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) console("typename: %s\n", req->typename); #else console("type_name: %s\n", req->type_name); #endif console("target_typename: %s\n", req->target_typename); console("target_length: %ld ", req->target_length); console("target_typecode: %d ", req->target_typecode); console("is_typedef: %d ", req->is_typedef); console("member: \"%s\" ", req->member); console("member_offset: %ld\n", req->member_offset); console("member_length: %ld\n", req->member_length); console("member_typecode: %d\n", req->member_typecode); console("member_main_type_name: %s\n", req->member_main_type_name); console("member_main_type_tag_name: %s\n", req->member_main_type_tag_name); console("member_target_type_name: %s\n", req->member_target_type_name); console("member_target_type_tag_name: %s\n", req->member_target_type_tag_name); console("value: %lx ", req->value); console("tagname: \"%s\" ", req->tagname); console("pc: %lx ", req->pc); if (is_kernel_text(req->pc)) console("(%s)", value_to_symstr(req->pc, buf, 0)); console("\n"); console("sp: %lx ", req->sp); console("ra: %lx ", req->ra); console("frame: %ld ", req->frame); console("prevsp: %lx\n", req->prevsp); console("prevpc: %lx ", req->prevpc); console("lastsp: %lx ", req->lastsp); console("task: %lx ", req->task); console("debug: %lx\n", req->debug); console("\n"); } char * gdb_command_string(int cmd, char *buf, int live) { switch (cmd) { case GNU_PASS_THROUGH: sprintf(buf, "GNU_PASS_THROUGH"); break; case GNU_DATATYPE_INIT: sprintf(buf, "GNU_DATATYPE_INIT"); break; case GNU_DISASSEMBLE: sprintf(buf, 
"GNU_DISASSEMBLE"); break; case GNU_GET_LINE_NUMBER: sprintf(buf, "GNU_GET_LINE_NUMBER"); break; case GNU_GET_DATATYPE: if (live) sprintf(buf, "GNU_GET_DATATYPE[%s]", pc->cur_req->name ? pc->cur_req->name : "?"); else sprintf(buf, "GNU_GET_DATATYPE"); break; case GNU_STACK_TRACE: sprintf(buf, "GNU_STACK_TRACE"); break; case GNU_ALPHA_FRAME_OFFSET: sprintf(buf, "GNU_ALPHA_FRAME_OFFSET"); break; case GNU_COMMAND_EXISTS: sprintf(buf, "GNU_COMMAND_EXISTS"); break; case GNU_FUNCTION_NUMARGS: sprintf(buf, "GNU_FUNCTION_NUMARGS"); break; case GNU_RESOLVE_TEXT_ADDR: sprintf(buf, "GNU_RESOLVE_TEXT_ADDR"); break; case GNU_DEBUG_COMMAND: sprintf(buf, "GNU_DEBUG_COMMAND"); break; case GNU_ADD_SYMBOL_FILE: sprintf(buf, "GNU_ADD_SYMBOL_FILE"); break; case GNU_DELETE_SYMBOL_FILE: sprintf(buf, "GNU_DELETE_SYMBOL_FILE"); break; case GNU_VERSION: sprintf(buf, "GNU_VERSION"); break; case GNU_GET_SYMBOL_TYPE: sprintf(buf, "GNU_GET_SYMBOL_TYPE"); break; case GNU_PATCH_SYMBOL_VALUES: sprintf(buf, "GNU_PATCH_SYMBOL_VALUES"); break; case GNU_USER_PRINT_OPTION: sprintf(buf, "GNU_USER_PRINT_OPTION"); break; case GNU_SET_CRASH_BLOCK: sprintf(buf, "GNU_SET_CRASH_BLOCK"); break; case GNU_GET_FUNCTION_RANGE: sprintf(buf, "GNU_GET_FUNCTION_RANGE"); break; case 0: buf[0] = NULLCHAR; break; default: sprintf(buf, "(?)\n"); break; } return buf; } /* * Restore known gdb state. */ void restore_gdb_sanity(void) { if (!(pc->flags & GDB_INIT)) return; if (pc->output_radix) { *gdb_output_radix = pc->output_radix; *gdb_output_format = (*gdb_output_radix == 10) ? 0 : 'x'; } *gdb_prettyprint_structs = 1; /* these may piss somebody off... 
*/ *gdb_repeat_count_threshold = 0x7fffffff; if (st->flags & ADD_SYMBOL_FILE) { error(INFO, "%s\n gdb add-symbol-file command failed\n", st->current->mod_namelist); delete_load_module(st->current->mod_base); st->flags &= ~ADD_SYMBOL_FILE; } if (pc->cur_gdb_cmd) { pc->last_gdb_cmd = pc->cur_gdb_cmd; pc->cur_gdb_cmd = 0; } } /* * Check whether string in args[0] is a valid gdb command. */ int is_gdb_command(int merge_orig_args, ulong flags) { int retval; struct gnu_request *req; if (!args[0]) return FALSE; if (STREQ(args[0], "Q")) { args[0] = "q"; return TRUE; } if (is_restricted_command(args[0], flags)) return FALSE; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->buf = GETBUF(strlen(args[0])+1); req->command = GNU_COMMAND_EXISTS; req->name = args[0]; req->flags = GNU_RETURN_ON_ERROR; req->fp = pc->nullfp; gdb_interface(req); if (req->flags & GNU_COMMAND_FAILED) retval = FALSE; else retval = req->value; FREEBUF(req->buf); FREEBUF(req); if (retval && merge_orig_args) { int i; for (i = argcnt; i; i--) args[i] = args[i-1]; args[0] = "gdb"; argcnt++; } return retval; } /* * Check whether a command is on the gdb-prohibited list. 
*/ static char *prohibited_list[] = { "run", "r", "break", "b", "tbreak", "hbreak", "thbreak", "rbreak", "watch", "rwatch", "awatch", "attach", "continue", "c", "fg", "detach", "finish", "handle", "interrupt", "jump", "kill", "next", "nexti", "signal", "step", "s", "stepi", "target", "until", "delete", "clear", "disable", "enable", "condition", "ignore", "catch", "tcatch", "return", "file", "exec-file", "core-file", "symbol-file", "load", "si", "ni", "shell", "sy", NULL /* must be last */ }; static char *restricted_list[] = { "define", "document", "while", "if", NULL /* must be last */ }; #define RESTRICTED_GDB_COMMAND \ "restricted gdb command: %s\n%s\"%s\" may only be used in a .gdbinit file or in a command file.\n%sThe .gdbinit file is read automatically during %s initialization.\n%sOther user-defined command files may be read interactively during\n%s%s runtime by using the gdb \"source\" command.\n" static int is_restricted_command(char *cmd, ulong flags) { int i; char *newline; for (i = 0; prohibited_list[i]; i++) { if (STREQ(prohibited_list[i], cmd)) { if (flags == RETURN_ON_ERROR) return TRUE; pc->curcmd = pc->program_name; error(FATAL, "prohibited gdb command: %s\n", cmd); } } for (i = 0; restricted_list[i]; i++) { if (STREQ(restricted_list[i], cmd)) { if (flags == RETURN_ON_ERROR) return TRUE; newline = space(strlen(pc->program_name)+2); pc->curcmd = pc->program_name; error(FATAL, RESTRICTED_GDB_COMMAND, cmd, newline, cmd, newline, pc->program_name, newline, newline, pc->program_name); } } return FALSE; } /* * Remove pipe/redirection stuff from the end of the command line. */ static void strip_redirection(char *buf) { char *p1, *p2; p1 = strstr_rightmost(buf, args[argcnt-1]); p2 = p1 + strlen(args[argcnt-1]); console("strip_redirection: [%s]\n", p2); if ((p1 = strpbrk(p2, "|!>"))) *p1 = NULLCHAR; strip_ending_whitespace(buf); } /* * Command for passing strings directly to gdb. 
*/ void cmd_gdb(void) { char buf[BUFSIZE]; char **argv; argv = STREQ(args[0], "gdb") ? &args[1] : &args[0]; if (*argv == NULL) cmd_usage(pc->curcmd, SYNOPSIS); if (STREQ(*argv, "set") && argv[1]) { /* * Intercept set commands in case something has to be done * here or elsewhere. */ if (STREQ(argv[1], "gdb")) { cmd_set(); return; } if (STREQ(argv[1], "output-radix") && argv[2]) pc->output_radix = stol(argv[2], FAULT_ON_ERROR, NULL); } /* * If the command is not restricted, pass it on. */ if (!is_restricted_command(*argv, FAULT_ON_ERROR)) { if (STREQ(pc->command_line, "gdb")) { strcpy(buf, first_space(pc->orig_line)); strip_beginning_whitespace(buf); } else strcpy(buf, pc->orig_line); if (pc->redirect & (REDIRECT_TO_FILE|REDIRECT_TO_PIPE)) strip_redirection(buf); if (!gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR)) error(INFO, "gdb request failed: %s\n", buf); } } /* * The gdb target_xfer_memory() has a hook installed to re-route * all memory accesses back here; reads of 1 or 4 bytes come primarily * from text disassembly requests, and are diverted to the text cache. */ int gdb_readmem_callback(ulong addr, void *buf, int len, int write) { char locbuf[SIZEOF_32BIT], *p1; int memtype; ulong readflags; if (write) return FALSE; if (!(pc->cur_req)) { return(readmem(addr, KVADDR, buf, len, "gdb_readmem_callback", RETURN_ON_ERROR)); } if (pc->cur_req->flags & GNU_NO_READMEM) return TRUE; readflags = pc->curcmd_flags & PARTIAL_READ_OK ? 
RETURN_ON_ERROR|RETURN_PARTIAL : RETURN_ON_ERROR; if (STREQ(pc->curcmd, "bpf") && pc->curcmd_private && (addr > (ulong)pc->curcmd_private)) readflags |= QUIET; if (pc->curcmd_flags & MEMTYPE_UVADDR) memtype = UVADDR; else if (pc->curcmd_flags & MEMTYPE_FILEADDR) memtype = FILEADDR; else if (!IS_KVADDR(addr)) { if (STREQ(pc->curcmd, "gdb") && STRNEQ(pc->cur_req->buf, "x/")) { memtype = UVADDR; } else { if (CRASHDEBUG(1)) console("gdb_readmem_callback: %lx %d FAILED\n", addr, len); return FALSE; } } else memtype = KVADDR; if (CRASHDEBUG(1)) console("gdb_readmem_callback[%d]: %lx %d\n", memtype, addr, len); if (memtype == FILEADDR) return(readmem(pc->curcmd_private, memtype, buf, len, "gdb_readmem_callback", readflags)); switch (len) { case SIZEOF_8BIT: if (STREQ(pc->curcmd, "bt")) { if (readmem(addr, memtype, buf, SIZEOF_8BIT, "gdb_readmem_callback", readflags)) return TRUE; } p1 = (char *)buf; if (!readmem(addr, memtype, locbuf, SIZEOF_32BIT, "gdb_readmem_callback", readflags)) return FALSE; *p1 = locbuf[0]; return TRUE; case SIZEOF_32BIT: if (STREQ(pc->curcmd, "bt")) { if (readmem(addr, memtype, buf, SIZEOF_32BIT, "gdb_readmem_callback", readflags)) return TRUE; } if (!readmem(addr, memtype, buf, SIZEOF_32BIT, "gdb_readmem callback", readflags)) return FALSE; return TRUE; } return(readmem(addr, memtype, buf, len, "gdb_readmem_callback", readflags)); } /* * Machine-specific line-number pc section range verifier. */ int gdb_line_number_callback(ulong pc, ulong low, ulong high) { if (machdep->verify_line_number) return machdep->verify_line_number(pc, low, high); return TRUE; } /* * Prevent gdb from trying to translate and print pointers * that are not kernel virtual addresses. 
*/ int gdb_print_callback(ulong addr) { if (!addr) return FALSE; else return IS_KVADDR(addr); } char * gdb_lookup_module_symbol(ulong addr, ulong *offset) { struct syment *sp; if ((sp = value_search_module(addr, offset))) { return sp->name; } else { return NULL; } } int is_kvaddr(ulong addr) { return IS_KVADDR(addr); } /* * Used by gdb_interface() to catch gdb-related errors, if desired. */ static void gdb_error_debug(void) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; int buffers; if (CRASHDEBUG(2)) { sprintf(buf2, "\n"); if (CRASHDEBUG(5) && (buffers = get_embedded())) sprintf(buf2, "(%d buffer%s in use)\n", buffers, buffers > 1 ? "s" : ""); fprintf(stderr, "%s: returned via gdb_error_hook %s", gdb_command_string(pc->cur_gdb_cmd, buf1, TRUE), buf2); console("%s: returned via gdb_error_hook %s", gdb_command_string(pc->cur_gdb_cmd, buf1, TRUE), buf2); } } /* * gdb callback to access debug mode. */ int gdb_CRASHDEBUG(ulong dval) { if (CRASHDEBUG(dval)) return TRUE; return (pc->cur_req && (pc->cur_req->debug >= dval)); } static ulong gdb_user_print_option_address(char *name) { struct gnu_request request; request.command = GNU_USER_PRINT_OPTION; request.name = name; gdb_command_funnel(&request); return request.addr; } /* * Try to set a crash scope block based upon the vaddr. 
*/ int gdb_set_crash_scope(ulong vaddr, char *arg) { struct gnu_request request, *req = &request; char name[BUFSIZE]; struct load_module *lm; if (vaddr) { if (!is_kernel_text(vaddr)) { error(INFO, "invalid text address: %s\n", arg); return FALSE; } if (module_symbol(vaddr, NULL, &lm, name, 0)) { if (!(lm->mod_flags & MOD_LOAD_SYMS)) { error(INFO, "attempting to find/load \"%s\" module debuginfo\n", lm->mod_name); if (!load_module_symbols_helper(lm->mod_name)) { error(INFO, "cannot find/load \"%s\" module debuginfo\n", lm->mod_name); return FALSE; } } } } req->command = GNU_SET_CRASH_BLOCK; req->addr = vaddr; req->flags = 0; req->addr2 = 0; req->fp = pc->nullfp; gdb_command_funnel(req); if (CRASHDEBUG(1)) fprintf(fp, "gdb_set_crash_scope: %s addr: %lx block: %lx\n", req->flags & GNU_COMMAND_FAILED ? "FAILED" : "OK", req->addr, req->addr2); if (req->flags & GNU_COMMAND_FAILED) { error(INFO, "gdb cannot find text block for address: %s\n", arg); return FALSE; } return TRUE; } #ifndef ALPHA /* * Stub routine needed for resolution by non-alpha, modified gdb code. 
*/
/*
 * On non-alpha systems this must never be called; it exists only so the
 * modified gdb code links.
 */
int
get_frame_offset(ulong pc)
{
	return (error(FATAL,
		"get_frame_offset: invalid request for non-alpha systems!\n"));
}
#endif /* !ALPHA */

/*
 * Report the KASLR slide to gdb: the negation of the relocation amount
 * recorded in kt->relocate.
 */
unsigned long crash_get_kaslr_offset(void);
unsigned long crash_get_kaslr_offset(void)
{
	return kt->relocate * -1;
}

/* Callbacks for crash_target */
/*
 * Fetch a register of the current task for gdb; returns FALSE when the
 * active machdep provides no get_current_task_reg handler.
 */
int crash_get_current_task_reg (int regno, const char *regname, int regsize, void *value, int sid);
int crash_get_current_task_reg (int regno, const char *regname, int regsize, void *value, int sid)
{
	if (!machdep->get_current_task_reg)
		return FALSE;

	return machdep->get_current_task_reg(regno, regname, regsize, value, sid);
}

/* arm64 kernel lr may carry a pointer-authentication (pauth) signature */
#ifdef ARM64
void crash_decode_ptrauth_pc(ulong *pc);
/*
 * If forcing the PAC mask bits on yields a kernel text address, the
 * stored pc was PAC-signed: strip the signature by OR-ing the mask in.
 */
void crash_decode_ptrauth_pc(ulong *pc)
{
	struct machine_specific *ms = machdep->machspec;

	if (is_kernel_text(*pc | ms->CONFIG_ARM64_KERNELPACMASK))
		*pc |= ms->CONFIG_ARM64_KERNELPACMASK;
}
#endif /* ARM64 */
crash-utility-crash-9cd43f5/cmdline.c0000664000372000037200000017343515107550337017164 0ustar juerghjuergh/* cmdline.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2015,2019 David Anderson
 * Copyright (C) 2002-2015,2019 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/

#include "defs.h"

static void restore_sanity(void);
static void restore_ifile_sanity(void);
static int pseudo_command(char *);
static void check_special_handling(char *);
static int is_executable_in_PATH(char *);
static int is_shell_script(char *);
static void list_aliases(char *);
static int allocate_alias(int);
static int alias_exists(char *);
static void resolve_aliases(void);
static int setup_redirect(int);
int multiple_pipes(char **);
static int output_command_to_pids(void);
static void set_my_tty(void);
static char *signame(int);
static int setup_stdpipe(void);
static void wait_for_children(ulong);
#define ZOMBIES_ONLY (1)
#define ALL_CHILDREN (2)
int shell_command(char *);
static void modify_orig_line(char *, struct args_input_file *);
static void modify_expression_arg(char *, char **, struct args_input_file *);
static int verify_args_input_file(char *);
static char *crash_readline_completion_generator(const char *, int);
static char **crash_readline_completer(const char *, int, int);

#define READLINE_LIBRARY

/* NOTE(extraction): the three angle-bracketed readline header names below
 * were stripped when this chunk was extracted; restore them from the
 * upstream source — TODO confirm (presumably readline/history headers). */
#include
#include
#include

static void readline_init(void);

static struct alias_data alias_head = { 0 };

/*
 * Top of the command loop: read the next command line into
 * pc->command_line, resolve aliases, set up output redirection, and
 * populate the global argcnt/args[] for the command dispatcher.
 */
void
process_command_line(void)
{
	/*
	 * Restore normal environment, clearing out any excess baggage
	 * piled up by the previous command.
	 */
	restore_sanity();
	fp = stdout;
	BZERO(pc->command_line, BUFSIZE);

	/* Only print a prompt when input is non-interactive and not
	 * coming from any rc/input file. */
	if (!pc->ifile_in_progress && !(pc->flags &
	    (TTY|SILENT|CMDLINE_IFILE|RCHOME_IFILE|RCLOCAL_IFILE)))
		fprintf(fp, "%s", pc->prompt);
	fflush(fp);

	/*
	 * Input can come from five possible sources:
	 *
	 *  1. an .rc file located in the user's HOME directory.
	 *  2. an .rc file located in the current directory.
	 *  3. an input file that was designated by the -i flag at
	 *     program invocation.
	 *  4. from a terminal.
	 *  5. from a pipe, if stdin is a pipe rather than a terminal.
	 *
	 * But first, handle the interruption of an input file caused
	 * by a FATAL error in one of its commands.
	 */
	if (pc->ifile_in_progress) {
		switch (pc->ifile_in_progress)
		{
		case RCHOME_IFILE:
			pc->flags |= INIT_IFILE|RCHOME_IFILE;
			sprintf(pc->command_line, "< %s/.%src",
				pc->home, pc->program_name);
			break;
		case RCLOCAL_IFILE:
			sprintf(pc->command_line, "< .%src", pc->program_name);
			pc->flags |= INIT_IFILE|RCLOCAL_IFILE;
			break;
		case CMDLINE_IFILE:
			sprintf(pc->command_line, "< %s", pc->input_file);
			pc->flags |= INIT_IFILE|CMDLINE_IFILE;
			break;
		case RUNTIME_IFILE:
			sprintf(pc->command_line, "%s", pc->runtime_ifile_cmd);
			pc->flags |= IFILE_ERROR;
			break;
		default:
			error(FATAL, "invalid input file\n");
		}
	} else if (pc->flags & RCHOME_IFILE) {
		sprintf(pc->command_line, "< %s/.%src",
			pc->home, pc->program_name);
		pc->flags |= INIT_IFILE;
	} else if (pc->flags & RCLOCAL_IFILE) {
		sprintf(pc->command_line, "< .%src", pc->program_name);
		pc->flags |= INIT_IFILE;
	} else if (pc->flags & CMDLINE_IFILE) {
		sprintf(pc->command_line, "< %s", pc->input_file);
		pc->flags |= INIT_IFILE;
	} else if (pc->flags & TTY) {
		/* Interactive: readline() allocates; a NULL return is EOF. */
		if (!(pc->readline = readline(pc->prompt))) {
			args[0] = NULL;
			fprintf(fp, "\n");
			return;
		}
		if (strlen(pc->readline) >= BUFSIZE)
			error(FATAL,
			    "input line exceeds maximum of 1500 bytes\n");
		else
			strcpy(pc->command_line, pc->readline);
		free(pc->readline);

		clean_line(pc->command_line);
		/* "r"/"!"/"h" repeat and history handling may rewrite
		 * the command line in place. */
		pseudo_command(pc->command_line);
		strcpy(pc->orig_line, pc->command_line);
		if (strlen(pc->command_line) &&
		    !iscntrl(pc->command_line[0]))
			add_history(pc->command_line);
		check_special_handling(pc->command_line);
	} else {
		/* stdin is a pipe: echo each line unless SILENT. */
		if (fgets(pc->command_line, BUFSIZE-1, stdin) == NULL)
			clean_exit(1);
		if (!(pc->flags & SILENT)) {
			fprintf(fp, "%s", pc->command_line);
			fflush(fp);
		}
		clean_line(pc->command_line);
		strcpy(pc->orig_line, pc->command_line);
	}

	/*
	 * First clean out all linefeeds and leading/trailing spaces.
	 * Then substitute aliases for the real thing they represent.
	 */
	clean_line(pc->command_line);
	resolve_aliases();

	/*
	 * Setup output redirection based upon the command line itself or
	 * based upon the default scrolling behavior, if any.
	 */
	switch (setup_redirect(FROM_COMMAND_LINE))
	{
	case REDIRECT_NOT_DONE:
	case REDIRECT_TO_STDPIPE:
	case REDIRECT_TO_PIPE:
	case REDIRECT_TO_FILE:
		break;

	case REDIRECT_SHELL_ESCAPE:
	case REDIRECT_SHELL_COMMAND:
	case REDIRECT_FAILURE:
		RESTART();
		break;
	}

	/*
	 * Setup the global argcnt and args[] array for use by everybody
	 * during the life of this command.
	 */
	argcnt = parse_line(pc->command_line, args);
}

/*
 * Allow input file redirection without having to put a space between
 * the < and the filename.  Allow the "pointer-to" asterisk to "touch"
 * the structure/union name.
 */
static void
check_special_handling(char *s)
{
	char local[BUFSIZE];

	strcpy(local, s);

	if ((local[0] == '*') && (!whitespace(local[1]))) {
		sprintf(s, "* %s", &local[1]);
		return;
	}

	if ((local[0] == '<') && (!whitespace(local[1]))) {
		sprintf(s, "< %s", &local[1]);
		return;
	}
}

/*
 * Return TRUE if filename resolves to an executable file via any
 * component of the PATH environment variable.
 */
static int
is_executable_in_PATH(char *filename)
{
	char *buf1, *buf2;
	char *tok, *path;
	int retval;

	if ((path = getenv("PATH"))) {
		buf1 = GETBUF(strlen(path)+1);
		buf2 = GETBUF(strlen(path)+1);
		strcpy(buf2, path);
	} else
		return FALSE;

	retval = FALSE;
	tok = strtok(buf2, ":");
	while (tok) {
		sprintf(buf1, "%s/%s", tok, filename);
		if (file_exists(buf1, NULL) && (access(buf1, X_OK) == 0)) {
			retval = TRUE;
			break;
		}
		tok = strtok(NULL, ":");
	}

	FREEBUF(buf1);
	FREEBUF(buf2);

	return retval;
}

/*
 * At this point the only pseudo commands are the "r" (repeat) and
 * the "h" (history) command:
 *
 *  1. an "r" alone, or "!!" alone, just means repeat the last command.
 *  2. an "r" followed by a number, means repeat that command from the
 *     history table.
 *  3. an "!" followed by a number that is not the name of a command
 *     in the user's PATH, means repeat that command from the history table.
 *  4.
 an "r" followed by one or more non-decimal characters means to
 *     seek back until a line-beginning match is found.
 *  5. an "h" alone, or a string beginning with "hi", means history.
 */
static int
pseudo_command(char *input)
{
	int i;
	HIST_ENTRY *entry;
	int idx, found;
	char *p;

	clean_line(input);

	/*
	 * Just dump all commands that have been entered to date.
	 */
	if (STREQ(input, "h") || STRNEQ(input, "hi")) {
		dump_history();
		pc->command_line[0] = NULLCHAR;
		return TRUE;
	}

	/* "r" or "!!": replay the most recent history entry. */
	if (STREQ(input, "r") || STREQ(input, "!!")) {
		if (!history_offset)
			error(FATAL, "no commands entered!\n");
		entry = history_get(history_offset);
		strcpy(input, entry->line);
		fprintf(fp, "%s%s\n", pc->prompt, input);
		return TRUE;
	}

	/* "rN": replay history entry N. */
	if ((input[0] == 'r') && decimal(&input[1], 0)) {
		if (!history_offset)
			error(FATAL, "no commands entered!\n");
		p = &input[1];
		goto rerun;
	}

	/* "!N": replay entry N, unless "N" names an executable in PATH
	 * (in which case it is a real shell escape, not a repeat). */
	if ((input[0] == '!') && decimal(&input[1], 0) &&
	    !is_executable_in_PATH(first_nonspace(&input[1]))) {
		p = first_nonspace(&input[1]);
		goto rerun;
	}

	/* "r <arg>": numeric arg replays that entry; otherwise search
	 * backwards for the most recent command starting with <arg>. */
	if (STRNEQ(input, "r ")) {
		if (!history_offset)
			error(FATAL, "no commands entered!\n");
		p = first_nonspace(&input[1]);
rerun:
		if (decimal(p, 0)) {
			idx = atoi(p);
			if (idx == 0)
				goto invalid_repeat_request;
			if (idx > history_offset)
				error(FATAL,
				    "command %d not entered yet!\n", idx);
			entry = history_get(idx);
			strcpy(input, entry->line);
			fprintf(fp, "%s%s\n", pc->prompt, input);
			return TRUE;
		}
		/* NOTE(review): idx is dead after this assignment --
		 * only "found" controls the result of the search. */
		idx = -1;
		found = FALSE;
		for (i = history_offset; i > 0; i--) {
			entry = history_get(i);
			if (STRNEQ(entry->line, p)) {
				found = TRUE;
				break;
			}
		}
		if (found) {
			strcpy(input, entry->line);
			fprintf(fp, "%s%s\n", pc->prompt, input);
			return TRUE;
		}
invalid_repeat_request:
		fprintf(fp, "invalid repeat request: %s\n", input);
		strcpy(input, "");
		return TRUE;
	}

	return FALSE;
}

/*
 * Dump the history table in first-to-last chronological order.
*/ void dump_history(void) { int i; HIST_ENTRY **the_history; HIST_ENTRY *entry; if (!history_offset) error(FATAL, "no commands entered!\n"); the_history = history_list(); for (i = 0; i < history_offset; i++) { entry = the_history[i]; fprintf(fp, "[%d] %s\n", i+1, entry->line); } } /* * Pager arguments. */ static char *less_argv[5] = { "/usr/bin/less", "-E", "-X", "-Ps -- MORE -- forward\\: , or j backward\\: b or k quit\\: q", NULL }; static char *more_argv[2] = { "/bin/more", NULL }; static char **CRASHPAGER_argv = NULL; int CRASHPAGER_valid(void) { int i, c; char *env, *CRASHPAGER_buf; char *arglist[MAXARGS]; if (CRASHPAGER_argv) return TRUE; if (!(env = getenv("CRASHPAGER"))) return FALSE; if (strstr(env, "|") || strstr(env, "<") || strstr(env, ">")) { error(INFO, "CRASHPAGER ignored: contains invalid character: \"%s\"\n", env); return FALSE; } if ((CRASHPAGER_buf = (char *)malloc(strlen(env)+1)) == NULL) return FALSE; strcpy(CRASHPAGER_buf, env); if (!(c = parse_line(CRASHPAGER_buf, arglist)) || !file_exists(arglist[0], NULL) || access(arglist[0], X_OK) || !(CRASHPAGER_argv = (char **)malloc(sizeof(char *) * (c+1)))) { free(CRASHPAGER_buf); if (strlen(env)) error(INFO, "CRASHPAGER ignored: \"%s\"\n", env); return FALSE; } for (i = 0; i < c; i++) CRASHPAGER_argv[i] = arglist[i]; CRASHPAGER_argv[i] = NULL; return TRUE; } /* * Set up a command string buffer for error/help output. */ char * setup_scroll_command(void) { char *buf; long i, len; if (!(pc->flags & SCROLL)) return NULL; switch (pc->scroll_command) { case SCROLL_LESS: buf = GETBUF(strlen(less_argv[0])+1); strcpy(buf, less_argv[0]); break; case SCROLL_MORE: buf = GETBUF(strlen(more_argv[0])+1); strcpy(buf, more_argv[0]); break; case SCROLL_CRASHPAGER: for (i = len = 0; CRASHPAGER_argv[i]; i++) len += strlen(CRASHPAGER_argv[i])+1; buf = GETBUF(len); for (i = 0; CRASHPAGER_argv[i]; i++) { sprintf(&buf[strlen(buf)], "%s%s", i ? 
" " : "", CRASHPAGER_argv[i]); } break; default: return NULL; } return buf; } /* * Parse the command line for pipe or redirect characters: * * 1. if a "|" character is found, popen() what comes after it, and * modify the contents of the global "fp" FILE pointer. * 2. if one or two ">" characters are found, fopen() the filename that * follows, and modify the contents of the global "fp" FILE pointer. * * Care is taken to segregate: * * 1. expressions encompassed by parentheses, or * 2. strings encompassed by single or double quotation marks * * When either of the above are in affect, no redirection is done. * * Lastly, if no redirection is requested by the user on the command line, * output is passed to the default scrolling command, which is popen()'d * and again, the contents of the global "fp" FILE pointer is modified. * This default behavior is not performed if the command is coming from * an input file, nor if scrolling has been turned off. */ static int setup_redirect(int origin) { char *p, which; int append; int expression; int string; int ret ATTRIBUTE_UNUSED; FILE *pipe; FILE *ofile; pc->redirect = origin; pc->eoc_index = 0; p = pc->command_line; if (STREQ(p, "|") || STREQ(p, "!")) { ret = system("/bin/sh"); pc->redirect |= REDIRECT_SHELL_ESCAPE; return REDIRECT_SHELL_ESCAPE; } if (FIRSTCHAR(p) == '|' || FIRSTCHAR(p) == '!') pc->redirect |= REDIRECT_SHELL_COMMAND; expression = 0; string = FALSE; while (*p) { if (*p == '(') expression++; if (*p == ')') expression--; if ((*p == '"') || (*p == '\'')) string = !string; if (!(expression || string) && ((*p == '|') || (*p == '!'))) { which = *p; *p = NULLCHAR; pc->eoc_index = p - pc->command_line; p++; p = strip_beginning_whitespace(p); if (!strlen(p)) { error(INFO, "no shell command after '%c'\n", which); pc->redirect |= REDIRECT_FAILURE; return REDIRECT_FAILURE; } if (LASTCHAR(p) == '|') error(FATAL_RESTART, "pipe to nowhere?\n"); if (pc->redirect & REDIRECT_SHELL_COMMAND) return shell_command(p); if ((pipe = 
popen(p, "w")) == NULL) { error(INFO, "cannot open pipe\n"); pc->redirect |= REDIRECT_FAILURE; return REDIRECT_FAILURE; } setbuf(pipe, NULL); switch (origin) { case FROM_COMMAND_LINE: fp = pc->pipe = pipe; break; case FROM_INPUT_FILE: fp = pc->ifile_pipe = pipe; break; } if (multiple_pipes(&p)) pc->redirect |= REDIRECT_MULTI_PIPE; strcpy(pc->pipe_command, p); null_first_space(pc->pipe_command); pc->redirect |= REDIRECT_TO_PIPE; if (!(pc->redirect & REDIRECT_SHELL_COMMAND)) { if ((pc->pipe_pid = output_command_to_pids())) pc->redirect |= REDIRECT_PID_KNOWN; else error(FATAL_RESTART, "pipe operation failed\n"); } return REDIRECT_TO_PIPE; } if (!(expression || string) && (*p == '>') && !((p > pc->command_line) && (*(p-1) == '-'))) { append = FALSE; *p = NULLCHAR; pc->eoc_index = p - pc->command_line; if (*(p+1) == '>') { append = TRUE; *p = NULLCHAR; p++; } p++; p = strip_beginning_whitespace(p); if (!strlen(p)) { error(INFO, "no file name after %s\n", append ? ">>" : ">"); pc->redirect |= REDIRECT_FAILURE; return REDIRECT_FAILURE; } if (pc->flags & IFILE_ERROR) append = TRUE; if ((ofile = fopen(p, append ? 
"a+" : "w+")) == NULL) { error(INFO, "unable to open %s\n", p); pc->redirect = REDIRECT_FAILURE; return REDIRECT_FAILURE; } setbuf(ofile, NULL); switch (origin) { case FROM_COMMAND_LINE: fp = pc->ofile = ofile; break; case FROM_INPUT_FILE: fp = pc->ifile_ofile = ofile; break; } pc->redirect |= REDIRECT_TO_FILE; return REDIRECT_TO_FILE; } p++; } if ((origin == FROM_COMMAND_LINE) && (pc->flags & TTY) && (pc->flags & SCROLL) && pc->scroll_command) { if (!strlen(pc->command_line) || STREQ(pc->command_line, "q") || STREQ(pc->command_line, "Q") || STREQ(pc->command_line, "exit") || STRNEQ(pc->command_line, "<")) { pc->redirect |= REDIRECT_NOT_DONE; return REDIRECT_NOT_DONE; } if (!setup_stdpipe()) { error(INFO, "cannot open pipe\n"); pc->redirect |= REDIRECT_FAILURE; return REDIRECT_FAILURE; } fp = pc->stdpipe; pc->redirect |= REDIRECT_TO_STDPIPE; switch (pc->scroll_command) { case SCROLL_LESS: strcpy(pc->pipe_command, less_argv[0]); break; case SCROLL_MORE: strcpy(pc->pipe_command, more_argv[0]); break; case SCROLL_CRASHPAGER: strcpy(pc->pipe_command, CRASHPAGER_argv[0]); break; } return REDIRECT_TO_STDPIPE; } pc->redirect |= REDIRECT_NOT_DONE; return REDIRECT_NOT_DONE; } /* * Find the last command in an input line that possibly contains * multiple pipes. */ int multiple_pipes(char **input) { char *p, *found; int quote; found = NULL; quote = FALSE; for (p = *input; *p; p++) { if ((*p == '\'') || (*p == '"')) { quote = !quote; continue; } else if (quote) continue; if (*p == '|') { if (STRNEQ(p, "||")) break; found = first_nonspace(p+1); } } if (found) { *input = found; return TRUE; } else return FALSE; } void debug_redirect(char *s) { int others; int alive; others = 0; console("%s: (", s); if (pc->redirect & FROM_COMMAND_LINE) console("%sFROM_COMMAND_LINE", others++ ? "|" : ""); if (pc->redirect & FROM_INPUT_FILE) console("%sFROM_INPUT_FILE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_NOT_DONE) console("%sREDIRECT_NOT_DONE", others++ ? 
"|" : ""); if (pc->redirect & REDIRECT_TO_PIPE) console("%sREDIRECT_TO_PIPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_TO_STDPIPE) console("%sREDIRECT_TO_STDPIPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_TO_FILE) console("%sREDIRECT_TO_FILE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_FAILURE) console("%sREDIRECT_FAILURE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_SHELL_ESCAPE) console("%sREDIRECT_SHELL_ESCAPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_SHELL_COMMAND) console("%sREDIRECT_SHELL_COMMAND", others++ ? "|" : ""); if (pc->redirect & REDIRECT_PID_KNOWN) console("%sREDIRECT_PID_KNOWN", others++ ? "|" : ""); if (pc->redirect & REDIRECT_MULTI_PIPE) console("%sREDIRECT_MULTI_PIPE", others++ ? "|" : ""); console(")\n"); if (pc->pipe_pid || strlen(pc->pipe_command)) { if (pc->pipe_pid && PID_ALIVE(pc->pipe_pid)) alive = TRUE; else alive = FALSE; console("pipe_pid: %d (%s) pipe_command: %s\n", pc->pipe_pid, alive ? "alive" : "dead", pc->pipe_command); } } /* * Determine whether the pid receiving the current piped output is still * alive. * * NOTE: This routine returns TRUE by default, and only returns FALSE if * the pipe_pid exists *and* it's known to have died. Therefore the * caller must be cognizant of pc->pipe_pid or pc->stdpipe_pid. 
*/
int
output_open(void)
{
	int waitstatus, waitret;

	if (!(pc->flags & TTY))
		return TRUE;

	switch (pc->redirect & PIPE_OPTIONS)
	{
	case (REDIRECT_TO_STDPIPE|FROM_COMMAND_LINE):
		/* A reaped (or errored) pager means the pipe is gone. */
		waitret = waitpid(pc->stdpipe_pid, &waitstatus, WNOHANG);
		if ((waitret == pc->stdpipe_pid) || (waitret == -1))
			return FALSE;
		break;

	case (REDIRECT_TO_PIPE|FROM_INPUT_FILE):
		if (pc->curcmd_flags & REPEAT)
			break;
		/* FALLTHROUGH */
	case (REDIRECT_TO_PIPE|FROM_COMMAND_LINE):
		switch (pc->redirect & (REDIRECT_MULTI_PIPE))
		{
		case REDIRECT_MULTI_PIPE:
			if (!PID_ALIVE(pc->pipe_pid))
				return FALSE;
			break;

		default:
			waitret = waitpid(pc->pipe_pid, &waitstatus, WNOHANG);
			if (waitret == pc->pipe_pid)
				return FALSE;
			if (waitret == -1) {
				/* intervening sh */
				if (!PID_ALIVE(pc->pipe_pid))
					return FALSE;
			}
			break;
		}
		break;

	default:
		break;
	}

	return TRUE;
}

/*
 * Determine the pids of the current popen'd shell and output command.
 * This is all done using /proc; the ps kludge at the bottom of this
 * routine is legacy, and should only get executed if /proc doesn't exist.
 */
static int
output_command_to_pids(void)
{
	DIR *dirp;
	struct dirent *dp;
	FILE *stp;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char lookfor[BUFSIZE+2];
	char *pid, *name, *status, *p_pid, *pgrp, *comm;
	char *arglist[MAXARGS];
	int argc;
	FILE *pipe;
	int retries, shell_has_exited;

	retries = 0;
	shell_has_exited = FALSE;
	pc->pipe_pid = pc->pipe_shell_pid = 0;
	/* /proc/<pid>/stat shows the comm field in parentheses, with any
	 * leading path stripped -- build the same form to compare against. */
	comm = strrchr(pc->pipe_command, '/');
	sprintf(lookfor, "(%s)", comm ? ++comm : pc->pipe_command);
	stall(1000);
retry:
	if (is_directory("/proc") && (dirp = opendir("/proc"))) {
		for (dp = readdir(dirp); dp && !pc->pipe_pid;
		     dp = readdir(dirp)) {
			if (!decimal(dp->d_name, 0))
				continue;
			sprintf(buf1, "/proc/%s/stat", dp->d_name);
			if (file_exists(buf1, NULL) &&
			    (stp = fopen(buf1, "r"))) {
				if (fgets(buf2, BUFSIZE, stp)) {
					pid = strtok(buf2, " ");
					name = strtok(NULL, " ");
					status = strtok(NULL, " ");
					p_pid = strtok(NULL, " ");
					pgrp = strtok(NULL, " ");
					/* Track the intervening popen()'d
					 * sh whose child we're seeking. */
					if (STREQ(name, "(sh)") &&
					    (atoi(p_pid) == getpid())) {
						pc->pipe_shell_pid = atoi(pid);
						if (STREQ(status, "Z"))
							shell_has_exited = TRUE;
					}
					/* Accept the command if parented by
					 * us, by our shell, or in our
					 * process group. */
					if (STREQ(name, lookfor) &&
					    ((atoi(p_pid) == getpid()) ||
					     (atoi(p_pid) == pc->pipe_shell_pid) ||
					     (atoi(pgrp) == getpid()))) {
						pc->pipe_pid = atoi(pid);
						console(
			    "FOUND[%d] (%d->%d->%d) %s %s p_pid: %s pgrp: %s\n",
							retries, getpid(),
							pc->pipe_shell_pid,
							pc->pipe_pid,
							name, status,
							p_pid, pgrp);
					}
				}
				fclose(stp);
			}
		}
		closedir(dirp);
	}

	if (!pc->pipe_pid && !shell_has_exited &&
	    ((retries++ < 10) || pc->pipe_shell_pid)) {
		stall(1000);
		goto retry;
	}

	console("getpid: %d pipe_shell_pid: %d pipe_pid: %d\n",
		getpid(), pc->pipe_shell_pid, pc->pipe_pid);

	if (pc->pipe_pid)
		return pc->pipe_pid;

	/* Legacy fallback: scrape "ps -ft <tty>" output. */
	sprintf(buf1, "ps -ft %s", pc->my_tty);
	console("%s: ", buf1);

	if ((pipe = popen(buf1, "r")) == NULL) {
		error(INFO, "cannot determine output pid\n");
		return 0;
	}

	while (fgets(buf1, BUFSIZE, pipe)) {
		argc = parse_line(buf1, arglist);
		if ((argc >= 8) &&
		    STREQ(arglist[7], pc->pipe_command) &&
		    STRNEQ(pc->my_tty, arglist[5])) {
			pc->pipe_pid = atoi(arglist[1]);
			break;
		}
	}
	pclose(pipe);

	console("%d\n", pc->pipe_pid);

	return pc->pipe_pid;
}

/*
 * Close straggling, piped-to, output commands.
 */
void
close_output(void)
{
	if ((pc->flags & TTY) &&
	    (pc->pipe_pid || strlen(pc->pipe_command)) &&
	    output_open())
		kill(pc->pipe_pid, 9);
}

/*
 * Initialize what's needed for the command line:
 *
 *  1. termios structures for raw and cooked terminal mode.
 *  2.
set up SIGINT and SIGPIPE handlers for aborted commands. * 3. set up the command history table. * 4. create the prompt string. */ void cmdline_init(void) { int fd = 0; /* * Stash a copy of the original termios setup. * Build a raw version for quick use for each command entry. */ if (isatty(fileno(stdin)) && ((fd = open("/dev/tty", O_RDONLY)) >= 0)) { if (tcgetattr(fd, &pc->termios_orig) == -1) error(FATAL, "tcgetattr /dev/tty: %s\n", strerror(errno)); if (tcgetattr(fd, &pc->termios_raw) == -1) error(FATAL, "tcgetattr /dev/tty: %s\n", strerror(errno)); close(fd); pc->termios_raw.c_lflag &= ~ECHO & ~ICANON; pc->termios_raw.c_cc[VMIN] = (char)1; pc->termios_raw.c_cc[VTIME] = (char)0; restore_sanity(); pc->flags |= TTY; set_my_tty(); SIGACTION(SIGINT, restart, &pc->sigaction, NULL); readline_init(); } else { if (fd < 0) error(INFO, "/dev/tty: %s\n", strerror(errno)); if (!(pc->flags & SILENT)) fprintf(fp, "NOTE: stdin: not a tty\n\n"); fflush(fp); pc->flags &= ~TTY; } SIGACTION(SIGPIPE, SIG_IGN, &pc->sigaction, NULL); set_command_prompt(NULL); } /* * Create and stash the original prompt, but allow changes during runtime. */ void set_command_prompt(char *new_prompt) { static char *orig_prompt = NULL; if (!orig_prompt) { if (!(orig_prompt = (char *)malloc(strlen(pc->program_name)+3))) error(FATAL, "cannot malloc prompt string\n"); sprintf(orig_prompt, "%s> ", pc->program_name); } if (new_prompt) pc->prompt = new_prompt; else pc->prompt = orig_prompt; } /* * SIGINT, SIGPIPE, and SIGSEGV handler. * Signal number 0 is sent for a generic restart. */ #define MAX_RECURSIVE_SIGNALS (10) #define MAX_SIGINTS_ACCEPTED (1) void restart(int sig) { static int in_restart = 0; console("restart (%s) %s\n", signame(sig), pc->flags & IN_GDB ? 
"(in gdb)" : "(in crash)"); if (sig == SIGUSR2) clean_exit(1); if (pc->flags & IN_RESTART) { fprintf(stderr, "\nembedded signal received (%s): recursive restart call\n", signame(sig)); if (++in_restart < MAX_RECURSIVE_SIGNALS) return; fprintf(stderr, "bailing out...\n"); clean_exit(1); } else { pc->flags |= IN_RESTART; in_restart = 0; } switch (sig) { case SIGSEGV: fflush(fp); fprintf(stderr, " \n", pc->flags & IN_GDB ? " in gdb" : ""); case 0: case SIGPIPE: restore_sanity(); break; case SIGINT: SIGACTION(SIGINT, restart, &pc->sigaction, NULL); pc->flags |= _SIGINT_; pc->sigint_cnt++; pc->flags &= ~IN_RESTART; if (pc->sigint_cnt == MAX_SIGINTS_ACCEPTED) { restore_sanity(); if (pc->ifile_in_progress) { pc->ifile_in_progress = 0; pc->ifile_offset = 0; } break; } return; default: fprintf(stderr, "unexpected signal received: %s\n", signame(sig)); restore_sanity(); close_output(); break; } fprintf(stderr, "\n"); pc->flags &= ~(IN_FOREACH|IN_GDB|IN_RESTART); longjmp(pc->main_loop_env, 1); } /* * Return a signal name string, or a number if the signal is not listed. */ static char * signame(int sig) { static char sigbuf[20]; switch (sig) { case SIGINT: sprintf(sigbuf, "SIGINT-%d", pc->sigint_cnt+1); return sigbuf; case SIGPIPE: return "SIGPIPE"; case SIGSEGV: return "SIGSEGV"; default: sprintf(sigbuf, "%d", sig); return sigbuf; } } /* * Restore the program environment to the state it was in before the * last command was executed: * * 1. close all temporarily opened pipes and output files. * 2. set the terminal back to normal cooked mode. * 3. free all temporary buffers. * 4. restore the last known output radix. 
*/ static void restore_sanity(void) { int fd, waitstatus; struct extension_table *ext; struct command_table_entry *cp; if (pc->stdpipe) { close(fileno(pc->stdpipe)); pc->stdpipe = NULL; if (pc->stdpipe_pid && PID_ALIVE(pc->stdpipe_pid)) { while (!waitpid(pc->stdpipe_pid, &waitstatus, WNOHANG)) stall(1000); } pc->stdpipe_pid = 0; } if (pc->pipe) { close(fileno(pc->pipe)); pc->pipe = NULL; console("wait for redirect %d->%d to finish...\n", pc->pipe_shell_pid, pc->pipe_pid); if (pc->pipe_pid) while (PID_ALIVE(pc->pipe_pid)) { waitpid(pc->pipe_pid, &waitstatus, WNOHANG); stall(1000); } if (pc->pipe_shell_pid) while (PID_ALIVE(pc->pipe_shell_pid)) { waitpid(pc->pipe_shell_pid, &waitstatus, WNOHANG); stall(1000); } pc->pipe_pid = 0; } if (pc->ifile_pipe) { fflush(pc->ifile_pipe); close(fileno(pc->ifile_pipe)); pc->ifile_pipe = NULL; if (pc->pipe_pid && ((pc->redirect & (PIPE_OPTIONS|REDIRECT_PID_KNOWN)) == (FROM_INPUT_FILE|REDIRECT_TO_PIPE|REDIRECT_PID_KNOWN))) { console("wait for redirect %d->%d to finish...\n", pc->pipe_shell_pid, pc->pipe_pid); while (PID_ALIVE(pc->pipe_pid)) { waitpid(pc->pipe_pid, &waitstatus, WNOHANG); stall(1000); } if (pc->pipe_shell_pid) while (PID_ALIVE(pc->pipe_shell_pid)) { waitpid(pc->pipe_shell_pid, &waitstatus, WNOHANG); stall(1000); } if (pc->redirect & (REDIRECT_MULTI_PIPE)) wait_for_children(ALL_CHILDREN); } } if (pc->ofile) { fclose(pc->ofile); pc->ofile = NULL; } if (pc->ifile_ofile) { fclose(pc->ifile_ofile); pc->ifile_ofile = NULL; } if (pc->ifile) { fclose(pc->ifile); pc->ifile = NULL; } if (pc->args_ifile) { fclose(pc->args_ifile); pc->args_ifile = NULL; } if (pc->tmpfile) close_tmpfile(); if (pc->tmpfile2) close_tmpfile2(); if (pc->cmd_cleanup) pc->cmd_cleanup(pc->cmd_cleanup_arg); if (pc->flags & TTY) { if ((fd = open("/dev/tty", O_RDONLY)) < 0) { console("/dev/tty: %s\n", strerror(errno)); clean_exit(1); } if (tcsetattr(fd, TCSANOW, &pc->termios_orig) == -1) error(FATAL, "tcsetattr /dev/tty: %s\n", strerror(errno)); close(fd); 
} wait_for_children(ZOMBIES_ONLY); pc->flags &= ~(INIT_IFILE|RUNTIME_IFILE|IFILE_ERROR|_SIGINT_|PLEASE_WAIT); pc->sigint_cnt = 0; pc->redirect = 0; pc->pipe_command[0] = NULLCHAR; pc->pipe_pid = 0; pc->pipe_shell_pid = 0; pc->sbrk = sbrk(0); if ((pc->curcmd_flags & (UD2A_INSTRUCTION|BAD_INSTRUCTION)) == (UD2A_INSTRUCTION|BAD_INSTRUCTION)) error(WARNING, "A (bad) instruction was noted in last disassembly.\n" " Use \"dis -b [number]\" to set/restore the number of\n" " encoded bytes to skip after a ud2a (BUG) instruction.\n"); pc->curcmd_flags = 0; pc->curcmd_private = 0; restore_gdb_sanity(); free_all_bufs(); /* * Clear the structure cache references -- no-ops if DUMPFILE(). */ clear_task_cache(); clear_machdep_cache(); clear_swap_info_cache(); clear_file_cache(); clear_dentry_cache(); clear_inode_cache(); clear_vma_cache(); clear_active_set(); if (kt->ikconfig_flags & IKCONFIG_LOADED) read_in_kernel_config(IKCFG_FREE); /* * Call the cleanup() function of any extension. */ for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (cp->flags & CLEANUP) (*cp->func)(); } } if (CRASHDEBUG(5)) { dump_filesys_table(0); dump_vma_cache(0); } if (REMOTE()) remote_clear_pipeline(); hq_close(); } /* * Similar to above, but only called in between each command that is * read from an input file. 
*/ static void restore_ifile_sanity(void) { int fd; pc->flags &= ~IFILE_ERROR; if (pc->ifile_pipe) { close(fileno(pc->ifile_pipe)); pc->ifile_pipe = NULL; } if (pc->ifile_ofile) { fclose(pc->ifile_ofile); pc->ifile_ofile = NULL; } if (pc->flags & TTY) { if ((fd = open("/dev/tty", O_RDONLY)) < 0) { console("/dev/tty: %s\n", strerror(errno)); clean_exit(1); } if (tcsetattr(fd, TCSANOW, &pc->termios_orig) == -1) error(FATAL, "tcsetattr /dev/tty: %s\n", strerror(errno)); close(fd); } if (pc->tmpfile2) { close_tmpfile2(); } restore_gdb_sanity(); free_all_bufs(); hq_close(); } /* * Check whether a SIGINT was received during the execution of a command, * clearing the flag if it was set. This allows individual commands or * entities to do whatever is appropriate to handle CTRL-C. */ int received_SIGINT(void) { if (pc->flags & _SIGINT_) { pc->flags &= ~_SIGINT_; pc->sigint_cnt = 0; if (pc->ifile_in_progress) { pc->ifile_in_progress = 0; pc->ifile_offset = 0; } return TRUE; } else return FALSE; } /* * Look for an executable file that begins with #! */ static int is_shell_script(char *s) { int fd; char interp[2]; struct stat sbuf; if ((fd = open(s, O_RDONLY)) < 0) return FALSE; if (isatty(fd)) { close(fd); return FALSE; } if (read(fd, interp, 2) != 2) { close(fd); return FALSE; } if (!STRNEQ(interp, "#!")) { close(fd); return FALSE; } close(fd); if (stat(s, &sbuf) == -1) return FALSE; if (!(sbuf.st_mode & (S_IXUSR|S_IXGRP|S_IXOTH))) return FALSE; return TRUE; } /* * After verifying the user's input file, loop through each line, executing * one command at a time. This command pretty much does the same as * get_command_line(), but also kicks off the command execution as well. * It's kept self-contained, as indicated by the RUNTIME_IFILE flag, and * keeps its own internal sanity by calling restore_ifile_sanity() between * each line. 
*/ void exec_input_file(void) { char *file; FILE *incoming_fp; char buf[BUFSIZE]; ulong this; /* * Do start-up .rc or input files in the proper order. */ if (pc->flags & RCHOME_IFILE) { this = RCHOME_IFILE; pc->flags &= ~RCHOME_IFILE; } else if (pc->flags & RCLOCAL_IFILE) { this = RCLOCAL_IFILE; pc->flags &= ~RCLOCAL_IFILE; } else if (pc->flags & CMDLINE_IFILE) { this = CMDLINE_IFILE; pc->flags &= ~CMDLINE_IFILE; } else this = 0; if (pc->flags & RUNTIME_IFILE) { error(INFO, "embedded input files not allowed!\n"); return; } if (argcnt < 2) { error(INFO, "no input file entered!\n"); return; } else file = args[1]; if (!file_exists(file, NULL)) { error(INFO, "%s: %s\n", file, strerror(ENOENT)); return; } if (is_elf_file(file)) { error(INFO, "input from executable files not supported yet!\n"); return; } if (is_shell_script(file)) { error(INFO, "input from shell scripts not supported yet!\n"); return; } if ((pc->ifile = fopen(file, "r")) == NULL) { error(INFO, "%s: %s\n", file, strerror(errno)); return; } pc->flags |= RUNTIME_IFILE; incoming_fp = fp; /* * Handle runtime commands that use input files. */ if ((pc->ifile_in_progress = this) == 0) { if (!pc->runtime_ifile_cmd) { if (!(pc->runtime_ifile_cmd = (char *)malloc(BUFSIZE))) { error(INFO, "cannot malloc input file command line buffer\n"); return; } BZERO(pc->runtime_ifile_cmd, BUFSIZE); } if (!strlen(pc->runtime_ifile_cmd)) strcpy(pc->runtime_ifile_cmd, pc->orig_line); pc->ifile_in_progress = RUNTIME_IFILE; } /* * If there's an offset, then there was a FATAL error caused * by the last command executed from the input file. */ if (pc->ifile_offset) fseek(pc->ifile, (long)pc->ifile_offset, SEEK_SET); while (fgets(buf, BUFSIZE-1, pc->ifile)) { /* * Restore normal environment. 
*/ fp = incoming_fp; restore_ifile_sanity(); BZERO(pc->command_line, BUFSIZE); BZERO(pc->orig_line, BUFSIZE); if (this & (RCHOME_IFILE|RCLOCAL_IFILE)) pc->curcmd_flags |= FROM_RCFILE; pc->ifile_offset = ftell(pc->ifile); if (STRNEQ(buf, "#") || STREQ(buf, "\n")) continue; check_special_handling(buf); strcpy(pc->command_line, buf); clean_line(pc->command_line); strcpy(pc->orig_line, pc->command_line); strip_linefeeds(pc->orig_line); resolve_aliases(); switch (setup_redirect(FROM_INPUT_FILE)) { case REDIRECT_NOT_DONE: case REDIRECT_TO_PIPE: case REDIRECT_TO_FILE: break; case REDIRECT_SHELL_ESCAPE: case REDIRECT_SHELL_COMMAND: continue; case REDIRECT_FAILURE: goto done_input; } if (CRASHDEBUG(1)) console(buf); if (!(argcnt = parse_line(pc->command_line, args))) continue; if (!(pc->flags & SILENT)) { fprintf(fp, "%s%s", pc->prompt, buf); fflush(fp); } exec_command(); if (received_SIGINT()) goto done_input; } done_input: fclose(pc->ifile); pc->ifile = NULL; pc->flags &= ~RUNTIME_IFILE; pc->ifile_offset = 0; if (pc->runtime_ifile_cmd) BZERO(pc->runtime_ifile_cmd, BUFSIZE); pc->ifile_in_progress = 0; } /* * Prime the alias list with a few built-in's. */ void alias_init(char *inbuf) { char buf[BUFSIZE]; if (inbuf) { strcpy(buf, inbuf); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); return; } strcpy(buf, "alias man help"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias ? 
help"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias quit q"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias sf set scroll off"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias sn set scroll on"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias hex set radix 16"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias dec set radix 10"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias g gdb"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias px p -x"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias pd p -d"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias for foreach"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias size *"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias dmesg log"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); strcpy(buf, "alias lsmod mod"); argcnt = parse_line(buf, args); allocate_alias(ALIAS_BUILTIN); } /* * Before the command line is parsed, take a snapshot and parse the snapshot. * If args[0] is an known alias, recreate the pc->command_line string with * the alias substitution. 
*/ static void resolve_aliases(void) { int i; struct alias_data *ad; int found; char *p1, *remainder; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (!strlen(pc->command_line)) return; strcpy(buf1, pc->command_line); argcnt = parse_line(buf1, args); if (argcnt > 1) { strcpy(buf2, &pc->command_line[args[1] - buf1]); remainder = buf2; } else remainder = NULL; found = FALSE; for (ad = alias_head.next; ad; ad = ad->next) { if (STREQ(ad->alias, args[0])) { for (i = 0; i < ad->argcnt; i++) args[i] = ad->args[i]; found = TRUE; break; } } if (!found) return; BZERO(pc->command_line, BUFSIZE); p1 = pc->command_line; for (i = 0; i < ad->argcnt; i++) { snprintf(p1, BUFSIZE - (p1-pc->command_line), "%s ", args[i]); while (*p1) p1++; if ((p1 - pc->command_line) >= BUFSIZE) break; } if (remainder) { if ((strlen(remainder)+strlen(pc->command_line)) < BUFSIZE) strcat(pc->command_line, remainder); else error(INFO, "command line overflow.\n"); } else if (strlen(pc->command_line) >= (BUFSIZE-1)) error(INFO, "command line overflow.\n"); clean_line(pc->command_line); } /* * If input string is an alias, return a pointer to the alias_data struct. */ struct alias_data * is_alias(char *s) { struct alias_data *ad; for (ad = alias_head.next; ad; ad = ad->next) { if (STREQ(ad->alias, s)) return(ad); } return NULL; } /* * .rc file commands that are "set" commands may be performed prior * to initialization, so pass them to cmd_set() for consideration. * All other commands are flagged for execution by exec_input_file() * after session initialization is complete. */ void resolve_rc_cmd(char *s, int origin) { clean_line(s); if (*s == '#') return; if ((argcnt = parse_line(s, args)) == 0) return; if (STREQ(args[0], "set")) { optind = 0; cmd_set(); } switch (origin) { case ALIAS_RCHOME: pc->flags |= RCHOME_IFILE; break; case ALIAS_RCLOCAL: pc->flags |= RCLOCAL_IFILE; break; } return; } /* * The "alias" command. With no arguments, list all aliases. 
With one * argument -- which must be an alias -- display the string it's aliased to. * With two or more arguments, setup a new alias, where the first argument * is the alias, and the remaining arguments make up the alias string. * If the second arg is the NULL string "", delete the alias. */ void cmd_alias(void) { if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); switch (argcnt) { case 1: list_aliases(NULL); break; case 2: list_aliases(args[1]); break; default: if (allocate_alias(ALIAS_RUNTIME)) list_aliases(args[1]); break; } } /* * Dump the current set of aliases. */ static void list_aliases(char *s) { int i; struct alias_data *ad; int found, precision; char buf[BUFSIZE]; if (!alias_head.next) { error(INFO, "alias list is empty\n"); return; } BZERO(buf, BUFSIZE); found = FALSE; precision = 7; for (ad = alias_head.next; ad; ad = ad->next) { switch (ad->origin) { case ALIAS_RCLOCAL: sprintf(buf, ".%src", pc->program_name); if (strlen(buf) > precision) precision = strlen(buf); break; case ALIAS_RCHOME: sprintf(buf, "$HOME/.%src", pc->program_name); if (strlen(buf) > precision) precision = strlen(buf); break; } } fprintf(fp, "ORIGIN"); pad_line(fp, precision-6, ' '); BZERO(buf, BUFSIZE); fprintf(fp, " ALIAS COMMAND\n"); for (ad = alias_head.next; ad; ad = ad->next) { if (s && !STREQ(s, ad->alias)) continue; found = TRUE; switch (ad->origin) { case ALIAS_RUNTIME: sprintf(buf, "runtime"); break; case ALIAS_RCLOCAL: sprintf(buf, ".%src", pc->program_name); break; case ALIAS_RCHOME: sprintf(buf, "$HOME/.%src", pc->program_name); break; case ALIAS_BUILTIN: sprintf(buf, "builtin"); break; } fprintf(fp, "%s ", buf); pad_line(fp, precision-strlen(buf), ' '); fprintf(fp, "%-7s ", ad->alias); for (i = 0; i < ad->argcnt; i++) { fprintf(fp, "%s ", ad->args[i]); } fprintf(fp, "\n"); } if (s && !found) fprintf(fp, "alias does not exist: %s\n", s); } /* * Verify the alias request set up in the args[] array: * * 1. make sure that the alias string starts with a legitimate command. * 2. 
if the already exists, deallocate its current version. * * Then malloc space for the alias string, and link it in to the alias list. */ static int allocate_alias(int origin) { int i; int size; struct alias_data *ad; struct alias_data *newad; char *p1, *enclosed_string; int found; if ((enclosed_string = strstr(args[2], " "))) *enclosed_string = NULLCHAR; found = FALSE; if (get_command_table_entry(args[1])) { error(INFO, "cannot alias existing command name: %s\n", args[1]); return FALSE; } if (get_command_table_entry(args[2])) found = TRUE; /* Accept the "clear" external command exceptionally. */ if (STREQ(args[2], "clear")) found = TRUE; if (!found) { if (!strlen(args[2])) { if (alias_exists(args[1])) { deallocate_alias(args[1]); fprintf(fp, "alias deleted: %s\n", args[1]); } } else { error(INFO, "invalid alias attempt on non-existent command: %s\n", args[2]); } return FALSE; } if (alias_exists(args[1])) deallocate_alias(args[1]); if (enclosed_string) *enclosed_string = ' '; size = sizeof(struct alias_data) + argcnt; for (i = 0; i < argcnt; i++) size += strlen(args[i]); if ((newad = (struct alias_data *)malloc(size+1)) == NULL) { error(INFO, "alias_data malloc: %s\n", strerror(errno)); return FALSE; } BZERO(newad, size); newad->next = NULL; newad->size = size; newad->origin = origin; p1 = newad->argbuf; for (i = 1; i < argcnt; i++) { sprintf(p1, "%s ", args[i]); while (*p1) p1++; } p1 = strstr(newad->argbuf, " "); *p1 = NULLCHAR; newad->alias = newad->argbuf; newad->argcnt = parse_line(p1+1, newad->args); for (ad = &alias_head; ad->next; ad = ad->next) ; ad->next = newad; return TRUE; } /* * Check whether the passed-in string is a currently-existing alias. */ static int alias_exists(char *s) { struct alias_data *ad; if (!alias_head.next) return FALSE; for (ad = alias_head.next; ad; ad = ad->next) if (STREQ(ad->alias, s)) return TRUE; return FALSE; } /* * If the passed-in string is an alias, delink it and free its memory. 
*/ void deallocate_alias(char *s) { struct alias_data *ad, *lastad; for (ad = alias_head.next, lastad = &alias_head; ad; ad = ad->next) { if (!STREQ(ad->alias, s)) { lastad = ad; continue; } lastad->next = ad->next; free(ad); break; } } /* * "help -a" output */ void dump_alias_data(void) { int i; struct alias_data *ad; fprintf(fp, "alias_head.next: %lx\n\n", (ulong)alias_head.next); for (ad = alias_head.next; ad; ad = ad->next) { fprintf(fp, " next: %lx\n", (ulong)ad->next); fprintf(fp, " alias: %s\n", ad->alias); fprintf(fp, " size: %d\n", ad->size); fprintf(fp, " origin: "); switch (ad->origin) { case ALIAS_RUNTIME: fprintf(fp, "runtime setting \n"); break; case ALIAS_RCLOCAL: fprintf(fp, ".%src \n", pc->program_name); break; case ALIAS_RCHOME: fprintf(fp, "$HOME/.%src \n", pc->program_name); break; case ALIAS_BUILTIN: fprintf(fp, "builtin\n"); break; } fprintf(fp, " argcnt: %d\n", ad->argcnt); for (i = 0; i < ad->argcnt; i++) fprintf(fp, " args[%d]: %lx: %s\n", i, (ulong)ad->args[i], ad->args[i]); fprintf(fp, "\n"); } } /* * Repeat a command on a live system. 
*/
/*
 *  The "repeat" command: re-execute a command in a loop, optionally with
 *  a fixed delay between iterations ("repeat -<seconds> <command>").
 *  The loop ends on CTRL-C, on the output pipe closing, on loss of the
 *  controlling tty, or when the command itself clears REPEAT.
 */
void
cmd_repeat(void)
{
	ulong delay;
	char buf[BUFSIZE];
	char bufsave[BUFSIZE];
	FILE *incoming_fp;

	if (argcnt == 1)
		cmd_usage(pc->curcmd, SYNOPSIS);

	delay = 0;

	if (args[1][0] == '-') {
		/*
		 *  A leading dash must be followed by a delay in seconds.
		 *  NOTE(review): the default/NULLCHAR cases fall into the
		 *  digit cases below -- presumably cmd_usage(SYNOPSIS) does
		 *  not return here; confirm against cmd_usage().
		 */
		switch (args[1][1])
		{
		default:
		case NULLCHAR:
			cmd_usage(pc->curcmd, SYNOPSIS);
		case '1':
		case '2':
		case '3':
		case '4':
		case '5':
		case '6':
		case '7':
		case '8':
		case '9':
		case '0':
			delay = dtol(&args[1][1], FAULT_ON_ERROR, NULL);
			/* the command proper starts at args[2] */
			concat_args(buf, 2, FALSE);
			break;
		}
	} else
		concat_args(buf, 1, FALSE);

	check_special_handling(buf);
	strcpy(pc->command_line, buf);
	resolve_aliases();
	if (!argcnt)
		return;
	/* keep a pristine copy: commands may clobber the parsed args */
	strcpy(buf, pc->command_line);
	strcpy(bufsave, buf);
	argcnt = parse_line(buf, args);
	if (!argcnt)
		return;

	if (STREQ(args[0], "<") &&
	    (pc->flags & TTY) &&
	    (pc->flags & SCROLL) && pc->scroll_command)
		error(FATAL,
		 "scrolling must be turned off when repeating an input file\n");

	pc->curcmd_flags |= REPEAT;
	incoming_fp = fp;

	while (TRUE) {
		optind = 0;
		/* fp may have been redirected by the command; reset it */
		fp = incoming_fp;
		exec_command();
		free_all_bufs();
		wait_for_children(ZOMBIES_ONLY);

		if (received_SIGINT() || !output_open())
			break;

		if ((pc->flags & TTY) && !is_a_tty("/dev/tty"))
			break;

		/* the executed command may clear REPEAT to stop the loop */
		if (!(pc->curcmd_flags & REPEAT))
			break;

		if (delay)
			sleep(delay);

		/* re-parse from the saved copy for the next iteration */
		strcpy(buf, bufsave);
		argcnt = parse_line(buf, args);
	}
}

/*
 *  Initialize readline, set the editing mode, and then perform any
 *  crash-specific bindings, etc.
*/ static void readline_init(void) { rl_initialize(); if (STREQ(pc->editing_mode, "vi")) { rl_editing_mode = vi_mode; rl_bind_key(CTRL('N'), rl_get_next_history); rl_bind_key(CTRL('P'), rl_get_previous_history); rl_bind_key_in_map(CTRL('P'), rl_get_previous_history, vi_insertion_keymap); rl_bind_key_in_map(CTRL('N'), rl_get_next_history, vi_insertion_keymap); rl_bind_key_in_map(CTRL('l'), rl_clear_screen, vi_insertion_keymap); rl_generic_bind(ISFUNC, "[A", (char *)rl_get_previous_history, vi_movement_keymap); rl_generic_bind(ISFUNC, "[B", (char *)rl_get_next_history, vi_movement_keymap); } if (STREQ(pc->editing_mode, "emacs")) { rl_editing_mode = emacs_mode; } rl_attempted_completion_function = crash_readline_completer; rl_attempted_completion_over = 1; } /* * Find and set the tty string of this session as seen in "ps -ef" output. */ static void set_my_tty(void) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int argc; FILE *pipe; strcpy(pc->my_tty, "?"); if (file_exists("/usr/bin/tty", NULL)) { sprintf(buf, "/usr/bin/tty"); if ((pipe = popen(buf, "r")) == NULL) return; while (fgets(buf, BUFSIZE, pipe)) { if (STRNEQ(buf, "/dev/")) { strcpy(pc->my_tty, strip_line_end(&buf[strlen("/dev/")])); break; } } pclose(pipe); return; } sprintf(buf, "ps -ef | grep ' %d '", getpid()); if (CRASHDEBUG(1)) fprintf(fp, "popen(%s)\n", buf); if ((pipe = popen(buf, "r")) == NULL) return; while (fgets(buf, BUFSIZE, pipe)) { argc = parse_line(buf, arglist); if ((argc >= 8) && (atoi(arglist[1]) == getpid())) { if (strlen(arglist[5]) < 9) strcpy(pc->my_tty, arglist[5]); else strncpy(pc->my_tty, arglist[5], 9); } } pclose(pipe); } /* * Check whether SIGINT's are allowed before shipping a request off to gdb. 
*/ int interruptible(void) { if (!(pc->flags & RUNTIME)) return FALSE; if (!(pc->flags & TTY)) return FALSE; if ((pc->redirect & (FROM_INPUT_FILE|REDIRECT_NOT_DONE)) == (FROM_INPUT_FILE|REDIRECT_NOT_DONE)) return TRUE; if (strlen(pc->pipe_command)) return FALSE; return TRUE; } /* * Set up the standard output pipe using whichever was selected during init. */ static int setup_stdpipe(void) { char *path; if (pipe(pc->pipefd) < 0) { error(INFO, "pipe system call failed: %s", strerror(errno)); return FALSE; } if ((pc->stdpipe_pid = fork()) < 0) { error(INFO, "fork system call failed: %s", strerror(errno)); return FALSE; } path = NULL; if (pc->stdpipe_pid > 0) { pc->redirect |= REDIRECT_PID_KNOWN; close(pc->pipefd[0]); /* parent closes read end */ if ((pc->stdpipe = fdopen(pc->pipefd[1], "w")) == NULL) { error(INFO, "fdopen system call failed: %s", strerror(errno)); return FALSE; } setbuf(pc->stdpipe, NULL); switch (pc->scroll_command) { case SCROLL_LESS: strcpy(pc->pipe_command, less_argv[0]); break; case SCROLL_MORE: strcpy(pc->pipe_command, more_argv[0]); break; case SCROLL_CRASHPAGER: strcpy(pc->pipe_command, CRASHPAGER_argv[0]); break; } if (CRASHDEBUG(2)) console("pipe: %lx\n", pc->stdpipe); return TRUE;; } else { close(pc->pipefd[1]); /* child closes write end */ if (dup2(pc->pipefd[0], 0) != 0) { perror("child dup2 failed"); clean_exit(1); } if (CRASHDEBUG(2)) console("execv: %d\n", getpid()); switch (pc->scroll_command) { case SCROLL_LESS: path = less_argv[0]; execv(path, less_argv); break; case SCROLL_MORE: path = more_argv[0]; execv(path, more_argv); break; case SCROLL_CRASHPAGER: path = CRASHPAGER_argv[0]; execv(path, CRASHPAGER_argv); break; } perror(path); fprintf(stderr, "execv of scroll command failed\n"); exit(1); } } static void wait_for_children(ulong waitflag) { int status, pid; while (TRUE) { switch (pid = waitpid(-1, &status, WNOHANG)) { case 0: if (CRASHDEBUG(2)) console("wait_for_children: child running...\n"); if (waitflag == ZOMBIES_ONLY) 
return; break; case -1: if (CRASHDEBUG(2)) console("wait_for_children: no children alive\n"); return; default: console("wait_for_children(%d): reaped %d\n", waitflag, pid); if (CRASHDEBUG(2)) fprintf(fp, "wait_for_children: reaped %d\n", pid); break; } stall(1000); } } /* * Run an escaped shell command, redirecting the output to * the current output file. */ int shell_command(char *cmd) { FILE *pipe; char buf[BUFSIZE]; if ((pipe = popen(cmd, "r")) == NULL) { error(INFO, "cannot open pipe: %s\n", cmd); pc->redirect &= ~REDIRECT_SHELL_COMMAND; pc->redirect |= REDIRECT_FAILURE; return REDIRECT_FAILURE; } while (fgets(buf, BUFSIZE, pipe)) fputs(buf, fp); pclose(pipe); return REDIRECT_SHELL_COMMAND; } static int verify_args_input_file(char *fileptr) { struct stat stat; if (!file_exists(fileptr, &stat)) { if (CRASHDEBUG(1)) error(INFO, "%s: no such file\n", fileptr); } else if (!S_ISREG(stat.st_mode)) { if (CRASHDEBUG(1)) error(INFO, "%s: not a regular file\n", fileptr); } else if (!stat.st_size) { if (CRASHDEBUG(1)) error(INFO, "%s: file is empty\n", fileptr); } else if (!file_readable(fileptr)) { if (CRASHDEBUG(1)) error(INFO, "%s: permission denied\n", fileptr); } else return TRUE; return FALSE; } /* * Verify a command line argument input file. 
*/ #define NON_FILENAME_CHARS "*?!|\'\"{}<>;,^()$~" int is_args_input_file(struct command_table_entry *ct, struct args_input_file *aif) { int c, start, whites, args_used; char *p1, *p2, *curptr, *fileptr; char buf[BUFSIZE]; int retval; if (pc->curcmd_flags & NO_MODIFY) return FALSE; if (STREQ(ct->name, "repeat")) return FALSE; BZERO(aif, sizeof(struct args_input_file)); retval = FALSE; if (STREQ(ct->name, "gdb")) { curptr = pc->orig_line; next_gdb: if ((p1 = strstr(curptr, "<"))) { while (STRNEQ(p1, "<<")) { p2 = p1+2; if (!(p1 = strstr(p2, "<"))) return retval; } } if (!p1) return retval; start = p1 - curptr; p2 = p1+1; for (whites = 0; whitespace(*p2); whites++) p2++; if (*p2 == NULLCHAR) return retval; strcpy(buf, p2); p2 = buf; if (*p2) { fileptr = p2; while (*p2 && !whitespace(*p2) && (strpbrk(p2, NON_FILENAME_CHARS) != p2)) p2++; *p2 = NULLCHAR; if (verify_args_input_file(fileptr)) { if (retval == TRUE) { error(INFO, "ignoring multiple argument input files: " "%s and %s\n", aif->fileptr, fileptr); return FALSE; } aif->start = start; aif->resume = start + (p2-buf) + whites + 1; aif->fileptr = GETBUF(strlen(fileptr)+1); strcpy(aif->fileptr, fileptr); aif->is_gdb_cmd = TRUE; retval = TRUE; } } curptr = p1+1; goto next_gdb; } for (c = 0; c < argcnt; c++) { if (STRNEQ(args[c], "<") && !STRNEQ(args[c], "<<")) { if (strlen(args[c]) > 1) { fileptr = &args[c][1]; args_used = 1; } else { if ((c+1) == argcnt) error(FATAL, "< requires a file argument\n"); fileptr = args[c+1]; args_used = 2; } if (!verify_args_input_file(fileptr)) continue; if (retval == TRUE) error(FATAL, "multiple input files are not supported\n"); aif->index = c; aif->fileptr = GETBUF(strlen(fileptr)+1); strcpy(aif->fileptr, fileptr); aif->args_used = args_used; retval = TRUE; continue; } if (STRNEQ(args[c], "(")) { curptr = args[c]; next_expr: if ((p1 = strstr(curptr, "<"))) { while (STRNEQ(p1, "<<")) { p2 = p1+2; if (!(p1 = strstr(p2, "<"))) continue; } } if (!p1) continue; start = p1 - curptr; p2 = 
p1+1; for (whites = 0; whitespace(*p2); whites++) p2++; if (*p2 == NULLCHAR) continue; strcpy(buf, p2); p2 = buf; if (*p2) { fileptr = p2; while (*p2 && !whitespace(*p2) && (strpbrk(p2, NON_FILENAME_CHARS) != p2)) p2++; *p2 = NULLCHAR; if (!verify_args_input_file(fileptr)) continue; if (retval == TRUE) { error(INFO, "ignoring multiple argument input files: " "%s and %s\n", aif->fileptr, fileptr); return FALSE; } retval = TRUE; aif->in_expression = TRUE; aif->args_used = 1; aif->index = c; aif->start = start; aif->resume = start + (p2-buf) + whites + 1; aif->fileptr = GETBUF(strlen(fileptr)+1); strcpy(aif->fileptr, fileptr); } curptr = p1+1; goto next_expr; } } return retval; } static void modify_orig_line(char *inbuf, struct args_input_file *aif) { char buf[BUFSIZE]; strcpy(buf, pc->orig_line); strcpy(&buf[aif->start], inbuf); strcat(buf, &pc->orig_line[aif->resume]); strcpy(pc->orig_line, buf); } static void modify_expression_arg(char *inbuf, char **aif_args, struct args_input_file *aif) { char *old, *new; old = aif_args[aif->index]; new = GETBUF(strlen(aif_args[aif->index]) + strlen(inbuf)); strcpy(new, old); strcpy(&new[aif->start], inbuf); strcat(new, &old[aif->resume]); aif_args[aif->index] = new; } /* * Sequence through an args input file, and for each line, * reinitialize the global args[] and argcnt, and issue the command. */ void exec_args_input_file(struct command_table_entry *ct, struct args_input_file *aif) { char buf[BUFSIZE]; int i, c, aif_cnt; int orig_argcnt; char *aif_args[MAXARGS]; char *new_args[MAXARGS]; char *orig_args[MAXARGS]; char orig_line[BUFSIZE]; char *save_args[MAXARGS]; char save_line[BUFSIZE]; if ((pc->args_ifile = fopen(aif->fileptr, "r")) == NULL) error(FATAL, "%s: %s\n", aif->fileptr, strerror(errno)); if (aif->is_gdb_cmd) strcpy(orig_line, pc->orig_line); BCOPY(args, orig_args, sizeof(args)); orig_argcnt = argcnt; /* * Commands cannot be trusted to leave the arguments intact. 
* Stash them here and restore them each time through the loop. */ save_args[0] = save_line; for (i = 0; i < orig_argcnt; i++) { strcpy(save_args[i], orig_args[i]); save_args[i+1] = save_args[i] + strlen(save_args[i]) + 2; } while (fgets(buf, BUFSIZE-1, pc->args_ifile)) { clean_line(buf); if ((strlen(buf) == 0) || (buf[0] == '#')) continue; for (i = 1; i < orig_argcnt; i++) strcpy(orig_args[i], save_args[i]); if (aif->is_gdb_cmd) { console("(gdb) before: [%s]\n", orig_line); strcpy(pc->orig_line, orig_line); modify_orig_line(buf, aif); console("(gdb) after: [%s]\n", pc->orig_line); } else if (aif->in_expression) { console("expr before: [%s]\n", orig_args[aif->index]); BCOPY(orig_args, aif_args, sizeof(aif_args)); modify_expression_arg(buf, aif_args, aif); BCOPY(aif_args, args, sizeof(aif_args)); console("expr after: [%s]\n", args[aif->index]); } else { if (!(aif_cnt = parse_line(buf, aif_args))) continue; for (i = 0; i < orig_argcnt; i++) console("%s[%d]:%s %s", (i == 0) ? "before: " : "", i, orig_args[i], (i+1) == orig_argcnt ? "\n" : ""); for (i = 0; i < aif->index; i++) new_args[i] = orig_args[i]; for (i = aif->index, c = 0; c < aif_cnt; c++, i++) new_args[i] = aif_args[c]; for (i = aif->index + aif_cnt, c = aif->index + aif->args_used; c < orig_argcnt; c++, i++) new_args[i] = orig_args[c]; argcnt = orig_argcnt - aif->args_used + aif_cnt; new_args[argcnt] = NULL; BCOPY(new_args, args, sizeof(args)); for (i = 0; i < argcnt; i++) console("%s[%d]:%s %s", (i == 0) ? " after: " : "", i, args[i], (i+1) == argcnt ? 
"\n" : ""); } optind = argerrs = 0; pc->cmdgencur++; if (setjmp(pc->foreach_loop_env)) pc->flags &= ~IN_FOREACH; else { pc->flags |= IN_FOREACH; (*ct->func)(); pc->flags &= ~IN_FOREACH; } if (pc->cmd_cleanup) pc->cmd_cleanup(pc->cmd_cleanup_arg); free_all_bufs(); if (received_SIGINT()) break; } fclose(pc->args_ifile); pc->args_ifile = NULL; } static char * crash_readline_completion_generator(const char *match, int state) { static struct syment *sp_match; if (state == 0) sp_match = NULL; sp_match = symbol_complete_match(match, sp_match); if (sp_match) return(strdup(sp_match->name)); else return NULL; } static char ** crash_readline_completer(const char *match, int start, int end) { rl_attempted_completion_over = 1; return rl_completion_matches(match, crash_readline_completion_generator); } crash-utility-crash-9cd43f5/alpha.c0000664000372000037200000022526615107550337016642 0ustar juerghjuergh/* alpha.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2010-2013 David Anderson * Copyright (C) 2002-2006, 2010-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* */ #ifdef ALPHA #include "defs.h" static void alpha_back_trace(struct gnu_request *, struct bt_info *); static int alpha_trace_status(struct gnu_request *, struct bt_info *); static void alpha_exception_frame(ulong, ulong, struct gnu_request *, struct bt_info *); static void alpha_frame_offset(struct gnu_request *, ulong); static int alpha_backtrace_resync(struct gnu_request *, ulong, struct bt_info *); static void alpha_print_stack_entry(struct gnu_request *, ulong, char *, ulong, struct bt_info *); static int alpha_resync_speculate(struct gnu_request *, ulong,struct bt_info *); static int alpha_dis_filter(ulong, char *, unsigned int); static void dis_address_translation(ulong, char *, unsigned int); static void alpha_cmd_mach(void); static int alpha_get_smp_cpus(void); static void alpha_display_machine_stats(void); static void alpha_dump_line_number(char *, ulong); static void display_hwrpb(unsigned int); static void alpha_post_init(void); static struct line_number_hook alpha_line_number_hooks[]; #define ALPHA_CONTINUE_TRACE (1) #define ALPHA_END_OF_TRACE (2) #define ALPHA_EXCEPTION_FRAME (3) #define ALPHA_SYSCALL_FRAME (4) #define ALPHA_MM_FAULT (5) #define ALPHA_INTERRUPT_PENDING (6) #define ALPHA_RESCHEDULE (7) #define ALPHA_DOWN_FAILED (8) #define ALPHA_RET_FROM_SMP_FORK (9) #define ALPHA_SIGNAL_RETURN (10) #define ALPHA_STRACE (11) static int alpha_eframe_search(struct bt_info *); static int alpha_uvtop(struct task_context *, ulong, physaddr_t *, int); static int alpha_kvtop(struct task_context *, ulong, physaddr_t *, int); static void alpha_back_trace_cmd(struct bt_info *); static ulong alpha_get_task_pgd(ulong task); static ulong alpha_processor_speed(void); static void alpha_dump_irq(int); static void alpha_get_stack_frame(struct bt_info *, ulong *, ulong *); static void get_alpha_frame(struct bt_info *, ulong *, ulong *); static int verify_user_eframe(struct bt_info *, ulong, ulong); static int alpha_translate_pte(ulong, void *, ulonglong); static 
uint64_t alpha_memory_size(void); static ulong alpha_vmalloc_start(void); static int alpha_is_task_addr(ulong); static int alpha_verify_symbol(const char *, ulong, char); struct percpu_data { ulong halt_PC; ulong halt_ra; ulong halt_pv; }; #define GET_HALT_PC 0x1 #define GET_HALT_RA 0x2 #define GET_HALT_PV 0x3 static ulong get_percpu_data(int, ulong, struct percpu_data *); /* * Do all necessary machine-specific setup here. This is called three times, * before symbol table initialization, and before and after GDB has been * initialized. */ void alpha_init(int when) { int tmp; switch (when) { case PRE_SYMTAB: machdep->verify_symbol = alpha_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: switch (symbol_value("_stext") & KSEG_BASE) { case KSEG_BASE: machdep->kvbase = KSEG_BASE; break; case KSEG_BASE_48_BIT: machdep->kvbase = KSEG_BASE_48_BIT; break; default: error(FATAL, "cannot determine KSEG base from _stext: %lx\n", symbol_value("_stext")); } machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = alpha_eframe_search; machdep->back_trace = alpha_back_trace_cmd; machdep->processor_speed = alpha_processor_speed; machdep->uvtop = alpha_uvtop; machdep->kvtop = 
alpha_kvtop; machdep->get_task_pgd = alpha_get_task_pgd; if (symbol_exists("irq_desc")) machdep->dump_irq = generic_dump_irq; else machdep->dump_irq = alpha_dump_irq; machdep->get_stack_frame = alpha_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = alpha_translate_pte; machdep->memory_size = alpha_memory_size; machdep->vmalloc_start = alpha_vmalloc_start; machdep->is_task_addr = alpha_is_task_addr; if (symbol_exists("console_crash")) { get_symbol_data("console_crash", sizeof(int), &tmp); if (tmp) machdep->flags |= HWRESET; } machdep->dis_filter = alpha_dis_filter; machdep->cmd_mach = alpha_cmd_mach; machdep->get_smp_cpus = alpha_get_smp_cpus; machdep->line_number_hooks = alpha_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: MEMBER_OFFSET_INIT(thread_struct_ptbr, "thread_struct", "ptbr"); MEMBER_OFFSET_INIT(hwrpb_struct_cycle_freq, "hwrpb_struct", "cycle_freq"); MEMBER_OFFSET_INIT(hwrpb_struct_processor_offset, "hwrpb_struct", "processor_offset"); MEMBER_OFFSET_INIT(hwrpb_struct_processor_size, "hwrpb_struct", "processor_size"); MEMBER_OFFSET_INIT(percpu_struct_halt_PC, "percpu_struct", "halt_PC"); MEMBER_OFFSET_INIT(percpu_struct_halt_ra, "percpu_struct", "halt_ra"); MEMBER_OFFSET_INIT(percpu_struct_halt_pv, "percpu_struct", "halt_pv"); MEMBER_OFFSET_INIT(switch_stack_r26, "switch_stack", "r26"); if (symbol_exists("irq_action")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_action, "irq_action", NULL, 0); else if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else machdep->nr_irqs = 0; if (!machdep->hz) machdep->hz = HZ; break; case POST_INIT: alpha_post_init(); break; } } /* * Unroll a kernel stack. 
 */
/*
 * Entry point for the machine-dependent back-trace command: set up a
 * gnu_request from the bt_info start point (pc/sp) and either hand the
 * walk to embedded gdb (BT_USE_GDB) or unroll the stack ourselves.
 */
static void
alpha_back_trace_cmd(struct bt_info *bt)
{
	char buf[BUFSIZE];
	struct gnu_request *req;

	/* Always annotate exception frames encountered during the walk. */
	bt->flags |= BT_EXCEPTION_FRAME;

	if (CRASHDEBUG(1) || bt->debug)
		fprintf(fp, " => PC: %lx (%s) FP: %lx \n",
			bt->instptr, value_to_symstr(bt->instptr, buf, 0),
			bt->stkptr );

	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_STACK_TRACE;
	req->flags = GNU_RETURN_ON_ERROR;
	req->buf = GETBUF(BUFSIZE);
	req->debug = bt->debug;
	req->task = bt->task;
	req->pc = bt->instptr;
	req->sp = bt->stkptr;

	/* BT_USE_GDB defers the whole trace to gdb's backtracer. */
	if (bt->flags & BT_USE_GDB) {
		strcpy(req->buf, "backtrace");
		gdb_interface(req);
	}
	else
		alpha_back_trace(req, bt);

	FREEBUF(req->buf);
	FREEBUF(req);
}

/*
 * Unroll the kernel stack.
 */
/*
 * Helper macro used by alpha_back_trace() when the walk loses its footing:
 * run a silent text-symbol scan of the remaining stack (unless already
 * speculating), and if a plausible pc/sp pair turns up, resume from it;
 * otherwise fall through to the show_remaining_text dump.
 */
#define ALPHA_BACKTRACE_SPECULATE(X) \
	{ \
	speculate_location = X; \
	\
	if (bt->flags & BT_SPECULATE) \
		return; \
	\
	BZERO(btloc, sizeof(struct bt_info)); \
	btloc->task = req->task; \
	btloc->tc = bt->tc; \
	btloc->stackbase = bt->stackbase; \
	btloc->stacktop = bt->stacktop; \
	btloc->flags = BT_TEXT_SYMBOLS_NOPRINT; \
	hook.eip = 0; \
	hook.esp = req->lastsp ?
req->lastsp + sizeof(long) : 0; \ btloc->hp = &hook; \ \ back_trace(btloc); \ \ if (hook.esp && hook.eip) { \ req->hookp = &hook; \ if (alpha_resync_speculate(req, bt->flags, bt)) { \ req->pc = hook.eip; \ req->sp = hook.esp; \ continue; \ } \ goto show_remaining_text; \ } \ goto show_remaining_text; \ } static void alpha_back_trace(struct gnu_request *req, struct bt_info *bt) { char buf[BUFSIZE]; int frame; int done; int status; struct stack_hook hook; int eframe_same_pc_ra_function; int speculate_location; struct bt_info bt_info, *btloc; frame = 0; req->curframe = 0; btloc = &bt_info; if (!IS_KVADDR(req->pc)) { if (BT_REFERENCE_CHECK(bt)) return; if ((machdep->flags & HWRESET) && is_task_active(req->task)) { fprintf(fp, "(hardware reset while in user space)\n"); return; } fprintf(fp, "invalid pc: %lx\n", req->pc); alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME, req, bt); return; } for (done = FALSE; !done && (frame < 100); frame++) { speculate_location = 0; if ((req->name = closest_symbol(req->pc)) == NULL) { req->ra = req->pc = 0; if (alpha_backtrace_resync(req, bt->flags | BT_FROM_CALLFRAME, bt)) continue; if (BT_REFERENCE_FOUND(bt)) return; ALPHA_BACKTRACE_SPECULATE(1); } if (!INSTACK(req->sp, bt)) break; if (!is_kernel_text(req->pc)) ALPHA_BACKTRACE_SPECULATE(2); alpha_print_stack_entry(req, req->pc, req->name, bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; switch (status = alpha_trace_status(req, bt)) { case ALPHA_CONTINUE_TRACE: alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_END_OF_TRACE: done = TRUE; break; case ALPHA_STRACE: alpha_exception_frame(req->sp, BT_USER_EFRAME|BT_STRACE, req, bt); done = TRUE; break; case ALPHA_RET_FROM_SMP_FORK: alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME|BT_RET_FROM_SMP_FORK, req, bt); done = TRUE; break; case 
ALPHA_DOWN_FAILED: frame++; alpha_print_stack_entry(req, req->pc, closest_symbol(req->pc), bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_RESCHEDULE: alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME|BT_RESCHEDULE, req, bt); done = TRUE; break; case ALPHA_MM_FAULT: alpha_exception_frame(req->sp, bt->flags, req, bt); if (!IS_KVADDR(req->pc)) { done = TRUE; break; } alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } frame++; alpha_print_stack_entry(req, req->pc, closest_symbol(req->pc), bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; if (!IS_KVADDR(req->pc)) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_SYSCALL_FRAME: req->sp = verify_user_eframe(bt, req->task, req->sp) ? 
req->sp : USER_EFRAME_ADDR(req->task); alpha_exception_frame(req->sp, bt->flags, req, bt); if (!IS_KVADDR(req->pc)) { done = TRUE; break; } alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_SIGNAL_RETURN: alpha_exception_frame(USER_EFRAME_ADDR(req->task), bt->flags, req, bt); done = TRUE; break; case ALPHA_EXCEPTION_FRAME: alpha_frame_offset(req, 0); if (!req->value) { fprintf(fp, "ALPHA EXCEPTION FRAME w/no frame offset for %lx (%s)\n", req->pc, value_to_symstr(req->pc, buf, 0)); done = TRUE; break; } alpha_exception_frame(req->sp + req->value, bt->flags, req, bt); if (!IS_KVADDR(req->pc)) { done = TRUE; break; } alpha_frame_offset(req, 0); if (!req->value) { fprintf(fp, "ALPHA EXCEPTION FRAME w/no frame offset for %lx (%s)\n", req->pc, value_to_symstr(req->pc, buf, 0)); done = TRUE; break; } eframe_same_pc_ra_function = SAME_FUNCTION(req->pc, req->ra); frame++; alpha_print_stack_entry(req, req->pc, closest_symbol(req->pc), bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; if (!IS_KVADDR(req->pc)) { done = TRUE; break; } if (STREQ(closest_symbol(req->pc), "ret_from_reschedule")) { alpha_exception_frame( USER_EFRAME_ADDR(req->task), BT_USER_EFRAME|BT_RESCHEDULE, req, bt); done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); if (!is_kernel_text(req->pc)) { if (alpha_backtrace_resync(req, bt->flags | BT_FROM_EXCEPTION, bt)) break; if (BT_REFERENCE_FOUND(bt)) return; ALPHA_BACKTRACE_SPECULATE(3); } if (!eframe_same_pc_ra_function && (req->pc != req->ra)) { req->pc = req->ra; break; } req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_INTERRUPT_PENDING: alpha_frame_offset(req, 0); if (!req->value) { req->prevpc = req->pc; req->pc = req->addr; req->prevsp = req->sp; req->sp = req->frame; } else { req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = 
req->sp; req->sp += req->value; } break; } } return; show_remaining_text: if (BT_REFERENCE_CHECK(bt)) return; BZERO(btloc, sizeof(struct bt_info)); btloc->task = req->task; btloc->tc = bt->tc; btloc->stackbase = bt->stackbase; btloc->stacktop = bt->stacktop; btloc->flags = BT_TEXT_SYMBOLS_NOPRINT; hook.esp = req->lastsp + sizeof(long); btloc->hp = &hook; back_trace(btloc); if (hook.eip) { fprintf(fp, "NOTE: cannot resolve trace from this point -- remaining text symbols on stack:\n"); btloc->flags = BT_TEXT_SYMBOLS_PRINT|BT_ERROR_MASK; hook.esp = req->lastsp + sizeof(long); back_trace(btloc); } else fprintf(fp, "NOTE: cannot resolve trace from this point -- no remaining text symbols\n"); if (CRASHDEBUG(1)) fprintf(fp, "speculate_location: %d\n", speculate_location); alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME, req, bt); } /* * print one entry of a stack trace */ static void alpha_print_stack_entry(struct gnu_request *req, ulong callpc, char *name, ulong flags, struct bt_info *bt) { struct load_module *lm; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str) || (STREQ(name, "strace") && STREQ(bt->ref->str, "entSys"))) { bt->ref->cmdflags |= BT_REF_FOUND; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == callpc) bt->ref->cmdflags |= BT_REF_FOUND; break; } } else { fprintf(fp, "%s#%d [%lx] %s at %lx", req->curframe < 10 ? " " : "", req->curframe, req->sp, STREQ(name, "strace") ? 
"strace (via entSys)" : name, callpc); if (module_symbol(callpc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); } if (!(flags & BT_SPECULATE)) req->curframe++; if (flags & BT_SAVE_LASTSP) req->lastsp = req->sp; if (BT_REFERENCE_CHECK(bt)) return; if (flags & BT_LINE_NUMBERS) alpha_dump_line_number(name, callpc); } static const char *hook_files[] = { "arch/alpha/kernel/entry.S", "arch/alpha/kernel/head.S", "init/main.c", "arch/alpha/kernel/smp.c", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) #define MAIN_C ((char **)&hook_files[2]) #define SMP_C ((char **)&hook_files[3]) static struct line_number_hook alpha_line_number_hooks[] = { {"entInt", ENTRY_S}, {"entMM", ENTRY_S}, {"entArith", ENTRY_S}, {"entIF", ENTRY_S}, {"entDbg", ENTRY_S}, {"kernel_clone", ENTRY_S}, {"kernel_thread", ENTRY_S}, {"__kernel_execve", ENTRY_S}, {"do_switch_stack", ENTRY_S}, {"undo_switch_stack", ENTRY_S}, {"entUna", ENTRY_S}, {"entUnaUser", ENTRY_S}, {"sys_fork", ENTRY_S}, {"sys_clone", ENTRY_S}, {"sys_vfork", ENTRY_S}, {"alpha_switch_to", ENTRY_S}, {"entSys", ENTRY_S}, {"ret_from_sys_call", ENTRY_S}, {"ret_from_reschedule", ENTRY_S}, {"restore_all", ENTRY_S}, {"strace", ENTRY_S}, {"strace_success", ENTRY_S}, {"strace_error", ENTRY_S}, {"syscall_error", ENTRY_S}, {"ret_success", ENTRY_S}, {"signal_return", ENTRY_S}, {"ret_from_fork", ENTRY_S}, {"reschedule", ENTRY_S}, {"sys_sigreturn", ENTRY_S}, {"sys_rt_sigreturn", ENTRY_S}, {"sys_sigsuspend", ENTRY_S}, {"sys_rt_sigsuspend", ENTRY_S}, {"ret_from_smpfork", ENTRY_S}, {"_stext", HEAD_S}, {"__start", HEAD_S}, {"__smp_callin", HEAD_S}, {"cserve_ena", HEAD_S}, {"cserve_dis", HEAD_S}, {"halt", HEAD_S}, {"start_kernel", MAIN_C}, {"smp_callin", SMP_C}, {NULL, NULL} /* list must be NULL-terminated */ }; static void alpha_dump_line_number(char *name, ulong callpc) { char buf[BUFSIZE], *p; int retries; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if 
(retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? "" : " (cannot determine file and line number)\n"); else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } /* * Look for the frame size storage at the beginning of a function. * If it's not obvious, try gdb. * * For future reference, here's where the numbers come from: * * 0xfffffc00003217e8 : subq sp,0x50,sp * fffffc00003217e8: 43ca153e * 010000 11110 01010000 1 0101001 11110 * * 0xfffffc0000321668 : subq sp,0x60,sp * fffffc0000321668: 43cc153e * 010000 11110 01100000 1 0101001 11110 * * 0xfffffc000035d028 : subq sp,0x70,sp * fffffc000035d028: 43ce153e * 010000 11110 01110000 1 0101001 11110 * * 0100 0011 110x xxxx xxx1 0101 0011 1110 * 1111 1111 111x xxxx xxx1 1111 1111 1111 * 0000 0000 0001 1111 1110 0000 0000 0000 * f f e 0 1 f f f instruction mask * 0 0 1 f e 0 0 0 offset * * stq ra,0(sp) * fffffc000035d034: b75e0000 */ static void alpha_frame_offset(struct gnu_request *req, ulong alt_pc) { uint *ip, ival; ulong value; req->value = value = 0; if (alt_pc && !is_kernel_text(alt_pc)) error(FATAL, "trying to get frame offset of non-text address: %lx\n", alt_pc); else if (!alt_pc && !is_kernel_text(req->pc)) error(FATAL, "trying to get frame offset of non-text address: %lx\n", req->pc); ip = alt_pc ? 
(int *)closest_symbol_value(alt_pc) : (int *)closest_symbol_value(req->pc); if (!ip) goto use_gdb; ival = 0; /* * Don't go any farther than "stq ra,0(sp)" (0xb75e0000) */ while (ival != 0xb75e0000) { readmem((ulong)ip, KVADDR, &ival, sizeof(uint), "text value", FAULT_ON_ERROR); if ((ival & 0xffe01fff) == 0x43c0153e) { value = (ival & 0x1fe000) >> 13; break; } ip++; } if (value) { req->value = value; return; } use_gdb: #ifndef GDB_5_3 { static int gdb_frame_offset_warnings = 10; if (gdb_frame_offset_warnings-- > 0) error(WARNING, "GNU_ALPHA_FRAME_OFFSET functionality not ported to gdb\n"); } #endif req->command = GNU_ALPHA_FRAME_OFFSET; if (alt_pc) { ulong pc_save; pc_save = req->pc; req->pc = alt_pc; gdb_interface(req); req->pc = pc_save; } else gdb_interface(req); } /* * Look for key routines that either mean the trace has ended or has * bumped into an exception frame. */ int alpha_trace_status(struct gnu_request *req, struct bt_info *bt) { ulong value; char *func; ulong frame; req->addr = 0; func = req->name; frame = req->sp; if (STREQ(func, "start_kernel") || STREQ(func, "smp_callin") || STREQ(func, "kernel_thread") || STREQ(func, "__kernel_thread")) return ALPHA_END_OF_TRACE; if (STREQ(func, "ret_from_smp_fork") || STREQ(func, "ret_from_smpfork")) return ALPHA_RET_FROM_SMP_FORK; if (STREQ(func, "entSys")) return ALPHA_SYSCALL_FRAME; if (STREQ(func, "entMM")) { req->sp += 56; /* see entMM in entry.S */ return ALPHA_MM_FAULT; } if (STREQ(func, "do_entInt")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "do_entArith")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "do_entIF")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "do_entDbg")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "handle_bottom_half")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "handle_softirq")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "reschedule")) return ALPHA_RESCHEDULE; if (STREQ(func, "ret_from_reschedule")) return ALPHA_RESCHEDULE; if (STREQ(func, "signal_return")) return 
ALPHA_SIGNAL_RETURN; if (STREQ(func, "strace")) return ALPHA_STRACE; if (STREQ(func, "__down_failed") || STREQ(func, "__down_failed_interruptible")) { readmem(req->sp + 144, KVADDR, &req->pc, sizeof(ulong), "__down_failed r26", FAULT_ON_ERROR); req->sp += 160; return ALPHA_DOWN_FAILED; } value = GET_STACK_ULONG(frame); if (STREQ(closest_symbol(value), "do_entInt") || STREQ(closest_symbol(value), "do_entArith") || STREQ(closest_symbol(value), "do_entIF") || STREQ(closest_symbol(value), "do_entDbg")) { req->addr = value; req->frame = 0; while (INSTACK(frame, bt)) { frame += sizeof(ulong); value = GET_STACK_ULONG(frame); if (STREQ(closest_symbol(value), "ret_from_sys_call")) { alpha_frame_offset(req, req->addr); /* req->frame = frame + req->value; XXX */ break; } } return ALPHA_INTERRUPT_PENDING; } return ALPHA_CONTINUE_TRACE; } /* * Redo the gdb pt_regs structure output. */ enum regnames { _r0_, _r1_, _r2_, _r3_, _r4_, _r5_, _r6_, _r7_, _r8_, _r19_, _r20_, _r21_, _r22_, _r23_, _r24_, _r25_, _r26_, _r27_, _r28_, _hae_, _trap_a0_, _trap_a1_, _trap_a2_, _ps_, _pc_, _gp_, _r16_, _r17_, _r18_, NUMREGS}; struct alpha_eframe { char regs[30][30]; ulong value[29]; }; static void alpha_exception_frame(ulong addr, ulong flags, struct gnu_request *req, struct bt_info *bt) { int i, j; char buf[BUFSIZE]; ulong value; physaddr_t paddr; struct alpha_eframe eframe; if (CRASHDEBUG(4)) fprintf(fp, "alpha_exception_frame: %lx\n", addr); if (flags & BT_SPECULATE) { req->pc = 0; fprintf(fp, "ALPHA EXCEPTION FRAME\n"); return; } BZERO(&eframe, sizeof(struct alpha_eframe)); open_tmpfile(); dump_struct("pt_regs", addr, RADIX(16)); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { strip_comma(clean_line(buf)); if (!strstr(buf, "0x")) continue; extract_hex(buf, &value, NULLCHAR, TRUE); if (CRASHDEBUG(4)) fprintf(pc->saved_fp, "<%s> %lx\n", buf, value); if (STRNEQ(buf, "r0 = ")) { sprintf(eframe.regs[_r0_], " V0/R0: %016lx", value); eframe.value[_r0_] = value; } if (STRNEQ(buf, 
"r1 = ")) { sprintf(eframe.regs[_r1_], " T0/R1: %016lx", value); eframe.value[_r1_] = value; } if (STRNEQ(buf, "r2 = ")) { sprintf(eframe.regs[_r2_], " T1/R2: %016lx", value); eframe.value[_r2_] = value; } if (STRNEQ(buf, "r3 = ")) { sprintf(eframe.regs[_r3_], " T2/R3: %016lx", value); eframe.value[_r3_] = value; } if (STRNEQ(buf, "r4 = ")) { sprintf(eframe.regs[_r4_], " T3/R4: %016lx", value); eframe.value[_r4_] = value; } if (STRNEQ(buf, "r5 = ")) { sprintf(eframe.regs[_r5_], " T4/R5: %016lx", value); eframe.value[_r5_] = value; } if (STRNEQ(buf, "r6 = ")) { sprintf(eframe.regs[_r6_], " T5/R6: %016lx", value); eframe.value[_r6_] = value; } if (STRNEQ(buf, "r7 = ")) { sprintf(eframe.regs[_r7_], " T6/R7: %016lx", value); eframe.value[_r7_] = value; } if (STRNEQ(buf, "r8 = ")) { sprintf(eframe.regs[_r8_], " T7/R8: %016lx", value); eframe.value[_r8_] = value; } if (STRNEQ(buf, "r19 = ")) { sprintf(eframe.regs[_r19_], " A3/R19: %016lx", value); eframe.value[_r19_] = value; } if (STRNEQ(buf, "r20 = ")) { sprintf(eframe.regs[_r20_], " A4/R20: %016lx", value); eframe.value[_r20_] = value; } if (STRNEQ(buf, "r21 = ")) { sprintf(eframe.regs[_r21_], " A5/R21: %016lx", value); eframe.value[_r21_] = value; } if (STRNEQ(buf, "r22 = ")) { sprintf(eframe.regs[_r22_], " T8/R22: %016lx", value); eframe.value[_r22_] = value; } if (STRNEQ(buf, "r23 = ")) { sprintf(eframe.regs[_r23_], " T9/R23: %016lx", value); eframe.value[_r23_] = value; } if (STRNEQ(buf, "r24 = ")) { sprintf(eframe.regs[_r24_], "T10/R24: %016lx", value); eframe.value[_r24_] = value; } if (STRNEQ(buf, "r25 = ")) { sprintf(eframe.regs[_r25_], "T11/R25: %016lx", value); eframe.value[_r25_] = value; } if (STRNEQ(buf, "r26 = ")) { sprintf(eframe.regs[_r26_], " RA/R26: %016lx", value); eframe.value[_r26_] = value; } if (STRNEQ(buf, "r27 = ")) { sprintf(eframe.regs[_r27_], "T12/R27: %016lx", value); eframe.value[_r27_] = value; } if (STRNEQ(buf, "r28 = ")) { sprintf(eframe.regs[_r28_], " AT/R28: %016lx", value); 
eframe.value[_r28_] = value; } if (STRNEQ(buf, "hae = ")) { sprintf(eframe.regs[_hae_], " HAE: %016lx", value); eframe.value[_hae_] = value; } if (STRNEQ(buf, "trap_a0 = ")) { sprintf(eframe.regs[_trap_a0_], "TRAP_A0: %016lx", value); eframe.value[_trap_a0_] = value; } if (STRNEQ(buf, "trap_a1 = ")) { sprintf(eframe.regs[_trap_a1_], "TRAP_A1: %016lx", value); eframe.value[_trap_a1_] = value; } if (STRNEQ(buf, "trap_a2 = ")) { sprintf(eframe.regs[_trap_a2_], "TRAP_A2: %016lx", value); eframe.value[_trap_a2_] = value; } if (STRNEQ(buf, "ps = ")) { sprintf(eframe.regs[_ps_], " PS: %016lx", value); eframe.value[_ps_] = value; } if (STRNEQ(buf, "pc = ")) { sprintf(eframe.regs[_pc_], " PC: %016lx", value); eframe.value[_pc_] = value; } if (STRNEQ(buf, "gp = ")) { sprintf(eframe.regs[_gp_], " GP/R29: %016lx", value); eframe.value[_gp_] = value; } if (STRNEQ(buf, "r16 = ")) { sprintf(eframe.regs[_r16_], " A0/R16: %016lx", value); eframe.value[_r16_] = value; } if (STRNEQ(buf, "r17 = ")) { sprintf(eframe.regs[_r17_], " A1/R17: %016lx", value); eframe.value[_r17_] = value; } if (STRNEQ(buf, "r18 =")) { sprintf(eframe.regs[_r18_], " A2/R18: %016lx", value); eframe.value[_r18_] = value; } } close_tmpfile(); if ((flags & BT_EXCEPTION_FRAME) && !BT_REFERENCE_CHECK(bt)) { dump_eframe: fprintf(fp, " EFRAME: %lx ", addr); fprintf(fp, "%s\n", eframe.regs[_r24_]); for (i = 0; i < (((NUMREGS+1)/2)-1); i++) { fprintf(fp, "%s ", eframe.regs[i]); pad_line(fp, 21 - strlen(eframe.regs[i]), ' '); j = i+((NUMREGS+1)/2); fprintf(fp, "%s", eframe.regs[j]); if (((j == _pc_) || (j == _r26_)) && is_kernel_text(eframe.value[j])) fprintf(fp, " <%s>", value_to_symstr(eframe.value[j], buf, 0)); fprintf(fp, "\n"); } } req->ra = eframe.value[_r26_]; req->pc = eframe.value[_pc_]; req->sp = addr + (29 * sizeof(ulong)); if (flags & BT_USER_EFRAME) { flags &= ~BT_USER_EFRAME; if (!BT_REFERENCE_CHECK(bt) && (eframe.value[_ps_] == 8) && (((uvtop(task_to_context(req->task), req->pc, &paddr, 0) || (volatile 
ulong)paddr) && (uvtop(task_to_context(req->task), req->ra, &paddr, 0) || (volatile ulong)paddr)) || (IS_ZOMBIE(req->task) || IS_EXITING(req->task)))) { if (!(flags & (BT_RESCHEDULE|BT_RET_FROM_SMP_FORK|BT_STRACE))) fprintf(fp, "NOTE: kernel-entry exception frame:\n"); goto dump_eframe; } } } /* * Look for likely exception frames in a stack. */ struct alpha_pt_regs { ulong reg_value[NUMREGS]; }; static int alpha_eframe_search(struct bt_info *bt) { ulong *first, *last; ulong eframe; struct alpha_pt_regs *pt; struct gnu_request *req; /* needed for alpha_exception_frame */ ulong *stack; int cnt; stack = (ulong *)bt->stackbuf; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->task = bt->task; first = stack + (roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)); last = stack + (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) / sizeof(ulong)); for (cnt = 0; first <= last; first++) { pt = (struct alpha_pt_regs *)first; /* check for kernel exception frame */ if (!(pt->reg_value[_ps_] & 0xfffffffffffffff8) && (is_kernel_text(pt->reg_value[_pc_]) || IS_MODULE_VADDR(pt->reg_value[_pc_])) && (is_kernel_text(pt->reg_value[_r26_]) || IS_MODULE_VADDR(pt->reg_value[_r26_])) && IS_KVADDR(pt->reg_value[_gp_])) { cnt++; if (bt->flags & BT_EFRAME_COUNT) continue; fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME:\n"); eframe = bt->task + ((ulong)first - (ulong)stack); alpha_exception_frame(eframe, BT_EXCEPTION_FRAME, req, bt); continue; } /* check for user exception frame */ if ((pt->reg_value[_ps_] == 0x8) && ((IN_TASK_VMA(bt->task, pt->reg_value[_pc_]) && IN_TASK_VMA(bt->task, pt->reg_value[_r26_]) && IS_UVADDR(pt->reg_value[_gp_], bt->tc)) || ((first == last) && (IS_ZOMBIE(bt->task) || IS_EXITING(bt->task))))) { cnt++; if (bt->flags & BT_EFRAME_COUNT) continue; fprintf(fp, "\nUSER-MODE EXCEPTION FRAME:\n"); eframe = bt->task + ((ulong)first - (ulong)stack); alpha_exception_frame(eframe, BT_EXCEPTION_FRAME, req, bt); } } FREEBUF(req); return cnt; } /* * Before 
dumping a nonsensical exception frame, give it a quick test.
 */
/*
 * Sanity-check a candidate user-mode exception frame at stack address sp
 * before dumping it.  Returns TRUE if the pt_regs read from sp look like
 * a genuine kernel-entry frame for this task, FALSE otherwise.
 */
static int
verify_user_eframe(struct bt_info *bt, ulong task, ulong sp)
{
	struct alpha_pt_regs ptbuf, *pt;

	readmem(sp, KVADDR, &ptbuf, sizeof(struct alpha_pt_regs),
		"pt_regs", FAULT_ON_ERROR);

	pt = &ptbuf;

	/*
	 * A plausible user-mode frame has PS == 8 and both the saved pc
	 * and ra mapped in the task's address space; for zombie/exiting
	 * tasks (whose VMAs may be gone) accept the canonical user-eframe
	 * stack location instead.
	 *
	 * BUGFIX: the original compared pt -- the address of the local
	 * ptbuf stack copy -- against USER_EFRAME_ADDR(task), which can
	 * never match, leaving the zombie/exiting fallback dead code.
	 * The intended comparison is against sp, the stack address the
	 * pt_regs were read from (cf. the analogous "first == last" test
	 * in alpha_eframe_search()).
	 */
	if ((pt->reg_value[_ps_] == 0x8) &&
	    ((IN_TASK_VMA(task, pt->reg_value[_pc_]) &&
	      IN_TASK_VMA(task, pt->reg_value[_r26_]) &&
	      IS_UVADDR(pt->reg_value[_gp_], bt->tc)) ||
	     ((sp == USER_EFRAME_ADDR(task)) &&
	      (IS_ZOMBIE(task) || IS_EXITING(task)))))
		return TRUE;

	return FALSE;
}

/*
 * Try to resync the stack location when there is no valid stack frame,
 * typically just above an exception frame.  Use the req->ra value from the
 * exception frame as the new starting req->pc.  Then walk up the stack until
 * a text routine that calls the newly-assigned pc is found -- that stack
 * location then becomes the new req->sp.
 *
 * If we're not coming from an exception frame, req-ra and req->pc will be
 * purposely zeroed out.  In that case, use the prevsp value to find the
 * first pc that called the last frame's pc.
 *
 * Add any other repeatable "special-case" frames to the beginning of this
 * routine (ex. debug_spin_lock).  Last ditch -- at the end of this routine,
 * speculate what might have happened (possibly in the background) -- and
 * if it looks good, run with it.
*/ static int alpha_backtrace_resync(struct gnu_request *req, ulong flags, struct bt_info *bt) { char addr[BUFSIZE]; char buf[BUFSIZE]; char lookfor1[BUFSIZE]; char lookfor2[BUFSIZE]; ulong newpc; ulong *stkp; ulong *stkp_newpc, *stkp_next; ulong value; int found; char *name; int exception; if (CRASHDEBUG(1)) fprintf(fp, "RESYNC1: [%lx-%d] ra: %lx pc: %lx sp: %lx\n", flags, req->curframe, req->ra, req->pc, req->sp); if (!req->ra && !req->pc) { req->ra = req->prevpc; exception = FALSE; } else exception = TRUE; if (!IS_KVADDR(req->ra)) return FALSE; name = closest_symbol(req->ra); sprintf(lookfor1, "<%s>", name); sprintf(lookfor2, "<%s+", name); if (CRASHDEBUG(1)) fprintf(fp, "RESYNC2: exception: %s lookfor: %s or %s\n", exception ? "TRUE" : "FALSE", lookfor1, lookfor2); /* * This is common when a non-panicking active CPU is spinning * in debug_spin_lock(). The next pc is offset by 0x30 from * the top of the exception frame, and the next sp is equal * to the frame offset of debug_spin_lock(). I can't explain it... */ if ((flags & BT_FROM_EXCEPTION) && STREQ(name, "debug_spin_lock")) { alpha_print_stack_entry(req, req->ra, closest_symbol(req->ra), flags, bt); if (BT_REFERENCE_FOUND(bt)) return FALSE; alpha_frame_offset(req, req->ra); stkp = (ulong *)(req->sp + 0x30); value = GET_STACK_ULONG(stkp); if (!is_kernel_text(value)) { req->sp = req->prevsp; return FALSE; } req->pc = value; req->sp += req->value; return TRUE; } /* * If the ra is a system call, then all we should have to do is * find the next reference to entSys on the stack, and set the * sp to that value. */ if (is_system_call(name, 0)) { /* stkp = (ulong *)req->sp; */ stkp = (ulong *)req->prevsp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (IS_KVADDR(value) && is_kernel_text(value)) { if (STREQ(closest_symbol(value), "entSys")) { req->pc = value; req->sp = USER_EFRAME_ADDR(req->task); return TRUE; } } } } /* * Just find the next location containing text. (?) 
*/ if (STREQ(name, "do_coredump")) { stkp = (ulong *)(req->sp + sizeof(long)); for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (IS_KVADDR(value) && is_kernel_text(value)) { req->pc = req->ra; req->sp = (ulong)stkp; return TRUE; } } } if (flags & BT_SPECULATE) return FALSE; if (CRASHDEBUG(1)) { fprintf(fp, "RESYNC3: prevsp: %lx ra: %lx name: %s\n", req->prevsp, req->ra, name); fprintf(fp, "RESYNC3: prevpc: %lx\n", req->prevpc); } stkp_newpc = stkp_next = 0; newpc = 0; found = FALSE; if (exception) { newpc = req->ra; stkp = (ulong *)req->sp; } else stkp = (ulong *)req->prevsp; if (CRASHDEBUG(1)) fprintf(fp, "RESYNC4: stkp: %lx newpc: %lx\n", (ulong)stkp, newpc); for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); /* * First find the new pc on the stack. */ if (!found) { if (!exception && is_kernel_text(value)) { found = TRUE; } else if (value == newpc) { found = TRUE; stkp_newpc = stkp; continue; } } if (!IS_KVADDR(value)) continue; if (is_kernel_text(value)) { if (!stkp_next) stkp_next = stkp; if (CRASHDEBUG(2)) { fprintf(fp, "RESYNC6: disassemble %lx (%s)\n", value - sizeof(uint), value_to_symstr(value - sizeof(uint), buf, 0)); } req->command = GNU_DISASSEMBLE; req->addr = value - sizeof(uint); sprintf(addr, "0x%lx", req->addr); open_tmpfile(); req->fp = pc->tmpfile; gdb_interface(req); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { clean_line(buf); if (STRNEQ(buf, "Dump of") || STRNEQ(buf, "End of")) continue; if (STRNEQ(buf, addr)) { if (LASTCHAR(buf) == ':') { fgets(buf, BUFSIZE, pc->tmpfile); clean_line(buf); } if (CRASHDEBUG(2) && (strstr(buf, "jsr") || strstr(buf, "bsr"))) fprintf(pc->saved_fp, "%s\n", buf); if ((strstr(buf, "jsr") || strstr(buf, "bsr")) && (strstr(buf, lookfor1) || strstr(buf, lookfor2))) { if (exception) { req->pc = newpc; req->sp = (ulong)stkp; } else req->pc = req->addr; close_tmpfile(); return TRUE; } } } close_tmpfile(); } } if (CRASHDEBUG(1)) { fprintf(fp, "RESYNC9: [%d] 
name: %s pc: %lx ra: %lx\n", req->curframe, name, req->pc, req->ra); fprintf(fp, "RESYNC9: sp: %lx lastsp: %lx\n", req->sp, req->lastsp); fprintf(fp, "RESYNC9: prevpc: %lx prevsp: %lx\n", req->prevpc, req->prevsp); } /* * At this point, all we can do is speculate based upon * past experiences... */ return (alpha_resync_speculate(req, flags, bt)); } /* * Try one level of speculation. If it works, fine -- if not, give up. */ static int alpha_resync_speculate(struct gnu_request *req, ulong flags, struct bt_info *bt) { ulong *stkp; ulong value; ulong found_sp, found_ra; struct stack_hook hook; struct bt_info bt_info, *btloc; char buf[BUFSIZE]; int kernel_thread; int looks_good; if (flags & BT_SPECULATE) /* already been here on this trace... */ return FALSE; if (pc->tmpfile) return FALSE; found_ra = found_sp = 0; kernel_thread = is_kernel_thread(req->task); /* * Add "known" possibilities here. */ switch (flags & (BT_FROM_EXCEPTION|BT_FROM_CALLFRAME)) { case BT_FROM_EXCEPTION: if (STREQ(closest_symbol(req->prevpc), "read_lock") || STREQ(closest_symbol(req->ra), "do_select") || STREQ(closest_symbol(req->ra), "schedule")) { stkp = (ulong *)req->sp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (found_ra) { if (is_kernel_text_offset(value)) { found_sp = (ulong)stkp; break; } continue; } if (value == req->ra) found_ra = value; } } break; case BT_FROM_CALLFRAME: if (STREQ(closest_symbol(req->ra), "sys_read")) { value = GET_STACK_ULONG(req->prevsp - 32); if (STREQ(closest_symbol(value), "entSys")) { found_ra = value; found_sp = req->prevsp - 32; } } else if (STREQ(closest_symbol(req->ra), "exit_autofs4_fs")) { stkp = (ulong *)req->sp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (found_ra && (value != found_ra)) { if (is_kernel_text_offset(value)) { found_sp = (ulong)stkp; break; } continue; } if (is_kernel_text_offset(value)) found_ra = value; } } break; default: if (req->hookp && STREQ(closest_symbol(req->prevpc), 
"filemap_nopage") && !STREQ(closest_symbol(req->hookp->eip), "do_no_page")) { found_ra = found_sp = 0; stkp = (ulong *)req->prevsp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (found_ra && (value != found_ra)) { if (is_kernel_text_offset(value)) { found_sp = (ulong)stkp; break; } continue; } if (is_kernel_text_offset(value) && STREQ(closest_symbol(value), "do_no_page")) found_ra = value; } if (found_ra && found_sp) { req->hookp->eip = found_ra; req->hookp->esp = found_sp; return TRUE; } } if (req->hookp) { found_ra = req->hookp->eip; found_sp = req->hookp->esp; } break; } if (found_ra && found_sp) { looks_good = FALSE; hook.esp = found_sp; hook.eip = found_ra; if (CRASHDEBUG(1)) fprintf(pc->saved_fp, "----- RESYNC SPECULATE START -----\n"); open_tmpfile(); btloc = &bt_info; BZERO(btloc, sizeof(struct bt_info)); btloc->task = req->task; btloc->tc = bt->tc; btloc->stackbase = bt->stackbase; btloc->stacktop = bt->stacktop; btloc->flags = BT_SPECULATE; btloc->hp = &hook; back_trace(btloc); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1)) fprintf(pc->saved_fp, "%s", buf); if (strstr(buf, "NOTE: cannot resolve")) { looks_good = FALSE; break; } if (strstr(buf, "ALPHA EXCEPTION FRAME")) { looks_good = TRUE; break; } if (kernel_thread) { if (strstr(buf, " kernel_thread ") || strstr(buf, " __kernel_thread ") || strstr(buf, " start_kernel ") || strstr(buf, " smp_callin ")) { looks_good = TRUE; break; } } } close_tmpfile(); if (CRASHDEBUG(1)) fprintf(pc->saved_fp, "----- RESYNC SPECULATE DONE ------\n"); if (looks_good) { req->pc = found_ra; req->sp = found_sp; return TRUE; } } return FALSE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). 
If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. */ static int alpha_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { pgd = (ulong *)machdep->get_task_pgd(tc->task); } else { if (!tc->mm_struct) pgd = (ulong *)machdep->get_task_pgd(tc->task); else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte & _PAGE_VALID)) goto no_upage; page_middle = (ulong *) (PTOV((pgd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + ((vaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte & _PAGE_VALID)) goto no_upage; page_table = (ulong *) (PTOV((pmd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + (BTOP(vaddr) & (PTRS_PER_PAGE - 1)); FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_VALID))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); alpha_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = ((pte & _PFN_MASK) >> (32-PAGESHIFT())) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " 
PAGE: %lx\n\n", PAGEBASE(*paddr)); alpha_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. */ static int alpha_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; if (!IS_KVADDR(kvaddr)) return FALSE; if (!vt->vmalloc_start) { /* presume KSEG this early */ *paddr = VTOP(kvaddr); return TRUE; } if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); return TRUE; } pgd = (ulong *)vt->kernel_pgd[0]; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte & _PAGE_VALID)) goto no_kpage; page_middle = (ulong *) (PTOV((pgd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte & _PAGE_VALID)) goto no_kpage; page_table = (ulong *) (PTOV((pmd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + (BTOP(kvaddr) & (PTRS_PER_PAGE - 1)); FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_VALID))) { if (pte && verbose) { fprintf(fp, "\n"); alpha_translate_pte(pte, 0, 0); } goto no_kpage; } *paddr = ((pte & _PFN_MASK) >> (32-PAGESHIFT())) + PAGEOFFSET(kvaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); 
alpha_translate_pte(pte, 0, 0); } return TRUE; no_kpage: return FALSE; } /* * Get the relevant page directory pointer from a task structure. */ static ulong alpha_get_task_pgd(ulong task) { long offset; ulong ptbr; offset = OFFSET_OPTION(task_struct_thread, task_struct_tss); offset += OFFSET(thread_struct_ptbr); readmem(task + offset, KVADDR, &ptbr, sizeof(ulong), "task thread ptbr", FAULT_ON_ERROR); return(PTOV(PTOB(ptbr))); } /* * Calculate and return the speed of the processor. */ static ulong alpha_processor_speed(void) { ulong hwrpb; long offset; long cycle_freq; ulong mhz; if (machdep->mhz) return machdep->mhz; mhz = 0; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); offset = OFFSET(hwrpb_struct_cycle_freq); if (!hwrpb || (offset == -1) || !readmem(hwrpb+offset, KVADDR, &cycle_freq, sizeof(ulong), "hwrpb cycle_freq", RETURN_ON_ERROR)) return (machdep->mhz = mhz); mhz = cycle_freq/1000000; return (machdep->mhz = mhz); } void alpha_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & HWRESET) fprintf(fp, "%sHWRESET", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: alpha_eframe_search()\n"); fprintf(fp, " back_trace: alpha_back_trace_cmd()\n"); fprintf(fp, " processor_speed: alpha_processor_speed()\n"); fprintf(fp, " uvtop: alpha_uvtop()\n"); fprintf(fp, " kvtop: alpha_uvtop()\n"); fprintf(fp, " get_task_pgd: alpha_get_task_pgd()\n"); if (machdep->dump_irq == generic_dump_irq) fprintf(fp, " dump_irq: generic_dump_irq()\n"); else fprintf(fp, " dump_irq: alpha_dump_irq()\n"); fprintf(fp, " get_stack_frame: alpha_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: alpha_translate_pte()\n"); fprintf(fp, " memory_size: alpha_get_memory_size()\n"); fprintf(fp, " vmalloc_start: alpha_get_vmalloc_start()\n"); fprintf(fp, " is_task_addr: alpha_is_task_addr()\n"); fprintf(fp, " verify_symbol: alpha_verify_symbol()\n"); fprintf(fp, " dis_filter: alpha_dis_filter()\n"); fprintf(fp, " cmd_mach: alpha_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: alpha_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " 
line_number_hooks: alpha_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } /* * Fix up jsr's to show the right target. * * If a value is passed with no buf, then cmd_dis is fishing for whether * the GP can be calculated from the first couple of instructions of the * target routine: * * 0xfffffc0000349fa0 : ldah gp,35(t12) * 0xfffffc0000349fa4 : lda gp,6216(gp) * * If a buf pointer is passed, then check whether the t12 register * is being set up as an offset from gp, then calculate the target address: * * 0xfffffc000042c364 : ldq t12,-29336(gp) * 0xfffffc000042c368 : * jsr ra,(t12),0xfffffc0000429dc0 * * If the next instruction is a jsr ra,(t12), then correct the bracketed * target address translation. 
* */ #define LDAH_GP_T12 (0x27bb0000) #define LDA_GP_GP (0x23bd0000) #define LDQ_T12_GP (0xa77d0000) #define JSR_RA_T12 (0x6b5b0000) #define OPCODE_OPERAND_MASK (0xffff0000) #define OPCODE_MEM_DISP_MASK (0x0000ffff) static struct instruction_data { uint inst[2]; short mem_disp[2]; ulong gp; ulong target; char *curfunc; } instruction_data = { {0} }; static int alpha_dis_filter(ulong vaddr, char *buf, unsigned int output_radix) { struct syment *sp; struct instruction_data *id; char buf2[BUFSIZE], *p1; id = &instruction_data; if (!buf) { BZERO(id, sizeof(struct instruction_data)); if (!(sp = value_search(vaddr, NULL))) return FALSE; readmem(sp->value, KVADDR, &id->inst[0], sizeof(uint) * 2, "two instructions", FAULT_ON_ERROR); if (((id->inst[0] & OPCODE_OPERAND_MASK) == LDAH_GP_T12) && ((id->inst[1] & OPCODE_OPERAND_MASK) == LDA_GP_GP)) { id->mem_disp[0] = (short)(id->inst[0] & OPCODE_MEM_DISP_MASK); id->mem_disp[1] = (short)(id->inst[1] & OPCODE_MEM_DISP_MASK); id->gp = sp->value + (65536*id->mem_disp[0]) + id->mem_disp[1]; id->curfunc = sp->name; if (CRASHDEBUG(1)) console("%s: ldah(%d) and lda(%d) gp: %lx\n", id->curfunc, id->mem_disp[0], id->mem_disp[1], id->gp); return TRUE; } /* send all lines through the generic */ return TRUE; /* dis_address_translation() filter */ } dis_address_translation(vaddr, buf, output_radix); if (!id->gp || !(sp = value_search(vaddr, NULL)) || !STREQ(id->curfunc, sp->name)) { BZERO(id, sizeof(struct instruction_data)); return FALSE; } readmem(vaddr, KVADDR, &id->inst[0], sizeof(uint), "one instruction", FAULT_ON_ERROR); if ((id->inst[0] & OPCODE_OPERAND_MASK) == JSR_RA_T12) { if (!id->target || !strstr(buf, "jsr\tra,(t12)") || !strstr(buf, "<")) return FALSE; p1 = strstr(strstr(buf, "jsr"), "0x"); sprintf(p1, "0x%lx <%s>%s", id->target, value_to_symstr(id->target, buf2, output_radix), CRASHDEBUG(1) ? 
" [PATCHED]\n" : "\n"); return TRUE; } if ((id->inst[0] & OPCODE_OPERAND_MASK) == LDQ_T12_GP) { id->mem_disp[0] = (short)(id->inst[0] & OPCODE_MEM_DISP_MASK); readmem(id->gp + id->mem_disp[0], KVADDR, &id->target, sizeof(ulong), "jsr target", FAULT_ON_ERROR); } else id->target = 0; return TRUE; } /* * For some reason gdb can go off into the weeds translating text addresses, * so this routine both fixes the references as well as imposing the current * output radix on the translations. */ static void dis_address_translation(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && (*p1 != ',')) p1--; if (!STRNEQ(p1, ",0x")) return; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); } /* * If we're generically-inclined, call generic_dump_irq(). Otherwise * dump the IRQ table the old-fashioned way. 
*/ static void alpha_dump_irq(int irq) { ulong action; ulong value; char *arglist[MAXARGS]; int argc, others; char buf[BUFSIZE]; if (symbol_exists("irq_desc")) { machdep->dump_irq = generic_dump_irq; return(generic_dump_irq(irq)); } action = symbol_value("irq_action") + (sizeof(void *) * irq); readmem(action, KVADDR, &action, sizeof(void *), "irq_action pointer", FAULT_ON_ERROR); if (!action) { fprintf(fp, " IRQ: %d\n", irq); fprintf(fp, "handler:\n"); fprintf(fp, " flags: \n"); fprintf(fp, " mask: \n"); fprintf(fp, " name: \n"); fprintf(fp, " dev_id: \n"); fprintf(fp, " next: \n\n"); return; } fprintf(fp, " IRQ: %d\n", irq); open_tmpfile(); do_linked_action: dump_struct("irqaction", action, RADIX(16)); action = 0; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { strip_comma(buf); argc = parse_line(buf, arglist); if (STREQ(arglist[0], "struct") || STREQ(buf, "};")) continue; if (STREQ(arglist[0], "handler")) { fprintf(pc->saved_fp, "handler: %s ", strip_hex(arglist[2])); if (argc == 4) fprintf(pc->saved_fp, "%s", arglist[3]); fprintf(pc->saved_fp, "\n"); } if (STREQ(arglist[0], "flags")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " flags: %lx ", value); if (value) { others = 0; fprintf(pc->saved_fp, "("); if (value & SA_INTERRUPT) fprintf(pc->saved_fp, "%sSA_INTERRUPT", others++ ? "|" : ""); if (value & SA_PROBE) fprintf(pc->saved_fp, "%sSA_PROBE", others++ ? "|" : ""); if (value & SA_SAMPLE_RANDOM) fprintf(pc->saved_fp, "%sSA_SAMPLE_RANDOM", others++ ? "|" : ""); if (value & SA_SHIRQ) fprintf(pc->saved_fp, "%sSA_SHIRQ", others++ ? 
"|" : ""); fprintf(pc->saved_fp, ")"); if (value & ~ACTION_FLAGS) { fprintf(pc->saved_fp, " (bits %lx not translated)", value & ~ACTION_FLAGS); } } fprintf(pc->saved_fp, "\n"); } if (STREQ(arglist[0], "mask")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " mask: %lx\n", value); } if (STREQ(arglist[0], "name")) { fprintf(pc->saved_fp, " name: %s ", strip_hex(arglist[2])); if (argc == 4) fprintf(pc->saved_fp, "\"%s\"", arglist[3]); fprintf(pc->saved_fp, "\n"); } if (STREQ(arglist[0], "dev_id")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " dev_id: %lx\n", value); } if (STREQ(arglist[0], "next")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " next: %s\n", strip_hex(arglist[2])); if (value) action = value; } } close_tmpfile(); fprintf(fp, "\n"); if (action) goto do_linked_action; } /* * Get a stack frame combination of pc and ra from the most relevent spot. */ static void alpha_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { struct syment *sp; ulong ksp; ulong ip; if (pcp) { if (DUMPFILE() && is_panic_thread(bt->task)) { sp = next_symbol("crash_save_current_state", NULL); if (HWRESET_TASK(bt->task)) ip = get_percpu_data(0, GET_HALT_PC, 0); else if (sp) ip = sp->value - 4; else ip = symbol_value("crash_save_current_state") + 16; } else get_alpha_frame(bt, &ip, NULL); *pcp = ip; } if (spp) { ip = 0; if (!get_panic_ksp(bt, &ksp)) get_alpha_frame(bt, HWRESET_TASK(bt->task) ? &ip : NULL, &ksp); if (!INSTACK(ksp, bt)) error(FATAL, "cannot determine starting stack address\n", bt->task); *spp = ksp; if (ip) *pcp = ip; } } /* * Do the work formerly done by alpha_get_sp() and alpha_get_pc(). 
*/ static void get_alpha_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) { int i; ulong ip; ulong r26; ulong ksp, sp; ulong *spp; ulong percpu_ra; ulong percpu_pv; struct percpu_data percpu_data; char buf[BUFSIZE]; ulong task; ulong *stack; task = bt->task; stack = (ulong *)bt->stackbuf; if (tt->flags & THREAD_INFO) { /* pcb.ksp is 1st word in thread_info */ readmem(bt->tc->thread_info, KVADDR, &ksp, sizeof(ulong), "thread_info pcb ksp", FAULT_ON_ERROR); sp = ksp; } else if (VALID_MEMBER(task_struct_tss_ksp)) ksp = sp = stack[OFFSET(task_struct_tss_ksp)/sizeof(long)]; else ksp = sp = stack[OFFSET(task_struct_thread_ksp)/sizeof(long)]; ip = 0; percpu_ra = percpu_pv = 0; spp = &stack[(sp - task)/sizeof(long)]; if (DUMPFILE() && getsp) { if (HWRESET_TASK(task)) { if (INSTACK(sp, bt)) { *getsp = sp; return; } else { get_percpu_data(0, 0, &percpu_data); percpu_ra = percpu_data.halt_ra; percpu_pv = percpu_data.halt_pv; spp = &stack[roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)]; } } if (!percpu_ra && (STREQ(closest_symbol(*spp), "panic") || STREQ(closest_symbol(*spp), "handle_ipi"))) { *getsp = sp; return; } } percpu_retry: if (CRASHDEBUG(1) && percpu_ra) { fprintf(fp, "get_alpha_frame: look for %lx (%s)\n", percpu_ra, value_to_symstr(percpu_ra, buf, 0)); } for (i = 0, spp++; spp < &stack[LONGS_PER_STACK]; spp++,i++) { if (CRASHDEBUG(1) && (percpu_ra || percpu_pv) && is_kernel_text(*spp)) { fprintf(fp, "%lx: %lx (%s)\n", ((ulong)spp - (ulong)stack) + task, *spp, value_to_symstr(*spp, buf, 0)); } if (percpu_ra) { if (*spp == percpu_ra) { *getsp = ((ulong)spp - (ulong)stack) + task; return; } continue; } else if (percpu_pv) { if (*spp == percpu_pv) { *getsp = ((ulong)spp - (ulong)stack) + task; if (getpc) *getpc = percpu_pv; return; } continue; } if (!INSTACK(*spp, bt)) continue; if (is_kernel_text(*(spp+1))) { sp = *spp; ip = *(spp+1); break; } } if (percpu_ra) { percpu_ra = 0; error(INFO, "cannot find return address (percpu_ra) in HARDWARE RESET 
stack\n"); error(INFO, "looking for procedure address (percpu_pv) in HARDWARE RESET stack\n"); if (CRASHDEBUG(1)) { fprintf(fp, "get_alpha_frame: look for %lx (%s)\n", percpu_pv, value_to_symstr(percpu_pv, buf, 0)); } spp = &stack[roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)]; goto percpu_retry; } if (percpu_pv) { error(INFO, "cannot find procedure address (percpu_pv) in HARDWARE RESET stack\n"); } /* * Check for a forked task that has not yet run in user space. */ if (!ip) { if (INSTACK(ksp + OFFSET(switch_stack_r26), bt)) { readmem(ksp + OFFSET(switch_stack_r26), KVADDR, &r26, sizeof(ulong), "ret_from_smp_fork check", FAULT_ON_ERROR); if (STREQ(closest_symbol(r26), "ret_from_smp_fork") || STREQ(closest_symbol(r26), "ret_from_smpfork")) { ip = r26; sp = ksp; } } } if (getsp) *getsp = sp; if (getpc) *getpc = ip; } /* * Fill the percpu_data structure with information from the * hwrpb/percpu_data structures for a given CPU. If requested, * return one of the specified entries. 
*/ static ulong get_percpu_data(int cpu, ulong flag, struct percpu_data *pd) { ulong hwrpb, halt_ra, halt_PC, halt_pv; unsigned long processor_offset, processor_size; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); readmem(hwrpb+OFFSET(hwrpb_struct_processor_offset), KVADDR, &processor_offset, sizeof(ulong), "hwrpb processor_offset", FAULT_ON_ERROR); readmem(hwrpb+OFFSET(hwrpb_struct_processor_size), KVADDR, &processor_size, sizeof(ulong), "hwrpb processor_size", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_PC), KVADDR, &halt_PC, sizeof(ulong), "percpu halt_PC", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_ra), KVADDR, &halt_ra, sizeof(ulong), "percpu halt_ra", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_pv), KVADDR, &halt_pv, sizeof(ulong), "percpu halt_pv", FAULT_ON_ERROR); if (pd) { pd->halt_PC = halt_PC; pd->halt_ra = halt_ra; pd->halt_pv = halt_pv; } switch (flag) { case GET_HALT_PC: return halt_PC; case GET_HALT_RA: return halt_ra; case GET_HALT_PV: return halt_pv; default: return 0; } } /* * Translate a PTE, returning TRUE if the page is _PAGE_VALID or _PAGE_PRESENT, * whichever is appropriate for the machine type. If a physaddr pointer is * passed in, don't print anything. 
*/ static int alpha_translate_pte(ulong pte, void *physaddr, ulonglong unused) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; physaddr_t paddr; paddr = PTOB(pte >> 32); page_present = (pte & _PAGE_VALID); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_VALID) fprintf(fp, "%sVALID", others++ ? "|" : ""); if (pte & _PAGE_FOR) fprintf(fp, "%sFOR", others++ ? "|" : ""); if (pte & _PAGE_FOW) fprintf(fp, "%sFOW", others++ ? "|" : ""); if (pte & _PAGE_FOE) fprintf(fp, "%sFOE", others++ ? "|" : ""); if (pte & _PAGE_ASM) fprintf(fp, "%sASM", others++ ? "|" : ""); if (pte & _PAGE_KRE) fprintf(fp, "%sKRE", others++ ? "|" : ""); if (pte & _PAGE_URE) fprintf(fp, "%sURE", others++ ? "|" : ""); if (pte & _PAGE_KWE) fprintf(fp, "%sKWE", others++ ? 
"|" : ""); if (pte & _PAGE_UWE) fprintf(fp, "%sUWE", others++ ? "|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return page_present; } /* * This is currently not machine-dependent, but eventually I'd prefer to use * the HWPCB for the real physical memory size. */ static uint64_t alpha_memory_size(void) { return (generic_memory_size()); } /* * Determine where vmalloc'd memory starts. */ static ulong alpha_vmalloc_start(void) { return VMALLOC_START; } /* * ALPHA tasks are all stacksize-aligned. */ static int alpha_is_task_addr(ulong task) { return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * Keep or reject a symbol from the kernel namelist. */ int alpha_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%016lx %s\n", value, name); return (name && strlen(name) && (value > MIN_SYMBOL_VALUE)); } /* * Override smp_num_cpus if possible and necessary. */ int alpha_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_online())) return cpus; else return kt->cpus; } /* * Machine dependent command. */ void alpha_cmd_mach(void) { int c, cflag; unsigned int radix; cflag = radix = 0; while ((c = getopt(argcnt, args, "cxd")) != EOF) { switch(c) { case 'c': cflag++; break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (cflag) display_hwrpb(radix); else alpha_display_machine_stats(); } /* * "mach" command output. 
*/ static void alpha_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } /* * Display the hwrpb_struct and each percpu_struct. */ static void display_hwrpb(unsigned int radix) { int cpu; ulong hwrpb, percpu; ulong processor_offset, processor_size; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); readmem(hwrpb+OFFSET(hwrpb_struct_processor_offset), KVADDR, &processor_offset, sizeof(ulong), "hwrpb processor_offset", FAULT_ON_ERROR); readmem(hwrpb+OFFSET(hwrpb_struct_processor_size), KVADDR, &processor_size, sizeof(ulong), "hwrpb processor_size", FAULT_ON_ERROR); fprintf(fp, "HWRPB:\n"); dump_struct("hwrpb_struct", hwrpb, radix); for (cpu = 0; cpu < kt->cpus; cpu++) { fprintf(fp, "\nCPU %d:\n", cpu); percpu = hwrpb + processor_offset + (processor_size * cpu); dump_struct("percpu_struct", percpu, radix); } } /* * Perform any leftover pre-prompt machine-specific initialization tasks here. 
*/ static void alpha_post_init(void) { modify_signame(7, "SIGEMT", NULL); modify_signame(10, "SIGBUS", NULL); modify_signame(12, "SIGSYS", NULL); modify_signame(16, "SIGURG", NULL); modify_signame(17, "SIGSTOP", NULL); modify_signame(18, "SIGTSTP", NULL); modify_signame(19, "SIGCONT", NULL); modify_signame(20, "SIGCHLD", NULL); modify_signame(23, "SIGIO", "SIGPOLL"); modify_signame(29, "SIGINFO", "SIGPWR"); modify_signame(30, "SIGUSR1", NULL); modify_signame(31, "SIGUSR2", NULL); } #endif /* ALPHA */ crash-utility-crash-9cd43f5/unwind_arm.c0000664000372000037200000004632715107550337017717 0ustar juerghjuergh/* * Stack unwinding support for ARM * * This code is derived from the kernel source: * arch/arm/kernel/unwind.c * Copyright (C) 2008 ARM Limited * * Created by: Mika Westerberg * Copyright (C) 2010 Nokia Corporation * * For more information about ARM unwind tables see "Exception handling ABI for * the ARM architecture" document at: * * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef ARM #include "defs.h" /** * struct unwind_idx - index table entry * @addr: prel31 offset to the start of the function * @insn: index table entry. * * @insn can be encoded as follows: * 1. if bit31 is clear this points to the start of the EHT entry * (prel31 offset) * 2. if bit31 is set, this contains the EHT entry itself * 3. if 0x1, cannot unwind. 
*/ struct unwind_idx { ulong addr; ulong insn; }; /** * struct unwind_table - per-module unwind table * @idx: pointer to the star of the unwind table * @start: pointer to the start of the index table * @end: pointer to the last element +1 of the index table * @begin_addr: start address which this table covers * @end_addr: end address which this table covers * @kv_base: kernel virtual address of the start of the index table * * Kernel stores per-module unwind tables in this format. There can be more than * one table per module as we have different ELF sections in the module. */ struct unwind_table { struct unwind_idx *idx; struct unwind_idx *start; struct unwind_idx *end; ulong begin_addr; ulong end_addr; ulong kv_base; }; /* * Unwind table pointers to master kernel table and for modules. */ static struct unwind_table *kernel_unwind_table; static struct unwind_table *module_unwind_tables; struct unwind_ctrl_block { ulong vrs[16]; ulong insn; ulong insn_kvaddr; int entries; int byte; }; struct stackframe { ulong fp; ulong sp; ulong lr; ulong pc; }; enum regs { R7 = 7, FP = 11, SP = 13, LR = 14, PC = 15, }; static int init_kernel_unwind_table(void); static int read_module_unwind_table(struct unwind_table *, ulong); static int init_module_unwind_tables(void); static int unwind_get_insn(struct unwind_ctrl_block *); static ulong unwind_get_byte(struct unwind_ctrl_block *); static ulong get_value_from_stack(ulong *); static int unwind_exec_insn(struct unwind_ctrl_block *); static int is_core_kernel_text(ulong); static struct unwind_table *search_table(ulong); static struct unwind_idx *search_index(const struct unwind_table *, ulong); static ulong prel31_to_addr(ulong, ulong); static void index_prel31_to_addr(struct unwind_table *); static int unwind_frame(struct stackframe *, ulong); /* * Function reads in-memory kernel and module unwind tables and makes * local copy of them for unwinding. If unwinding tables cannot be found, this * function returns FALSE, otherwise TRUE. 
*/ int init_unwind_tables(void) { if (!symbol_exists("__start_unwind_idx") || !symbol_exists("__stop_unwind_idx") || !symbol_exists("__start_unwind_tab") || !symbol_exists("__stop_unwind_tab") || !symbol_exists("unwind_tables")) { return FALSE; } if (!init_kernel_unwind_table()) { error(WARNING, "UNWIND: failed to initialize kernel unwind table\n"); return FALSE; } /* * Initialize symbols for per-module unwind tables. Actually there are * several tables per module (one per code section). */ STRUCT_SIZE_INIT(unwind_table, "unwind_table"); MEMBER_OFFSET_INIT(unwind_table_list, "unwind_table", "list"); MEMBER_OFFSET_INIT(unwind_table_start, "unwind_table", "start"); MEMBER_OFFSET_INIT(unwind_table_stop, "unwind_table", "stop"); MEMBER_OFFSET_INIT(unwind_table_begin_addr, "unwind_table", "begin_addr"); MEMBER_OFFSET_INIT(unwind_table_end_addr, "unwind_table", "end_addr"); STRUCT_SIZE_INIT(unwind_idx, "unwind_idx"); MEMBER_OFFSET_INIT(unwind_idx_addr, "unwind_idx", "addr"); MEMBER_OFFSET_INIT(unwind_idx_insn, "unwind_idx", "insn"); if (!init_module_unwind_tables()) { error(WARNING, "UNWIND: failed to initialize module unwind tables\n"); } /* * We abuse DWARF_UNWIND flag a little here as ARM unwinding tables are * not in DWARF format but we can use the flags to indicate that we have * unwind tables support ready. */ kt->flags |= DWARF_UNWIND_CAPABLE; kt->flags |= DWARF_UNWIND; return TRUE; } /* * Allocate and fill master kernel unwind table. 
*/ static int init_kernel_unwind_table(void) { ulong idx_start, idx_end, idx_size; kernel_unwind_table = calloc(sizeof(*kernel_unwind_table), 1); if (!kernel_unwind_table) return FALSE; idx_start = symbol_value("__start_unwind_idx"); idx_end = symbol_value("__stop_unwind_idx"); idx_size = idx_end - idx_start; kernel_unwind_table->idx = calloc(idx_size, 1); if (!kernel_unwind_table->idx) goto fail; /* now read in the index table */ if (!readmem(idx_start, KVADDR, kernel_unwind_table->idx, idx_size, "master kernel unwind table", RETURN_ON_ERROR)) { free(kernel_unwind_table->idx); goto fail; } /* * Kernel versions before v3.2 (specifically, before commit * de66a979012db "ARM: 7187/1: fix unwinding for XIP kernels") * converted the prel31 offsets in the unwind index table to absolute * addresses on startup. Newer kernels don't perform this conversion, * and have a slightly more involved search algorithm. * * We always just use the older search method (a straightforward binary * search) and convert the index table offsets ourselves if we detect * that the kernel didn't do it. 
*/ machdep->machspec->unwind_index_prel31 = !is_kernel_text(kernel_unwind_table->idx[0].addr); kernel_unwind_table->start = kernel_unwind_table->idx; kernel_unwind_table->end = (struct unwind_idx *) ((char *)kernel_unwind_table->idx + idx_size); kernel_unwind_table->begin_addr = kernel_unwind_table->start->addr; kernel_unwind_table->end_addr = (kernel_unwind_table->end - 1)->addr; kernel_unwind_table->kv_base = idx_start; if (machdep->machspec->unwind_index_prel31) index_prel31_to_addr(kernel_unwind_table); if (CRASHDEBUG(1)) { fprintf(fp, "UNWIND: master kernel table start\n"); fprintf(fp, "UNWIND: size : %ld\n", idx_size); fprintf(fp, "UNWIND: start : %p\n", kernel_unwind_table->start); fprintf(fp, "UNWIND: end : %p\n", kernel_unwind_table->end); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", kernel_unwind_table->begin_addr); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", kernel_unwind_table->end_addr); fprintf(fp, "UNWIND: master kernel table end\n"); } return TRUE; fail: free(kernel_unwind_table); return FALSE; } /* * Read single module unwind table from addr. */ static int read_module_unwind_table(struct unwind_table *tbl, ulong addr) { ulong idx_start, idx_stop, idx_size; char *buf; buf = GETBUF(SIZE(unwind_table)); /* * First read in the unwind table for this module. It then contains * pointers to the index table which we will read later. */ if (!readmem(addr, KVADDR, buf, SIZE(unwind_table), "module unwind table", RETURN_ON_ERROR)) { error(WARNING, "UNWIND: cannot read unwind table\n"); goto fail; } #define TABLE_VALUE(b, offs) (*((ulong *)((b) + OFFSET(offs)))) idx_start = TABLE_VALUE(buf, unwind_table_start); idx_stop = TABLE_VALUE(buf, unwind_table_stop); idx_size = idx_stop - idx_start; /* * We know the size of the index table. Allocate memory for * the table and read the contents from the kernel memory. 
*/ tbl->idx = calloc(idx_size, 1); if (!tbl->idx) goto fail; if (!readmem(idx_start, KVADDR, tbl->idx, idx_size, "module unwind index table", RETURN_ON_ERROR)) { free(tbl->idx); goto fail; } tbl->start = &tbl->idx[0]; tbl->end = (struct unwind_idx *)((char *)tbl->start + idx_size); tbl->begin_addr = TABLE_VALUE(buf, unwind_table_begin_addr); tbl->end_addr = TABLE_VALUE(buf, unwind_table_end_addr); tbl->kv_base = idx_start; if (machdep->machspec->unwind_index_prel31) index_prel31_to_addr(tbl); if (CRASHDEBUG(1)) { fprintf(fp, "UNWIND: module table start\n"); fprintf(fp, "UNWIND: start : %p\n", tbl->start); fprintf(fp, "UNWIND: end : %p\n", tbl->end); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", tbl->begin_addr); fprintf(fp, "UNWIND: begin_addr: 0x%lx\n", tbl->end_addr); fprintf(fp, "UNWIND: module table end\n"); } FREEBUF(buf); return TRUE; fail: FREEBUF(buf); return FALSE; } /* * Allocate and fill per-module unwind tables. */ static int init_module_unwind_tables(void) { ulong head = symbol_value("unwind_tables"); struct unwind_table *tbl; struct list_data ld; ulong *table_list; int cnt, i, n; BZERO(&ld, sizeof(ld)); ld.start = head; ld.member_offset = OFFSET(unwind_table_list); ld.flags = RETURN_ON_LIST_ERROR; if (CRASHDEBUG(1)) ld.flags |= VERBOSE; /* * Iterate through unwind table list and store start address of each * table in table_list. 
*/
	hq_open();
	cnt = do_list(&ld);
	if (cnt == -1) {
		error(WARNING, "UNWIND: failed to gather unwind_table list\n");
		hq_close();
		return FALSE;
	}
	table_list = (ulong *)GETBUF(cnt * sizeof(ulong));
	cnt = retrieve_list(table_list, cnt);
	hq_close();

	/*
	 * cnt entries are allocated but only cnt - 1 are filled below (the
	 * first list entry is the head pointer); the spare zeroed entry acts
	 * as the idx == NULL terminator that search_table() relies on.
	 */
	module_unwind_tables = calloc(sizeof(struct unwind_table), cnt);
	if (!module_unwind_tables) {
		error(WARNING,
			"UNWIND: failed to allocate memory for (%d tables)\n",
			cnt);
		FREEBUF(table_list);
		return FALSE;
	}

	/* we skip the first address as it is just head pointer */
	for (i = 1, n = 0; i < cnt; i++, n++) {
		tbl = &module_unwind_tables[n];
		if (!read_module_unwind_table(tbl, table_list[i]))
			goto fail;
	}
	/* just in case, zero the last entry (again) */
	BZERO(&module_unwind_tables[n], sizeof(module_unwind_tables[n]));

	FREEBUF(table_list);
	return TRUE;

fail:
	FREEBUF(table_list);

	/* release the index tables read in so far (entry n itself failed) */
	while (--n >= 0) {
		tbl = &module_unwind_tables[n];
		free(tbl->idx);
	}
	free(module_unwind_tables);
	module_unwind_tables = NULL;
	return FALSE;
}

/*
 * Read next unwind instruction pointed by ctrl->insn_kvaddr into
 * ctrl->insn. As a side-effect, increase the ctrl->insn_kvaddr to
 * point to the next instruction.
 */
static int unwind_get_insn(struct unwind_ctrl_block *ctrl)
{
	if (readmem(ctrl->insn_kvaddr, KVADDR, &ctrl->insn,
		sizeof(ctrl->insn), "unwind insn", RETURN_ON_ERROR)) {
		ctrl->insn_kvaddr += sizeof(ctrl->insn);
		return TRUE;
	}

	return FALSE;
}

/*
 * Return next insn byte from ctl or 0 in case of failure. As a side-effect,
 * changes ctrl according the next byte.
 */
static ulong unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
	ulong ret;

	if (ctrl->entries <= 0) {
		error(WARNING, "UNWIND: corrupt unwind entry\n");
		return 0;
	}

	/* extract the byte selected by ctrl->byte from the current word */
	ret = (ctrl->insn >> (ctrl->byte * 8)) & 0xff;

	/* word exhausted: fetch the next one if any entries remain */
	if (!ctrl->byte && --ctrl->entries > 0) {
		if (!unwind_get_insn(ctrl))
			return 0;
		ctrl->byte = 3;
	} else {
		ctrl->byte--;
	}

	return ret;
}

/*
 * Gets one value from stack pointed by vsp.
*/
static ulong get_value_from_stack(ulong *vsp)
{
	ulong val;

	/*
	 * We just read the value from kernel memory instead of peeking it from
	 * the bt->stack.
	 */
	if (!readmem((ulong)vsp, KVADDR, &val, sizeof(val),
		"unwind stack value", RETURN_ON_ERROR)) {
		error(FATAL, "unwind: failed to read value from stack\n");
	}

	return val;
}

/*
 * Execute the next unwind instruction.
 */
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
	ulong insn = unwind_get_byte(ctrl);

	if ((insn & 0xc0) == 0) {
		/*
		 * 00xx xxxx: vsp = vsp + (xx xxx << 2) + 4
		 *
		 * Note that it seems that there is a typo in the spec and this
		 * is corrected in kernel.
		 */
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
	} else if ((insn & 0xc0) == 0x40) {
		/* 01xx xxxx: vsp = vsp - (xx xxx << 2) - 4 */
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
	} else if ((insn & 0xf0) == 0x80) {
		/*
		 * Pop up to 12 integer registers under masks
		 * {r15-r12}, {r11-r4}.
		 */
		ulong mask;
		ulong *vsp = (ulong *)ctrl->vrs[SP];
		int load_sp, reg = 4;

		/* the second instruction byte holds the low mask bits */
		insn = (insn << 8) | unwind_get_byte(ctrl);
		mask = insn & 0x0fff;
		if (mask == 0) {
			/* an empty mask means "refuse to unwind" */
			error(WARNING, "UNWIND: refuse to unwind\n");
			return FALSE;
		}

		/* pop {r4-r15} according to mask */
		load_sp = mask & (1 << (13 - 4));	/* was r13 (SP) popped? */
		while (mask) {
			if (mask & 1)
				ctrl->vrs[reg] = get_value_from_stack(vsp++);
			mask >>= 1;
			reg++;
		}
		/* only advance SP past the popped words if SP was not popped */
		if (!load_sp)
			ctrl->vrs[SP] = (ulong)vsp;
	} else if ((insn & 0xf0) == 0x90 && (insn & 0x0d) != 0x0d) {
		/* 1001 nnnn: set vsp = r[nnnn] */
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
	} else if ((insn & 0xf0) == 0xa0) {
		/*
		 * 1010 0nnn: pop r4-r[4+nnn]
		 * 1010 1nnn: pop r4-r[4+nnn], r14
		 */
		ulong *vsp = (ulong *)ctrl->vrs[SP];
		int reg;

		for (reg = 4; reg <= 4 + (insn & 7); reg++)
			ctrl->vrs[reg] = get_value_from_stack(vsp++);
		if (insn & 0x80)
			ctrl->vrs[14] = get_value_from_stack(vsp++);
		ctrl->vrs[SP] = (ulong)vsp;
	} else if (insn == 0xb0) {
		/* 1011 0000: finish */
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;
	} else if (insn == 0xb1) {
		/* 1011 0001
xxxx yyyy: spare */
		ulong mask = unwind_get_byte(ctrl);
		ulong *vsp = (ulong *)ctrl->vrs[SP];
		int reg = 0;

		/* a zero mask or any high-nibble bit set is a spare encoding */
		if (mask == 0 || mask & 0xf0) {
			error(WARNING, "UNWIND: spare error\n");
			return FALSE;
		}

		/* pop r0-r3 according to mask */
		while (mask) {
			if (mask & 1)
				ctrl->vrs[reg] = get_value_from_stack(vsp++);
			mask >>= 1;
			reg++;
		}
		ctrl->vrs[SP] = (ulong)vsp;
	} else if (insn == 0xb2) {
		/* 1011 0010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2) */
		ulong uleb128 = unwind_get_byte(ctrl);

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
	} else {
		error(WARNING, "UNWIND: unhandled instruction: %02lx\n", insn);
		return FALSE;
	}

	return TRUE;
}

/*
 * Check whether pc lies within the core kernel's text section, using the
 * machine-specific boundaries; returns FALSE if they were never set.
 */
static int is_core_kernel_text(ulong pc)
{
	ulong text_start = machdep->machspec->kernel_text_start;
	ulong text_end = machdep->machspec->kernel_text_end;

	if (text_start && text_end)
		return (pc >= text_start && pc <= text_end);

	return FALSE;
}

static struct unwind_table *
search_table(ulong ip)
{
	/*
	 * First check if this address is in the master kernel unwind table or
	 * some of the module unwind tables.
	 */
	if (is_core_kernel_text(ip)) {
		return kernel_unwind_table;
	} else if (module_unwind_tables) {
		struct unwind_table *tbl;

		/* the module table array is terminated by a zeroed entry */
		for (tbl = &module_unwind_tables[0]; tbl->idx; tbl++) {
			if (ip >= tbl->begin_addr && ip < tbl->end_addr)
				return tbl;
		}
	}

	return NULL;
}

static struct unwind_idx *
search_index(const struct unwind_table *tbl, ulong ip)
{
	struct unwind_idx *start = tbl->start;
	struct unwind_idx *end = tbl->end;

	/*
	 * Do a binary search for the addresses in the index table.
	 * Addresses are guaranteed to be sorted in ascending order.
	 */
	while (start < end - 1) {
		struct unwind_idx *mid = start + ((end - start + 1) >> 1);

		if (ip < mid->addr)
			end = mid;
		else
			start = mid;
	}

	return start;
}

/*
 * Convert a prel31 symbol to an absolute kernel virtual address.
*/
static ulong prel31_to_addr(ulong addr, ulong insn)
{
	/* sign extend to 32 bits */
	long offset = ((long)insn << 1) >> 1;

	return addr + offset;
}

/*
 * Convert all prel31 offsets in our local copy of an index table to
 * absolute addresses, relocating against the table's original kernel
 * virtual address (kv_base).
 */
static void index_prel31_to_addr(struct unwind_table *tbl)
{
	struct unwind_idx *idx = tbl->start;
	ulong kvaddr = tbl->kv_base;

	for (; idx < tbl->end; idx++, kvaddr += sizeof(struct unwind_idx))
		idx->addr = prel31_to_addr(kvaddr, idx->addr);
}

static int unwind_frame(struct stackframe *frame, ulong stacktop)
{
	const struct unwind_table *tbl;
	struct unwind_ctrl_block ctrl;
	struct unwind_idx *idx;
	ulong low, high;
	int fpindex = FP;

	/* sanity bounds: the unwound SP must remain in [low, high) */
	low = frame->sp;
	high = stacktop;

	if (!is_kernel_text(frame->pc))
		return FALSE;

	/* Thumb needs R7 instead of FP */
	if (frame->pc & 1)
		fpindex = R7;

	tbl = search_table(frame->pc);
	if (!tbl) {
		error(WARNING, "UNWIND: cannot find unwind table for %lx\n",
			frame->pc);
		return FALSE;
	}

	idx = search_index(tbl, frame->pc);

	/* seed the virtual register set from the current frame */
	ctrl.vrs[fpindex] = frame->fp;
	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;

	if (CRASHDEBUG(5)) {
		fprintf(fp, "UNWIND: >frame: FP=%lx\n", ctrl.vrs[fpindex]);
		fprintf(fp, "UNWIND: >frame: SP=%lx\n", ctrl.vrs[SP]);
		fprintf(fp, "UNWIND: >frame: LR=%lx\n", ctrl.vrs[LR]);
		fprintf(fp, "UNWIND: >frame: PC=%lx\n", ctrl.vrs[PC]);
	}

	if (idx->insn == 1) {
		/* can't unwind */
		return FALSE;
	} else if ((idx->insn & 0x80000000) == 0) {
		/* insn contains prel31 offset to the EHT entry */

		/*
		 * Calculate a byte offset for idx->insn from the
		 * start of our copy of the index table. This offset
		 * is used to get a kernel virtual address of the
		 * unwind index entry (idx_kvaddr).
		 */
		ulong idx_offset = (ulong)&idx->insn - (ulong)tbl->start;
		ulong idx_kvaddr = tbl->kv_base + idx_offset;

		/*
		 * Now compute a kernel virtual address for the EHT
		 * entry by adding prel31 offset (idx->insn) to the
		 * unwind index entry address (idx_kvaddr) and read
		 * the EHT entry.
*/ ctrl.insn_kvaddr = prel31_to_addr(idx_kvaddr, idx->insn); if (!unwind_get_insn(&ctrl)) return FALSE; } else if ((idx->insn & 0xff000000) == 0x80000000) { /* EHT entry is encoded in the insn itself */ ctrl.insn = idx->insn; } else { error(WARNING, "UNWIND: unsupported instruction %lx\n", idx->insn); return FALSE; } /* check the personality routine */ if ((ctrl.insn & 0xff000000) == 0x80000000) { /* personality routine 0 */ ctrl.byte = 2; ctrl.entries = 1; } else if ((ctrl.insn & 0xff000000) == 0x81000000) { /* personality routine 1 */ ctrl.byte = 1; ctrl.entries = 1 + ((ctrl.insn & 0x00ff0000) >> 16); } else { error(WARNING, "UNWIND: unsupported personality routine\n"); return FALSE; } /* now, execute the instructions */ while (ctrl.entries > 0) { if (!unwind_exec_insn(&ctrl)) { error(WARNING, "UNWIND: failed to exec instruction\n"); return FALSE; } if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= high) return FALSE; } if (ctrl.vrs[PC] == 0) ctrl.vrs[PC] = ctrl.vrs[LR]; if (frame->pc == ctrl.vrs[PC]) return FALSE; frame->fp = ctrl.vrs[fpindex]; frame->sp = ctrl.vrs[SP]; frame->lr = ctrl.vrs[LR]; frame->pc = ctrl.vrs[PC]; if (CRASHDEBUG(5)) { fprintf(fp, "UNWIND: frameptr; frame.sp = bt->stkptr; frame.pc = bt->instptr; /* * In case bt->machdep contains pointer to a full register set, we take * LR from there. */ if (bt->machdep) { const struct arm_pt_regs *regs = bt->machdep; frame.fp = regs->ARM_fp; frame.lr = regs->ARM_lr; } while (IS_KVADDR(bt->instptr)) { if (!unwind_frame(&frame, bt->stacktop)) break; arm_dump_backtrace_entry(bt, n++, frame.lr, frame.sp); bt->instptr = frame.pc; bt->stkptr = frame.sp; } } #endif /* ARM */ crash-utility-crash-9cd43f5/lkcd_vmdump_v2_v3.h0000664000372000037200000001676315107550337021106 0ustar juerghjuergh/* lkcd_vmdump_v2_v3.h - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _VMDUMP_H #define _VMDUMP_H /* necessary header files */ #ifndef MCLX #include /* for utsname structure */ #include /* for architecture-specific header */ #endif #if defined(ARM) || defined(X86) || defined(PPC) || defined(S390) || \ defined(S390X) || defined(ARM64) || defined(MIPS) || \ defined(MIPS64) || defined(SPARC64) || defined(RISCV64) || \ defined(LOONGARCH64) /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * */ /* This header file holds the architecture specific crash dump header */ #ifndef _ASM_VMDUMP_H #define _ASM_VMDUMP_H /* necessary header files */ typedef unsigned int u32; #include /* for pt_regs */ /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x1 /* version number */ /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
*/ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the esp for i386 systems */ uint32_t dha_esp; /* the eip for i386 systems */ uint32_t dha_eip; /* the dump registers */ #if !defined(S390) && !defined(S390X) && !defined(ARM64) && !defined(RISCV64) && !defined(LOONGARCH64) struct pt_regs dha_regs; #endif } dump_header_asm_t; #endif /* _ASM_VMDUMP_H */ #endif /* ARM || X86 || PPC */ #if defined(ALPHA) || defined(IA64) || defined(X86_64) || defined(PPC64) /* * Plug in the real ../arch/alpha/vmdump.h when available. For now the * data here are just placeholders... */ #ifndef IA64 typedef unsigned int u32; #include /* for pt_regs */ #endif /* definitions */ #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ #define DUMP_ASM_VERSION_NUMBER 0x1 /* version number */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* the esp for i386 systems */ uint32_t dha_esp; /* the eip for i386 systems */ uint32_t dha_eip; /* the dump registers */ #ifndef IA64 struct pt_regs dha_regs; #endif } dump_header_asm_t; #endif /* ALPHA or IA64 (?) 
*/ /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ #ifdef CONFIG_VMDUMP /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_VERSION_NUMBER 0x2 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump flags -- add as necessary */ #define DUMP_RAW 0x1 /* raw page (no compression) */ #define DUMP_COMPRESSED 0x2 /* page is compressed */ #define DUMP_END 0x4 /* end marker on a full dump */ /* dump types - type specific stuff added later for page typing */ #define DUMP_NONE 0 /* no dumping at all -- just bail */ #define DUMP_HEADER 1 /* kernel dump header only */ #define DUMP_KERN 2 /* dump header and kernel pages */ #define DUMP_USED 3 /* dump header, kernel/user pages */ #define DUMP_ALL 4 /* dump header, all memory pages */ /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the utsname (uname) information */ struct new_utsname dh_utsname; /* the address of the current task */ struct task_struct *dh_current_task; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; #endif /* CONFIG_VMDUMP */ #ifdef __KERNEL__ extern void dump_init(uint64_t, uint64_t); extern void dump_open(char *); extern void dump_execute(char *, struct pt_regs *); #endif #endif /* _VMDUMP_H */ crash-utility-crash-9cd43f5/remote.c0000664000372000037200000032007515107550337017042 0ustar juerghjuergh/* remote.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2009, 2011, 2018 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2009, 2011, 2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include #include #include #include #include #define FAILMSG "FAIL " #define DONEMSG "DONE " #define DATAMSG "DATA " #define DATA_HDRSIZE (13) /* strlen("XXXX ") + strlen("0131072") + NULL */ #define MAXRECVBUFSIZE (131072) #define READBUFSIZE (MAXRECVBUFSIZE+DATA_HDRSIZE) #ifdef DAEMON /* * The remote daemon. */ static int daemon_init(void); static ulong daemon_htol(char *); static int daemon_is_elf_file(char *); static int daemon_mount_point(char *); static int daemon_find_booted_kernel(char *); static char **daemon_build_searchdirs(int); static int daemon_is_directory(char *); static int daemon_file_readable(char *); static int daemon_parse_line(char *, char **); static char *daemon_clean_line(char *); int console(char *, ...); static void daemon_socket_options(int); static char *no_debugging_symbols_found(char *); static ulong daemon_filesize(int); static int daemon_find_module(char *, char *, char *); static int daemon_search_directory_tree(char *, char *, char *); static int daemon_file_exists(char *, struct stat *); static int daemon_checksum(char *, long *); static void daemon_send(void *, int); static int daemon_proc_version(char *); static void handle_connection(int); struct remote_context { int sock; int remdebug; char *remdebugfile; } remote_context = { 0, 0, "/dev/null" }; struct remote_context *rc = &remote_context; int main(int argc, char **argv) { int c, sockfd, newsockfd, clilen; struct sockaddr_in serv_addr, cli_addr; struct hostent *hp; ushort tcp_port; char hostname[MAXHOSTNAMELEN]; tcp_port = 0; optind = 0; while ((c = getopt(argc, argv, "vd:")) > 0) { switch (c) { case 'v': printf("%s %s\n", basename(argv[0]), /* BASELEVEL_REVISION */ "(deprecated)"); exit(0); case 
'd': rc->remdebug++; rc->remdebugfile = optarg; break; } } console("\n", getpid()); while (argv[optind]) { if (!tcp_port) tcp_port = (ushort)atoi(argv[optind]); optind++; } console("port: %d\n", tcp_port); if (gethostname(hostname, MAXHOSTNAMELEN) < 0) { console("gethostname failed: %s\n", strerror(errno)); perror("gethostname"); exit(1); } console("hostname: %s\n", hostname); if ((hp = gethostbyname(hostname)) == NULL) { console("gethostbyname failed: %s\n", hstrerror(h_errno)); perror("gethostbyname"); exit(1); } console("attempting daemon_init...\n"); if (!daemon_init()) exit(1); console("\n", getpid()); if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) exit(1); BZERO((char *)&serv_addr, sizeof(serv_addr)); serv_addr.sin_family = AF_INET; BCOPY(hp->h_addr, (char *)&serv_addr.sin_addr, hp->h_length); serv_addr.sin_port = htons(tcp_port); daemon_socket_options(sockfd); if (bind(sockfd, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0){ console("%d: bind failed: %s\n", getpid(), strerror(errno)); exit(1); } if (listen(sockfd, 5) < 0) { console("%d: listen failed: %s\n", getpid(), strerror(errno)); exit(1); } for (;;) { clilen = sizeof(cli_addr); if ((newsockfd = accept(sockfd, (struct sockaddr *)&cli_addr, &clilen)) < 0) { console("%d: accept failed: %s\n", getpid(), strerror(errno)); exit(1); } switch (fork()) { case -1: exit(1); case 0: close(sockfd); handle_connection(newsockfd); exit(0); default: close(newsockfd); break; } close(newsockfd); } } /* * This probably doesn't do much, but it might reduce the acknowledge * negotiations somewhat. (?) */ static void daemon_socket_options(int sockfd) { int nodelay; if (setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, (char *)&nodelay, sizeof(nodelay)) < 0) console("TCP_NODELAY setsockopt error\n"); } /* * This is the child daemon that handles the incoming requests. 
*/ #define MAX_REMOTE_FDS (10) static void handle_connection(int sock) { int i; char recvbuf[BUFSIZE]; char savebuf[BUFSIZE]; char sendbuf[BUFSIZE]; char buf1[BUFSIZE]; char readbuf[READBUFSIZE+1]; char *file; FILE *tmp, *pipe; char *p1, *p2, *p3; size_t cnt; int fds[MAX_REMOTE_FDS]; int mfd; ulong addr, total, reqsize, bufsize; fd_set rfds; int len, first, retval, done; struct stat sbuf; rc->sock = sock; console("< new connection >\n"); for (i = 0; i < MAX_REMOTE_FDS; i++) fds[i] = -1; while (TRUE) { FD_ZERO(&rfds); FD_SET(sock, &rfds); retval = select(sock+1, &rfds, NULL, NULL, NULL); BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); switch (read(sock, recvbuf, BUFSIZE-1)) { case -1: console("[read returned -1]\n"); continue; case 0: console("[read returned 0]\n"); return; default: console("[%s]: ", recvbuf); break; } if (STRNEQ(recvbuf, "OPEN ")) { strcpy(sendbuf, recvbuf); p1 = strtok(recvbuf, " "); /* OPEN */ file = strtok(NULL, " "); /* filename */ for (i = 0; i < MAX_REMOTE_FDS; i++) { if (fds[i] == -1) break; } if (i < MAX_REMOTE_FDS) { if ((fds[i] = open(file, O_RDWR)) < 0) { if ((fds[i] = open(file, O_RDONLY)) < 0) strcat(sendbuf, " "); else { sprintf(buf1, " %d O_RDONLY %ld", fds[i], daemon_filesize(fds[i])); strcat(sendbuf, buf1); } } else { sprintf(buf1, " %d O_RDWR %ld", fds[i], daemon_filesize(fds[i])); strcat(sendbuf, buf1); } } else strcat(sendbuf, " "); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "READ_LIVE ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ_LIVE */ p1 = strtok(NULL, " "); /* filename id */ p2 = strtok(NULL, " "); /* address */ p3 = strtok(NULL, " "); /* length */ addr = daemon_htol(p2); len = atoi(p3); mfd = atoi(p1); errno = 0; BZERO(readbuf, READBUFSIZE); if (lseek(mfd, addr, SEEK_SET) == -1) len = 0; else if (read(mfd, &readbuf[DATA_HDRSIZE], len) != len) len = 0; if (!len) { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", 
readbuf); } else { sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len); console("(%ld)\n", len); } daemon_send(readbuf, len+DATA_HDRSIZE); continue; } else if (STRNEQ(recvbuf, "READ_NETDUMP ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ_NETDUMP */ p2 = strtok(NULL, " "); /* address */ p3 = strtok(NULL, " "); /* length */ addr = daemon_htol(p2); len = atoi(p3); BZERO(readbuf, READBUFSIZE); errno = 0; if ((len = read_netdump(UNUSED, &readbuf[DATA_HDRSIZE], len, UNUSED, addr)) < 0) len = 0; if (len) { sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len); console("(%ld)\n", (ulong)len); } else { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); } daemon_send(readbuf, len+DATA_HDRSIZE); continue; } else if (STRNEQ(recvbuf, "READ_MCLXCD ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ_MCLXCD */ p2 = strtok(NULL, " "); /* address */ p3 = strtok(NULL, " "); /* length */ addr = daemon_htol(p2); len = atoi(p3); errno = 0; BZERO(readbuf, READBUFSIZE); if (vas_lseek(addr, SEEK_SET)) len = 0; else if (vas_read((void *) &readbuf[DATA_HDRSIZE], len) != len) len = 0; if (len) { sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)len); console("(%ld)\n", (ulong)len); } else { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); } daemon_send(readbuf, len+DATA_HDRSIZE); continue; } else if (STRNEQ(recvbuf, "CLOSE ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* SIZE */ p1 = strtok(NULL, " "); /* filename id */ mfd = atoi(p1); for (i = retval = 0; i < MAX_REMOTE_FDS; i++) { if (fds[i] == mfd) { close(mfd); fds[i] = -1; retval = TRUE; break; } } sprintf(sendbuf, "%s%s", savebuf, retval ? 
" OK" : " "); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "READ ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ */ p1 = strtok(NULL, " "); /* filename id */ p2 = strtok(NULL, " "); /* address */ p3 = strtok(NULL, " "); /* length */ addr = daemon_htol(p2); len = atoi(p3); mfd = atoi(p1); BZERO(readbuf, READBUFSIZE); if (lseek(mfd, addr, SEEK_SET) == -1) len = 0; else if (read(mfd, readbuf, len) != len) len = 0; if (!len) { sprintf(readbuf, "%s ", savebuf); len = strlen(readbuf); console("[%s]\n", readbuf); } else console("(%ld)\n", len); daemon_send(readbuf, len); continue; } else if (STRNEQ(recvbuf, "MACHINE_PID")) { sprintf(sendbuf, "%s %s %d", recvbuf, MACHINE_TYPE, getpid()); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "TYPE ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* TYPE */ file = strtok(NULL, " "); /* filename */ if (stat(file, &sbuf) < 0) sprintf(sendbuf, "%s ", savebuf); else if (daemon_is_elf_file(file)) sprintf(sendbuf, "%s ELF", savebuf); else if (STREQ(file, "/dev/mem")) sprintf(sendbuf, "%s DEVMEM", savebuf); else if (is_netdump(file, NETDUMP_REMOTE)) sprintf(sendbuf, "%s NETDUMP", savebuf); else if (is_mclx_compressed_dump(file)) sprintf(sendbuf, "%s MCLXCD", savebuf); else if (is_lkcd_compressed_dump(file)) sprintf(sendbuf, "%s LKCD", savebuf); else if (is_s390_dump(file)) sprintf(sendbuf, "%s S390D", savebuf); else sprintf(sendbuf, "%s UNSUPPORTED", savebuf); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "LINUX_VERSION ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* LINUX_VERSION */ file = strtok(NULL, " "); /* filename */ sprintf(readbuf, "/usr/bin/strings %s | grep 'Linux version'", file); if ((pipe = popen(readbuf, "r"))) { BZERO(readbuf, BUFSIZE); if (fread(readbuf, sizeof(char), BUFSIZE-1, pipe) > 0) strcpy(sendbuf, 
readbuf); else sprintf(sendbuf, "%s ", savebuf); pclose(pipe); } else sprintf(sendbuf, "%s ", savebuf); console("[%s] (%d)\n", sendbuf, strlen(sendbuf)); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "READ_GZIP ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ_GZIP */ p1 = strtok(NULL, " "); /* bufsize */ bufsize = atol(p1); file = strtok(NULL, " "); /* filename */ errno = 0; reqsize = bufsize - DATA_HDRSIZE; sprintf(readbuf, "/usr/bin/gzip -c %s", file); if ((pipe = popen(readbuf, "r")) == NULL) { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); daemon_send(readbuf, DATA_HDRSIZE); continue; } errno = cnt = done = total = first = 0; while (!done) { BZERO(readbuf, READBUFSIZE); cnt = fread(&readbuf[DATA_HDRSIZE], sizeof(char), reqsize, pipe); total += cnt; if (feof(pipe)) { sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)cnt); done = TRUE; } else if (ferror(pipe)) { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); done = TRUE; } else sprintf(readbuf, "%s%07ld", DATAMSG, (ulong)cnt); console("%s[%s]\n", !first++ ? 
"\n" : "", readbuf); daemon_send(readbuf, bufsize); } console("GZIP total: %ld\n", total); pclose(pipe); continue; } else if (STRNEQ(recvbuf, "PROC_VERSION")) { BZERO(readbuf, READBUFSIZE); if (!daemon_proc_version(readbuf)) sprintf(readbuf, "%s ", recvbuf); console("[%s]\n", readbuf); daemon_send(readbuf, strlen(readbuf)); continue; } else if (STRNEQ(recvbuf, "DEBUGGING_SYMBOLS ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* DEBUGGING */ p2 = strtok(NULL, " "); /* filename */ sprintf(sendbuf, "%s %s", savebuf, no_debugging_symbols_found(p2)); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "PAGESIZE ")) { if (strstr(recvbuf, "LIVE")) sprintf(sendbuf, "%s %d", recvbuf, (uint)getpagesize()); else if (strstr(recvbuf, "NETDUMP")) sprintf(sendbuf, "%s %d", recvbuf, (uint)netdump_page_size()); else if (strstr(recvbuf, "MCLXCD")) sprintf(sendbuf, "%s %d", recvbuf, (uint)mclx_page_size()); else if (strstr(recvbuf, "LKCD")) sprintf(sendbuf, "%s %d", recvbuf, (uint)lkcd_page_size()); else if (strstr(recvbuf, "S390D")) sprintf(sendbuf, "%s %d", recvbuf, s390_page_size()); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "FIND_BOOTED_KERNEL")) { BZERO(readbuf, READBUFSIZE); if (daemon_find_booted_kernel(readbuf)) sprintf(sendbuf, "%s %s", recvbuf, readbuf); else sprintf(sendbuf, "%s ", recvbuf); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "FIND_MODULE ")) { strcpy(savebuf, recvbuf); strtok(recvbuf, " "); /* FIND_MODULE */ p1 = strtok(NULL, " "); /* release */ p2 = strtok(NULL, " "); /* module */ if (daemon_find_module(p1, p2, buf1)) { if (daemon_checksum(buf1, &total)) sprintf(sendbuf, "%s %s %lx", savebuf, buf1, total); else sprintf(sendbuf, "%s %s %lx", savebuf, buf1, (ulong)0xdeadbeef); } else sprintf(sendbuf, "%s ", savebuf); console("[%s]\n", sendbuf); daemon_send(sendbuf, 
strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "SUM ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* SUM */ p2 = strtok(NULL, " "); /* filename */ if (daemon_checksum(p2, &total)) sprintf(sendbuf, "%s %lx", savebuf, total); else sprintf(sendbuf, "%s ", savebuf); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "MEMORY ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* MEMORY */ p2 = strtok(NULL, " "); /* USED or FREE */ p3 = strtok(NULL, " "); /* MCLXCD, LKCD, etc. */ if (STREQ(p2, "FREE")) { if (STREQ(p3, "NETDUMP")) retval = netdump_free_memory(); else if (STREQ(p3, "MCLXCD")) retval = vas_free_memory(NULL); else if (STREQ(p3, "LKCD")) retval = lkcd_free_memory(); else if (STREQ(p3, "S390D")) retval = s390_free_memory(); } if (STREQ(p2, "USED")) { if (STREQ(p3, "NETDUMP")) retval = netdump_memory_used(); else if (STREQ(p3, "MCLXCD")) retval = vas_memory_used(); else if (STREQ(p3, "LKCD")) retval = lkcd_memory_used(); else if (STREQ(p3, "S390D")) retval = s390_memory_used(); } sprintf(sendbuf, "%s %d", savebuf, retval); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "MEMORY_DUMP")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* MEMORY_DUMP */ p1 = strtok(NULL, " "); /* bufsize */ p2 = strtok(NULL, " "); /* MCLXCD, LKCD, etc. 
*/ bufsize = atol(p1); reqsize = bufsize - DATA_HDRSIZE; errno = 0; if ((tmp = tmpfile()) == NULL) { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); daemon_send(readbuf, DATA_HDRSIZE); continue; } if (STREQ(p2, "NETDUMP")) retval = netdump_memory_dump(tmp); else if (STREQ(p2, "MCLXCD")) vas_memory_dump(tmp); else if (STREQ(p2, "LKCD")) lkcd_memory_dump(tmp); else if (STREQ(p2, "LKCD_VERBOSE")) { set_lkcd_fp(tmp); dump_lkcd_environment(0); set_lkcd_fp(NULL); } else if (STREQ(p2, "S390D")) s390_memory_dump(tmp); rewind(tmp); errno = cnt = done = total = first = 0; while (!done) { BZERO(readbuf, READBUFSIZE); cnt = fread(&readbuf[DATA_HDRSIZE], sizeof(char), reqsize, tmp); total += cnt; if (feof(tmp)) { sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)cnt); done = TRUE; } else if (ferror(tmp)) { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); done = TRUE; } else sprintf(readbuf, "%s%07ld", DATAMSG, (ulong)cnt); console("%s[%s]\n", !first++ ? "\n" : "", readbuf); daemon_send(readbuf, bufsize); } console("MEMORY_DUMP total: %ld\n", total); fclose(tmp); continue; } else if (STRNEQ(recvbuf, "NETDUMP_INIT ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* NETDUMP_INIT */ p2 = strtok(NULL, " "); /* fd */ p3 = strtok(NULL, " "); /* dumpfile */ mfd = atoi(p2); for (i = 0; i < MAX_REMOTE_FDS; i++) { if (fds[i] == mfd) { close(mfd); fds[i] = -1; break; } } sprintf(sendbuf, "%s %s", savebuf, netdump_init(p3, NULL) ? "OK" : ""); if ((addr = get_netdump_panic_task())) { sprintf(readbuf, "\npanic_task: %lx\n", addr); strcat(sendbuf, readbuf); } console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "LKCD_DUMP_INIT ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* LKCD_DUMP_INIT */ p2 = strtok(NULL, " "); /* fd */ p3 = strtok(NULL, " "); /* dumpfile */ sprintf(sendbuf, "%s %s", savebuf, lkcd_dump_init(NULL, atoi(p2), p3) ? 
"OK" : ""); if ((addr = get_lkcd_panic_task())) { sprintf(readbuf, "\npanic_task: %lx\n", addr); strcat(sendbuf, readbuf); } readbuf[0] = NULLCHAR; get_lkcd_panicmsg(readbuf); if (strlen(readbuf)) { strcat(sendbuf, "panicmsg: "); strcat(sendbuf, readbuf); } console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "READ_LKCD ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ_LKCD */ p1 = strtok(NULL, " "); /* filename id */ p2 = strtok(NULL, " "); /* address */ p3 = strtok(NULL, " "); /* length */ mfd = atoi(p1); addr = daemon_htol(p2); len = atoi(p3); BZERO(readbuf, READBUFSIZE); errno = 0; if (!lkcd_lseek(addr)) len = 0; else if (lkcd_read((void *) &readbuf[DATA_HDRSIZE], len) != len) len = 0; if (len) { sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len); console("(%ld)\n", (ulong)len); } else { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); } daemon_send(readbuf, len+DATA_HDRSIZE); continue; } else if (STRNEQ(recvbuf, "S390_DUMP_INIT ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* S390_DUMP_INIT */ p2 = strtok(NULL, " "); /* fd */ p3 = strtok(NULL, " "); /* filename */ mfd = atoi(p2); for (i = 0; i < MAX_REMOTE_FDS; i++) { if (fds[i] == mfd) { close(mfd); fds[i] = -1; break; } } sprintf(sendbuf, "%s %s", savebuf, s390_dump_init(p3) ? 
"OK" : ""); if ((addr = get_s390_panic_task())) { sprintf(readbuf, "\npanic_task: %lx\n", addr); strcat(sendbuf, readbuf); } readbuf[0] = NULLCHAR; get_s390_panicmsg(readbuf); if (strlen(readbuf)) { strcat(sendbuf, "panicmsg: "); strcat(sendbuf, readbuf); } console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "S390X_DUMP_INIT ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* S390X_DUMP_INIT */ p2 = strtok(NULL, " "); /* fd */ p3 = strtok(NULL, " "); /* filename */ mfd = atoi(p2); for (i = 0; i < MAX_REMOTE_FDS; i++) { if (fds[i] == mfd) { close(mfd); fds[i] = -1; break; } } sprintf(sendbuf, "%s %s", savebuf, s390x_dump_init(p3) ? "OK" : ""); if ((addr = get_s390x_panic_task())) { sprintf(readbuf, "\npanic_task: %lx\n", addr); strcat(sendbuf, readbuf); } readbuf[0] = NULLCHAR; get_s390x_panicmsg(readbuf); if (strlen(readbuf)) { strcat(sendbuf, "panicmsg: "); strcat(sendbuf, readbuf); } console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); continue; } else if (STRNEQ(recvbuf, "READ_S390D ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* READ_S390D */ p1 = strtok(NULL, " "); /* filename id */ p2 = strtok(NULL, " "); /* address */ p3 = strtok(NULL, " "); /* length */ mfd = atoi(p1); addr = daemon_htol(p2); len = atoi(p3); BZERO(readbuf, READBUFSIZE); errno = 0; if ((len = read_s390_dumpfile(UNUSED, &readbuf[DATA_HDRSIZE], len, UNUSED, addr)) < 0) len = 0; if (len) { sprintf(readbuf, "%s%07ld", DONEMSG,(ulong)len); console("(%ld)\n", (ulong)len); } else { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); } daemon_send(readbuf, len+DATA_HDRSIZE); continue; } else if (STRNEQ(recvbuf, "EXECUTE ")) { strcpy(savebuf, recvbuf); p1 = strtok(recvbuf, " "); /* EXECUTE */ p1 = strtok(NULL, " "); /* bufsize */ p2 = strtok(NULL, " "); /* MCLXCD or LKCD */ p3 = strstr(savebuf, p2); bufsize = atol(p1); reqsize = bufsize - DATA_HDRSIZE; sprintf(readbuf, "echo  | %s", 
p3); if ((pipe = popen(readbuf, "r")) == NULL) { BZERO(readbuf, READBUFSIZE); sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); console("[%s]\n", readbuf); daemon_send(readbuf, bufsize); continue; } errno = cnt = done = total = first = 0; while (!done) { BZERO(readbuf, READBUFSIZE); cnt = fread(&readbuf[DATA_HDRSIZE], sizeof(char), reqsize, pipe); total += cnt; if (feof(pipe)) { sprintf(readbuf, "%s%07ld", DONEMSG, (ulong)cnt); done = TRUE; } else if (ferror(pipe)) { sprintf(readbuf, "%s%07ld", FAILMSG, (ulong)errno); done = TRUE; } else sprintf(readbuf, "%s%07ld", DATAMSG, (ulong)cnt); console("%s[%s]\n", !first++ ? "\n" : "", readbuf); daemon_send(readbuf, bufsize); } console("EXECUTE total: %ld\n", total); pclose(pipe); continue; } else if (STRNEQ(recvbuf, "EXIT")) { sprintf(sendbuf, "%s OK", recvbuf); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); return; } else { sprintf(sendbuf, "%s ", recvbuf); console("[%s]\n", sendbuf); daemon_send(sendbuf, strlen(sendbuf)); } } } /* * Common error-checking send routine. */ #define MINSENDSIZE (1448) static void daemon_send(void *buffer, int len) { int remaining, count, ret; char *bufptr; remaining = len; bufptr = buffer; while (remaining) { count = MIN(MINSENDSIZE, remaining); switch (ret = send(rc->sock, bufptr, count, 0)) { case -1: switch (errno) { case ENOBUFS: case ENOMEM: sleep(1); continue; default: exit(1); } break; default: remaining -= ret; bufptr += ret; break; } } console("daemon_send: sent %d\n", len); } /* * debug print if the -d command line option was used. */ int console(char *fmt, ...) { char output[BUFSIZE*2]; va_list ap; int retval; FILE *fp; if (!rc->remdebug || !fmt || !strlen(fmt)) return 0; va_start(ap, fmt); (void)vsnprintf(output, BUFSIZE*2, fmt, ap); va_end(ap); if ((fp = fopen(rc->remdebugfile, "a")) == NULL) return 0; retval = fprintf(fp, "%s", output); fclose(fp); return retval; } /* * Fill in the file size of a freshly opened file. 
*/

/*
 * Return the size of an open file via fstat(), or 0 on failure.
 */
ulong
daemon_filesize(int fd)
{
	struct stat sbuf;

	if (fstat(fd, &sbuf) == 0)
		return(sbuf.st_size);
	else
		return 0;
}

/*
 * Check for gdb output stating "(no debugging symbols found)".
 *
 * Runs gdb against the file and scans its output.  Returns one of
 * three status strings: "NO_GDB" if gdb cannot be started, "NO_DEBUG"
 * if gdb reports no debugging symbols, or "DEBUG_OK" otherwise.
 */
char *
no_debugging_symbols_found(char *file)
{
	FILE *pipe;
	char buf[BUFSIZE];

	sprintf(buf, "echo 'q' | /usr/bin/gdb %s", file);

	if ((pipe = popen(buf, "r")) == NULL)
		return "NO_GDB";

	/* buf is reused as the line buffer for gdb's output */
	while (fgets(buf, BUFSIZE, pipe)) {
		if (strstr(buf, "(no debugging symbols found)")) {
			pclose(pipe);
			return "NO_DEBUG";
		}
	}
	pclose(pipe);

	return "DEBUG_OK";
}

/*
 * Read /proc/version into a buffer.
 *
 * Returns TRUE with up to BUFSIZE-1 bytes copied into buf, or FALSE
 * if /proc/version does not exist or cannot be read.
 * NOTE(review): buf is not explicitly NUL-terminated here; callers
 * appear to pass BZERO'd buffers -- confirm.
 */
static int
daemon_proc_version(char *buf)
{
	FILE *pipe;
	struct stat sbuf;

	if (stat("/proc/version", &sbuf) == -1)
		return FALSE;

	if ((pipe = popen("/bin/cat /proc/version", "r")) == NULL)
		return FALSE;

	if (fread(buf, sizeof(char), BUFSIZE-1, pipe) <= 0) {
		pclose(pipe);
		return FALSE;
	}
	pclose(pipe);

	return TRUE;
}

/*
 * c/o W. Richard Stevens...
 *
 * Classic daemonization: fork and let the parent exit, start a new
 * session, detach from the filesystem, and close every inherited
 * descriptor.  Returns TRUE in the daemonized child, FALSE only if
 * the initial fork() fails.
 */

#define OPEN_MAX_GUESS (256)

static int
daemon_init(void)
{
	int i;
	pid_t pid;
	int open_max;

	if ((pid = fork()) < 0)
		return FALSE;
	else if (pid != 0)
		exit(0);	/* parent exits; child continues as the daemon */

	setsid();
	chdir("/");
	umask(0);

	if ((open_max = sysconf(_SC_OPEN_MAX)) < 0)
		open_max = OPEN_MAX_GUESS;

	for (i = 0; i < open_max; i++)
		close(i);

	/* ignore child-exit notifications from spawned helpers */
	signal(SIGCLD, SIG_IGN);

	unsetenv("DISPLAY");

	return TRUE;
}

/*
 * Determine whether a file is in ELF format by checking the magic number
 * in the first EI_NIDENT characters of the file.  If it's there, further
 * qualify it by doing a "file" operation on it.
*/

static int
daemon_is_elf_file(char *s)
{
	int fd, is_elf;
	char magic[EI_NIDENT];
	char buf[BUFSIZE];
	FILE *pipe;

	if ((fd = open(s, O_RDONLY)) < 0)
		return FALSE;
	if (read(fd, magic, EI_NIDENT) != EI_NIDENT) {
		close(fd);
		return FALSE;
	}
	close(fd);

	/* truncate at EI_CLASS so the magic compares against ELFMAG */
	magic[EI_CLASS] = NULLCHAR;
	if (!STREQ(magic, ELFMAG))
		return FALSE;

	sprintf(buf, "/usr/bin/file -L %s", s);
	if ((pipe = popen(buf, "r")) == NULL) {
		console("/usr/bin/strings popen failed\n");
		/* magic already matched; give it the benefit of the doubt */
		return TRUE;
	}

	is_elf = FALSE;
	while (fgets(buf, BUFSIZE-1, pipe)) {
		if (strstr(buf, " ELF ") && strstr(buf, "executable")) {
			is_elf = TRUE;
			break;
		}
	}
	pclose(pipe);

	return is_elf;
}

/*
 * Translate ASCII hex addresses.
 *
 * Accepts an optional "0x"/"0X" prefix; exits the daemon outright on
 * an over-long string or a non-hex character.
 */
static ulong
daemon_htol(char *s)
{
	long i, j;
	ulong n;

	if (strlen(s) > MAX_HEXADDR_STRLEN)
		exit(1);

	for (n = i = 0; s[i] != 0; i++) {
		switch (s[i])
		{
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
			j = (s[i] - 'a') + 10;
			break;
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
			j = (s[i] - 'A') + 10;
			break;
		case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9': case '0':
			j = s[i] - '0';
			break;
		case 'x': case 'X':
			continue;	/* skip "0x"/"0X" prefix characters */
		default:
			exit(0);
		}
		n = (16 * n) + j;
	}

	return(n);
}

/*
 * Adapted from filesys.c, search the default directories for a kernel
 * that matches /proc/version.  daemon_build_searchdirs() builds an
 * array of directory names.
*/

#define CREATE 1
#define DESTROY 0
#define DEFAULT_SEARCHDIRS 4

static int
daemon_find_booted_kernel(char *namelist)
{
	char kernel[BUFSIZE];
	char command[BUFSIZE];
	char buffer[BUFSIZE];
	char proc_version[BUFSIZE];
	char *version;
	char **searchdirs;
	int i;
	DIR *dirp;
	struct dirent *dp;
	FILE *pipe;
	int found;
	struct stat sbuf;

	console("\n");

	if (stat("/proc/version", &sbuf) < 0) {
		console("/proc/version not found\n");
		return FALSE;
	}

	if (!daemon_proc_version(proc_version)) {
		console("cannot read /proc/version\n");
		return FALSE;
	}
	version = proc_version;

	searchdirs = daemon_build_searchdirs(CREATE);

	/*
	 * Walk each candidate directory; for each readable ELF file,
	 * scan its strings for the exact /proc/version signature.
	 */
	for (i = 0, found = FALSE; !found && searchdirs[i]; i++) {
		dirp = opendir(searchdirs[i]);
		if (!dirp)
			continue;
		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) {
			sprintf(kernel, "%s%s", searchdirs[i], dp->d_name);

			if (daemon_mount_point(kernel) ||
			    !daemon_file_readable(kernel) ||
			    !daemon_is_elf_file(kernel))
				continue;

			sprintf(command, "/usr/bin/strings %s", kernel);
			if ((pipe = popen(command, "r")) == NULL) {
				console("/usr/bin/strings popen failed\n");
				continue;
			}

			while (fgets(buffer, BUFSIZE-1, pipe)) {
				if (STREQ(buffer, version)) {
					found = TRUE;
					break;
				}
			}
			pclose(pipe);
			if (found)
				break;
		}
		closedir(dirp);
	}

	/* release the cached mount list and the search directory array */
	daemon_mount_point(DESTROY);
	daemon_build_searchdirs(DESTROY);

	if (found) {
		console("booted kernel: %s\n", kernel);
		strcpy(namelist, kernel);
		return TRUE;
	}

	console("cannot find booted kernel\n");
	return FALSE;
}

/*
 * Build (CREATE) or free (DESTROY) the NULL-terminated array of
 * directories searched by daemon_find_booted_kernel(): the static
 * defaults plus any non-mount-point subdirectory of /usr/src.
 */
static char **
daemon_build_searchdirs(int create)
{
	int i;
	int cnt;
	DIR *dirp;
	struct dirent *dp;
	char dirbuf[BUFSIZE];
	static char **searchdirs = { 0 };
	static char *default_searchdirs[DEFAULT_SEARCHDIRS+1] = {
		"/usr/src/linux/",
		"/boot/",
		"/boot/efi/",
		"/",
		NULL
	};

	if (!create) {
		/* DESTROY: only entries past the static defaults were malloc'd */
		if (searchdirs) {
			for (i = DEFAULT_SEARCHDIRS; searchdirs[i]; i++)
				free(searchdirs[i]);
			free(searchdirs);
		}
		return NULL;
	}

	cnt = DEFAULT_SEARCHDIRS;

	/* first pass: count /usr/src entries to size the pointer array */
	if ((dirp = opendir("/usr/src"))) {
		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp))
			cnt++;

		if ((searchdirs =
		    (char **)malloc(cnt * sizeof(char *))) == NULL) {
			console("/usr/src/ directory list malloc failed: %s\n",
				strerror(errno));
			closedir(dirp);
			return default_searchdirs;
		}

		for (i = 0; i < DEFAULT_SEARCHDIRS; i++)
			searchdirs[i] = default_searchdirs[i];
		cnt = DEFAULT_SEARCHDIRS;

		rewinddir(dirp);

		/* second pass: append qualifying /usr/src subdirectories */
		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) {
			if (STREQ(dp->d_name, "linux") ||
			    STREQ(dp->d_name, ".") ||
			    STREQ(dp->d_name, ".."))
				continue;

			sprintf(dirbuf, "/usr/src/%s", dp->d_name);

			if (daemon_mount_point(dirbuf))
				continue;
			if (!daemon_is_directory(dirbuf))
				continue;

			if ((searchdirs[cnt] = (char *)
			    malloc(strlen(dirbuf)+2)) == NULL) {
				console("/usr/src/ directory entry malloc failed: %s\n",
					strerror(errno));
				break;
			}

			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
		searchdirs[cnt] = NULL;
		closedir(dirp);
	}

	/* NOTE(review): if opendir("/usr/src") fails, searchdirs remains
	   NULL and the loop below dereferences it -- confirm upstream. */
	for (i = 0; searchdirs[i]; i++)
		console("searchdirs[%d]: %s\n", i, searchdirs[i]);

	return searchdirs;
}

/*
 * Determine whether a file is a mount point, without the benefit of stat().
 * This horrendous kludge is necessary to avoid uninterruptible stat() or
 * fstat() calls on nfs mount-points where the remote directory is no longer
 * available.
 */
static int
daemon_mount_point(char *name)
{
	int i;
	static int mount_points_gathered = -1;
	static char **mount_points;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];
	char cmd[BUFSIZE];
	int argc, found;
	struct stat sbuf;
	FILE *pipe;

	/*
	 * The first time through, stash a list of mount points.
	 */
	if (mount_points_gathered < 0) {
		found = mount_points_gathered = 0;

		if (stat("/proc/mounts", &sbuf) == 0)
			sprintf(cmd, "/bin/cat /proc/mounts");
		else if (stat("/etc/mtab", &sbuf) == 0)
			sprintf(cmd, "/bin/cat /etc/mtab");
		else
			return FALSE;

		/* first pass: count the mount entries */
		if ((pipe = popen(cmd, "r")) == NULL)
			return FALSE;
		while (fgets(buf, BUFSIZE, pipe)) {
			argc = daemon_parse_line(buf, arglist);
			if (argc < 2)
				continue;
			found++;
		}
		pclose(pipe);

		if (!(mount_points = (char **)malloc(sizeof(char *) * found)))
			return FALSE;

		/* second pass: stash each mount directory (second field) */
		if ((pipe = popen(cmd, "r")) == NULL)
			return FALSE;

		i = 0;
		while (fgets(buf, BUFSIZE, pipe) &&
		    (mount_points_gathered < found)) {
			argc = daemon_parse_line(buf, arglist);
			if (argc < 2)
				continue;
			if ((mount_points[i] = (char *)
			    malloc(strlen(arglist[1])*2))) {
				strcpy(mount_points[i], arglist[1]);
				mount_points_gathered++, i++;
			}
		}
		pclose(pipe);
	}

	/*
	 * A null name string means we're done with this routine forever,
	 * so the malloc'd memory can be freed.
	 */
	if (!name) {
		for (i = 0; i < mount_points_gathered; i++)
			free(mount_points[i]);
		free(mount_points);
		return FALSE;
	}

	for (i = 0; i < mount_points_gathered; i++) {
		if (STREQ(name, mount_points[i]))
			return TRUE;
	}

	return FALSE;
}

/*
 * Check whether a file is a directory.
 */
static int
daemon_is_directory(char *file)
{
	struct stat sbuf;

	if (!file || !strlen(file))
		return(FALSE);

	if (stat(file, &sbuf) == -1)
		return(FALSE);	/* This file doesn't exist. */

	return((sbuf.st_mode & S_IFMT) == S_IFDIR ? TRUE : FALSE);
}

/*
 * Check whether a file is readable.
 */
static int
daemon_file_readable(char *file)
{
	struct stat sbuf;
	long tmp;
	int fd;

	if (stat(file, &sbuf) < 0)
		return FALSE;

	if ((fd = open(file, O_RDONLY)) < 0)
		return FALSE;

	/* require that at least a few bytes can actually be read */
	if (read(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
		close(fd);
		return FALSE;
	}
	close(fd);

	return TRUE;
}

/*
 * Parse a line into tokens, populate the passed-in argv[] array, and return
 * the count of arguments found.  This function modifies the passed-string
 * by inserting a NULL character at the end of each token.
Expressions
 * encompassed by parentheses, and strings encompassed by apostrophes, are
 * collected into single tokens.
 */
int
daemon_parse_line(char *str, char *argv[])
{
	int i, j;
	int string;
	int expression;

	for (i = 0; i < MAXARGS; i++)
		argv[i] = NULL;

	daemon_clean_line(str);

	if (str == NULL || strlen(str) == 0)
		return(0);

	i = j = 0;
	string = expression = FALSE;
	argv[j++] = str;

	while (TRUE) {
		if (j == MAXARGS) {
			console("too many arguments in string!\n");
			return 0;
		}
		/* advance to the end of the current token */
		while (str[i] != ' ' && str[i] != '\t' && str[i] != NULLCHAR) {
			i++;
		}

		switch (str[i])
		{
		case ' ':
		case '\t':
			str[i++] = NULLCHAR;	/* terminate this token */

			/* a leading quote opens a quoted-string token */
			if (str[i] == '"') {
				str[i] = ' ';
				string = TRUE;
				i++;
			}

			/* a leading paren opens an expression token */
			if (str[i] == '(') {
				expression = TRUE;
			}

			while (str[i] == ' ' || str[i] == '\t') {
				i++;
			}

			if (str[i] != NULLCHAR && str[i] != '\n') {
				argv[j++] = &str[i];

				if (string) {
					string = FALSE;
					/* swallow up to the closing quote */
					while (str[i] != '"' && str[i] != NULLCHAR)
						i++;
					if (str[i] == '"')
						str[i] = ' ';
				}
				if (expression) {
					expression = FALSE;
					/* swallow up to the closing paren */
					while (str[i] != ')' && str[i] != NULLCHAR)
						i++;
				}
				break;
			}
			/* else fall through */
		case '\n':
			str[i] = NULLCHAR;
			/* keep falling... */
		case NULLCHAR:
			argv[j] = NULLCHAR;
			return(j);
		}
	}
}

/*
 * Strip line-beginning and line-ending whitespace and linefeeds.
 */
char *strip_linefeeds(char *line)
{
	return(daemon_clean_line(line));
}

/*
 * In-place cleanup: strip leading whitespace, trailing linefeeds,
 * and trailing whitespace from a line.  Returns the same pointer.
 */
static char *
daemon_clean_line(char *line)
{
	char buf[BUFSIZE];
	char *p;

	if (line == NULL || strlen(line) == 0)
		return(line);

	/* strip leading whitespace */
	strcpy(buf, line);
	p = &buf[0];
	while (*p == ' ' || *p == '\t')
		p++;
	strcpy(line, p);

	if (line == NULL || strlen(line) == 0)
		return(line);

	/* strip the trailing linefeed, if any */
	p = &LASTCHAR(line);
	while (*p == '\n')
		*p = NULLCHAR;

	if (line == NULL || strlen(line) == 0)
		return(line);

	/* strip trailing whitespace */
	p = &LASTCHAR(line);
	while (*p == ' ' || *p == '\t') {
		*p = NULLCHAR;
		if (p == line)
			break;
		p--;
	}

	return(line);
}

/*
 * Service not offered by the daemon.
*/ int monitor_memory(long *a1, long *a2, long *a3, long *a4) { return FALSE; } static int daemon_find_module(char *release, char *filename, char *retbuf) { char dir[BUFSIZE]; int found; found = FALSE; sprintf(dir, "%s/%s", DEFAULT_REDHAT_DEBUG_LOCATION, release); found = daemon_search_directory_tree(dir, filename, retbuf); if (!found) { sprintf(dir, "/lib/modules/%s", release); found = daemon_search_directory_tree(dir, filename, retbuf); } return found; } int daemon_search_directory_tree(char *directory, char *file, char *retbuf) { char command[BUFSIZE]; char buf[BUFSIZE]; FILE *pipe; int found; if (!daemon_file_exists("/usr/bin/find", NULL) || !daemon_file_exists("/bin/echo", NULL) || !daemon_is_directory(directory)) return FALSE; sprintf(command, "/usr/bin/find %s -name %s -print; /bin/echo search done", directory, file); if ((pipe = popen(command, "r")) == NULL) return FALSE; found = FALSE; while (fgets(buf, BUFSIZE-1, pipe) || !found) { if (STREQ(buf, "search done\n")) break; if (!found && STREQ((char *)basename(strip_linefeeds(buf)), file)) { strcpy(retbuf, buf); found = TRUE; } } pclose(pipe); return found; } static int daemon_file_exists(char *file, struct stat *sp) { struct stat sbuf; if (stat(file, sp ? 
sp : &sbuf) == 0) return TRUE; return FALSE; } static int daemon_checksum(char *file, long *retsum) { int i; int fd; ssize_t cnt; char buf[MIN_PAGE_SIZE]; long csum; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; csum = 0; BZERO(buf, MIN_PAGE_SIZE); while ((cnt = read(fd, buf, MIN_PAGE_SIZE)) > 0) { for (i = 0; i < cnt; i++) csum += buf[i]; BZERO(buf, MIN_PAGE_SIZE); } close(fd); *retsum = csum; return TRUE; } #else static void copy_to_local_namelist(struct remote_file *); static char *create_local_namelist(struct remote_file *); static int remote_find_booted_kernel(struct remote_file *); static int remote_proc_version(char *); static int validate_phys_base(physaddr_t, physaddr_t, physaddr_t); static int remote_file_open(struct remote_file *); static int remote_file_close(struct remote_file *); static int identical_namelist(char *, struct remote_file *); void remote_socket_options(int); static int copy_remote_file(struct remote_file *, int, char *, char *); static void copy_remote_gzip_file(struct remote_file *, char *, char *); static int remote_file_checksum(struct remote_file *); static int remote_file_type(char *); static int remote_lkcd_dump_init(void); static int remote_s390_dump_init(void); static int remote_netdump_init(void); static int remote_tcp_read(int, const char *, size_t); static int remote_tcp_read_string(int, const char *, size_t, int); static int remote_tcp_write(int, const void *, size_t); static int remote_tcp_write_string(int, const char *); struct _remote_context { uint flags; int n_cpus; int vfd; char remote_type[10]; } remote_context; #define NIL_FLAG (0x01U) #define NIL_MODE() (rc->flags & NIL_FLAG) struct _remote_context *rc = &remote_context; /* * Parse, verify and establish a connection with the network daemon * specified on the crash command line. 
* * The format is: [remote-hostname]:port[,remote-namelist][,remote-dumpfile] * * where everything but the port number is optional, and the remote-namelist * and remote-dumpfile can be reversed. * * 1. The default remote host is the local host. * 2. The default dumpfile is /dev/mem. * 3. If no remote-namelist and remote-dumpfile are given, the daemon * is queried for a kernel that matches the remote /proc/version. * If no local kernel namelist is entered, the remote version will * be copied locally when fd_init() is called. * 4. If a remote-dumpfile is given with no remote namelist, it is presumed * that the kernel namelist will be entered locally. */ int is_remote_daemon(char *dp) { char *p1; static char defaulthost[MAXHOSTNAMELEN+1]; char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *portp, *filep, *file1, *file2; struct hostent *hp; struct sockaddr_in serv_addr; if (!strstr(dp, ":") || file_exists(dp, NULL)) return FALSE; pc->port = 0; pc->server = pc->server_memsrc = NULL; rc->vfd = pc->rmfd = pc->rkfd = -1; file1 = file2 = NULL; if ((filep = strstr(dp, ","))) { *filep = NULLCHAR; filep++; } if (*dp == ':') { BZERO(defaulthost, MAXHOSTNAMELEN+1); gethostname(defaulthost, MAXHOSTNAMELEN); pc->server = defaulthost; portp = dp+1; } else { pc->server = strtok(dp, ":"); portp = strtok(NULL, ":"); } if (portp == NULL) return FALSE; if (decimal(portp, 0)) pc->port = (ushort)atoi(portp); else return FALSE; if (filep) { file1 = strtok(filep, ","); file2 = strtok(NULL, ","); } if (!pc->server || !pc->port) return FALSE; if (CRASHDEBUG(1)) { fprintf(fp, "server: [%s]\n", pc->server); fprintf(fp, " port: [%d]\n", pc->port); fprintf(fp, " file1: [%s]\n", file1); fprintf(fp, " file2: [%s]\n", file2); } if ((hp = gethostbyname(pc->server)) == NULL) { herror(pc->server); error(FATAL, "gethostbyname [%s] failed\n", pc->server); } if (CRASHDEBUG(1)) { struct in_addr *ip; char **listptr; listptr = hp->h_addr_list; while ((ip = (struct in_addr *) *listptr++) != NULL) 
printf("%s\n", inet_ntoa(*ip)); } if ((pc->sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { perror("socket"); error(FATAL, "socket call failed\n"); } BZERO((char *)&serv_addr, sizeof(struct sockaddr_in)); serv_addr.sin_family = AF_INET; BCOPY(hp->h_addr, (char *)&serv_addr.sin_addr, hp->h_length); serv_addr.sin_port = htons(pc->port); if (connect(pc->sockfd, (struct sockaddr *)&serv_addr, sizeof(struct sockaddr_in)) < 0) { herror(hp->h_name); error(FATAL, "connect [%s:%d] failed\n", hp->h_name, pc->port); clean_exit(1); } if (CRASHDEBUG(1)) printf("connect [%s:%d]: success\n", hp->h_name, pc->port); remote_socket_options(pc->sockfd); /* * Try and use NIL mode. */ BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "NIL"); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, 0); if (!strstr(recvbuf, "")) { rc->flags |= NIL_FLAG; p1 = strtok(recvbuf, " "); /* NIL */ p1 = strtok(NULL, " "); /* remote type */ if (p1 && p1[0] != 'L') pc->flags2 |= REM_PAUSED_F; } /* * Get the remote machine type and verify a match. The daemon pid * is also used as a live system initial context. 
*/ BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "MACHINE_PID"); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); p1 = strtok(recvbuf, " "); /* MACHINE */ p1 = strtok(NULL, " "); /* machine type */ if (CRASHDEBUG(1)) printf("remote MACHINE: %s\n", p1); if (!STREQ(pc->machine_type, p1)) error(FATAL, "machine type mismatch: local: %s remote: %s\n", pc->machine_type, p1); p1 = strtok(NULL, " "); /* pid */ pc->server_pid = atol(p1); if (file1) { switch (remote_file_type(file1)) { case TYPE_ELF: pc->server_namelist = file1; break; case TYPE_NETDUMP: pc->server_memsrc = file1; pc->flags |= REM_NETDUMP; break; case TYPE_MCLXCD: pc->server_memsrc = file1; pc->flags |= REM_MCLXCD; break; case TYPE_DEVMEM: pc->server_memsrc = file1; break; case TYPE_LKCD: pc->server_memsrc = file1; pc->flags |= REM_LKCD; break; case TYPE_S390D: pc->server_memsrc = file1; pc->flags |= REM_S390D; break; } } if (file2) { switch (remote_file_type(file2)) { case TYPE_ELF: if (pc->server_namelist) error(FATAL, "two remote namelists entered: %s and %s\n", file1, file2); pc->server_namelist = file2; break; case TYPE_NETDUMP: if (pc->server_memsrc) error(FATAL, "neither %s or %s is an ELF file\n", file1, file2); pc->server_memsrc = file2; pc->flags |= REM_NETDUMP; break; case TYPE_MCLXCD: if (pc->server_memsrc) error(FATAL, "neither %s or %s is an ELF file\n", file1, file2); pc->server_memsrc = file2; pc->flags |= REM_MCLXCD; break; case TYPE_LKCD: if (pc->server_memsrc) error(FATAL, "neither %s or %s is an ELF file\n", file1, file2); pc->server_memsrc = file2; pc->flags |= REM_LKCD; break; case TYPE_S390D: if (pc->server_memsrc) error(FATAL, "neither %s or %s is an ELF file\n", file1, file2); pc->server_memsrc = file2; pc->flags |= REM_S390D; break; case TYPE_DEVMEM: pc->server_memsrc = file2; break; } } return TRUE; } /* * Determine whether a file is a kernel or a memory source. 
*/ static int remote_file_type(char *file) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "TYPE %s", file); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) error(FATAL, "invalid remote file name: %s\n", file); else if (strstr(recvbuf, " UNSUPPORTED")) error(FATAL, "unsupported remote file type: %s\n", file); else if (strstr(recvbuf, " NETDUMP")) return TYPE_NETDUMP; else if (strstr(recvbuf, " ELF")) return TYPE_ELF; else if (strstr(recvbuf, " MCLXCD")) return TYPE_MCLXCD; else if (strstr(recvbuf, " DEVMEM")) return TYPE_DEVMEM; else if (strstr(recvbuf, " LKCD")) return TYPE_LKCD; else if (strstr(recvbuf, " S390D")) return TYPE_S390D; return (error(FATAL, "unknown remote file type: %s\n", file)); } /* * Try to set the receive buffer size to READBUFSIZE with setsockopt(), * storing the value returned by getsockopt() after the attempt is made. * Then enforce a SO_RCVLOWAT (low water mark) of 1, to ensure that error * recovery won't get hung in the recv() call in remote_clear_pipeline(). */ void remote_socket_options(int sockfd) { int rcvbuf, optlen; pc->rcvbufsize = rcvbuf = READBUFSIZE; if (setsockopt(sockfd, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbuf, sizeof(rcvbuf)) < 0) { error(INFO, "SO_RCVBUF setsockopt error\n"); return; } optlen = sizeof(rcvbuf); if (getsockopt(sockfd, SOL_SOCKET, SO_RCVBUF, (char *)&rcvbuf, (socklen_t *)&optlen) < 0) { error(INFO, "SO_RCVBUF getsockopt error\n"); return; } if (CRASHDEBUG(1)) printf("socket SO_RCVBUF size: %d\n", rcvbuf); rcvbuf = 1; if (setsockopt(sockfd, SOL_SOCKET, SO_RCVLOWAT, (char *)&rcvbuf, sizeof(rcvbuf)) < 0) { /* * Earlier versions of Linux TCP won't accept this option, * which is hardcoded to the desired count of 1 anyway. * Set it to 0, and verify it as 1 in the getsockopt() call. 
*/
		if (CRASHDEBUG(1))
			error(INFO, "SO_RCVLOWAT setsockopt error: %s\n",
				strerror(errno));
		rcvbuf = 0;
	}

	optlen = sizeof(rcvbuf);
	if (getsockopt(sockfd, SOL_SOCKET, SO_RCVLOWAT, (char *)&rcvbuf,
	    (socklen_t *)&optlen) < 0) {
		error(INFO, "SO_RCVLOWAT getsockopt error\n");
		return;
	}
	if (CRASHDEBUG(1) || (rcvbuf != 1))
		error(INFO, "socket SO_RCVLOWAT value: %d\n", rcvbuf);
}

/*
 * Wrapper around recv to read full length packet.
 *
 * Loops until exactly cb_buffer bytes have arrived.  Returns the total
 * byte count, or recv()'s 0/-1 on EOF/error.
 */
static int
remote_tcp_read(int sock, const char *pv_buffer, size_t cb_buffer)
{
	size_t cb_total = 0;

	do {
		ssize_t cb_read = recv(sock, (void*)pv_buffer, cb_buffer,
					MSG_NOSIGNAL);
		if (cb_read <= 0)
			return cb_read;		/* EOF or error from recv() */
		cb_total += cb_read;
		cb_buffer -= cb_read;
		pv_buffer = (char *)pv_buffer + cb_read;
	} while (cb_buffer);

	return cb_total;
}

/*
 * Wrapper around recv to read full string packet.
 *
 * In NIL mode, keeps reading until the data received so far ends in a
 * NUL terminator (or the buffer fills).  Outside NIL mode, any total
 * of 4 or more bytes is accepted as a complete reply.  Returns the
 * total byte count, or recv()'s 0/-1 on EOF/error.
 */
static int
remote_tcp_read_string(int sock, const char *pv_buffer, size_t cb_buffer,
	int nil_mode)
{
	size_t cb_total = 0;

	do {
		ssize_t cb_read = recv(sock, (void*)pv_buffer, cb_buffer,
					MSG_NOSIGNAL);
		if (cb_read <= 0)
			return cb_read;
		cb_total += cb_read;
		if (!nil_mode && cb_total >= 4)
			return cb_total;
		/* done when the last byte received is the NUL terminator */
		if (!pv_buffer[cb_read - 1])
			return cb_total;
		cb_buffer -= cb_read;
		pv_buffer = (char *)pv_buffer + cb_read;
	} while (cb_buffer);

	return cb_total;
}

/*
 * Wrapper around send to send full packet.
 *
 * Returns 0 on success, 1 on any send() failure.
 */
static int
remote_tcp_write(int sock, const void *pv_buffer, size_t cb_buffer)
{
	do {
		size_t cb_now = cb_buffer;
		ssize_t cb_written = send(sock, (const char *)pv_buffer,
						cb_now, MSG_NOSIGNAL);
		if (cb_written < 0)
			return 1;
		cb_buffer -= cb_written;
		pv_buffer = (char *)pv_buffer + cb_written;
	} while (cb_buffer);

	return 0;
}

/*
 * Wrapper around tcp_write to send a string, including its trailing
 * NUL terminator.
 */
static int
remote_tcp_write_string(int sock, const char *pv_buffer)
{
	return remote_tcp_write(sock, pv_buffer, strlen(pv_buffer) + 1);
}

/*
 * Request that the daemon open a file.
*/

static int
remote_file_open(struct remote_file *rfp)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];
	char *p1;

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "OPEN %s", rfp->filename);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	if (CRASHDEBUG(1))
		fprintf(fp, "remote_file_open: [%s]\n", recvbuf);

	/*
	 * A successful reply echoes the request plus the fd, the open
	 * flags and the file size, parsed out field by field below.
	 */
	if (strstr(recvbuf, "O_RDWR") || strstr(recvbuf, "O_RDONLY")) {
		p1 = strtok(recvbuf, " ");	/* OPEN */
		p1 = strtok(NULL, " ");		/* filename */
		p1 = strtok(NULL, " ");		/* fd */
		rfp->fd = atoi(p1);
		p1 = strtok(NULL, " ");		/* flags */
		if (STREQ(p1, "O_RDWR"))
			rfp->flags |= O_RDWR;
		else if (STREQ(p1, "O_RDONLY"))
			rfp->flags |= O_RDONLY;
		p1 = strtok(NULL, " ");		/* size */
		rfp->size = atoi(p1);
		return TRUE;
	} else
		return FALSE;
}

/*
 * Request that the daemon close a previously-opened file.
 */
static int
remote_file_close(struct remote_file *rfp)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "CLOSE %d", rfp->fd);
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	return (strstr(recvbuf, "OK") ? TRUE : FALSE);
}

/*
 * Get a copy of the daemon machine's /proc/version
 */
static int
remote_proc_version(char *buf)
{
	char sendbuf[BUFSIZE];
	char recvbuf[BUFSIZE];

	BZERO(sendbuf, BUFSIZE);
	BZERO(recvbuf, BUFSIZE);
	sprintf(sendbuf, "PROC_VERSION");
	remote_tcp_write_string(pc->sockfd, sendbuf);
	remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE());

	/* NOTE(review): this failure check compares against an empty
	   string -- the daemon's failure token appears to have been lost
	   from this copy; verify against the pristine source. */
	if (STREQ(recvbuf, "")) {
		buf[0] = 0;
		return FALSE;
	}
	strcpy(buf, recvbuf);
	return TRUE;
}

/*
 * Check that virt_phys_base when accessed via
 * phys_base - text_start is phys_base.
*/ static int validate_phys_base(physaddr_t phys_base, physaddr_t text_start, physaddr_t virt_phys_base) { ulong value; if (CRASHDEBUG(3)) fprintf(fp, "validate_phys_base: virt_phys_base=0x%llx phys_base=0x%llx text_start=0x%llx calc=0x%llx\n", (long long unsigned int)virt_phys_base, (long long unsigned int)phys_base, (long long unsigned int)text_start, (long long unsigned int)virt_phys_base + phys_base - text_start); if (READMEM(pc->rmfd, (void*)&value, sizeof(value), virt_phys_base, virt_phys_base + phys_base - text_start) == sizeof(value)) { if (value == phys_base) return 1; } return 0; } /* * Get remote phys_base based on virtual address of "phys_base". */ physaddr_t get_remote_phys_base(physaddr_t text_start, physaddr_t virt_phys_base) { int vcpu; ulong value; if (rc->vfd < 0) { struct remote_file remote_file, *rfp; rfp = &remote_file; BZERO(rfp, sizeof(struct remote_file)); rfp->filename = "/dev/vmem"; if (remote_file_open(rfp)) { rc->vfd = rfp->fd; } else return 0; } for (vcpu = 0; vcpu < rc->n_cpus; vcpu++) if (remote_memory_read(rc->vfd, (void*)&value, sizeof(value), virt_phys_base, vcpu) == sizeof(value)) { if (validate_phys_base(value, text_start, virt_phys_base)) return value; } return 0; } /* * Do a remote VTOP if supported. */ physaddr_t remote_vtop(int cpu, physaddr_t virt_addr) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1; int errflag; ulong value; if (!rc->remote_type[0]) return 0; /* Not a special remote. 
*/ BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "VTOP %d %llx", cpu, (long long unsigned int)virt_addr); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (CRASHDEBUG(2)) fprintf(fp, "remote_vtop: [%s]\n", recvbuf); if (strstr(recvbuf, "")) error(FATAL, "remote_vtop for CPU %d\n", cpu); p1 = strtok(recvbuf, " "); /* VTOP */ p1 = strtok(NULL, " "); /* cpu */ p1 = strtok(NULL, " "); /* vaddr */ p1 = strtok(NULL, " "); /* paddr */ errflag = 0; value = htol(p1, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { return value; } return 0; } /* * Get a copy of the daemon machine cpu regs. */ int get_remote_regs(struct bt_info *bt, ulong *eip, ulong *esp) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1, *p2; int errflag; ulong value; if (!rc->remote_type[0]) return 0; /* Not a special remote. */ *eip = 0; *esp = 0; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "FETCH_LIVE_IP_SP_BP %d", bt->tc->processor); if (remote_tcp_write_string(pc->sockfd, sendbuf)) return 0; errflag = remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (errflag <= 0) return 0; if (CRASHDEBUG(1)) fprintf(fp, "get_remote_regs(cpu=%d): [%s]\n", bt->tc->processor, recvbuf); if (strstr(recvbuf, "")) { error(INFO, "get_remote_regs for CPU %d\n", bt->tc->processor); return 0; } p1 = strtok(recvbuf, " "); /* FETCH_LIVE_IP_SP_BP */ p1 = strtok(NULL, " "); /* cpu */ p1 = strtok(NULL, ":"); /* cs */ p1 = strtok(NULL, " "); /* ip */ p2 = strtok(NULL, ":"); /* ss */ p2 = strtok(NULL, " "); /* sp */ /* p2 = strtok(NULL, " "); bp */ errflag = 0; value = htol(p1, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { *eip = value; } errflag = 0; value = htol(p2, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { *esp = value; } return 1; } /* * Get a remote cr3 if supported. 
*/ physaddr_t get_remote_cr3(int cpu) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1; int errflag; ulong value; if (!rc->remote_type[0]) return 0; /* Not a special remote. */ BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "FETCH_LIVE_CR3 %d", cpu); if (remote_tcp_write_string(pc->sockfd, sendbuf)) return 0; remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (CRASHDEBUG(1)) fprintf(fp, "get_remote_cr3: [%s]\n", recvbuf); if (strstr(recvbuf, "")) error(FATAL, "get_remote_cr3 for CPU %d\n", cpu); p1 = strtok(recvbuf, " "); /* FETCH_LIVE_CR3 */ p1 = strtok(NULL, " "); /* cpu */ p1 = strtok(NULL, " "); /* cr3 */ errflag = 0; value = htol(p1, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) return value; return 0; } /* * * Set up the file descriptors and file name strings if they haven't * been set up before: * * 1. pc->namelist must be set to a local kernel namelist, which will be * copied from the remote machine if it was not specified. * * 2. pc->dumpfile will never be set for a remote operation, because there * is no difference to readmem(). * * 3. pc->server_namelist may be set if it has to be copied across. * * 4. pc->server_memsrc will be set to either /dev/mem or the dumpfile. */ void remote_fd_init(void) { char filename[BUFSIZE]; struct remote_file remote_file, *rfp; rfp = &remote_file; if (pc->namelist && pc->server_namelist) { error(INFO, "too many namelists\n"); program_usage(SHORT_FORM); } if ((pc->namelist || pc->server_namelist) && pc->namelist_debug && pc->system_map) { error(INFO, "too many namelist options:\n %s\n %s\n %s\n", pc->namelist ? pc->namelist : pc->server_namelist, pc->namelist_debug, pc->system_map); program_usage(SHORT_FORM); } /* * Account for the remote possibility of a local dumpfile * being entered on the command line. 
*/ if (pc->flags & MEMORY_SOURCES) { if (pc->server_memsrc) { error(INFO, "too many dumpfile/memory arguments\n"); program_usage(SHORT_FORM); } pc->flags2 |= MEMSRC_LOCAL; if (pc->flags & (DEVMEM|MEMMOD)) { if (!get_proc_version()) error(INFO, "/proc/version: %s\n", strerror(errno)); pc->flags |= LIVE_SYSTEM; } } else { /* * First open the remote memory source, defaulting to /dev/mem * if no remote dumpfile name was entered. If it is /dev/mem, * then also go get the remote /proc/version. */ pc->readmem = read_daemon; if (!pc->server_memsrc) pc->server_memsrc = "/dev/mem"; if (STREQ(pc->server_memsrc, "/dev/mem")) pc->flags |= REM_LIVE_SYSTEM; BZERO(rfp, sizeof(struct remote_file)); rfp->filename = pc->server_memsrc; if (remote_file_open(rfp)) { pc->rmfd = rfp->fd; if (rfp->flags & O_RDWR) pc->flags |= MFD_RDWR; if (BITS32() && REMOTE_ACTIVE()) { BZERO(rfp, sizeof(struct remote_file)); rfp->filename = "/dev/kmem"; if (remote_file_open(rfp)) pc->rkfd = rfp->fd; } if ((pc->flags & REM_NETDUMP) && !remote_netdump_init()) error(FATAL, "%s: remote initialization failed\n", pc->server_memsrc); if ((pc->flags & REM_LKCD) && !remote_lkcd_dump_init()) error(FATAL, "%s: remote initialization failed\n", pc->server_memsrc); if ((pc->flags & REM_S390D) && !remote_s390_dump_init()) error(FATAL, "%s: remote initialization failed\n", pc->server_memsrc); if (REMOTE_DUMPFILE()) pc->writemem = write_daemon; } else error(FATAL, "cannot open remote memory source: %s\n", pc->server_memsrc); if (REMOTE_ACTIVE() && !remote_proc_version(kt->proc_version)) error(WARNING, "daemon cannot access /proc/version\n\n"); } /* * If a local namelist was entered, check whether it's readable. * If a server namelist was entered, copy it across. 
* If no server namelist was entered, query the daemon for it, * and if found, copy it across, */ if (pc->namelist) { if ((pc->nfd = open(pc->namelist, O_RDONLY)) < 0) error(FATAL, "%s: %s\n", pc->namelist, strerror(errno)); close(pc->nfd); pc->nfd = -1; pc->flags |= NAMELIST_LOCAL; } else if (pc->server_namelist) { BZERO(rfp, sizeof(struct remote_file)); rfp->filename = pc->server_namelist; if (!remote_file_open(rfp)) { error(FATAL, "daemon cannot open: %s\n", pc->server_namelist); } copy_to_local_namelist(rfp); remote_file_close(rfp); } else { BZERO(rfp, sizeof(struct remote_file)); BZERO(filename, BUFSIZE); rfp->filename = filename; if (!remote_find_booted_kernel(rfp)) error(FATAL, "remote daemon cannot find booted kernel\n"); if (!remote_file_open(rfp)) error(FATAL, "remote daemon cannot open: %s\n", pc->server_namelist); copy_to_local_namelist(rfp); remote_file_close(rfp); } if (REMOTE_ACTIVE()) pc->flags |= LIVE_SYSTEM; } /* * Copy a remote kernel to a local file, which gets unlinked in the normal * course of events. However, the pc->nfd file descriptor will be kept * alive in case there's a command put in place to keep the file around. */ static void copy_to_local_namelist(struct remote_file *rfp) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char readbuf[READBUFSIZE]; int tty; if (pc->flags & KERNEL_DEBUG_QUERY) { /* * Don't bother copying the kernel if the daemon can * figure it out. 
*/ BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "DEBUGGING_SYMBOLS %s", rfp->filename); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "NO_DEBUG")) { sprintf(readbuf, "%s@%s", rfp->filename, pc->server); pc->namelist = readbuf; no_debugging_data(FATAL); } } pc->namelist = create_local_namelist(rfp); if (pc->flags & NAMELIST_LOCAL) return; if ((pc->nfd = open(pc->namelist, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) { pc->flags &= ~UNLINK_NAMELIST; error(FATAL, "cannot create local copy of kernel (%s)\n", pc->namelist); } tty = !(pc->flags & SILENT) && isatty(fileno(stdin)); if (!(pc->flags & NAMELIST_NO_GZIP)) { copy_remote_gzip_file(rfp, pc->namelist, tty ? "please wait... (copying remote kernel namelist: " : NULL); if (tty) fprintf(stderr, "\r \r"); return; } if (copy_remote_file(rfp, pc->nfd, pc->namelist, tty ? "please wait... (copying remote kernel namelist: " : NULL)) { if (tty) fprintf(stderr, "\r \r"); } else error(FATAL, "write to local copy of kernel namelist failed\n"); } /* * Try to create a file of the format: vmlinux@@hostname * If it already exists, append "_0", "_1", etc. until one's not found. * * The file will be unlinked by display_sys_stats() the first time it's * called. 
*/ static char * create_local_namelist(struct remote_file *rfp) { char buf[BUFSIZE]; char *p1; int i, use_local_copy; p1 = (char *)basename(rfp->filename); sprintf(buf, "%s@%s", p1, pc->server); for (i = 0, use_local_copy = FALSE; i >= 0; i++) { if (file_exists(buf, NULL)) { if (identical_namelist(buf, rfp)) { use_local_copy = TRUE; break; } sprintf(buf, "%s@%s_%d", p1,pc->server, i); } else break; } if ((p1 = (char *)malloc((size_t)(strlen(buf)+1))) == NULL) error(FATAL, "cannot malloc temporary file name buffer\n"); strcpy(p1, buf); if (use_local_copy) pc->flags |= NAMELIST_LOCAL; else pc->flags |= UNLINK_NAMELIST; return p1; } /* * Before copying a kernel across, check whether a kernel of the same * name is identical to the remote version. */ static int identical_namelist(char *file, struct remote_file *rfp) { char *vers; FILE *pipe; struct stat sbuf; long csum; char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char readbuf[BUFSIZE*2]; if (stat(file, &sbuf) < 0) return FALSE; if (sbuf.st_size != rfp->size) return FALSE; if (remote_file_checksum(rfp) && file_checksum(file, &csum) && (csum == rfp->csum)) return TRUE; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); BZERO(readbuf, BUFSIZE); sprintf(sendbuf, "LINUX_VERSION %s", rfp->filename); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) return FALSE; vers = recvbuf; sprintf(readbuf, "/usr/bin/strings %s | grep 'Linux version'", file); if ((pipe = popen(readbuf, "r"))) { BZERO(readbuf, BUFSIZE); if (fread(readbuf, sizeof(char), BUFSIZE-1, pipe) <= 0) { pclose(pipe); return FALSE; } pclose(pipe); } else return FALSE; if (CRASHDEBUG(1)) { fprintf(fp, "remote version: [%s]\n", vers); fprintf(fp, "local version: [%s]\n", readbuf); fprintf(fp, "%s vs. %s => %s\n", file, rfp->filename, STREQ(vers, readbuf) ? "IDENTICAL" : "DIFFERENT"); } return (STREQ(vers, readbuf)); } /* * If a remote file exists, get its checksum and return TRUE. 
*/ static int remote_file_checksum(struct remote_file *rfp) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "SUM %s", rfp->filename); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) { error(INFO, "%s: does not exist on server %s\n", rfp->filename, pc->server); return FALSE; } strtok(recvbuf, " "); /* SUM */ p1 = strtok(NULL, " "); /* filename */ p1 = strtok(NULL, " "); /* checksum */ rfp->csum = htol(p1, FAULT_ON_ERROR, NULL); return TRUE; } /* * Copy a remote file locally, distinguishing it by appending an ampersand * and the server name. * * If the kernel is requested, save the unlinked copy of the remote kernel * in a local file, using the same name created by create_local_namelist(). * * If a dumpfile, module, or any other file for that matter, append an * ampersand plus the server name. * * Other files may have their local filename altered if a file of the * same name exists with a different checksum. 
*/ int get_remote_file(struct remote_file *rfp) { int i; char local[BUFSIZE]; char readbuf[READBUFSIZE]; char *p1; struct load_module *lm; int cnt, sfd, err, retval; long csum; if (!REMOTE()) { error(INFO, "no remote files in use\n"); return FALSE; } if (rfp->local) goto generic_file_save; sprintf(readbuf, "%s@%s", pc->server_memsrc, pc->server); if (STREQ(rfp->filename, "dumpfile") || STREQ(rfp->filename, pc->server_memsrc) || STREQ(rfp->filename, basename(pc->server_memsrc)) || STREQ(rfp->filename, readbuf)) goto dumpfile_save; sprintf(readbuf, "%s", pc->namelist); if ((p1 = strstr(readbuf, "@"))) *p1 = NULLCHAR; if (STREQ(rfp->filename, "kernel") || STREQ(rfp->filename, pc->namelist) || STREQ(rfp->filename, pc->server_namelist) || STREQ(rfp->filename, readbuf)) goto kernel_save; if (STREQ(rfp->filename, "modules")) { for (i = 0; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; if (lm->mod_flags & MOD_REMOTE) { fprintf(fp, "%s module saved as: %s\n", lm->mod_name, lm->mod_namelist); lm->mod_flags &= ~MOD_REMOTE; } } return TRUE; } if (is_module_name(rfp->filename, NULL, &lm)) { if (lm->mod_flags & MOD_REMOTE) { fprintf(fp, "%s module saved as: %s\n", lm->mod_name, lm->mod_namelist); lm->mod_flags &= ~MOD_REMOTE; } return TRUE; } strcpy(local, rfp->filename); if ((p1 = strstr(local, ".o"))) { *p1 = NULLCHAR; if (is_module_name(basename(local), NULL, &lm)) { if (lm->mod_flags & MOD_REMOTE) { fprintf(fp, "%s module saved as: %s\n", lm->mod_name, lm->mod_namelist); lm->mod_flags &= ~MOD_REMOTE; return TRUE; } } } generic_file_save: cnt = 0; sprintf(local, "%s@%s", basename(rfp->filename), pc->server); while (file_exists(local, NULL)) { if (CRASHDEBUG(1)) fprintf(fp, "%s already exists in this directory\n", local); if (file_checksum(local, &csum) && (csum == rfp->csum)) { if (CRASHDEBUG(1)) error(NOTE, "local %s checksum matches -- using it\n", local); strcpy(rfp->local, local); return TRUE; } sprintf(local, "%s@%s_%d", basename(rfp->filename), pc->server, 
++cnt); } if (!remote_file_open(rfp)) { error(INFO, "daemon cannot open: %s\n", rfp->filename); return FALSE; } if ((sfd = open(local, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) { error(INFO, "open: %s: %s\n", local, strerror(errno)); remote_file_close(rfp); return FALSE; } if (copy_remote_file(rfp, sfd, local, rfp->flags & REMOTE_VERBOSE ? "please wait... (copying remote file: " : NULL)) { if (rfp->flags & REMOTE_VERBOSE) fprintf(stderr, "\rremote file saved as: \"%s\" \n", local); retval = TRUE; rfp->flags |= REMOTE_COPY_DONE; } else { fprintf(stderr, "\r%s NOT saved \n", rfp->filename); retval = FALSE; } close(sfd); remote_file_close(rfp); if (cnt) strcpy(rfp->local, local); return retval; kernel_save: if (pc->flags & NAMELIST_SAVED) { error(INFO, "\"%s\" is already saved\n", pc->namelist); return FALSE; } if (pc->flags & NAMELIST_LOCAL) { error(INFO, "\"%s\" is a local file\n", pc->namelist); return FALSE; } if ((sfd = open(pc->namelist, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) { error(INFO, "open: %s: %s\n", pc->namelist, strerror(errno)); return FALSE; } err = 0; lseek(sfd, 0, SEEK_SET); lseek(pc->nfd, 0, SEEK_SET); while ((cnt = read(pc->nfd, readbuf, READBUFSIZE)) > 0) { if (write(sfd, readbuf, cnt) != cnt) { error(INFO, "write:%s: %s\n", pc->namelist, strerror(errno)); err++; break; } } close(sfd); if (err) { fprintf(fp, "%s NOT saved\n", pc->namelist); unlink(pc->namelist); retval = FALSE; } else { fprintf(fp, "kernel saved as: \"%s\"\n", pc->namelist); close(pc->nfd); pc->nfd = -1; pc->flags |= NAMELIST_SAVED; retval = TRUE; } return (retval); dumpfile_save: if (pc->flags & DUMPFILE_SAVED) { error(INFO, "\"%s@%s\" is already saved\n", basename(pc->server_memsrc), pc->server); return FALSE; } if (pc->flags2 & MEMSRC_LOCAL) { error(INFO, "%s is a local file\n", pc->dumpfile); return FALSE; } if (!(REMOTE_DUMPFILE())) { error(INFO, "%s is not a dumpfile\n", pc->server_memsrc); return FALSE; } sprintf(local, "%s@%s", basename(pc->server_memsrc), pc->server); if 
(file_exists(local, NULL)) { error(INFO, "%s already exists in this directory\n", local); return FALSE; } rfp->filename = pc->server_memsrc; if (!remote_file_open(rfp)) { error(INFO, "daemon cannot open: %s\n", pc->server_memsrc); return FALSE; } if ((sfd = open(local, O_RDWR|O_CREAT|O_TRUNC, S_IRWXU)) < 0) { error(INFO, "open: %s: %s\n", local, strerror(errno)); remote_file_close(rfp); return FALSE; } if (copy_remote_file(rfp, sfd, local, "please wait... (copying remote dumpfile: ")) { fprintf(stderr, "\rdumpfile saved as: \"%s\" \n", local); pc->flags |= DUMPFILE_SAVED; retval = TRUE; } else { fprintf(stderr, "\r%s NOT saved \n", pc->server_memsrc); retval = FALSE; } close(sfd); remote_file_close(rfp); return (retval); } /* * Query the remote daemon for the kernel name that is running. */ static int remote_find_booted_kernel(struct remote_file *rfp) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "FIND_BOOTED_KERNEL"); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); strtok(recvbuf, " "); /* FIND_BOOTED_KERNEL */ p1 = strtok(NULL, " "); /* filename */ if (STREQ(p1, "")) return FALSE; strcpy(rfp->filename, p1); return TRUE; } static int remote_lkcd_dump_init(void) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1, *p2, *p3; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "LKCD_DUMP_INIT %d %s", pc->rmfd, pc->server_memsrc); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) return FALSE; p1 = strstr(recvbuf, "panic_task: "); p2 = strstr(recvbuf, "panicmsg: "); if (p1) { p1 += strlen("panic_task: "); p3 = strstr(p1, "\n"); *p3 = NULLCHAR; tt->panic_task = htol(p1, FAULT_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "panic_task: %lx\n", tt->panic_task); } if (p2) { p2 += strlen("panicmsg: "); if (CRASHDEBUG(1)) 
fprintf(fp, "panicmsg: %s", p2); } set_remote_lkcd_panic_data(tt->panic_task, p2); return TRUE; } static int remote_s390_dump_init(void) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1, *p2, *p3; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "S390_DUMP_INIT %d %s", pc->rmfd, pc->server_memsrc); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) return FALSE; p1 = strstr(recvbuf, "panic_task: "); p2 = strstr(recvbuf, "panicmsg: "); if (p1) { p1 += strlen("panic_task: "); p3 = strstr(p1, "\n"); *p3 = NULLCHAR; tt->panic_task = htol(p1, FAULT_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "panic_task: %lx\n", tt->panic_task); } if (p2) { p2 += strlen("panicmsg: "); if (CRASHDEBUG(1)) fprintf(fp, "panicmsg: %s", p2); } return TRUE; } static int remote_netdump_init(void) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1, *p2; ulong panic_task; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "NETDUMP_INIT %d %s", pc->rmfd, pc->server_memsrc); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) return FALSE; p1 = strstr(recvbuf, "panic_task: "); if (p1) { p1 += strlen("panic_task: "); p2 = strstr(p1, "\n"); *p2 = NULLCHAR; panic_task = htol(p1, FAULT_ON_ERROR, NULL); tt->panic_task = panic_task; /* kludge */ if (CRASHDEBUG(1)) fprintf(fp, "panic_task: %lx\n", tt->panic_task); } return TRUE; } uint remote_page_size(void) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *p1, *p2, *p3; uint psz; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); if (REMOTE_ACTIVE()) sprintf(sendbuf, "PAGESIZE LIVE"); else if (REMOTE_PAUSED()) sprintf(sendbuf, "PAGESIZE NIL"); else if (pc->flags & REM_NETDUMP) sprintf(sendbuf, "PAGESIZE NETDUMP"); else if (pc->flags & REM_MCLXCD) sprintf(sendbuf, "PAGESIZE MCLXCD"); else if (pc->flags & REM_LKCD) 
sprintf(sendbuf, "PAGESIZE LKCD"); else if (pc->flags & REM_S390D) sprintf(sendbuf, "PAGESIZE S390D"); else error(FATAL, "cannot determine remote page size (unknown memory source)\n"); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "FAIL")) error(FATAL, "cannot determine remote page size\n"); strtok(recvbuf, " "); /* PAGESIZE */ p1 = strtok(NULL, " "); /* LIVE, MCLXCD or LKCD */ p1 = strtok(NULL, " "); /* page size */ p2 = strtok(NULL, " "); /* remote type */ p3 = strtok(NULL, " "); /* number of Cpus */ psz = atoi(p1); if (psz > MAXRECVBUFSIZE) error(FATAL, "remote page size %d is larger than MAXRECVBUFSIZE!\n", psz); if (p2) { strncpy(rc->remote_type, p2, sizeof(rc->remote_type) - 1); rc->remote_type[sizeof(rc->remote_type) - 1] = 0; } if (p3) rc->n_cpus = atoi(p3); return psz; } /* * Copy a remote file to a local file, closing the passed-in fd when done. * A running tally of percentage-done numbers can optionally be displayed. 
*/ static int copy_remote_file(struct remote_file *rfp, int fd, char *file, char *ttystr) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE*2]; char readbuf[READBUFSIZE]; char *bufptr; long pct, last; ulong size, offset, filesize; ulong ret, req, tot; int sysret ATTRIBUTE_UNUSED; ssize_t bytes ATTRIBUTE_UNUSED; last = -1; lseek(fd, 0, SEEK_SET); filesize = rfp->size; for (offset = 0; offset < filesize; ) { size = MIN(filesize-offset, pc->rcvbufsize); BZERO(sendbuf, BUFSIZE); sprintf(sendbuf, "READ %d %lx %ld", rfp->fd, offset, size); bytes = write(pc->sockfd, sendbuf, strlen(sendbuf) + 1); bzero(readbuf, READBUFSIZE); req = size; tot = 0; sprintf(recvbuf, "%s:FAIL", sendbuf); bufptr = readbuf; while (req) { ret = recv(pc->sockfd, bufptr, req, 0); if (!tot && STRNEQ(bufptr, recvbuf)) { tot = -1; break; } req -= ret; tot += ret; bufptr += ret; } if (tot == -1) break; if (write(fd, readbuf, size) != size) { error(INFO, "%swrite to local file \"%s\" failed", ttystr ? "\n" : "", file); close(fd); return FALSE; } offset += tot; if (ttystr) { pct = (offset*100)/filesize; if (pct > last) { /* readline work-around... */ if (last < 0) sprintf(readbuf, "echo -n \'%s0%%)\'", ttystr); else if (last >= 0 && last < 10) sprintf(readbuf, "echo -e -n \"\\b\\b\\b%ld%%)\"", pct); else if (last < 100) sprintf(readbuf, "echo -e -n \"\\b\\b\\b\\b%ld%%)\"", pct); sysret = system(readbuf); last = pct; } } } if (offset != filesize) { error(INFO, "%swrite to local file \"%s\" failed", ttystr ? "\n" : "", file); close(fd); return FALSE; } fsync(fd); return TRUE; } /* * Copy a remote file to a local file, closing the passed-in fd when done. * A running tally of percentage-done numbers can optionally be displayed. 
*/ static void copy_remote_gzip_file(struct remote_file *rfp, char *file, char *ttystr) { int done; char sendbuf[BUFSIZE]; char readbuf[READBUFSIZE]; char gziphdr[DATA_HDRSIZE]; char *bufptr, *p1; FILE *pipe; size_t gtot; struct stat sbuf; ulong pct, ret, req, tot, total; sprintf(readbuf, "/usr/bin/gunzip > %s", pc->namelist); if ((pipe = popen(readbuf, "w")) == NULL) error(FATAL, "cannot open pipe to create %s\n", pc->namelist); BZERO(sendbuf, BUFSIZE); sprintf(sendbuf, "READ_GZIP %ld %s", pc->rcvbufsize, rfp->filename); remote_tcp_write_string(pc->sockfd, sendbuf); bzero(readbuf, READBUFSIZE); done = total = 0; gtot = 0; while (!done) { req = pc->rcvbufsize; bufptr = readbuf; tot = 0; while (req) { ret = (ulong)recv(pc->sockfd, bufptr, req, 0); if (!tot) { if (STRNEQ(bufptr, FAILMSG)) { fprintf(fp, "copy_remote_gzip_file: %s\n", bufptr); tot = -1; break; } if (STRNEQ(bufptr, DONEMSG) || STRNEQ(bufptr, DATAMSG)) { BCOPY(bufptr, gziphdr, DATA_HDRSIZE); if (CRASHDEBUG(1)) fprintf(fp, "copy_remote_gzip_file: [%s]\n", gziphdr); p1 = strtok(gziphdr, " "); /* DONE */ if (STREQ(p1, "DONE")) done = TRUE; p1 = strtok(NULL, " "); /* count */ gtot = atol(p1); total += gtot; } } req -= ret; tot += ret; bufptr += ret; } if (tot == -1) break; if (fwrite(&readbuf[DATA_HDRSIZE], sizeof(char), gtot, pipe) != gtot) error(FATAL, "fwrite to %s failed\n", pc->namelist); if (ttystr && (stat(pc->namelist, &sbuf) == 0)) { pct = (sbuf.st_size * 100)/rfp->size; fprintf(stderr, "\r%s%ld%%)%s", ttystr, pct, CRASHDEBUG(1) ? "\n" : ""); } } if (CRASHDEBUG(1)) fprintf(fp, "copy_remote_gzip_file: GZIP total: %ld\n", total); pclose(pipe); } /* * Set up to have get_remote_file() copy the remote module locally. * If it's already here, no copy is done. 
*/ int find_remote_module_objfile(struct load_module *lm, char *module, char *retbuf) { int absolute; char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char local[BUFSIZE]; char found[BUFSIZE]; char *p1; long csum; struct remote_file remote_file, *rfp; rfp = &remote_file; BZERO(rfp, sizeof(struct remote_file)); absolute = (*module == '/'); if (absolute) { if ((p1 = strstr(module, "@"))) { *p1 = NULLCHAR; } else { error(FATAL, "module file name must have \"@server-name\" attached\n"); } sprintf(local, "%s@%s", basename(module), pc->server); rfp->filename = module; rfp->local = local; if (!remote_file_checksum(rfp)) { error(INFO, "%s: does not exist on server %s\n", module, pc->server); return FALSE; } } else { if ((p1 = strstr(module, "@"))) *p1 = NULLCHAR; sprintf(local, "%s@%s", module, pc->server); BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "FIND_MODULE %s %s", kt->utsname.release, module); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); if (strstr(recvbuf, "")) { fprintf(fp, "find_remote_module_objfile: [%s]\n", recvbuf); return FALSE; } strtok(recvbuf, " "); /* FIND_MODULE */ p1 = strtok(NULL, " "); /* release */ p1 = strtok(NULL, " "); /* module */ strcpy(found, strtok(NULL, " ")); /* resultant path */ p1 = strtok(NULL, " "); /* checksum */ csum = htol(p1, FAULT_ON_ERROR, NULL); rfp->filename = found; rfp->local = local; rfp->csum = csum; } if (get_remote_file(rfp)) { if (!is_elf_file(rfp->local)) { error(INFO, "%s@%s: not an ELF format object file\n", rfp->filename, pc->server); return FALSE; } strcpy(retbuf, rfp->local); if (rfp->flags & REMOTE_COPY_DONE) { lm->mod_flags |= MOD_REMOTE; pc->flags |= UNLINK_MODULES; } return TRUE; } return FALSE; } /* * Tell the daemon to free the current dumpfile memory. 
*/ int remote_free_memory(void) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *type, *p1; if (pc->flags & REM_NETDUMP) type = "NETDUMP"; else if (pc->flags & REM_MCLXCD) type = "MCLXCD"; else if (pc->flags & REM_LKCD) type = "LKCD"; else if (pc->flags & REM_S390D) type = "S390D"; else return 0; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "MEMORY FREE %s", type); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); p1 = strtok(recvbuf, " "); /* MEMORY */ p1 = strtok(NULL, " "); /* FREE */ p1 = strtok(NULL, " "); /* MCLXCD, LKCD etc. */ p1 = strtok(NULL, " "); /* pages */ if (STREQ(p1, "")) return 0; return(atol(p1)); } /* * Return the number of dumpfile pages used by the daemon. */ int remote_memory_used(void) { char sendbuf[BUFSIZE]; char recvbuf[BUFSIZE]; char *type, *p1; if (pc->flags & REM_NETDUMP) type = "NETDUMP"; else if (pc->flags & REM_MCLXCD) type = "MCLXCD"; else if (pc->flags & REM_LKCD) type = "LKCD"; else if (pc->flags & REM_S390D) type = "S390D"; else return 0; BZERO(sendbuf, BUFSIZE); BZERO(recvbuf, BUFSIZE); sprintf(sendbuf, "MEMORY USED %s", type); remote_tcp_write_string(pc->sockfd, sendbuf); remote_tcp_read_string(pc->sockfd, recvbuf, BUFSIZE-1, NIL_MODE()); p1 = strtok(recvbuf, " "); /* MEMORY */ p1 = strtok(NULL, " "); /* FREE */ p1 = strtok(NULL, " "); /* MCLXCD, LKCD, etc. */ p1 = strtok(NULL, " "); /* pages */ if (STREQ(p1, "")) return 0; return(atol(p1)); } /* * Have the daemon return the output of vas_memory_dump(), lkcd_memory_dump(). 
* or dump_lkcd_environment() */ int remote_memory_dump(int verbose) { char sendbuf[BUFSIZE]; char readbuf[READBUFSIZE]; char datahdr[DATA_HDRSIZE]; char *type, *bufptr, *p1; ulong done, total; ulong ret, req, tot; size_t dtot; if (pc->flags & REM_NETDUMP) type = "NETDUMP"; else if (pc->flags & REM_MCLXCD) type = "MCLXCD"; else if (pc->flags & REM_LKCD) type = "LKCD"; else if (pc->flags & REM_S390D) type = "S390D"; else return 0; BZERO(sendbuf, BUFSIZE); sprintf(sendbuf, "MEMORY_DUMP %ld %s%s", pc->rcvbufsize, type, verbose ? "_VERBOSE" : ""); remote_tcp_write_string(pc->sockfd, sendbuf); bzero(readbuf, READBUFSIZE); done = total = 0; dtot = 0; while (!done) { req = pc->rcvbufsize; bufptr = readbuf; tot = 0; while (req) { ret = recv(pc->sockfd, bufptr, req, 0); if (!tot) { if (STRNEQ(bufptr, FAILMSG)) { fprintf(fp, "remote_memory_dump: %s\n", bufptr); tot = -1; break; } if (STRNEQ(bufptr, DONEMSG) || STRNEQ(bufptr, DATAMSG)) { BCOPY(bufptr, datahdr, DATA_HDRSIZE); if (CRASHDEBUG(1)) fprintf(fp, "remote_memory_dump: [%s]\n", datahdr); p1 = strtok(datahdr, " "); /* DONE */ if (STREQ(p1, "DONE")) done = TRUE; p1 = strtok(NULL, " "); /* count */ dtot = atol(p1); total += dtot; } } req -= ret; tot += ret; bufptr += ret; } if (tot == -1) break; if (fwrite(&readbuf[DATA_HDRSIZE], sizeof(char), dtot, fp) != dtot) error(FATAL, "fwrite to %s failed\n", pc->namelist); } return 1; } /* * Read memory from the remote memory source. The remote file descriptor * is abstracted to allow for a common /dev/mem-/dev/kmem call. Since * this is only called from read_daemon(), the request can never exceed * a page in length. 
*/ int remote_memory_read(int rfd, char *buffer, int cnt, physaddr_t address, int vcpu) { char sendbuf[BUFSIZE]; char datahdr[DATA_HDRSIZE]; char *p1; int ret, tot; ulong addr; addr = (ulong)address; /* may be virtual */ BZERO(sendbuf, BUFSIZE); if (pc->flags & REM_NETDUMP) { sprintf(sendbuf, "READ_NETDUMP %lx %d", addr, cnt); } else if (pc->flags & REM_MCLXCD) sprintf(sendbuf, "READ_MCLXCD %lx %d", addr, cnt); else if (pc->flags & REM_LKCD) sprintf(sendbuf, "READ_LKCD %d %lx %d", rfd, addr, cnt); else if (pc->flags & REM_S390D) sprintf(sendbuf, "READ_S390D %d %lx %d", rfd, addr, cnt); else if (vcpu >= 0) sprintf(sendbuf, "READ_LIVE %d %lx %d %d", rfd, addr, cnt, vcpu); else sprintf(sendbuf, "READ_LIVE %d %lx %d", rfd, addr, cnt); if (remote_tcp_write_string(pc->sockfd, sendbuf)) return -1; /* * Read request will come back with a singular header * followed by the data. */ BZERO(datahdr, DATA_HDRSIZE); ret = remote_tcp_read_string(pc->sockfd, datahdr, DATA_HDRSIZE, 1); if (ret <= 0) return -1; if (CRASHDEBUG(3)) fprintf(fp, "remote_memory_read: [%s]\n", datahdr); if (STRNEQ(datahdr, FAILMSG)) { p1 = strtok(datahdr, " "); /* FAIL */ p1 = strtok(NULL, " "); /* errno */ errno = atoi(p1); return -1; } if (!STRNEQ(datahdr, DONEMSG) && !STRNEQ(datahdr, DATAMSG)) { error(INFO, "out of sync with remote memory source\n"); return -1; } p1 = strtok(datahdr, " "); /* DONE */ p1 = strtok(NULL, " "); /* count */ tot = atol(p1); if (cnt != tot) { error(FATAL, "requested %d bytes remote memory return %d bytes\n", cnt, tot); return -1; } ret = remote_tcp_read(pc->sockfd, buffer, tot); if (ret != tot) { error(FATAL, "requested %d bytes remote memory return %d bytes\n", ret, tot); return -1; } return tot; } /* * If a command was interrupted locally, there may be leftover data waiting * to be read. 
*/ void remote_clear_pipeline(void) { int ret; fd_set rfds; char recvbuf[READBUFSIZE]; struct timeval tv; tv.tv_sec = 0; tv.tv_usec = 0; FD_ZERO(&rfds); FD_SET(pc->sockfd, &rfds); ret = select(pc->sockfd+1, &rfds, NULL, NULL, &tv); if (FD_ISSET(pc->sockfd, &rfds)) { ret = recv(pc->sockfd, recvbuf, pc->rcvbufsize, 0); if (CRASHDEBUG(1)) error(INFO, "remote_clear_pipeline(%d): %d bytes discarded\n", pc->sockfd, ret); } } /* * Attempt to run the user-entered command on the remote system. */ int remote_execute(void) { char command[BUFSIZE]; char sendbuf[BUFSIZE*2]; char readbuf[READBUFSIZE]; char datahdr[DATA_HDRSIZE]; char *bufptr, *p1; ulong done, total; ulong ret, req, tot; size_t dtot; if (!STRNEQ(args[0], "@") || strlen(args[0]) == 1) return FALSE; shift_string_left(concat_args(command, 0, FALSE), 1); if (QUOTED_STRING(command)) strip_ending_char(strip_beginning_char(command, '"'), '"'); if (CRASHDEBUG(1)) error(INFO, "remote command: %s\n", command); BZERO(sendbuf, BUFSIZE); sprintf(sendbuf, "EXECUTE %ld %s", pc->rcvbufsize, command); remote_tcp_write_string(pc->sockfd, sendbuf); bzero(readbuf, READBUFSIZE); done = total = 0; dtot = 0; while (!done) { req = pc->rcvbufsize; bufptr = readbuf; tot = 0; while (req) { ret = recv(pc->sockfd, bufptr, req, 0); if (!tot) { if (STRNEQ(bufptr, FAILMSG)) { fprintf(fp, "remote_execute: %s\n", bufptr); tot = -1; break; } if (STRNEQ(bufptr, DONEMSG) || STRNEQ(bufptr, DATAMSG)) { BCOPY(bufptr, datahdr, DATA_HDRSIZE); if (CRASHDEBUG(1)) fprintf(fp, "remote_execute: [%s]\n", datahdr); p1 = strtok(datahdr, " "); /* DONE */ if (STREQ(p1, "DONE")) done = TRUE; p1 = strtok(NULL, " "); /* count */ dtot = atol(p1); total += dtot; } } req -= ret; tot += ret; bufptr += ret; } if (tot == -1) break; if (fwrite(&readbuf[DATA_HDRSIZE], sizeof(char), dtot, fp) != dtot) error(FATAL, "fwrite failed\n"); } return TRUE; } /* * Clean up on exit. 
*/ void remote_exit(void) { char buf[BUFSIZE]; if (pc->flags & UNLINK_NAMELIST) unlink(pc->namelist); if (pc->flags & UNLINK_MODULES) unlink_module(NULL); BZERO(buf, BUFSIZE); sprintf(buf, "EXIT"); remote_tcp_write_string(pc->sockfd, buf); /* * Read but ignore the return status -- we don't really care... */ remote_tcp_read_string(pc->sockfd, buf, BUFSIZE-1, NIL_MODE()); } #endif /* !DAEMON */ crash-utility-crash-9cd43f5/va_server.h0000664000372000037200000000433215107550337017543 0ustar juerghjuergh/* va_server.h - kernel crash dump file translation library * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 10/99, Dave Winchell, Initial release for kernel crash dump support. * 11/12/99, Dave Winchell, Add support for in memory dumps. 
*/ #include "vas_crash.h" extern int vas_page_size; extern u_long vas_base_va; int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride); int va_server_init_v1(char *crash_file, u_long *start, u_long *end, u_long *stride); int vas_lseek(u_long position, int whence); int vas_lseek_v1(u_long position, int whence); size_t vas_read(void *buf_in, size_t count); size_t vas_read_v1(void *buf_in, size_t count); size_t vas_write(void *buf_in, size_t count); size_t vas_write_v1(void *buf_in, size_t count); void vas_free_data(u_long va); void vas_free_data_v1(u_long va); /* in-memory formats */ struct map_hdr { struct crash_map_entry *map; /* array of map entries */ int blk_size; /* blocksize for this map */ }; struct map_hdr_v1 { u_long start_va; u_long end_va; struct crash_map_entry_v1 *map; /* array of map entries */ int map_entries; /* entries in array pointed to by map */ u_long va_per_entry; /* va covered by each map_entry */ int blk_offset; /* add this to start_blk in map_entry * this allows relocation of compressed data * while using original maps */ int blk_size; /* blocksize for this map */ struct map_hdr_v1 *next; }; extern int clean_exit(int); crash-utility-crash-9cd43f5/netdump.h0000664000372000037200000000730715107550337017230 0ustar juerghjuergh/* netdump.h * * Copyright (C) 2002-2009, 2017-2018 David Anderson * Copyright (C) 2002-2009, 2017-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Author: David Anderson */ #include #include "vmcore.h" #define MIN_NETDUMP_ELF32_HEADER_SIZE \ sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)+sizeof(Elf32_Phdr) #define MIN_NETDUMP_ELF64_HEADER_SIZE \ sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)+sizeof(Elf64_Phdr) #define MIN_NETDUMP_ELF_HEADER_SIZE \ MAX(MIN_NETDUMP_ELF32_HEADER_SIZE, MIN_NETDUMP_ELF64_HEADER_SIZE) #define NETDUMP_ELF_HEADER_SPARE_SIZE 128 /* * "Safe" size, as in covering the ELF header and the first two program headers * plus any "padding" in-between, like section headers. */ #define SAFE_NETDUMP_ELF_HEADER_SIZE \ (MIN_NETDUMP_ELF_HEADER_SIZE+NETDUMP_ELF_HEADER_SPARE_SIZE) #define NT_TASKSTRUCT 4 #define NT_DISKDUMP 0x70000001 #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ #ifndef NT_KDUMPINFO #define NT_KDUMPINFO 7 #endif #endif /* NOTDEF */ struct pt_load_segment { off_t file_offset; physaddr_t phys_start; physaddr_t phys_end; physaddr_t zero_fill; }; struct vmcore_data { ulong flags; int ndfd; FILE *ofp; uint header_size; char *elf_header; uint num_pt_load_segments; struct pt_load_segment *pt_load_segments; Elf32_Ehdr *elf32; Elf32_Phdr *notes32; Elf32_Phdr *load32; Elf64_Ehdr *elf64; Elf64_Phdr *notes64; Elf64_Phdr *load64; Elf64_Shdr *sect0_64; void *nt_prstatus; void *nt_prpsinfo; void *nt_taskstruct; ulong task_struct; uint page_size; ulong switch_stack; uint num_prstatus_notes; void *nt_prstatus_percpu[NR_CPUS]; void *vmcoreinfo; uint size_vmcoreinfo; /* Backup Region, first 640K of System RAM. 
*/ #define KEXEC_BACKUP_SRC_END 0x0009ffff uint num_qemu_notes; void *nt_qemu_percpu[NR_CPUS]; ulonglong backup_src_start; ulong backup_src_size; ulonglong backup_offset; ulong arch_data; #define arch_data1 arch_data ulong phys_base; ulong arch_data2; void *nt_vmcoredd_array[NR_DEVICE_DUMPS]; uint num_vmcoredd_notes; }; #define DUMP_ELF_INCOMPLETE 0x1 /* dumpfile is incomplete */ /* * S390 CPU timer ELF note */ #ifndef NT_S390_TIMER #define NT_S390_TIMER 0x301 #endif /* * S390 TOD clock comparator ELF note */ #ifndef NT_S390_TODCMP #define NT_S390_TODCMP 0x302 #endif /* * S390 TOD programmable register ELF note */ #ifndef NT_S390_TODPREG #define NT_S390_TODPREG 0x303 #endif /* * S390 control registers ELF note */ #ifndef NT_S390_CTRS #define NT_S390_CTRS 0x304 #endif /* * S390 prefix ELF note */ #ifndef NT_S390_PREFIX #define NT_S390_PREFIX 0x305 #endif /* * S390 vector registers 0-15 upper half note (16 * u64) */ #ifndef NT_S390_VXRS_LOW #define NT_S390_VXRS_LOW 0x309 #endif /* * S390 vector registers 16-31 note (16 * u128) */ #ifndef NT_S390_VXRS_HIGH #define NT_S390_VXRS_HIGH 0x30a #endif #define MAX_KCORE_ELF_HEADER_SIZE (32768) struct proc_kcore_data { uint flags; uint segments; char *elf_header; size_t header_size; Elf64_Phdr *load64; Elf64_Phdr *notes64; Elf32_Phdr *load32; Elf32_Phdr *notes32; void *vmcoreinfo; uint size_vmcoreinfo; }; crash-utility-crash-9cd43f5/ibm_common.h0000664000372000037200000000726115107550337017672 0ustar juerghjuergh/* ibm_common.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * header file for zgetdump * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Despina Papadopoulou */ /* This header file holds the architecture specific crash dump header */ #ifndef _ZGETDUMP_H #define _ZGETDUMP_H /* definitions (this has to match with vmdump.h of lcrash */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ /* * Structure: s390_dump_header_t * Function: This is the header dumped at the top of every valid s390 crash * dump. */ typedef struct _s390_dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* 0x000 */ /* the version number of this dump */ uint32_t dh_version; /* 0x008 */ /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* 0x00c */ /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* 0x010 */ /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* 0x014 */ /* the size of all physical memory */ uint64_t dh_memory_size; /* 0x018 */ /* the start of physical memory */ uint64_t dh_memory_start; /* 0x020 */ /* the end of physical memory */ uint64_t dh_memory_end; /* 0x028 */ /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* 0x030 */ /* ensure that dh_tod and dh_cpu_id are 8 byte aligned */ uint32_t dh_pad; /* 0x034 */ /* the time of the dump generation using stck */ uint64_t dh_tod; /* 0x038 */ /* cpu id */ uint64_t dh_cpu_id; /* 0x040 */ /* arch */ uint32_t dh_arch; /* 0x048 */ /* volume number */ uint32_t dh_volnr; /* 0x04c */ /* build arch */ uint32_t dh_build_arch; /* 0x050 */ /* fill up to 4096 byte */ unsigned char end_pad[0x1000-0x054]; /* 0x054 */ } __attribute__((packed)) s390_dump_header_t; /* * Structure: s390_dump_end_marker_t * Function: This end marker should be at the end of every valid s390 crash * dump. */ typedef struct _s390_dump_end_marker_{ char end_string[8]; unsigned long long end_time; } __attribute__((packed)) s390_dump_end_marker_t; #endif /* _ASM_VMDUMP_H */ crash-utility-crash-9cd43f5/unwind_x86.h0000664000372000037200000000077515107550337017567 0ustar juerghjuergh/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ crash-utility-crash-9cd43f5/rse.h0000664000372000037200000000461515107550337016344 0ustar juerghjuergh#ifndef _ASM_IA64_RSE_H #define _ASM_IA64_RSE_H /* * Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999 David Mosberger-Tang */ /* * rse.h * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * include/asm-ia64/rse.h (2.4.9-e.3) */ /* * Register stack engine related helper functions. This file may be * used in applications, so be careful about the name-space and give * some consideration to non-GNU C compilers (though __inline__ is * fine). */ static __inline__ unsigned long ia64_rse_slot_num (unsigned long *addr) { return (((unsigned long) addr) >> 3) & 0x3f; } /* * Return TRUE if ADDR is the address of an RNAT slot. */ static __inline__ unsigned long ia64_rse_is_rnat_slot (unsigned long *addr) { return ia64_rse_slot_num(addr) == 0x3f; } /* * Returns the address of the RNAT slot that covers the slot at * address SLOT_ADDR. */ static __inline__ unsigned long * ia64_rse_rnat_addr (unsigned long *slot_addr) { return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3)); } /* * Calcuate the number of registers in the dirty partition starting at * BSPSTORE with a size of DIRTY bytes. This isn't simply DIRTY * divided by eight because the 64th slot is used to store ar.rnat. 
*/ static __inline__ unsigned long ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp) { unsigned long slots = (bsp - bspstore); return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40; } /* * The inverse of the above: given bspstore and the number of * registers, calculate ar.bsp. */ static __inline__ unsigned long * ia64_rse_skip_regs (unsigned long *addr, long num_regs) { long delta = ia64_rse_slot_num(addr) + num_regs; if (num_regs < 0) delta -= 0x3e; return addr + num_regs + delta/0x3f; } #endif /* _ASM_IA64_RSE_H */ crash-utility-crash-9cd43f5/lzorle_decompress.h0000664000372000037200000000443415107550337021305 0ustar juerghjuergh/* lzorle_decompress.h * * from kernel lib/lzo/lzodefs.h * * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer * Copyright (C) 2024 NIO * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #ifndef LZODEFS_H #define LZODEFS_H #define COPY4(dst, src) memcpy((dst), (src), sizeof(uint32_t)) #define COPY8(dst, src) memcpy((dst), (src), sizeof(uint64_t)) #define M1_MAX_OFFSET 0x0400 #define M2_MAX_OFFSET 0x0800 #define M3_MAX_OFFSET 0x4000 #define M4_MAX_OFFSET_V0 0xbfff #define M4_MAX_OFFSET_V1 0xbffe #define M1_MIN_LEN 2 #define M1_MAX_LEN 2 #define M2_MIN_LEN 3 #define M2_MAX_LEN 8 #define M3_MIN_LEN 3 #define M3_MAX_LEN 33 #define M4_MIN_LEN 3 #define M4_MAX_LEN 9 #define M1_MARKER 0 #define M2_MARKER 64 #define M3_MARKER 32 #define M4_MARKER 16 #define MIN_ZERO_RUN_LENGTH 4 #define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH) #define lzo_dict_t unsigned short #define D_BITS 13 #define D_SIZE (1u << D_BITS) #define D_MASK (D_SIZE - 1) #define D_HIGH ((D_MASK >> 1) + 1) #define LZO_E_OK 0 #define LZO_E_ERROR (-1) #define LZO_E_OUT_OF_MEMORY (-2) #define LZO_E_NOT_COMPRESSIBLE (-3) #define LZO_E_INPUT_OVERRUN (-4) #define LZO_E_OUTPUT_OVERRUN (-5) #define LZO_E_LOOKBEHIND_OVERRUN (-6) #define LZO_E_EOF_NOT_FOUND (-7) #define LZO_E_INPUT_NOT_CONSUMED (-8) #define LZO_E_NOT_YET_IMPLEMELZO_HFILESNTED (-9) #define LZO_E_INVALID_ARGUMENT (-10) #define HAVE_IP(x) ((unsigned long)(ip_end - ip) >= (unsigned long)(x)) #define HAVE_OP(x) ((unsigned long)(op_end - op) >= (unsigned long)(x)) #define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun #define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun #define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun int lzorle_decompress_safe(const unsigned char *in, unsigned long in_len, unsigned char *out, unsigned long *out_len, void *other/* NOT USED */); #endif crash-utility-crash-9cd43f5/kernel.c0000664000372000037200000122560215107550337017030 0ustar juerghjuergh/* kernel.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include "xen_hyper_defs.h" #include "xen_dom0.h" #include #include #include #include #include "xendump.h" #if defined(GDB_7_6) || defined(GDB_10_2) || defined(GDB_16_2) #define __CONFIG_H__ 1 #include "config.h" #endif #include "bfd.h" static void do_module_cmd(ulong, char *, ulong, char *, char *); static void show_module_taint(void); static char *find_module_objfile(char *, char *, char *); static char *module_objfile_search(char *, char *, char *); static char *get_loadavg(char *); static void get_lkcd_regs(struct bt_info *, ulong *, ulong *); static void dump_sys_call_table(char *, int); static int get_NR_syscalls(int *); static ulong get_irq_desc_addr(int); static void display_cpu_affinity(ulong *); static void display_bh_1(void); static void display_bh_2(void); static void display_bh_3(void); static void display_bh_4(void); static void dump_hrtimer_data(const ulong *cpus); static void dump_hrtimer_clock_base(const void *, const int); static void dump_hrtimer_base(const void *, const int); static void dump_active_timers(const void *, ulonglong); static int get_expires_len(const int, const ulong *, ulonglong, const int); static void print_timer(const void *, ulonglong); static ulonglong ktime_to_ns(const void *); static void dump_timer_data(const ulong *cpus); static void dump_timer_data_tvec_bases_v1(const ulong *cpus); static void dump_timer_data_tvec_bases_v2(const ulong *cpus); static void dump_timer_data_tvec_bases_v3(const ulong *cpus); static void 
dump_timer_data_timer_bases(const ulong *cpus); struct tv_range; static void init_tv_ranges(struct tv_range *, int, int, int); static int do_timer_list(ulong,int, ulong *, void *,ulong *, ulong *, struct tv_range *, ulong); static int do_timer_list_v3(ulong, int, ulong *, void *,ulong *, ulong *, ulong, long); struct timer_bases_data; static int do_timer_list_v4(struct timer_bases_data *, ulong); static int compare_timer_data(const void *, const void *); static void panic_this_kernel(void); static void dump_waitq(ulong, char *); static void reinit_modules(void); static int verify_modules(void); static void verify_namelist(void); static char *debug_kernel_version(char *); static int restore_stack(struct bt_info *); static ulong __xen_m2p(ulonglong, ulong); static ulong __xen_pvops_m2p_l2(ulonglong, ulong); static ulong __xen_pvops_m2p_l3(ulonglong, ulong); static ulong __xen_pvops_m2p_hyper(ulonglong, ulong); static ulong __xen_pvops_m2p_domU(ulonglong, ulong); static int read_xc_p2m(ulonglong, void *, long); static void read_p2m(ulong, int, void *); static int search_mapping_page(ulong, ulong *, ulong *, ulong *); static void read_in_kernel_config_err(int, char *); static void BUG_bytes_init(void); static int BUG_x86(void); static int BUG_x86_64(void); static void cpu_maps_init(void); static void get_xtime(struct timespec *); static char *log_from_idx(uint32_t, char *); static uint32_t log_next(uint32_t, char *); static void dump_log_entry(char *, int); static void dump_variable_length_record_log(int); static void hypervisor_init(void); static void dump_log_legacy(void); static void dump_variable_length_record(void); static int is_livepatch(void); static void show_kernel_taints(char *, int); static void dump_dmi_info(void); static void list_source_code(struct gnu_request *, int); static void source_tree_init(void); static ulong dump_audit_skb_queue(ulong); static ulong __dump_audit(char *); static void dump_audit(void); static void dump_printk_safe_seq_buf(int); 
static char *vmcoreinfo_read_string(const char *); static void check_vmcoreinfo(void); static int is_pvops_xen(void); static int get_linux_banner_from_vmlinux(char *, size_t); /* * popuplate the global kernel table (kt) with kernel version * information parsed from UTSNAME/OSRELEASE string */ void parse_kernel_version(char *str) { char *p1, *p2, separator; p1 = p2 = str; while (*p2 != '.' && *p2 != '\0') p2++; *p2 = NULLCHAR; kt->kernel_version[0] = atoi(p1); p1 = ++p2; while (*p2 != '.' && *p2 != '-' && *p2 != '\0') p2++; separator = *p2; *p2 = NULLCHAR; kt->kernel_version[1] = atoi(p1); if (separator == '.') { p1 = ++p2; while ((*p2 >= '0') && (*p2 <= '9')) p2++; *p2 = NULLCHAR; kt->kernel_version[2] = atoi(p1); } } /* * Gather a few kernel basics. */ void kernel_init() { int i, c; char buf[BUFSIZE]; struct syment *sp1, *sp2; char *rqstruct; char *rq_timestamp_name = NULL; char *irq_desc_type_name; struct gnu_request req; if (pc->flags & KERNEL_DEBUG_QUERY) return; if (!(kt->cpu_flags = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc cpu_flags array"); STRUCT_SIZE_INIT(cpumask_t, "cpumask_t"); cpu_maps_init(); kt->stext = symbol_value("_stext"); kt->etext = symbol_value("_etext"); get_text_init_space(); if (symbol_exists("__init_begin")) { kt->init_begin = symbol_value("__init_begin"); kt->init_end = symbol_value("__init_end"); } kt->end = highest_bss_symbol(); if ((sp1 = kernel_symbol_search("_end")) && (sp1->value > kt->end)) kt->end = sp1->value; check_vmcoreinfo(); /* * For the traditional (non-pv_ops) Xen architecture, default to writable * page tables unless: * * (1) it's an "xm save" CANONICAL_PAGE_TABLES dumpfile, or * (2) the --shadow_page_tables option was explicitly entered. * * But if the "phys_to_maching_mapping" array does not exist, and * it's not an "xm save" canonical dumpfile, then we have no choice * but to presume shadow page tables. 
*/ if (!PVOPS() && symbol_exists("xen_start_info")) { kt->flags |= ARCH_XEN; if (!(kt->xen_flags & (SHADOW_PAGE_TABLES|CANONICAL_PAGE_TABLES))) kt->xen_flags |= WRITABLE_PAGE_TABLES; if (symbol_exists("phys_to_machine_mapping")) get_symbol_data("phys_to_machine_mapping", sizeof(ulong), &kt->phys_to_machine_mapping); else if (!(kt->xen_flags & CANONICAL_PAGE_TABLES)) { kt->xen_flags &= ~WRITABLE_PAGE_TABLES; kt->xen_flags |= SHADOW_PAGE_TABLES; } if (machine_type("X86")) get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size); if (machine_type("X86_64")) { /* * kernel version < 2.6.27 => end_pfn * kernel version >= 2.6.27 => max_pfn */ if (!try_get_symbol_data("end_pfn", sizeof(ulong), &kt->p2m_table_size)) get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size); } if ((kt->m2p_page = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc m2p page."); } if (is_pvops_xen()) { kt->flags |= ARCH_XEN | ARCH_PVOPS_XEN; kt->xen_flags |= WRITABLE_PAGE_TABLES; if (machine_type("X86")) get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size); if (machine_type("X86_64")) { if (!try_get_symbol_data("end_pfn", sizeof(ulong), &kt->p2m_table_size)) get_symbol_data("max_pfn", sizeof(ulong), &kt->p2m_table_size); } if ((kt->m2p_page = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc m2p page."); if (symbol_exists("p2m_mid_missing")) { kt->pvops_xen.p2m_top_entries = XEN_P2M_TOP_PER_PAGE; get_symbol_data("p2m_top", sizeof(ulong), &kt->pvops_xen.p2m_top); get_symbol_data("p2m_mid_missing", sizeof(ulong), &kt->pvops_xen.p2m_mid_missing); get_symbol_data("p2m_missing", sizeof(ulong), &kt->pvops_xen.p2m_missing); } else if (!symbol_exists("xen_p2m_addr")) { kt->pvops_xen.p2m_top_entries = get_array_length("p2m_top", NULL, 0); kt->pvops_xen.p2m_top = symbol_value("p2m_top"); kt->pvops_xen.p2m_missing = symbol_value("p2m_missing"); } } if (symbol_exists("smp_num_cpus")) { kt->flags |= SMP; get_symbol_data("smp_num_cpus", sizeof(int), 
&kt->cpus); if (kt->cpus < 1 || kt->cpus > NR_CPUS) error(WARNING, "invalid value: smp_num_cpus: %d\n", kt->cpus); } else if (symbol_exists("__per_cpu_offset")) { kt->flags |= SMP; kt->cpus = 1; } else kt->cpus = 1; if ((sp1 = symbol_search("__per_cpu_start")) && (sp2 = symbol_search("__per_cpu_end")) && (sp1->type == 'A' || sp1->type == 'D') && (sp2->type == 'A' || sp2->type == 'D') && (sp2->value > sp1->value)) kt->flags |= SMP|PER_CPU_OFF; MEMBER_OFFSET_INIT(timekeeper_xtime, "timekeeper", "xtime"); MEMBER_OFFSET_INIT(timekeeper_xtime_sec, "timekeeper", "xtime_sec"); get_xtime(&kt->date); if (CRASHDEBUG(1)) fprintf(fp, "xtime timespec.tv_sec: %lx: %s\n", kt->date.tv_sec, ctime_tz(&kt->date.tv_sec)); if (kt->flags2 & GET_TIMESTAMP) { fprintf(fp, "%s\n\n", ctime_tz(&kt->date.tv_sec)); clean_exit(0); } MEMBER_OFFSET_INIT(uts_namespace_name, "uts_namespace", "name"); if (symbol_exists("system_utsname")) readmem(symbol_value("system_utsname"), KVADDR, &kt->utsname, sizeof(struct new_utsname), "system_utsname", RETURN_ON_ERROR); else if (symbol_exists("init_uts_ns")) { long offset = sizeof(int); if (VALID_MEMBER(uts_namespace_name)) offset = OFFSET(uts_namespace_name); readmem(symbol_value("init_uts_ns") + offset, KVADDR, &kt->utsname, sizeof(struct new_utsname), "init_uts_ns", RETURN_ON_ERROR); } else error(INFO, "cannot access utsname information\n\n"); if (CRASHDEBUG(1)) { fprintf(fp, "utsname:\n"); fprintf(fp, " sysname: %s\n", printable_string(kt->utsname.sysname) ? kt->utsname.sysname : "(not printable)"); fprintf(fp, " nodename: %s\n", printable_string(kt->utsname.nodename) ? kt->utsname.nodename : "(not printable)"); fprintf(fp, " release: %s\n", printable_string(kt->utsname.release) ? kt->utsname.release : "(not printable)"); fprintf(fp, " version: %s\n", printable_string(kt->utsname.version) ? kt->utsname.version : "(not printable)"); fprintf(fp, " machine: %s\n", printable_string(kt->utsname.machine) ? 
kt->utsname.machine : "(not printable)"); fprintf(fp, " domainname: %s\n", printable_string(kt->utsname.domainname) ? kt->utsname.domainname : "(not printable)"); } strncpy(buf, kt->utsname.release, 65); if (buf[64]) buf[64] = NULLCHAR; if (ascii_string(kt->utsname.release)) { parse_kernel_version(buf); if (CRASHDEBUG(1)) fprintf(fp, "base kernel version: %d.%d.%d\n", kt->kernel_version[0], kt->kernel_version[1], kt->kernel_version[2]); } else error(INFO, "cannot determine base kernel version\n"); verify_version(); if (symbol_exists("__per_cpu_offset")) { if (LKCD_KERNTYPES()) i = get_cpus_possible(); else i = get_array_length("__per_cpu_offset", NULL, 0); get_symbol_data("__per_cpu_offset", sizeof(long)*((i && (i <= NR_CPUS)) ? i : NR_CPUS), &kt->__per_cpu_offset[0]); kt->flags |= PER_CPU_OFF; } MEMBER_OFFSET_INIT(percpu_counter_count, "percpu_counter", "count"); MEMBER_OFFSET_INIT(percpu_counter_counters, "percpu_counter", "counters"); STRUCT_SIZE_INIT(percpu_counter, "percpu_counter"); if (STRUCT_EXISTS("runqueue")) { rqstruct = "runqueue"; rq_timestamp_name = "timestamp_last_tick"; } else if (STRUCT_EXISTS("rq")) { rqstruct = "rq"; if (MEMBER_EXISTS("rq", "clock")) rq_timestamp_name = "clock"; else if (MEMBER_EXISTS("rq", "most_recent_timestamp")) rq_timestamp_name = "most_recent_timestamp"; else if (MEMBER_EXISTS("rq", "timestamp_last_tick")) rq_timestamp_name = "timestamp_last_tick"; } else { rqstruct = NULL; error(FATAL, "neither runqueue nor rq structures exist\n"); } MEMBER_OFFSET_INIT(runqueue_cpu, rqstruct, "cpu"); /* * 'cpu' does not exist in 'struct rq'. 
*/ if (VALID_MEMBER(runqueue_cpu) && (get_array_length("runqueue.cpu", NULL, 0) > 0)) { MEMBER_OFFSET_INIT(cpu_s_curr, "cpu_s", "curr"); MEMBER_OFFSET_INIT(cpu_s_idle, "cpu_s", "idle"); STRUCT_SIZE_INIT(cpu_s, "cpu_s"); kt->runq_siblings = get_array_length("runqueue.cpu", NULL, 0); if (symbol_exists("__cpu_idx") && symbol_exists("__rq_idx")) { if (!(kt->__cpu_idx = (long *) calloc(NR_CPUS, sizeof(long)))) error(FATAL, "cannot malloc __cpu_idx array"); if (!(kt->__rq_idx = (long *) calloc(NR_CPUS, sizeof(long)))) error(FATAL, "cannot malloc __rq_idx array"); if (!readmem(symbol_value("__cpu_idx"), KVADDR, &kt->__cpu_idx[0], sizeof(long) * NR_CPUS, "__cpu_idx[NR_CPUS]", RETURN_ON_ERROR)) error(INFO, "cannot read __cpu_idx[NR_CPUS] array\n"); if (!readmem(symbol_value("__rq_idx"), KVADDR, &kt->__rq_idx[0], sizeof(long) * NR_CPUS, "__rq_idx[NR_CPUS]", RETURN_ON_ERROR)) error(INFO, "cannot read __rq_idx[NR_CPUS] array\n"); } else if (kt->runq_siblings > 1) error(INFO, "runq_siblings: %d: __cpu_idx and __rq_idx arrays don't exist?\n", kt->runq_siblings); } else { MEMBER_OFFSET_INIT(runqueue_idle, rqstruct, "idle"); MEMBER_OFFSET_INIT(runqueue_curr, rqstruct, "curr"); ASSIGN_OFFSET(runqueue_cpu) = INVALID_OFFSET; } MEMBER_OFFSET_INIT(runqueue_active, rqstruct, "active"); MEMBER_OFFSET_INIT(runqueue_expired, rqstruct, "expired"); MEMBER_OFFSET_INIT(runqueue_arrays, rqstruct, "arrays"); MEMBER_OFFSET_INIT(rq_timestamp, rqstruct, rq_timestamp_name); MEMBER_OFFSET_INIT(prio_array_queue, "prio_array", "queue"); MEMBER_OFFSET_INIT(prio_array_nr_active, "prio_array", "nr_active"); STRUCT_SIZE_INIT(runqueue, rqstruct); STRUCT_SIZE_INIT(prio_array, "prio_array"); MEMBER_OFFSET_INIT(rq_cfs, "rq", "cfs"); MEMBER_OFFSET_INIT(task_group_cfs_rq, "task_group", "cfs_rq"); MEMBER_OFFSET_INIT(task_group_rt_rq, "task_group", "rt_rq"); MEMBER_OFFSET_INIT(task_group_parent, "task_group", "parent"); /* * In 2.4, smp_send_stop() sets smp_num_cpus back to 1 * in some, but not all, architectures. 
So if a count * of 1 is found, be suspicious, and check the * init_tasks[NR_CPUS] array (also intro'd in 2.4), * for idle thread addresses. For 2.2, prepare for the * eventuality by verifying the cpu count with the machine * dependent count. */ if ((kt->flags & SMP) && DUMPFILE() && (kt->cpus == 1)) { if (symbol_exists("init_tasks")) { ulong init_tasks[NR_CPUS]; int nr_cpus; BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS); nr_cpus = get_array_length("init_tasks", NULL, 0); if ((nr_cpus < 1) || (nr_cpus > NR_CPUS)) nr_cpus = NR_CPUS; get_idle_threads(&init_tasks[0], nr_cpus); for (i = kt->cpus = 0; i < nr_cpus; i++) if (init_tasks[i]) kt->cpus++; } else kt->cpus = machdep->get_smp_cpus(); } if ((kt->flags & SMP) && ACTIVE() && (kt->cpus == 1) && (kt->flags & PER_CPU_OFF)) kt->cpus = machdep->get_smp_cpus(); if (kt->cpus_override && (c = atoi(kt->cpus_override))) { error(WARNING, "forcing cpu count to: %d\n\n", c); kt->cpus = c; } if (kt->cpus > NR_CPUS) { error(WARNING, "%s number of cpus (%d) greater than compiled-in NR_CPUS (%d)\n", kt->cpus_override && atoi(kt->cpus_override) ? 
"configured" : "calculated", kt->cpus, NR_CPUS); error(FATAL, "recompile crash with larger NR_CPUS\n"); } hypervisor_init(); STRUCT_SIZE_INIT(spinlock_t, "spinlock_t"); verify_spinlock(); if (STRUCT_EXISTS("atomic_t")) if (MEMBER_EXISTS("atomic_t", "counter")) MEMBER_OFFSET_INIT(atomic_t_counter, "atomic_t", "counter"); STRUCT_SIZE_INIT(list_head, "list_head"); MEMBER_OFFSET_INIT(list_head_next, "list_head", "next"); MEMBER_OFFSET_INIT(list_head_prev, "list_head", "prev"); if (OFFSET(list_head_next) != 0) error(WARNING, "list_head.next offset: %ld: list command may fail\n", OFFSET(list_head_next)); MEMBER_OFFSET_INIT(hlist_node_next, "hlist_node", "next"); MEMBER_OFFSET_INIT(hlist_node_pprev, "hlist_node", "pprev"); STRUCT_SIZE_INIT(hlist_head, "hlist_head"); STRUCT_SIZE_INIT(hlist_node, "hlist_node"); if (STRUCT_EXISTS("irq_desc_t")) irq_desc_type_name = "irq_desc_t"; else irq_desc_type_name = "irq_desc"; STRUCT_SIZE_INIT(irq_desc_t, irq_desc_type_name); if (MEMBER_EXISTS(irq_desc_type_name, "irq_data")) MEMBER_OFFSET_INIT(irq_desc_t_irq_data, irq_desc_type_name, "irq_data"); else MEMBER_OFFSET_INIT(irq_desc_t_affinity, irq_desc_type_name, "affinity"); if (MEMBER_EXISTS(irq_desc_type_name, "kstat_irqs")) MEMBER_OFFSET_INIT(irq_desc_t_kstat_irqs, irq_desc_type_name, "kstat_irqs"); MEMBER_OFFSET_INIT(irq_desc_t_name, irq_desc_type_name, "name"); MEMBER_OFFSET_INIT(irq_desc_t_status, irq_desc_type_name, "status"); if (MEMBER_EXISTS(irq_desc_type_name, "handler")) MEMBER_OFFSET_INIT(irq_desc_t_handler, irq_desc_type_name, "handler"); else if (MEMBER_EXISTS(irq_desc_type_name, "chip")) MEMBER_OFFSET_INIT(irq_desc_t_chip, irq_desc_type_name, "chip"); MEMBER_OFFSET_INIT(irq_desc_t_action, irq_desc_type_name, "action"); MEMBER_OFFSET_INIT(irq_desc_t_depth, irq_desc_type_name, "depth"); STRUCT_SIZE_INIT(kernel_stat, "kernel_stat"); MEMBER_OFFSET_INIT(kernel_stat_irqs, "kernel_stat", "irqs"); if (STRUCT_EXISTS("hw_interrupt_type")) { 
MEMBER_OFFSET_INIT(hw_interrupt_type_typename, "hw_interrupt_type", "typename"); MEMBER_OFFSET_INIT(hw_interrupt_type_startup, "hw_interrupt_type", "startup"); MEMBER_OFFSET_INIT(hw_interrupt_type_shutdown, "hw_interrupt_type", "shutdown"); MEMBER_OFFSET_INIT(hw_interrupt_type_handle, "hw_interrupt_type", "handle"); MEMBER_OFFSET_INIT(hw_interrupt_type_enable, "hw_interrupt_type", "enable"); MEMBER_OFFSET_INIT(hw_interrupt_type_disable, "hw_interrupt_type", "disable"); MEMBER_OFFSET_INIT(hw_interrupt_type_ack, "hw_interrupt_type", "ack"); MEMBER_OFFSET_INIT(hw_interrupt_type_end, "hw_interrupt_type", "end"); MEMBER_OFFSET_INIT(hw_interrupt_type_set_affinity, "hw_interrupt_type", "set_affinity"); } else { /* * On later kernels where hw_interrupt_type was replaced * by irq_chip */ MEMBER_OFFSET_INIT(irq_chip_typename, "irq_chip", "name"); MEMBER_OFFSET_INIT(irq_chip_startup, "irq_chip", "startup"); MEMBER_OFFSET_INIT(irq_chip_shutdown, "irq_chip", "shutdown"); MEMBER_OFFSET_INIT(irq_chip_enable, "irq_chip", "enable"); MEMBER_OFFSET_INIT(irq_chip_disable, "irq_chip", "disable"); MEMBER_OFFSET_INIT(irq_chip_ack, "irq_chip", "ack"); MEMBER_OFFSET_INIT(irq_chip_mask, "irq_chip", "mask"); MEMBER_OFFSET_INIT(irq_chip_mask_ack, "irq_chip", "mask_ack"); MEMBER_OFFSET_INIT(irq_chip_unmask, "irq_chip", "unmask"); MEMBER_OFFSET_INIT(irq_chip_eoi, "irq_chip", "eoi"); MEMBER_OFFSET_INIT(irq_chip_end, "irq_chip", "end"); MEMBER_OFFSET_INIT(irq_chip_set_affinity, "irq_chip", "set_affinity"); MEMBER_OFFSET_INIT(irq_chip_retrigger, "irq_chip", "retrigger"); MEMBER_OFFSET_INIT(irq_chip_set_type, "irq_chip", "set_type"); MEMBER_OFFSET_INIT(irq_chip_set_wake, "irq_chip", "set_wake"); } MEMBER_OFFSET_INIT(irqaction_handler, "irqaction", "handler"); MEMBER_OFFSET_INIT(irqaction_flags, "irqaction", "flags"); MEMBER_OFFSET_INIT(irqaction_mask, "irqaction", "mask"); MEMBER_OFFSET_INIT(irqaction_name, "irqaction", "name"); MEMBER_OFFSET_INIT(irqaction_dev_id, "irqaction", "dev_id"); 
MEMBER_OFFSET_INIT(irqaction_next, "irqaction", "next"); /* 6.5 and later: CONFIG_SPARSE_IRQ */ if (kernel_symbol_exists("sparse_irqs")) kt->flags2 |= IRQ_DESC_TREE_MAPLE; else if (kernel_symbol_exists("irq_desc_tree")) { get_symbol_type("irq_desc_tree", NULL, &req); if (STREQ(req.type_tag_name, "xarray")) { kt->flags2 |= IRQ_DESC_TREE_XARRAY; } else { if (MEMBER_EXISTS("radix_tree_root", "xa_head")) kt->flags2 |= IRQ_DESC_TREE_XARRAY; else kt->flags2 |= IRQ_DESC_TREE_RADIX; } } STRUCT_SIZE_INIT(irq_data, "irq_data"); if (VALID_STRUCT(irq_data)) { MEMBER_OFFSET_INIT(irq_data_irq, "irq_data", "irq"); MEMBER_OFFSET_INIT(irq_data_chip, "irq_data", "chip"); MEMBER_OFFSET_INIT(irq_data_affinity, "irq_data", "affinity"); MEMBER_OFFSET_INIT(irq_desc_irq_data, "irq_desc", "irq_data"); } STRUCT_SIZE_INIT(irq_common_data, "irq_common_data"); if (VALID_STRUCT(irq_common_data)) { MEMBER_OFFSET_INIT(irq_common_data_affinity, "irq_common_data", "affinity"); MEMBER_OFFSET_INIT(irq_desc_irq_common_data, "irq_desc", "irq_common_data"); } STRUCT_SIZE_INIT(irq_cpustat_t, "irq_cpustat_t"); MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_active, "irq_cpustat_t", "__softirq_active"); MEMBER_OFFSET_INIT(irq_cpustat_t___softirq_mask, "irq_cpustat_t", "__softirq_mask"); STRUCT_SIZE_INIT(timer_list, "timer_list"); MEMBER_OFFSET_INIT(timer_list_list, "timer_list", "list"); MEMBER_OFFSET_INIT(timer_list_next, "timer_list", "next"); MEMBER_OFFSET_INIT(timer_list_entry, "timer_list", "entry"); MEMBER_OFFSET_INIT(timer_list_expires, "timer_list", "expires"); MEMBER_OFFSET_INIT(timer_list_function, "timer_list", "function"); STRUCT_SIZE_INIT(timer_vec_root, "timer_vec_root"); if (VALID_STRUCT(timer_vec_root)) MEMBER_OFFSET_INIT(timer_vec_root_vec, "timer_vec_root", "vec"); STRUCT_SIZE_INIT(timer_vec, "timer_vec"); if (VALID_STRUCT(timer_vec)) MEMBER_OFFSET_INIT(timer_vec_vec, "timer_vec", "vec"); STRUCT_SIZE_INIT(tvec_root_s, "tvec_root_s"); if (VALID_STRUCT(tvec_root_s)) { 
STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_t_base_s"); MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, "tvec_t_base_s", "tv1"); MEMBER_OFFSET_INIT(tvec_root_s_vec, "tvec_root_s", "vec"); STRUCT_SIZE_INIT(tvec_s, "tvec_s"); MEMBER_OFFSET_INIT(tvec_s_vec, "tvec_s", "vec"); } else { STRUCT_SIZE_INIT(tvec_root_s, "tvec_root"); if (VALID_STRUCT(tvec_root_s)) { STRUCT_SIZE_INIT(tvec_t_base_s, "tvec_base"); MEMBER_OFFSET_INIT(tvec_t_base_s_tv1, "tvec_base", "tv1"); MEMBER_OFFSET_INIT(tvec_root_s_vec, "tvec_root", "vec"); STRUCT_SIZE_INIT(tvec_s, "tvec"); MEMBER_OFFSET_INIT(tvec_s_vec, "tvec", "vec"); } } if (per_cpu_symbol_search("timer_bases")) { kt->flags2 |= TIMER_BASES; MEMBER_OFFSET_INIT(timer_base_vectors, "timer_base", "vectors"); STRUCT_SIZE_INIT(timer_base, "timer_base"); } else if (per_cpu_symbol_search("per_cpu__tvec_bases")) { if (MEMBER_EXISTS("tvec_base", "migration_enabled")) kt->flags2 |= TVEC_BASES_V3; else kt->flags |= TVEC_BASES_V2; } else if (symbol_exists("tvec_bases")) kt->flags |= TVEC_BASES_V1; STRUCT_SIZE_INIT(__wait_queue, "__wait_queue"); STRUCT_SIZE_INIT(wait_queue_entry, "wait_queue_entry"); if (VALID_STRUCT(wait_queue_entry)) { MEMBER_OFFSET_INIT(wait_queue_entry_private, "wait_queue_entry", "private"); MEMBER_OFFSET_INIT(wait_queue_head_head, "wait_queue_head", "head"); MEMBER_OFFSET_INIT(wait_queue_entry_entry, "wait_queue_entry", "entry"); } else if (VALID_STRUCT(__wait_queue)) { if (MEMBER_EXISTS("__wait_queue", "task")) MEMBER_OFFSET_INIT(__wait_queue_task, "__wait_queue", "task"); else MEMBER_OFFSET_INIT(__wait_queue_task, "__wait_queue", "private"); MEMBER_OFFSET_INIT(__wait_queue_head_task_list, "__wait_queue_head", "task_list"); MEMBER_OFFSET_INIT(__wait_queue_task_list, "__wait_queue", "task_list"); } else { STRUCT_SIZE_INIT(wait_queue, "wait_queue"); if (VALID_STRUCT(wait_queue)) { MEMBER_OFFSET_INIT(wait_queue_task, "wait_queue", "task"); MEMBER_OFFSET_INIT(wait_queue_next, "wait_queue", "next"); } } STRUCT_SIZE_INIT(pt_regs, "pt_regs"); 
STRUCT_SIZE_INIT(softirq_state, "softirq_state"); STRUCT_SIZE_INIT(softirq_action, "softirq_action"); STRUCT_SIZE_INIT(desc_struct, "desc_struct"); STRUCT_SIZE_INIT(fred_frame, "fred_frame"); STRUCT_SIZE_INIT(char_device_struct, "char_device_struct"); if (VALID_STRUCT(char_device_struct)) { MEMBER_OFFSET_INIT(char_device_struct_next, "char_device_struct", "next"); MEMBER_OFFSET_INIT(char_device_struct_name, "char_device_struct", "name"); MEMBER_OFFSET_INIT(char_device_struct_fops, "char_device_struct", "fops"); MEMBER_OFFSET_INIT(char_device_struct_major, "char_device_struct", "major"); MEMBER_OFFSET_INIT(char_device_struct_baseminor, "char_device_struct", "baseminor"); MEMBER_OFFSET_INIT(char_device_struct_cdev, "char_device_struct", "cdev"); } STRUCT_SIZE_INIT(cdev, "cdev"); if (VALID_STRUCT(cdev)) MEMBER_OFFSET_INIT(cdev_ops, "cdev", "ops"); STRUCT_SIZE_INIT(probe, "probe"); if (VALID_STRUCT(probe)) { MEMBER_OFFSET_INIT(probe_next, "probe", "next"); MEMBER_OFFSET_INIT(probe_dev, "probe", "dev"); MEMBER_OFFSET_INIT(probe_data, "probe", "data"); } STRUCT_SIZE_INIT(kobj_map, "kobj_map"); if (VALID_STRUCT(kobj_map)) MEMBER_OFFSET_INIT(kobj_map_probes, "kobj_map", "probes"); MEMBER_OFFSET_INIT(module_kallsyms_start, "module", "kallsyms_start"); STRUCT_SIZE_INIT(kallsyms_header, "kallsyms_header"); if (VALID_MEMBER(module_kallsyms_start) && VALID_SIZE(kallsyms_header)) { MEMBER_OFFSET_INIT(kallsyms_header_sections, "kallsyms_header", "sections"); MEMBER_OFFSET_INIT(kallsyms_header_section_off, "kallsyms_header", "section_off"); MEMBER_OFFSET_INIT(kallsyms_header_symbols, "kallsyms_header", "symbols"); MEMBER_OFFSET_INIT(kallsyms_header_symbol_off, "kallsyms_header", "symbol_off"); MEMBER_OFFSET_INIT(kallsyms_header_string_off, "kallsyms_header", "string_off"); MEMBER_OFFSET_INIT(kallsyms_symbol_section_off, "kallsyms_symbol", "section_off"); MEMBER_OFFSET_INIT(kallsyms_symbol_symbol_addr, "kallsyms_symbol", "symbol_addr"); MEMBER_OFFSET_INIT(kallsyms_symbol_name_off, 
"kallsyms_symbol", "name_off"); MEMBER_OFFSET_INIT(kallsyms_section_start, "kallsyms_section", "start"); MEMBER_OFFSET_INIT(kallsyms_section_size, "kallsyms_section", "size"); MEMBER_OFFSET_INIT(kallsyms_section_name_off, "kallsyms_section", "name_off"); STRUCT_SIZE_INIT(kallsyms_symbol, "kallsyms_symbol"); STRUCT_SIZE_INIT(kallsyms_section, "kallsyms_section"); if (!(kt->flags & NO_KALLSYMS)) kt->flags |= KALLSYMS_V1; } MEMBER_OFFSET_INIT(module_num_symtab, "module", "num_symtab"); if (VALID_MEMBER(module_num_symtab)) { MEMBER_OFFSET_INIT(module_symtab, "module", "symtab"); MEMBER_OFFSET_INIT(module_strtab, "module", "strtab"); if (!(kt->flags & NO_KALLSYMS)) kt->flags |= KALLSYMS_V2; } if (INVALID_MEMBER(module_num_symtab) && MEMBER_EXISTS("module", "core_kallsyms")) { ASSIGN_OFFSET(module_num_symtab) = MEMBER_OFFSET("module", "core_kallsyms") + MEMBER_OFFSET("mod_kallsyms", "num_symtab"); ASSIGN_OFFSET(module_symtab) = MEMBER_OFFSET("module", "core_kallsyms") + MEMBER_OFFSET("mod_kallsyms", "symtab"); ASSIGN_OFFSET(module_strtab) = MEMBER_OFFSET("module", "core_kallsyms") + MEMBER_OFFSET("mod_kallsyms", "strtab"); if (!(kt->flags & NO_KALLSYMS)) kt->flags |= KALLSYMS_V2; } if (!(kt->flags & DWARF_UNWIND)) kt->flags |= NO_DWARF_UNWIND; /* * OpenVZ */ if (kernel_symbol_exists("pcpu_info") && STRUCT_EXISTS("pcpu_info") && STRUCT_EXISTS("vcpu_struct")) { MEMBER_OFFSET_INIT(pcpu_info_vcpu, "pcpu_info", "vcpu"); MEMBER_OFFSET_INIT(pcpu_info_idle, "pcpu_info", "idle"); MEMBER_OFFSET_INIT(vcpu_struct_rq, "vcpu_struct", "rq"); STRUCT_SIZE_INIT(pcpu_info, "pcpu_info"); STRUCT_SIZE_INIT(vcpu_struct, "vcpu_struct"); kt->flags |= ARCH_OPENVZ; } STRUCT_SIZE_INIT(mem_section, "mem_section"); BUG_bytes_init(); /* * for hrtimer */ STRUCT_SIZE_INIT(hrtimer_clock_base, "hrtimer_clock_base"); if (VALID_STRUCT(hrtimer_clock_base)) { MEMBER_OFFSET_INIT(hrtimer_clock_base_offset, "hrtimer_clock_base", "offset"); MEMBER_OFFSET_INIT(hrtimer_clock_base_active, "hrtimer_clock_base", 
"active"); MEMBER_OFFSET_INIT(hrtimer_clock_base_first, "hrtimer_clock_base", "first"); MEMBER_OFFSET_INIT(hrtimer_clock_base_get_time, "hrtimer_clock_base", "get_time"); } STRUCT_SIZE_INIT(hrtimer_base, "hrtimer_base"); if (VALID_STRUCT(hrtimer_base)) { MEMBER_OFFSET_INIT(hrtimer_base_first, "hrtimer_base", "first"); MEMBER_OFFSET_INIT(hrtimer_base_pending, "hrtimer_base", "pending"); MEMBER_OFFSET_INIT(hrtimer_base_get_time, "hrtimer_base", "get_time"); } MEMBER_OFFSET_INIT(hrtimer_cpu_base_clock_base, "hrtimer_cpu_base", "clock_base"); MEMBER_OFFSET_INIT(hrtimer_node, "hrtimer", "node"); MEMBER_OFFSET_INIT(hrtimer_list, "hrtimer", "list"); MEMBER_OFFSET_INIT(hrtimer_expires, "hrtimer", "expires"); if (INVALID_MEMBER(hrtimer_expires)) MEMBER_OFFSET_INIT(hrtimer_expires, "hrtimer", "_expires"); if (INVALID_MEMBER(hrtimer_expires)) { MEMBER_OFFSET_INIT(timerqueue_head_next, "timerqueue_head", "next"); MEMBER_OFFSET_INIT(timerqueue_node_expires, "timerqueue_node", "expires"); MEMBER_OFFSET_INIT(timerqueue_node_node, "timerqueue_node", "node"); if (INVALID_MEMBER(timerqueue_head_next)) { MEMBER_OFFSET_INIT(timerqueue_head_rb_root, "timerqueue_head", "rb_root"); MEMBER_OFFSET_INIT(rb_root_cached_rb_leftmost, "rb_root_cached", "rb_leftmost"); } } MEMBER_OFFSET_INIT(hrtimer_softexpires, "hrtimer", "_softexpires"); MEMBER_OFFSET_INIT(hrtimer_function, "hrtimer", "function"); MEMBER_OFFSET_INIT(ktime_t_tv64, "ktime", "tv64"); if (INVALID_MEMBER(ktime_t_tv64)) MEMBER_OFFSET_INIT(ktime_t_tv64, "ktime_t", "tv64"); MEMBER_OFFSET_INIT(ktime_t_sec, "ktime", "sec"); if (INVALID_MEMBER(ktime_t_sec)) MEMBER_OFFSET_INIT(ktime_t_sec, "ktime_t", "sec"); MEMBER_OFFSET_INIT(ktime_t_nsec, "ktime", "nsec"); if (INVALID_MEMBER(ktime_t_nsec)) MEMBER_OFFSET_INIT(ktime_t_nsec, "ktime_t", "nsec"); if (kt->source_tree) source_tree_init(); kt->flags &= ~PRE_KERNEL_INIT; } /* * Get cpu map address. Types are: possible, online, present and active. 
* They exist as either: * * (1) cpu__map symbols, or * (2) what is pointed to by cpu__mask */ ulong cpu_map_addr(const char *type) { char map_symbol[32]; ulong addr; sprintf(map_symbol, "cpu_%s_map", type); if (kernel_symbol_exists(map_symbol)) return symbol_value(map_symbol); sprintf(map_symbol, "cpu_%s_mask", type); if (kernel_symbol_exists(map_symbol)) { get_symbol_data(map_symbol, sizeof(ulong), &addr); return addr; } sprintf(map_symbol, "__cpu_%s_mask", type); if (kernel_symbol_exists(map_symbol)) return symbol_value(map_symbol); return 0; } static char * cpu_map_type(char *name) { char map_symbol[32]; sprintf(map_symbol, "cpu_%s_map", name); if (kernel_symbol_exists(map_symbol)) return "map"; sprintf(map_symbol, "cpu_%s_mask", name); if (kernel_symbol_exists(map_symbol)) return "mask"; sprintf(map_symbol, "__cpu_%s_map", name); if (kernel_symbol_exists(map_symbol)) return "map"; sprintf(map_symbol, "__cpu_%s_mask", name); if (kernel_symbol_exists(map_symbol)) return "mask"; return NULL; } /* * Get cpu map (possible, online, etc.) size */ static int cpu_map_size(const char *type) { int len; char map_symbol[32]; struct gnu_request req; if (LKCD_KERNTYPES()) { if (INVALID_SIZE(cpumask_t)) error(FATAL, "cannot determine type cpumask_t\n"); return SIZE(cpumask_t); } sprintf(map_symbol, "cpu_%s_map", type); if (kernel_symbol_exists(map_symbol)) { len = get_symbol_type(map_symbol, NULL, &req) == TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; return len; } if (INVALID_SIZE(cpumask_t)) return sizeof(ulong); return SIZE(cpumask_t); } /* * If the cpu_present_map, cpu_online_map and cpu_possible_maps exist, * set up the kt->cpu_flags[NR_CPUS] with their settings. 
*/
static void
cpu_maps_init(void)
{
	int i, c, m, cpu, len;
	char *buf;
	ulong *maskptr, addr, error_handle;
	struct mapinfo {
		ulong cpu_flag;
		char *name;
	} mapinfo[] = {
		{ POSSIBLE_MAP, "possible" },
		{ PRESENT_MAP, "present" },
		{ ONLINE_MAP, "online" },
		{ ACTIVE_MAP, "active" },
	};

	/*
	 * Read each mask into one buffer sized for a full cpumask_t,
	 * falling back to a single ulong when its size is unknown.
	 */
	if (INVALID_SIZE(cpumask_t))
		len = sizeof(ulong);
	else
		len = SIZE(cpumask_t);

	buf = GETBUF(len);

	for (m = 0; m < sizeof(mapinfo)/sizeof(struct mapinfo); m++) {
		if (!(addr = cpu_map_addr(mapinfo[m].name)))
			continue;	/* this map doesn't exist in this kernel */

		/* /dev/mem reads may legitimately fail; stay quiet then. */
		error_handle = pc->flags & DEVMEM ? RETURN_ON_ERROR|QUIET : RETURN_ON_ERROR;

		if (!readmem(addr, KVADDR, buf, len, mapinfo[m].name, error_handle)) {
			error(WARNING, "cannot read cpu_%s_map\n", mapinfo[m].name);
			continue;
		}

		/*
		 * Walk the bitmap one ulong at a time, setting the map's
		 * cpu_flags bit for every cpu bit found.
		 */
		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) {
			if (*maskptr == 0)
				continue;
			for (c = 0; c < BITS_PER_LONG; c++)
				if (*maskptr & (0x1UL << c)) {
					cpu = (i * BITS_PER_LONG) + c;
					if (cpu >= NR_CPUS) {
						error(WARNING,
						    "cpu_%s_%s indicates more than"
						    " %d (NR_CPUS) cpus\n",
						    mapinfo[m].name,
						    cpu_map_type(mapinfo[m].name),
						    NR_CPUS);
						break;
					}
					kt->cpu_flags[cpu] |= mapinfo[m].cpu_flag;
				}
		}

		if (CRASHDEBUG(1)) {
			/* Pad so output aligns on the longest name, "possible". */
			fprintf(fp, "%scpu_%s_%s: cpus: ",
				space(strlen("possible")-strlen(mapinfo[m].name)),
				mapinfo[m].name,
				cpu_map_type(mapinfo[m].name));
			for (i = c = 0; i < NR_CPUS; i++) {
				if (kt->cpu_flags[i] & mapinfo[m].cpu_flag) {
					fprintf(fp, "%d ", i);
					c++;
				}
			}
			fprintf(fp, "%s\n", c ? "" : "(none)");
		}
	}

	FREEBUF(buf);
}

/*
 * Determine whether a cpu is in one of the cpu masks.
 * Returns the matching cpu_flags bit (nonzero) when the cpu is in the
 * requested map, FALSE otherwise.
 */
int
in_cpu_map(int map, int cpu)
{
	/* Bound-check against the kernel's own NR_CPUS when known. */
	if (cpu >= (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS)) {
		error(INFO, "in_cpu_map: invalid cpu: %d\n", cpu);
		return FALSE;
	}

	switch (map)
	{
	case POSSIBLE_MAP:
		if (!cpu_map_addr("possible")) {
			error(INFO, "cpu_possible_map does not exist\n");
			return FALSE;
		}
		return (kt->cpu_flags[cpu] & POSSIBLE_MAP);

	case PRESENT_MAP:
		if (!cpu_map_addr("present")) {
			error(INFO, "cpu_present_map does not exist\n");
			return FALSE;
		}
		return (kt->cpu_flags[cpu] & PRESENT_MAP);

	case ONLINE_MAP:
		if (!cpu_map_addr("online")) {
			error(INFO, "cpu_online_map does not exist\n");
			return FALSE;
		}
		return (kt->cpu_flags[cpu] & ONLINE_MAP);

	case ACTIVE_MAP:
		if (!cpu_map_addr("active")) {
			error(INFO, "cpu_active_map does not exist\n");
			return FALSE;
		}
		return (kt->cpu_flags[cpu] & ACTIVE_MAP);
	}

	return FALSE;
}

/*
 * For lack of a better manner of verifying that the namelist and dumpfile
 * (or live kernel) match up, verify that the Linux banner is where
 * the namelist says it is.  Since this is a common place to bail, extra
 * debug statements are available.
*/
void
verify_version(void)
{
	char buf[BUFSIZE];
	ulong linux_banner;
	int argc, len;
	char *arglist[MAXARGS];
	char *p1, *p2;
	struct syment *sp;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	BZERO(buf, BUFSIZE);

	/*
	 * linux_banner may be a char array or a pointer to one,
	 * depending on the kernel version.
	 */
	if (!(sp = symbol_search("linux_banner")))
		error(FATAL, "linux_banner symbol does not exist?\n");
	else {
		switch (get_symbol_type("linux_banner", NULL, NULL))
		{
		case TYPE_CODE_ARRAY:
			linux_banner = sp->value;
			break;
		case TYPE_CODE_PTR:
			get_symbol_data("linux_banner", sizeof(ulong), &linux_banner);
			break;
		default:
			error(WARNING, "linux_banner is unknown type\n");
			linux_banner = sp->value;
			break;
		}
	}

	if (!IS_KVADDR(linux_banner))
		error(WARNING, "invalid linux_banner pointer: %lx\n", linux_banner);

	if (!accessible(linux_banner))
		goto bad_match;

	if (!read_string(linux_banner, buf, BUFSIZE-1))
		error(WARNING, "cannot read linux_banner string\n");

	/*
	 * On a live system the banner must prefix-match /proc/version;
	 * on a dumpfile it must at least look like a version banner,
	 * and then becomes the saved proc_version.
	 */
	if (ACTIVE()) {
		len = strlen(kt->proc_version);
		if ((len > 0) && (strncmp(buf, kt->proc_version, len) != 0)) {
			if (CRASHDEBUG(1)) {
				fprintf(fp, "/proc/version:\n%s\n", kt->proc_version);
				fprintf(fp, "linux_banner:\n%s\n", buf);
			}
			goto bad_match;
		} else if (CRASHDEBUG(1))
			fprintf(fp, "linux_banner:\n%s\n", buf);
	}

	if (DUMPFILE()) {
		if (!STRNEQ(buf, "Linux version")) {
			if (CRASHDEBUG(1))
				fprintf(fp, "linux_banner:\n%s\n", buf);
			goto bad_match;
		}
		strcpy(kt->proc_version, strip_linefeeds(buf));
	}

	verify_namelist();

	/*
	 * NOTE(review): the first test below is a plain "if", not part of
	 * the following "else if" chain -- apparently harmless since a
	 * 3.3.3 banner matches none of the later substrings, but confirm
	 * this was intentional.
	 */
	if (strstr(kt->proc_version, "gcc version 3.3.3"))
		kt->flags |= GCC_3_3_3;
	if (strstr(kt->proc_version, "gcc version 3.3.2"))
		kt->flags |= GCC_3_3_2;
	else if (strstr(kt->proc_version, "gcc version 3.2.3"))
		kt->flags |= GCC_3_2_3;
	else if (strstr(kt->proc_version, "gcc version 3.2"))
		kt->flags |= GCC_3_2;
	else if (strstr(kt->proc_version, "gcc version 2.96"))
		kt->flags |= GCC_2_96;

	/*
	 * Keeping the gcc version with #define's is getting out of hand.
	 * Parse "gcc version X.Y.Z" into kt->gcc_version[] by converting
	 * the dots to spaces and splitting into arguments.
	 */
	if ((p1 = strstr(kt->proc_version, "gcc version "))) {
		BZERO(buf, BUFSIZE);
		p1 += strlen("gcc version ");
		p2 = buf;
		while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.')) {
			if (*p1 == '.')
				*p2++ = ' ';
			else
				*p2++ = *p1;
			p1++;
		}
		argc = parse_line(buf, arglist);

		switch (argc)
		{
		case 0:
		case 1:
			break;
		case 2:
			kt->gcc_version[0] = atoi(arglist[0]);
			kt->gcc_version[1] = atoi(arglist[1]);
			break;
		default:
			kt->gcc_version[0] = atoi(arglist[0]);
			kt->gcc_version[1] = atoi(arglist[1]);
			kt->gcc_version[2] = atoi(arglist[2]);
			break;
		}
	}

	if (CRASHDEBUG(1))
		gdb_readnow_warning();

	return;

bad_match:
	if (REMOTE())
		sprintf(buf, "%s:%s", pc->server, pc->server_memsrc);
	else
		sprintf(buf, "%s", ACTIVE() ? pc->live_memsrc : pc->dumpfile);

	error(INFO, "%s and %s do not match!\n",
		pc->system_map ? pc->system_map :
		pc->namelist_debug ? pc->namelist_debug : pc->namelist, buf);

	program_usage(SHORT_FORM);
}

/*
 * Quick test to verify that we're not using a UP debug kernel on
 * an SMP system.
 */
void
verify_spinlock(void)
{
	char buf[BUFSIZE];

	/* An SMP kernel must have a complete spinlock_t type. */
	if ((kt->flags & SMP) && (SIZE(spinlock_t) == 0)) {
		error(INFO,
		    "debug data shows spinlock_t as an incomplete type (undefined),\n");
		fprintf(fp, "%sbut \"%s\" is an SMP kernel.\n",
			space(strlen(pc->program_name)+2),
			pc->namelist);
		if (CRASHDEBUG(1)) {
			fprintf(fp, "\ngdb> ptype spinlock_t\n");
			sprintf(buf, "ptype spinlock_t");
			gdb_pass_through(buf, NULL, GNU_RETURN_ON_ERROR);
		}
		non_matching_kernel();
	}
}

/*
 * Something doesn't jive.  Print a summary of the kernel/dumpfile
 * combination in use and exit.
 */
void
non_matching_kernel(void)
{
	int kernels = 0;

	if (pc->namelist)
		kernels++;
	if (pc->namelist_debug)
		kernels++;
	if (pc->debuginfo_file)
		kernels++;

	fprintf(fp,
"\nErrors like the one above typically occur when the kernel%s and memory source\ndo not match. These are the files being used:\n\n", kernels > 1 ? "s" : "");

	if (REMOTE()) {
		switch (pc->flags & (NAMELIST_LOCAL|NAMELIST_UNLINKED|NAMELIST_SAVED))
		{
		case NAMELIST_UNLINKED:
			fprintf(fp, " KERNEL: %s (temporary)\n", pc->namelist);
			break;
		case (NAMELIST_UNLINKED|NAMELIST_SAVED):
			fprintf(fp, " KERNEL: %s\n", pc->namelist);
			break;
		case NAMELIST_LOCAL:
			fprintf(fp, " KERNEL: %s\n", pc->namelist);
			break;
		}
	} else {
		if (pc->system_map) {
			fprintf(fp, " SYSTEM MAP: %s\n", pc->system_map);
			fprintf(fp, "DEBUG KERNEL: %s %s\n", pc->namelist,
				debug_kernel_version(pc->namelist));
		} else
			fprintf(fp, " KERNEL: %s\n", pc->namelist);
		if (pc->namelist_orig)
			fprintf(fp, " (uncompressed from %s)\n", pc->namelist_orig);
	}

	if (pc->debuginfo_file) {
		fprintf(fp, " DEBUGINFO: %s\n", pc->debuginfo_file);
		if (STREQ(pc->debuginfo_file, pc->namelist_debug) &&
		    pc->namelist_debug_orig)
			fprintf(fp, " (uncompressed from %s)\n", pc->namelist_debug_orig);
	} else if (pc->namelist_debug) {
		fprintf(fp, "DEBUG KERNEL: %s %s\n", pc->namelist_debug,
			debug_kernel_version(pc->namelist_debug));
		if (pc->namelist_debug_orig)
			fprintf(fp, " (uncompressed from %s)\n", pc->namelist_debug_orig);
	}

	if (dumpfile_is_split() || sadump_is_diskset() || is_ramdump_image())
		fprintf(fp, " DUMPFILES: ");
	else
		fprintf(fp, " DUMPFILE: ");
	if (ACTIVE()) {
		if (REMOTE_ACTIVE())
			fprintf(fp, "%s@%s (remote live system)\n",
				pc->server_memsrc, pc->server);
		else
			fprintf(fp, "%s\n", pc->live_memsrc);
	} else {
		if (REMOTE_DUMPFILE())
			fprintf(fp, "%s@%s (remote dumpfile)\n",
				pc->server_memsrc, pc->server);
		else if (REMOTE_PAUSED())
			fprintf(fp, "%s %s (remote paused system)\n",
				pc->server_memsrc, pc->server);
		else {
			if (dumpfile_is_split())
				show_split_dumpfiles();
			else if (sadump_is_diskset())
				sadump_show_diskset();
			else if (is_ramdump_image())
				show_ramdump_files();
			else
				fprintf(fp, "%s", pc->dumpfile);
		}
		if (LIVE())
			fprintf(fp, " [LIVE DUMP]");
	}

	fprintf(fp, "\n\n");

	if ((pc->flags & FINDKERNEL) && !(pc->system_map)) {
		fprintf(fp, "The kernel \"%s\" is most likely incorrect.\n",
			pc->namelist);
		fprintf(fp, "Try a different kernel name, or use a System.map file argument.\n\n");
	}

	clean_exit(1);
}

/*
 * Only two checks are made here:
 *
 *   1. if the namelist is SMP and the memory source isn't, bail out.
 *   2. if the basic gcc versions differ, issue a warning only.
 */
static void
verify_namelist()
{
	int i;
	char command[BUFSIZE];
	char buffer[BUFSIZE/2];
	char buffer2[BUFSIZE/2];
	char buffer3[BUFSIZE/2];
	char buffer4[BUFSIZE/2];
	char buffer5[BUFSIZE*2];
	char *p1;
	FILE *pipe;
	int found;
	char *namelist;
	int namelist_smp;
	int target_smp;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	/* the kerntypes may not match in terms of gcc version or SMP */
	if (LKCD_KERNTYPES())
		return;

	if (!strlen(kt->utsname.version))
		return;

	namelist = pc->namelist ? pc->namelist : pc->namelist_debug;
	target_smp = strstr(kt->utsname.version, " SMP ") ? TRUE : FALSE;
	namelist_smp = FALSE;

	/* Fast path: read the banner straight out of the vmlinux file. */
	if (get_linux_banner_from_vmlinux(buffer, sizeof(buffer)) &&
	    strstr(buffer, kt->proc_version)) {
		found = TRUE;
		goto found;
	}

	/*
	 * Fall back to scanning the namelist with strings(1) for a
	 * "Linux version N." banner matching the saved proc_version.
	 */
	sprintf(command, "/usr/bin/strings %s", namelist);
	if ((pipe = popen(command, "r")) == NULL) {
		error(INFO, "%s: %s\n", namelist, strerror(errno));
		return;
	}

	found = FALSE;
	sprintf(buffer3, "(unknown)");
	while (fgets(buffer, (BUFSIZE/2)-1, pipe)) {
		if (!strstr(buffer, "Linux version 2.") &&
		    !strstr(buffer, "Linux version 3.") &&
		    !strstr(buffer, "Linux version 4.") &&
		    !strstr(buffer, "Linux version 5.") &&
		    !strstr(buffer, "Linux version 6."))
			continue;

		if (strstr(buffer, kt->proc_version)) {
			found = TRUE;
			break;
		}

		/* Remember the last non-matching banner for debug output. */
		if (strstr(buffer, " SMP ")) {
			namelist_smp = TRUE;
			strcpy(buffer2, buffer);
		}

		/* Extract the namelist's gcc version into buffer3. */
		if ((p1 = strstr(buffer, "(gcc version "))) {
			p1 += strlen("(gcc version ");
			i = 0;
			while (*p1 != ' ')
				buffer3[i++] = *p1++;
			buffer3[i] = NULLCHAR;
		}
	}
	pclose(pipe);

	/* Warn (only) when the two gcc versions differ. */
	if (!found && (p1 = strstr(kt->proc_version, "(gcc version "))) {
		p1 += strlen("(gcc version ");
		i = 0;
		while (*p1 != ' ')
			buffer4[i++] = *p1++;
		buffer4[i] = NULLCHAR;

		if (!STREQ(buffer3, buffer4)) {
			if (REMOTE())
				sprintf(buffer, "%s:%s kernel",
					pc->server, pc->server_memsrc);
			else
				sprintf(buffer, "%s kernel", ACTIVE() ?
					"live system" : pc->dumpfile);
			sprintf(buffer5, " %s: %s\n %s: %s\n\n",
				namelist, buffer3, buffer, buffer4);
			error(WARNING,
			    "kernels compiled by different gcc versions:\n%s",
				buffer5);
		}
	}

found:
	if (found) {
		if (CRASHDEBUG(1)) {
			fprintf(fp, "verify_namelist:\n");
			fprintf(fp, "%s /proc/version:\n%s\n",
				ACTIVE() ? "live memory" : "dumpfile",
				kt->proc_version);
			fprintf(fp, "%s:\n%s\n", namelist, buffer);
		}
		return;
	}

	if (!(pc->flags & SYSMAP_ARG))
		error(WARNING,
		    "kernel version inconsistency between vmlinux and %s\n\n",
			ACTIVE() ? "live memory" : "dumpfile");

	if (CRASHDEBUG(1)) {
		error(WARNING,
		    "\ncannot find matching kernel version in %s file:\n\n",
			namelist);
		fprintf(fp, "verify_namelist:\n");
		fprintf(fp, "%s /proc/version:\n%s\n",
			ACTIVE() ? "live memory" : "dumpfile",
			kt->proc_version);
		fprintf(fp, "%s:\n%s\n", namelist, buffer2);
	}

	/* SMP namelist with SMP target (or UP with UP) is acceptable. */
	if (target_smp == namelist_smp)
		return;

	if (REMOTE())
		sprintf(buffer, "%s:%s", pc->server, pc->server_memsrc);
	else
		sprintf(buffer, "%s", ACTIVE() ? "live system" : pc->dumpfile);
	sprintf(buffer5, " %s is %s -- %s is %s\n",
		namelist, namelist_smp ? "SMP" : "not SMP",
		buffer, target_smp ? "SMP" : "not SMP");
	error(INFO, "incompatible arguments: %s%s",
		strlen(buffer5) > 48 ? "\n " : "", buffer5);

	program_usage(SHORT_FORM);
}

/*
 * Set up the gdb source code path.
*/
static void
source_tree_init(void)
{
	FILE *pipe;
	char command[BUFSIZE*2];
	char buf[BUFSIZE];

	if (!is_directory(kt->source_tree)) {
		error(INFO, "invalid --src argument: %s\n\n",
			kt->source_tree);
		kt->source_tree = NULL;
		return;
	}

	/* Add the arch-specific asm include directory to gdb's search path. */
	sprintf(command, "/usr/bin/ls -d %s/arch/*/include/asm 2>/dev/null",
		kt->source_tree);
	if ((pipe = popen(command, "r"))) {
		if (fgets(buf, BUFSIZE-1, pipe)) {
			sprintf(command, "directory %s", buf);
			gdb_pass_through(command, NULL, GNU_RETURN_ON_ERROR);
		}
		pclose(pipe);
	} else
		error(INFO, "%s: %s\n", command, strerror(errno));

	/* Then the source tree root itself. */
	sprintf(command, "directory %s", kt->source_tree);
	gdb_pass_through(command, NULL, GNU_RETURN_ON_ERROR);
}

/*
 * Display the source code surrounding req->addr by parsing the output
 * of gdb "list" commands.  If count_entered, show req->count lines;
 * otherwise list through the end of the containing function.
 */
static void
list_source_code(struct gnu_request *req, int count_entered)
{
	int argc, line, last, done, assembly;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE*2];
	char file[BUFSIZE];
	char *argv[MAXARGS];
	struct syment *sp;
	ulong remaining, offset;
	struct load_module *lm;
	char *p1;

	sp = value_search(req->addr, &offset);
	if (!sp || !is_symbol_text(sp))
		error(FATAL, "%lx: not a kernel text address\n", req->addr);

	/* Module addresses require the module's debug symbols to be loaded. */
	if (module_symbol(req->addr, NULL, &lm, NULL, 0)) {
		if (!(lm->mod_flags & MOD_LOAD_SYMS))
			error(FATAL, "%s: module source code is not available\n",
				lm->mod_name);
		get_line_number(req->addr, buf1, FALSE);
	}

	sprintf(buf1, "list *0x%lx", req->addr);

	open_tmpfile();
	if (!gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR)) {
		close_tmpfile();
		error(FATAL, "gdb command failed: %s\n", buf1);
	}

	done = FALSE;
	last = line = assembly = file[0] = 0;
	remaining = count_entered ? req->count : 0;

	rewind(pc->tmpfile);
	while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
		strcpy(buf2, buf1);
		argc = parse_line(buf2, argv);

		/*
		 * The first gdb output line of interest looks like
		 * "0xADDR is in func (file:line)." -- extract the file
		 * name and line number from it.
		 */
		if (!line && hexadecimal(argv[0], 0) &&
		    STREQ(argv[1], "is") &&
		    (STREQ(argv[2], "in") || STREQ(argv[2], "at"))) {
			/*
			 * Don't bother continuing beyond the initial
			 * list command if it's assembly language.
			 */
			if (STREQ(argv[2], "at"))
				assembly = TRUE;

			strip_beginning_char(argv[argc-1], '(');
			strip_ending_char(argv[argc-1], '.');
			strip_ending_char(argv[argc-1], ')');
			/*
			 * NOTE(review): strstr_rightmost() result is used
			 * unchecked; presumably gdb always emits "file:line"
			 * here -- confirm a colon is guaranteed.
			 */
			p1 = strstr_rightmost(argv[argc-1], ":");
			*p1 = NULLCHAR;
			strcpy(file, argv[argc-1]);
			line = atoi(p1+1);
			fprintf(pc->saved_fp, "FILE: %s\nLINE: %d\n\n", file, line);
			continue;
		}

		/*
		 * Check for 2 possible results of unavailable source.
		 */
		if ((argc == 3) &&
		    decimal(argv[0], 0) &&
		    STREQ(argv[1], "in") &&
		    STREQ(argv[2], file))
			error(FATAL, "%s: source code is not available\n\n", req->buf);

		sprintf(buf3, "%s: No such file or directory", file);
		if ((decimal(argv[0], 0) || decimal(argv[1], 0)) &&
		    strstr(buf1, buf3))
			error(FATAL, "%s: source code is not available\n\n", req->buf);

		/* Echo numbered source lines, starring the target line. */
		if (decimal(argv[0], 0)) {
			if (count_entered && (last >= line)) {
				if (!remaining--) {
					done = TRUE;
					break;
				}
			}
			last = atoi(argv[0]);
			fprintf(pc->saved_fp, "%s%s",
				last == line ? "* " : " ", buf1);
		} else
			continue;

		/* A closing brace past the target line ends the function. */
		if (!count_entered && (last > line) &&
		    STREQ(first_space(buf1), "\t}\n")) {
			done = TRUE;
			break;
		}
	}
	close_tmpfile();

	if (!line) {
		fprintf(fp, "FILE: (unknown)\nLINE: (unknown)\n\n");
		error(FATAL, "%s: source code is not available\n\n", req->buf);
	}

	if ((count_entered && !remaining) ||
	    (!count_entered && assembly)) {
		fprintf(fp, "\n");
		return;
	}

	/*
	 * If the end of the containing function or a specified count
	 * has not been reached, continue the listing until it has.
	 */
	while (!done) {
		open_tmpfile();
		if (!gdb_pass_through("list", fp, GNU_RETURN_ON_ERROR)) {
			close_tmpfile();
			return;
		}
		rewind(pc->tmpfile);
		while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
			strcpy(buf2, buf1);
			argc = parse_line(buf2, argv);

			if (decimal(argv[0], 0))
				line = atoi(argv[0]);
			else
				continue;

			if (count_entered) {
				if (!remaining--) {
					done = TRUE;
					break;
				}
			}

			/* No forward progress: gdb repeated a line; stop. */
			if (line == last) {
				done = TRUE;
				break;
			}

			last = line;
			fprintf(pc->saved_fp, " %s", buf1);

			if (!count_entered &&
			    STREQ(first_space(buf1), "\t}\n")) {
				done = TRUE;
				break;
			}
		}
		close_tmpfile();
	}

	fprintf(fp, "\n");
}

/*
 * From either a syment pointer, or a virtual address evaluated
 * from a symbol name plus an offset value, determine whether
 * there are multiple symbols with the same name, or if it is
 * determined to be an invalid expression of a text address.
 *
 * If there are multiple text symbols with the same name, then
 * display a "duplicate text symbols found" message followed by
 * a list of each symbol's information, and return FALSE.
 *
 * If a symbol name plus an offset value evaluates to an address
 * that goes beyond the end of the text function, print an "invalid
 * expression" message, and return FALSE.
 *
 * If there is one text symbol and one or more data symbols with
 * the same name, reset the incoming address based upon the
 * single text symbol, and return TRUE.
 *
 * All of the remaining possibilities return TRUE without changing
 * the incoming address:
 *
 *  (1) if an evaluated address cannot be resolved to any symbol.
 *  (2) if an evaluated address argument did not contain a symbol name.
 *  (3) if there is only one possible symbol resolution.
 *  (4) if there are multiple data symbols.
*/
static int
resolve_text_symbol(char *arg, struct syment *sp_in, struct gnu_request *req, int radix)
{
	int text_symbols;
	struct syment *sp, *sp_orig, *first_text_sp, *sp_arg, *sp_addr;
	ulong offset, radix_flag;
	char buf[BUFSIZE];
	char *op;

	/*
	 * If the argument is an expression (contains an operator), try
	 * to extract the leading symbol name and check it for duplicate
	 * text symbols up front.
	 */
	sp_arg = NULL;
	if (!sp_in && !IS_A_NUMBER(arg)) {
		strcpy(buf, arg);
		strip_beginning_char(buf, '(');
		strip_ending_char(buf, ')');
		clean_line(buf);
		if ((op = strpbrk(buf, "><+-&|*/%^"))) {
			*op = NULLCHAR;
			clean_line(buf);
			if ((sp = symbol_search(buf)) && is_symbol_text(sp)) {
				sp_arg = sp;
				text_symbols = 1;
				while ((sp = symbol_search_next(sp->name, sp))) {
					if (is_symbol_text(sp))
						text_symbols++;
				}
				if (text_symbols > 1) {
					sp_orig = sp_arg;
					goto duplicates;
				}
			}
		}
	}

	/* Establish the base symbol for the incoming address. */
	if (sp_in) {
		sp_orig = sp_in;
		offset = 0;
	} else if ((sp_orig = value_search(req->addr, &offset))) {
		if (!strstr(arg, sp_orig->name)) {
			if (sp_arg && (sp_orig != sp_arg)) {
				error(INFO,
				    "invalid expression: %s evaluates to: %s+%lx\n",
					arg, sp_orig->name, offset);
				return FALSE;
			}
			return TRUE;
		}
	} else {
		if (CRASHDEBUG(1))
			error(INFO, "%s: no text symbol found\n", arg);
		return TRUE;
	}

	/* A unique symbol name needs no disambiguation. */
	if (symbol_name_count(sp_orig->name) <= 1)
		return TRUE;

	if (sp_arg) {
		sp_addr = value_search(req->addr, &offset);
		if (sp_arg != sp_addr) {
			if (STREQ(sp_arg->name, sp_addr->name)) {
				sp_orig = sp_arg;
				goto duplicates;
			}
			/*
			 * Fix: the format string previously contained four
			 * conversions ("%s evaluates to %s: %s+%lx") for only
			 * three arguments -- undefined behavior that printed
			 * a garbage value.  Match the sibling message above.
			 */
			error(INFO,
			    "invalid expression: %s evaluates to: %s+%lx\n",
				arg, sp_addr->name, offset);
			return FALSE;
		}
	}

	/* Count the text symbols sharing this name. */
	text_symbols = 0;
	first_text_sp = NULL;
	sp = sp_orig;
	do {
		if (is_symbol_text(sp)) {
			if (!first_text_sp)
				first_text_sp = sp;
			text_symbols++;
		}
	} while ((sp = symbol_search_next(sp->name, sp)));

	/*
	 * If no text symbols for a symbol name exist, let it be...
	 */
	if (!text_symbols) {
		if (CRASHDEBUG(1))
			error(INFO, "%s: no text symbol found\n", arg);
		return TRUE;
	}

	/*
	 * If only one symbol with the specified name is text,
	 * reset the req->addr as appropriate in case a
	 * lower-value data symbol was originally selected.
	 */
	if (text_symbols == 1) {
		if (sp_in)
			req->addr = first_text_sp->value;
		else
			req->addr = first_text_sp->value + offset;
		return TRUE;
	}

duplicates:
	/*
	 * Multiple text symbols with the same name exist.
	 * Display them all and return FALSE.
	 */
	error(INFO, "%s: duplicate text symbols found:\n", arg);
	radix_flag = radix == 10 ? SHOW_DEC_OFFS : SHOW_HEX_OFFS;
	sp = sp_orig;
	do {
		if (is_symbol_text(sp)) {
			if (module_symbol(sp->value, NULL, NULL, NULL, 0))
				show_symbol(sp, 0, SHOW_LINENUM|SHOW_MODULE|radix_flag);
			else
				show_symbol(sp, 0, SHOW_LINENUM|radix_flag);
		}
	} while ((sp = symbol_search_next(sp->name, sp)));

	return FALSE;
}

/*
 * Position pc->tmpfile for a reverse disassembly: remember the file
 * offsets of the last req->count instruction lines preceding the
 * target address, then seek back to the earliest one.  Returns TRUE
 * on success, FALSE if the seek fails (file is rewound in that case).
 */
static int
set_reverse_tmpfile_offset(struct gnu_request *req, ulong target)
{
	long index, *tmpfile_offsets;
	ulong curaddr;
	char buf[BUFSIZE];

	/* Circular buffer of the last req->count line offsets. */
	tmpfile_offsets = (long *)GETBUF(sizeof(long) * req->count);

	rewind(pc->tmpfile);
	index = 0;
	tmpfile_offsets[index] = ftell(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		strip_beginning_whitespace(buf);
		if (STRNEQ(buf, "0x")) {
			extract_hex(buf, &curaddr, ':', TRUE);
			if (curaddr >= target)
				break;
		}
		index = (index+1) % req->count;
		tmpfile_offsets[index] = ftell(pc->tmpfile);
	}

	/*
	 * The oldest saved offset is the next slot if the buffer wrapped,
	 * otherwise the beginning of the file.
	 */
	if (((index+1) < req->count) && tmpfile_offsets[index+1])
		index++;
	else
		index = 0;

	if (fseek(pc->tmpfile, tmpfile_offsets[index], SEEK_SET) < 0) {
		FREEBUF(tmpfile_offsets);
		rewind(pc->tmpfile);
		return FALSE;
	}

	FREEBUF(tmpfile_offsets);
	return TRUE;
}

/*
 * This routine disassembles text in one of four manners.  A starting
 * address, an expression, or symbol must be entered.  Then:
 *
 *   1. if a count is appended, disassemble that many instructions starting
 *      at the target address.
 *   2. if a count is NOT entered, and the target address is the starting
 *      address of a function, disassemble the whole function.
 *   3. if the target address is other than the starting address of a
 *      function, and no count argument is appended, then disassemble one
 *      instruction.
 *   4.
If the -r option is used, disassemble all instructions in a routine * up to and including the target address. * 5. If -u option, just pass the user address and count, ignoring any of * the above. */ static char *dis_err = "gdb unable to disassemble kernel virtual address %lx\n"; void cmd_dis(void) { int c; int do_load_module_filter, do_machdep_filter, reverse, forward; int unfiltered, user_mode, count_entered, bug_bytes_entered, sources; unsigned int radix; ulong curaddr; ulong target; ulong count; ulong offset; ulong low, high; struct syment *sp; struct gnu_request *req; char *savename; char *ret ATTRIBUTE_UNUSED; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; if ((argcnt == 2) && STREQ(args[1], "-b")) { fprintf(fp, "encoded bytes being skipped after ud2a: "); if (kt->BUG_bytes < 0) fprintf(fp, "undetermined\n"); else fprintf(fp, "%d\n", kt->BUG_bytes); return; } reverse = forward = count_entered = bug_bytes_entered = sources = FALSE; sp = NULL; unfiltered = user_mode = do_machdep_filter = do_load_module_filter = 0; radix = 0; target = 0; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->flags |= GNU_FROM_TTY_OFF|GNU_RETURN_ON_ERROR; req->count = 1; while ((c = getopt(argcnt, args, "dxhulsrfUb:B:")) != EOF) { switch(c) { case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; case 'x': case 'h': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'U': unfiltered = TRUE; break; case 'u': if (sources) error(FATAL, "-s can only be used with kernel addresses\n"); user_mode = TRUE; break; case 'r': if (forward) error(FATAL, "-r and -f are mutually exclusive\n"); if (sources) error(FATAL, "-r and -s are mutually exclusive\n"); reverse = TRUE; break; case 'f': if (reverse) error(FATAL, "-r and -f are mutually exclusive\n"); if (sources) error(FATAL, "-f and -s are mutually exclusive\n"); forward = TRUE; break; case 
'l': if (NO_LINE_NUMBERS()) error(INFO, "line numbers are not available\n"); else req->flags |= GNU_PRINT_LINE_NUMBERS; BZERO(buf4, BUFSIZE); break; case 's': if (reverse) error(FATAL, "-r and -s are mutually exclusive\n"); if (forward) error(FATAL, "-f and -s are mutually exclusive\n"); if (user_mode) error(FATAL, "-s can only be used with kernel addresses\n"); if (NO_LINE_NUMBERS()) error(INFO, "line numbers are not available\n"); sources = TRUE; break; case 'B': case 'b': kt->BUG_bytes = atoi(optarg); bug_bytes_entered = TRUE; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!radix) radix = pc->output_radix; if (args[optind]) { if (can_eval(args[optind])) { req->buf = args[optind]; req->addr = eval(args[optind], FAULT_ON_ERROR, NULL); if (!user_mode && !resolve_text_symbol(args[optind], NULL, req, radix)) { FREEBUF(req); return; } } else if (hexadecimal(args[optind], 0) && !symbol_exists(args[optind])) { req->buf = args[optind]; req->addr = htol(args[optind], FAULT_ON_ERROR, NULL); sp = value_search(req->addr, &offset); if (!user_mode && !sp) { error(WARNING, "%lx: no associated kernel symbol found\n", req->addr); unfiltered = TRUE; } if (!offset && sp && is_symbol_text(sp)) req->flags |= GNU_FUNCTION_ONLY; } else if ((sp = symbol_search(args[optind]))) { req->buf = args[optind]; req->addr = sp->value; if (!resolve_text_symbol(args[optind], sp, req, radix)) { FREEBUF(req); return; } if (is_symbol_text(sp)) req->flags |= GNU_FUNCTION_ONLY; } else { fprintf(fp, "symbol not found: %s\n", args[optind]); fprintf(fp, "possible alternatives:\n"); if (!symbol_query(args[optind], " ", NULL)) fprintf(fp, " (none found)\n"); FREEBUF(req); return; } if (args[++optind]) { if (forward) forward = FALSE; req->count = stol(args[optind], FAULT_ON_ERROR, NULL); req->flags &= ~GNU_FUNCTION_ONLY; if (!req->count) error(FATAL, "invalid count argument: 0\n"); count_entered++; } if (sources) { list_source_code(req, count_entered); return; } if 
(unfiltered) { sprintf(buf1, "x/%ldi 0x%lx", req->count ? req->count : 1, req->addr); gdb_pass_through(buf1, NULL, GNU_RETURN_ON_ERROR); return; } if (!user_mode && !IS_KVADDR(req->addr)) error(FATAL, "%lx is not a kernel virtual address\n", req->addr); if (user_mode) { sprintf(buf1, "x/%ldi 0x%lx", req->count ? req->count : 1, req->addr); pc->curcmd_flags |= MEMTYPE_UVADDR; gdb_pass_through(buf1, NULL, GNU_RETURN_ON_ERROR); return; } req->command = GNU_RESOLVE_TEXT_ADDR; gdb_interface(req); req->flags &= ~GNU_COMMAND_FAILED; if (reverse || forward || req->flags & GNU_FUNCTION_ONLY) { if (get_text_function_range(sp ? sp->value : req->addr, &low, &high)) req->addr2 = high; else if (sp) { savename = sp->name; if ((sp = next_symbol(NULL, sp))) req->addr2 = sp->value; else error(FATAL, "unable to determine symbol after %s\n", savename); } else { if ((sp = value_search(req->addr, NULL)) && (sp = next_symbol(NULL, sp))) req->addr2 = sp->value; else error(FATAL, dis_err, req->addr); } } if (reverse || forward) { target = req->addr; if ((sp = value_search(target, NULL)) == NULL) error(FATAL, "cannot resolve address: %lx\n", target); req->addr = sp->value; } else count = 0; do_load_module_filter = module_symbol(req->addr, NULL, NULL, NULL, *gdb_output_radix); do_machdep_filter = machdep->dis_filter(req->addr, NULL, radix); open_tmpfile(); if (reverse) sprintf(buf5, "x/%ldi 0x%lx", (target - req->addr) ? target - req->addr : 1, req->addr); else sprintf(buf5, "x/%ldi 0x%lx", count_entered && req->count ? req->count : forward || req->flags & GNU_FUNCTION_ONLY ? 
req->addr2 - req->addr : 1, req->addr); gdb_pass_through(buf5, NULL, GNU_RETURN_ON_ERROR); if (req->flags & GNU_COMMAND_FAILED) { close_tmpfile(); error(FATAL, dis_err, req->addr); } if (reverse && count_entered && set_reverse_tmpfile_offset(req, target)) count_entered = FALSE; else rewind(pc->tmpfile); while (fgets(buf2, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf2, "=>")) shift_string_left(buf2, 2); strip_beginning_whitespace(buf2); if (do_load_module_filter) load_module_filter(buf2, LM_DIS_FILTER); if (STRNEQ(buf2, "0x")) extract_hex(buf2, &curaddr, ':', TRUE); if (forward) { if (curaddr < target) continue; else forward = FALSE; } if (!reverse) if (!count_entered && req->addr2 && (curaddr >= req->addr2)) break; if (do_machdep_filter) machdep->dis_filter(curaddr, buf2, radix); if (req->flags & GNU_PRINT_LINE_NUMBERS) { get_line_number(curaddr, buf3, FALSE); if (!STREQ(buf3, buf4)) { print_verbatim( pc->saved_fp, buf3); print_verbatim( pc->saved_fp, "\n"); strcpy(buf4, buf3); } } print_verbatim(pc->saved_fp, buf2); if (reverse) { if (curaddr >= target) { if (LASTCHAR(clean_line(buf2)) != ':') break; ret = fgets(buf2, BUFSIZE, pc->tmpfile); if (do_load_module_filter) load_module_filter(buf2, LM_DIS_FILTER); if (do_machdep_filter) machdep->dis_filter(curaddr, buf2, radix); print_verbatim(pc->saved_fp, buf2); break; } } if (count_entered && LASTCHAR(clean_line(buf2)) != ':') if (++count == req->count) break; } close_tmpfile(); } else if (bug_bytes_entered) return; else cmd_usage(pc->curcmd, SYNOPSIS); FREEBUF(req); return; } /* * x86 and x86_64 kernels may have file/line-number encoding * asm()'d in just after the "ud2a" instruction, which confuses * the disassembler and the x86 backtracer. Determine the * number of bytes to skip. 
 */
/*
 *  Cache in kt->BUG_bytes the number of encoded bytes that follow a
 *  "ud2a" instruction on x86/x86_64, so the disassembler and backtracer
 *  can skip them.  Other architectures leave kt->BUG_bytes untouched.
 */
static void
BUG_bytes_init(void)
{
	if (machine_type("X86"))
		kt->BUG_bytes = BUG_x86();
	else if (machine_type("X86_64"))
		kt->BUG_bytes = BUG_x86_64();
}

/*
 *  Determine the i386 BUG() encoding size for this kernel version,
 *  falling back to disassembling a known BUG() caller ("do_exit")
 *  and verifying the file/line encoding after its ud2a instruction.
 *  Returns the byte count to skip (0 or X86_BUG_BYTES).
 */
static int
BUG_x86(void)
{
	struct syment *sp, *spn;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *arglist[MAXARGS];
	ulong vaddr, fileptr;
	int found;

	/*
	 *  Prior to 2.4.19, a call to do_BUG() preceded
	 *  the standalone ud2a instruction.
	 */
	if (THIS_KERNEL_VERSION < LINUX(2,4,19))
		return 0;

	/*
	 *  2.6.20 introduced __bug_table support for i386,
	 *  but even if CONFIG_DEBUG_BUGVERBOSE is not configured,
	 *  the ud2a stands alone.
	 */
	if (THIS_KERNEL_VERSION >= LINUX(2,6,20))
		return 0;

	/*
	 *  For previous kernel versions, it may depend upon
	 *  whether CONFIG_DEBUG_BUGVERBOSE was configured:
	 *
	 *   #ifdef CONFIG_DEBUG_BUGVERBOSE
	 *   #define BUG()                           \
	 *    __asm__ __volatile__(  "ud2\n"         \
	 *                           "\t.word %c0\n" \
	 *                           "\t.long %c1\n" \
	 *                            : : "i" (__LINE__), "i" (__FILE__))
	 *   #else
	 *   #define BUG() __asm__ __volatile__("ud2\n")
	 *   #endif
	 *
	 *  But that's not necessarily true, since there are
	 *  pre-2.6.11 versions that force it like so:
	 *
	 *   #if 1   /- Set to zero for a slightly smaller kernel -/
	 *   #define BUG()                           \
	 *    __asm__ __volatile__(  "ud2\n"         \
	 *                           "\t.word %c0\n" \
	 *                           "\t.long %c1\n" \
	 *                            : : "i" (__LINE__), "i" (__FILE__))
	 *   #else
	 *   #define BUG() __asm__ __volatile__("ud2\n")
	 *   #endif
	 */

	/*
	 *  This works if in-kernel config data is available.
	 */
	if ((THIS_KERNEL_VERSION >= LINUX(2,6,11)) &&
	    (kt->flags & BUGVERBOSE_OFF))
		return 0;

	/*
	 *  At this point, it's a pretty safe bet that it's configured,
	 *  but to be sure, disassemble a known BUG() caller and
	 *  verify that the encoding is there.
	 */
#define X86_BUG_BYTES (6)  /* sizeof(short) + sizeof(pointer) */

	if (!(sp = symbol_search("do_exit")) ||
	    !(spn = next_symbol(NULL, sp)))
		return X86_BUG_BYTES;

	sprintf(buf1, "x/%ldi 0x%lx", spn->value - sp->value, sp->value);

	found = FALSE;
	vaddr = 0;
	open_tmpfile();
	gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR);
	rewind(pc->tmpfile);
	while (fgets(buf2, BUFSIZE, pc->tmpfile)) {
		if (parse_line(buf2, arglist) < 3)
			continue;
		/* ignore lines whose address lies outside do_exit() */
		if ((vaddr = htol(strip_ending_char(arglist[0], ':'),
		    RETURN_ON_ERROR|QUIET, NULL)) >= spn->value)
			continue;
		if (STREQ(arglist[2], "ud2a")) {
			found = TRUE;
			break;
		}
	}
	close_tmpfile();

	/*
	 *  The filename pointer sits 4 bytes past the ud2a (2-byte opcode
	 *  plus the 2-byte .word line number).
	 */
	if (!found || !readmem(vaddr+4, KVADDR, &fileptr, sizeof(ulong),
	    "BUG filename pointer", RETURN_ON_ERROR|QUIET))
		return X86_BUG_BYTES;

	if (!IS_KVADDR(fileptr)) {
		if (CRASHDEBUG(1))
			fprintf(fp, "no filename pointer: kt->BUG_bytes: 0\n");
		return 0;
	}

	if (!read_string(fileptr, buf1, BUFSIZE-1))
		error(WARNING,
		    "cannot read BUG (ud2a) encoded filename address: %lx\n",
			fileptr);
	else if (CRASHDEBUG(1))
		fprintf(fp, "BUG bytes filename encoding: [%s]\n", buf1);

	return X86_BUG_BYTES;
}

/*
 *  Determine the x86_64 BUG() encoding size from the bug_frame
 *  structure size, when one exists for this kernel version.
 */
static int
BUG_x86_64(void)
{
	/*
	 *  2.6.20 introduced __bug_table support for x86_64,
	 *  but even if CONFIG_DEBUG_BUGVERBOSE is not configured,
	 *  the ud2a stands alone.
	 */
	if (THIS_KERNEL_VERSION >= LINUX(2,6,20))
		return 0;

	/*
	 *  The original bug_frame structure looks like this, which
	 *  causes the disassembler to go off into the weeds:
	 *
	 *    struct bug_frame {
	 *        unsigned char ud2[2];
	 *        char *filename;
	 *        unsigned short line;
	 *    }
	 *
	 *  In 2.6.13, fake push and ret instructions were encoded
	 *  into the frame so that the disassembly would at least
	 *  "work", although the two fake instructions show nonsensical
	 *  arguments:
	 *
	 *    struct bug_frame {
	 *        unsigned char ud2[2];
	 *        unsigned char push;
	 *        signed int filename;
	 *        unsigned char ret;
	 *        unsigned short line;
	 *    }
	 */
	if (STRUCT_EXISTS("bug_frame"))
		return (int)(STRUCT_SIZE("bug_frame") - 2);	/* minus the ud2 itself */

	return 0;
}

/*
 *  Callback from gdb disassembly code.
*/ int kernel_BUG_encoding_bytes(void) { return kt->BUG_bytes; } #ifdef NOT_USED /* * To avoid premature stoppage/extension of a dis that includes * one of the following x86/gcc 3.2 constant declarations, don't allow them * to be considered the next text symbol. */ static struct syment * next_text_symbol(struct syment *sp_in) { return next_symbol(NULL, sp_in); struct syment *sp; sp = sp_in; while ((sp = next_symbol(NULL, sp))) { if (STREQ(sp->name, "__constant_c_and_count_memset") || STREQ(sp->name, "__constant_copy_from_user") || STREQ(sp->name, "__constant_copy_from_user_nocheck") || STREQ(sp->name, "__constant_copy_to_user") || STREQ(sp->name, "__constant_copy_to_user_nocheck") || STREQ(sp->name, "__constant_memcpy") || STREQ(sp->name, "__constant_c_and_count_memset") || STREQ(sp->name, "__constant_c_x_memset") || STREQ(sp->name, "__constant_memcpy")) { continue; } break; } return sp; } #endif /* NOT_USED */ /* * Nothing to do. */ int generic_dis_filter(ulong value, char *buf, unsigned int output_radix) { return TRUE; } #define FRAMESIZE_DEBUG_MESSAGE \ "\nx86 usage: bt -D [size|clear|dump|seek|noseek|validate|novalidate] [-I eip]\n If eip: set its associated framesize to size.\n \"validate/novalidate\" will turn on/off V bit for this eip entry.\n If !eip: \"clear\" will clear the framesize cache and RA seek/noseek flags.\n \"dump\" will dump the current framesize cache entries.\n \"seek/noseek\" turns on/off RA seeking.\n \"validate/novalidate\" turns on/off V bit for all current entries.\n\nx86_64 usage: bt -D [clear|dump|validate|framepointer|noframepointer] [-I rip]\n If rip: \"validate\" will verbosely recalculate the framesize without\n framepointers (no stack reference).\n If !rip: \"clear\" will clear the framesize cache.\n \"dump\" will dump the current framesize cache entries.\n \"framepointer/noframepointer\" toggle the FRAMEPOINTER flag and\n clear the framesize cache." /* * Display a kernel stack backtrace. 
Arguments may be any number pid or task * values, or, if no arguments are given, the stack trace of the current * context will be displayed. Alternatively: * * -a displays the stack traces of the active tasks on each CPU. * (only applicable to crash dumps) * -r display raw stack data, consisting of a memory dump of the two * pages of memory containing the task_union structure. * -s displays arguments symbolically. */ void clone_bt_info(struct bt_info *orig, struct bt_info *new, struct task_context *tc) { BCOPY(orig, new, sizeof(*new)); new->stackbuf = NULL; new->tc = tc; new->task = tc->task; new->stackbase = GET_STACKBASE(tc->task); new->stacktop = GET_STACKTOP(tc->task); } #define BT_SETUP(TC) \ clone_bt_info(&bt_setup, bt, (TC)); \ if (refptr) { \ BZERO(&reference, sizeof(struct reference)); \ bt->ref = &reference; \ bt->ref->str = refptr; \ } #define DO_TASK_BACKTRACE() \ { \ BT_SETUP(tc); \ if (!BT_REFERENCE_CHECK(bt)) \ print_task_header(fp, tc, subsequent++); \ back_trace(bt); \ } #define DO_THREAD_GROUP_BACKTRACE() \ { \ tc = pid_to_context(tgid); \ BT_SETUP(tc); \ if (!BT_REFERENCE_CHECK(bt)) \ print_task_header(fp, tc, subsequent++); \ if (setjmp(pc->foreach_loop_env)) { \ pc->flags &= ~IN_FOREACH; \ free_all_bufs(); \ } else { \ pc->flags |= IN_FOREACH; \ back_trace(bt); \ pc->flags &= ~IN_FOREACH; \ } \ tc = FIRST_CONTEXT(); \ for (i = 0; i < RUNNING_TASKS(); i++, tc++) { \ if (tc->pid == tgid) \ continue; \ if (task_tgid(tc->task) != tgid) \ continue; \ BT_SETUP(tc); \ if (!BT_REFERENCE_CHECK(bt)) \ print_task_header(fp, tc, subsequent++);\ if (setjmp(pc->foreach_loop_env)) { \ pc->flags &= ~IN_FOREACH; \ free_all_bufs(); \ } else { \ pc->flags |= IN_FOREACH; \ back_trace(bt); \ pc->flags &= ~IN_FOREACH; \ } \ } \ pc->flags &= ~IN_FOREACH; \ } void cmd_bt(void) { int i, c; ulong value, *cpus; struct task_context *tc; int subsequent, active, panic; struct stack_hook hook; struct bt_info bt_info, bt_setup, *bt; struct reference reference; char *refptr; 
ulong tgid, task; char arg_buf[BUFSIZE]; tc = NULL; cpus = NULL; subsequent = active = panic = 0; hook.eip = hook.esp = 0; refptr = 0; bt = &bt_info; BZERO(bt, sizeof(struct bt_info)); if (kt->flags & USE_OPT_BT) bt->flags |= BT_OPT_BACK_TRACE; while ((c = getopt(argcnt, args, "D:fFI:S:c:n:aAloreEgstTdxR:Ovp")) != EOF) { switch (c) { case 'f': bt->flags |= BT_FULL; break; case 'F': if (bt->flags & BT_FULL_SYM_SLAB) bt->flags |= BT_FULL_SYM_SLAB2; else bt->flags |= (BT_FULL|BT_FULL_SYM_SLAB); break; case 'o': if (!(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64")) || XEN_HYPER_MODE()) option_not_supported(c); bt->flags |= BT_OPT_BACK_TRACE; break; case 'O': if (!(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64")) || XEN_HYPER_MODE()) option_not_supported(c); else if (kt->flags & USE_OPT_BT) { /* * Make this setting idempotent across the use of * $HOME/.crashrc, ./.crashrc, and "-i input" files. * If we've been here before during initialization, * leave it alone. */ if (pc->flags & INIT_IFILE) { error(INFO, "use %s bt method by default (already set)\n", machine_type("ARM64") ? "optional" : "old"); return; } kt->flags &= ~USE_OPT_BT; error(INFO, "use %s bt method by default\n", machine_type("ARM64") ? "original" : "new"); } else { kt->flags |= USE_OPT_BT; error(INFO, "use %s bt method by default\n", machine_type("ARM64") ? 
"optional" : "old"); } return; case 'R': if (refptr) error(INFO, "only one -R option allowed\n"); else refptr = optarg; break; case 'l': if (NO_LINE_NUMBERS()) error(INFO, "line numbers are not available\n"); else bt->flags |= BT_LINE_NUMBERS; break; case 'E': if (XEN_HYPER_MODE()) option_not_supported(c); bt->flags |= BT_EFRAME_SEARCH|BT_EFRAME_SEARCH2; bt->hp = &hook; break; case 'e': if (XEN_HYPER_MODE()) option_not_supported(c); bt->flags |= BT_EFRAME_SEARCH; break; case 'g': #ifdef GDB_5_3 bt->flags |= BT_USE_GDB; #else bt->flags |= BT_THREAD_GROUP; #endif break; case 'x': if (bt->radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); bt->radix = 16; break; case 'd': if (bt->radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); bt->radix = 10; break; case 'I': bt->hp = &hook; hook.eip = convert(optarg, FAULT_ON_ERROR, NULL, NUM_HEX|NUM_EXPR); break; case 'D': if (STREQ(optarg, "seek")) { kt->flags |= RA_SEEK; kt->flags &= ~NO_RA_SEEK; return; } else if (STREQ(optarg, "noseek")) { kt->flags |= NO_RA_SEEK; kt->flags &= ~RA_SEEK; return; } bt->hp = &hook; bt->flags |= BT_FRAMESIZE_DEBUG; if (STREQ(optarg, "dump")) hook.esp = 1; else if (STRNEQ(optarg, "level-")) bt->debug = dtol(optarg+6, FAULT_ON_ERROR, NULL); else if (STREQ(optarg, "validate")) hook.esp = (ulong)-1; else if (STREQ(optarg, "novalidate")) hook.esp = (ulong)-2; else if (STREQ(optarg, "framepointer")) hook.esp = (ulong)-3; else if (STREQ(optarg, "noframepointer")) hook.esp = (ulong)-4; else if (STREQ(optarg, "orc")) hook.esp = (ulong)-5; else if (STREQ(optarg, "clear")) { kt->flags &= ~(RA_SEEK|NO_RA_SEEK); hook.esp = 0; } else if (*optarg == '-') { hook.esp = dtol(optarg+1, FAULT_ON_ERROR, NULL); hook.esp = (ulong)(0 - (long)hook.esp); } else if (STREQ(optarg, "dwarf") || STREQ(optarg, "cfi")) { if (!(kt->flags & DWARF_UNWIND_CAPABLE)) return; } else hook.esp = dtol(optarg, FAULT_ON_ERROR, NULL); break; case 'S': bt->hp = &hook; hook.esp = htol(optarg, FAULT_ON_ERROR, NULL); 
if (!hook.esp) error(FATAL, "invalid stack address for this task: 0\n"); break; case 'c': if (bt->flags & BT_CPUMASK) { error(INFO, "only one -c option allowed\n"); argerrs++; } else { bt->flags |= BT_CPUMASK; BZERO(arg_buf, BUFSIZE); strcpy(arg_buf, optarg); cpus = get_cpumask_buf(); } break; case 'A': if (!machine_type("S390X")) option_not_supported(c); bt->flags |= BT_SHOW_ALL_REGS; /* FALLTHROUGH */ case 'a': active++; break; case 'n': if ((machine_type("X86_64") || machine_type("ARM64")) && STREQ(optarg, "idle")) bt->flags |= BT_SKIP_IDLE; else option_not_supported(c); break; case 'r': bt->flags |= BT_RAW; break; case 's': bt->flags |= BT_SYMBOL_OFFSET; break; case 'T': bt->flags |= BT_TEXT_SYMBOLS_ALL; case 't': bt->flags |= BT_TEXT_SYMBOLS; break; case 'v': if (XEN_HYPER_MODE()) option_not_supported(c); check_stack_overflow(); return; case 'p': if (LIVE()) error(FATAL, "-p option not supported on a live system or live dump\n"); if (!tt->panic_task) error(FATAL, "no panic task found!\n"); panic++; break; default: argerrs++; if (optopt == 'D') { fprintf(fp, FRAMESIZE_DEBUG_MESSAGE); return; } break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (bt->flags & BT_FRAMESIZE_DEBUG) { if (machdep->flags & FRAMESIZE_DEBUG) { while (args[optind]) { if (!hook.eip) hook.eip = convert(args[optind], FAULT_ON_ERROR, NULL, NUM_HEX|NUM_EXPR); else { fprintf(fp, FRAMESIZE_DEBUG_MESSAGE); return; } optind++; } machdep->back_trace(bt); return; } error(FATAL, "framesize debug not available\n"); } BCOPY(bt, &bt_setup, sizeof(struct bt_info)); if (bt->flags & BT_EFRAME_SEARCH2) { tc = CURRENT_CONTEXT(); /* borrow stack */ BT_SETUP(tc); if (bt->flags & BT_CPUMASK) { make_cpumask(arg_buf, cpus, FAULT_ON_ERROR, NULL); bt->cpumask = cpus; } back_trace(bt); return; } if (XEN_HYPER_MODE()) { #ifdef XEN_HYPERVISOR_ARCH /* "task" means vcpu for xen hypervisor */ if (active) { for (c = 0; c < XEN_HYPER_MAX_CPUS(); c++) { if (!xen_hyper_test_pcpu_id(c)) continue; fake_tc.task = 
xen_hyper_pcpu_to_active_vcpu(c); BT_SETUP(&fake_tc); if (!BT_REFERENCE_CHECK(bt)) xen_hyper_print_bt_header(fp, fake_tc.task, subsequent++); back_trace(bt); } } else { if (args[optind]) { fake_tc.task = xen_hyper_pcpu_to_active_vcpu( convert(args[optind], 0, NULL, NUM_DEC | NUM_HEX)); } else { fake_tc.task = XEN_HYPER_VCPU_LAST_CONTEXT()->vcpu; } BT_SETUP(&fake_tc); if (!BT_REFERENCE_CHECK(bt)) xen_hyper_print_bt_header(fp, fake_tc.task, 0); back_trace(bt); } return; #else error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED); #endif } if (bt->flags & BT_CPUMASK) { if (LIVE()) error(FATAL, "-c option not supported on a live system or live dump\n"); if (bt->flags & BT_THREAD_GROUP) error(FATAL, "-c option cannot be used with the -g option\n"); make_cpumask(arg_buf, cpus, FAULT_ON_ERROR, NULL); for (i = 0; i < kt->cpus; i++) { if (NUM_IN_BITMAP(cpus, i)) { if (hide_offline_cpu(i)) { error(INFO, "%sCPU %d is OFFLINE.\n", subsequent++ ? "\n" : "", i); continue; } if ((task = get_active_task(i))) tc = task_to_context(task); else error(FATAL, "cannot determine active task on cpu %ld\n", i); DO_TASK_BACKTRACE(); } } FREEBUF(cpus); return; } if (active) { if (LIVE()) error(FATAL, "-%c option not supported on a live system or live dump\n", bt->flags & BT_SHOW_ALL_REGS ? 
'A' : 'a'); if (bt->flags & BT_THREAD_GROUP) error(FATAL, "-a option cannot be used with the -g option\n"); for (c = 0; c < NR_CPUS; c++) { if (setjmp(pc->foreach_loop_env)) { pc->flags &= ~IN_FOREACH; free_all_bufs(); continue; } if ((tc = task_to_context(tt->panic_threads[c]))) { pc->flags |= IN_FOREACH; DO_TASK_BACKTRACE(); pc->flags &= ~IN_FOREACH; } } return; } if (!args[optind]) { if (CURRENT_PID() && (bt->flags & BT_THREAD_GROUP)) { tgid = task_tgid(CURRENT_TASK()); DO_THREAD_GROUP_BACKTRACE(); } else { if (panic) tc = task_to_context(tt->panic_task); else tc = CURRENT_CONTEXT(); DO_TASK_BACKTRACE(); } return; } while (args[optind]) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) { if (tc->pid && (bt->flags & BT_THREAD_GROUP)) { tgid = task_tgid(tc->task); DO_THREAD_GROUP_BACKTRACE(); break; } else if (tc->tc_next) { if (setjmp(pc->foreach_loop_env)) { pc->flags &= ~IN_FOREACH; free_all_bufs(); continue; } pc->flags |= IN_FOREACH; DO_TASK_BACKTRACE(); pc->flags &= ~IN_FOREACH; } else DO_TASK_BACKTRACE(); } break; case STR_TASK: if (tc->pid && (bt->flags & BT_THREAD_GROUP)) { tgid = task_tgid(value); DO_THREAD_GROUP_BACKTRACE(); } else DO_TASK_BACKTRACE(); break; case STR_INVALID: error(INFO, "%sinvalid task or pid value: %s\n", subsequent++ ? "\n" : "", args[optind]); break; } optind++; } } void print_stack_text_syms(struct bt_info *bt, ulong esp, ulong eip) { ulong next_sp, next_pc; int i; ulong *up; struct load_module *lm; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (bt->flags & BT_TEXT_SYMBOLS) { if (!(bt->flags & BT_TEXT_SYMBOLS_ALL)) fprintf(fp, "%sSTART: %s at %lx\n", space(VADDR_PRLEN > 8 ? 14 : 6), bt->flags & BT_SYMBOL_OFFSET ? 
value_to_symstr(eip, buf2, bt->radix) : closest_symbol(eip), eip); } if (bt->hp) bt->hp->eip = bt->hp->esp = 0; next_pc = next_sp = 0; for (i = (esp - bt->stackbase)/sizeof(ulong); i < LONGS_PER_STACK; i++) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); if (is_kernel_text_offset(*up)) { if (!next_pc) next_pc = *up; else if (!next_sp) next_sp = bt->stackbase + (i * sizeof(long)); } if (is_kernel_text(*up) && (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT))) { if (bt->flags & (BT_ERROR_MASK|BT_TEXT_SYMBOLS)) { fprintf(fp, " %s[%s] %s at %lx", bt->flags & BT_ERROR_MASK ? " " : "", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(bt->stackbase + (i * sizeof(long)))), bt->flags & BT_SYMBOL_OFFSET ? value_to_symstr(*up, buf2, bt->radix) : closest_symbol(*up), *up); if (module_symbol(*up, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); } else fprintf(fp, "%lx: %s\n", bt->stackbase + (i * sizeof(long)), value_to_symstr(*up, buf1, 0)); } } if (bt->hp) { bt->hp->eip = next_pc; bt->hp->esp = next_sp; } } int in_alternate_stack(int cpu, ulong address) { if (cpu >= NR_CPUS) return FALSE; if (machdep->in_alternate_stack) if (machdep->in_alternate_stack(cpu, address)) return TRUE; if (tt->flags & IRQSTACKS) { if (in_irq_ctx(BT_SOFTIRQ, cpu, address) || in_irq_ctx(BT_HARDIRQ, cpu, address)) return TRUE; } return FALSE; } /* * Gather the EIP, ESP and stack address for the target task, and passing * them on to the machine-specific back trace command. 
*/ void back_trace(struct bt_info *bt) { int i; ulong *up; char buf[BUFSIZE]; ulong eip, esp; struct bt_info btsave = { 0 }; if (bt->flags & BT_RAW) { if (bt->hp && bt->hp->esp) esp = bt->hp->esp; else esp = GET_STACKBASE(bt->task); raw_stack_dump(esp, STACKSIZE()); return; } if (LIVE() && !(bt->flags & BT_EFRAME_SEARCH) && is_task_active(bt->task)) { if (BT_REFERENCE_CHECK(bt) || bt->flags & (BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) return; if (!(bt->flags & (BT_KSTACKP|BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL))) fprintf(fp, "(active)\n"); if (!(bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_ALL) || REMOTE_PAUSED())) return; } if (bt->stackbase == 0) { fprintf(fp, "(no stack)\n"); return; } fill_stackbuf(bt); if (CRASHDEBUG(4)) { for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++) { if (is_kernel_text(*up)) fprintf(fp, "%lx: %s\n", tt->flags & THREAD_INFO ? bt->tc->thread_info + (i * sizeof(long)) : bt->task + (i * sizeof(long)), value_to_symstr(*up, buf, 0)); } } if (BT_REFERENCE_CHECK(bt)) { if (can_eval(bt->ref->str)) { bt->ref->hexval = eval(bt->ref->str, FAULT_ON_ERROR, NULL); bt->ref->cmdflags |= BT_REF_HEXVAL; } else if (hexadecimal(bt->ref->str, 0)) { bt->ref->hexval = htol(bt->ref->str, FAULT_ON_ERROR, NULL); bt->ref->cmdflags |= BT_REF_HEXVAL; } else bt->ref->cmdflags |= BT_REF_SYMBOL; } if (bt->flags & BT_EFRAME_SEARCH) { machdep->eframe_search(bt); return; } if (bt->hp) { if (bt->hp->esp && !INSTACK(bt->hp->esp, bt) && !in_alternate_stack(bt->tc->processor, bt->hp->esp)) error(FATAL, "non-process stack address for this task: %lx\n" " (valid range: %lx - %lx)\n", bt->hp->esp, bt->stackbase, bt->stacktop); eip = bt->hp->eip; esp = bt->hp->esp; machdep->get_stack_frame(bt, eip ? NULL : &eip, esp ? 
NULL : &esp); if (in_irq_ctx(BT_HARDIRQ, bt->tc->processor, esp)) { bt->stackbase = tt->hardirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_HARDIRQ; } else if (in_irq_ctx(BT_SOFTIRQ, bt->tc->processor, esp)) { bt->stackbase = tt->softirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_SOFTIRQ; } } else if (XEN_HYPER_MODE()) machdep->get_stack_frame(bt, &eip, &esp); else if (NETDUMP_DUMPFILE()) get_netdump_regs(bt, &eip, &esp); else if (KDUMP_DUMPFILE()) get_kdump_regs(bt, &eip, &esp); else if (DISKDUMP_DUMPFILE()) get_diskdump_regs(bt, &eip, &esp); else if (KVMDUMP_DUMPFILE()) get_kvmdump_regs(bt, &eip, &esp); else if (LKCD_DUMPFILE()) get_lkcd_regs(bt, &eip, &esp); else if (XENDUMP_DUMPFILE()) get_xendump_regs(bt, &eip, &esp); else if (SADUMP_DUMPFILE()) get_sadump_regs(bt, &eip, &esp); else if (VMSS_DUMPFILE()) get_vmware_vmss_regs(bt, &eip, &esp); else if (REMOTE_PAUSED()) { if (!is_task_active(bt->task) || !get_remote_regs(bt, &eip, &esp)) machdep->get_stack_frame(bt, &eip, &esp); } else machdep->get_stack_frame(bt, &eip, &esp); /* skip idle task stack */ if (bt->flags & BT_SKIP_IDLE) return; if (bt->flags & BT_KSTACKP) { bt->stkptr = esp; return; } if (ACTIVE() && !INSTACK(esp, bt)) { if (!LOCAL_ACTIVE()) { error(INFO, "task no longer exists\n"); return; } sprintf(buf, "/proc/%ld", bt->tc->pid); if (!file_exists(buf, NULL)) error(INFO, "task no longer exists\n"); else error(INFO, "invalid/stale stack pointer for this task: %lx\n", esp); return; } if (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { if (bt->flags & BT_TEXT_SYMBOLS_ALL) { esp = bt->stackbase + ((tt->flags & THREAD_INFO) ? 
SIZE(thread_info) : SIZE(task_struct)); eip = 0; } if (machdep->flags & MACHDEP_BT_TEXT) { bt->instptr = eip; bt->stkptr = esp; machdep->back_trace(bt); } else print_stack_text_syms(bt, esp, eip); if (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) { struct bt_info btloc; struct stack_hook stack_hook; BZERO(&btloc, sizeof(struct bt_info)); BZERO(&stack_hook, sizeof(struct stack_hook)); btloc.flags = bt->flags & ~(BT_HARDIRQ|BT_SOFTIRQ); btloc.hp = &stack_hook; btloc.tc = bt->tc; btloc.task = bt->task; btloc.stackbase = GET_STACKBASE(bt->task); btloc.stacktop = GET_STACKTOP(bt->task); switch (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) { case BT_HARDIRQ: if (kernel_symbol_exists("hardirq_stack") && STRUCT_EXISTS("irq_stack")) { btloc.hp->eip = symbol_value("handle_irq"); btloc.hp->esp = ULONG(bt->stackbuf); } else { btloc.hp->eip = symbol_value("do_IRQ"); if (symbol_exists("__do_IRQ")) btloc.hp->esp = ULONG(bt->stackbuf + OFFSET(thread_info_previous_esp)); else btloc.hp->esp = ULONG(bt->stackbuf + SIZE(irq_ctx) - (sizeof(char *)*2)); } fprintf(fp, "--- ---\n"); if (in_irq_ctx(BT_SOFTIRQ, bt->tc->processor, btloc.hp->esp)) { btloc.flags |= BT_SOFTIRQ; btloc.stackbase = tt->softirq_ctx[bt->tc->processor]; btloc.stacktop = btloc.stackbase + STACKSIZE(); } break; case BT_SOFTIRQ: btloc.hp->eip = symbol_value("do_softirq"); if (kernel_symbol_exists("softirq_stack") && STRUCT_EXISTS("irq_stack")) { if (kernel_symbol_exists("do_softirq_own_stack")) btloc.hp->eip = symbol_value("do_softirq_own_stack"); btloc.hp->esp = ULONG(bt->stackbuf); } else btloc.hp->esp = ULONG(bt->stackbuf + OFFSET(thread_info_previous_esp)); fprintf(fp, "--- ---\n"); break; } back_trace(&btloc); } return; } bt->instptr = eip; bt->stkptr = esp; complete_trace: if (BT_REFERENCE_CHECK(bt)) BCOPY(bt, &btsave, sizeof(struct bt_info)); if (CRASHDEBUG(4)) dump_bt_info(bt, "back_trace"); machdep->back_trace(bt); if ((bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) && restore_stack(bt)) goto complete_trace; if (BT_REFERENCE_FOUND(bt)) { 
#ifdef XEN_HYPERVISOR_ARCH if (XEN_HYPER_MODE()) xen_hyper_print_bt_header(fp, bt->task, 0); else print_task_header(fp, task_to_context(bt->task), 0); #else print_task_header(fp, task_to_context(bt->task), 0); #endif /* XEN_HYPERVISOR_ARCH */ BCOPY(&btsave, bt, sizeof(struct bt_info)); bt->ref = NULL; machdep->back_trace(bt); fprintf(fp, "\n"); } } /* * Restore a bt_info to make the jump from an IRQ stack to the task's * normal stack. */ static int restore_stack(struct bt_info *bt) { ulonglong type; struct syment *sp; ulong retvaddr; bt->instptr = bt->stkptr = 0; type = 0; switch (bt->flags & (BT_HARDIRQ|BT_SOFTIRQ)) { case BT_HARDIRQ: if (kernel_symbol_exists("hardirq_stack") && STRUCT_EXISTS("irq_stack")) { bt->instptr = symbol_value("handle_irq"); bt->stkptr = ULONG(bt->stackbuf); } else { retvaddr = ULONG(bt->stackbuf + SIZE(irq_ctx) - sizeof(char *)); if ((sp = value_search(retvaddr, NULL)) && STREQ(sp->name, "do_IRQ")) bt->instptr = retvaddr; else bt->instptr = symbol_value("do_IRQ"); if (symbol_exists("__do_IRQ")) bt->stkptr = ULONG(bt->stackbuf + OFFSET(thread_info_previous_esp)); else bt->stkptr = ULONG(bt->stackbuf + SIZE(irq_ctx) - (sizeof(char *)*2)); } type = BT_HARDIRQ; break; case BT_SOFTIRQ: if (kernel_symbol_exists("softirq_stack") && STRUCT_EXISTS("irq_stack")) { if (kernel_symbol_exists("do_softirq_own_stack")) bt->instptr = symbol_value("do_softirq_own_stack"); else bt->instptr = symbol_value("do_softirq"); bt->stkptr = ULONG(bt->stackbuf); } else { retvaddr = ULONG(bt->stackbuf + SIZE(irq_ctx) - sizeof(char *)); if ((sp = value_search(retvaddr, NULL)) && STREQ(sp->name, "do_softirq")) bt->instptr = retvaddr; else bt->instptr = symbol_value("do_softirq"); bt->stkptr = ULONG(bt->stackbuf + OFFSET(thread_info_previous_esp)); } type = BT_SOFTIRQ; break; } if ((type == BT_HARDIRQ) && bt->instptr && in_irq_ctx(BT_SOFTIRQ, bt->tc->processor, bt->stkptr)) { bt->flags &= ~BT_HARDIRQ; bt->flags |= BT_SOFTIRQ; bt->stackbase = 
tt->softirq_ctx[bt->tc->processor];
		bt->stacktop = bt->stackbase + STACKSIZE();
		/* Pull the softirq stack contents into the stack buffer. */
		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    "restore softirq_ctx stack", RETURN_ON_ERROR)) {
			error(INFO, "read of softirq stack at %lx failed\n",
				bt->stackbase);
			type = 0;
		}
	} else {
		/* Jump back from the IRQ stack to the task's normal stack. */
		bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ);
		bt->stackbase = GET_STACKBASE(bt->tc->task);
		bt->stacktop = GET_STACKTOP(bt->tc->task);
		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    "restore_stack contents", RETURN_ON_ERROR)) {
			error(INFO, "restore_stack of stack at %lx failed\n",
				bt->stackbase);
			type = 0;
		}
		/* Both a restart eip and an in-stack esp are required. */
		if (!(bt->instptr && INSTACK(bt->stkptr, bt)))
			type = 0;
	}

	if (type) {
		if (!BT_REFERENCE_CHECK(bt))
			fprintf(fp, "--- %s ---\n",
				type == BT_HARDIRQ ? "" : "");
		return TRUE;
	}

	return FALSE;
}

#define MAXHOOKS (100)

/*
 *  Run a BT_TEXT_SYMBOLS_PRINT backtrace into a scratch tmpfile and
 *  parse each "esp: eip" line of its output into an array of
 *  stack_hook entries.  Returns the hook array (also cached in
 *  bt->textlist), or NULL if no usable text symbols were found.
 */
struct stack_hook *
gather_text_list(struct bt_info *bt)
{
	int cnt;
	struct bt_info btloc;
	char buf[BUFSIZE], *p1;
	struct stack_hook *hooks;
	ulong esp, eip;
	FILE *savedfp;

	BCOPY(bt, &btloc, sizeof(struct bt_info));
	hooks = (struct stack_hook *)GETBUF(sizeof(struct stack_hook)*MAXHOOKS);
	cnt = 0;

	/* Redirect command output into tmpfile2 so it can be re-read below. */
	savedfp = fp;
	open_tmpfile2();
	fp = pc->tmpfile2;
	btloc.flags = BT_TEXT_SYMBOLS_PRINT;
	back_trace(&btloc);
	rewind(pc->tmpfile2);
	while (fgets(buf, BUFSIZE, pc->tmpfile2)) {
		if ((p1 = strstr(buf, ":"))) {
			esp = eip = 0;
			*p1 = NULLCHAR;
			/* The text before the colon is the stack address. */
			if (((esp = htol(buf, RETURN_ON_ERROR, NULL)) !=
			    BADADDR) && INSTACK(esp, bt))
				eip = GET_STACK_ULONG(esp);
			if (esp && eip) {
				hooks[cnt].esp = esp;
				hooks[cnt].eip = eip;
				if (++cnt == MAXHOOKS)
					break;
			}
		}
	}
	close_tmpfile2();
	fp = savedfp;

	if (cnt)
		return (bt->textlist = hooks);
	else {
		FREEBUF(hooks);
		return (bt->textlist = NULL);
	}
}

/*
 *  Debug routine most likely useful from above in back_trace()
 */
void
dump_bt_info(struct bt_info *bt, char *where)
{
	fprintf(fp, "[%lx] %s:\n", (ulong)bt, where);
	fprintf(fp, " task: %lx\n", bt->task);
	fprintf(fp, " flags: %llx\n", bt->flags);
	fprintf(fp, " instptr: %lx\n",
bt->instptr);
	fprintf(fp, " stkptr: %lx\n", bt->stkptr);
	fprintf(fp, " bptr: %lx\n", bt->bptr);
	fprintf(fp, " stackbase: %lx\n", bt->stackbase);
	fprintf(fp, " stacktop: %lx\n", bt->stacktop);
	fprintf(fp, " tc: %lx ", (ulong)bt->tc);
	if (bt->tc)
		fprintf(fp, "(%ld, %lx)\n", bt->tc->pid, bt->tc->task);
	else
		fprintf(fp, "(unknown context)\n");
	fprintf(fp, " hp: %lx\n", (ulong)bt->hp);
	fprintf(fp, " ref: %lx\n", (ulong)bt->ref);
	fprintf(fp, " stackbuf: %lx\n", (ulong)bt->stackbuf);
	fprintf(fp, " textlist: %lx\n", (ulong)bt->textlist);
	fprintf(fp, " frameptr: %lx\n", (ulong)bt->frameptr);
	fprintf(fp, " call_target: %s\n",
		bt->call_target ? bt->call_target : "none");
	fprintf(fp, " eframe_ip: %lx\n", bt->eframe_ip);
	fprintf(fp, " debug: %lx\n", bt->debug);
	fprintf(fp, " radix: %ld\n", bt->radix);
	fprintf(fp, " cpumask: %lx\n", (ulong)bt->cpumask);
}

/*
 *  LKCD doesn't save state of the active tasks in the TSS, so poke around
 *  the raw stack for some reasonable hooks.
 */
static void
get_lkcd_regs(struct bt_info *bt, ulong *eip, ulong *esp)
{
	int i;
	char *sym;
	ulong *up;
	ulong sysrq_eip, sysrq_esp;

	/* Not an active task -- use the machine-dependent frame getter. */
	if (!is_task_active(bt->task)) {
		machdep->get_stack_frame(bt, eip, esp);
		return;
	}

	/* try to get it from the header */
	if (get_lkcd_regs_for_cpu(bt, eip, esp) == 0)
		return;

	/* if that fails: do guessing */
	sysrq_eip = sysrq_esp = 0;

	/*
	 * Scan the raw stack words for return addresses that land in
	 * known panic/dump entry points.
	 */
	for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){
		sym = closest_symbol(*up);
		if (STREQ(sym, "dump_execute") && INSTACK(*(up-1), bt)) {
			*eip = *up;
			*esp = *(up-1);
			return;
		}
		/* Begin 3PAR change -- required for our panic path */
		if (STREQ(sym, "dump_ipi") && INSTACK(*(up-1), bt)) {
			*eip = *up;
			*esp = *(up-1);
			return;
		}
		/* End 3PAR change */
		if (STREQ(sym, "panic") && INSTACK(*(up-1), bt)) {
			*eip = *up;
			*esp = *(up-1);
			return;
		}
		/* Egenera */
		if (STREQ(sym, "netdump_ipi")) {
			*eip = *up;
			*esp = bt->task +
				((char *)(up-1) - bt->stackbuf);
			return;
		}
		if (STREQ(sym, "dump_execute")) {
			*eip = *up;
			*esp = bt->stackbase +
				((char *)(up) -
bt->stackbuf);
			return;
		}
		if (STREQ(sym, "vmdump_nmi_callback")) {
			*eip = *up;
			*esp = bt->stackbase +
				((char *)(up) - bt->stackbuf);
			return;
		}
		if (STREQ(sym, "smp_stop_cpu_interrupt")) {
			*eip = *up;
			*esp = bt->task +
				((char *)(up-1) - bt->stackbuf);
			return;
		}
		if (STREQ(sym, "stop_this_cpu")) {
			*eip = *up;
			*esp = bt->task +
				((char *)(up-1) - bt->stackbuf);
			return;
		}
		/*
		 * Remember a sysrq hook but keep scanning, in case a
		 * better (panic/dump) frame shows up later in the stack.
		 */
		if (SYSRQ_TASK(bt->task) &&
		    STREQ(sym, "smp_call_function_interrupt")) {
			sysrq_eip = *up;
			sysrq_esp = bt->task +
				((char *)(up-1) - bt->stackbuf);
		}
	}

	if (sysrq_eip) {
		*eip = sysrq_eip;
		*esp = sysrq_esp;
		return;
	}

	/* Nothing recognizable on the raw stack -- use the default. */
	machdep->get_stack_frame(bt, eip, esp);
}

/*
 *  Dispatch to the appropriate dumpfile-specific routine to obtain the
 *  starting eip/esp hooks for a backtrace, falling back to the
 *  machine-dependent get_stack_frame().  The results are also cached
 *  in bt->instptr and bt->stkptr.
 */
void
get_dumpfile_regs(struct bt_info *bt, ulong *eip, ulong *esp)
{
	bt->flags |= BT_NO_PRINT_REGS;

	if (NETDUMP_DUMPFILE())
		get_netdump_regs(bt, eip, esp);
	else if (KDUMP_DUMPFILE())
		get_kdump_regs(bt, eip, esp);
	else if (DISKDUMP_DUMPFILE())
		get_diskdump_regs(bt, eip, esp);
	else if (KVMDUMP_DUMPFILE())
		get_kvmdump_regs(bt, eip, esp);
	else if (LKCD_DUMPFILE())
		get_lkcd_regs(bt, eip, esp);
	else if (XENDUMP_DUMPFILE())
		get_xendump_regs(bt, eip, esp);
	else if (SADUMP_DUMPFILE())
		get_sadump_regs(bt, eip, esp);
	else if (VMSS_DUMPFILE())
		get_vmware_vmss_regs(bt, eip, esp);
	else if (REMOTE_PAUSED()) {
		if (!is_task_active(bt->task) || !get_remote_regs(bt, eip, esp))
			machdep->get_stack_frame(bt, eip, esp);
	} else
		machdep->get_stack_frame(bt, eip, esp);

	bt->flags &= ~BT_NO_PRINT_REGS;

	bt->instptr = *eip;
	bt->stkptr = *esp;
}

/*
 *  Store the head of the kernel module list for future use.
 *  Count the number of symbols defined by all modules in the system,
 *  and pass it on to store_module_symbols() to deal with.
*/ void module_init(void) { int i, c; ulong size, mod, mod_next; uint nsyms; ulong total, numksyms; char *modbuf, *kallsymsbuf; ulong kallsyms_header; struct syment *sp, *sp_array[10]; struct kernel_list_head list; int modules_found; if (kernel_symbol_exists("module_list")) kt->flags |= KMOD_V1; else if (kernel_symbol_exists("modules")) kt->flags |= KMOD_V2; else error(WARNING, "cannot determine how modules are linked\n"); if (kt->flags & NO_MODULE_ACCESS || !(kt->flags & (KMOD_V1|KMOD_V2))) { error(WARNING, "no kernel module access\n\n"); kt->module_list = 0; kt->mods_installed = 0; return; } STRUCT_SIZE_INIT(module, "module"); MEMBER_OFFSET_INIT(module_name, "module", "name"); MEMBER_OFFSET_INIT(module_syms, "module", "syms"); mod_next = nsyms = 0; switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V1: MEMBER_OFFSET_INIT(module_size_of_struct, "module", "size_of_struct"); MEMBER_OFFSET_INIT(module_next, "module", "next"); MEMBER_OFFSET_INIT(module_nsyms, "module", "nsyms"); MEMBER_OFFSET_INIT(module_size, "module", "size"); MEMBER_OFFSET_INIT(module_flags, "module", "flags"); get_symbol_data("module_list", sizeof(ulong), &kt->module_list); kt->kernel_module = symbol_value("kernel_module"); break; case KMOD_V2: MEMBER_OFFSET_INIT(module_num_syms, "module", "num_syms"); MEMBER_OFFSET_INIT(module_list, "module", "list"); MEMBER_OFFSET_INIT(module_gpl_syms, "module", "gpl_syms"); MEMBER_OFFSET_INIT(module_num_gpl_syms, "module", "num_gpl_syms"); if (MEMBER_EXISTS("module", "mem")) { /* 6.4 and later */ kt->flags2 |= KMOD_MEMORY; /* MODULE_MEMORY() can be used. 
*/ MEMBER_OFFSET_INIT(module_mem, "module", "mem"); MEMBER_OFFSET_INIT(module_memory_base, "module_memory", "base"); MEMBER_OFFSET_INIT(module_memory_size, "module_memory", "size"); STRUCT_SIZE_INIT(module_memory, "module_memory"); if (CRASHDEBUG(1)) error(INFO, "struct module_memory detected.\n"); if (get_array_length("module.mem", NULL, 0) != MOD_MEM_NUM_TYPES) error(WARNING, "module memory types have changed!\n"); } else if (MEMBER_EXISTS("module", "module_core")) { MEMBER_OFFSET_INIT(module_core_size, "module", "core_size"); MEMBER_OFFSET_INIT(module_init_size, "module", "init_size"); MEMBER_OFFSET_INIT(module_core_text_size, "module", "core_text_size"); MEMBER_OFFSET_INIT(module_init_text_size, "module", "init_text_size"); MEMBER_OFFSET_INIT(module_module_core, "module", "module_core"); MEMBER_OFFSET_INIT(module_module_init, "module", "module_init"); } else if (MEMBER_EXISTS("module", "module_core_rx")) { if (CRASHDEBUG(1)) error(INFO, "PaX module layout detected.\n"); kt->flags2 |= KMOD_PAX; MEMBER_OFFSET_INIT(module_core_size_rw, "module", "core_size_rw"); MEMBER_OFFSET_INIT(module_core_size_rx, "module", "core_size_rx"); MEMBER_OFFSET_INIT(module_init_size_rw, "module", "init_size_rw"); MEMBER_OFFSET_INIT(module_init_size_rx, "module", "init_size_rx"); MEMBER_OFFSET_INIT(module_module_core_rw, "module", "module_core_rw"); MEMBER_OFFSET_INIT(module_module_core_rx, "module", "module_core_rx"); MEMBER_OFFSET_INIT(module_module_init_rw, "module", "module_init_rw"); MEMBER_OFFSET_INIT(module_module_init_rx, "module", "module_init_rx"); } else if (MEMBER_EXISTS("module_layout", "base_rx")) { if (CRASHDEBUG(1)) error(INFO, "PaX module layout detected.\n"); kt->flags2 |= KMOD_PAX; ASSIGN_OFFSET(module_core_size_rw) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "size_rw"); ASSIGN_OFFSET(module_core_size_rx) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "size_rx"); ASSIGN_OFFSET(module_init_size_rw) = 
MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "size_rw"); ASSIGN_OFFSET(module_init_size_rx) = MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "size_rx"); ASSIGN_OFFSET(module_module_core_rw) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "base_rw"); ASSIGN_OFFSET(module_module_core_rx) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "base_rx"); ASSIGN_OFFSET(module_module_init_rw) = MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "base_rw"); ASSIGN_OFFSET(module_module_init_rx) = MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "base_rx"); } else { ASSIGN_OFFSET(module_core_size) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "size"); ASSIGN_OFFSET(module_init_size) = MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "size"); ASSIGN_OFFSET(module_core_text_size) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "text_size"); ASSIGN_OFFSET(module_init_text_size) = MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "text_size"); ASSIGN_OFFSET(module_module_core) = MEMBER_OFFSET("module", "core_layout") + MEMBER_OFFSET("module_layout", "base"); ASSIGN_OFFSET(module_module_init) = MEMBER_OFFSET("module", "init_layout") + MEMBER_OFFSET("module_layout", "base"); } MEMBER_OFFSET_INIT(module_percpu, "module", "percpu"); /* * Make sure to pick the kernel "modules" list_head symbol, * not to be confused with the ia64/sn "modules[]" array. * The kernel modules list_head will either point to itself * (empty) or contain vmalloc'd module addresses; the ia64/sn * modules array contains a list of kmalloc'd addresses. 
*/ if ((c = get_syment_array("modules", sp_array, 10)) > 1) { modules_found = FALSE; for (i = 0; i < c; i++) { sp = sp_array[i]; if (!readmem(sp->value, KVADDR, &list, sizeof(struct kernel_list_head), "modules list_head test", RETURN_ON_ERROR|QUIET)) continue; if ((ulong)list.next == symbol_value("modules")) { kt->mods_installed = 0; return; } if (IS_VMALLOC_ADDR((ulong)list.next) && IS_VMALLOC_ADDR((ulong)list.prev)) { kt->kernel_module = sp->value; kt->module_list = (ulong)list.next; modules_found = TRUE; break; } } if (!modules_found) { error(WARNING, "cannot determine which of %d \"modules\" symbols is appropriate\n\n", c); kt->mods_installed = 0; kt->flags |= NO_MODULE_ACCESS; return; } } else { get_symbol_data("modules", sizeof(ulong), &kt->module_list); if (kt->module_list == symbol_value("modules")) { kt->mods_installed = 0; return; } kt->kernel_module = symbol_value("modules"); } kt->module_list -= OFFSET(module_list); break; } total = kt->mods_installed = 0; modbuf = GETBUF(SIZE(module)); kallsymsbuf = kt->flags & KALLSYMS_V1 ? GETBUF(SIZE(kallsyms_header)) : NULL; please_wait("gathering module symbol data"); for (mod = kt->module_list; mod != kt->kernel_module; mod = mod_next) { if (CRASHDEBUG(3)) fprintf(fp, "module: %lx\n", mod); if (!readmem(mod, KVADDR, modbuf, SIZE(module), "module struct", RETURN_ON_ERROR|QUIET)) { error(WARNING, "%scannot access vmalloc'd module memory\n\n", DUMPFILE() ? 
"\n" : ""); kt->mods_installed = 0; kt->flags |= NO_MODULE_ACCESS; FREEBUF(modbuf); return; } switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V1: nsyms = UINT(modbuf + OFFSET(module_nsyms)); break; case KMOD_V2: nsyms = UINT(modbuf + OFFSET(module_num_syms)) + UINT(modbuf + OFFSET(module_num_gpl_syms)); break; } total += nsyms; total += 2; /* store the module's start/ending addresses */ total += 2; /* and the init start/ending addresses */ if (MODULE_MEMORY()) /* 7 regions at most -> 14, so needs +10 */ total += 10; /* * If the module has kallsyms, set up to grab them as well. */ switch (kt->flags & (KALLSYMS_V1|KALLSYMS_V2)) { case KALLSYMS_V1: kallsyms_header = ULONG(modbuf + OFFSET(module_kallsyms_start)); if (kallsyms_header) { if (!readmem(kallsyms_header, KVADDR, kallsymsbuf, SIZE(kallsyms_header), "kallsyms_header", RETURN_ON_ERROR|QUIET)) { error(WARNING, "%scannot access module kallsyms_header\n", DUMPFILE() ? "\n" : ""); } else { nsyms = UINT(kallsymsbuf + OFFSET(kallsyms_header_symbols)); total += nsyms; } } break; case KALLSYMS_V2: if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) { numksyms = UINT(modbuf + OFFSET(module_num_symtab)); if (MODULE_MEMORY()) { /* * The mem[MOD_TEXT].size may be zero, lets count * the module size as below. */ int t; size = 0; for_each_mod_mem_type(t) { if (t == MOD_INIT_TEXT) break; size += UINT(modbuf + OFFSET(module_mem) + SIZE(module_memory) * t + OFFSET(module_memory_size)); } } else size = UINT(modbuf + MODULE_OFFSET2(module_core_size, rx)); } else { numksyms = ULONG(modbuf + OFFSET(module_num_symtab)); size = ULONG(modbuf + MODULE_OFFSET2(module_core_size, rx)); } if (!size) { /* * Bail out here instead of a crashing with a * getbuf(0) failure during storage later on. 
*/ error(WARNING, "invalid kernel module size: 0\n"); kt->mods_installed = 0; kt->flags |= NO_MODULE_ACCESS; FREEBUF(modbuf); return; } total += numksyms; break; } kt->mods_installed++; NEXT_MODULE(mod_next, modbuf); } FREEBUF(modbuf); if (kallsymsbuf) FREEBUF(kallsymsbuf); switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V1: store_module_symbols_v1(total, kt->mods_installed); break; case KMOD_V2: if (MODULE_MEMORY()) store_module_symbols_6_4(total, kt->mods_installed); else store_module_symbols_v2(total, kt->mods_installed); break; } please_wait_done(); } /* * Verify that the current set of modules jives with what's stored. */ static int verify_modules(void) { int i, t; int found, irregularities; ulong mod, mod_next, mod_base; long mod_size; char *modbuf, *module_name; ulong module_list, mod_name; physaddr_t paddr; int mods_installed; struct load_module *lm; char buf[BUFSIZE]; if (DUMPFILE() || !kt->module_list || (kt->flags & NO_MODULE_ACCESS)) return TRUE; switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V1: get_symbol_data("module_list", sizeof(ulong), &module_list); break; case KMOD_V2: if (kt->module_list == symbol_value("modules")) { if (!kt->mods_installed) return TRUE; } get_symbol_data("modules", sizeof(ulong), &module_list); module_list -= OFFSET(module_list); break; } mods_installed = irregularities = 0; mod_base = mod_next = 0; modbuf = GETBUF(SIZE(module)); for (mod = module_list; mod != kt->kernel_module; mod = mod_next) { if (!readmem(mod, KVADDR, modbuf, SIZE(module), "module struct", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot access vmalloc'd module memory\n"); FREEBUF(modbuf); return FALSE; } for (i = 0, found = FALSE; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; if (lm->mod_base && !kvtop(NULL, lm->mod_base, &paddr, 0)) { irregularities++; break; } switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V1: mod_base = mod; break; case KMOD_V2: if (MODULE_MEMORY()) /* mem[MOD_TEXT].base */ mod_base = ULONG(modbuf + 
OFFSET(module_mem) + OFFSET(module_memory_base)); else mod_base = ULONG(modbuf + MODULE_OFFSET2(module_module_core, rx)); break; } if (lm->mod_base == mod_base) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V1: mod_name = ULONG(modbuf + OFFSET(module_name)); mod_size = LONG(modbuf + OFFSET(module_size)); if (!read_string(mod_name, buf, BUFSIZE-1) || !STREQ(lm->mod_name, buf) || (mod_size != lm->mod_size)){ irregularities++; goto irregularity; } break; case KMOD_V2: module_name = modbuf + OFFSET(module_name); if (MODULE_MEMORY()) { mod_size = 0; for_each_mod_mem_type(t) { if (t == MOD_INIT_TEXT) break; mod_size += UINT(modbuf + OFFSET(module_mem) + SIZE(module_memory) * t + OFFSET(module_memory_size)); } } else if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) mod_size = UINT(modbuf + MODULE_OFFSET2(module_core_size, rx)); else mod_size = ULONG(modbuf + MODULE_OFFSET2(module_core_size, rx)); if (strlen(module_name) < MAX_MOD_NAME) strcpy(buf, module_name); else strncpy(buf, module_name, MAX_MOD_NAME-1); if (!STREQ(lm->mod_name, buf) || (mod_size != lm->mod_size)) { irregularities++; goto irregularity; } break; } found = TRUE; irregularity: break; } } if (!found || irregularities) return FALSE; mods_installed++; NEXT_MODULE(mod_next, modbuf); } FREEBUF(modbuf); if (mods_installed != kt->mods_installed) return FALSE; return TRUE; } /* * With no arguments, just dump basic data concerning each of the * currently-loaded modules. The -s and -S arguments dynamically * loads module symbols from its object file. 
*/ #define LIST_MODULE_HDR (0) #define LIST_MODULE (1) #define LOAD_ALL_MODULE_SYMBOLS (2) #define LOAD_SPECIFIED_MODULE_SYMBOLS (3) #define DELETE_MODULE_SYMBOLS (4) #define DELETE_ALL_MODULE_SYMBOLS (5) #define REMOTE_MODULE_SAVE_MSG (6) #define REINIT_MODULES (7) #define LIST_ALL_MODULE_TAINT (8) void cmd_mod(void) { int c, ctmp; char *p, *objfile, *modref, *tree, *symlink; ulong flag, address; char buf[BUFSIZE]; if (kt->flags & NO_MODULE_ACCESS) error(FATAL, "cannot access vmalloc'd module memory\n"); if (!verify_modules()) { error(NOTE, "modules have changed on this system -- reinitializing\n"); reinit_modules(); } if (!kt->mods_installed) { fprintf(fp, "no modules installed\n"); return; } for (c = 1, p = NULL; c < argcnt; c++) { if (args[c][0] != '-') continue; if (STREQ(args[c], "-g")) { ctmp = c; pc->curcmd_flags |= MOD_SECTIONS; while (ctmp < argcnt) { args[ctmp] = args[ctmp+1]; ctmp++; } argcnt--; c--; } else if (STREQ(args[c], "-r")) { ctmp = c; pc->curcmd_flags |= MOD_READNOW; while (ctmp < argcnt) { args[ctmp] = args[ctmp+1]; ctmp++; } argcnt--; c--; } else { if ((p = strstr(args[c], "g"))) { pc->curcmd_flags |= MOD_SECTIONS; shift_string_left(p, 1); } if ((p = strstr(args[c], "r"))) { pc->curcmd_flags |= MOD_READNOW; shift_string_left(p, 1); } /* if I've removed everything but the '-', toss it */ if (STREQ(args[c], "-")) { ctmp = c; while (ctmp < argcnt) { args[ctmp] = args[ctmp+1]; ctmp++; } argcnt--; c--; } } } if (pc->flags & READNOW) pc->curcmd_flags |= MOD_READNOW; modref = objfile = tree = symlink = NULL; address = 0; flag = LIST_MODULE_HDR; while ((c = getopt(argcnt, args, "Rd:Ds:Sot")) != EOF) { switch(c) { case 'R': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); flag = REINIT_MODULES; break; case 'D': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); flag = DELETE_ALL_MODULE_SYMBOLS; break; case 'd': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); else flag = DELETE_MODULE_SYMBOLS; if (hexadecimal(optarg, 0) && (strlen(optarg) == VADDR_PRLEN)) { address = 
htol(optarg, FAULT_ON_ERROR, NULL); if (!is_module_address(address, buf)) cmd_usage(pc->curcmd, SYNOPSIS); modref = buf; } else if (is_module_name(optarg, &address, NULL)) modref = optarg; else cmd_usage(pc->curcmd, SYNOPSIS); break; /* * Revert to using old-style add-symbol-file command * for KMOD_V2 kernels. */ case 'o': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); if (kt->flags & KMOD_V1) error(INFO, "-o option is not applicable to this kernel version\n"); st->flags |= USE_OLD_ADD_SYM; return; case 'S': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); else flag = LOAD_ALL_MODULE_SYMBOLS; break; case 's': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); else flag = LOAD_SPECIFIED_MODULE_SYMBOLS; if (hexadecimal(optarg, 0) && (strlen(optarg) == VADDR_PRLEN)) { address = htol(optarg, FAULT_ON_ERROR, NULL); if (!is_module_address(address, buf)) cmd_usage(pc->curcmd, SYNOPSIS); modref = buf; } else if (is_module_name(optarg, &address, NULL)) modref = optarg; else cmd_usage(pc->curcmd, SYNOPSIS); break; case 't': if (flag) cmd_usage(pc->curcmd, SYNOPSIS); else flag = LIST_ALL_MODULE_TAINT; break; default: argerrs++; break; } } if (tree && (flag != LOAD_ALL_MODULE_SYMBOLS)) argerrs++; if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (NO_MODULES()) { error(INFO, "no modules loaded in this kernel\n"); if (flag != LIST_MODULE_HDR) cmd_usage(pc->curcmd, SYNOPSIS); return; } switch (flag) { case LOAD_ALL_MODULE_SYMBOLS: switch (argcnt) { case 3: if (is_directory(args[2])) tree = args[2]; else { error(INFO, "%s is not a directory\n", args[2]); cmd_usage(pc->curcmd, SYNOPSIS); } break; case 2: break; default: cmd_usage(pc->curcmd, SYNOPSIS); } break; case LOAD_SPECIFIED_MODULE_SYMBOLS: switch (argcnt) { case 4: objfile = args[3]; if (!file_exists(objfile, NULL)) { if (!(objfile = find_module_objfile(modref, objfile, tree))) error(FATAL, "%s: cannot find or load object file: %s\n", modref, args[3]); } break; case 3: if (!(objfile = find_module_objfile(modref,NULL,tree))) error(FATAL, 
"cannot find or load object file for %s module\n",
				modref);
			break;
		default:
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		/* The module object must be an ELF file. */
		if (!is_elf_file(objfile)) {
			error(INFO, "%s: not an ELF format object file\n",
				objfile);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	default:
		break;
	}

	/* Fall back to the saved module_tree when no directory was given. */
	if ((flag == LOAD_ALL_MODULE_SYMBOLS) &&
	    (tree || kt->module_tree)) {
		if (!tree)
			tree = kt->module_tree;
	}

	do_module_cmd(flag, modref, address, objfile, tree);

	if (symlink)
		FREEBUF(symlink);
}

/*
 *  If the module object pathname contains "/lib/modules", look for a
 *  matching debuginfo file under "/usr/lib/debug" with a ".debug"
 *  suffix, and if one exists, hand that path back in gdb_buffer.
 *  Returns TRUE if such a file was found, FALSE otherwise.
 */
int
check_specified_module_tree(char *module, char *gdb_buffer)
{
	char *p1, *treebuf;
	int retval;

	retval = FALSE;

	/*
	 * Search for "/lib/modules" in the module name string
	 * and insert "/usr/lib/debug" there.
	 */
	if (strstr(module, "/lib/modules")) {
		treebuf = GETBUF(strlen(module) + strlen("/usr/lib/debug") +
			strlen(".debug") + 1);
		strcpy(treebuf, module);
		p1 = strstr(treebuf, "/lib/modules");
		shift_string_right(p1, strlen("/usr/lib/debug"));
		BCOPY("/usr/lib/debug", p1, strlen("/usr/lib/debug"));
		strcat(treebuf, ".debug");
		if (file_exists(treebuf, NULL)) {
			strcpy(gdb_buffer, treebuf);
			retval = TRUE;
		}
		FREEBUF(treebuf);
	}

	return retval;
}

/*
 *  List the tainted modules using the kernel's taint_flags[] array of
 *  taint_flag structures (present in newer kernels -- the _4_10 suffix
 *  presumably refers to the introducing kernel version; the caller
 *  selects this path when "taint_flags"/"taint_flag" exist).
 */
static void
show_module_taint_4_10(void)
{
	int i, j, bx;
	struct load_module *lm;
	int maxnamelen;
	int found;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	struct syment *sp;
	ulong *taintsp, taints;
	bool tnt_mod;
	char tnt_true;
	int tnts_len;
	ulong tnts_addr;
	char *modbuf;

	/* Initialize the taint_flag offsets once, on first use. */
	if (INVALID_MEMBER(module_taints)) {
		MEMBER_OFFSET_INIT(module_taints, "module", "taints");
		STRUCT_SIZE_INIT(taint_flag, "taint_flag");
		MEMBER_OFFSET_INIT(tnt_true, "taint_flag", "true");
		if (INVALID_MEMBER(tnt_true))
			MEMBER_OFFSET_INIT(tnt_true, "taint_flag", "c_true");
		MEMBER_OFFSET_INIT(tnt_mod, "taint_flag", "module");
	}

	modbuf = GETBUF(SIZE(module));

	/* First pass: count tainted modules and find the widest name. */
	for (i = found = maxnamelen = 0; i < kt->mods_installed; i++) {
		lm = &st->load_modules[i];
		readmem(lm->module_struct, KVADDR, modbuf, SIZE(module),
			"module struct", FAULT_ON_ERROR);
		if (MEMBER_SIZE("module", "taints") == sizeof(ulong))
			taints = ULONG(modbuf +
OFFSET(module_taints)); else taints = UINT(modbuf + OFFSET(module_taints)); if (taints) { found++; maxnamelen = strlen(lm->mod_name) > maxnamelen ? strlen(lm->mod_name) : maxnamelen; } } if (!found) { fprintf(fp, "no tainted modules\n"); FREEBUF(modbuf); return; } tnts_len = get_array_length("taint_flags", NULL, 0); sp = symbol_search("taint_flags"); tnts_addr = sp->value; fprintf(fp, "%s %s\n", mkstring(buf2, maxnamelen, LJUST, "NAME"), "TAINTS"); for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; bx = 0; buf1[0] = '\0'; readmem(lm->module_struct, KVADDR, modbuf, SIZE(module), "module struct", FAULT_ON_ERROR); if (MEMBER_SIZE("module", "taints") == sizeof(ulong)) taints = ULONG(modbuf + OFFSET(module_taints)); else taints = UINT(modbuf + OFFSET(module_taints)); if (!taints) continue; taintsp = &taints; for (j = 0; j < tnts_len; j++) { readmem((tnts_addr + j * SIZE(taint_flag)) + OFFSET(tnt_mod), KVADDR, &tnt_mod, sizeof(bool), "tnt mod", FAULT_ON_ERROR); if (!tnt_mod) continue; if (NUM_IN_BITMAP(taintsp, j)) { readmem((tnts_addr + j * SIZE(taint_flag)) + OFFSET(tnt_true), KVADDR, &tnt_true, sizeof(char), "tnt true", FAULT_ON_ERROR); buf1[bx++] = tnt_true; } } buf1[bx++] = '\0'; fprintf(fp, "%s %s\n", mkstring(buf2, maxnamelen, LJUST, lm->mod_name), buf1); } FREEBUF(modbuf); } static void show_module_taint(void) { int i, j, bx; struct load_module *lm; int maxnamelen; int found; char buf1[BUFSIZE]; char buf2[BUFSIZE]; int gpgsig_ok, license_gplok; struct syment *sp; uint *taintsp, taints; uint8_t tnt_bit; char tnt_true, tnt_false; int tnts_exists, tnts_len; ulong tnts_addr; char *modbuf; if (VALID_STRUCT(taint_flag) || (kernel_symbol_exists("taint_flags") && STRUCT_EXISTS("taint_flag"))) { show_module_taint_4_10(); return; } if (INVALID_MEMBER(module_taints) && INVALID_MEMBER(module_license_gplok)) { MEMBER_OFFSET_INIT(module_taints, "module", "taints"); MEMBER_OFFSET_INIT(module_license_gplok, "module", "license_gplok"); 
MEMBER_OFFSET_INIT(module_gpgsig_ok, "module", "gpgsig_ok"); STRUCT_SIZE_INIT(tnt, "tnt"); MEMBER_OFFSET_INIT(tnt_bit, "tnt", "bit"); MEMBER_OFFSET_INIT(tnt_true, "tnt", "true"); MEMBER_OFFSET_INIT(tnt_false, "tnt", "false"); } if (INVALID_MEMBER(module_taints) && INVALID_MEMBER(module_license_gplok)) option_not_supported('t'); modbuf = GETBUF(SIZE(module)); for (i = found = maxnamelen = 0; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; readmem(lm->module_struct, KVADDR, modbuf, SIZE(module), "module struct", FAULT_ON_ERROR); taints = VALID_MEMBER(module_taints) ? UINT(modbuf + OFFSET(module_taints)) : 0; license_gplok = VALID_MEMBER(module_license_gplok) ? INT(modbuf + OFFSET(module_license_gplok)) : 0; gpgsig_ok = VALID_MEMBER(module_gpgsig_ok) ? INT(modbuf + OFFSET(module_gpgsig_ok)) : 1; if (VALID_MEMBER(module_license_gplok) || taints || !gpgsig_ok) { found++; maxnamelen = strlen(lm->mod_name) > maxnamelen ? strlen(lm->mod_name) : maxnamelen; } } if (!found) { fprintf(fp, "no tainted modules\n"); FREEBUF(modbuf); return; } if (VALID_STRUCT(tnt) && (sp = symbol_search("tnts"))) { tnts_exists = TRUE; tnts_len = get_array_length("tnts", NULL, 0); tnts_addr = sp->value; } else { tnts_exists = FALSE; tnts_len = 0; tnts_addr = 0; } fprintf(fp, "%s %s\n", mkstring(buf2, maxnamelen, LJUST, "NAME"), VALID_MEMBER(module_taints) ? "TAINTS" : "LICENSE_GPLOK"); for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; bx = 0; buf1[0] = '\0'; readmem(lm->module_struct, KVADDR, modbuf, SIZE(module), "module struct", FAULT_ON_ERROR); taints = VALID_MEMBER(module_taints) ? UINT(modbuf + OFFSET(module_taints)) : 0; license_gplok = VALID_MEMBER(module_license_gplok) ? INT(modbuf + OFFSET(module_license_gplok)) : 0; gpgsig_ok = VALID_MEMBER(module_gpgsig_ok) ? 
INT(modbuf + OFFSET(module_gpgsig_ok)) : 1; if (INVALID_MEMBER(module_license_gplok)) { if (!taints && gpgsig_ok) continue; } if (tnts_exists && taints) { taintsp = &taints; for (j = 0; j < (tnts_len * SIZE(tnt)); j += SIZE(tnt)) { readmem((tnts_addr + j) + OFFSET(tnt_bit), KVADDR, &tnt_bit, sizeof(uint8_t), "tnt bit", FAULT_ON_ERROR); if (NUM_IN_BITMAP(taintsp, tnt_bit)) { readmem((tnts_addr + j) + OFFSET(tnt_true), KVADDR, &tnt_true, sizeof(char), "tnt true", FAULT_ON_ERROR); buf1[bx++] = tnt_true; } else { readmem((tnts_addr + j) + OFFSET(tnt_false), KVADDR, &tnt_false, sizeof(char), "tnt false", FAULT_ON_ERROR); if (tnt_false != ' ' && tnt_false != '-' && tnt_false != 'G') buf1[bx++] = tnt_false; } } } if (VALID_MEMBER(module_gpgsig_ok) && !gpgsig_ok) { buf1[bx++] = '('; buf1[bx++] = 'U'; buf1[bx++] = ')'; } buf1[bx++] = '\0'; if (tnts_exists) fprintf(fp, "%s %s\n", mkstring(buf2, maxnamelen, LJUST, lm->mod_name), buf1); else fprintf(fp, "%s %x%s\n", mkstring(buf2, maxnamelen, LJUST, lm->mod_name), VALID_MEMBER(module_taints) ? taints : license_gplok, buf1); } FREEBUF(modbuf); } /* * Do the simple list work for cmd_mod(). */ static void do_module_cmd(ulong flag, char *modref, ulong address, char *objfile, char *tree) { int i, j; struct load_module *lm, *lmp; int maxnamelen; int maxsizelen; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; if (NO_MODULES()) return; switch (flag) { case LIST_MODULE: case LIST_MODULE_HDR: maxnamelen = maxsizelen = 0; for (i = 0; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; maxnamelen = strlen(lm->mod_name) > maxnamelen ? strlen(lm->mod_name) : maxnamelen; sprintf(buf1, "%ld", lm->mod_size); maxsizelen = strlen(buf1) > maxsizelen ? strlen(buf1) : maxsizelen; } if (flag == LIST_MODULE_HDR) { fprintf(fp, "%s %s %s %s OBJECT FILE\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MODULE"), mkstring(buf2, maxnamelen, LJUST, "NAME"), mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, MODULE_MEMORY() ? 
"TEXT_BASE" : "BASE"), mkstring(buf3, maxsizelen, RJUST, "SIZE")); } for (i = 0; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; if (!address || (lm->module_struct == address) || (lm->mod_base == address)) { fprintf(fp, "%s ", mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(lm->module_struct))); fprintf(fp, "%s ", mkstring(buf2, maxnamelen, LJUST, lm->mod_name)); fprintf(fp, "%s ", mkstring(buf4, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(lm->mod_base))); fprintf(fp, "%s ", mkstring(buf3, maxsizelen, RJUST|LONG_DEC, MKSTR(lm->mod_size))); // fprintf(fp, "%6ld ", lm->mod_size); if (strlen(lm->mod_namelist)) fprintf(fp, "%s %s", lm->mod_namelist, lm->mod_flags & MOD_REMOTE ? " (temporary)" : ""); else { fprintf(fp, "(not loaded)"); if (lm->mod_flags & MOD_KALLSYMS) fprintf(fp, " [CONFIG_KALLSYMS]"); } fprintf(fp, "\n"); } } break; case REMOTE_MODULE_SAVE_MSG: if (!REMOTE()) return; for (i = j = 0, lmp = NULL; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; if (lm->mod_flags & MOD_REMOTE) { j++; lmp = lm; } } switch (j) { case 0: return; case 1: error(NOTE, "\nTo save the %s module object locally,\n enter: \"save %s\"\n", lmp->mod_name, lmp->mod_name); break; default: error(NOTE, "\nTo save all temporary remote module objects locally,\n enter: \"save modules\"\n"); fprintf(fp, " To save a single remote module object locally,\n enter: \"save NAME\",\n" " where \"NAME\" is one of the module names shown in the list above.\n"); break; } break; case LOAD_SPECIFIED_MODULE_SYMBOLS: if (!load_module_symbols(modref, objfile, address)) error(FATAL, "cannot load symbols from: %s\n", objfile); do_module_cmd(LIST_MODULE_HDR, 0, address, 0, NULL); do_module_cmd(REMOTE_MODULE_SAVE_MSG, 0, 0, 0, NULL); break; case LOAD_ALL_MODULE_SYMBOLS: for (i = j = 0; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; if (STREQ(lm->mod_name, "(unknown module)")) { error(INFO, "cannot find object file for unknown module at %lx\n", lm->mod_base); continue; } modref = 
lm->mod_name; address = lm->mod_base; if ((objfile = find_module_objfile(modref,NULL,tree))) { if (!is_elf_file(objfile)) { error(INFO, "%s: not an ELF format object file\n", objfile); } else if (!load_module_symbols(modref, objfile, address)) error(INFO, "cannot load symbols from: %s\n", objfile); do_module_cmd(j++ ? LIST_MODULE : LIST_MODULE_HDR, 0, address, 0, tree); FREEBUF(objfile); } else if ((lm->mod_flags & MOD_LOAD_SYMS) || strlen(lm->mod_namelist)) { if (CRASHDEBUG(1)) fprintf(fp, "%s: module symbols are already loaded\n", modref); do_module_cmd(j++ ? LIST_MODULE : LIST_MODULE_HDR, 0, address, 0, tree); } else error(INFO, "cannot find or load object file for %s module\n", modref); } do_module_cmd(REMOTE_MODULE_SAVE_MSG, 0, 0, 0, tree); break; case DELETE_ALL_MODULE_SYMBOLS: delete_load_module(ALL_MODULES); break; case DELETE_MODULE_SYMBOLS: delete_load_module(address); break; case REINIT_MODULES: reinit_modules(); do_module_cmd(LIST_MODULE_HDR, NULL, 0, NULL, NULL); break; case LIST_ALL_MODULE_TAINT: show_module_taint(); break; } } /* * Reinitialize the current set of modules: * * 1. first clear out all references to the current set. * 2. call module_init() again. * 3. display the new set. 
*/
static void
reinit_modules(void)
{
	/* Discard every trace of the currently-loaded module set. */
	delete_load_module(ALL_MODULES);
	st->mods_installed = 0;
	st->flags &= ~MODULE_SYMS;
	free(st->ext_module_symtable);
	free(st->load_modules);
	st->ext_module_symtable = NULL;
	st->load_modules = NULL;
	kt->mods_installed = 0;
	memset(st->mod_symname_hash, 0, sizeof(st->mod_symname_hash));

	/* Rebuild the module state from scratch. */
	module_init();
}

/*
 * Locate the object file for a module reference.  An explicitly-passed
 * filename is used as-is; otherwise candidate names are derived from
 * modref ("<modref>.ko" / "<modref>.o").  Returns a GETBUF'd pathname
 * on success.  (Search continues beyond this chunk.)
 */
static char *
module_objfile_search(char *modref, char *filename, char *tree)
{
	char buf[BUFSIZE];
	char file[BUFSIZE];
	char dir[BUFSIZE];
	struct load_module *lm;
	char *retbuf;
	int initrd;
	struct syment *sp;
	char *p1, *p2;
	char *env;
	char *namelist;

	retbuf = NULL;
	initrd = FALSE;

	if (filename)
		/* NOTE(review): unbounded copy; assumes filename < BUFSIZE */
		strcpy(file, filename);
#ifdef MODULES_IN_CWD
	else {
		/* Prefer a matching .ko (then .o) in the current directory. */
		char *fileext[] = { "ko", "o"};
		int i;
		for (i = 0; i < 2; i++) {
			sprintf(file, "%s.%s", modref, fileext[i]);
			if (access(file, R_OK) == 0) {
				retbuf = GETBUF(strlen(file)+1);
				strcpy(retbuf, file);
				if (CRASHDEBUG(1))
					fprintf(fp,
					    "find_module_objfile: [%s] file in cwd\n",
						retbuf);
				return retbuf;
			}
		}
	}
#else
	else
		sprintf(file, "%s.o", modref);
#endif

	/*
	 * Later versions of insmod create a symbol at the module's base
	 * address.
Examples: * * __insmod_sunrpc_O/lib/modules/2.2.17/misc/sunrpc.o_M3A7EE300_V131601 * __insmod_lockd_O/lib/modules/2.2.17/fs/lockd.o_M3A7EE300_V131601 * __insmod_nfsd_O/lib/modules/2.2.17/fs/nfsd.o_M3A7EE300_V131601 * __insmod_nfs_O/lib/modules/2.2.17/fs/nfs.o_M3A7EE300_V131601 */ if ((st->flags & INSMOD_BUILTIN) && !filename) { sprintf(buf, "__insmod_%s_O/", modref); if (symbol_query(buf, NULL, &sp) == 1) { if (CRASHDEBUG(1)) fprintf(fp, "search: INSMOD_BUILTIN %s\n", sp->name); BZERO(buf, BUFSIZE); p1 = strstr(sp->name, "/"); if ((p2 = strstr(sp->name, file))) p2 += strlen(file); if (p2) { strncpy(buf, p1, p2-p1); if (!strstr(buf, "/lib/modules/")) { sprintf(dir, "/lib/%s.o", modref); if (STREQ(dir, buf)) initrd = TRUE; } else if (REMOTE()) strcpy(file, buf); else { retbuf = GETBUF(strlen(buf)+1); strcpy(retbuf, buf); if (CRASHDEBUG(1)) fprintf(fp, "find_module_objfile: [%s]\n", retbuf); return retbuf; } } } if (is_module_name(modref, NULL, &lm) && (lm->mod_flags & MOD_INITRD)) { sprintf(dir, "/lib/%s.o", modref); initrd = TRUE; } } if (initrd) error(NOTE, "%s: installed from initrd image\n", dir); if (REMOTE()) { retbuf = GETBUF(MAX_MOD_NAMELIST*2); if (!is_module_name(modref, NULL, &lm)) { error(INFO, "%s is not a module reference\n", modref); return NULL; } if ((lm->mod_flags & MOD_LOAD_SYMS) && strlen(lm->mod_namelist)) { if (CRASHDEBUG(1)) fprintf(fp, "redundant mod call: %s\n", lm->mod_namelist); strcpy(retbuf, lm->mod_namelist); return retbuf; } if (find_remote_module_objfile(lm, file, retbuf)) return retbuf; return NULL; } if (tree) { if (!(retbuf = search_directory_tree(tree, file, 1))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(tree, file, 1); if (!retbuf) { sprintf(file, "%s.ko.debug", modref); retbuf = search_directory_tree(tree, file, 1); } } } return retbuf; } sprintf(dir, "%s/%s", DEFAULT_REDHAT_DEBUG_LOCATION, kt->utsname.release); if (!(retbuf = search_directory_tree(dir, 
file, 0))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(dir, file, 0); if (!retbuf) { sprintf(file, "%s.ko.debug", modref); retbuf = search_directory_tree(dir, file, 0); } } } if (!retbuf && (env = getenv("CRASH_MODULE_PATH"))) { sprintf(dir, "%s", env); if (!(retbuf = search_directory_tree(dir, file, 0))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(dir, file, 0); if (!retbuf) { sprintf(file, "%s.ko.debug", modref); retbuf = search_directory_tree(dir, file, 0); } } } } if (!retbuf) { sprintf(dir, "/lib/modules/%s/updates", kt->utsname.release); if (!(retbuf = search_directory_tree(dir, file, 0))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(dir, file, 0); } } } if (!retbuf) { sprintf(dir, "/lib/modules/%s", kt->utsname.release); if (!(retbuf = search_directory_tree(dir, file, 0))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(dir, file, 0); } } } if (!retbuf && !filename && !tree && kt->module_tree) { sprintf(dir, "%s", kt->module_tree); if (!(retbuf = search_directory_tree(dir, file, 0))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(dir, file, 0); if (!retbuf) { sprintf(file, "%s.ko.debug", modref); retbuf = search_directory_tree(dir, file, 0); } } } } /* * Check the directory tree where the vmlinux file is located. */ if (!retbuf && (namelist = realpath(pc->namelist_orig ? 
pc->namelist_orig : pc->namelist, NULL))) { sprintf(dir, "%s", dirname(namelist)); if (!(retbuf = search_directory_tree(dir, file, 0))) { switch (kt->flags & (KMOD_V1|KMOD_V2)) { case KMOD_V2: sprintf(file, "%s.ko", modref); retbuf = search_directory_tree(dir, file, 0); if (!retbuf) { sprintf(file, "%s.ko.debug", modref); retbuf = search_directory_tree(dir, file, 0); } } } free(namelist); } if (!retbuf && is_livepatch()) { sprintf(file, "%s.ko", modref); sprintf(dir, "/usr/lib/kpatch/%s", kt->utsname.release); if (!(retbuf = search_directory_tree(dir, file, 0))) { sprintf(file, "%s.ko.debug", modref); sprintf(dir, "/usr/lib/debug/usr/lib/kpatch/%s", kt->utsname.release); retbuf = search_directory_tree(dir, file, 0); } } return retbuf; } /* * First look for a module based upon its reference name. * If that fails, try replacing any underscores in the * reference name with a dash. * If that fails, because of intermingled dashes and underscores, * try a regex expression. * * Example: module name "dm_mod" comes from "dm-mod.ko" objfile * module name "dm_region_hash" comes from "dm-region_hash.ko" objfile */ static char * find_module_objfile(char *modref, char *filename, char *tree) { char * retbuf; char tmpref[BUFSIZE]; int i, c; retbuf = module_objfile_search(modref, filename, tree); if (!retbuf) { strncpy(tmpref, modref, BUFSIZE-1); for (c = 0; c < BUFSIZE && tmpref[c]; c++) if (tmpref[c] == '_') tmpref[c] = '-'; retbuf = module_objfile_search(tmpref, filename, tree); } if (!retbuf && (count_chars(modref, '_') > 1)) { for (i = c = 0; modref[i]; i++) { if (modref[i] == '_') { tmpref[c++] = '['; tmpref[c++] = '_'; tmpref[c++] = '-'; tmpref[c++] = ']'; } else tmpref[c++] = modref[i]; } tmpref[c] = NULLCHAR; retbuf = module_objfile_search(tmpref, filename, tree); } return retbuf; } /* * Try to load module symbols with name. 
*/ int load_module_symbols_helper(char *name) { char *objfile; ulong address; if (is_module_name(name, &address, NULL) && (objfile = find_module_objfile(name, NULL, NULL))) { do_module_cmd(LOAD_SPECIFIED_MODULE_SYMBOLS, name, address, objfile, NULL); return TRUE; } return FALSE; } /* * Unlink any temporary remote module object files. */ void unlink_module(struct load_module *load_module) { int i; struct load_module *lm; if (load_module) { if (load_module->mod_flags & MOD_REMOTE) unlink(load_module->mod_namelist); return; } for (i = 0; i < kt->mods_installed; i++) { lm = &st->load_modules[i]; if (lm->mod_flags & MOD_REMOTE) unlink(lm->mod_namelist); } } /* * Dump the kernel log_buf in chronological order. */ void cmd_log(void) { int c; int msg_flags; msg_flags = 0; while ((c = getopt(argcnt, args, "TtdmascR")) != EOF) { switch(c) { case 'T': msg_flags |= SHOW_LOG_CTIME; break; case 't': msg_flags |= SHOW_LOG_TEXT; break; case 'd': msg_flags |= SHOW_LOG_DICT; break; case 'm': msg_flags |= SHOW_LOG_LEVEL; break; case 'a': msg_flags |= SHOW_LOG_AUDIT; break; case 's': msg_flags |= SHOW_LOG_SAFE; break; case 'c': msg_flags |= SHOW_LOG_CALLER; break; case 'R': msg_flags |= SHOW_LOG_RUST; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (msg_flags & SHOW_LOG_CTIME && pc->flags & MINIMAL_MODE) { error(WARNING, "the option '-T' is not available in minimal mode\n"); return; } if (msg_flags & SHOW_LOG_AUDIT) { dump_audit(); return; } if (msg_flags & SHOW_LOG_SAFE) { dump_printk_safe_seq_buf(msg_flags); return; } dump_log(msg_flags); dump_printk_safe_seq_buf(msg_flags); } void dump_log(int msg_flags) { int i, len, tmp, show_level; ulong log_buf, log_end; char *buf; char last; ulong index; struct syment *nsp; int log_wrap, loglevel, log_buf_len; if (kernel_symbol_exists("prb")) { dump_lockless_record_log(msg_flags); return; } if (kernel_symbol_exists("log_first_idx") && kernel_symbol_exists("log_next_idx")) { 
dump_variable_length_record_log(msg_flags); return; } if (msg_flags & SHOW_LOG_CTIME) option_not_supported('T'); if (msg_flags & SHOW_LOG_DICT) option_not_supported('d'); if ((msg_flags & SHOW_LOG_TEXT) && STREQ(pc->curcmd, "log")) option_not_supported('t'); show_level = msg_flags & SHOW_LOG_LEVEL ? TRUE : FALSE; if (symbol_exists("log_buf_len")) { get_symbol_data("log_buf_len", sizeof(int), &log_buf_len); get_symbol_data("log_buf", sizeof(ulong), &log_buf); } else { if ((ARRAY_LENGTH(log_buf) == 0) && (get_array_length("log_buf", NULL, 0) == 0)) { if ((nsp = next_symbol("log_buf", NULL)) == NULL) error(FATAL, "cannot determine length of log_buf\n"); builtin_array_length("log_buf", (int)(nsp->value - symbol_value("log_buf")), NULL); } log_buf_len = ARRAY_LENGTH(log_buf); log_buf = symbol_value("log_buf"); } buf = GETBUF(log_buf_len); log_wrap = FALSE; last = 0; if ((len = get_symbol_length("log_end")) == sizeof(int)) { get_symbol_data("log_end", len, &tmp); log_end = (ulong)tmp; } else if (len == 0) { THIS_KERNEL_VERSION >= LINUX(2,6,25) ? get_symbol_data("log_end", sizeof(unsigned), &log_end) : get_symbol_data("log_end", sizeof(unsigned long), &log_end); } else get_symbol_data("log_end", len, &log_end); if (!readmem(log_buf, KVADDR, buf, log_buf_len, "log_buf contents", RETURN_ON_ERROR|QUIET)) { error(WARNING, "\ncannot read log_buf contents\n"); return; } if (log_end < log_buf_len) index = 0; else index = log_end & (log_buf_len - 1); if ((log_end < log_buf_len) && (index == 0) && (buf[index] == '<')) loglevel = TRUE; else loglevel = FALSE; if (index != 0) log_wrap = TRUE; wrap_around: for (i = index; i < log_buf_len; i++) { if (loglevel && !show_level) { switch (buf[i]) { case '>': loglevel = FALSE; /* FALLTHROUGH */ case '<': case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': continue; default: loglevel = FALSE; break; } } if (buf[i]) { fputc(ascii(buf[i]) ? buf[i] : '.', fp); loglevel = buf[i] == '\n' ? 
TRUE : FALSE; last = buf[i]; } } if (log_wrap) { log_buf_len = index; index = 0; log_wrap = FALSE; goto wrap_around; } if (last != '\n') fprintf(fp, "\n"); FREEBUF(buf); } /* * get log record by index; idx must point to valid message. */ static char * log_from_idx(uint32_t idx, char *logbuf) { char *logptr; uint16_t msglen; logptr = logbuf + idx; /* * A length == 0 record is the end of buffer marker. * Wrap around and return the message at the start of * the buffer. */ msglen = USHORT(logptr + OFFSET(log_len)); if (!msglen) logptr = logbuf; return logptr; } /* * get next record index; idx must point to valid message. */ static uint32_t log_next(uint32_t idx, char *logbuf) { char *logptr; uint16_t msglen; logptr = logbuf + idx; /* * A length == 0 record is the end of buffer marker. Wrap around and * read the message at the start of the buffer as *this* one, and * return the one after that. */ msglen = USHORT(logptr + OFFSET(log_len)); if (!msglen) { msglen = USHORT(logbuf + OFFSET(log_len)); return msglen; } return idx + msglen; } static void dump_log_entry(char *logptr, int msg_flags) { int indent; char *msg, *p; uint16_t i, text_len, dict_len, level; uint64_t ts_nsec; ulonglong nanos; ulong rem; char buf[BUFSIZE]; int ilen; ilen = level = 0; text_len = USHORT(logptr + OFFSET(log_text_len)); dict_len = USHORT(logptr + OFFSET(log_dict_len)); if (VALID_MEMBER(log_level)) { /* * Initially a "u16 level", then a "u8 level:3" */ if (SIZE(log_level) == sizeof(short)) level = USHORT(logptr + OFFSET(log_level)); else level = UCHAR(logptr + OFFSET(log_level)); } else { if (VALID_MEMBER(log_flags_level)) level = UCHAR(logptr + OFFSET(log_flags_level)); else if (msg_flags & SHOW_LOG_LEVEL) msg_flags &= ~SHOW_LOG_LEVEL; } ts_nsec = ULONGLONG(logptr + OFFSET(log_ts_nsec)); msg = logptr + SIZE(log); if (CRASHDEBUG(1)) fprintf(fp, "\nlog %lx -> msg: %lx ts_nsec: %lld flags/level: %x" " text_len: %d dict_len: %d\n", (ulong)logptr, (ulong)msg, (ulonglong)ts_nsec, level, text_len, 
dict_len); if ((msg_flags & SHOW_LOG_TEXT) == 0) { nanos = (ulonglong)ts_nsec / (ulonglong)1000000000; rem = (ulonglong)ts_nsec % (ulonglong)1000000000; if (msg_flags & SHOW_LOG_CTIME) { time_t t = kt->boot_date.tv_sec + nanos; sprintf(buf, "[%s] ", ctime_tz(&t)); } else sprintf(buf, "[%5lld.%06ld] ", nanos, rem/1000); ilen = strlen(buf); fprintf(fp, "%s", buf); } /* * The PRINTK_CALLER id field was introduced with Linux-5.1 so if * requested, Kernel version >= 5.1 and field exists print caller_id. */ if (msg_flags & SHOW_LOG_CALLER && VALID_MEMBER(log_caller_id)) { const unsigned int cpuid = 0x80000000; char cbuf[PID_CHARS_MAX]; unsigned int cid; /* Get id type, isolate just id value in cid for print */ cid = UINT(logptr + OFFSET(log_caller_id)); sprintf(cbuf, "%c%d", (cid & cpuid) ? 'C' : 'T', cid & ~cpuid); sprintf(buf, "[%*s] ", PID_CHARS_DEFAULT, cbuf); ilen += strlen(buf); fprintf(fp, "%s", buf); } level = LOG_LEVEL(level); if (msg_flags & SHOW_LOG_LEVEL) { sprintf(buf, "<%x>", level); ilen += strlen(buf); fprintf(fp, "%s", buf); } for (i = 0, p = msg; i < text_len; i++, p++) { if (*p == '\n') fprintf(fp, "\n%s", space(ilen)); else if (isprint(*p) || isspace(*p)) fputc(*p, fp); else fputc('.', fp); } if (dict_len & (msg_flags & SHOW_LOG_DICT)) { fprintf(fp, "\n"); indent = TRUE; for (i = 0; i < dict_len; i++, p++) { if (indent) { fprintf(fp, "%s", space(ilen)); indent = FALSE; } if (isprint(*p)) fputc(*p, fp); else if (*p == NULLCHAR) { fputc('\n', fp); indent = TRUE; } else fputc('.', fp); } } fprintf(fp, "\n"); } /* * Handle the variable-length-record log_buf. */ static void dump_variable_length_record_log(int msg_flags) { uint32_t idx, log_first_idx, log_next_idx, log_buf_len; ulong log_buf; char *logptr, *logbuf, *log_struct_name; if (INVALID_SIZE(log)) { if (STRUCT_EXISTS("printk_log")) { /* * In kernel 3.11 the log structure name was renamed * from log to printk_log. See 62e32ac3505a0cab. 
*/ log_struct_name = "printk_log"; MEMBER_OFFSET_INIT(log_caller_id, "printk_log", "caller_id"); } else log_struct_name = "log"; STRUCT_SIZE_INIT(log, log_struct_name); MEMBER_OFFSET_INIT(log_ts_nsec, log_struct_name, "ts_nsec"); MEMBER_OFFSET_INIT(log_len, log_struct_name, "len"); MEMBER_OFFSET_INIT(log_text_len, log_struct_name, "text_len"); MEMBER_OFFSET_INIT(log_dict_len, log_struct_name, "dict_len"); MEMBER_OFFSET_INIT(log_level, log_struct_name, "level"); MEMBER_SIZE_INIT(log_level, log_struct_name, "level"); MEMBER_OFFSET_INIT(log_flags_level, log_struct_name, "flags_level"); /* * If things change, don't kill a dumpfile session * searching for a panic message. */ if (INVALID_SIZE(log) || INVALID_MEMBER(log_ts_nsec) || INVALID_MEMBER(log_len) || INVALID_MEMBER(log_text_len) || INVALID_MEMBER(log_dict_len) || (INVALID_MEMBER(log_level) && INVALID_MEMBER(log_flags_level)) || !kernel_symbol_exists("log_buf_len") || !kernel_symbol_exists("log_buf")) { error(WARNING, "\nlog buf data structure(s) have changed\n"); return; } } get_symbol_data("log_first_idx", sizeof(uint32_t), &log_first_idx); get_symbol_data("log_next_idx", sizeof(uint32_t), &log_next_idx); get_symbol_data("log_buf_len", sizeof(uint32_t), &log_buf_len); get_symbol_data("log_buf", sizeof(char *), &log_buf); if (CRASHDEBUG(1)) { fprintf(fp, "log_buf: %lx\n", (ulong)log_buf); fprintf(fp, "log_buf_len: %d\n", log_buf_len); fprintf(fp, "log_first_idx: %d\n", log_first_idx); fprintf(fp, "log_next_idx: %d\n", log_next_idx); } logbuf = GETBUF(log_buf_len); if (!readmem(log_buf, KVADDR, logbuf, log_buf_len, "log_buf contents", RETURN_ON_ERROR|QUIET)) { error(WARNING, "\ncannot read log_buf contents\n"); FREEBUF(logbuf); return; } hq_open(); idx = log_first_idx; while (idx != log_next_idx) { logptr = log_from_idx(idx, logbuf); dump_log_entry(logptr, msg_flags); if (!hq_enter((ulong)logptr)) { error(INFO, "\nduplicate log_buf message pointer\n"); break; } idx = log_next(idx, logbuf); if (idx >= log_buf_len) { 
if (log_first_idx > log_next_idx) idx = 0; else { error(INFO, "\ninvalid log_buf entry encountered\n"); break; } } if (CRASHDEBUG(1) && (idx == log_next_idx)) fprintf(fp, "\nfound log_next_idx OK\n"); } hq_close(); FREEBUF(logbuf); } /* * Display general system info. */ void cmd_sys(void) { int c, cnt; ulong sflag; char buf[BUFSIZE]; sflag = FALSE; while ((c = getopt(argcnt, args, "ctip:")) != EOF) { switch(c) { case 'p': if (STREQ(optarg, "anic")) panic_this_kernel(); else argerrs++; break; case 'c': sflag = TRUE; break; case 't': show_kernel_taints(buf, VERBOSE); return; case 'i': dump_dmi_info(); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!args[optind]) { if (sflag) dump_sys_call_table(NULL, 0); else display_sys_stats(); return; } cnt = 0; do { if (sflag) dump_sys_call_table(args[optind], cnt++); else if (STREQ(args[optind], "config")) read_in_kernel_config(IKCFG_READ); else cmd_usage(pc->curcmd, SYNOPSIS); optind++; } while (args[optind]); } static int is_kernel_tainted(void) { ulong tainted_mask; int tainted; if (kernel_symbol_exists("tainted")) { get_symbol_data("tainted", sizeof(int), &tainted); if (tainted) return TRUE; } else if (kernel_symbol_exists("tainted_mask")) { get_symbol_data("tainted_mask", sizeof(ulong), &tainted_mask); if (tainted_mask) return TRUE; } return FALSE; } static int is_livepatch(void) { int i; struct load_module *lm; char buf[BUFSIZE]; show_kernel_taints(buf, !VERBOSE); if (strstr(buf, "K")) /* TAINT_LIVEPATCH */ return TRUE; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; if (STREQ("kpatch", lm->mod_name)) return TRUE; } return FALSE; } /* * Display system stats at init-time or for the sys command. */ void display_sys_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; // if (!(pc->flags & RUNTIME) && !DUMPFILE() && !GDB_PATCHED()) // fprintf(fp, "\n"); /* * It's now safe to unlink the remote namelist. 
*/ if (pc->flags & UNLINK_NAMELIST) { unlink(pc->namelist); pc->flags &= ~UNLINK_NAMELIST; pc->flags |= NAMELIST_UNLINKED; } if (REMOTE()) { switch (pc->flags & (NAMELIST_LOCAL|NAMELIST_UNLINKED|NAMELIST_SAVED)) { case NAMELIST_UNLINKED: fprintf(fp, " KERNEL: %s (temporary)\n", pc->namelist); break; case (NAMELIST_UNLINKED|NAMELIST_SAVED): fprintf(fp, " KERNEL: %s\n", pc->namelist); break; case NAMELIST_LOCAL: fprintf(fp, " KERNEL: %s\n", pc->namelist); break; } } else { if (pc->system_map) { fprintf(fp, " SYSTEM MAP: %s%s%s\n", pc->system_map, is_livepatch() ? " [LIVEPATCH]" : "", is_kernel_tainted() ? " [TAINTED]" : ""); fprintf(fp, "DEBUG KERNEL: %s %s\n", pc->namelist_orig ? pc->namelist_orig : pc->namelist, debug_kernel_version(pc->namelist)); } else fprintf(fp, " KERNEL: %s%s%s\n", pc->namelist_orig ? pc->namelist_orig : pc->namelist, is_livepatch() ? " [LIVEPATCH]" : "", is_kernel_tainted() ? " [TAINTED]" : ""); } if (pc->debuginfo_file) { if (STREQ(pc->debuginfo_file, pc->namelist_debug) && pc->namelist_debug_orig) fprintf(fp, " DEBUGINFO: %s\n", pc->namelist_debug_orig); else fprintf(fp, " DEBUGINFO: %s\n", pc->debuginfo_file); } else if (pc->namelist_debug) fprintf(fp, "DEBUG KERNEL: %s %s\n", pc->namelist_debug_orig ? pc->namelist_debug_orig : pc->namelist_debug, debug_kernel_version(pc->namelist_debug)); /* * After the initial banner display, we no longer need the * temporary namelist file(s). 
*/ if (!(pc->flags & RUNTIME)) { if (pc->namelist_orig) unlink(pc->namelist); if (pc->namelist_debug_orig) unlink(pc->namelist_debug); } if (dumpfile_is_split() || sadump_is_diskset() || is_ramdump_image()) fprintf(fp, " DUMPFILES: "); else fprintf(fp, " DUMPFILE: "); if (ACTIVE()) { if (REMOTE_ACTIVE()) fprintf(fp, "%s@%s (remote live system)\n", pc->server_memsrc, pc->server); else fprintf(fp, "%s\n", pc->live_memsrc); } else { if (REMOTE_DUMPFILE()) fprintf(fp, "%s@%s (remote dumpfile)", pc->server_memsrc, pc->server); else if (REMOTE_PAUSED()) fprintf(fp, "%s %s (remote paused system)\n", pc->server_memsrc, pc->server); else { if (dumpfile_is_split()) show_split_dumpfiles(); else if (sadump_is_diskset()) sadump_show_diskset(); else if (is_ramdump_image()) show_ramdump_files(); else fprintf(fp, "%s", pc->dumpfile); } if (LIVE()) fprintf(fp, " [LIVE DUMP]"); if (NETDUMP_DUMPFILE() && is_partial_netdump()) fprintf(fp, " [PARTIAL DUMP]"); if (KDUMP_DUMPFILE() && is_incomplete_dump()) fprintf(fp, " [INCOMPLETE]"); if (DISKDUMP_DUMPFILE() && !dumpfile_is_split() && (is_partial_diskdump() || is_incomplete_dump() || is_excluded_vmemmap())) { fprintf(fp, " %s%s%s", is_partial_diskdump() ? " [PARTIAL DUMP]" : "", is_incomplete_dump() ? " [INCOMPLETE]" : "", is_excluded_vmemmap() ? 
" [EXCLUDED VMEMMAP]" : ""); } fprintf(fp, "\n"); if (KVMDUMP_DUMPFILE() && pc->kvmdump_mapfile) fprintf(fp, " MAPFILE: %s\n", pc->kvmdump_mapfile); } int number_cpus_to_display = get_cpus_to_display(); int number_cpus_present = get_cpus_present(); if (!number_cpus_present) number_cpus_present = kt->cpus; fprintf(fp, " CPUS: %d", number_cpus_present); if (number_cpus_present > number_cpus_to_display) fprintf(fp, " [OFFLINE: %d]", number_cpus_present - number_cpus_to_display); fprintf(fp, "\n"); if (ACTIVE()) get_xtime(&kt->date); fprintf(fp, " DATE: %s\n", ctime_tz(&kt->date.tv_sec)); fprintf(fp, " UPTIME: %s\n", get_uptime(buf, NULL)); fprintf(fp, "LOAD AVERAGE: %s\n", get_loadavg(buf)); fprintf(fp, " TASKS: %ld\n", RUNNING_TASKS()); fprintf(fp, " NODENAME: %s\n", uts->nodename); fprintf(fp, " RELEASE: %s\n", uts->release); fprintf(fp, " VERSION: %s\n", uts->version); fprintf(fp, " MACHINE: %s ", uts->machine); if ((mhz = machdep->processor_speed())) fprintf(fp, "(%ld Mhz)\n", mhz); else fprintf(fp, "(unknown Mhz)\n"); fprintf(fp, " MEMORY: %s\n", get_memory_size(buf)); #ifdef WHO_CARES fprintf(fp, " DOMAINNAME: %s\n", uts->domainname); #endif if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) return; if (DUMPFILE()) { fprintf(fp, " PANIC: "); if (machdep->flags & HWRESET) fprintf(fp, "(HARDWARE RESET)\n"); else if (machdep->flags & INIT) fprintf(fp, "(INIT)\n"); else if (machdep->flags & MCA) fprintf(fp, "(MCA)\n"); else { strip_linefeeds(get_panicmsg(buf)); fprintf(fp, "\"%s\"%s\n", buf, strstr(buf, "Oops: ") ? " (check log for details)" : ""); } } } /* * Get the kernel version from the debug kernel and store it here. 
*/ static char *debug_kernel_version_string = NULL; static char * debug_kernel_version(char *namelist) { FILE *pipe; int argc; char buf[BUFSIZE]; char command[BUFSIZE]; char *arglist[MAXARGS]; if (debug_kernel_version_string) return debug_kernel_version_string; sprintf(command, "/usr/bin/strings %s", namelist); if ((pipe = popen(command, "r")) == NULL) { debug_kernel_version_string = " "; return debug_kernel_version_string; } argc = 0; while (fgets(buf, BUFSIZE-1, pipe)) { if (!strstr(buf, "Linux version 2.") && !strstr(buf, "Linux version 3.") && !strstr(buf, "Linux version 4.") && !strstr(buf, "Linux version 5.") && !strstr(buf, "Linux version 6.")) continue; argc = parse_line(buf, arglist); break; } pclose(pipe); if ((argc >= 3) && (debug_kernel_version_string = (char *) malloc(strlen(arglist[2])+3))) sprintf(debug_kernel_version_string, "(%s)", arglist[2]); else debug_kernel_version_string = " "; return debug_kernel_version_string; } /* * Calculate and return the uptime. */ char * get_uptime(char *buf, ulonglong *j64p) { ulong jiffies, tmp1, tmp2; ulonglong jiffies_64, wrapped; if (symbol_exists("jiffies_64")) { get_symbol_data("jiffies_64", sizeof(ulonglong), &jiffies_64); if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { wrapped = (jiffies_64 & 0xffffffff00000000ULL); if (wrapped) { wrapped -= 0x100000000ULL; jiffies_64 &= 0x00000000ffffffffULL; jiffies_64 |= wrapped; jiffies_64 += (ulonglong)(300*machdep->hz); } else { tmp1 = (ulong)(uint)(-300*machdep->hz); tmp2 = (ulong)jiffies_64; jiffies_64 = (ulonglong)(tmp2 - tmp1); } } if (buf) convert_time(jiffies_64, buf); if (j64p) *j64p = jiffies_64; } else { get_symbol_data("jiffies", sizeof(long), &jiffies); if (buf) convert_time((ulonglong)jiffies, buf); if (j64p) *j64p = (ulonglong)jiffies; } return buf; } #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) static char * get_loadavg(char *buf) { int a, b, c; long avenrun[3]; 
readmem(symbol_value("avenrun"), KVADDR, &avenrun[0],
	sizeof(long)*3, "avenrun array", FAULT_ON_ERROR);

	/* Add FIXED_1/200 (~0.005) so the truncating split rounds. */
	a = avenrun[0] + (FIXED_1/200);
	b = avenrun[1] + (FIXED_1/200);
	c = avenrun[2] + (FIXED_1/200);
	sprintf(buf, "%d.%02d, %d.%02d, %d.%02d",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c));

	return buf;
}

/*
 * Determine whether a string or value equates to a system call name or value.
 */
int
is_system_call(char *name, ulong value)
{
	int i;
	ulong *sys_call_table, *sct;
	char *sp;
	long size;
	int NR_syscalls;

	NR_syscalls = get_NR_syscalls(NULL);

	/* Snapshot the kernel's sys_call_table into a local buffer. */
	size = sizeof(void *) * NR_syscalls;
	sys_call_table = (ulong *)GETBUF(size);

	readmem(symbol_value("sys_call_table"), KVADDR, sys_call_table,
		size, "sys_call_table", FAULT_ON_ERROR);

	/*
	 * With a name, match against the symbol for each table entry;
	 * otherwise match the raw entry value.
	 */
	for (i = 0, sct = sys_call_table; i < NR_syscalls; i++, sct++) {
		if (name && (sp = value_symbol(*sct))) {
			if (STREQ(name, sp))
				return TRUE;
		} else if (value) {
			if (value == *sct)
				return TRUE;
		}
	}

	return FALSE;
}

char *sys_call_hdr = "NUM SYSTEM CALL FILE AND LINE NUMBER\n";

static void
dump_sys_call_table(char *spec, int cnt)
{
	int i, confirmed;
	char buf1[BUFSIZE], *scp;
	char buf2[BUFSIZE], *p;
	char buf3[BUFSIZE];
	char *arglist[MAXARGS];
	int argc, NR_syscalls;
	int number, printit, hdr_printed;
	struct syment *sp, *spn;
	long size;
	/* s390x system call table entries are 32-bit. */
#ifdef S390X
	unsigned int *sct, *sys_call_table, sys_ni_syscall, addr;
#else
	ulong *sys_call_table, *sct, sys_ni_syscall, addr;
#endif

	if (NO_LINE_NUMBERS())
		error(INFO, "line numbers are not available\n");

	NR_syscalls = get_NR_syscalls(&confirmed);
	if (CRASHDEBUG(1))
		fprintf(fp, "NR_syscalls: %d (%sconfirmed)\n", NR_syscalls,
			confirmed ?
"" : "not "); size = sizeof(addr) * NR_syscalls; #ifdef S390X sys_call_table = (unsigned int *)GETBUF(size); #else sys_call_table = (ulong *)GETBUF(size); #endif readmem(symbol_value("sys_call_table"), KVADDR, sys_call_table, size, "sys_call_table", FAULT_ON_ERROR); sys_ni_syscall = symbol_value("sys_ni_syscall"); if (spec) open_tmpfile(); fprintf(fp, "%s", sys_call_hdr); get_build_directory(buf2); for (i = 0, sct = sys_call_table; i < NR_syscalls; i++, sct++) { if (!(scp = value_symbol(*sct))) { if (confirmed || CRASHDEBUG(1)) { fprintf(fp, (*gdb_output_radix == 16) ? "%3x " : "%3d ", i); fprintf(fp, "invalid sys_call_table entry: %lx ", (unsigned long)*sct); if (strlen(value_to_symstr(*sct, buf1, 0))) fprintf(fp, "(%s)\n", buf1); else fprintf(fp, "\n"); } continue; } fprintf(fp, (*gdb_output_radix == 16) ? "%3x " : "%3d ", i); if (sys_ni_syscall && *sct == sys_ni_syscall) fprintf(fp, "%-26s ", "sys_ni_syscall"); else fprintf(fp, "%-26s ", scp); /* * For system call symbols whose first instruction is * an inline from a header file, the file/line-number is * confusing. For this command only, look for the first * instruction address in the system call that shows the * the actual source file containing the system call. */ sp = value_search(*sct, NULL); spn = next_symbol(NULL, sp); for (addr = *sct; sp && spn && (addr < spn->value); addr++) { BZERO(buf1, BUFSIZE); get_line_number(addr, buf1, FALSE); if (strstr(buf1, ".h: ") && strstr(buf1, "include/")) continue; if (strstr(buf1, buf2)) { p = buf1 + strlen(buf2); fprintf(fp, "%s%s", strlen(buf1) ? ".." : "", p); break; } } fprintf(fp, "\n"); } if (spec) { rewind(pc->tmpfile); hdr_printed = cnt; if ((number = IS_A_NUMBER(spec))) sprintf(buf3, (*gdb_output_radix == 16) ? 
"%lx" : "%ld", stol(spec, FAULT_ON_ERROR, NULL)); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { printit = FALSE; strcpy(buf2, buf1); argc = parse_line(buf2, arglist); if (argc < 2) continue; if (number && STREQ(arglist[0], buf3)) printit = TRUE; else if (!number && strstr(arglist[1], spec)) printit = TRUE; if (printit) { fprintf(pc->saved_fp, "%s%s", hdr_printed++ ? "" : sys_call_hdr, buf1); if (number) break; } } close_tmpfile(); } } /* * Get the number of system calls in the sys_call_table, confirming * the number only if the debuginfo data shows sys_call_table as an * array. Otherwise base it upon next symbol after it. */ static int get_NR_syscalls(int *confirmed) { ulong sys_call_table; struct syment *sp; int type, cnt; type = get_symbol_type("sys_call_table", NULL, NULL); if ((type == TYPE_CODE_ARRAY) && (cnt = get_array_length("sys_call_table", NULL, 0))) { *confirmed = TRUE; return cnt; } *confirmed = FALSE; sys_call_table = symbol_value("sys_call_table"); if (!(sp = next_symbol("sys_call_table", NULL))) return 256; while (sp->value == sys_call_table) { if (!(sp = next_symbol(sp->name, NULL))) return 256; } if (machine_type("S390X")) cnt = (sp->value - sys_call_table)/sizeof(int); else cnt = (sp->value - sys_call_table)/sizeof(void *); return cnt; } /* * "help -k" output */ void dump_kernel_table(int verbose) { int i, c, j, more, nr_cpus; struct new_utsname *uts; int others; others = 0; more = FALSE; uts = &kt->utsname; fprintf(fp, " flags: %lx\n (", kt->flags); if (kt->flags & NO_MODULE_ACCESS) fprintf(fp, "%sNO_MODULE_ACCESS", others++ ? "|" : ""); if (kt->flags & TVEC_BASES_V1) fprintf(fp, "%sTVEC_BASES_V1", others++ ? "|" : ""); if (kt->flags & TVEC_BASES_V2) fprintf(fp, "%sTVEC_BASES_V2", others++ ? "|" : ""); if (kt->flags & GCC_2_96) fprintf(fp, "%sGCC_2_96", others++ ? "|" : ""); if (kt->flags & GCC_3_2) fprintf(fp, "%sGCC_3_2", others++ ? "|" : ""); if (kt->flags & GCC_3_2_3) fprintf(fp, "%sGCC_3_2_3", others++ ? 
"|" : ""); if (kt->flags & GCC_3_3_2) fprintf(fp, "%sGCC_3_3_2", others++ ? "|" : ""); if (kt->flags & GCC_3_3_3) fprintf(fp, "%sGCC_3_3_3", others++ ? "|" : ""); if (kt->flags & RA_SEEK) fprintf(fp, "%sRA_SEEK", others++ ? "|" : ""); if (kt->flags & NO_RA_SEEK) fprintf(fp, "%sNO_RA_SEEK", others++ ? "|" : ""); if (kt->flags & KALLSYMS_V1) fprintf(fp, "%sKALLSYMS_V1", others++ ? "|" : ""); if (kt->flags & NO_KALLSYMS) fprintf(fp, "%sNO_KALLSYMS", others++ ? "|" : ""); if (kt->flags & PER_CPU_OFF) fprintf(fp, "%sPER_CPU_OFF", others++ ? "|" : ""); if (kt->flags & SMP) fprintf(fp, "%sSMP", others++ ? "|" : ""); if (kt->flags & KMOD_V1) fprintf(fp, "%sKMOD_V1", others++ ? "|" : ""); if (kt->flags & KMOD_V2) fprintf(fp, "%sKMOD_V2", others++ ? "|" : ""); if (kt->flags & KALLSYMS_V2) fprintf(fp, "%sKALLSYMS_V2", others++ ? "|" : ""); if (kt->flags & USE_OPT_BT) fprintf(fp, "%sUSE_OPT_BT", others++ ? "|" : ""); if (kt->flags & ARCH_XEN) fprintf(fp, "%sARCH_XEN", others++ ? "|" : ""); if (kt->flags & ARCH_PVOPS_XEN) fprintf(fp, "%sARCH_PVOPS_XEN", others++ ? "|" : ""); if (kt->flags & ARCH_OPENVZ) fprintf(fp, "%sARCH_OPENVZ", others++ ? "|" : ""); if (kt->flags & ARCH_PVOPS) fprintf(fp, "%sARCH_PVOPS", others++ ? "|" : ""); if (kt->flags & NO_IKCONFIG) fprintf(fp, "%sNO_IKCONFIG", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND) fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & NO_DWARF_UNWIND) fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MEMORY) fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_EH_FRAME) fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MODULES) fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : ""); if (kt->flags & BUGVERBOSE_OFF) fprintf(fp, "%sBUGVERBOSE_OFF", others++ ? "|" : ""); if (kt->flags & RELOC_SET) fprintf(fp, "%sRELOC_SET", others++ ? 
"|" : ""); if (kt->flags & RELOC_FORCE) fprintf(fp, "%sRELOC_FORCE", others++ ? "|" : ""); if (kt->flags & PRE_KERNEL_INIT) fprintf(fp, "%sPRE_KERNEL_INIT", others++ ? "|" : ""); fprintf(fp, ")\n"); others = 0; fprintf(fp, " flags2: %llx %s", kt->flags2, kt->flags2 ? " \n (" : " (unused"); if (kt->flags2 & RELOC_AUTO) fprintf(fp, "%sRELOC_AUTO", others++ ? "|" : ""); if (kt->flags2 & KASLR) fprintf(fp, "%sKASLR", others++ ? "|" : ""); if (kt->flags2 & KASLR_CHECK) fprintf(fp, "%sKASLR_CHECK", others++ ? "|" : ""); if (kt->flags2 & TVEC_BASES_V3) fprintf(fp, "%sTVEC_BASES_V3", others++ ? "|" : ""); if (kt->flags2 & TIMER_BASES) fprintf(fp, "%sTIMER_BASES", others++ ? "|" : ""); if (kt->flags2 & IRQ_DESC_TREE_RADIX) fprintf(fp, "%sIRQ_DESC_TREE_RADIX", others++ ? "|" : ""); if (kt->flags2 & IRQ_DESC_TREE_XARRAY) fprintf(fp, "%sIRQ_DESC_TREE_XARRAY", others++ ? "|" : ""); if (kt->flags2 & IRQ_DESC_TREE_MAPLE) fprintf(fp, "%sIRQ_DESC_TREE_MAPLE", others++ ? "|" : ""); if (kt->flags2 & KMOD_PAX) fprintf(fp, "%sKMOD_PAX", others++ ? "|" : ""); if (kt->flags2 & KMOD_MEMORY) fprintf(fp, "%sKMOD_MEMORY", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " stext: %lx\n", kt->stext); fprintf(fp, " etext: %lx\n", kt->etext); fprintf(fp, " stext_init: %lx\n", kt->stext_init); fprintf(fp, " etext_init: %lx\n", kt->etext_init); fprintf(fp, " init_begin: %lx\n", kt->init_begin); fprintf(fp, " init_end: %lx\n", kt->init_end); fprintf(fp, " end: %lx\n", kt->end); fprintf(fp, " cpus: %d\n", kt->cpus); fprintf(fp, " cpus_override: %s\n", kt->cpus_override); fprintf(fp, " NR_CPUS: %d (compiled-in to this version of %s)\n", NR_CPUS, pc->program_name); fprintf(fp, "kernel_NR_CPUS: %d\n", kt->kernel_NR_CPUS); others = 0; fprintf(fp, "ikconfig_flags: %x (", kt->ikconfig_flags); if (kt->ikconfig_flags & IKCONFIG_AVAIL) fprintf(fp, "%sIKCONFIG_AVAIL", others++ ? "|" : ""); if (kt->ikconfig_flags & IKCONFIG_LOADED) fprintf(fp, "%sIKCONFIG_LOADED", others++ ? 
"|" : ""); if (!kt->ikconfig_flags) fprintf(fp, "unavailable"); fprintf(fp, ")\n"); fprintf(fp, " ikconfig_ents: %d\n", kt->ikconfig_ents); if (kt->display_bh == display_bh_1) fprintf(fp, " display_bh: display_bh_1()\n"); else if (kt->display_bh == display_bh_2) fprintf(fp, " display_bh: display_bh_2()\n"); else if (kt->display_bh == display_bh_3) fprintf(fp, " display_bh: display_bh_3()\n"); else fprintf(fp, " display_bh: %lx\n", (ulong)kt->display_bh); fprintf(fp, " highest_irq: "); if (kt->highest_irq) fprintf(fp, "%d\n", kt->highest_irq); else fprintf(fp, "(unused/undetermined)\n"); fprintf(fp, " module_list: %lx\n", kt->module_list); fprintf(fp, " kernel_module: %lx\n", kt->kernel_module); fprintf(fp, "mods_installed: %d\n", kt->mods_installed); fprintf(fp, " module_tree: %s\n", kt->module_tree ? kt->module_tree : "(not used)"); fprintf(fp, " source_tree: %s\n", kt->source_tree ? kt->source_tree : "(not used)"); if (!(pc->flags & KERNEL_DEBUG_QUERY) && ACTIVE()) get_xtime(&kt->date); fprintf(fp, " date: %s\n", ctime_tz(&kt->date.tv_sec)); fprintf(fp, " boot_date: %s\n", ctime_tz(&kt->boot_date.tv_sec)); fprintf(fp, " proc_version: %s\n", strip_linefeeds(kt->proc_version)); fprintf(fp, " new_utsname: \n"); fprintf(fp, " .sysname: %s\n", uts->sysname); fprintf(fp, " .nodename: %s\n", uts->nodename); fprintf(fp, " .release: %s\n", uts->release); fprintf(fp, " .version: %s\n", uts->version); fprintf(fp, " .machine: %s\n", uts->machine); fprintf(fp, " .domainname: %s\n", uts->domainname); fprintf(fp, "kernel_version: %d.%d.%d\n", kt->kernel_version[0], kt->kernel_version[1], kt->kernel_version[2]); fprintf(fp, " gcc_version: %d.%d.%d\n", kt->gcc_version[0], kt->gcc_version[1], kt->gcc_version[2]); fprintf(fp, " BUG_bytes: %d\n", kt->BUG_bytes); fprintf(fp, " relocate: %lx", kt->relocate); if (kt->flags2 & KASLR) fprintf(fp, " (KASLR offset: %lx / %ldMB)", kt->relocate * -1, (kt->relocate * -1) >> 20); fprintf(fp, "\n runq_siblings: %d\n", kt->runq_siblings); 
fprintf(fp, " __rq_idx[NR_CPUS]: "); nr_cpus = kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS; for (i = 0; i < nr_cpus; i++) { if (!(kt->__rq_idx)) { fprintf(fp, "(unused)"); break; } fprintf(fp, "%ld ", kt->__rq_idx[i]); for (j = i, more = FALSE; j < nr_cpus; j++) { if (kt->__rq_idx[j]) more = TRUE; } if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n __cpu_idx[NR_CPUS]: "); for (i = 0; i < nr_cpus; i++) { if (!(kt->__cpu_idx)) { fprintf(fp, "(unused)"); break; } fprintf(fp, "%ld ", kt->__cpu_idx[i]); for (j = i, more = FALSE; j < nr_cpus; j++) { if (kt->__cpu_idx[j]) more = TRUE; } if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n __per_cpu_offset[NR_CPUS]:"); for (i = 0; i < nr_cpus; i++) { fprintf(fp, "%s%.*lx ", (i % 4) == 0 ? "\n " : "", LONG_PRLEN, kt->__per_cpu_offset[i]); if ((i % 4) == 0) { for (j = i, more = FALSE; j < nr_cpus; j++) { if (kt->__per_cpu_offset[j] && (kt->__per_cpu_offset[j] != kt->__per_cpu_offset[i])) more = TRUE; } } if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n cpu_flags[NR_CPUS]: "); for (i = 0; i < nr_cpus; i++) { if (!(kt->cpu_flags)) { fprintf(fp, "(unused)\n"); goto no_cpu_flags; } fprintf(fp, "%lx ", kt->cpu_flags[i]); for (j = i, more = FALSE; j < nr_cpus; j++) { if (kt->cpu_flags[j]) more = TRUE; } if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " possible cpus: "); if (cpu_map_addr("possible")) { for (i = c = 0; i < nr_cpus; i++) { if (kt->cpu_flags[i] & POSSIBLE_MAP) { fprintf(fp, "%d ", i); c++; } } fprintf(fp, "%s\n", c ? "" : "(none)"); } else fprintf(fp, "(nonexistent)\n"); fprintf(fp, " present cpus: "); if (cpu_map_addr("present")) { for (i = c = 0; i < nr_cpus; i++) { if (kt->cpu_flags[i] & PRESENT_MAP) { fprintf(fp, "%d ", i); c++; } } fprintf(fp, "%s\n", c ? 
"" : "(none)"); } else fprintf(fp, "(nonexistent)\n"); fprintf(fp, " online cpus: "); if (cpu_map_addr("online")) { for (i = c = 0; i < nr_cpus; i++) { if (kt->cpu_flags[i] & ONLINE_MAP) { fprintf(fp, "%d ", i); c++; } } fprintf(fp, "%s\n", c ? "" : "(none)"); } else fprintf(fp, "(nonexistent)\n"); fprintf(fp, " active cpus: "); if (cpu_map_addr("active")) { for (i = c = 0; i < nr_cpus; i++) { if (kt->cpu_flags[i] & ACTIVE_MAP) { fprintf(fp, "%d ", i); c++; } } fprintf(fp, "%s\n", c ? "" : "(none)"); } else fprintf(fp, "(nonexistent)\n"); no_cpu_flags: fprintf(fp, " vmcoreinfo: \n"); fprintf(fp, " log_buf_SYMBOL: %lx\n", kt->vmcoreinfo.log_buf_SYMBOL); fprintf(fp, " log_end_SYMBOL: %ld\n", kt->vmcoreinfo.log_end_SYMBOL); fprintf(fp, " log_buf_len_SYMBOL: %ld\n", kt->vmcoreinfo.log_buf_len_SYMBOL); fprintf(fp, " logged_chars_SYMBOL: %ld\n", kt->vmcoreinfo.logged_chars_SYMBOL); fprintf(fp, "log_first_idx_SYMBOL: %ld\n", kt->vmcoreinfo.log_first_idx_SYMBOL); fprintf(fp, " log_next_idx_SYMBOL: %ld\n", kt->vmcoreinfo.log_next_idx_SYMBOL); fprintf(fp, " log_SIZE: %ld\n", kt->vmcoreinfo.log_SIZE); fprintf(fp, " log_ts_nsec_OFFSET: %ld\n", kt->vmcoreinfo.log_ts_nsec_OFFSET); fprintf(fp, " log_len_OFFSET: %ld\n", kt->vmcoreinfo.log_len_OFFSET); fprintf(fp, " log_text_len_OFFSET: %ld\n", kt->vmcoreinfo.log_text_len_OFFSET); fprintf(fp, " log_dict_len_OFFSET: %ld\n", kt->vmcoreinfo.log_dict_len_OFFSET); fprintf(fp, " phys_base_SYMBOL: %lx\n", kt->vmcoreinfo.phys_base_SYMBOL); fprintf(fp, " _stext_SYMBOL: %lx\n", kt->vmcoreinfo._stext_SYMBOL); fprintf(fp, " hypervisor: %s\n", kt->hypervisor); others = 0; fprintf(fp, " xen_flags: %lx (", kt->xen_flags); if (kt->xen_flags & WRITABLE_PAGE_TABLES) fprintf(fp, "%sWRITABLE_PAGE_TABLES", others++ ? "|" : ""); if (kt->xen_flags & SHADOW_PAGE_TABLES) fprintf(fp, "%sSHADOW_PAGE_TABLES", others++ ? "|" : ""); if (kt->xen_flags & CANONICAL_PAGE_TABLES) fprintf(fp, "%sCANONICAL_PAGE_TABLES", others++ ? 
"|" : ""); if (kt->xen_flags & XEN_SUSPEND) fprintf(fp, "%sXEN_SUSPEND", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " m2p_page: %lx\n", (ulong)kt->m2p_page); fprintf(fp, "phys_to_machine_mapping: %lx\n", kt->phys_to_machine_mapping); fprintf(fp, " p2m_table_size: %ld\n", kt->p2m_table_size); fprintf(fp, " p2m_mapping_cache[%d]: %s\n", P2M_MAPPING_CACHE, verbose ? "" : "(use \"help -K\" to view cache contents)"); for (i = 0; verbose && (i < P2M_MAPPING_CACHE); i++) { if (!kt->p2m_mapping_cache[i].mapping) continue; fprintf(fp, " [%d] mapping: %lx pfn: ", i, kt->p2m_mapping_cache[i].mapping); if (PVOPS_XEN()) fprintf(fp, "%lx ", kt->p2m_mapping_cache[i].pfn); else fprintf(fp, "n/a "); fprintf(fp, "start: %lx end: %lx (%ld mfns)\n", kt->p2m_mapping_cache[i].start, kt->p2m_mapping_cache[i].end, kt->p2m_mapping_cache[i].end - kt->p2m_mapping_cache[i].start + 1); } fprintf(fp, " last_mapping_read: %lx\n", kt->last_mapping_read); fprintf(fp, " p2m_cache_index: %ld\n", kt->p2m_cache_index); fprintf(fp, " p2m_pages_searched: %ld\n", kt->p2m_pages_searched); fprintf(fp, " p2m_mfn_cache_hits: %ld ", kt->p2m_mfn_cache_hits); if (kt->p2m_pages_searched) fprintf(fp, "(%ld%%)\n", kt->p2m_mfn_cache_hits * 100 / kt->p2m_pages_searched); else fprintf(fp, "\n"); fprintf(fp, " p2m_page_cache_hits: %ld ", kt->p2m_page_cache_hits); if (kt->p2m_pages_searched) fprintf(fp, "(%ld%%)\n", kt->p2m_page_cache_hits * 100 / kt->p2m_pages_searched); else fprintf(fp, "\n"); if (!symbol_exists("xen_p2m_addr")) { fprintf(fp, " pvops_xen:\n"); fprintf(fp, " p2m_top: %lx\n", kt->pvops_xen.p2m_top); fprintf(fp, " p2m_top_entries: %d\n", kt->pvops_xen.p2m_top_entries); if (symbol_exists("p2m_mid_missing")) fprintf(fp, " p2m_mid_missing: %lx\n", kt->pvops_xen.p2m_mid_missing); fprintf(fp, " p2m_missing: %lx\n", kt->pvops_xen.p2m_missing); } } /* * Set the context to the active task on a given cpu -- dumpfiles only. 
*/ void set_cpu(int cpu) { ulong task; if (cpu >= kt->cpus) error(FATAL, "invalid cpu number: system has only %d cpu%s\n", kt->cpus, kt->cpus > 1 ? "s" : ""); if (hide_offline_cpu(cpu)) error(FATAL, "invalid cpu number: cpu %d is OFFLINE\n", cpu); task = get_active_task(cpu); /* Check if context is already set to given cpu */ if (task == CURRENT_TASK()) return; if (task) set_context(task, NO_PID, TRUE); else error(FATAL, "cannot determine active task on cpu %ld\n", cpu); show_context(CURRENT_CONTEXT()); } /* * Collect the irq_desc[] entry along with its associated handler and * action structures. */ void cmd_irq(void) { int i, c; int nr_irqs; ulong *cpus; int show_intr, choose_cpu; char buf[15]; char arg_buf[BUFSIZE]; cpus = NULL; show_intr = 0; choose_cpu = 0; while ((c = getopt(argcnt, args, "dbuasc:")) != EOF) { switch(c) { case 'd': display_idt_table(); return; case 'b': if (!kt->display_bh) { if (symbol_exists("bh_base") && symbol_exists("bh_mask") && symbol_exists("bh_active")) kt->display_bh = display_bh_1; else if (symbol_exists("bh_base") && symbol_exists("softirq_state") && symbol_exists("softirq_vec")) kt->display_bh = display_bh_2; else if (symbol_exists("bh_base") && symbol_exists("irq_stat") && symbol_exists("softirq_vec") && VALID_MEMBER(irq_cpustat_t___softirq_active) && VALID_MEMBER(irq_cpustat_t___softirq_mask)) kt->display_bh = display_bh_3; else if (get_symbol_type("softirq_vec", NULL, NULL) == TYPE_CODE_ARRAY) kt->display_bh = display_bh_4; else error(FATAL, "bottom-half option not supported\n"); } kt->display_bh(); return; case 'u': pc->curcmd_flags |= IRQ_IN_USE; if (kernel_symbol_exists("no_irq_chip")) pc->curcmd_private = (ulonglong)symbol_value("no_irq_chip"); else if (kernel_symbol_exists("no_irq_type")) pc->curcmd_private = (ulonglong)symbol_value("no_irq_type"); else error(WARNING, "irq: -u option ignored: \"no_irq_chip\" or \"no_irq_type\" symbols do not exist\n"); break; case 'a': if (!machdep->get_irq_affinity) 
option_not_supported(c); if (INVALID_MEMBER(irq_data_affinity) && INVALID_MEMBER(irq_common_data_affinity) && INVALID_MEMBER(irq_desc_t_affinity)) option_not_supported(c); if ((nr_irqs = machdep->nr_irqs) == 0) error(FATAL, "cannot determine number of IRQs\n"); fprintf(fp, "IRQ NAME AFFINITY\n"); for (i = 0; i < nr_irqs; i++) machdep->get_irq_affinity(i); return; case 's': if (!machdep->show_interrupts) option_not_supported(c); show_intr = 1; break; case 'c': if (choose_cpu) { error(INFO, "only one -c option allowed\n"); argerrs++; } else { choose_cpu = 1; BZERO(arg_buf, BUFSIZE); strcpy(arg_buf, optarg); } break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((nr_irqs = machdep->nr_irqs) == 0) error(FATAL, "cannot determine number of IRQs\n"); if (show_intr) { cpus = get_cpumask_buf(); if (choose_cpu) { make_cpumask(arg_buf, cpus, FAULT_ON_ERROR, NULL); } else { for (i = 0; i < kt->cpus; i++) SET_BIT(cpus, i); } for (i = 0; i < kt->cpus; i++) { if (NUM_IN_BITMAP(cpus, i) && hide_offline_cpu(i)) error(INFO, "CPU%d is OFFLINE.\n", i); } fprintf(fp, " "); BZERO(buf, 15); for (i = 0; i < kt->cpus; i++) { if (hide_offline_cpu(i)) continue; if (NUM_IN_BITMAP(cpus, i)) { sprintf(buf, "CPU%d", i); fprintf(fp, "%10s ", buf); } } fprintf(fp, "\n"); for (i = 0; i < nr_irqs; i++) machdep->show_interrupts(i, cpus); if (choose_cpu) FREEBUF(cpus); return; } pc->curcmd_flags &= ~HEADER_PRINTED; if (!args[optind]) { for (i = 0; i < nr_irqs; i++) machdep->dump_irq(i); return; } pc->curcmd_flags &= ~IRQ_IN_USE; while (args[optind]) { i = dtoi(args[optind], FAULT_ON_ERROR, NULL); if (i >= nr_irqs) error(FATAL, "invalid IRQ value: %d (%d max)\n", i, nr_irqs-1); machdep->dump_irq(i); optind++; } } static ulong get_irq_desc_addr(int irq) { int c; ulong cnt, addr, ptr; long len; struct list_pair *lp; addr = 0; lp = NULL; if (!VALID_STRUCT(irq_desc_t)) error(FATAL, "cannot determine size of irq_desc_t\n"); len = SIZE(irq_desc_t); if 
(symbol_exists("irq_desc"))
		addr = symbol_value("irq_desc") + (len * irq);
	else if (symbol_exists("_irq_desc"))
		addr = symbol_value("_irq_desc") + (len * irq);
	else if (symbol_exists("irq_desc_ptrs")) {
		/* Either a pointer to, or the array of, per-irq pointers. */
		if (get_symbol_type("irq_desc_ptrs", NULL, NULL) == TYPE_CODE_PTR)
			get_symbol_data("irq_desc_ptrs", sizeof(void *), &ptr);
		else
			ptr = symbol_value("irq_desc_ptrs");
		ptr += (irq * sizeof(void *));
		readmem(ptr, KVADDR, &addr, sizeof(void *),
			"irq_desc_ptrs entry", FAULT_ON_ERROR);
	} else if (kt->flags2 & IRQ_DESC_TREE_MAPLE) {
		unsigned int i;

		/* Cached upper bound: the irq cannot be in the tree. */
		if (kt->highest_irq && (irq > kt->highest_irq))
			return addr;

		cnt = do_maple_tree(symbol_value("sparse_irqs"),
			MAPLE_TREE_COUNT, NULL);
		len = sizeof(struct list_pair) * (cnt+1);
		lp = (struct list_pair *)GETBUF(len);
		lp[0].index = cnt; /* maxcount */
		cnt = do_maple_tree(symbol_value("sparse_irqs"),
			MAPLE_TREE_GATHER, lp);

		/* fix: guard an empty tree -- lp[cnt-1] below would
		 * otherwise read lp[-1] out of bounds */
		if (cnt == 0) {
			FREEBUF(lp);
			return addr;
		}

		/*
		 * NOTE: We cannot use lp.index like Radix Tree or XArray because
		 * it's not an absolute index and just counter in Maple Tree.
		 */
		if (kt->highest_irq == 0) {
			readmem((ulong)lp[cnt-1].value +
				OFFSET(irq_desc_irq_data) + OFFSET(irq_data_irq),
				KVADDR, &kt->highest_irq, sizeof(int),
				"irq_data.irq", FAULT_ON_ERROR);
		}

		/* Linear scan: match each gathered descriptor's irq number. */
		for (c = 0; c < cnt; c++) {
			readmem((ulong)lp[c].value +
				OFFSET(irq_desc_irq_data) + OFFSET(irq_data_irq),
				KVADDR, &i, sizeof(int),
				"irq_data.irq", FAULT_ON_ERROR);
			if (i == irq) {
				if (CRASHDEBUG(1))
					fprintf(fp, "index: %d value: %lx\n",
						i, (ulong)lp[c].value);
				addr = (ulong)lp[c].value;
				break;
			}
		}
		FREEBUF(lp);
	} else if (kt->flags2 & (IRQ_DESC_TREE_RADIX|IRQ_DESC_TREE_XARRAY)) {
		if (kt->highest_irq && (irq > kt->highest_irq))
			return addr;

		/* First pass: count the entries so the gather buffer
		 * can be sized exactly. */
		cnt = 0;
		switch (kt->flags2 & (IRQ_DESC_TREE_RADIX|IRQ_DESC_TREE_XARRAY))
		{
		case IRQ_DESC_TREE_RADIX:
			cnt = do_radix_tree(symbol_value("irq_desc_tree"),
				RADIX_TREE_COUNT, NULL);
			break;
		case IRQ_DESC_TREE_XARRAY:
			cnt = do_xarray(symbol_value("irq_desc_tree"),
				XARRAY_COUNT, NULL);
			break;
		}
		len = sizeof(struct list_pair) * (cnt+1);
		lp = (struct list_pair *)GETBUF(len);
lp[0].index = cnt; switch (kt->flags2 & (IRQ_DESC_TREE_RADIX|IRQ_DESC_TREE_XARRAY)) { case IRQ_DESC_TREE_RADIX: cnt = do_radix_tree(symbol_value("irq_desc_tree"), RADIX_TREE_GATHER, lp); break; case IRQ_DESC_TREE_XARRAY: cnt = do_xarray(symbol_value("irq_desc_tree"), XARRAY_GATHER, lp); break; } if (kt->highest_irq == 0) kt->highest_irq = lp[cnt-1].index; for (c = 0; c < cnt; c++) { if (lp[c].index == irq) { if (CRASHDEBUG(1)) fprintf(fp, "index: %ld value: %lx\n", lp[c].index, (ulong)lp[c].value); addr = (ulong)lp[c].value; break; } } FREEBUF(lp); } else { error(FATAL, "neither irq_desc, _irq_desc, irq_desc_ptrs, " "irq_desc_tree or sparse_irqs symbols exist\n"); } return addr; } static void display_cpu_affinity(ulong *mask) { int cpu, seq, start, count; seq = FALSE; start = 0; count = 0; for (cpu = 0; cpu < kt->cpus; ++cpu) { if (NUM_IN_BITMAP(mask, cpu)) { if (seq) continue; start = cpu; seq = TRUE; } else if (seq) { if (count) fprintf(fp, ","); if (start == cpu - 1) fprintf(fp, "%d", cpu - 1); else fprintf(fp, "%d-%d", start, cpu - 1); count++; seq = FALSE; } } if (seq) { if (count) fprintf(fp, ","); if (start == kt->cpus - 1) fprintf(fp, "%d", kt->cpus - 1); else fprintf(fp, "%d-%d", start, kt->cpus - 1); } } /* * Do the work for cmd_irq(). 
*/ void generic_dump_irq(int irq) { ulong irq_desc_addr; char buf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; int status, depth, others; ulong handler, action, value; ulong tmp1, tmp2; handler = UNINITIALIZED; action = 0; irq_desc_addr = get_irq_desc_addr(irq); if (!irq_desc_addr && symbol_exists("irq_desc_ptrs")) { if (!(pc->curcmd_flags & IRQ_IN_USE)) fprintf(fp, " IRQ: %d (unused)\n\n", irq); return; } if (irq_desc_addr) { if (VALID_MEMBER(irq_desc_t_status)) readmem(irq_desc_addr + OFFSET(irq_desc_t_status), KVADDR, &status, sizeof(int), "irq_desc status", FAULT_ON_ERROR); if (VALID_MEMBER(irq_desc_t_handler)) readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR, &handler, sizeof(long), "irq_desc handler", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_desc_t_chip)) readmem(irq_desc_addr + OFFSET(irq_desc_t_chip), KVADDR, &handler, sizeof(long), "irq_desc chip", FAULT_ON_ERROR); readmem(irq_desc_addr + OFFSET(irq_desc_t_action), KVADDR, &action, sizeof(long), "irq_desc action", FAULT_ON_ERROR); readmem(irq_desc_addr + OFFSET(irq_desc_t_depth), KVADDR, &depth, sizeof(int), "irq_desc depth", FAULT_ON_ERROR); } if (!action && (handler == (ulong)pc->curcmd_private)) return; if ((handler == UNINITIALIZED) && VALID_STRUCT(irq_data)) goto irq_desc_format_v2; if (!irq_desc_addr) { if (!(pc->curcmd_flags & IRQ_IN_USE)) fprintf(fp, " IRQ: %d (unused)\n\n", irq); return; } fprintf(fp, " IRQ: %d\n", irq); fprintf(fp, " STATUS: %x %s", status, status ? "(" : ""); others = 0; if (status & IRQ_INPROGRESS) { fprintf(fp, "IRQ_INPROGRESS"); others++; } if (status & IRQ_DISABLED) fprintf(fp, "%sIRQ_DISABLED", others++ ? "|" : ""); if (status & IRQ_PENDING) fprintf(fp, "%sIRQ_PENDING", others++ ? "|" : ""); if (status & IRQ_REPLAY) fprintf(fp, "%sIRQ_REPLAY", others++ ? "|" : ""); if (status & IRQ_AUTODETECT) fprintf(fp, "%sIRQ_AUTODETECT", others++ ? "|" : ""); if (status & IRQ_WAITING) fprintf(fp, "%sIRQ_WAITING", others++ ? 
"|" : ""); if (status & IRQ_LEVEL) fprintf(fp, "%sIRQ_LEVEL", others++ ? "|" : ""); if (status & IRQ_MASKED) fprintf(fp, "%sIRQ_MASKED", others++ ? "|" : ""); fprintf(fp, "%s\n", status ? ")" : ""); fprintf(fp, "HANDLER: "); if (value_symbol(handler)) { fprintf(fp, "%lx ", handler); pad_line(fp, VADDR_PRLEN == 8 ? VADDR_PRLEN+2 : VADDR_PRLEN-6, ' '); fprintf(fp, "<%s>\n", value_symbol(handler)); } else fprintf(fp, "%lx\n", handler); if (handler) { if (VALID_MEMBER(hw_interrupt_type_typename)) readmem(handler+OFFSET(hw_interrupt_type_typename), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type typename", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_chip_typename)) readmem(handler+OFFSET(irq_chip_typename), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type typename", FAULT_ON_ERROR); fprintf(fp, " typename: %lx ", tmp1); BZERO(buf, BUFSIZE); if (read_string(tmp1, buf, BUFSIZE-1)) fprintf(fp, "\"%s\"", buf); fprintf(fp, "\n"); if (VALID_MEMBER(hw_interrupt_type_startup)) readmem(handler+OFFSET(hw_interrupt_type_startup), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type startup", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_chip_startup)) readmem(handler+OFFSET(irq_chip_startup), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type startup", FAULT_ON_ERROR); fprintf(fp, " startup: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "startup indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); if (VALID_MEMBER(hw_interrupt_type_shutdown)) readmem(handler+OFFSET(hw_interrupt_type_shutdown), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type shutdown", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_chip_shutdown)) readmem(handler+OFFSET(irq_chip_shutdown), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type shutdown", FAULT_ON_ERROR); fprintf(fp, " shutdown: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, 
"<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "shutdown indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); if (VALID_MEMBER(hw_interrupt_type_handle)) { readmem(handler+OFFSET(hw_interrupt_type_handle), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type handle", FAULT_ON_ERROR); fprintf(fp, " handle: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "handle indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(hw_interrupt_type_enable)) readmem(handler+OFFSET(hw_interrupt_type_enable), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type enable", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_chip_enable)) readmem(handler+OFFSET(irq_chip_enable), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type enable", FAULT_ON_ERROR); fprintf(fp, " enable: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "enable indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); if (VALID_MEMBER(hw_interrupt_type_disable)) readmem(handler+OFFSET(hw_interrupt_type_disable), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type disable", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_chip_disable)) readmem(handler+OFFSET(irq_chip_disable), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type disable", FAULT_ON_ERROR); fprintf(fp, " disable: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "disable indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); if 
(VALID_MEMBER(hw_interrupt_type_ack)) { readmem(handler+OFFSET(hw_interrupt_type_ack), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type ack", FAULT_ON_ERROR); fprintf(fp, " ack: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "ack indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } else if (VALID_MEMBER(irq_chip_ack)) { readmem(handler+OFFSET(irq_chip_ack), KVADDR, &tmp1, sizeof(void *), "irq_chip ack", FAULT_ON_ERROR); fprintf(fp, " ack: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "ack indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_mask)) { readmem(handler+OFFSET(irq_chip_mask), KVADDR, &tmp1, sizeof(void *), "irq_chip mask", FAULT_ON_ERROR); fprintf(fp, " mask: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "mask indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_mask_ack)) { readmem(handler+OFFSET(irq_chip_mask_ack), KVADDR, &tmp1, sizeof(void *), "irq_chip mask_ack", FAULT_ON_ERROR); fprintf(fp, " mask_ack: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "mask_ack indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_unmask)) { readmem(handler+OFFSET(irq_chip_unmask), KVADDR, &tmp1, sizeof(void *), "irq_chip unmask", FAULT_ON_ERROR); fprintf(fp, " unmask: %lx ", tmp1); 
if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "unmask indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_eoi)) { readmem(handler+OFFSET(irq_chip_eoi), KVADDR, &tmp1, sizeof(void *), "irq_chip eoi", FAULT_ON_ERROR); fprintf(fp, " eoi: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "eoi indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(hw_interrupt_type_end)) { readmem(handler+OFFSET(hw_interrupt_type_end), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type end", FAULT_ON_ERROR); fprintf(fp, " end: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "end indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } else if (VALID_MEMBER(irq_chip_end)) { readmem(handler+OFFSET(irq_chip_end), KVADDR, &tmp1, sizeof(void *), "irq_chip end", FAULT_ON_ERROR); fprintf(fp, " end: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "end indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(hw_interrupt_type_set_affinity)) { readmem(handler+OFFSET(hw_interrupt_type_set_affinity), KVADDR, &tmp1, sizeof(void *), "hw_interrupt_type set_affinity", FAULT_ON_ERROR); fprintf(fp, " set_affinity: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), 
"set_affinity indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } else if (VALID_MEMBER(irq_chip_set_affinity)) { readmem(handler+OFFSET(irq_chip_set_affinity), KVADDR, &tmp1, sizeof(void *), "irq_chip set_affinity", FAULT_ON_ERROR); fprintf(fp, " set_affinity: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "set_affinity indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_retrigger)) { readmem(handler+OFFSET(irq_chip_retrigger), KVADDR, &tmp1, sizeof(void *), "irq_chip retrigger", FAULT_ON_ERROR); fprintf(fp, " retrigger: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "retrigger indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_set_type)) { readmem(handler+OFFSET(irq_chip_set_type), KVADDR, &tmp1, sizeof(void *), "irq_chip set_type", FAULT_ON_ERROR); fprintf(fp, " set_type: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "set_type indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } if (VALID_MEMBER(irq_chip_set_wake)) { readmem(handler+OFFSET(irq_chip_set_wake), KVADDR, &tmp1, sizeof(void *), "irq_chip set wake", FAULT_ON_ERROR); fprintf(fp, " set_wake: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "set_wake indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", 
value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); } } do_linked_action: fprintf(fp, " ACTION: "); if (value_symbol(action)) { fprintf(fp, "%lx ", action); pad_line(fp, VADDR_PRLEN == 8 ? VADDR_PRLEN+2 : VADDR_PRLEN-6, ' '); fprintf(fp, "<%s>\n", value_symbol(action)); } else if (action) fprintf(fp, "%lx\n", action); else fprintf(fp, "(none)\n"); if (action) { readmem(action+OFFSET(irqaction_handler), KVADDR, &tmp1, sizeof(void *), "irqaction handler", FAULT_ON_ERROR); fprintf(fp, " handler: %lx ", tmp1); if (is_kernel_text(tmp1)) fprintf(fp, "<%s>", value_to_symstr(tmp1, buf, 0)); else if (readmem(tmp1, KVADDR, &tmp2, sizeof(ulong), "handler indirection", RETURN_ON_ERROR|QUIET) && is_kernel_text(tmp2)) fprintf(fp, "<%s>", value_to_symstr(tmp2, buf, 0)); fprintf(fp, "\n"); readmem(action+OFFSET(irqaction_flags), KVADDR, &value, sizeof(void *), "irqaction flags", FAULT_ON_ERROR); fprintf(fp, " flags: %lx\n", value); if (VALID_MEMBER(irqaction_mask)) { readmem(action+OFFSET(irqaction_mask), KVADDR, &tmp1, sizeof(void *), "irqaction mask", FAULT_ON_ERROR); fprintf(fp, " mask: %lx\n", tmp1); } readmem(action+OFFSET(irqaction_name), KVADDR, &tmp1, sizeof(void *), "irqaction name", FAULT_ON_ERROR); fprintf(fp, " name: %lx ", tmp1); BZERO(buf, BUFSIZE); if (read_string(tmp1, buf, BUFSIZE-1)) fprintf(fp, "\"%s\"", buf); fprintf(fp, "\n"); readmem(action+OFFSET(irqaction_dev_id), KVADDR, &tmp1, sizeof(void *), "irqaction dev_id", FAULT_ON_ERROR); fprintf(fp, " dev_id: %lx\n", tmp1); readmem(action+OFFSET(irqaction_next), KVADDR, &action, sizeof(void *), "irqaction dev_id", FAULT_ON_ERROR); fprintf(fp, " next: %lx\n", action); } if (action) goto do_linked_action; fprintf(fp, " DEPTH: %d\n\n", depth); return; irq_desc_format_v2: if (!(pc->curcmd_flags & HEADER_PRINTED)) { fprintf(fp, " IRQ %s %s NAME\n", mkstring(buf1, VADDR_PRLEN, CENTER, "IRQ_DESC/_DATA"), mkstring(buf2, VADDR_PRLEN, CENTER, "IRQACTION")); pc->curcmd_flags |= HEADER_PRINTED; } if (!irq_desc_addr) { if 
(pc->curcmd_flags & IRQ_IN_USE) return; } fprintf(fp, "%s %s ", mkstring(buf1, 4, CENTER|RJUST|INT_DEC, MKSTR((ulong)irq)), irq_desc_addr ? mkstring(buf2, MAX(VADDR_PRLEN, strlen("IRQ_DESC/_DATA")), CENTER|LONG_HEX, MKSTR(irq_desc_addr)) : mkstring(buf3, MAX(VADDR_PRLEN, strlen("IRQ_DESC/_DATA")), CENTER, "(unused)")); do_linked_action_v2: fprintf(fp, "%s ", action ? mkstring(buf1, MAX(VADDR_PRLEN, strlen("IRQACTION")), CENTER|LONG_HEX, MKSTR(action)) : mkstring(buf2, MAX(VADDR_PRLEN, strlen("IRQACTION")), CENTER, "(unused)")); if (action) { readmem(action+OFFSET(irqaction_name), KVADDR, &tmp1, sizeof(void *), "irqaction name", FAULT_ON_ERROR); if (read_string(tmp1, buf, BUFSIZE-1)) fprintf(fp, "\"%s\"", buf); readmem(action+OFFSET(irqaction_next), KVADDR, &action, sizeof(void *), "irqaction next", FAULT_ON_ERROR); if (action) { fprintf(fp, "\n%s", space(4 + 2 + MAX(VADDR_PRLEN, strlen("IRQ_DESC/_DATA")) + 2)); goto do_linked_action_v2; } } fprintf(fp, "\n"); } void generic_get_irq_affinity(int irq) { ulong irq_desc_addr; long len, len_cpumask; ulong affinity_ptr; ulong *affinity; ulong tmp_addr; ulong action, name; char buf[BUFSIZE]; char name_buf[BUFSIZE]; affinity = NULL; irq_desc_addr = get_irq_desc_addr(irq); if (!irq_desc_addr) return; readmem(irq_desc_addr + OFFSET(irq_desc_t_action), KVADDR, &action, sizeof(long), "irq_desc action", FAULT_ON_ERROR); if (!action) return; len = DIV_ROUND_UP(kt->cpus, BITS_PER_LONG) * sizeof(ulong); len_cpumask = VALID_SIZE(cpumask_t) ? SIZE(cpumask_t) : 0; if (len_cpumask > 0) len = len_cpumask > len ? 
len : len_cpumask; affinity = (ulong *)GETBUF(len); if (VALID_MEMBER(irq_common_data_affinity)) tmp_addr = irq_desc_addr + OFFSET(irq_desc_irq_common_data) + OFFSET(irq_common_data_affinity); else if (VALID_MEMBER(irq_data_affinity)) tmp_addr = irq_desc_addr + \ OFFSET(irq_data_affinity); else tmp_addr = irq_desc_addr + \ OFFSET(irq_desc_t_affinity); if (symbol_exists("alloc_cpumask_var_node") || symbol_exists("alloc_cpumask_var")) /* pointer member */ readmem(tmp_addr,KVADDR, &affinity_ptr, sizeof(ulong), "irq_desc affinity", FAULT_ON_ERROR); else /* array member */ affinity_ptr = tmp_addr; readmem(affinity_ptr, KVADDR, affinity, len, "irq_desc affinity", FAULT_ON_ERROR); fprintf(fp, "%3d ", irq); BZERO(name_buf, BUFSIZE); while (action) { readmem(action+OFFSET(irqaction_name), KVADDR, &name, sizeof(void *), "irqaction name", FAULT_ON_ERROR); BZERO(buf, BUFSIZE); if (read_string(name, buf, BUFSIZE-1)) { if (strlen(name_buf) != 0) strcat(name_buf, ","); strcat(name_buf, buf); } readmem(action+OFFSET(irqaction_next), KVADDR, &action, sizeof(void *), "irqaction dev_id", FAULT_ON_ERROR); } fprintf(fp, "%-20s ", name_buf); display_cpu_affinity(affinity); fprintf(fp, "\n"); FREEBUF(affinity); } void generic_show_interrupts(int irq, ulong *cpus) { int i; ulong irq_desc_addr; ulong handler, action, name; uint kstat_irq; uint kstat_irqs[kt->cpus]; ulong kstat_irqs_ptr; struct syment *percpu_sp; ulong tmp, tmp1; char buf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char name_buf[BUFSIZE]; handler = UNINITIALIZED; irq_desc_addr = get_irq_desc_addr(irq); if (!irq_desc_addr) return; readmem(irq_desc_addr + OFFSET(irq_desc_t_action), KVADDR, &action, sizeof(long), "irq_desc action", FAULT_ON_ERROR); if (!action) return; if (!symbol_exists("kstat_irqs_cpu")) { /* for RHEL5 or earlier */ if (!(percpu_sp = per_cpu_symbol_search("kstat"))) return; for (i = 0; i < kt->cpus; i++) { if (!(NUM_IN_BITMAP(cpus, i))) continue; tmp = percpu_sp->value + kt->__per_cpu_offset[i]; 
readmem(tmp + OFFSET(kernel_stat_irqs) + sizeof(uint) * irq, KVADDR, &kstat_irq, sizeof(uint), "kernel_stat irqs", FAULT_ON_ERROR); kstat_irqs[i] = kstat_irq; } } else { readmem(irq_desc_addr + OFFSET(irq_desc_t_kstat_irqs), KVADDR, &kstat_irqs_ptr, sizeof(long), "irq_desc kstat_irqs", FAULT_ON_ERROR); if (THIS_KERNEL_VERSION > LINUX(2,6,37)) { for (i = 0; i < kt->cpus; i++) { if (!(NUM_IN_BITMAP(cpus, i))) continue; tmp = kstat_irqs_ptr + kt->__per_cpu_offset[i]; readmem(tmp, KVADDR, &kstat_irq, sizeof(uint), "kernel_stat irqs", FAULT_ON_ERROR); kstat_irqs[i] = kstat_irq; } } else readmem(kstat_irqs_ptr, KVADDR, kstat_irqs, sizeof(kstat_irqs), "kstat_irqs", FAULT_ON_ERROR); } if (VALID_MEMBER(irq_desc_t_handler)) readmem(irq_desc_addr + OFFSET(irq_desc_t_handler), KVADDR, &handler, sizeof(long), "irq_desc handler", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_desc_t_chip)) readmem(irq_desc_addr + OFFSET(irq_desc_t_chip), KVADDR, &handler, sizeof(long), "irq_desc chip", FAULT_ON_ERROR); else if (VALID_MEMBER(irq_data_chip)) { tmp = irq_desc_addr + OFFSET(irq_data_chip); if (VALID_MEMBER(irq_desc_irq_data)) tmp += OFFSET(irq_desc_irq_data); readmem(tmp, KVADDR, &handler, sizeof(long), "irq_data chip", FAULT_ON_ERROR); } fprintf(fp, "%3d: ", irq); for (i = 0; i < kt->cpus; i++) { if (hide_offline_cpu(i)) continue; if (NUM_IN_BITMAP(cpus, i)) fprintf(fp, "%10u ", kstat_irqs[i]); } if (handler != UNINITIALIZED) { if (VALID_MEMBER(hw_interrupt_type_typename)) { readmem(handler+OFFSET(hw_interrupt_type_typename), KVADDR, &tmp, sizeof(void *), "hw_interrupt_type typename", FAULT_ON_ERROR); BZERO(buf, BUFSIZE); if (read_string(tmp, buf, BUFSIZE-1)) fprintf(fp, "%14s", buf); } else if (VALID_MEMBER(irq_chip_typename)) { readmem(handler+OFFSET(irq_chip_typename), KVADDR, &tmp, sizeof(void *), "hw_interrupt_type typename", FAULT_ON_ERROR); BZERO(buf, BUFSIZE); if (read_string(tmp, buf, BUFSIZE-1)) fprintf(fp, "%8s", buf); BZERO(buf1, BUFSIZE); if (VALID_MEMBER(irq_desc_t_name)) 
readmem(irq_desc_addr+OFFSET(irq_desc_t_name), KVADDR, &tmp1, sizeof(void *), "irq_desc name", FAULT_ON_ERROR); if (read_string(tmp1, buf1, BUFSIZE-1)) fprintf(fp, "-%-8s", buf1); } } BZERO(name_buf, BUFSIZE); while (action) { readmem(action+OFFSET(irqaction_name), KVADDR, &name, sizeof(void *), "irqaction name", FAULT_ON_ERROR); BZERO(buf2, BUFSIZE); if (read_string(name, buf2, BUFSIZE-1)) { if (strlen(name_buf) != 0) strcat(name_buf, ","); strcat(name_buf, buf2); } readmem(action+OFFSET(irqaction_next), KVADDR, &action, sizeof(void *), "irqaction dev_id", FAULT_ON_ERROR); } fprintf(fp, " %s\n", name_buf); } /* * Dump the earlier 2.2 Linux version's bottom-half essentials. */ static void display_bh_1(void) { int i; ulong bh_mask, bh_active; ulong bh_base[32]; char buf[BUFSIZE]; get_symbol_data("bh_mask", sizeof(ulong), &bh_mask); get_symbol_data("bh_active", sizeof(ulong), &bh_active); readmem(symbol_value("bh_base"), KVADDR, bh_base, sizeof(void *) * 32, "bh_base[32]", FAULT_ON_ERROR); fprintf(fp, "BH_MASK BH_ACTIVE\n"); fprintf(fp, "%08lx %08lx\n", bh_mask, bh_active); fprintf(fp, "\nBH_BASE %s\n", mkstring(buf, VADDR_PRLEN, CENTER|LJUST, "FUNCTION")); for (i = 0; i < 32; i++) { if (!bh_base[i]) continue; fprintf(fp, " %2d %lx <%s>\n", i, bh_base[i], value_to_symstr(bh_base[i], buf, 0)); } } /* * Dump the 2.3-ish Linux version's bottom half essentials. 
*/
static void
display_bh_2(void)
{
	int i;
	ulong bh_base[32];
	/* stack mirror of the old kernel's per-cpu softirq_state layout */
	struct softirq_state {
		uint32_t active;
		uint32_t mask;
	} softirq_state;
	/* stack mirror of struct softirq_action: handler plus opaque data */
	struct softirq_action {
		void *action;
		void *data;
	} softirq_vec[32];
	char buf[BUFSIZE];

	/* pull both 32-entry handler tables out of the dumpfile up front */
	readmem(symbol_value("bh_base"), KVADDR, bh_base,
		sizeof(void *) * 32, "bh_base[32]", FAULT_ON_ERROR);

	readmem(symbol_value("softirq_vec"), KVADDR, softirq_vec,
		sizeof(struct softirq_action) * 32, "softirq_vec[32]",
		FAULT_ON_ERROR);

	/* per-cpu mask/active words from the kernel's softirq_state[] array */
	fprintf(fp, "CPU MASK ACTIVE\n");
	for (i = 0; i < kt->cpus; i++) {
		readmem(symbol_value("softirq_state") +
			(i * SIZE(softirq_state)), KVADDR, &softirq_state,
			sizeof(struct softirq_state), "softirq_state",
			FAULT_ON_ERROR);
		fprintf(fp, " %-2d %08x %08x\n",
			i, softirq_state.mask, softirq_state.active);
	}

	/* registered softirq vectors, skipping empty slots */
	fprintf(fp, "\nVEC %s\n",
		mkstring(buf, VADDR_PRLEN, CENTER|LJUST, "ACTION"));
	for (i = 0; i < 32; i++) {
		if (!softirq_vec[i].action)
			continue;
		fprintf(fp, " %-2d %lx <%s>\n", i,
			(ulong)softirq_vec[i].action,
			value_to_symstr((ulong)softirq_vec[i].action, buf, 0));
	}

	/* legacy bottom-half handlers, skipping empty slots */
	fprintf(fp, "\nBH_BASE %s\n",
		mkstring(buf, VADDR_PRLEN, CENTER|LJUST, "FUNCTION"));
	for (i = 0; i < 32; i++) {
		if (!bh_base[i])
			continue;
		fprintf(fp, " %2d %lx <%s>\n", i, bh_base[i],
			value_to_symstr(bh_base[i], buf, 0));
	}
}

/*
 * Dump the 2.4 Linux version's bottom half essentials.
*/
static void
display_bh_3(void)
{
	int i;
	ulong bh_base[32];
	/* stack mirror of struct softirq_action: handler plus opaque data */
	struct softirq_action {
		void *action;
		void *data;
	} softirq_vec[32];
	char buf[BUFSIZE];
	uint active, mask;
	ulong function;

	/* pull both 32-entry handler tables out of the dumpfile up front */
	readmem(symbol_value("bh_base"), KVADDR, bh_base,
		sizeof(void *) * 32, "bh_base[32]", FAULT_ON_ERROR);

	readmem(symbol_value("softirq_vec"), KVADDR, softirq_vec,
		sizeof(struct softirq_action) * 32, "softirq_vec[32]",
		FAULT_ON_ERROR);

	/* this era keeps the mask/active words in the per-cpu irq_stat[] */
	fprintf(fp, "CPU MASK ACTIVE\n");
	for (i = 0; i < kt->cpus; i++) {
		readmem(symbol_value("irq_stat") +
			(i * SIZE(irq_cpustat_t)) +
			OFFSET(irq_cpustat_t___softirq_active), KVADDR,
			&active, sizeof(uint), "__softirq_active",
			FAULT_ON_ERROR);
		readmem(symbol_value("irq_stat") +
			(i * SIZE(irq_cpustat_t)) +
			OFFSET(irq_cpustat_t___softirq_mask), KVADDR,
			&mask, sizeof(uint), "__softirq_mask",
			FAULT_ON_ERROR);
		fprintf(fp, " %-2d %08x %08x\n", i, mask, active);
	}

	/*
	 * Registered softirq vectors: if the action address is not kernel
	 * text, try one level of indirection (e.g. a function descriptor)
	 * before giving up on symbolizing it.
	 */
	fprintf(fp, "\nVEC %s\n",
		mkstring(buf, VADDR_PRLEN, CENTER|LJUST, "ACTION"));
	for (i = 0; i < 32; i++) {
		if (!softirq_vec[i].action)
			continue;
		fprintf(fp, " %-2d %lx ", i, (ulong)softirq_vec[i].action);
		if (is_kernel_text((ulong)softirq_vec[i].action))
			fprintf(fp, "<%s>",
				value_to_symstr((ulong)softirq_vec[i].action,
				buf, 0));
		else if (readmem((ulong)softirq_vec[i].action, KVADDR,
			&function, sizeof(ulong), "action indirection",
			RETURN_ON_ERROR|QUIET) && is_kernel_text(function))
			fprintf(fp, "<%s>",
				value_to_symstr(function, buf, 0));
		fprintf(fp, "\n");
	}

	/* legacy bottom-half handlers, with the same indirection fallback */
	fprintf(fp, "\nBH_BASE %s\n",
		mkstring(buf, VADDR_PRLEN, CENTER|LJUST, "FUNCTION"));
	for (i = 0; i < 32; i++) {
		if (!bh_base[i])
			continue;
		fprintf(fp, " %2d %lx ", i, bh_base[i]);
		if (is_kernel_text(bh_base[i]))
			fprintf(fp, "<%s>",
				value_to_symstr(bh_base[i], buf, 0));
		else if (readmem(bh_base[i], KVADDR, &function,
			sizeof(ulong), "bh_base indirection",
			RETURN_ON_ERROR|QUIET) && is_kernel_text(function))
			fprintf(fp, "<%s>",
				value_to_symstr(function, buf, 0));
		fprintf(fp, "\n");
	}
}

/*
 * Dump the 2.6 Linux version's bottom half essentials.
*/ static void display_bh_4(void) { int i, len; char buf[BUFSIZE]; char *array; ulong *p; struct load_module *lm; if (!(len = get_array_length("softirq_vec", NULL, 0))) error(FATAL, "cannot determine softirq_vec array length\n"); fprintf(fp, "SOFTIRQ_VEC %s\n", mkstring(buf, VADDR_PRLEN, CENTER|RJUST, "ACTION")); array = GETBUF(SIZE(softirq_action) * (len+1)); readmem(symbol_value("softirq_vec"), KVADDR, array, SIZE(softirq_action) * len, "softirq_vec", FAULT_ON_ERROR); for (i = 0, p = (ulong *)array; i < len; i++, p++) { if (*p) { fprintf(fp, " [%d]%s %s <%s>", i, i < 10 ? space(4) : space(3), mkstring(buf, VADDR_PRLEN, LONG_HEX|CENTER|RJUST, MKSTR(*p)), value_symbol(*p)); if (module_symbol(*p, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); } if (SIZE(softirq_action) == (sizeof(void *)*2)) p++; } FREEBUF(array); } /* * Dump the entries in the old- and new-style timer queues in * chronological order. */ void cmd_timer(void) { int c; int rflag; char *cpuspec; ulong *cpus = NULL; rflag = 0; while ((c = getopt(argcnt, args, "rC:")) != EOF) { switch(c) { case 'r': rflag = 1; break; case 'C': cpuspec = optarg; cpus = get_cpumask_buf(); make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (rflag) dump_hrtimer_data(cpus); else dump_timer_data(cpus); if (cpus) FREEBUF(cpus); } static void dump_hrtimer_data(const ulong *cpus) { int i, j, k = 0; int hrtimer_max_clock_bases, max_hrtimer_bases; struct syment * hrtimer_bases; hrtimer_max_clock_bases = 0; max_hrtimer_bases = 0; /* * deside whether hrtimer is available and * set hrtimer_max_clock_bases or max_hrtimer_bases. * if both are not available, hrtimer is not available. 
*/ if (VALID_STRUCT(hrtimer_clock_base)) { hrtimer_max_clock_bases = 2; if (symbol_exists("ktime_get_boottime")) hrtimer_max_clock_bases = MEMBER_SIZE("hrtimer_cpu_base", "clock_base") / SIZE(hrtimer_clock_base); } else if (VALID_STRUCT(hrtimer_base)) { max_hrtimer_bases = 2; } else option_not_supported('r'); hrtimer_bases = per_cpu_symbol_search("hrtimer_bases"); for (i = 0; i < kt->cpus; i++) { if (cpus && !NUM_IN_BITMAP(cpus, i)) continue; if (k++) fprintf(fp, "\n"); if (hide_offline_cpu(i)) { fprintf(fp, "CPU: %d [OFFLINE]\n", i); continue; } fprintf(fp, "CPU: %d ", i); if (VALID_STRUCT(hrtimer_clock_base)) { fprintf(fp, "HRTIMER_CPU_BASE: %lx\n", (ulong)(hrtimer_bases->value + kt->__per_cpu_offset[i])); for (j = 0; j < hrtimer_max_clock_bases; j++) { if (j) fprintf(fp, "\n"); dump_hrtimer_clock_base( (void *)(hrtimer_bases->value) + kt->__per_cpu_offset[i], j); } } else { fprintf(fp, "\n"); for (j = 0; j < max_hrtimer_bases; j++) { if (j) fprintf(fp, "\n"); dump_hrtimer_base( (void *)(hrtimer_bases->value) + kt->__per_cpu_offset[i], j); } } } } static int expires_len = -1; static int softexpires_len = -1; static int tte_len = -1; static void dump_hrtimer_clock_base(const void *hrtimer_bases, const int num) { void *base; ulonglong current_time, now; ulonglong offset; ulong get_time; char buf[BUFSIZE]; base = (void *)hrtimer_bases + OFFSET(hrtimer_cpu_base_clock_base) + SIZE(hrtimer_clock_base) * num; readmem((ulong)(base + OFFSET(hrtimer_clock_base_get_time)), KVADDR, &get_time, sizeof(get_time), "hrtimer_clock_base get_time", FAULT_ON_ERROR); fprintf(fp, " CLOCK: %d HRTIMER_CLOCK_BASE: %lx [%s]\n", num, (ulong)base, value_to_symstr(get_time, buf, 0)); /* get current time(uptime) */ get_uptime(NULL, ¤t_time); offset = 0; if (VALID_MEMBER(hrtimer_clock_base_offset)) offset = ktime_to_ns(base + OFFSET(hrtimer_clock_base_offset)); now = current_time * (1000000000LL / machdep->hz) + offset; dump_active_timers(base, now); } static void dump_hrtimer_base(const void 
*hrtimer_bases, const int num) { void *base; ulonglong current_time, now; ulong get_time; char buf[BUFSIZE]; base = (void *)hrtimer_bases + SIZE(hrtimer_base) * num; readmem((ulong)(base + OFFSET(hrtimer_base_get_time)), KVADDR, &get_time, sizeof(get_time), "hrtimer_base get_time", FAULT_ON_ERROR); fprintf(fp, " CLOCK: %d HRTIMER_BASE: %lx [%s]\n", num, (ulong)base, value_to_symstr(get_time, buf, 0)); /* get current time(uptime) */ get_uptime(NULL, ¤t_time); now = current_time * (1000000000LL / machdep->hz); dump_active_timers(base, now); } static void dump_active_timers(const void *base, ulonglong now) { int next, i, t; struct rb_node *curr; int timer_cnt; ulong *timer_list; void *timer; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; next = 0; timer_list = 0; /* search hrtimers */ hq_open(); timer_cnt = 0; next_one: i = 0; /* get the first node */ if (VALID_MEMBER(hrtimer_base_pending)) readmem((ulong)(base + OFFSET(hrtimer_base_pending) - OFFSET(hrtimer_list) + OFFSET(hrtimer_node)), KVADDR, &curr, sizeof(curr), "hrtimer_base pending", FAULT_ON_ERROR); else if (VALID_MEMBER(hrtimer_base_first)) readmem((ulong)(base + OFFSET(hrtimer_base_first)), KVADDR, &curr, sizeof(curr), "hrtimer_base first", FAULT_ON_ERROR); else if (VALID_MEMBER(hrtimer_clock_base_first)) readmem((ulong)(base + OFFSET(hrtimer_clock_base_first)), KVADDR, &curr, sizeof(curr), "hrtimer_clock_base first", FAULT_ON_ERROR); else if (VALID_MEMBER(timerqueue_head_next)) readmem((ulong)(base + OFFSET(hrtimer_clock_base_active) + OFFSET(timerqueue_head_next)), KVADDR, &curr, sizeof(curr), "hrtimer_clock base", FAULT_ON_ERROR); else readmem((ulong)(base + OFFSET(hrtimer_clock_base_active) + OFFSET(timerqueue_head_rb_root) + OFFSET(rb_root_cached_rb_leftmost)), KVADDR, &curr, sizeof(curr), "hrtimer_clock_base active", FAULT_ON_ERROR); while (curr && i < next) { curr = rb_next(curr); i++; } if (curr) { if (!hq_enter((ulong)curr)) { error(INFO, 
"duplicate rb_node: %lx\n", curr); return; } timer_cnt++; next++; goto next_one; } if (timer_cnt) { timer_list = (ulong *)GETBUF(timer_cnt * sizeof(long)); timer_cnt = retrieve_list(timer_list, timer_cnt); } hq_close(); if (!timer_cnt) { fprintf(fp, " (empty)\n"); return; } /* dump hrtimers */ /* print header */ expires_len = get_expires_len(timer_cnt, timer_list, 0, 0); if (expires_len < 7) expires_len = 7; softexpires_len = get_expires_len(timer_cnt, timer_list, 0, 1); tte_len = get_expires_len(timer_cnt, timer_list, now, 2); if (softexpires_len > -1) { if (softexpires_len < 11) softexpires_len = 11; fprintf(fp, " %s\n", mkstring(buf1, softexpires_len, CENTER|RJUST, "CURRENT")); sprintf(buf1, "%lld", now); fprintf(fp, " %s\n", mkstring(buf1, softexpires_len, CENTER|RJUST, NULL)); fprintf(fp, " %s %s %s %s %s\n", mkstring(buf1, softexpires_len, CENTER|RJUST, "SOFTEXPIRES"), mkstring(buf2, expires_len, CENTER|RJUST, "EXPIRES"), mkstring(buf5, tte_len, CENTER|RJUST, "TTE"), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "HRTIMER"), mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, "FUNCTION")); } else { fprintf(fp, " %s\n", mkstring(buf1, expires_len, CENTER|RJUST, "CURRENT")); sprintf(buf1, "%lld", now); fprintf(fp, " %s\n", mkstring(buf1, expires_len, CENTER|RJUST, NULL)); fprintf(fp, " %s %s %s %s\n", mkstring(buf1, expires_len, CENTER|RJUST, "EXPIRES"), mkstring(buf5, tte_len, CENTER|RJUST, "TTE"), mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "HRTIMER"), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "FUNCTION")); } /* print timers */ for (t = 0; t < timer_cnt; t++) { if (VALID_MEMBER(timerqueue_node_node)) timer = (void *)(timer_list[t] - OFFSET(timerqueue_node_node) - OFFSET(hrtimer_node)); else timer = (void *)(timer_list[t] - OFFSET(hrtimer_node)); print_timer(timer, now); } } static int get_expires_len(const int timer_cnt, const ulong *timer_list, ulonglong now, const int getsoft) { void *last_timer; char buf[BUFSIZE]; ulonglong softexpires, expires; int len; len = -1; if 
(!timer_cnt) return len; if (VALID_MEMBER(timerqueue_node_node)) last_timer = (void *)(timer_list[timer_cnt - 1] - OFFSET(timerqueue_node_node) - OFFSET(hrtimer_node)); else last_timer = (void *)(timer_list[timer_cnt -1] - OFFSET(hrtimer_node)); if (getsoft == 1) { /* soft expires exist*/ if (VALID_MEMBER(hrtimer_softexpires)) { softexpires = ktime_to_ns(last_timer + OFFSET(hrtimer_softexpires)); sprintf(buf, "%lld", softexpires); len = strlen(buf); } } else { if (VALID_MEMBER(hrtimer_expires)) expires = ktime_to_ns(last_timer + OFFSET(hrtimer_expires)); else expires = ktime_to_ns(last_timer + OFFSET(hrtimer_node) + OFFSET(timerqueue_node_expires)); sprintf(buf, "%lld", getsoft ? expires - now : expires); len = strlen(buf); } return len; } /* * print hrtimer and its related information */ static void print_timer(const void *timer, ulonglong now) { ulonglong softexpires, expires, tte; ulong function; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; /* align information */ fprintf(fp, " "); if (!accessible((ulong)timer)) { fprintf(fp, "(destroyed timer)\n"); return; } if (VALID_MEMBER(hrtimer_expires)) expires = ktime_to_ns(timer + OFFSET(hrtimer_expires)); else expires = ktime_to_ns(timer + OFFSET(hrtimer_node) + OFFSET(timerqueue_node_expires)); if (VALID_MEMBER(hrtimer_softexpires)) { softexpires = ktime_to_ns(timer + OFFSET(hrtimer_softexpires)); sprintf(buf1, "%lld-%lld", softexpires, expires); } if (VALID_MEMBER(hrtimer_softexpires)) { softexpires = ktime_to_ns(timer + OFFSET(hrtimer_softexpires)); sprintf(buf1, "%lld", softexpires); fprintf(fp, "%s ", mkstring(buf2, softexpires_len, CENTER|RJUST, buf1)); } sprintf(buf1, "%lld", expires); fprintf(fp, "%s ", mkstring(buf2, expires_len, CENTER|RJUST, buf1)); tte = expires - now; fprintf(fp, "%s ", mkstring(buf4, tte_len, SLONG_DEC|RJUST, MKSTR((ulong)tte))); fprintf(fp, "%lx ", (ulong)timer); if (readmem((ulong)(timer + OFFSET(hrtimer_function)), KVADDR, &function, sizeof(function), 
"hrtimer function", QUIET|RETURN_ON_ERROR)) { fprintf(fp, "%lx ", function); fprintf(fp ,"<%s>", value_to_symstr(function, buf3, 0)); } fprintf(fp, "\n"); } /* * convert ktime to ns, only need the address of ktime */ static ulonglong ktime_to_ns(const void *ktime) { ulonglong ns; ns = 0; if (!accessible((ulong)ktime)) return ns; if (VALID_MEMBER(ktime_t_tv64)) { readmem((ulong)ktime + OFFSET(ktime_t_tv64), KVADDR, &ns, sizeof(ns), "ktime_t tv64", QUIET|RETURN_ON_ERROR); } else if (VALID_MEMBER(ktime_t_sec) && VALID_MEMBER(ktime_t_nsec)) { uint32_t sec, nsec; sec = 0; nsec = 0; readmem((ulong)ktime + OFFSET(ktime_t_sec), KVADDR, &sec, sizeof(sec), "ktime_t sec", QUIET|RETURN_ON_ERROR); readmem((ulong)ktime + OFFSET(ktime_t_nsec), KVADDR, &nsec, sizeof(nsec), "ktime_t nsec", QUIET|RETURN_ON_ERROR); ns = sec * 1000000000L + nsec; } else { readmem((ulong)ktime, KVADDR, &ns, sizeof(ns), "ktime_t", QUIET|RETURN_ON_ERROR); } return ns; } /* * Display the pending timer queue entries, both the old and new-style. 
*/ struct timer_data { ulong address; ulong expires; ulong function; long tte; }; struct tv_range { ulong base; ulong end; }; #define TVN (6) static void dump_timer_data(const ulong *cpus) { int i; ulong timer_active; struct timer_struct { unsigned long expires; void *fn; } timer_table[32]; char buf[BUFSIZE]; char buf1[BUFSIZE]; char buf4[BUFSIZE]; struct timer_struct *tp; ulong mask, highest, highest_tte, function; ulong jiffies, timer_jiffies; ulong *vec; long count; int vec_root_size, vec_size; struct timer_data *td; int flen, tlen, tdx, old_timers_exist; struct tv_range tv[TVN]; if (kt->flags2 & TIMER_BASES) { dump_timer_data_timer_bases(cpus); return; } else if (kt->flags2 & TVEC_BASES_V3) { dump_timer_data_tvec_bases_v3(cpus); return; } else if (kt->flags & TVEC_BASES_V2) { dump_timer_data_tvec_bases_v2(cpus); return; } else if (kt->flags & TVEC_BASES_V1) { dump_timer_data_tvec_bases_v1(cpus); return; } BZERO(tv, sizeof(struct tv_range) * TVN); vec_root_size = (i = ARRAY_LENGTH(timer_vec_root_vec)) ? i : get_array_length("timer_vec_root.vec", NULL, SIZE(list_head)); vec_size = (i = ARRAY_LENGTH(timer_vec_vec)) ? i : get_array_length("timer_vec.vec", NULL, SIZE(list_head)); vec = (ulong *)GETBUF(SIZE(list_head) * MAX(vec_root_size, vec_size)); if (symbol_exists("timer_active") && symbol_exists("timer_table")) { get_symbol_data("timer_active", sizeof(ulong), &timer_active); readmem(symbol_value("timer_table"), KVADDR, &timer_table, sizeof(struct timer_struct) * 32, "timer_table[32]", FAULT_ON_ERROR); old_timers_exist = TRUE; } else old_timers_exist = FALSE; /* * Get rough count first, and then gather a bunch of timer_data * structs to stuff in a sortable array. 
*/ count = 0; for (mask = 1, tp = timer_table+0; old_timers_exist && mask; tp++, mask += mask) { if (mask > timer_active) break; if (!(mask & timer_active)) continue; count++; } init_tv_ranges(tv, vec_root_size, vec_size, 0); count += do_timer_list(symbol_value("tv1") + OFFSET(timer_vec_root_vec), vec_root_size, vec, NULL, NULL, NULL, tv, 0); count += do_timer_list(symbol_value("tv2") + OFFSET(timer_vec_vec), vec_size, vec, NULL, NULL, NULL, tv, 0); count += do_timer_list(symbol_value("tv3") + OFFSET(timer_vec_vec), vec_size, vec, NULL, NULL, NULL, tv, 0); count += do_timer_list(symbol_value("tv4") + OFFSET(timer_vec_vec), vec_size, vec, NULL, NULL, NULL, tv, 0); count += do_timer_list(symbol_value("tv4") + OFFSET(timer_vec_vec), vec_size, vec, NULL, NULL, NULL, tv, 0); td = (struct timer_data *) GETBUF((count*2) * sizeof(struct timer_data)); tdx = 0; get_symbol_data("jiffies", sizeof(ulong), &jiffies); get_symbol_data("timer_jiffies", sizeof(ulong), &timer_jiffies); if (old_timers_exist) get_symbol_data("timer_active", sizeof(ulong), &timer_active); highest = 0; highest_tte = 0; for (i = 0, mask = 1, tp = timer_table+0; old_timers_exist && mask; i++, tp++, mask += mask) { if (mask > timer_active) break; if (!(mask & timer_active)) continue; td[tdx].address = i; td[tdx].expires = tp->expires; td[tdx].function = (ulong)tp->fn; td[tdx].tte = tp->expires - jiffies; if (td[tdx].expires > highest) highest = td[tdx].expires; if (abs(td[tdx].tte) > highest_tte) highest_tte = abs(td[tdx].tte); tdx++; } do_timer_list(symbol_value("tv1") + OFFSET(timer_vec_root_vec), vec_root_size, vec, (void *)td, &highest, &highest_tte, tv, jiffies); do_timer_list(symbol_value("tv2") + OFFSET(timer_vec_vec), vec_size, vec, (void *)td, &highest, &highest_tte, tv, jiffies); do_timer_list(symbol_value("tv3") + OFFSET(timer_vec_vec), vec_size, vec, (void *)td, &highest, &highest_tte, tv, jiffies); do_timer_list(symbol_value("tv4") + OFFSET(timer_vec_vec), vec_size, vec, (void *)td, &highest, 
&highest_tte, tv, jiffies); tdx = do_timer_list(symbol_value("tv5") + OFFSET(timer_vec_vec), vec_size, vec, (void *)td, &highest, &highest_tte, tv, jiffies); qsort(td, tdx, sizeof(struct timer_data), compare_timer_data); /* * Because the jiffies values can fluctuate wildly from dump to * dump, try to use the appropriate amount of space... */ sprintf(buf, "%ld", highest); flen = MAX(strlen(buf), strlen("JIFFIES")); fprintf(fp, "%s\n", mkstring(buf, flen, CENTER|LJUST, "JIFFIES")); fprintf(fp, "%s\n", mkstring(buf, flen, RJUST|LONG_DEC,MKSTR(jiffies))); /* +1 accounts possible "-" sign */ sprintf(buf4, "%ld", highest_tte); tlen = MAX(strlen(buf4) + 1, strlen("TTE")); fprintf(fp, "%s %s TIMER_LIST/TABLE FUNCTION\n", mkstring(buf, flen, CENTER|LJUST, "EXPIRES"), mkstring(buf4, tlen, CENTER|LJUST, "TTE")); for (i = 0; i < tdx; i++) { fprintf(fp, "%s", mkstring(buf, flen, RJUST|LONG_DEC, MKSTR(td[i].expires))); fprintf(fp, " %s", mkstring(buf4, tlen, RJUST|SLONG_DEC, MKSTR(td[i].tte))); if (td[i].address < 32) { sprintf(buf, "timer_table[%ld]", td[i].address); fprintf(fp, " %s ", mkstring(buf, 16, CENTER|LJUST, NULL)); } else { mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(td[i].address)); fprintf(fp, " %s ", mkstring(buf, 16, CENTER, buf1)); } if (is_kernel_text(td[i].function)) fprintf(fp, "%s <%s>\n", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(td[i].function)), value_to_symstr(td[i].function, buf, 0)); else { fprintf(fp, "%s ", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(td[i].function))); if (readmem(td[i].function, KVADDR, &function, sizeof(ulong), "timer function", RETURN_ON_ERROR|QUIET)) { if (is_kernel_text(function)) fprintf(fp, "<%s>", value_to_symstr(function, buf, 0)); } fprintf(fp, "\n"); } } } /* * Newer per-cpu timers, using "tvec_bases". 
*/
static void
dump_timer_data_tvec_bases_v1(const ulong *cpus)
{
	int i, cpu, tdx, flen, tlen;
	struct timer_data *td;
	int vec_root_size, vec_size;
	struct tv_range tv[TVN];
	ulong *vec, jiffies, highest, highest_tte, function;
	long count;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

	/* */
	vec_root_size = (i = ARRAY_LENGTH(tvec_root_s_vec)) ?
		i : get_array_length("tvec_root_s.vec", NULL,
		SIZE(list_head));
	vec_size = (i = ARRAY_LENGTH(tvec_s_vec)) ?
		i : get_array_length("tvec_s.vec", NULL, SIZE(list_head));
	vec = (ulong *)GETBUF(SIZE(list_head) * MAX(vec_root_size, vec_size));

	cpu = 0;

	/* one pass per cpu, restricted by the optional cpu mask */
next_cpu:
	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
		if (++cpu < kt->cpus)
			goto next_cpu;
		return;
	}

	count = 0;
	td = (struct timer_data *)NULL;

	BZERO(tv, sizeof(struct tv_range) * TVN);

	init_tv_ranges(tv, vec_root_size, vec_size, cpu);

	/* first pass: count entries in this cpu's tv1..tv5 vectors */
	count += do_timer_list(tv[1].base + OFFSET(tvec_root_s_vec),
		vec_root_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[2].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[3].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[4].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[5].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);

	/* allocate with 2x slack before the gathering pass */
	if (count)
		td = (struct timer_data *)
			GETBUF((count*2) * sizeof(struct timer_data));
	tdx = 0;
	highest = 0;
	highest_tte = 0;
	get_symbol_data("jiffies", sizeof(ulong), &jiffies);

	/* second pass: fill td[]; the final call returns the entry count */
	do_timer_list(tv[1].base + OFFSET(tvec_root_s_vec),
		vec_root_size, vec, (void *)td, &highest, &highest_tte,
		tv, jiffies);
	do_timer_list(tv[2].base + OFFSET(tvec_s_vec),
		vec_size, vec, (void *)td, &highest, &highest_tte,
		tv, jiffies);
	do_timer_list(tv[3].base + OFFSET(tvec_s_vec),
		vec_size, vec, (void *)td, &highest, &highest_tte,
		tv, jiffies);
	do_timer_list(tv[4].base + OFFSET(tvec_s_vec),
		vec_size, vec, (void *)td, &highest, &highest_tte,
		tv, jiffies);
	tdx = do_timer_list(tv[5].base + OFFSET(tvec_s_vec),
		vec_size, vec, (void *)td, &highest, &highest_tte,
		tv, jiffies);

	/* sort all of this cpu's timers by expiration */
	qsort(td, tdx, sizeof(struct timer_data), compare_timer_data);

	fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu,
		symbol_value("tvec_bases") + (SIZE(tvec_t_base_s) * cpu));

	/* size the columns to the largest values being printed */
	sprintf(buf1, "%ld", highest);
	flen = MAX(strlen(buf1), strlen("JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1,flen, CENTER|RJUST, "JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1,flen, RJUST|LONG_DEC,
		MKSTR(jiffies)));

	/* +1 accounts possible "-" sign */
	sprintf(buf4, "%ld", highest_tte);
	tlen = MAX(strlen(buf4) + 1, strlen("TTE"));

	fprintf(fp, "%s %s %s %s\n",
		mkstring(buf1, flen, CENTER|RJUST, "EXPIRES"),
		mkstring(buf4, tlen, CENTER|RJUST, "TTE"),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "TIMER_LIST"),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "FUNCTION"));

	for (i = 0; i < tdx; i++) {
		fprintf(fp, "%s", mkstring(buf1, flen, RJUST|LONG_DEC,
			MKSTR(td[i].expires)));
		fprintf(fp, " %s", mkstring(buf4, tlen, RJUST|SLONG_DEC,
			MKSTR(td[i].tte)));
		fprintf(fp, " %s ", mkstring(buf1,
			MAX(VADDR_PRLEN, strlen("TIMER_LIST")),
			RJUST|CENTER|LONG_HEX, MKSTR(td[i].address)));

		if (is_kernel_text(td[i].function)) {
			fprintf(fp, "%s <%s>\n",
				mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(td[i].function)),
				value_to_symstr(td[i].function, buf1, 0));
		} else {
			/* handler is not text: try one level of indirection */
			fprintf(fp, "%s ", mkstring(buf1, VADDR_PRLEN,
				RJUST|LONG_HEX, MKSTR(td[i].function)));
			if (readmem(td[i].function, KVADDR, &function,
				sizeof(ulong), "timer function",
				RETURN_ON_ERROR|QUIET)) {
				if (is_kernel_text(function))
					fprintf(fp, "<%s>",
						value_to_symstr(function,
						buf1, 0));
			}
			fprintf(fp, "\n");
		}
	}

	if (td)
		FREEBUF(td);

	if (++cpu < kt->cpus)
		goto next_cpu;
}

/*
 * 2.6 per-cpu timers, using "per_cpu__tvec_bases".
*/
/*
 * Dump all queued kernel timers on Linux 2.6-era kernels that keep
 * per-cpu tvec_bases[] (TVEC_BASES_V2).  For each selected cpu the
 * five tv1-tv5 timer vectors are walked twice: a first pass counts
 * the queued timers, a second pass stashes them into a timer_data
 * array that is then sorted by expiration time and displayed.
 *
 *   cpus: NULL to display all cpus, or a bitmap selecting cpus.
 */
static void
dump_timer_data_tvec_bases_v2(const ulong *cpus)
{
	int i, cpu, tdx, flen, tlen;
	struct timer_data *td;
	int vec_root_size, vec_size;
	struct tv_range tv[TVN];
	ulong *vec, jiffies, highest, highest_tte, function;
	ulong tvec_bases;
	long count;
	struct syment *sp;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

	/*
	 * Determine the tv1 (root) vector length, trying the older
	 * "tvec_root_s" structure name first, then "tvec_root".
	 */
	vec_root_size = (i = ARRAY_LENGTH(tvec_root_s_vec)) ?
		i : get_array_length("tvec_root_s.vec", NULL, SIZE(list_head));
	if (!vec_root_size &&
	    (i = get_array_length("tvec_root.vec", NULL, SIZE(list_head))))
		vec_root_size = i;
	if (!vec_root_size)
		error(FATAL, "cannot determine tvec_root.vec[] array size\n");

	/* Likewise for the tv2-tv5 "tvec_s"/"tvec" vector length. */
	vec_size = (i = ARRAY_LENGTH(tvec_s_vec)) ?
		i : get_array_length("tvec_s.vec", NULL, SIZE(list_head));
	if (!vec_size &&
	    (i = get_array_length("tvec.vec", NULL, SIZE(list_head))))
		vec_size = i;
	if (!vec_size)
		error(FATAL, "cannot determine tvec.vec[] array size\n");

	/* Scratch buffer sized for the largest of the two vector types. */
	vec = (ulong *)GETBUF(SIZE(list_head) * MAX(vec_root_size, vec_size));
	cpu = 0;

next_cpu:
	/* Skip cpus not selected in the caller-supplied bitmap. */
	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
		if (++cpu < kt->cpus)
			goto next_cpu;
		return;
	}
	/*
	 * hide data of offline cpu and goto next cpu
	 */
	if (hide_offline_cpu(cpu)) {
		fprintf(fp, "TVEC_BASES[%d]: [OFFLINE]\n", cpu);
		if (++cpu < kt->cpus)
			goto next_cpu;
		return;
	}

	count = 0;
	td = (struct timer_data *)NULL;
	BZERO(tv, sizeof(struct tv_range) * TVN);

	init_tv_ranges(tv, vec_root_size, vec_size, cpu);

	/* First pass: count timers queued on all five vectors. */
	count += do_timer_list(tv[1].base + OFFSET(tvec_root_s_vec),
		vec_root_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[2].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[3].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[4].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);
	count += do_timer_list(tv[5].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, tv, 0);

	/*
	 * NOTE(review): 2x headroom — presumably to tolerate entries
	 * appearing between the counting and stashing passes on a live
	 * system; confirm against do_timer_list() usage.
	 */
	if (count)
		td = (struct timer_data *)
			GETBUF((count*2) * sizeof(struct timer_data));
	tdx = 0;
	highest = 0;
	highest_tte = 0;
	get_symbol_data("jiffies", sizeof(ulong), &jiffies);

	/* Second pass: stash each timer into td[]; tdx is the final count. */
	do_timer_list(tv[1].base + OFFSET(tvec_root_s_vec), vec_root_size,
		vec, (void *)td, &highest, &highest_tte, tv, jiffies);
	do_timer_list(tv[2].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, tv, jiffies);
	do_timer_list(tv[3].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, tv, jiffies);
	do_timer_list(tv[4].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, tv, jiffies);
	tdx = do_timer_list(tv[5].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, tv, jiffies);

	/* Sort by ascending expiration time. */
	qsort(td, tdx, sizeof(struct timer_data), compare_timer_data);

	sp = per_cpu_symbol_search("per_cpu__tvec_bases");
	if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
		tvec_bases = sp->value + kt->__per_cpu_offset[cpu];
	else
		tvec_bases = sp->value;

	/*
	 * NOTE(review): when boot_tvec_bases exists the per-cpu datum
	 * appears to be a pointer to the real tvec_base, hence the extra
	 * dereference — confirm against the kernel version in use.
	 */
	if (symbol_exists("boot_tvec_bases")) {
		readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *),
			"per-cpu tvec_bases", FAULT_ON_ERROR);
	}

	fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, tvec_bases);

	/* Column width: wide enough for the largest expires value seen. */
	sprintf(buf1, "%ld", highest);
	flen = MAX(strlen(buf1), strlen("JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1, flen, CENTER|RJUST, "JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1, flen, RJUST|LONG_DEC, MKSTR(jiffies)));

	/* +1 accounts possible "-" sign */
	sprintf(buf4, "%ld", highest_tte);
	tlen = MAX(strlen(buf4) + 1, strlen("TTE"));

	fprintf(fp, "%s %s %s %s\n",
		mkstring(buf1, flen, CENTER|RJUST, "EXPIRES"),
		mkstring(buf4, tlen, CENTER|RJUST, "TTE"),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "TIMER_LIST"),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "FUNCTION"));

	for (i = 0; i < tdx; i++) {
		fprintf(fp, "%s",
			mkstring(buf1, flen, RJUST|LONG_DEC, MKSTR(td[i].expires)));
		fprintf(fp, " %s",
			mkstring(buf4, tlen, RJUST|SLONG_DEC, MKSTR(td[i].tte)));
		fprintf(fp, " %s ",
			mkstring(buf1, MAX(VADDR_PRLEN, strlen("TIMER_LIST")),
			RJUST|CENTER|LONG_HEX, MKSTR(td[i].address)));

		if (is_kernel_text(td[i].function)) {
			fprintf(fp, "%s <%s>\n",
				mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(td[i].function)),
				value_to_symstr(td[i].function, buf1, 0));
		} else {
			/*
			 * NOTE(review): not a text address — dereference it
			 * once and retry, presumably to handle an indirect
			 * pointer to the real handler; verify.
			 */
			fprintf(fp, "%s ",
				mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(td[i].function)));
			if (readmem(td[i].function, KVADDR, &function,
			    sizeof(ulong), "timer function",
			    RETURN_ON_ERROR|QUIET)) {
				if (is_kernel_text(function))
					fprintf(fp, "<%s>",
						value_to_symstr(function, buf1, 0));
			}
			fprintf(fp, "\n");
		}
	}

	if (td)
		FREEBUF(td);

	if (++cpu < kt->cpus)
		goto next_cpu;
}

/*
 * Linux 4.2 timers use new tvec_root, tvec and timer_list structures
 */
/*
 * Same two-pass count/stash/sort/print scheme as the _v2 variant above,
 * but the vector heads may be hlist_head (or list_head on RHEL7.6+),
 * so the head size is probed and passed down to do_timer_list_v3().
 */
static void
dump_timer_data_tvec_bases_v3(const ulong *cpus)
{
	int i, cpu, tdx, flen, tlen;
	struct timer_data *td;
	int vec_root_size, vec_size;
	struct tv_range tv[TVN];
	ulong *vec, jiffies, highest, highest_tte, function;
	ulong tvec_bases;
	long count, head_size;
	struct syment *sp;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

	vec_root_size = vec_size = 0;
	/* Head type determines both array stride and list traversal below. */
	if (STREQ(MEMBER_TYPE_NAME("tvec_root", "vec"), "list_head"))
		/* for RHEL7.6 or later */
		head_size = SIZE(list_head);
	else
		head_size = SIZE(hlist_head);

	if ((i = get_array_length("tvec_root.vec", NULL, head_size)))
		vec_root_size = i;
	else
		error(FATAL, "cannot determine tvec_root.vec[] array size\n");

	if ((i = get_array_length("tvec.vec", NULL, head_size)))
		vec_size = i;
	else
		error(FATAL, "cannot determine tvec.vec[] array size\n");

	vec = (ulong *)GETBUF(head_size * MAX(vec_root_size, vec_size));
	cpu = 0;

next_cpu:
	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
		if (++cpu < kt->cpus)
			goto next_cpu;
		return;
	}
	/*
	 * hide data of offline cpu and goto next cpu
	 */
	if (hide_offline_cpu(cpu)) {
		fprintf(fp, "TVEC_BASES[%d]: [OFFLINE]\n", cpu);
		if (++cpu < kt->cpus)
			goto next_cpu;
		return;
	}

	count = 0;
	td = (struct timer_data *)NULL;
	BZERO(tv, sizeof(struct tv_range) * TVN);

	init_tv_ranges(tv, vec_root_size, vec_size, cpu);

	/* First pass: count queued timers on all five vectors. */
	count += do_timer_list_v3(tv[1].base + OFFSET(tvec_root_s_vec),
		vec_root_size, vec, NULL, NULL, NULL, 0, head_size);
	count += do_timer_list_v3(tv[2].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, 0, head_size);
	count += do_timer_list_v3(tv[3].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, 0, head_size);
	count += do_timer_list_v3(tv[4].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, 0, head_size);
	count += do_timer_list_v3(tv[5].base + OFFSET(tvec_s_vec),
		vec_size, vec, NULL, NULL, NULL, 0, head_size);

	if (count)
		td = (struct timer_data *)
			GETBUF((count*2) * sizeof(struct timer_data));
	tdx = 0;
	highest = 0;
	highest_tte = 0;
	get_symbol_data("jiffies", sizeof(ulong), &jiffies);

	/* Second pass: stash the timers; tdx is the final stash count. */
	do_timer_list_v3(tv[1].base + OFFSET(tvec_root_s_vec), vec_root_size,
		vec, (void *)td, &highest, &highest_tte, jiffies, head_size);
	do_timer_list_v3(tv[2].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, jiffies, head_size);
	do_timer_list_v3(tv[3].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, jiffies, head_size);
	do_timer_list_v3(tv[4].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, jiffies, head_size);
	tdx = do_timer_list_v3(tv[5].base + OFFSET(tvec_s_vec), vec_size,
		vec, (void *)td, &highest, &highest_tte, jiffies, head_size);

	qsort(td, tdx, sizeof(struct timer_data), compare_timer_data);

	sp = per_cpu_symbol_search("per_cpu__tvec_bases");
	if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
		tvec_bases = sp->value + kt->__per_cpu_offset[cpu];
	else
		tvec_bases = sp->value;

	fprintf(fp, "TVEC_BASES[%d]: %lx\n", cpu, tvec_bases);

	sprintf(buf1, "%ld", highest);
	flen = MAX(strlen(buf1), strlen("JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1, flen, CENTER|RJUST, "JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1, flen, RJUST|LONG_DEC, MKSTR(jiffies)));

	/* +1 accounts possible "-" sign */
	sprintf(buf4, "%ld", highest_tte);
	tlen = MAX(strlen(buf4) + 1, strlen("TTE"));

	fprintf(fp, "%s %s %s %s\n",
		mkstring(buf1, flen, CENTER|RJUST, "EXPIRES"),
		mkstring(buf4, tlen, CENTER|RJUST, "TTE"),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "TIMER_LIST"),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "FUNCTION"));

	for (i = 0; i < tdx; i++) {
		fprintf(fp, "%s",
			mkstring(buf1, flen, RJUST|LONG_DEC, MKSTR(td[i].expires)));
		fprintf(fp, " %s",
			mkstring(buf4, tlen, RJUST|SLONG_DEC, MKSTR(td[i].tte)));
		fprintf(fp, " %s ",
			mkstring(buf1, MAX(VADDR_PRLEN, strlen("TIMER_LIST")),
			RJUST|CENTER|LONG_HEX, MKSTR(td[i].address)));

		if (is_kernel_text(td[i].function)) {
			fprintf(fp, "%s <%s>\n",
				mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(td[i].function)),
				value_to_symstr(td[i].function, buf1, 0));
		} else {
			/* Non-text value: try one dereference (see _v2 note). */
			fprintf(fp, "%s ",
				mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(td[i].function)));
			if (readmem(td[i].function, KVADDR, &function,
			    sizeof(ulong), "timer function",
			    RETURN_ON_ERROR|QUIET)) {
				if (is_kernel_text(function))
					fprintf(fp, "<%s>",
						value_to_symstr(function, buf1, 0));
			}
			fprintf(fp, "\n");
		}
	}

	if (td)
		FREEBUF(td);

	if (++cpu < kt->cpus)
		goto next_cpu;
}

/*
 * The comparison function must return an integer less than,
 * equal to, or greater than zero if the first argument is
 * considered to be respectively less than, equal to, or
 * greater than the second.  If two members compare as equal,
 * their order in the sorted array is undefined.
 */
static int
compare_timer_data(const void *v1, const void *v2)
{
	struct timer_data *t1, *t2;

	t1 = (struct timer_data *)v1;
	t2 = (struct timer_data *)v2;

	/* Ascending order of the expires field. */
	return (t1->expires < t2->expires ? -1 :
		t1->expires == t2->expires ? 0 : 1);
}

/*
 * Create the address range for each of the timer vectors.
*/ static void init_tv_ranges(struct tv_range *tv, int vec_root_size, int vec_size, int cpu) { ulong tvec_bases; struct syment *sp; if (kt->flags & TVEC_BASES_V1) { tv[1].base = symbol_value("tvec_bases") + (SIZE(tvec_t_base_s) * cpu) + OFFSET(tvec_t_base_s_tv1); tv[1].end = tv[1].base + SIZE(tvec_root_s); tv[2].base = tv[1].end; tv[2].end = tv[2].base + SIZE(tvec_s); tv[3].base = tv[2].end; tv[3].end = tv[3].base + SIZE(tvec_s); tv[4].base = tv[3].end; tv[4].end = tv[4].base + SIZE(tvec_s); tv[5].base = tv[4].end; tv[5].end = tv[5].base + SIZE(tvec_s); } else if ((kt->flags & TVEC_BASES_V2) || (kt->flags2 & TVEC_BASES_V3)) { sp = per_cpu_symbol_search("per_cpu__tvec_bases"); if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) tvec_bases = sp->value + kt->__per_cpu_offset[cpu]; else tvec_bases = sp->value; if (symbol_exists("boot_tvec_bases")) { readmem(tvec_bases, KVADDR, &tvec_bases, sizeof(void *), "per-cpu tvec_bases", FAULT_ON_ERROR); } tv[1].base = tvec_bases + OFFSET(tvec_t_base_s_tv1); tv[1].end = tv[1].base + SIZE(tvec_root_s); tv[2].base = tv[1].end; tv[2].end = tv[2].base + SIZE(tvec_s); tv[3].base = tv[2].end; tv[3].end = tv[3].base + SIZE(tvec_s); tv[4].base = tv[3].end; tv[4].end = tv[4].base + SIZE(tvec_s); tv[5].base = tv[4].end; tv[5].end = tv[5].base + SIZE(tvec_s); } else { tv[1].base = symbol_value("tv1"); tv[1].end = tv[1].base + SIZE(timer_vec_root); tv[2].base = symbol_value("tv2"); tv[2].end = tv[2].base + SIZE(timer_vec); tv[3].base = symbol_value("tv3"); tv[3].end = tv[3].base + SIZE(timer_vec); tv[4].base = symbol_value("tv4"); tv[4].end = tv[4].base + SIZE(timer_vec); tv[5].base = symbol_value("tv5"); tv[5].end = tv[5].base + SIZE(timer_vec); } } #define IN_TV_RANGE(vaddr) \ ((((vaddr) >= tv[1].base) && ((vaddr) < tv[1].end)) || \ (((vaddr) >= tv[2].base) && ((vaddr) < tv[2].end)) || \ (((vaddr) >= tv[3].base) && ((vaddr) < tv[3].end)) || \ (((vaddr) >= tv[4].base) && ((vaddr) < tv[4].end)) || \ (((vaddr) >= tv[5].base) && ((vaddr) < 
tv[5].end))) /* * Count, or stash, the entries of a linked timer_list -- depending * upon the option value. */ static int do_timer_list(ulong vec_kvaddr, int size, ulong *vec, void *option, ulong *highest, ulong *highest_tte, struct tv_range *tv, ulong jiffies) { int i, t; int count, tdx; ulong expires, function; struct timer_data *td; char *timer_list_buf; ulong *timer_list; int timer_cnt; struct list_data list_data, *ld; long sz; ulong offset = 0; tdx = 0; td = option ? (struct timer_data *)option : NULL; if (td) { while (td[tdx].function) tdx++; } if (VALID_MEMBER(timer_list_list)) sz = SIZE(list_head) * size; else if (VALID_MEMBER(timer_list_entry)) sz = SIZE(list_head) * size; else sz = sizeof(ulong) * size; readmem(vec_kvaddr, KVADDR, vec, sz, "timer_list vec array", FAULT_ON_ERROR); if (VALID_MEMBER(timer_list_list)) { offset = OFFSET(timer_list_list); goto new_timer_list_format; } if (VALID_MEMBER(timer_list_entry)) { offset = OFFSET(timer_list_entry); goto new_timer_list_format; } if (VALID_MEMBER(timer_list_next)) offset = OFFSET(timer_list_next); else error(FATAL, "no timer_list next, list, or entry members?\n"); ld = &list_data; timer_list_buf = GETBUF(SIZE(timer_list)); for (i = count = 0; i < size; i++) { if (vec[i]) { BZERO(ld, sizeof(struct list_data)); ld->start = vec[i]; ld->member_offset = offset; hq_open(); timer_cnt = do_list(ld); if (!timer_cnt) { hq_close(); continue; } timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); timer_cnt = retrieve_list(timer_list, timer_cnt); hq_close(); for (t = 0; t < timer_cnt; t++) { readmem(timer_list[t], KVADDR, timer_list_buf, SIZE(timer_list), "timer_list buffer", FAULT_ON_ERROR); expires = ULONG(timer_list_buf + OFFSET(timer_list_expires)); function = ULONG(timer_list_buf + OFFSET(timer_list_function)); if (td) { td[tdx].address = timer_list[t]; td[tdx].expires = expires; td[tdx].function = function; td[tdx].tte = expires - jiffies; if (highest && (expires > *highest)) *highest = expires; if 
(highest_tte && (abs(td[tdx].tte) > *highest_tte)) *highest_tte = abs(td[tdx].tte); tdx++; } } FREEBUF(timer_list); count += timer_cnt; } } FREEBUF(timer_list_buf); return(td ? tdx : count); new_timer_list_format: ld = &list_data; timer_list_buf = GETBUF(SIZE(timer_list)); for (i = count = 0; i < (size*2); i += 2, vec_kvaddr += SIZE(list_head)) { if (vec[i] == vec_kvaddr) continue; BZERO(ld, sizeof(struct list_data)); ld->start = vec[i]; ld->list_head_offset = offset; ld->end = vec_kvaddr; ld->flags = RETURN_ON_LIST_ERROR; hq_open(); if ((timer_cnt = do_list(ld)) == -1) { /* Ignore chains with errors */ error(INFO, "ignoring faulty timer list at index %d of timer array\n", i/2); continue; } if (!timer_cnt) continue; timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); timer_cnt = retrieve_list(timer_list, timer_cnt); hq_close(); for (t = 0; t < timer_cnt; t++) { if (IN_TV_RANGE(timer_list[t])) break; count++; readmem(timer_list[t], KVADDR, timer_list_buf, SIZE(timer_list), "timer_list buffer", FAULT_ON_ERROR); expires = ULONG(timer_list_buf + OFFSET(timer_list_expires)); function = ULONG(timer_list_buf + OFFSET(timer_list_function)); if (td) { td[tdx].address = timer_list[t]; td[tdx].expires = expires; td[tdx].function = function; td[tdx].tte = expires - jiffies; if (highest && (expires > *highest)) *highest = expires; if (highest_tte && (abs(td[tdx].tte) > *highest_tte)) *highest_tte = abs(td[tdx].tte); tdx++; } } FREEBUF(timer_list); } FREEBUF(timer_list_buf); return(td ? tdx : count); } static int do_timer_list_v3(ulong vec_kvaddr, int size, ulong *vec, void *option, ulong *highest, ulong *highest_tte, ulong jiffies, long head_size) { int i, t; int count, tdx; ulong expires, function; struct timer_data *td; char *timer_list_buf; ulong *timer_list; int timer_cnt; struct list_data list_data, *ld; tdx = 0; td = option ? 
(struct timer_data *)option : NULL; if (td) { while (td[tdx].function) tdx++; } readmem(vec_kvaddr, KVADDR, vec, head_size * size, "timer_list vec array", FAULT_ON_ERROR); ld = &list_data; timer_list_buf = GETBUF(SIZE(timer_list)); for (i = count = 0; i < size; i++, vec_kvaddr += head_size) { if (head_size == SIZE(list_head)) { if (vec[i*2] == vec_kvaddr) continue; } else { if (vec[i] == 0) continue; } BZERO(ld, sizeof(struct list_data)); ld->start = (head_size == SIZE(list_head)) ? vec[i*2] : vec[i]; ld->list_head_offset = OFFSET(timer_list_entry); ld->end = vec_kvaddr; ld->flags = RETURN_ON_LIST_ERROR; hq_open(); if ((timer_cnt = do_list(ld)) == -1) { /* Ignore chains with errors */ error(INFO, "ignoring faulty timer list at index %d of timer array\n", i); continue; } if (!timer_cnt) { hq_close(); continue; } timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); timer_cnt = retrieve_list(timer_list, timer_cnt); hq_close(); for (t = 0; t < timer_cnt; t++) { count++; readmem(timer_list[t], KVADDR, timer_list_buf, SIZE(timer_list), "timer_list buffer", FAULT_ON_ERROR); expires = ULONG(timer_list_buf + OFFSET(timer_list_expires)); function = ULONG(timer_list_buf + OFFSET(timer_list_function)); if (td) { td[tdx].address = timer_list[t]; td[tdx].expires = expires; td[tdx].function = function; td[tdx].tte = expires - jiffies; if (highest && (expires > *highest)) *highest = expires; if (highest_tte && (abs(td[tdx].tte) > *highest_tte)) *highest_tte = abs(td[tdx].tte); tdx++; } } FREEBUF(timer_list); } FREEBUF(timer_list_buf); return(td ? 
tdx : count); } #define TIMERS_CHUNK (100) struct timer_bases_data { int total, cnt, num_vectors; ulong *vectors; ulong timer_base; struct timer_data *timers; }; static int do_timer_list_v4(struct timer_bases_data *data, ulong jiffies) { int i, t, timer_cnt, found; struct list_data list_data, *ld; ulong *timer_list; ulong expires, function; long oldsize; char *timer_list_buf; timer_list_buf = GETBUF(SIZE(timer_list)); ld = &list_data; for (i = found = 0; i < data->num_vectors; i++) { if (data->vectors[i] == 0) continue; if (CRASHDEBUG(1)) fprintf(fp, "%lx vectors[%d]: %lx\n", data->timer_base + OFFSET(timer_base_vectors) + (i * sizeof(void *)), i, data->vectors[i]); BZERO(ld, sizeof(struct list_data)); ld->start = data->vectors[i]; ld->list_head_offset = OFFSET(timer_list_entry); ld->end = 0; ld->flags = RETURN_ON_LIST_ERROR; hq_open(); if ((timer_cnt = do_list(ld)) == -1) { /* Ignore chains with errors */ if (CRASHDEBUG(1)) error(INFO, "ignoring faulty timer_list in timer_base.vector[%d] list\n", i); hq_close(); continue; } if (!timer_cnt) { hq_close(); continue; } timer_list = (ulong *)GETBUF(timer_cnt * sizeof(ulong)); timer_cnt = retrieve_list(timer_list, timer_cnt); hq_close(); for (t = 0; t < timer_cnt; t++) { if (CRASHDEBUG(1)) fprintf(fp, " %lx\n", timer_list[t]); if (!readmem(timer_list[t], KVADDR, timer_list_buf, SIZE(timer_list), "timer_list buffer", QUIET|RETURN_ON_ERROR)) continue; expires = ULONG(timer_list_buf + OFFSET(timer_list_expires)); function = ULONG(timer_list_buf + OFFSET(timer_list_function)); data->timers[data->cnt].address = timer_list[t]; data->timers[data->cnt].expires = expires; data->timers[data->cnt].function = function; data->timers[data->cnt].tte = expires - jiffies; data->cnt++; if (data->cnt == data->total) { oldsize = data->total * sizeof(struct timer_data); RESIZEBUF(data->timers, oldsize, oldsize * 2); data->total *= 2; } found++; } FREEBUF(timer_list); } FREEBUF(timer_list_buf); return found; } /* * Linux 4.8 timers use new 
 * timer_bases[][]
 */
/*
 * Dump all queued timers on Linux 4.8+ kernels that use the per-cpu
 * timer_bases[] array.  When sysctl_timer_migration exists there are
 * two bases per cpu (BASE_STD and BASE_DEF); each base's vectors are
 * gathered via do_timer_list_v4(), sorted, and printed.  On a live
 * system, entries whose function is not (directly or via one
 * dereference) kernel text are treated as stale and hidden.
 *
 *   cpus: NULL to display all cpus, or a bitmap selecting cpus.
 */
static void
dump_timer_data_timer_bases(const ulong *cpus)
{
	int i, cpu, flen, tlen, base, nr_bases, found, display, j = 0;
	struct syment *sp;
	ulong timer_base, jiffies, function, highest_tte;
	struct timer_bases_data data;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf4[BUFSIZE];

	if (!(data.num_vectors = get_array_length("timer_base.vectors",
	    NULL, 0)))
		error(FATAL,
			"cannot determine timer_base.vectors[] array size\n");
	data.vectors = (ulong *)GETBUF(data.num_vectors * sizeof(void *));
	data.timers = (struct timer_data *)
		GETBUF(sizeof(struct timer_data) * TIMERS_CHUNK);
	data.total = TIMERS_CHUNK;
	data.cnt = 0;

	/*
	 * NOTE(review): sysctl_timer_migration's presence is used as the
	 * indicator that a second (deferrable) base exists — confirm for
	 * the kernel version under analysis.
	 */
	nr_bases = kernel_symbol_exists("sysctl_timer_migration") ? 2 : 1;
	cpu = 0;

	get_symbol_data("jiffies", sizeof(ulong), &jiffies);
	sprintf(buf1, "%ld", jiffies);
	flen = MAX(strlen(buf1), strlen("JIFFIES"));
	fprintf(fp, "%s\n", mkstring(buf1, flen, LJUST, "JIFFIES"));
	fprintf(fp, "%s\n\n", mkstring(buf1, flen, RJUST|LONG_DEC,
		MKSTR(jiffies)));

next_cpu:
	/* Skip cpus not selected in the caller-supplied bitmap. */
	if (cpus && !NUM_IN_BITMAP(cpus, cpu)) {
		if (++cpu < kt->cpus)
			goto next_cpu;
		goto done;
	}
	/*
	 * hide data of offline cpu and goto next cpu
	 */
	if (hide_offline_cpu(cpu)) {
		fprintf(fp, "TIMER_BASES[%d]: [OFFLINE]\n", cpu);
		if (++cpu < kt->cpus)
			goto next_cpu;
		goto done;
	}

	base = 0;

	sp = per_cpu_symbol_search("per_cpu__timer_bases");
	if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
		timer_base = sp->value + kt->__per_cpu_offset[cpu];
	else
		timer_base = sp->value;

	/* Blank line between cpus (after the first one). */
	if (j++)
		fprintf(fp, "\n");
next_base:

	fprintf(fp, "TIMER_BASES[%d][%s]: %lx\n", cpu,
		base == 0 ? "BASE_STD" : "BASE_DEF", timer_base);

	readmem(timer_base + OFFSET(timer_base_vectors), KVADDR, data.vectors,
		data.num_vectors * sizeof(void *), "timer_base.vectors[]",
		FAULT_ON_ERROR);
	data.cnt = 0;
	data.timer_base = timer_base;

	found = do_timer_list_v4(&data, jiffies);

	qsort(data.timers, found, sizeof(struct timer_data),
		compare_timer_data);

	/*
	 * Pre-scan pass: compute the widest |tte| among the entries that
	 * will actually be displayed, to size the TTE column.
	 */
	highest_tte = 0;
	for (i = 0; i < found; i++) {
		display = FALSE;

		if (is_kernel_text(data.timers[i].function)) {
			display = TRUE;
		} else {
			/* Try one dereference before giving up. */
			if (readmem(data.timers[i].function, KVADDR, &function,
			    sizeof(ulong), "timer function",
			    RETURN_ON_ERROR|QUIET) && is_kernel_text(function)) {
				display = TRUE;
			} else {
				/* Hide only on live systems (likely stale). */
				if (LIVE())
					display = FALSE;
				else
					display = TRUE;
			}
		}

		if (display) {
			if (abs(data.timers[i].tte) > highest_tte)
				highest_tte = abs(data.timers[i].tte);
		}
	}

	/* +1 accounts possible "-" sign */
	sprintf(buf4, "%ld", highest_tte);
	tlen = MAX(strlen(buf4) + 1, strlen("TTE"));

	fprintf(fp, " %s %s TIMER_LIST FUNCTION\n",
		mkstring(buf1, flen, LJUST, "EXPIRES"),
		mkstring(buf4, tlen, LJUST, "TTE"));

	for (i = 0; i < found; i++) {
		display = FALSE;

		if (is_kernel_text(data.timers[i].function)) {
			display = TRUE;
			function = data.timers[i].function;
		} else {
			if (readmem(data.timers[i].function, KVADDR,
			    &function, sizeof(ulong), "timer function",
			    RETURN_ON_ERROR|QUIET) && is_kernel_text(function))
				display = TRUE;
			else {
				if (LIVE()) {
					if (CRASHDEBUG(1))
						fprintf(fp,
						    "(invalid/stale entry at %lx)\n",
							data.timers[i].address);
					display = FALSE;
				} else {
					function = data.timers[i].function;
					display = TRUE;
				}
			}
		}

		if (display) {
			fprintf(fp, " %s",
				mkstring(buf1, flen, RJUST|LONG_DEC,
				MKSTR(data.timers[i].expires)));
			fprintf(fp, " %s",
				mkstring(buf4, tlen, RJUST|SLONG_DEC,
				MKSTR(data.timers[i].tte)));
			mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(data.timers[i].address));
			fprintf(fp, " %s ", mkstring(buf2, 16, CENTER, buf1));
			fprintf(fp, "%s <%s>\n",
				mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
				MKSTR(data.timers[i].function)),
				value_to_symstr(function, buf2, 0));
		}
	}

	if (!found)
		fprintf(fp, " (none)\n");

	/* Second (deferrable) base follows the first in memory. */
	if ((nr_bases == 2) && (base == 0)) {
		base++;
		timer_base += SIZE(timer_base);
		goto next_base;
	}

	if (++cpu < kt->cpus)
		goto next_cpu;
done:
	FREEBUF(data.vectors);
	FREEBUF(data.timers);
}

/*
 * Panic a live system by exploiting this code in do_exit():
 *
 *     if (!tsk->pid)
 *         panic("Attempted to kill the idle task!");
 *
 * by writing a zero to this task's pid number.  If the write
 * succeeds, the subsequent exit() call will invoke the panic.
 */
static void
panic_this_kernel(void)
{
	pid_t zero_pid = 0;

	/* Only meaningful on a live, writable memory source. */
	if (!LOCAL_ACTIVE())
		error(FATAL, "cannot panic a dumpfile!\n");

	if (!(pc->flags & MFD_RDWR) || (pc->flags & MEMMOD))
		error(FATAL, "cannot write to %s\n", pc->live_memsrc);

	writemem(pid_to_task(pc->program_pid) + OFFSET(task_struct_pid),
		KVADDR, &zero_pid, sizeof(pid_t), "zero pid", FAULT_ON_ERROR);

	clean_exit(0);
}

/*
 * Dump the list of entries on a wait queue, taking into account the two
 * different definitions: wait_queue vs. __wait_queue (wait_queue_t).
 *
 * Accepts either a wait queue address, a symbol name, or a
 * "struct.member address" pair identifying the embedded wait queue.
 */
void
cmd_waitq(void)
{
	ulong q = 0;
	char *wq_name = NULL;		/* name of symbol which is a waitq */
	char *wq_struct = NULL;		/* struct containing the waitq */
	char *wq_member = NULL;		/* member of struct which is a waitq */
	int recd_address = 0;

	if (argcnt < 2 || argcnt > 3) {
		cmd_usage(pc->curcmd, SYNOPSIS);
	}

	if (IS_A_NUMBER(args[1])) {
		q = htol(args[1], FAULT_ON_ERROR, NULL);
		recd_address = 1;
	} else {
		/*
		 * We weren't given a number... see if it is the name of
		 * a symbol or a struct.member format.
		 */
		char *dot;

		dot = strstr(args[1], ".");
		if (dot == NULL) {
			wq_name = args[1];
			q = symbol_value(wq_name);
		} else {
			/* "struct.member" plus a required address argument. */
			wq_struct = args[1];
			wq_member = dot+1;
			*dot = '\0';
			if (argcnt != 3) {
				fprintf(fp, "must supply an address for %s\n",
					wq_struct);
				return;
			}
			q = htol(args[2], FAULT_ON_ERROR, NULL);
			if (MEMBER_OFFSET(wq_struct, wq_member) == -1) {
				fprintf(fp, "%s is not a member of %s\n",
					wq_member, wq_struct);
				return;
			}
			q += MEMBER_OFFSET(wq_struct, wq_member);
		}
	}

	if (q != 0 && IS_KVADDR(q)) {
		/*
		 * If we weren't passed in an address and we're dealing
		 * with old style wait_queue, we must dereference the pointer
		 * and pass in the addr of the first elem on the queue.
		 * If we were supplied an address, assume the user knows
		 * what should be provided.
		 */
		if (!recd_address && VALID_STRUCT(wait_queue)) {
			ulong first_elem;
			readmem(q, KVADDR, &first_elem, sizeof(q),
				"wait queue pointer", FAULT_ON_ERROR);
			if (first_elem == 0) {
				fprintf(fp, "wait queue %lx is empty\n", q);
				return;
			} else {
				q = first_elem;
			}
		}
		dump_waitq(q, wq_name);
	}
}

/*
 * Walk the wait queue at wq and print a task header for every queued
 * task.  Handles the three historical element layouts: wait_queue,
 * __wait_queue (wait_queue_t), and wait_queue_entry.
 *
 *   wq_name: optional symbol name, used only for the "empty" message.
 */
static void
dump_waitq(ulong wq, char *wq_name)
{
	struct list_data list_data, *ld;
	ulong *wq_list;			/* addr of wait queue element */
	ulong next_offset;		/* next pointer of wq element */
	ulong task_offset = 0;		/* offset of task in wq element */
	int cnt;			/* # elems on Queue */
	int start_index = -1;		/* where to start in wq array */
	int i;

	ld = &list_data;
	BZERO(ld, sizeof(*ld));

	/*
	 * setup list depending on how the wait queues are organized.
	 */
	if (VALID_STRUCT(wait_queue)) {
		task_offset = OFFSET(wait_queue_task);
		next_offset = OFFSET(wait_queue_next);
		ld->end = wq;
		ld->start = wq;
		ld->member_offset = next_offset;
		ld->list_head_offset = task_offset;

		start_index = 0;
	} else if (VALID_STRUCT(__wait_queue)) {
		ulong task_list_offset;

		next_offset = OFFSET(list_head_next);
		task_offset = OFFSET(__wait_queue_task);
		task_list_offset = OFFSET(__wait_queue_head_task_list);
		ld->end = ld->start = wq + task_list_offset + next_offset;
		ld->list_head_offset = OFFSET(__wait_queue_task_list);
		ld->member_offset = next_offset;

		/* Element 0 is the queue head itself -- skip it. */
		start_index = 1;
	} else if (VALID_STRUCT(wait_queue_entry)) {
		ulong head_offset;

		next_offset = OFFSET(list_head_next);
		task_offset = OFFSET(wait_queue_entry_private);
		head_offset = OFFSET(wait_queue_head_head);
		ld->end = ld->start = wq + head_offset + next_offset;
		ld->list_head_offset = OFFSET(wait_queue_entry_entry);
		ld->member_offset = next_offset;

		start_index = 1;
	} else {
		error(FATAL, "cannot determine wait queue structures\n");
	}

	hq_open();

	cnt = do_list(ld);
	if (cnt <= 1) {
		/*
		 * Due to the queueing of wait queues, list count returns
		 * an extra number of list entries:
		 * - in the case of a wait_queue_head_t, there is the
		 *   list_entry in that structure;
		 * - in the case of a simple wait_queue, we have the
		 *   pointer back to the wait_queue head (see the
		 *   WAIT_QUEUE_HEAD macro in 2.2 systems).
		 */
		if (wq_name)
			fprintf(fp, "wait queue \"%s\" (%lx) is empty\n",
				wq_name, wq);
		else
			fprintf(fp, "wait queue %lx is empty\n", wq);
		hq_close();
		return;
	}

	wq_list = (ulong *) GETBUF(cnt * sizeof(ulong));
	cnt = retrieve_list(wq_list, cnt);

	for (i = start_index; i < cnt; i++) {
		struct task_context *tc;
		ulong task;

		readmem(wq_list[i] + task_offset, KVADDR, &task,
			sizeof(void *), "wait_queue_t.task", FAULT_ON_ERROR);

		/*
		 * The stored value may be a task pointer or a stack
		 * pointer within the task -- try both translations.
		 */
		if ((tc = task_to_context(task)) ||
		    (tc = task_to_context(stkptr_to_task(task)))) {
			print_task_header(fp, tc, 0);
		} else {
			break;
		}
	}

	hq_close();
}

/*
 * If active, clear the references to the last page tables read.
 */
void
clear_machdep_cache(void)
{
	if (ACTIVE()) {
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		if (machdep->clear_machdep_cache)
			machdep->clear_machdep_cache();
	}
}

/*
 * If it exists, return the number of cpus in the cpu_online_map.
 */
int
get_cpus_online()
{
	int i, len, online;
	char *buf;
	ulong *maskptr, addr;

	if (!(addr = cpu_map_addr("online")))
		return 0;

	len = cpu_map_size("online");
	buf = GETBUF(len);

	online = 0;

	if (readmem(addr, KVADDR, buf, len,
	    "cpu_online_map", RETURN_ON_ERROR)) {

		/* Population count over the whole bitmap. */
		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
			online += count_bits_long(*maskptr);

		if (CRASHDEBUG(1))
			error(INFO, "get_cpus_online: online: %d\n", online);
	}

	FREEBUF(buf);

	return online;
}

/*
 * Check whether a cpu is offline
 */
int
check_offline_cpu(int cpu)
{
	/* With no online map we cannot tell; report not-offline. */
	if (!cpu_map_addr("online"))
		return FALSE;

	if (in_cpu_map(ONLINE_MAP, cpu))
		return FALSE;

	return TRUE;
}

/*
 * Check whether the data related to the specified cpu should be hidden.
 */
int
hide_offline_cpu(int cpu)
{
	/* Only hide when the user enabled offline-hiding. */
	if (!(pc->flags2 & OFFLINE_HIDE))
		return FALSE;

	return check_offline_cpu(cpu);
}

/*
 * If it exists, return the highest cpu number in the cpu_online_map.
 */
int
get_highest_cpu_online()
{
	int i, len;
	char *buf;
	ulong *maskptr, addr;
	int high, highest;

	if (!(addr = cpu_map_addr("online")))
		return -1;

	len = cpu_map_size("online");
	buf = GETBUF(len);
	highest = -1;

	if (readmem(addr, KVADDR, buf, len,
	    "cpu_online_map", RETURN_ON_ERROR)) {

		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) {
			if ((high = highest_bit_long(*maskptr)) < 0)
				continue;
			/* Convert word-relative bit to absolute cpu number. */
			highest = high + (i * (sizeof(ulong)*8));
		}

		if (CRASHDEBUG(1))
			error(INFO, "get_highest_cpu_online: %d\n", highest);
	}

	FREEBUF(buf);

	return highest;
}

/*
 * If it exists, return the number of cpus in the cpu_active_map.
 */
int
get_cpus_active()
{
	int i, len, active;
	char *buf;
	ulong *maskptr, addr;

	if (!(addr = cpu_map_addr("active")))
		return 0;

	len = cpu_map_size("active");
	buf = GETBUF(len);

	active = 0;

	if (readmem(addr, KVADDR, buf, len,
	    "cpu_active_map", RETURN_ON_ERROR)) {

		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
			active += count_bits_long(*maskptr);

		if (CRASHDEBUG(1))
			error(INFO, "get_cpus_active: active: %d\n", active);
	}

	FREEBUF(buf);

	return active;
}

/*
 * If it exists, return the number of cpus in the cpu_present_map.
 */
int
get_cpus_present()
{
	int i, len, present;
	char *buf;
	ulong *maskptr, addr;

	if (!(addr = cpu_map_addr("present")))
		return 0;

	len = cpu_map_size("present");
	buf = GETBUF(len);

	present = 0;

	if (readmem(addr, KVADDR, buf, len,
	    "cpu_present_map", RETURN_ON_ERROR)) {

		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
			present += count_bits_long(*maskptr);

		if (CRASHDEBUG(1))
			error(INFO, "get_cpus_present: present: %d\n", present);
	}

	FREEBUF(buf);

	return present;
}

/*
 * If it exists, return the highest cpu number in the cpu_present_map.
 */
int
get_highest_cpu_present()
{
	int i, len;
	char *buf;
	ulong *maskptr, addr;
	int high, highest;

	if (!(addr = cpu_map_addr("present")))
		return -1;

	len = cpu_map_size("present");
	buf = GETBUF(len);
	highest = -1;

	if (readmem(addr, KVADDR, buf, len,
	    "cpu_present_map", RETURN_ON_ERROR)) {

		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++) {
			if ((high = highest_bit_long(*maskptr)) < 0)
				continue;
			highest = high + (i * (sizeof(ulong)*8));
		}

		if (CRASHDEBUG(1))
			error(INFO, "get_highest_cpu_present: %d\n", highest);
	}

	FREEBUF(buf);

	return highest;
}

/*
 * If it exists, return the number of cpus in the cpu_possible_map.
 */
int
get_cpus_possible()
{
	int i, len, possible;
	char *buf;
	ulong *maskptr, addr;

	if (!(addr = cpu_map_addr("possible")))
		return 0;

	len = cpu_map_size("possible");
	buf = GETBUF(len);

	possible = 0;

	if (readmem(addr, KVADDR, buf, len,
	    "cpu_possible_map", RETURN_ON_ERROR)) {

		maskptr = (ulong *)buf;
		for (i = 0; i < (len/sizeof(ulong)); i++, maskptr++)
			possible += count_bits_long(*maskptr);

		if (CRASHDEBUG(1))
			error(INFO, "get_cpus_possible: possible: %d\n",
				possible);
	}

	FREEBUF(buf);

	return possible;
}

/*
 * When displaying cpus, return the number of cpus online if possible,
 * otherwise kt->cpus.
 */
int
get_cpus_to_display(void)
{
	int online = get_cpus_online();

	return (online ? online : kt->cpus);
}

/*
 * Xen machine-address to pseudo-physical-page translator.
*/ ulonglong xen_m2p(ulonglong machine) { ulong mfn, pfn; mfn = XEN_MACHINE_TO_MFN(machine); pfn = __xen_m2p(machine, mfn); if (pfn == XEN_MFN_NOT_FOUND) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "xen_m2p: machine address %lx not found\n", machine); return XEN_MACHADDR_NOT_FOUND; } return XEN_PFN_TO_PSEUDO(pfn); } static ulong __xen_m2p(ulonglong machine, ulong mfn) { ulong c, i, kmfn, mapping, p, pfn; ulong start, end; ulong *mp = (ulong *)kt->m2p_page; int memtype; if (XEN_CORE_DUMPFILE() && symbol_exists("xen_p2m_addr")) memtype = PHYSADDR; else memtype = KVADDR; /* * Check the FIFO cache first. */ for (c = 0; c < P2M_MAPPING_CACHE; c++) { if (kt->p2m_mapping_cache[c].mapping && ((mfn >= kt->p2m_mapping_cache[c].start) && (mfn <= kt->p2m_mapping_cache[c].end))) { if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) { if (memtype == PHYSADDR) pc->curcmd_flags |= XEN_MACHINE_ADDR; read_p2m(c, memtype, mp); if (memtype == PHYSADDR) pc->curcmd_flags &= ~XEN_MACHINE_ADDR; } else kt->p2m_page_cache_hits++; for (i = 0; i < XEN_PFNS_PER_PAGE; i++) { kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME; if (kmfn == mfn) { p = P2M_MAPPING_PAGE_PFN(c); pfn = p + i; if (CRASHDEBUG(1)) console("(cached) mfn: %lx (%llx) p: %ld" " i: %ld pfn: %lx (%llx)\n", mfn, machine, p, i, pfn, XEN_PFN_TO_PSEUDO(pfn)); kt->p2m_mfn_cache_hits++; return pfn; } } /* * Stale entry -- clear it out. */ kt->p2m_mapping_cache[c].mapping = 0; } } if (PVOPS_XEN()) { /* * The machine address was not cached, so search from the * beginning of the p2m tree/array, caching the contiguous * range containing the found machine address. 
*/ if (symbol_exists("p2m_mid_missing")) pfn = __xen_pvops_m2p_l3(machine, mfn); else if (symbol_exists("xen_p2m_addr")) { if (XEN_CORE_DUMPFILE()) pfn = __xen_pvops_m2p_hyper(machine, mfn); else pfn = __xen_pvops_m2p_domU(machine, mfn); } else pfn = __xen_pvops_m2p_l2(machine, mfn); if (pfn != XEN_MFN_NOT_FOUND) return pfn; } else { /* * The machine address was not cached, so search from the * beginning of the phys_to_machine_mapping array, caching * the contiguous range containing the found machine address. */ mapping = kt->phys_to_machine_mapping; for (p = 0; p < kt->p2m_table_size; p += XEN_PFNS_PER_PAGE) { if (mapping != kt->last_mapping_read) { if (!readmem(mapping, KVADDR, mp, PAGESIZE(), "phys_to_machine_mapping page", RETURN_ON_ERROR)) error(FATAL, "cannot access" " phys_to_machine_mapping page\n"); else kt->last_mapping_read = mapping; } kt->p2m_pages_searched++; if (search_mapping_page(mfn, &i, &start, &end)) { pfn = p + i; if (CRASHDEBUG(1)) console("pages: %d mfn: %lx (%llx) p: %ld" " i: %ld pfn: %lx (%llx)\n", (p/XEN_PFNS_PER_PAGE)+1, mfn, machine, p, i, pfn, XEN_PFN_TO_PSEUDO(pfn)); c = kt->p2m_cache_index; kt->p2m_mapping_cache[c].start = start; kt->p2m_mapping_cache[c].end = end; kt->p2m_mapping_cache[c].mapping = mapping; kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE; return pfn; } mapping += PAGESIZE(); } } if (CRASHDEBUG(1)) console("machine address %llx not found\n", machine); return (XEN_MFN_NOT_FOUND); } static ulong __xen_pvops_m2p_l2(ulonglong machine, ulong mfn) { ulong c, e, end, i, mapping, p, p2m, pfn, start; for (e = p = 0, p2m = kt->pvops_xen.p2m_top; e < kt->pvops_xen.p2m_top_entries; e++, p += XEN_PFNS_PER_PAGE, p2m += sizeof(void *)) { if (!readmem(p2m, KVADDR, &mapping, sizeof(void *), "p2m_top", RETURN_ON_ERROR)) error(FATAL, "cannot access p2m_top[] entry\n"); if (mapping == kt->pvops_xen.p2m_missing) continue; if (mapping != kt->last_mapping_read) { if (!readmem(mapping, KVADDR, (void *)kt->m2p_page, PAGESIZE(), "p2m_top 
page", RETURN_ON_ERROR)) error(FATAL, "cannot access p2m_top[] page\n"); kt->last_mapping_read = mapping; } kt->p2m_pages_searched++; if (search_mapping_page(mfn, &i, &start, &end)) { pfn = p + i; if (CRASHDEBUG(1)) console("pages: %d mfn: %lx (%llx) p: %ld" " i: %ld pfn: %lx (%llx)\n", (p/XEN_PFNS_PER_PAGE)+1, mfn, machine, p, i, pfn, XEN_PFN_TO_PSEUDO(pfn)); c = kt->p2m_cache_index; kt->p2m_mapping_cache[c].start = start; kt->p2m_mapping_cache[c].end = end; kt->p2m_mapping_cache[c].mapping = mapping; kt->p2m_mapping_cache[c].pfn = p; kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE; return pfn; } } return XEN_MFN_NOT_FOUND; } static ulong __xen_pvops_m2p_l3(ulonglong machine, ulong mfn) { ulong c, end, i, j, k, mapping, p; ulong p2m_mid, p2m_top, pfn, start; p2m_top = kt->pvops_xen.p2m_top; for (i = 0; i < XEN_P2M_TOP_PER_PAGE; ++i, p2m_top += sizeof(void *)) { if (!readmem(p2m_top, KVADDR, &mapping, sizeof(void *), "p2m_top", RETURN_ON_ERROR)) error(FATAL, "cannot access p2m_top[] entry\n"); if (mapping == kt->pvops_xen.p2m_mid_missing) continue; p2m_mid = mapping; for (j = 0; j < XEN_P2M_MID_PER_PAGE; ++j, p2m_mid += sizeof(void *)) { if (!readmem(p2m_mid, KVADDR, &mapping, sizeof(void *), "p2m_mid", RETURN_ON_ERROR)) error(FATAL, "cannot access p2m_mid[] entry\n"); if (mapping == kt->pvops_xen.p2m_missing) continue; if (mapping != kt->last_mapping_read) { if (!readmem(mapping, KVADDR, (void *)kt->m2p_page, PAGESIZE(), "p2m_mid page", RETURN_ON_ERROR)) error(FATAL, "cannot access p2m_mid[] page\n"); kt->last_mapping_read = mapping; } if (!search_mapping_page(mfn, &k, &start, &end)) continue; p = i * XEN_P2M_MID_PER_PAGE * XEN_P2M_PER_PAGE; p += j * XEN_P2M_PER_PAGE; pfn = p + k; if (CRASHDEBUG(1)) console("pages: %d mfn: %lx (%llx) p: %ld" " i: %ld j: %ld k: %ld pfn: %lx (%llx)\n", (p / XEN_P2M_PER_PAGE) + 1, mfn, machine, p, i, j, k, pfn, XEN_PFN_TO_PSEUDO(pfn)); c = kt->p2m_cache_index; kt->p2m_mapping_cache[c].start = start; kt->p2m_mapping_cache[c].end = 
end;
			kt->p2m_mapping_cache[c].mapping = mapping;
			kt->p2m_mapping_cache[c].pfn = p;
			kt->p2m_cache_index = (c + 1) % P2M_MAPPING_CACHE;
			return pfn;
		}
	}

	return XEN_MFN_NOT_FOUND;
}

/*
 * Translate an mfn to a pfn for a xen hypervisor ("xm dump-core")
 * dumpfile by walking the mfn frame list in xkd->p2m_mfn_frame_list[],
 * reading each listed page as a machine address, and searching it with
 * search_mapping_page().  On a hit, the contiguous mfn range found is
 * stored in the round-robin kt->p2m_mapping_cache[] for fast re-lookup.
 * Returns the pfn, or XEN_MFN_NOT_FOUND.
 */
static ulong
__xen_pvops_m2p_hyper(ulonglong machine, ulong mfn)
{
	ulong c, end, i, mapping, p, pfn, start;

	for (p = 0; p < xkd->p2m_frames; ++p) {
		/* PTOB: frame number -> byte address of the mapping page */
		mapping = PTOB(xkd->p2m_mfn_frame_list[p]);

		if (mapping != kt->last_mapping_read) {
			/*
			 * The mapping page address is a machine address;
			 * flag it so the dumpfile read layer translates it.
			 */
			pc->curcmd_flags |= XEN_MACHINE_ADDR;
			if (!readmem(mapping, PHYSADDR,
			    (void *)kt->m2p_page, PAGESIZE(),
			    "p2m_mfn_frame_list page", RETURN_ON_ERROR))
				error(FATAL,
				    "cannot access p2m_mfn_frame_list[] page\n");
			pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
			kt->last_mapping_read = mapping;
		}

		kt->p2m_pages_searched++;

		if (search_mapping_page(mfn, &i, &start, &end)) {
			pfn = p * XEN_PFNS_PER_PAGE + i;
			if (CRASHDEBUG(1))
			    console("pages: %d mfn: %lx (%llx) p: %ld"
				" i: %ld pfn: %lx (%llx)\n",
				p + 1, mfn, machine, p, i, pfn,
				XEN_PFN_TO_PSEUDO(pfn));

			/* Cache the contiguous range at the next slot. */
			c = kt->p2m_cache_index;
			kt->p2m_mapping_cache[c].start = start;
			kt->p2m_mapping_cache[c].end = end;
			kt->p2m_mapping_cache[c].mapping = mapping;
			kt->p2m_mapping_cache[c].pfn = p * XEN_PFNS_PER_PAGE;
			kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE;

			return pfn;
		}
	}

	return XEN_MFN_NOT_FOUND;
}

/*
 * Read one cached p2m mapping page into buffer, and record it as the
 * last mapping page read.  PV domain dumpfiles (xen_p2m_addr exists and
 * this is not a hypervisor core dumpfile) require the special
 * read_xc_p2m() path; everything else is a plain readmem() of memtype.
 */
static void
read_p2m(ulong cache_index, int memtype, void *buffer)
{
	/*
	 * Use special read function for PV domain p2m reading.
	 * See the comments of read_xc_p2m().
	 */
	if (symbol_exists("xen_p2m_addr") && !XEN_CORE_DUMPFILE()) {
		if (!read_xc_p2m(kt->p2m_mapping_cache[cache_index].mapping,
			buffer, PAGESIZE()))
			error(FATAL, "cannot access phys_to_machine_mapping page\n");
	} else if (!readmem(kt->p2m_mapping_cache[cache_index].mapping,
			memtype, buffer, PAGESIZE(),
			"phys_to_machine_mapping page (cached)",
			RETURN_ON_ERROR))
		error(FATAL, "cannot access phys_to_machine_mapping page\n");

	kt->last_mapping_read = kt->p2m_mapping_cache[cache_index].mapping;
}

/*
 * PV domain p2m mapping info is stored in xd->xfd at xch_index_offset.
It * is organized as struct xen_dumpcore_p2m and the pfns are progressively * increased by 1 from 0. * * This is a special p2m reading function for xen PV domain vmcores after * kernel commit 054954eb051f35e74b75a566a96fe756015352c8 (xen: switch * to linear virtual mapped sparse p2m list). It is invoked for reading * p2m associate stuff by read_p2m(). */ static int read_xc_p2m(ulonglong addr, void *buffer, long size) { ulong i, new_p2m_buf_size; off_t offset; struct xen_dumpcore_p2m *new_p2m_buf; static struct xen_dumpcore_p2m *p2m_buf; static ulong p2m_buf_size = 0; if (size <= 0) { if ((CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) || CRASHDEBUG(2)) error(INFO, "invalid size request: %ld\n", size); return FALSE; } /* * We extract xen_dumpcore_p2m.gmfn and copy them into the * buffer. So, we need temporary p2m_buf whose size is * (size * (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong))) * to put xen_dumpcore_p2m structures read from xd->xfd. */ new_p2m_buf_size = size * (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong)); if (p2m_buf_size != new_p2m_buf_size) { p2m_buf_size = new_p2m_buf_size; new_p2m_buf = realloc(p2m_buf, p2m_buf_size); if (new_p2m_buf == NULL) { free(p2m_buf); error(FATAL, "cannot realloc p2m buffer\n"); } p2m_buf = new_p2m_buf; } offset = addr * (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong)); offset += xd->xc_core.header.xch_index_offset; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_index_offset offset 0x%lx\n", offset); if (read(xd->xfd, (void*)p2m_buf, p2m_buf_size) != p2m_buf_size) error(FATAL, "cannot read from xch_index_offset offset 0x%lx\n", offset); for (i = 0; i < size / sizeof(ulong); i++) *((ulong *)buffer + i) = p2m_buf[i].gmfn; return TRUE; } static ulong __xen_pvops_m2p_domU(ulonglong machine, ulong mfn) { ulong c, end, i, mapping, p, pfn, start; /* * xch_nr_pages is the number of pages of p2m mapping. It is composed * of struct xen_dumpcore_p2m. 
The stuff we want to copy into the mapping * page is mfn whose type is unsigned long. * So actual number of p2m pages should be: * * xch_nr_pages / (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong)) */ for (p = 0; p < xd->xc_core.header.xch_nr_pages / (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong)); ++p) { mapping = p * PAGESIZE(); if (mapping != kt->last_mapping_read) { if (!read_xc_p2m(mapping, (void *)kt->m2p_page, PAGESIZE())) error(FATAL, "cannot read the last mapping page\n"); kt->last_mapping_read = mapping; } kt->p2m_pages_searched++; if (search_mapping_page(mfn, &i, &start, &end)) { pfn = p * XEN_PFNS_PER_PAGE + i; c = kt->p2m_cache_index; if (CRASHDEBUG (1)) console("mfn: %lx (%llx) i: %ld pfn: %lx (%llx)\n", mfn, machine, i, pfn, XEN_PFN_TO_PSEUDO(pfn)); kt->p2m_mapping_cache[c].start = start; kt->p2m_mapping_cache[c].end = end; kt->p2m_mapping_cache[c].mapping = mapping; kt->p2m_mapping_cache[c].pfn = p * XEN_PFNS_PER_PAGE; kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE; return pfn; } } return XEN_MFN_NOT_FOUND; } /* * Search for an mfn in the current mapping page, and if found, * determine the range of contiguous mfns that it's contained * within (if any). 
 */

#define PREV_UP    0x1
#define NEXT_UP    0x2
#define PREV_DOWN  0x4
#define NEXT_DOWN  0x8

/*
 * Scan the current mapping page (kt->m2p_page, an array of
 * XEN_PFNS_PER_PAGE mfn values) for mfn.  If found, *index is set to
 * its slot, and *startptr/*endptr receive the bounds of the contiguous
 * ascending or descending run of mfns surrounding it (start <= end).
 * Returns TRUE on a hit, FALSE otherwise; the out-parameters are only
 * written when the mfn is found (*index) / a range is computed.
 */
static int
search_mapping_page(ulong mfn, ulong *index, ulong *startptr, ulong *endptr)
{
	int n, found;
	ulong i, kmfn;
	ulong flags, start, end, next, prev, curr;
	ulong *mp;

	mp = (ulong *)kt->m2p_page;

	/* Linear scan; XEN_FOREIGN_FRAME is masked off each entry. */
	for (i = 0, found = FALSE; i < XEN_PFNS_PER_PAGE; i++) {
		kmfn = (*(mp+i)) & ~XEN_FOREIGN_FRAME;

		if (kmfn == mfn) {
			found = TRUE;
			*index = i;
			break;
		}
	}

	if (found) {
		flags = 0;
		next = prev = XEN_MFN_NOT_FOUND;
		start = end = kmfn;

		/* Peek at the immediate neighbors, if they exist. */
		if (i)
			prev = (*(mp+(i-1))) & ~XEN_FOREIGN_FRAME;
		if ((i+1) != XEN_PFNS_PER_PAGE)
			next = (*(mp+(i+1))) & ~XEN_FOREIGN_FRAME;

		/* Classify the direction of contiguity on each side. */
		if (prev == (kmfn-1))
			flags |= PREV_UP;
		else if (prev == (kmfn+1))
			flags |= PREV_DOWN;

		if (next == (kmfn+1))
			flags |= NEXT_UP;
		else if (next == (kmfn-1))
			flags |= NEXT_DOWN;

		/*  Should be impossible, but just in case... */
		if ((flags & PREV_UP) && (flags & NEXT_DOWN))
			flags &= ~NEXT_DOWN;
		else if ((flags & PREV_DOWN) && (flags & NEXT_UP))
			flags &= ~NEXT_UP;

		/*
		 * Extend the run backwards.  Note the scan deliberately
		 * walks all the way to slot 0 rather than breaking at
		 * the first discontinuity; start only advances when the
		 * entry continues the run.
		 */
		if (flags & (PREV_UP|PREV_DOWN)) {
			start = prev;

			for (n = (i-2); n >= 0; n--) {
				curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME;
				if (flags & PREV_UP) {
					if (curr == (start-1))
						start = curr;
				} else {
					if (curr == (start+1))
						start = curr;
				}
			}
		}

		/* Extend the run forwards, same convention as above. */
		if (flags & (NEXT_UP|NEXT_DOWN)) {
			end = next;

			for (n = (i+2); n < XEN_PFNS_PER_PAGE; n++) {
				curr = (*(mp+n)) & ~XEN_FOREIGN_FRAME;
				if (flags & NEXT_UP) {
					if (curr == (end+1))
						end = curr;
				} else {
					if (curr == (end-1))
						end = curr;
				}
			}
		}

		/* Normalize a descending run so start <= end. */
		if (start > end) {
			curr = start;
			start = end;
			end = curr;
		}

		*startptr = start;
		*endptr = end;

		if (CRASHDEBUG(2))
			fprintf(fp, "mfn: %lx -> start: %lx end: %lx (%ld mfns)\n",
				mfn, start, end, end - start);
	}

	return found;
}

/*
 * IKCONFIG management.
*/ #define IKCONFIG_MAX 5000 static struct ikconfig_list { char *name; char *val; } *ikconfig_all; static int add_ikconfig_entry(char *line, struct ikconfig_list *ent) { char *tokptr, *name, *val; name = strtok_r(line, "=", &tokptr); sscanf(name, "CONFIG_%s", name); val = strtok_r(NULL, "", &tokptr); if (!val) { if (CRASHDEBUG(2)) error(WARNING, "invalid ikconfig entry: %s\n", line); return FALSE; } ent->name = strdup(name); ent->val = strdup(val); return TRUE; } static int setup_ikconfig(char *config) { char *ent, *tokptr; struct ikconfig_list *new; ikconfig_all = calloc(1, sizeof(struct ikconfig_list) * IKCONFIG_MAX); if (!ikconfig_all) { error(WARNING, "cannot calloc for ikconfig entries.\n"); return 0; } ent = strtok_r(config, "\n", &tokptr); while (ent) { while (whitespace(*ent)) ent++; if (STRNEQ(ent, "CONFIG_")) { if (add_ikconfig_entry(ent, &ikconfig_all[kt->ikconfig_ents])) kt->ikconfig_ents++; if (kt->ikconfig_ents == IKCONFIG_MAX) { error(WARNING, "ikconfig overflow.\n"); return 1; } } ent = strtok_r(NULL, "\n", &tokptr); } if (kt->ikconfig_ents == 0) { free(ikconfig_all); return 0; } if ((new = realloc(ikconfig_all, sizeof(struct ikconfig_list) * kt->ikconfig_ents))) ikconfig_all = new; return 1; } static void free_ikconfig(void) { int i; for (i = 0; i < kt->ikconfig_ents; i++) { free(ikconfig_all[i].name); free(ikconfig_all[i].val); } free(ikconfig_all); } int get_kernel_config(char *conf_name, char **str) { int i; int ret = IKCONFIG_N; char *name; if (!(kt->ikconfig_flags & IKCONFIG_AVAIL)) { error(WARNING, "CONFIG_IKCONFIG is not set\n"); return ret; } else if (!(kt->ikconfig_flags & IKCONFIG_LOADED)) { read_in_kernel_config(IKCFG_SETUP); if (!(kt->ikconfig_flags & IKCONFIG_LOADED)) { error(WARNING, "IKCFG_SETUP failed\n"); return ret; } } name = strdup(conf_name); if (!strncmp(name, "CONFIG_", strlen("CONFIG_"))) sscanf(name, "CONFIG_%s", name); for (i = 0; i < kt->ikconfig_ents; i++) { if (STREQ(name, ikconfig_all[i].name)) { if (str) *str = 
ikconfig_all[i].val; if (STREQ(ikconfig_all[i].val, "y")) ret = IKCONFIG_Y; else if (STREQ(ikconfig_all[i].val, "m")) ret = IKCONFIG_M; else ret = IKCONFIG_STR; break; } } free(name); return ret; } /* * Read the relevant IKCONFIG (In Kernel Config) data if available. */ static char *ikconfig[] = { "CONFIG_NR_CPUS", "CONFIG_PGTABLE_4", "CONFIG_HZ", "CONFIG_DEBUG_BUGVERBOSE", "CONFIG_DEBUG_INFO_REDUCED", NULL, }; void read_in_kernel_config(int command) { struct syment *sp; int ii, jj, ret, end, found=0; unsigned long size, bufsz; uint64_t magic; char *pos, *ln, *buf, *head, *tail, *val, *uncomp; char line[512]; z_stream stream; if ((kt->flags & NO_IKCONFIG) && !(pc->flags & RUNTIME)) return; if ((sp = symbol_search("kernel_config_data")) == NULL) { if (command == IKCFG_READ) error(FATAL, "kernel_config_data does not exist in this kernel\n"); else if (command == IKCFG_SETUP || command == IKCFG_FREE) error(WARNING, "kernel_config_data does not exist in this kernel\n"); return; } /* We don't know how large IKCONFIG is, so we start with * 32k, if we can't find MAGIC_END assume we didn't read * enough, double it and try again. */ ii = 32; again: size = ii * 1024; if ((buf = (char *)malloc(size)) == NULL) { error(WARNING, "cannot malloc IKCONFIG input buffer\n"); return; } if (!readmem(sp->value, KVADDR, buf, size, "kernel_config_data", RETURN_ON_ERROR)) { error(WARNING, "cannot read kernel_config_data\n"); goto out2; } /* Find the start */ if (strstr(buf, MAGIC_START)) head = buf + MAGIC_SIZE + 10; /* skip past MAGIC_START and gzip header */ else { /* * Later versions put the magic number before the compressed data. 
*/ if (readmem(sp->value - 8, KVADDR, &magic, 8, "kernel_config_data MAGIC_START", RETURN_ON_ERROR) && STRNEQ(&magic, MAGIC_START)) { head = buf + 10; } else { error(WARNING, "could not find MAGIC_START!\n"); goto out2; } } tail = head; end = strlen(MAGIC_END); /* Find the end*/ while (tail < (buf + (size - 1))) { if (strncmp(tail, MAGIC_END, end)==0) { found = 1; break; } tail++; } if (found) { bufsz = tail - head; size = 10 * bufsz; if ((uncomp = (char *)malloc(size)) == NULL) { error(WARNING, "cannot malloc IKCONFIG output buffer\n"); goto out2; } } else { if (ii > 512) { error(WARNING, "could not find MAGIC_END!\n"); goto out2; } else { free(buf); ii *= 2; goto again; } } /* initialize zlib */ stream.next_in = (Bytef *)head; stream.avail_in = (uInt)bufsz; stream.next_out = (Bytef *)uncomp; stream.avail_out = (uInt)size; stream.zalloc = NULL; stream.zfree = NULL; stream.opaque = NULL; ret = inflateInit2(&stream, -MAX_WBITS); if (ret != Z_OK) { read_in_kernel_config_err(ret, "initialize"); goto out1; } ret = inflate(&stream, Z_FINISH); if (ret != Z_STREAM_END) { inflateEnd(&stream); if (ret == Z_NEED_DICT || (ret == Z_BUF_ERROR && stream.avail_in == 0)) { read_in_kernel_config_err(Z_DATA_ERROR, "uncompress"); goto out1; } read_in_kernel_config_err(ret, "uncompress"); goto out1; } size = stream.total_out; ret = inflateEnd(&stream); pos = uncomp; if (command == IKCFG_INIT) kt->ikconfig_flags |= IKCONFIG_AVAIL; else if (command == IKCFG_SETUP) { if (!(kt->ikconfig_flags & IKCONFIG_LOADED)) { if (setup_ikconfig(pos)) { kt->ikconfig_flags |= IKCONFIG_LOADED; if (CRASHDEBUG(1)) fprintf(fp, "ikconfig: %d valid configs.\n", kt->ikconfig_ents); } else error(WARNING, "IKCFG_SETUP failed\n\n"); } else error(WARNING, "IKCFG_SETUP: ikconfig data already loaded\n"); goto out1; } else if (command == IKCFG_FREE) { if (kt->ikconfig_flags & IKCONFIG_LOADED) { free_ikconfig(); kt->ikconfig_ents = 0; kt->ikconfig_flags &= ~IKCONFIG_LOADED; } else error(WARNING, "IKCFG_FREE: ikconfig 
data not loaded\n"); goto out1; } do { ret = sscanf(pos, "%511[^\n]\n%n", line, &ii); if (ret > 0) { if ((command == IKCFG_READ) || CRASHDEBUG(8)) fprintf(fp, "%s\n", line); pos += ii; ln = line; /* skip leading whitespace */ while (whitespace(*ln)) ln++; /* skip comments -- except when looking for "not set" */ if (*ln == '#') { if (strstr(ln, "CONFIG_DEBUG_BUGVERBOSE") && strstr(ln, "not set")) kt->flags |= BUGVERBOSE_OFF; if (strstr(ln, "CONFIG_DEBUG_INFO_REDUCED")) if (CRASHDEBUG(1)) error(INFO, "%s\n", ln); continue; } /* Find '=' */ if ((head = strchr(ln, '=')) != NULL) { *head = '\0'; val = head + 1; head--; /* skip trailing whitespace */ while (whitespace(*head)) { *head = '\0'; head--; } /* skip whitespace */ while (whitespace(*val)) val++; } else /* Bad line, skip it */ continue; if (command != IKCFG_INIT) continue; for (jj = 0; ikconfig[jj]; jj++) { if (STREQ(ln, ikconfig[jj])) { if (STREQ(ln, "CONFIG_NR_CPUS")) { kt->kernel_NR_CPUS = atoi(val); if (CRASHDEBUG(1)) error(INFO, "CONFIG_NR_CPUS: %d\n", kt->kernel_NR_CPUS); } else if (STREQ(ln, "CONFIG_PGTABLE_4")) { machdep->flags |= VM_4_LEVEL; if (CRASHDEBUG(1)) error(INFO, "CONFIG_PGTABLE_4\n"); } else if (STREQ(ln, "CONFIG_HZ")) { machdep->hz = atoi(val); if (CRASHDEBUG(1)) error(INFO, "CONFIG_HZ: %d\n", machdep->hz); } else if (STREQ(ln, "CONFIG_DEBUG_INFO_REDUCED")) { if (STREQ(val, "y")) { error(WARNING, "CONFIG_DEBUG_INFO_REDUCED=y\n"); no_debugging_data(INFO); } } } } } } while (ret > 0); out1: free(uncomp); out2: free(buf); return; } static void read_in_kernel_config_err(int e, char *msg) { error(WARNING, "zlib could not %s\n", msg); switch (e) { case Z_OK: fprintf(fp, "Z_OK\n"); break; case Z_STREAM_END: fprintf(fp, "Z_STREAM_END\n"); break; case Z_NEED_DICT: fprintf(fp, "Z_NEED_DICT\n"); break; case Z_ERRNO: fprintf(fp, "Z_ERNO\n"); break; case Z_STREAM_ERROR: fprintf(fp, "Z_STREAM\n"); break; case Z_DATA_ERROR: fprintf(fp, "Z_DATA_ERROR\n"); break; case Z_MEM_ERROR: /* out of memory */ 
fprintf(fp, "Z_MEM_ERROR\n");
		break;
	case Z_BUF_ERROR:	/* not enough room in output buf */
		fprintf(fp, "Z_BUF_ERROR\n");
		break;
	case Z_VERSION_ERROR:
		fprintf(fp, "Z_VERSION_ERROR\n");
		break;
	default:
		fprintf(fp, "UNKNOWN ERROR: %d\n", e);
		break;
	}
}

/*
 * With the evidence available, attempt to pre-determine whether
 * this is a paravirt-capable kernel running as bare-metal, xen,
 * kvm, etc.
 *
 * NOTE: Only bare-metal pv_ops kernels are supported so far.
 */
void
paravirt_init(void)
{
	/*
	 * pv_init_ops appears to be (as of 2.6.27) an arch-common
	 * symbol.  This may have to change.
	 */
	if (kernel_symbol_exists("pv_init_ops")) {
		if (CRASHDEBUG(1))
			error(INFO, "pv_init_ops exists: ARCH_PVOPS\n");
		kt->flags |= ARCH_PVOPS;
	}
	/*
	 * pv_init_ops moved to first entry in pv_ops as of 4.20-rc1
	 */
	if (kernel_symbol_exists("pv_ops")) {
		if (CRASHDEBUG(1))
			error(INFO, "pv_ops exists: ARCH_PVOPS\n");
		kt->flags |= ARCH_PVOPS;
	}
}

/*
 * Return TRUE if this pv_ops kernel is running under Xen, based on:
 * the pv_init_ops patch function resolving to "xen_patch" or
 * "paravirt_patch_default", a non-NULL xen_start_info pointer
 * (x86/x86_64), or a non-NULL xen_vcpu_info pointer (arm/arm64).
 * Returns FALSE for non-pv_ops kernels and on any read failure.
 */
static int
is_pvops_xen(void)
{
	ulong addr;
	char *sym;

	if (!PVOPS())
		return FALSE;

	if (symbol_exists("pv_init_ops") &&
	    readmem(symbol_value("pv_init_ops"), KVADDR,
	    &addr, sizeof(void *), "pv_init_ops", RETURN_ON_ERROR) &&
	    (sym = value_symbol(addr)) &&
	    (STREQ(sym, "xen_patch") || STREQ(sym, "paravirt_patch_default")))
		return TRUE;

	if (machine_type("X86") || machine_type("X86_64")) {
		if (symbol_exists("xen_start_info") &&
		    readmem(symbol_value("xen_start_info"), KVADDR,
		    &addr, sizeof(void *), "xen_start_info",
		    RETURN_ON_ERROR) &&
		    addr != 0)
			return TRUE;
	}

	if (machine_type("ARM") || machine_type("ARM64")) {
		if (symbol_exists("xen_vcpu_info") &&
		    readmem(symbol_value("xen_vcpu_info"), KVADDR,
		    &addr, sizeof(void *), "xen_vcpu_info",
		    RETURN_ON_ERROR) &&
		    addr != 0)
			return TRUE;
	}

	return FALSE;
}

/*
 * Get the kernel's xtime timespec from its relevant location.
*/ static void get_xtime(struct timespec *date) { struct syment *sp; uint64_t xtime_sec; if (VALID_MEMBER(timekeeper_xtime) && (sp = kernel_symbol_search("timekeeper"))) { readmem(sp->value + OFFSET(timekeeper_xtime), KVADDR, date, sizeof(struct timespec), "timekeeper xtime", RETURN_ON_ERROR); } else if (VALID_MEMBER(timekeeper_xtime_sec) && (sp = kernel_symbol_search("timekeeper"))) { readmem(sp->value + OFFSET(timekeeper_xtime_sec), KVADDR, &xtime_sec, sizeof(uint64_t), "timekeeper xtime_sec", RETURN_ON_ERROR); date->tv_sec = (__time_t)xtime_sec; } else if (VALID_MEMBER(timekeeper_xtime_sec) && (sp = kernel_symbol_search("shadow_timekeeper"))) { readmem(sp->value + OFFSET(timekeeper_xtime_sec), KVADDR, &xtime_sec, sizeof(uint64_t), "shadow_timekeeper xtime_sec", RETURN_ON_ERROR); date->tv_sec = (__time_t)xtime_sec; } else if (kernel_symbol_exists("xtime")) get_symbol_data("xtime", sizeof(struct timespec), date); } static void hypervisor_init(void) { ulong x86_hyper, name, pv_init_ops, pv_ops; char buf[BUFSIZE], *p1; kt->hypervisor = "(undetermined)"; BZERO(buf, BUFSIZE); if (kernel_symbol_exists("pv_info") && MEMBER_EXISTS("pv_info", "name") && readmem(symbol_value("pv_info") + MEMBER_OFFSET("pv_info", "name"), KVADDR, &name, sizeof(char *), "pv_info.name", QUIET|RETURN_ON_ERROR) && read_string(name, buf, BUFSIZE-1)) kt->hypervisor = strdup(buf); else if (try_get_symbol_data("x86_hyper", sizeof(void *), &x86_hyper)) { if (!x86_hyper) kt->hypervisor = "bare hardware"; else if (MEMBER_EXISTS("hypervisor_x86", "name") && readmem(x86_hyper + MEMBER_OFFSET("hypervisor_x86", "name"), KVADDR, &name, sizeof(char *), "x86_hyper->name", QUIET|RETURN_ON_ERROR) && read_string(name, buf, BUFSIZE-1)) kt->hypervisor = strdup(buf); } else if (XENDUMP_DUMPFILE() || XEN()) kt->hypervisor = "Xen"; else if (KVMDUMP_DUMPFILE()) kt->hypervisor = "KVM"; else if (PVOPS() && symbol_exists("pv_init_ops") && readmem(symbol_value("pv_init_ops"), KVADDR, &pv_init_ops, sizeof(void *), 
"pv_init_ops", RETURN_ON_ERROR) && (p1 = value_symbol(pv_init_ops)) && STREQ(p1, "native_patch")) kt->hypervisor = "bare hardware"; else if (PVOPS() && symbol_exists("pv_ops") && readmem(symbol_value("pv_ops"), KVADDR, &pv_ops, sizeof(void *), "pv_ops", RETURN_ON_ERROR) && (p1 = value_symbol(pv_ops)) && STREQ(p1, "native_patch")) kt->hypervisor = "bare hardware"; if (CRASHDEBUG(1)) fprintf(fp, "hypervisor: %s\n", kt->hypervisor); } /* * Get and display the kernel log buffer using the vmcoreinfo * data alone without the vmlinux file. */ void get_log_from_vmcoreinfo(char *file) { char *string; struct vmcoreinfo_data *vmc = &kt->vmcoreinfo; if (!(pc->flags2 & VMCOREINFO)) error(FATAL, "%s: no VMCOREINFO section\n", file); vmc->log_SIZE = vmc->log_ts_nsec_OFFSET = vmc->log_len_OFFSET = vmc->log_text_len_OFFSET = vmc->log_dict_len_OFFSET = -1; if ((string = pc->read_vmcoreinfo("OSRELEASE"))) { if (CRASHDEBUG(1)) fprintf(fp, "OSRELEASE: %s\n", string); parse_kernel_version(string); if (CRASHDEBUG(1)) fprintf(fp, "base kernel version: %d.%d.%d\n", kt->kernel_version[0], kt->kernel_version[1], kt->kernel_version[2]); free(string); } else error(FATAL, "VMCOREINFO: cannot determine kernel version\n"); if ((string = pc->read_vmcoreinfo("PAGESIZE"))) { machdep->pagesize = atoi(string); machdep->pageoffset = machdep->pagesize - 1; if (CRASHDEBUG(1)) fprintf(fp, "PAGESIZE: %d\n", machdep->pagesize); free(string); } else error(FATAL, "VMCOREINFO: cannot determine page size\n"); if ((string = pc->read_vmcoreinfo("SYMBOL(log_buf)"))) { vmc->log_buf_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(log_buf): %lx\n", vmc->log_buf_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(log_end)"))) { vmc->log_end_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(log_end): %lx\n", vmc->log_end_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(log_buf_len)"))) { vmc->log_buf_len_SYMBOL = 
htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(log_buf_len): %lx\n", vmc->log_buf_len_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(logged_chars)"))) { vmc->logged_chars_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(logged_chars): %lx\n", vmc->logged_chars_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(log_first_idx)"))) { vmc->log_first_idx_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(log_first_idx): %lx\n", vmc->log_first_idx_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(log_next_idx)"))) { vmc->log_next_idx_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(log_next_idx): %lx\n", vmc->log_next_idx_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(phys_base)"))) { vmc->phys_base_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(phys_base): %lx\n", vmc->phys_base_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("SYMBOL(_stext)"))) { vmc->_stext_SYMBOL = htol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SYMBOL(_stext): %lx\n", vmc->_stext_SYMBOL); free(string); } if ((string = pc->read_vmcoreinfo("OFFSET(log.ts_nsec)"))) { vmc->log_ts_nsec_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(log.ts_nsec): %ld\n", vmc->log_ts_nsec_OFFSET); free(string); } else if ((string = pc->read_vmcoreinfo("OFFSET(printk_log.ts_nsec)"))) { vmc->log_ts_nsec_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(printk_log.ts_nsec): %ld\n", vmc->log_ts_nsec_OFFSET); free(string); } if ((string = pc->read_vmcoreinfo("OFFSET(log.len)"))) { vmc->log_len_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(log.len): %ld\n", vmc->log_len_OFFSET); free(string); } else if ((string = 
pc->read_vmcoreinfo("OFFSET(printk_log.len)"))) { vmc->log_len_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(printk_log.len): %ld\n", vmc->log_len_OFFSET); free(string); } if ((string = pc->read_vmcoreinfo("OFFSET(log.text_len)"))) { vmc->log_text_len_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(log.text_len): %ld\n", vmc->log_text_len_OFFSET); free(string); } else if ((string = pc->read_vmcoreinfo("OFFSET(printk_log.text_len)"))) { vmc->log_text_len_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(printk_log.text_len): %ld\n", vmc->log_text_len_OFFSET); free(string); } if ((string = pc->read_vmcoreinfo("OFFSET(log.dict_len)"))) { vmc->log_dict_len_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(log.dict_len): %ld\n", vmc->log_dict_len_OFFSET); free(string); } else if ((string = pc->read_vmcoreinfo("OFFSET(printk_log.dict_len)"))) { vmc->log_dict_len_OFFSET = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "OFFSET(printk_log.dict_len): %ld\n", vmc->log_dict_len_OFFSET); free(string); } if ((string = pc->read_vmcoreinfo("SIZE(log)"))) { vmc->log_SIZE = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SIZE(log): %ld\n", vmc->log_SIZE); free(string); } else if ((string = pc->read_vmcoreinfo("SIZE(printk_log)"))) { vmc->log_SIZE = dtol(string, RETURN_ON_ERROR, NULL); if (CRASHDEBUG(1)) fprintf(fp, "SIZE(printk_log): %ld\n", vmc->log_SIZE); free(string); } /* * The per-arch VTOP() macro must be functional. 
*/ machdep_init(LOG_ONLY); if (vmc->log_buf_SYMBOL && vmc->log_buf_len_SYMBOL && vmc->log_first_idx_SYMBOL && vmc->log_next_idx_SYMBOL && (vmc->log_SIZE > 0) && (vmc->log_ts_nsec_OFFSET >= 0) && (vmc->log_len_OFFSET >= 0) && (vmc->log_text_len_OFFSET >= 0) && (vmc->log_dict_len_OFFSET >= 0)) dump_variable_length_record(); else if (vmc->log_buf_SYMBOL && vmc->log_end_SYMBOL && vmc->log_buf_len_SYMBOL && vmc->logged_chars_SYMBOL) dump_log_legacy(); else error(FATAL, "VMCOREINFO: no log buffer data\n"); } static void dump_log_legacy(void) { int i; physaddr_t paddr; ulong long_value; uint int_value; ulong log_buf; uint log_end, log_buf_len, logged_chars, total; char *buf, *p; ulong index, bytes; struct vmcoreinfo_data *vmc; vmc = &kt->vmcoreinfo; log_buf = log_end = log_buf_len = logged_chars = 0; paddr = VTOP(vmc->log_buf_SYMBOL); if (readmem(paddr, PHYSADDR, &long_value, sizeof(ulong), "log_buf pointer", RETURN_ON_ERROR)) log_buf = long_value; else error(FATAL, "cannot read log_buf value\n"); if (CRASHDEBUG(1)) fprintf(fp, "log_buf vaddr: %lx paddr: %llx => %lx\n", vmc->log_buf_SYMBOL, (ulonglong)paddr, log_buf); paddr = VTOP(vmc->log_end_SYMBOL); if (THIS_KERNEL_VERSION < LINUX(2,6,25)) { if (readmem(paddr, PHYSADDR, &long_value, sizeof(ulong), "log_end (long)", RETURN_ON_ERROR)) log_end = (uint)long_value; else error(FATAL, "cannot read log_end value\n"); } else { if (readmem(paddr, PHYSADDR, &int_value, sizeof(uint), "log_end (int)", RETURN_ON_ERROR)) log_end = int_value; else error(FATAL, "cannot read log_end value\n"); } if (CRASHDEBUG(1)) fprintf(fp, "log_end vaddr: %lx paddr: %llx => %d\n", vmc->log_end_SYMBOL, (ulonglong)paddr, log_end); paddr = VTOP(vmc->log_buf_len_SYMBOL); if (readmem(paddr, PHYSADDR, &int_value, sizeof(uint), "log_buf_len", RETURN_ON_ERROR)) log_buf_len = int_value; else error(FATAL, "cannot read log_buf_len value\n"); if (CRASHDEBUG(1)) fprintf(fp, "log_buf_len vaddr: %lx paddr: %llx => %d\n", vmc->log_buf_len_SYMBOL, (ulonglong)paddr, 
log_buf_len); paddr = VTOP(vmc->logged_chars_SYMBOL); if (readmem(paddr, PHYSADDR, &int_value, sizeof(uint), "logged_chars", RETURN_ON_ERROR)) logged_chars = int_value; else error(FATAL, "cannot read logged_chars value\n"); if (CRASHDEBUG(1)) fprintf(fp, "logged_chars vaddr: %lx paddr: %llx => %d\n", vmc->logged_chars_SYMBOL, (ulonglong)paddr, logged_chars); if ((buf = calloc(sizeof(char), log_buf_len)) == NULL) error(FATAL, "cannot calloc log_buf_len (%d) bytes\n", log_buf_len); paddr = VTOP(log_buf); if (log_end < log_buf_len) { bytes = log_end; if (!readmem(paddr, PHYSADDR, buf, bytes, "log_buf", RETURN_ON_ERROR)) error(FATAL, "cannot read log_buf\n"); total = bytes; } else { index = log_end & (log_buf_len - 1); bytes = log_buf_len - index; if (!readmem(paddr + index, PHYSADDR, buf, bytes, "log_buf + index", RETURN_ON_ERROR)) error(FATAL, "cannot read log_buf\n"); if (!readmem(paddr, PHYSADDR, buf + bytes, index, "log_buf", RETURN_ON_ERROR)) error(FATAL, "cannot read log_buf\n"); total = log_buf_len; } for (i = 0, p = buf; i < total; i++, p++) { if (*p == NULLCHAR) fputc('\n', fp); else if (ascii(*p)) fputc(*p, fp); else fputc('.', fp); } } static void dump_variable_length_record(void) { physaddr_t paddr; ulong long_value; uint32_t int_value; struct vmcoreinfo_data *vmc; ulong log_buf; uint32_t idx, log_buf_len, log_first_idx, log_next_idx; char *buf, *logptr; vmc = &kt->vmcoreinfo; log_buf = log_buf_len = log_first_idx = log_next_idx = 0; paddr = VTOP(vmc->log_buf_SYMBOL); if (readmem(paddr, PHYSADDR, &long_value, sizeof(ulong), "log_buf pointer", RETURN_ON_ERROR)) log_buf = long_value; else error(FATAL, "cannot read log_buf value\n"); if (CRASHDEBUG(1)) fprintf(fp, "log_buf vaddr: %lx paddr: %llx => %lx\n", vmc->log_buf_SYMBOL, (ulonglong)paddr, log_buf); paddr = VTOP(vmc->log_buf_len_SYMBOL); if (readmem(paddr, PHYSADDR, &int_value, sizeof(uint), "log_buf_len", RETURN_ON_ERROR)) log_buf_len = int_value; else error(FATAL, "cannot read log_buf_len value\n"); if 
(CRASHDEBUG(1)) fprintf(fp, "log_buf_len vaddr: %lx paddr: %llx => %d\n", vmc->log_buf_len_SYMBOL, (ulonglong)paddr, log_buf_len); paddr = VTOP(vmc->log_first_idx_SYMBOL); if (readmem(paddr, PHYSADDR, &int_value, sizeof(uint), "log_first_idx", RETURN_ON_ERROR)) log_first_idx = int_value; else error(FATAL, "cannot read log_first_idx value\n"); if (CRASHDEBUG(1)) fprintf(fp, "log_first_idx vaddr: %lx paddr: %llx => %d\n", vmc->log_first_idx_SYMBOL, (ulonglong)paddr, log_first_idx); paddr = VTOP(vmc->log_next_idx_SYMBOL); if (readmem(paddr, PHYSADDR, &int_value, sizeof(uint), "log_next_idx", RETURN_ON_ERROR)) log_next_idx = int_value; else error(FATAL, "cannot read log_next_idx value\n"); if (CRASHDEBUG(1)) fprintf(fp, "log_next_idx vaddr: %lx paddr: %llx => %d\n", vmc->log_next_idx_SYMBOL, (ulonglong)paddr, log_next_idx); ASSIGN_SIZE(log)= vmc->log_SIZE; ASSIGN_OFFSET(log_ts_nsec) = vmc->log_ts_nsec_OFFSET; ASSIGN_OFFSET(log_len) = vmc->log_len_OFFSET; ASSIGN_OFFSET(log_text_len) = vmc->log_text_len_OFFSET; ASSIGN_OFFSET(log_dict_len) = vmc->log_dict_len_OFFSET; if ((buf = calloc(sizeof(char), log_buf_len)) == NULL) error(FATAL, "cannot calloc log_buf_len (%d) bytes\n", log_buf_len); paddr = VTOP(log_buf); if (!readmem(paddr, PHYSADDR, buf, log_buf_len, "log_buf", RETURN_ON_ERROR)) error(FATAL, "cannot read log_buf\n"); hq_init(); hq_open(); idx = log_first_idx; while (idx != log_next_idx) { logptr = log_from_idx(idx, buf); dump_log_entry(logptr, 0); if (!hq_enter((ulong)logptr)) { error(INFO, "\nduplicate log_buf message pointer\n"); break; } idx = log_next(idx, buf); if (idx >= log_buf_len) { error(INFO, "\ninvalid log_buf entry encountered\n"); break; } if (CRASHDEBUG(1) && (idx == log_next_idx)) fprintf(fp, "\nfound log_next_idx OK\n"); } hq_close(); } static void show_kernel_taints(char *buf, int verbose) { int i, bx; uint8_t tnt_bit; char tnt_true, tnt_false; int tnts_len = 0; ulong tnts_addr; ulong tainted_mask, *tainted_mask_ptr; int tainted; struct syment 
*sp = NULL; if (kernel_symbol_exists("tainted")) { get_symbol_data("tainted", sizeof(int), &tainted); if (verbose) fprintf(fp, "TAINTED: %x\n", tainted); return; } else if (VALID_STRUCT(tnt) || (kernel_symbol_exists("tnts") && STRUCT_EXISTS("tnt"))) { if (!VALID_STRUCT(tnt)) { STRUCT_SIZE_INIT(tnt, "tnt"); MEMBER_OFFSET_INIT(tnt_bit, "tnt", "bit"); MEMBER_OFFSET_INIT(tnt_true, "tnt", "true"); MEMBER_OFFSET_INIT(tnt_false, "tnt", "false"); } tnts_len = get_array_length("tnts", NULL, 0); sp = symbol_search("tnts"); } else if (VALID_STRUCT(taint_flag) || (kernel_symbol_exists("taint_flags") && STRUCT_EXISTS("taint_flag"))) { if (!(VALID_STRUCT(taint_flag) && VALID_MEMBER(tnt_true) && VALID_MEMBER(tnt_false))) { STRUCT_SIZE_INIT(taint_flag, "taint_flag"); MEMBER_OFFSET_INIT(tnt_true, "taint_flag", "true"); MEMBER_OFFSET_INIT(tnt_false, "taint_flag", "false"); if (INVALID_MEMBER(tnt_true)) { MEMBER_OFFSET_INIT(tnt_true, "taint_flag", "c_true"); MEMBER_OFFSET_INIT(tnt_false, "taint_flag", "c_false"); } } if (!(pc->flags & RUNTIME)) { if (INVALID_MEMBER(tnt_true) || INVALID_MEMBER(tnt_false) || !kernel_symbol_exists("tainted_mask")) return; } tnts_len = get_array_length("taint_flags", NULL, 0); sp = symbol_search("taint_flags"); } else if (verbose) option_not_supported('t'); tnts_addr = sp->value; get_symbol_data("tainted_mask", sizeof(ulong), &tainted_mask); tainted_mask_ptr = &tainted_mask; bx = 0; buf[0] = '\0'; if (VALID_STRUCT(tnt)) { for (i = 0; i < (tnts_len * SIZE(tnt)); i += SIZE(tnt)) { readmem((tnts_addr + i) + OFFSET(tnt_bit), KVADDR, &tnt_bit, sizeof(uint8_t), "tnt bit", FAULT_ON_ERROR); if (NUM_IN_BITMAP(tainted_mask_ptr, tnt_bit)) { readmem((tnts_addr + i) + OFFSET(tnt_true), KVADDR, &tnt_true, sizeof(char), "tnt true", FAULT_ON_ERROR); buf[bx++] = tnt_true; } else { readmem((tnts_addr + i) + OFFSET(tnt_false), KVADDR, &tnt_false, sizeof(char), "tnt false", FAULT_ON_ERROR); if (tnt_false != ' ' && tnt_false != '-' && tnt_false != 'G') buf[bx++] = tnt_false; 
} } } else if (VALID_STRUCT(taint_flag)) { for (i = 0; i < tnts_len; i++) { if (NUM_IN_BITMAP(tainted_mask_ptr, i)) { readmem((tnts_addr + i * SIZE(taint_flag)) + OFFSET(tnt_true), KVADDR, &tnt_true, sizeof(char), "tnt true", FAULT_ON_ERROR); buf[bx++] = tnt_true; } else { readmem((tnts_addr + i * SIZE(taint_flag)) + OFFSET(tnt_false), KVADDR, &tnt_false, sizeof(char), "tnt false", FAULT_ON_ERROR); if (tnt_false != ' ' && tnt_false != '-' && tnt_false != 'G') buf[bx++] = tnt_false; } } } buf[bx++] = '\0'; if (verbose) fprintf(fp, "TAINTED_MASK: %lx %s\n", tainted_mask, buf); } static void dump_dmi_info(void) { int i, array_len, len, maxlen; ulong dmi_ident_p, vaddr; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS]; if (!kernel_symbol_exists("dmi_ident")) error(FATAL, "dmi_ident does not exist in this kernel\n"); dmi_ident_p = symbol_value("dmi_ident"); array_len = get_array_length("dmi_ident", NULL, 0); maxlen = 0; open_tmpfile(); if (dump_enumerator_list("dmi_field")) { rewind(pc->tmpfile); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (!strstr(buf1, " = ")) continue; if ((parse_line(buf1, arglist) != 3) || (atoi(arglist[2]) >= array_len)) break; len = strlen(arglist[0]); if (len > maxlen) maxlen = len; } rewind(pc->tmpfile); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (!strstr(buf1, " = ")) continue; if ((parse_line(buf1, arglist) != 3) || ((i = atoi(arglist[2])) >= array_len)) break; readmem(dmi_ident_p + (sizeof(void *) * i), KVADDR, &vaddr, sizeof(void *), "dmi_ident", FAULT_ON_ERROR); if (!vaddr) continue; read_string(vaddr, buf2, BUFSIZE-1); fprintf(pc->saved_fp, " %s%s: %s\n", space(maxlen - strlen(arglist[0])), arglist[0], buf2); } } else { for (i = 0; i < array_len; i++) { readmem(dmi_ident_p + (sizeof(void *) * i), KVADDR, &vaddr, sizeof(void *), "dmi_ident", FAULT_ON_ERROR); if (!vaddr) continue; read_string(vaddr, buf1, BUFSIZE-1); fprintf(pc->saved_fp, " dmi_ident[%d]: %s\n", i, buf1); } } close_tmpfile(); } #define NLMSG_ALIGNTO 4 
#define NLMSG_DATA(nlh) (nlh + roundup(SIZE(nlmsghdr), NLMSG_ALIGNTO)) /* Walk one audit netlink sk_buff queue, printing each message's nlmsg_type and text payload; returns the queue's qlen (0 if empty). */ static ulong dump_audit_skb_queue(ulong audit_skb_queue) { ulong skb_buff_head_next = 0, p; uint32_t qlen = 0; if (INVALID_SIZE(nlmsghdr)) { STRUCT_SIZE_INIT(nlmsghdr, "nlmsghdr"); MEMBER_OFFSET_INIT(nlmsghdr_nlmsg_type, "nlmsghdr", "nlmsg_type"); MEMBER_SIZE_INIT(nlmsghdr_nlmsg_type, "nlmsghdr", "nlmsg_type"); MEMBER_OFFSET_INIT(sk_buff_head_next, "sk_buff_head", "next"); MEMBER_OFFSET_INIT(sk_buff_head_qlen, "sk_buff_head", "qlen"); MEMBER_SIZE_INIT(sk_buff_head_qlen, "sk_buff_head", "qlen"); MEMBER_OFFSET_INIT(sk_buff_data, "sk_buff", "data"); MEMBER_OFFSET_INIT(sk_buff_len, "sk_buff", "len"); MEMBER_OFFSET_INIT(sk_buff_next, "sk_buff", "next"); MEMBER_SIZE_INIT(sk_buff_len, "sk_buff", "len"); } readmem(audit_skb_queue + OFFSET(sk_buff_head_qlen), KVADDR, &qlen, SIZE(sk_buff_head_qlen), "audit_skb_queue.qlen", FAULT_ON_ERROR); if (!qlen) return 0; readmem(audit_skb_queue + OFFSET(sk_buff_head_next), KVADDR, &skb_buff_head_next, sizeof(void *), "audit_skb_queue.next", FAULT_ON_ERROR); if (!skb_buff_head_next) error(FATAL, "audit_skb_queue.next: NULL\n"); /* circular list: iterate until we come back to the head */ p = skb_buff_head_next; do { ulong data, data_len; uint len; uint16_t nlmsg_type; char *buf = NULL; if (CRASHDEBUG(2)) fprintf(fp, "%#016lx\n", p); readmem(p + OFFSET(sk_buff_len), KVADDR, &len, SIZE(sk_buff_len), "sk_buff.len", FAULT_ON_ERROR); data_len = len - roundup(SIZE(nlmsghdr), NLMSG_ALIGNTO); readmem(p + OFFSET(sk_buff_data), KVADDR, &data, sizeof(void *), "sk_buff.data", FAULT_ON_ERROR); if (!data) error(FATAL, "sk_buff.data: NULL\n"); readmem(data + OFFSET(nlmsghdr_nlmsg_type), KVADDR, &nlmsg_type, SIZE(nlmsghdr_nlmsg_type), "nlmsghdr.nlmsg_type", FAULT_ON_ERROR); buf = GETBUF(data_len + 1); readmem(NLMSG_DATA(data), KVADDR, buf, data_len, "sk_buff.data + sizeof(struct nlmsghdr)", FAULT_ON_ERROR); buf[data_len] = '\0'; fprintf(fp, "type=%u %s\n", nlmsg_type, buf); FREEBUF(buf); readmem(p + OFFSET(sk_buff_next), KVADDR, &p, 
sizeof(void *), "skb_buff.next", FAULT_ON_ERROR); } while (p != audit_skb_queue); return qlen; } /* Dump the audit queue behind symname if that symbol exists; returns its qlen, else 0. */ static ulong __dump_audit(char *symname) { if (symbol_exists(symname)) { if (CRASHDEBUG(1)) fprintf(fp, "# %s:\n", symname); return dump_audit_skb_queue(symbol_value(symname)); } return 0; } /* Dump every kernel audit log queue, handling both the old audit_skb_* and the newer audit_* queue naming. */ static void dump_audit(void) { ulong qlen = 0; if (symbol_exists("audit_skb_queue")) { qlen += __dump_audit("audit_skb_hold_queue"); qlen += __dump_audit("audit_skb_queue"); } else if (symbol_exists("audit_queue")) { qlen += __dump_audit("audit_hold_queue"); qlen += __dump_audit("audit_retry_queue"); qlen += __dump_audit("audit_queue"); } else option_not_supported('a'); if (!qlen) error(INFO, "kernel audit log is empty\n"); } #define PRINTK_SAFE_SEQ_BUF_INDENT 2 /* Dump one per-cpu printk_safe_seq_buf (e.g. "nmi_print_seq") for every cpu; msg_flags controls header, text-only, and log-level decoration. */ static void __dump_printk_safe_seq_buf(char *buf_name, int msg_flags) { int cpu, buffer_size; char *buffer; ulong base_addr, len_addr, message_lost_addr, buffer_addr; bool show_header; show_header = msg_flags & SHOW_LOG_SAFE; if (!symbol_exists(buf_name)) { return; } base_addr = symbol_value(buf_name); len_addr = base_addr + OFFSET(printk_safe_seq_buf_len) + OFFSET(atomic_t_counter); message_lost_addr = base_addr + OFFSET(printk_safe_seq_buf_message_lost) + OFFSET(atomic_t_counter); buffer_addr = base_addr + OFFSET(printk_safe_seq_buf_buffer); buffer_size = SIZE(printk_safe_seq_buf_buffer); buffer = GETBUF(buffer_size); if (show_header) fprintf(fp, "PRINTK_SAFE_SEQ_BUF: %s\n", buf_name); for (cpu = 0; cpu < kt->cpus; cpu++) { int len, message_lost; ulong per_cpu_offset; per_cpu_offset = kt->__per_cpu_offset[cpu]; readmem(len_addr + per_cpu_offset, KVADDR, &len, sizeof(int), "printk_safe_seq_buf len", FAULT_ON_ERROR); if (show_header) { readmem(message_lost_addr + per_cpu_offset, KVADDR, &message_lost, sizeof(int), "printk_safe_seq_buf message_lost", FAULT_ON_ERROR); fprintf(fp, "CPU: %d ADDR: %lx LEN: %d MESSAGE_LOST: %d\n", cpu, base_addr + per_cpu_offset, len, message_lost); } if (len > 0) { int i, n, ilen; char *p; bool 
start_of_line; ilen = 0; if (show_header) { ilen = PRINTK_SAFE_SEQ_BUF_INDENT; } else { if (msg_flags & SHOW_LOG_TEXT) ilen = 0; else ilen = strlen(buf_name) + 3; // "[%s] " } if (msg_flags & SHOW_LOG_LEVEL) ilen += 3; // "<%c>" readmem(buffer_addr + per_cpu_offset, KVADDR, buffer, buffer_size, "printk_safe_seq_buf buffer", FAULT_ON_ERROR); start_of_line = true; n = (len <= buffer_size) ? len : buffer_size; for (i = 0, p = buffer; i < n; i++, p++) { bool sol = start_of_line; start_of_line = false; if (*p == 0x1) { //SOH i++; p++; if (!sol) fprintf(fp, "\n"); if (show_header) fprintf(fp, "%s", space(PRINTK_SAFE_SEQ_BUF_INDENT)); else if (!(msg_flags & SHOW_LOG_TEXT)) fprintf(fp, "[%s] ", buf_name); if ((msg_flags & SHOW_LOG_LEVEL) && (i < n)) { switch (*p) { case '0' ... '7': case 'c': fprintf(fp, "<%c>", *p); } } continue; } else { if (sol) fprintf(fp, "%s", space(ilen)); if (isprint(*p) || isspace(*p)) { fputc(*p, fp); if (*p == '\n') start_of_line = true; } else { fputc('.', fp); } } } if (!start_of_line) fputc('\n', fp); if (show_header) fputc('\n', fp); } else if (show_header) { fprintf(fp, "%s(empty)\n\n", space(PRINTK_SAFE_SEQ_BUF_INDENT)); } } FREEBUF(buffer); } /* Dump the pre-5.10 printk safe/NMI per-cpu buffers when the kernel has them; -s (SHOW_LOG_SAFE) requires the structure members initialized below. */ static void dump_printk_safe_seq_buf(int msg_flags) { if (!STRUCT_EXISTS("printk_safe_seq_buf")) return; if (INVALID_SIZE(printk_safe_seq_buf_buffer)) { MEMBER_OFFSET_INIT(printk_safe_seq_buf_len, "printk_safe_seq_buf", "len"); MEMBER_OFFSET_INIT(printk_safe_seq_buf_message_lost, "printk_safe_seq_buf", "message_lost"); MEMBER_OFFSET_INIT(printk_safe_seq_buf_buffer, "printk_safe_seq_buf", "buffer"); if (!INVALID_MEMBER(printk_safe_seq_buf_buffer)) { MEMBER_SIZE_INIT(printk_safe_seq_buf_buffer, "printk_safe_seq_buf", "buffer"); } } if (INVALID_MEMBER(printk_safe_seq_buf_len) || INVALID_MEMBER(printk_safe_seq_buf_message_lost) || INVALID_MEMBER(printk_safe_seq_buf_buffer) || INVALID_SIZE(printk_safe_seq_buf_buffer)) { if (msg_flags & SHOW_LOG_SAFE) error(INFO, "-s not supported with this kernel 
version\n"); return; } __dump_printk_safe_seq_buf("nmi_print_seq", msg_flags); __dump_printk_safe_seq_buf("safe_print_seq", msg_flags); } /* * Reads a string value from the VMCOREINFO data stored in (live) memory. * * Returns a string (that has to be freed by the caller) that contains the * value for key or NULL if the key has not been found. */ static char * vmcoreinfo_read_string(const char *key) { char *buf, *value_string, *p1, *p2; size_t value_length; size_t vmcoreinfo_size; ulong vmcoreinfo_data; char keybuf[BUFSIZE]; buf = value_string = NULL; switch (get_symbol_type("vmcoreinfo_data", NULL, NULL)) { case TYPE_CODE_PTR: get_symbol_data("vmcoreinfo_data", sizeof(vmcoreinfo_data), &vmcoreinfo_data); break; case TYPE_CODE_ARRAY: vmcoreinfo_data = symbol_value("vmcoreinfo_data"); break; default: return NULL; } get_symbol_data("vmcoreinfo_size", sizeof(vmcoreinfo_size), &vmcoreinfo_size); sprintf(keybuf, "%s=", key); if ((buf = malloc(vmcoreinfo_size+1)) == NULL) { error(INFO, "cannot malloc vmcoreinfo buffer\n"); goto err; } if (!readmem(vmcoreinfo_data, KVADDR, buf, vmcoreinfo_size, "vmcoreinfo_data", RETURN_ON_ERROR|QUIET)) { error(INFO, "cannot read vmcoreinfo_data\n"); goto err; } buf[vmcoreinfo_size] = '\n'; if ((p1 = strstr(buf, keybuf))) { p2 = p1 + strlen(keybuf); p1 = strstr(p2, "\n"); value_length = p1-p2; value_string = calloc(value_length+1, sizeof(char)); strncpy(value_string, p2, value_length); value_string[value_length] = NULLCHAR; } err: if (buf) free(buf); return value_string; } static void check_vmcoreinfo(void) { if (!kernel_symbol_exists("vmcoreinfo_data") || !kernel_symbol_exists("vmcoreinfo_size")) return; if (pc->read_vmcoreinfo == no_vmcoreinfo) { switch (get_symbol_type("vmcoreinfo_data", NULL, NULL)) { case TYPE_CODE_PTR: pc->read_vmcoreinfo = vmcoreinfo_read_string; break; case TYPE_CODE_ARRAY: pc->read_vmcoreinfo = vmcoreinfo_read_string; break; } } } static int get_linux_banner_from_vmlinux(char *buf, size_t size) { struct 
bfd_section *sect; long offset; ulong start_rodata; if (kernel_symbol_exists(".rodata")) start_rodata = symbol_value(".rodata"); else if (kernel_symbol_exists("__start_rodata")) start_rodata = symbol_value("__start_rodata"); else return FALSE; sect = bfd_get_section_by_name(st->bfd, ".rodata"); if (!sect) return FALSE; /* * Although symbol_value() returns dynamic symbol value that * is affected by kaslr, which is different from static symbol * value in vmlinux file, but relative offset to linux_banner * object in .rodata section is idential. */ offset = symbol_value("linux_banner") - start_rodata; if (!bfd_get_section_contents(st->bfd, sect, buf, offset, size)) return FALSE; return TRUE; } #if defined(X86_64) || defined(ARM64) || defined(PPC64) extern ulong extra_stacks_idx; extern void *extra_stacks_regs[]; void silent_call_bt(void); /* Run the "bt" command with all output (including errors) redirected to /dev/null; per the note below, cmd_bt() repopulates extra_stacks_regs[] as a side effect for later use. */ void silent_call_bt(void) { jmp_buf main_loop_env_save; unsigned long long flags_save = pc->flags; FILE *fp_save = fp; FILE *error_fp_save = pc->error_fp; /* Redirect all cmd_bt() outputs into null */ fp = pc->nullfp; pc->error_fp = pc->nullfp; for (int i = 0; i < extra_stacks_idx; i++) { /* Note: GETBUF/FREEBUF is not applicable for extra_stacks_regs, because we are reserving extra_stacks_regs by cmd_bt() for later use. But GETBUF/FREEBUF is designed for use only within one cmd. See process_command_line() -> restore_sanity() -> free_all_bufs(). So we use malloc/free instead. 
*/ free(extra_stacks_regs[i]); extra_stacks_regs[i] = NULL; } /* Prepare args used by cmd_bt() */ sprintf(pc->command_line, "bt\n"); argcnt = parse_line(pc->command_line, args); optind = 1; pc->flags |= RUNTIME; /* Catch error FATAL generated by cmd_bt() if any */ memcpy(&main_loop_env_save, &pc->main_loop_env, sizeof(jmp_buf)); if (setjmp(pc->main_loop_env)) { goto out; } cmd_bt(); out: /* Restore all */ memcpy(&pc->main_loop_env, &main_loop_env_save, sizeof(jmp_buf)); pc->flags = flags_save; fp = fp_save; pc->error_fp = error_fp_save; } #endif crash-utility-crash-9cd43f5/xen_hyper_command.c0000664000372000037200000013325715107550337021252 0ustar juerghjuergh/* * xen_hyper_command.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "defs.h" #ifdef XEN_HYPERVISOR_ARCH #include "xen_hyper_defs.h" /* Per-architecture register-name tables used when printing ELF note register sets. */ #ifdef X86 char *xhregt[] = { "ebx", "ecx", "edx", "esi", "edi", "ebp", "eax", "ds", "es", "fs", "gs", "orig_eax", "eip", "cs", "eflags", "esp", "ss", NULL }; #endif #ifdef X86_64 /* NOTE(review): "fs" and "gs" appear twice in this table — verify against the x86_64 ELF note register layout. */ char *xhregt[] = { "r15", "r14", "r13", "r12", "rbp", "rbx", "r11", "r10", "r9", "r8", "rax", "rcx", "rdx", "rsi", "rdi", "orig_rax", "rip", "cs", "eflags", "rsp", "ss", "fs", "gs", "ds", "es", "fs", "gs", NULL }; #endif #ifdef IA64 char *xhregt[] = { "aaa", "bbb", NULL }; #endif /* Forward declarations for the command workers and row printers below. */ static void xen_hyper_do_domain(struct xen_hyper_cmd_args *da); static void xen_hyper_do_doms(struct xen_hyper_cmd_args *da); static void xen_hyper_show_doms(struct xen_hyper_domain_context *dc); static void xen_hyper_do_dumpinfo(ulong flag, struct xen_hyper_cmd_args *dia); static void xen_hyper_show_dumpinfo(ulong flag, struct xen_hyper_dumpinfo_context *dic); static void xen_hyper_do_pcpus(ulong flag, struct xen_hyper_cmd_args *pca); static void xen_hyper_show_pcpus(ulong flag, struct xen_hyper_pcpu_context *pcc); static void xen_hyper_do_sched(ulong flag, struct xen_hyper_cmd_args *scha); static void xen_hyper_show_sched(ulong flag, struct xen_hyper_sched_context *schc); static void xen_hyper_do_vcpu(struct xen_hyper_cmd_args *vca); static void xen_hyper_do_vcpus(struct xen_hyper_cmd_args *vca); static void xen_hyper_show_vcpus(struct xen_hyper_vcpu_context *vcc); static char *xen_hyper_domain_to_type(ulong domain, int *type, char *buf, int verbose); static char *xen_hyper_domain_context_to_type( struct xen_hyper_domain_context *dc, int *type, char *buf, int verbose); static int xen_hyper_str_to_domain_context(char *string, ulong *value, struct xen_hyper_domain_context **dcp); static int xen_hyper_str_to_dumpinfo_context(char *string, ulong *value, struct xen_hyper_dumpinfo_context **dicp); static int xen_hyper_strvcpu_to_vcpu_context(char *string, ulong *value, struct xen_hyper_vcpu_context **vccp); static int 
xen_hyper_strid_to_vcpu_context(char *strdom, char *strvc, ulong *valdom, ulong *valvc, struct xen_hyper_vcpu_context **vccp); static int xen_hyper_str_to_pcpu_context(char *string, ulong *value, struct xen_hyper_pcpu_context **pccp); /* * Display domain struct. */ void xen_hyper_cmd_domain(void) { struct xen_hyper_cmd_args da; struct xen_hyper_domain_context *dc; ulong val; int c, cnt, type, bogus; BZERO(&da, sizeof(struct xen_hyper_cmd_args)); while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; /* Each argument is either a domain id or a domain address. */ while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_domain_context(args[optind], &val, &dc); switch (type) { case XEN_HYPER_STR_DID: case XEN_HYPER_STR_DOMAIN: da.value[cnt] = val; da.type[cnt] = type; da.addr[cnt] = dc->domain; da.context[cnt] = dc; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid domain or id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; } da.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_domain(&da); } /* * Do the work requested by xen_hyper_cmd_domain(). */ static void xen_hyper_do_domain(struct xen_hyper_cmd_args *da) { int i; if (da->cnt) { if (da->cnt == 1) { xhdt->last = da->context[0]; } for (i = 0; i < da->cnt; i++) { dump_struct("domain", da->addr[i], 0); } } else { dump_struct("domain", xhdt->last->domain, 0); } } /* * Display domain status. 
*/ void xen_hyper_cmd_doms(void) { struct xen_hyper_cmd_args da; struct xen_hyper_domain_context *dc; ulong val; int c, cnt, type, bogus; BZERO(&da, sizeof(struct xen_hyper_cmd_args)); while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_domain_context(args[optind], &val, &dc); switch (type) { case XEN_HYPER_STR_DID: case XEN_HYPER_STR_DOMAIN: da.value[cnt] = val; da.type[cnt] = type; da.addr[cnt] = dc->domain; da.context[cnt] = dc; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid domain or id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; } da.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_doms(&da); } /* * Do the work requested by xen_hyper_cmd_doms(). */ static void xen_hyper_do_doms(struct xen_hyper_cmd_args *da) { struct xen_hyper_domain_context *dca; char buf1[XEN_HYPER_CMD_BUFSIZE]; char buf2[XEN_HYPER_CMD_BUFSIZE]; int i; sprintf(buf1, " DID %s ST T ", mkstring(buf2, VADDR_PRLEN, CENTER|RJUST, "DOMAIN")); mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|RJUST, "MAXPAGE"); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|RJUST, "TOTPAGE"); strncat(buf1, " VCPU ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|RJUST, "SHARED_I"); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "P2M_MFN"); fprintf(fp, "%s\n", buf1); if (da->cnt) { for (i = 0; i < da->cnt; i++) { xen_hyper_show_doms(da->context[i]); } } else { for (i = 0, dca=xhdt->context_array; i < XEN_HYPER_NR_DOMAINS(); i++, dca++) { xen_hyper_show_doms(dca); } } } /* Print one row of the "doms" table for domain context dc. */ static void xen_hyper_show_doms(struct xen_hyper_domain_context *dc) { char *act, *crash; uint cpuid; int type, i, 
j; struct xen_hyper_pcpu_context *pcc; #if defined(X86) || defined(X86_64) char *shared_info; #elif defined(IA64) char *domain_struct; ulong pgd; #endif char buf1[XEN_HYPER_CMD_BUFSIZE]; char buf2[XEN_HYPER_CMD_BUFSIZE]; if (!(dc->domain)) { return; } #if defined(X86) || defined(X86_64) shared_info = GETBUF(XEN_HYPER_SIZE(shared_info)); if (dc->shared_info) { if (!readmem(dc->shared_info, KVADDR, shared_info, XEN_HYPER_SIZE(shared_info), "fill_shared_info_struct", ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) { error(WARNING, "cannot fill shared_info struct.\n"); BZERO(shared_info, XEN_HYPER_SIZE(shared_info)); } } #elif defined(IA64) if ((domain_struct = xen_hyper_read_domain(dc->domain)) == NULL) { error(FATAL, "cannot read domain.\n"); } #endif /* '>' marks a domain with a vcpu currently running on some pcpu; '*' marks the crashing domain. */ act = NULL; for_cpu_indexes(i, cpuid) { pcc = xen_hyper_id_to_pcpu_context(cpuid); for (j = 0; j < dc->vcpu_cnt; j++) { if (pcc->current_vcpu == dc->vcpu[j]) { act = ">"; break; } } if (act) break; } if (act == NULL) act = " "; if (xht->crashing_vcc && dc->domain == xht->crashing_vcc->domain) { crash = "*"; } else { crash = " "; } sprintf(buf1, "%s%s%5d ", act, crash, dc->domain_id); mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, (char *)(dc->domain)); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); sprintf(&buf1[strlen(buf1)], "%s ", xen_hyper_domain_state_string(dc, buf2, !VERBOSE)); sprintf(&buf1[strlen(buf1)], "%s ", xen_hyper_domain_context_to_type(dc, &type, buf2, !VERBOSE)); mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|INT_HEX|RJUST, MKSTR((long)(dc->max_pages))); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); mkstring(&buf1[strlen(buf1)], INT_PRLEN, CENTER|INT_HEX|RJUST, MKSTR((long)(dc->tot_pages))); sprintf(&buf1[strlen(buf1)], " %3d ", dc->vcpu_cnt); mkstring(&buf1[strlen(buf1)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(dc->shared_info)); strncat(buf1, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf1)-1); #if defined(X86) || defined(X86_64) if (dc->shared_info) { 
mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(ULONG(shared_info + XEN_HYPER_OFFSET(shared_info_arch) + XEN_HYPER_OFFSET(arch_shared_info_pfn_to_mfn_frame_list_list))) ); } else { mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "----"); } FREEBUF(shared_info); #elif defined(IA64) pgd = ULONG(domain_struct + XEN_HYPER_OFFSET(domain_arch) + XEN_HYPER_OFFSET(arch_domain_mm) + XEN_HYPER_OFFSET(mm_struct_pgd)); if (pgd) { mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR((pgd - DIRECTMAP_VIRT_START) >> machdep->pageshift)); } else { mkstring(&buf1[strlen(buf1)], LONG_PRLEN, CENTER|RJUST, "----"); } #endif fprintf(fp, "%s\n", buf1); } /* * Display ELF Notes information. */ void xen_hyper_cmd_dumpinfo(void) { struct xen_hyper_cmd_args dia; ulong flag; ulong val; struct xen_hyper_dumpinfo_context *dic; int c, cnt, type, bogus; BZERO(&dia, sizeof(struct xen_hyper_cmd_args)); flag = val =0; dic = NULL; while ((c = getopt(argcnt, args, "rt")) != EOF) { switch(c) { case 't': flag |= XEN_HYPER_DUMPINFO_TIME; break; case 'r': flag |= XEN_HYPER_DUMPINFO_REGS; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_dumpinfo_context(args[optind], &val, &dic); switch (type) { case XEN_HYPER_STR_PCID: case XEN_HYPER_STR_ADDR: dia.value[cnt] = val; dia.type[cnt] = type; dia.context[cnt] = dic; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid note address or id " "value: %s\n\n", args[optind]); bogus++; break; } } else { error(INFO, "invalid note address or id " "value: %s\n\n", args[optind]); } optind++; } dia.cnt = cnt; if (!cnt && bogus) { return; } xen_hyper_do_dumpinfo(flag, &dia); } /* * Do the work requested by xen_hyper_cmd_dumpinfo(). 
*/ static void xen_hyper_do_dumpinfo(ulong flag, struct xen_hyper_cmd_args *dia) { struct xen_hyper_dumpinfo_context *dic; char buf[XEN_HYPER_CMD_BUFSIZE]; int i, cnt; if (dia->cnt) { cnt = dia->cnt; } else { cnt = XEN_HYPER_NR_PCPUS(); } for (i = 0; i < cnt; i++) { /* reprint the header per entry when -r/-t adds per-cpu sections */ if (i == 0 || flag & XEN_HYPER_DUMPINFO_REGS || flag & XEN_HYPER_DUMPINFO_TIME) { if (i) { fprintf(fp, "\n"); } sprintf(buf, " PCID "); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "ENOTE"); // sprintf(&buf[strlen(buf)], " PID PPID PGRP SID"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CORE"); if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) { strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "XEN_CORE"); } if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) { strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "XEN_INFO"); } fprintf(fp, "%s\n", buf); } if (dia->cnt) { dic = dia->context[i]; } else { dic = xen_hyper_id_to_dumpinfo_context(xht->cpu_idxs[i]); } xen_hyper_show_dumpinfo(flag, dic); } } /* Print one ELF-note row (plus optional time/register detail) for dumpinfo context dic. */ static void xen_hyper_show_dumpinfo(ulong flag, struct xen_hyper_dumpinfo_context *dic) { char buf[XEN_HYPER_CMD_BUFSIZE]; char *note_buf; ulong addr; ulong *regs; long tv_sec, tv_usec; int i, regcnt; if (!dic || !dic->note) { return; } note_buf = dic->ELF_Prstatus_ptr; sprintf(buf, "%5d ", dic->pcpu_id); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(dic->note)); #if 0 pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_pid)); sprintf(&buf[strlen(buf)], " %5d ", pid); pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_ppid)); sprintf(&buf[strlen(buf)], "%5d ", pid); pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_pgrp)); sprintf(&buf[strlen(buf)], "%5d ", pid); pid = INT(note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_sid)); sprintf(&buf[strlen(buf)], "%5d", pid); #endif strncat(buf, " ", 
XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); /* NOTE(review): the CORE column prints dic->note again rather than note + core_offset — confirm intended. */ mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(dic->note)); if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V2) { strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(dic->note + xhdit->core_size)); } if (xhdit->note_ver >= XEN_HYPER_ELF_NOTE_V3) { strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); if (xhdit->xen_info_cpu == dic->pcpu_id) mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(dic->note + xhdit->core_size + xhdit->xen_core_size)); else mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "--"); } fprintf(fp, "%s\n", buf); if (flag & XEN_HYPER_DUMPINFO_TIME) { sprintf(buf, " "); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "tv_sec"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "tv_usec"); fprintf(fp, "%s\n", buf); addr = (ulong)note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_utime); for (i = 0; i < 4; i++, addr += XEN_HYPER_SIZE(ELF_Timeval)) { switch (i) { case 0: sprintf(buf, " pr_utime "); break; case 1: sprintf(buf, " pr_stime "); break; case 2: sprintf(buf, " pr_cutime "); break; case 3: sprintf(buf, " pr_cstime "); break; } tv_sec = LONG(addr + XEN_HYPER_OFFSET(ELF_Timeval_tv_sec)); /* NOTE(review): tv_usec is read at offset tv_sec + tv_usec; this is correct only if ELF_Timeval_tv_sec's offset is 0 — verify. */ tv_usec = LONG(addr + XEN_HYPER_OFFSET(ELF_Timeval_tv_sec) + XEN_HYPER_OFFSET(ELF_Timeval_tv_usec)); mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(tv_sec)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(tv_usec)); fprintf(fp, "%s\n", buf); } } if (flag & XEN_HYPER_DUMPINFO_REGS) { regcnt = XEN_HYPER_SIZE(ELF_Gregset) / sizeof(long); addr = (ulong)note_buf + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg); regs = (ulong *)addr; fprintf(fp, "Register information(%lx):\n", dic->note + xhdit->core_offset + XEN_HYPER_OFFSET(ELF_Prstatus_pr_reg)); for (i = 0; i < 
regcnt; i++, regs++) { if (xhregt[i] == NULL) { break; } fprintf(fp, " %s = ", xhregt[i]); fprintf(fp, "0x%s\n", mkstring(buf, LONG_PRLEN, LONG_HEX|LJUST, MKSTR(*regs))); } } } /* * Dump the Xen conring in chronological order. */ void xen_hyper_cmd_log(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); xen_hyper_dump_log(); } void xen_hyper_dump_log(void) { uint conringp, warp, len, idx, i; ulong conring; char *buf; char last = 0; uint32_t conring_size; if (get_symbol_type("conring", NULL, NULL) == TYPE_CODE_ARRAY) conring = symbol_value("conring"); else get_symbol_data("conring", sizeof(ulong), &conring); get_symbol_data("conringp", sizeof(uint), &conringp); if (symbol_exists("conring_size")) get_symbol_data("conring_size", sizeof(uint32_t), &conring_size); else conring_size = XEN_HYPER_CONRING_SIZE; /* NOTE(review): masking with (conring_size - 1) assumes a power-of-two ring size — verify for hypervisors with a configurable conring_size. */ if (conringp >= conring_size) { idx = conringp & (conring_size - 1); len = conring_size; warp = TRUE; } else { idx = 0; len = conringp; warp = FALSE; } buf = GETBUF(conring_size); readmem(conring, KVADDR, buf, conring_size, "conring contents", FAULT_ON_ERROR); wrap_around: for (i = idx; i < len; i++) { if (buf[i]) { fputc(ascii(buf[i]) ? buf[i] : '.', fp); last = buf[i]; } } if (warp) { len = idx; idx = 0; warp = FALSE; goto wrap_around; } if (last != '\n') { fprintf(fp, "\n"); } FREEBUF(buf); } /* * Display physical cpu information. 
*/ void xen_hyper_cmd_pcpus(void) { struct xen_hyper_cmd_args pca; struct xen_hyper_pcpu_context *pcc; ulong flag; ulong val; int c, cnt, type, bogus; BZERO(&pca, sizeof(struct xen_hyper_cmd_args)); flag= 0; while ((c = getopt(argcnt, args, "rt")) != EOF) { switch(c) { case 'r': flag |= XEN_HYPER_PCPUS_REGS; break; case 't': flag |= XEN_HYPER_PCPUS_TSS; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; /* Each argument is either a physical cpu id or a pcpu address. */ while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_pcpu_context(args[optind], &val, &pcc); switch (type) { case XEN_HYPER_STR_PCID: case XEN_HYPER_STR_PCPU: pca.value[cnt] = val; pca.type[cnt] = type; pca.addr[cnt] = pcc->pcpu; pca.context[cnt] = pcc; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid pcpu or id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; } pca.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_pcpus(flag, &pca); } /* * Do the work requested by xen_hyper_cmd_pcpu(). 
*/ static void xen_hyper_do_pcpus(ulong flag, struct xen_hyper_cmd_args *pca) { struct xen_hyper_pcpu_context *pcc; uint cpuid; int i; if (pca->cnt) { for (i = 0; i < pca->cnt; i++) { xen_hyper_show_pcpus(flag, pca->context[i]); flag |= XEN_HYPER_PCPUS_1STCALL; } } else { for_cpu_indexes(i, cpuid) { pcc = xen_hyper_id_to_pcpu_context(cpuid); xen_hyper_show_pcpus(flag, pcc); flag |= XEN_HYPER_PCPUS_1STCALL; } } } /* Print one row (plus optional register/TSS detail) for physical cpu pcc; the header is reprinted per cpu when -r/-t forces per-cpu sections. A " *" prefix marks the crashing cpu. */ static void xen_hyper_show_pcpus(ulong flag, struct xen_hyper_pcpu_context *pcc) { char *act = " "; char buf[XEN_HYPER_CMD_BUFSIZE]; if (!(pcc->pcpu)) { return; } if (XEN_HYPER_CRASHING_CPU() == pcc->processor_id) { act = " *"; } if ((flag & XEN_HYPER_PCPUS_REGS) || (flag & XEN_HYPER_PCPUS_TSS) || !(flag & XEN_HYPER_PCPUS_1STCALL)) { if (((flag & XEN_HYPER_PCPUS_REGS) || (flag & XEN_HYPER_PCPUS_TSS)) && (flag & XEN_HYPER_PCPUS_1STCALL)) { fprintf(fp, "\n"); } sprintf(buf, " PCID "); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "PCPU"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CUR-VCPU"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "TSS"); fprintf(fp, "%s\n", buf); } sprintf(buf, "%s%5d ", act, pcc->processor_id); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(pcc->pcpu)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(pcc->current_vcpu)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(pcc->init_tss)); fprintf(fp, "%s\n", buf); if (flag & XEN_HYPER_PCPUS_REGS) { fprintf(fp, "Register information:\n"); dump_struct("cpu_user_regs", pcc->guest_cpu_user_regs, 0); } if (flag & XEN_HYPER_PCPUS_TSS) { fprintf(fp, "init_tss information:\n"); dump_struct("tss_struct", pcc->init_tss, 0); } } /* * Display schedule info. 
*/ void xen_hyper_cmd_sched(void) { struct xen_hyper_cmd_args scha; struct xen_hyper_pcpu_context *pcc; ulong flag; ulong val; int c, cnt, type, bogus; BZERO(&scha, sizeof(struct xen_hyper_cmd_args)); flag = 0; while ((c = getopt(argcnt, args, "v")) != EOF) { switch(c) { case 'v': flag |= XEN_HYPER_SCHED_VERBOSE; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; /* Only pcpu ids are accepted here (not pcpu addresses). */ while (args[optind]) { if (IS_A_NUMBER(args[optind])) { type = xen_hyper_str_to_pcpu_context(args[optind], &val, &pcc); switch (type) { case XEN_HYPER_STR_PCID: scha.value[cnt] = val; scha.type[cnt] = type; scha.context[cnt] = &xhscht->sched_context_array[val]; cnt++; break; case XEN_HYPER_STR_PCPU: case XEN_HYPER_STR_INVALID: error(INFO, "invalid pcpu id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; } scha.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_sched(flag, &scha); } /* * Do the work requested by xen_hyper_cmd_sched(). 
*/ static void xen_hyper_do_sched(ulong flag, struct xen_hyper_cmd_args *scha) { struct xen_hyper_sched_context *schc; uint cpuid; int i; fprintf(fp, "Scheduler name : %s\n\n", xhscht->name); if (scha->cnt) { for (i = 0; i < scha->cnt; i++) { xen_hyper_show_sched(flag, scha->context[i]); flag |= XEN_HYPER_SCHED_1STCALL; } } else { for_cpu_indexes(i, cpuid) { schc = &xhscht->sched_context_array[cpuid]; xen_hyper_show_sched(flag, schc); flag |= XEN_HYPER_SCHED_1STCALL; } } } /* Print one per-cpu scheduler row for scheduler context schc. */ static void xen_hyper_show_sched(ulong flag, struct xen_hyper_sched_context *schc) { char buf[XEN_HYPER_CMD_BUFSIZE]; if (!(schc->schedule_data)) { return; } if ((flag & XEN_HYPER_SCHED_VERBOSE) || !(flag & XEN_HYPER_SCHED_1STCALL)) { if ((flag & XEN_HYPER_SCHED_1STCALL) && (flag & XEN_HYPER_SCHED_VERBOSE)) { fprintf(fp, "\n"); } sprintf(buf, " CPU "); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "SCH-DATA"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "SCH-PRIV"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "CUR-VCPU"); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|RJUST, "IDL-VCPU"); if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) { strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|RJUST, "TICK"); } fprintf(fp, "%s\n", buf); } sprintf(buf, "%5d ", schc->cpu_id); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(schc->schedule_data)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(schc->sched_priv)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(schc->curr)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], VADDR_PRLEN, 
CENTER|LONG_HEX|RJUST, MKSTR(schc->idle)); if (XEN_HYPER_VALID_MEMBER(schedule_data_tick)) { strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); mkstring(&buf[strlen(buf)], LONG_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(schc->tick)); } fprintf(fp, "%s\n", buf); /* NOTE(review): the -v per-cpu detail is unimplemented (empty branch). */ if (flag & XEN_HYPER_SCHED_VERBOSE) { ; } } /* * Display general system info. */ void xen_hyper_cmd_sys(void) { int c; ulong sflag; sflag = FALSE; while ((c = getopt(argcnt, args, "c")) != EOF) { switch(c) { case 'c': sflag = TRUE; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!args[optind]) { if (sflag) fprintf(fp, "No support argument\n"); /* display config info here. */ else xen_hyper_display_sys_stats(); return; } } /* * Display system stats at init-time or for the sys command. */ void xen_hyper_display_sys_stats(void) { struct new_utsname *uts; char buf1[XEN_HYPER_CMD_BUFSIZE]; char buf2[XEN_HYPER_CMD_BUFSIZE]; ulong mhz; int len, flag; uts = &xht->utsname; len = 11; flag = XEN_HYPER_PRI_R; /* * It's now safe to unlink the remote namelist. 
*/ if (pc->flags & UNLINK_NAMELIST) { unlink(pc->namelist); pc->flags &= ~UNLINK_NAMELIST; pc->flags |= NAMELIST_UNLINKED; } if (REMOTE()) { switch (pc->flags & (NAMELIST_LOCAL|NAMELIST_UNLINKED|NAMELIST_SAVED)) { case NAMELIST_UNLINKED: XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag, (buf1, "%s (temporary)\n", pc->namelist)); break; case (NAMELIST_UNLINKED|NAMELIST_SAVED): case NAMELIST_LOCAL: XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag, (buf1, "%s\n", pc->namelist)); break; } } else { if (pc->system_map) { XEN_HYPER_PRI(fp, len, "SYSTEM MAP: ", buf1, flag, (buf1, "%s\n", pc->system_map)); XEN_HYPER_PRI(fp, len, "DEBUG KERNEL: ", buf1, flag, (buf1, "%s\n", pc->namelist)); } else { XEN_HYPER_PRI(fp, len, "KERNEL: ", buf1, flag, (buf1, "%s\n", pc->namelist)); } } if (pc->debuginfo_file) { XEN_HYPER_PRI(fp, len, "DEBUGINFO: ", buf1, flag, (buf1, "%s\n", pc->debuginfo_file)); } else if (pc->namelist_debug) { XEN_HYPER_PRI(fp, len, "DEBUG KERNEL: ", buf1, flag, (buf1, "%s\n", pc->namelist_debug)); } XEN_HYPER_PRI_CONST(fp, len, "DUMPFILE: ", flag); if (ACTIVE()) { if (REMOTE_ACTIVE()) fprintf(fp, "%s@%s (remote live system)\n", pc->server_memsrc, pc->server); else fprintf(fp, "%s\n", pc->live_memsrc); } else { if (REMOTE_DUMPFILE()) fprintf(fp, "%s@%s (remote dumpfile)", pc->server_memsrc, pc->server); else fprintf(fp, "%s", pc->dumpfile); fprintf(fp, "\n"); } XEN_HYPER_PRI(fp, len, "CPUS: ", buf1, flag, (buf1, "%d\n", XEN_HYPER_NR_PCPUS())); XEN_HYPER_PRI(fp, len, "DOMAINS: ", buf1, flag, (buf1, "%d\n", XEN_HYPER_NR_DOMAINS())); /* !!!Display a date here if it can be found. */ XEN_HYPER_PRI(fp, len, "UPTIME: ", buf1, flag, (buf1, "%s\n", (xen_hyper_get_uptime_hyper() ? convert_time(xen_hyper_get_uptime_hyper(), buf2) : "--:--:--"))); /* !!!Display a version here if it can be found. 
*/ XEN_HYPER_PRI_CONST(fp, len, "MACHINE: ", flag); if (strlen(uts->machine)) { fprintf(fp, "%s ", uts->machine); } else { fprintf(fp, "unknown "); } if ((mhz = machdep->processor_speed())) fprintf(fp, "(%ld Mhz)\n", mhz); else fprintf(fp, "(unknown Mhz)\n"); XEN_HYPER_PRI(fp, len, "MEMORY: ", buf1, flag, (buf1, "%s\n", get_memory_size(buf2))); if (XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) return; } /* * Display vcpu struct. */ void xen_hyper_cmd_vcpu(void) { struct xen_hyper_cmd_args vca; struct xen_hyper_vcpu_context *vcc; ulong flag; ulong valvc, valdom; int c, cnt, type, bogus; BZERO(&vca, sizeof(struct xen_hyper_cmd_args)); flag = 0; while ((c = getopt(argcnt, args, "i")) != EOF) { switch(c) { case 'i': flag |= XEN_HYPER_VCPUS_ID; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; /* With -i, arguments come in (domain-id, vcpu-id) pairs; otherwise each argument is a vcpu id or address. */ while (args[optind]) { if (IS_A_NUMBER(args[optind])) { if (flag & XEN_HYPER_VCPUS_ID) { type = xen_hyper_strid_to_vcpu_context( args[optind], args[optind+1], &valdom, &valvc, &vcc); } else { type = xen_hyper_strvcpu_to_vcpu_context( args[optind], &valvc, &vcc); } switch (type) { case XEN_HYPER_STR_VCID: case XEN_HYPER_STR_VCPU: vca.value[cnt] = valvc; vca.type[cnt] = type; vca.addr[cnt] = vcc->vcpu; vca.context[cnt] = vcc; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid vcpu or id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; if (flag & XEN_HYPER_VCPUS_ID) optind++; } vca.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_vcpu(&vca); } /* * Do the work requested by xen_hyper_cmd_vcpu(). */ static void xen_hyper_do_vcpu(struct xen_hyper_cmd_args *vca) { int i; if (vca->cnt) { if (vca->cnt == 1) { xhvct->last = vca->context[0]; } for (i = 0; i < vca->cnt; i++) { dump_struct("vcpu", vca->addr[i], 0); } } else { dump_struct("vcpu", xhvct->last->vcpu, 0); } } /* * Display vcpu status. 
*/ void xen_hyper_cmd_vcpus(void) { struct xen_hyper_cmd_args vca; struct xen_hyper_vcpu_context *vcc; ulong flag; ulong valvc, valdom; int c, cnt, type, bogus; BZERO(&vca, sizeof(struct xen_hyper_cmd_args)); flag = 0; while ((c = getopt(argcnt, args, "i")) != EOF) { switch(c) { case 'i': flag |= XEN_HYPER_VCPUS_ID; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); cnt = bogus = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { if (flag & XEN_HYPER_VCPUS_ID) { type = xen_hyper_strid_to_vcpu_context( args[optind], args[optind+1], &valdom, &valvc, &vcc); } else { type = xen_hyper_strvcpu_to_vcpu_context( args[optind], &valvc, &vcc); } switch (type) { case XEN_HYPER_STR_VCID: case XEN_HYPER_STR_VCPU: vca.value[cnt] = valvc; vca.type[cnt] = type; vca.addr[cnt] = vcc->vcpu; vca.context[cnt] = vcc; cnt++; break; case XEN_HYPER_STR_INVALID: error(INFO, "invalid vcpu or id value: %s\n\n", args[optind]); bogus++; } } else { error(FATAL, "invalid address: %s\n", args[optind]); } optind++; } vca.cnt = cnt; if (bogus && !cnt) { return; } xen_hyper_do_vcpus(&vca); } /* * Do the work requested by xen_hyper_cmd_vcpus(). 
*/ static void xen_hyper_do_vcpus(struct xen_hyper_cmd_args *vca) { struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vcc; char buf1[XEN_HYPER_CMD_BUFSIZE]; char buf2[XEN_HYPER_CMD_BUFSIZE]; int i, j; fprintf(fp, " VCID PCID %s ST T DOMID %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "VCPU"), mkstring(buf2, VADDR_PRLEN, CENTER|RJUST, "DOMAIN")); if (vca->cnt) { for (i = 0; i < vca->cnt; i++) { xen_hyper_show_vcpus(vca->context[i]); } } else { for (i = 0, vcca = xhvct->vcpu_context_arrays; i < XEN_HYPER_NR_DOMAINS(); i++, vcca++) { for (j = 0, vcc = vcca->context_array; j < vcca->context_array_valid; j++, vcc++) { xen_hyper_show_vcpus(vcc); } } } } static void xen_hyper_show_vcpus(struct xen_hyper_vcpu_context *vcc) { int type; char *act, *crash; char buf[XEN_HYPER_CMD_BUFSIZE]; struct xen_hyper_pcpu_context *pcc; domid_t domid; if (!(vcc->vcpu)) { return; } if((pcc = xen_hyper_id_to_pcpu_context(vcc->processor))) { if (pcc->current_vcpu == vcc->vcpu) { act = ">"; } else { act = " "; } } else { act = " "; } if (xht->crashing_vcc && vcc->vcpu == xht->crashing_vcc->vcpu) { crash = "*"; } else { crash = " "; } sprintf(buf, "%s%s%5d %5d ", act, crash, vcc->vcpu_id, vcc->processor); mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(vcc->vcpu)); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); xen_hyper_vcpu_state_string(vcc, &buf[strlen(buf)], !VERBOSE); strncat(buf, " ", XEN_HYPER_CMD_BUFSIZE-strlen(buf)-1); xen_hyper_domain_to_type(vcc->domain, &type, &buf[strlen(buf)], !VERBOSE); if ((domid = xen_hyper_domain_to_id(vcc->domain)) == XEN_HYPER_DOMAIN_ID_INVALID) { sprintf(&buf[strlen(buf)], " ????? "); } else { sprintf(&buf[strlen(buf)], " %5d ", domid); } mkstring(&buf[strlen(buf)], VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(vcc->domain)); fprintf(fp, "%s\n", buf); } /* * Get string for domain status. * - This may need some data in domain struct. 
 */
/*
 * Format a domain's state into buf: long form when verbose, two-letter
 * code otherwise.  Returns buf.  Flag checks are ordered by priority,
 * so only the highest-priority state is reported.
 */
char *
xen_hyper_domain_state_string(struct xen_hyper_domain_context *dc,
	char *buf, int verbose)
{
	ulong stat;

	stat = xen_hyper_domain_state(dc);

	if (stat == XEN_HYPER_DOMF_ERROR) {
		sprintf(buf, verbose ? "(unknown)" : "??");
	} else if (XEN_HYPER_VALID_MEMBER(domain_domain_flags)) {
		/* domain_flags member present: use DOMF_* bit layout */
		if (stat & XEN_HYPER_DOMF_shutdown) {
			sprintf(buf, verbose ? "DOMAIN_SHUTDOWN" : "SF");
		} else if (stat & XEN_HYPER_DOMF_dying) {
			sprintf(buf, verbose ? "DOMAIN_DYING" : "DY");
		} else if (stat & XEN_HYPER_DOMF_ctrl_pause) {
			sprintf(buf, verbose ? "DOMAIN_CTRL_PAUSE" : "CP");
		} else if (stat & XEN_HYPER_DOMF_polling) {
			sprintf(buf, verbose ? "DOMAIN_POLLING" : "PO");
		} else if (stat & XEN_HYPER_DOMF_paused) {
			sprintf(buf, verbose ? "DOMAIN_PAUSED" : "PA");
		} else {
			sprintf(buf, verbose ? "DOMAIN_RUNNING" : "RU");
		}
	} else {
		/*
		 * No domain_flags member: fall back to the DOMS_* layout —
		 * presumably a different Xen version's domain struct; note
		 * this branch also distinguishes "shuttingdown" (SH).
		 */
		if (stat & XEN_HYPER_DOMS_shutdown) {
			sprintf(buf, verbose ? "DOMAIN_SHUTDOWN" : "SF");
		} else if (stat & XEN_HYPER_DOMS_shuttingdown) {
			sprintf(buf, verbose ? "DOMAIN_SHUTTINGDOWN" : "SH");
		} else if (stat & XEN_HYPER_DOMS_dying) {
			sprintf(buf, verbose ? "DOMAIN_DYING" : "DY");
		} else if (stat & XEN_HYPER_DOMS_ctrl_pause) {
			sprintf(buf, verbose ? "DOMAIN_CTRL_PAUSE" : "CP");
		} else if (stat & XEN_HYPER_DOMS_polling) {
			sprintf(buf, verbose ? "DOMAIN_POLLING" : "PO");
		} else {
			sprintf(buf, verbose ? "DOMAIN_RUNNING" : "RU");
		}
	}
	return buf;
}

/*
 * Get string for vcpu status.
 * - This may need some data in vcpu struct.
 */
/*
 * Format a vcpu's runstate into buf (long form when verbose, two-letter
 * code otherwise) and return buf.  RUNSTATE_running and RUNSTATE_runnable
 * are deliberately reported identically as "RU".
 */
char *
xen_hyper_vcpu_state_string(struct xen_hyper_vcpu_context *vcc,
	char *buf, int verbose)
{
	int stat;

	stat = xen_hyper_vcpu_state(vcc);

	if (stat == XEN_HYPER_RUNSTATE_ERROR) {
		sprintf(buf, verbose ? "(unknown)" : "??");
	} else if (stat == XEN_HYPER_RUNSTATE_running ||
	stat == XEN_HYPER_RUNSTATE_runnable) {
		sprintf(buf, verbose ? "VCPU_RUNNING" : "RU");
	} else if (stat == XEN_HYPER_RUNSTATE_blocked) {
		sprintf(buf, verbose ? "VCPU_BLOCKED" : "BL");
	} else if (stat == XEN_HYPER_RUNSTATE_offline) {
		sprintf(buf, verbose ?
			"VCPU_OFFLINE" : "OF");
	} else {
		/* any other runstate value is unrecognized */
		sprintf(buf, verbose ? "(unknown)" : "??");
	}
	return buf;
}

/*
 * Get domain type from domain address.
 */
/*
 * Resolve a domain address to its context, then delegate to
 * xen_hyper_domain_context_to_type().  Returns NULL (after a WARNING)
 * when no context exists for the address.
 */
static char *
xen_hyper_domain_to_type(ulong domain, int *type, char *buf, int verbose)
{
	struct xen_hyper_domain_context *dc;

	if ((dc = xen_hyper_domain_to_domain_context(domain)) == NULL) {
		error(WARNING, "cannot get context from domain address.\n");
		return NULL;
	}
	return xen_hyper_domain_context_to_type(dc, type, buf, verbose);
}

/*
 * Get domain type from domain context.
 */
/*
 * Classify a domain context: well-known pseudo-domain ids (dom_io,
 * dom_xen, idle), dom0, or a guest.  Writes the classification to *type,
 * a printable tag to buf, and returns buf (NULL for a NULL context, in
 * which case buf is untouched).
 */
static char *
xen_hyper_domain_context_to_type(struct xen_hyper_domain_context *dc,
	int *type, char *buf, int verbose)
{
	if (!dc) {
		*type = XEN_HYPER_DOMAIN_TYPE_INVALID;
		return NULL;
	} else if (dc->domain_id == XEN_HYPER_DOMID_IO) {
		*type = XEN_HYPER_DOMAIN_TYPE_IO;
		sprintf(buf, verbose ? "dom_io" : "O");
	} else if (dc->domain_id == XEN_HYPER_DOMID_XEN) {
		*type = XEN_HYPER_DOMAIN_TYPE_XEN;
		sprintf(buf, verbose ? "dom_xen" : "X");
	} else if (dc->domain_id == XEN_HYPER_DOMID_IDLE) {
		*type = XEN_HYPER_DOMAIN_TYPE_IDLE;
		sprintf(buf, verbose ? "idle domain" : "I");
	} else if (dc == xhdt->dom0) {
		*type = XEN_HYPER_DOMAIN_TYPE_DOM0;
		sprintf(buf, verbose ? "domain 0" : "0");
	} else {
		*type = XEN_HYPER_DOMAIN_TYPE_GUEST;
		sprintf(buf, verbose ? "domain U" : "U");
	}
	return buf;
}

/*
 * Check a type for value. And return domain context.
 */
/*
 * Interpret an ambiguous user-supplied token as either a domain id or a
 * domain address.  The string is parsed both as decimal and as hex; each
 * successful lookup bumps "found".  Writes the winning value to *value
 * and its context to *dcp, returning the match kind (XEN_HYPER_STR_DID /
 * XEN_HYPER_STR_DOMAIN) or XEN_HYPER_STR_INVALID.
 */
static int
xen_hyper_str_to_domain_context(char *string, ulong *value,
	struct xen_hyper_domain_context **dcp)
{
	ulong dvalue, hvalue;
	int found, type;
	char *s;
	struct xen_hyper_domain_context *dc_did, *dc_ddc, *dc_hid, *dc_hdc;

	if (string == NULL) {
		error(INFO, "received NULL string\n");
		return STR_INVALID;
	}

	s = string;
	dvalue = hvalue = BADADDR;

	/* a token like "10" is a candidate under both interpretations */
	if (decimal(s, 0))
		dvalue = dtol(s, RETURN_ON_ERROR, NULL);

	if (hexadecimal(s, 0)) {
		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
			s += 2;
		if (strlen(s) <= MAX_HEXADDR_STRLEN)
			hvalue = htol(s, RETURN_ON_ERROR, NULL);
	}

	found = 0;
	dc_did = dc_ddc = dc_hid = dc_hdc = NULL;
	type = XEN_HYPER_STR_INVALID;

	if (dvalue != BADADDR) {
		if ((dc_did = xen_hyper_id_to_domain_context(dvalue)))
			found++;
		if ((dc_ddc = xen_hyper_domain_to_domain_context(dvalue)))
			found++;
	}
	/* skip the hex pass when it parses to the same number */
	if ((hvalue != BADADDR) && (dvalue != hvalue)) {
		if ((dc_hid = xen_hyper_id_to_domain_context(hvalue)))
			found++;
		if ((dc_hdc = xen_hyper_domain_to_domain_context(hvalue)))
			found++;
	}

	switch (found)
	{
	case 2:
		/* ambiguous as id under both radixes: prefer the decimal id */
		if (dc_did && dc_hid) {
			*dcp = dc_did;
			*value = dvalue;
			/*
			 * NOTE(review): returns the generic crash constant
			 * STR_PID rather than XEN_HYPER_STR_DID — looks like
			 * it was copied from the task-context code; verify
			 * whether callers ever compare against this value.
			 */
			type = STR_PID;
		}
		break;

	case 1:
		/* exactly one lookup succeeded; at most one pointer is set */
		if (dc_did) {
			*dcp = dc_did;
			*value = dvalue;
			type = XEN_HYPER_STR_DID;
		}
		if (dc_ddc) {
			*dcp = dc_ddc;
			*value = dvalue;
			type = XEN_HYPER_STR_DOMAIN;
		}
		if (dc_hid) {
			*dcp = dc_hid;
			*value = hvalue;
			type = XEN_HYPER_STR_DID;
		}
		if (dc_hdc) {
			*dcp = dc_hdc;
			*value = hvalue;
			type = XEN_HYPER_STR_DOMAIN;
		}
		break;
	}

	return type;
}

/*
 * Display a vcpu context.
 */
/*
 * Print the full context of one vcpu: its pcpu, ids, addresses, the
 * owning domain's state, and a coarse STATE derived from machdep flags
 * and crash detection.  Bails out with a WARNING if the pcpu or domain
 * context cannot be resolved.
 */
void
xen_hyper_show_vcpu_context(struct xen_hyper_vcpu_context *vcc)
{
	char buf[XEN_HYPER_CMD_BUFSIZE];
	struct xen_hyper_pcpu_context *pcc;
	struct xen_hyper_domain_context *dc;
	int len, flag;

	/* wider label column during initialization-time display */
	len = 6;
	len += pc->flags & RUNTIME ?
		0 : 5;
	flag = XEN_HYPER_PRI_R;

	if (!(pcc = xen_hyper_id_to_pcpu_context(vcc->processor))) {
		error(WARNING, "cannot get pcpu context vcpu belongs.\n");
		return;
	}
	if (!(dc = xen_hyper_domain_to_domain_context(vcc->domain))) {
		error(WARNING, "cannot get domain context vcpu belongs.\n");
		return;
	}
	XEN_HYPER_PRI(fp, len, "PCPU-ID: ", buf, flag,
		(buf, "%d\n", vcc->processor));
	XEN_HYPER_PRI(fp, len, "PCPU: ", buf, flag,
		(buf, "%lx\n", pcc->pcpu));
	XEN_HYPER_PRI(fp, len, "VCPU-ID: ", buf, flag,
		(buf, "%d\n", vcc->vcpu_id));
	XEN_HYPER_PRI(fp, len, "VCPU: ", buf, flag,
		(buf, "%lx ", vcc->vcpu));
	fprintf(fp, "(%s)\n", xen_hyper_vcpu_state_string(vcc, buf, VERBOSE));
	XEN_HYPER_PRI(fp, len, "DOMAIN-ID: ", buf, flag,
		(buf, "%d\n", dc->domain_id));
	XEN_HYPER_PRI(fp, len, "DOMAIN: ", buf, flag,
		(buf, "%lx ", vcc->domain));
	fprintf(fp, "(%s)\n", xen_hyper_domain_state_string(dc, buf, VERBOSE));
	XEN_HYPER_PRI_CONST(fp, len, "STATE: ", flag);
	/* machdep flags take precedence over the per-vcpu crash check */
	if (machdep->flags & HWRESET) {
		fprintf(fp, "HARDWARE RESET");
	} else if (machdep->flags & INIT) {
		fprintf(fp, "INIT");
	} else if (xen_hyper_is_vcpu_crash(vcc)) {
		fprintf(fp, "CRASH");
	} else {
		fprintf(fp, "ACTIVE");
	}
	fprintf(fp, "\n");
}

/*
 * Check a type for value. And return dump information context address.
*/ static int xen_hyper_str_to_dumpinfo_context(char *string, ulong *value, struct xen_hyper_dumpinfo_context **dicp) { ulong dvalue, hvalue; struct xen_hyper_dumpinfo_context *note_did, *note_hid; struct xen_hyper_dumpinfo_context *note_dad, *note_had; int found, type; char *s; if (string == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } s = string; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; note_did = note_hid = note_dad = note_had = 0; type = XEN_HYPER_STR_INVALID; if (dvalue != BADADDR) { if (dvalue > XEN_HYPER_MAX_CPUS()) { note_dad = xen_hyper_note_to_dumpinfo_context(dvalue); } else { note_did = xen_hyper_id_to_dumpinfo_context(dvalue); } found++; } if ((hvalue != BADADDR)) { if (hvalue > XEN_HYPER_MAX_CPUS()) { note_had = xen_hyper_note_to_dumpinfo_context(hvalue); } else { note_hid = xen_hyper_id_to_dumpinfo_context(hvalue); } found++; } switch (found) { case 2: if (note_did && note_hid) { *value = dvalue; *dicp = note_did; type = XEN_HYPER_STR_PCID; } break; case 1: if (note_did) { *value = dvalue; *dicp = note_did; type = XEN_HYPER_STR_PCID; } if (note_hid) { *value = hvalue; *dicp = note_hid; type = XEN_HYPER_STR_PCID; } if (note_dad) { *value = dvalue; *dicp = note_dad; type = XEN_HYPER_STR_ADDR; } if (note_had) { *value = hvalue; *dicp = note_had; type = XEN_HYPER_STR_ADDR; } break; } return type; } /* * Check a type for value. And return vcpu context. 
*/ static int xen_hyper_strvcpu_to_vcpu_context(char *string, ulong *value, struct xen_hyper_vcpu_context **vccp) { ulong dvalue, hvalue; int found, type; char *s; struct xen_hyper_vcpu_context *vcc_dvc, *vcc_hvc; if (string == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } s = string; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; vcc_dvc = vcc_hvc = NULL; type = XEN_HYPER_STR_INVALID; if (dvalue != BADADDR) { if ((vcc_dvc = xen_hyper_vcpu_to_vcpu_context(dvalue))) found++; } if ((hvalue != BADADDR) && (dvalue != hvalue)) { if ((vcc_hvc = xen_hyper_vcpu_to_vcpu_context(hvalue))) found++; } switch (found) { case 1: if (vcc_dvc) { *vccp = vcc_dvc; *value = dvalue; type = XEN_HYPER_STR_VCPU; } if (vcc_hvc) { *vccp = vcc_hvc; *value = hvalue; type = XEN_HYPER_STR_VCPU; } break; } return type; } /* * Check a type for id value. And return vcpu context. 
*/ static int xen_hyper_strid_to_vcpu_context(char *strdom, char *strvc, ulong *valdom, ulong *valvc, struct xen_hyper_vcpu_context **vccp) { ulong dvalue, hvalue; int found, type; char *s; struct xen_hyper_vcpu_context *vcc_did, *vcc_hid; struct xen_hyper_domain_context *dc; if (strdom == NULL || strvc == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } if (xen_hyper_str_to_domain_context(strdom, valdom, &dc) == XEN_HYPER_STR_INVALID) { error(INFO, "invalid domain id string.\n"); return STR_INVALID; } s = strvc; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; vcc_did = vcc_hid = NULL; type = XEN_HYPER_STR_INVALID; if (dvalue != BADADDR) { if ((vcc_did = xen_hyper_id_to_vcpu_context(dc->domain, XEN_HYPER_DOMAIN_ID_INVALID, dvalue))) found++; } if ((hvalue != BADADDR) && (dvalue != hvalue)) { if ((vcc_hid = xen_hyper_id_to_vcpu_context(dc->domain, XEN_HYPER_DOMAIN_ID_INVALID, hvalue))) found++; } switch (found) { case 2: if (vcc_did && vcc_hid) { *vccp = vcc_did; *valvc = dvalue; type = XEN_HYPER_STR_VCID; } break; case 1: if (vcc_did) { *vccp = vcc_did; *valvc = dvalue; type = XEN_HYPER_STR_VCID; } if (vcc_hid) { *vccp = vcc_hid; *valvc = hvalue; type = XEN_HYPER_STR_VCID; } break; } return type; } /* * Check a type for value. And return pcpu context. 
*/ static int xen_hyper_str_to_pcpu_context(char *string, ulong *value, struct xen_hyper_pcpu_context **pccp) { ulong dvalue, hvalue; int found, type; char *s; struct xen_hyper_pcpu_context *pcc_did, *pcc_dpc, *pcc_hid, *pcc_hpc; if (string == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } s = string; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; pcc_did = pcc_dpc = pcc_hid = pcc_hpc = NULL; type = XEN_HYPER_STR_INVALID; if (dvalue != BADADDR) { if ((pcc_did = xen_hyper_id_to_pcpu_context(dvalue))) found++; if ((pcc_dpc = xen_hyper_pcpu_to_pcpu_context(dvalue))) found++; } if ((hvalue != BADADDR) && (dvalue != hvalue)) { if ((pcc_hid = xen_hyper_id_to_pcpu_context(hvalue))) found++; if ((pcc_hpc = xen_hyper_pcpu_to_pcpu_context(hvalue))) found++; } switch (found) { case 2: if (pcc_did && pcc_hid) { *pccp = pcc_did; *value = dvalue; type = STR_PID; } break; case 1: if (pcc_did) { *pccp = pcc_did; *value = dvalue; type = XEN_HYPER_STR_PCID; } if (pcc_dpc) { *pccp = pcc_dpc; *value = dvalue; type = XEN_HYPER_STR_PCPU; } if (pcc_hid) { *pccp = pcc_hid; *value = hvalue; type = XEN_HYPER_STR_PCID; } if (pcc_hpc) { *pccp = pcc_hpc; *value = hvalue; type = XEN_HYPER_STR_PCPU; } break; } return type; } #endif crash-utility-crash-9cd43f5/x86.c0000664000372000037200000050256415107550337016201 0ustar juerghjuergh/* x86.c - core analysis suite * * Portions Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2014,2017-2018 David Anderson * Copyright (C) 2002-2014,2017-2018 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef X86 /* * NOTICE OF APPRECIATION * * The stack-trace related code in this file is an extension of the stack * trace code from the Mach in-kernel debugger "ddb". Sincere thanks to * the author(s). * */ /* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ #include "defs.h" #include "xen_hyper_defs.h" #ifndef MCLX #include #include #include #include #include #include #include #include #include #include /* * Machine register set. 
*/ struct db_variable db_regs[] = { "cs", &ddb_regs.tf_cs, FCN_NULL, "ds", &ddb_regs.tf_ds, FCN_NULL, "es", &ddb_regs.tf_es, FCN_NULL, #if 0 "fs", &ddb_regs.tf_fs, FCN_NULL, "gs", &ddb_regs.tf_gs, FCN_NULL, #endif "ss", &ddb_regs.tf_ss, FCN_NULL, "eax", &ddb_regs.tf_eax, FCN_NULL, "ecx", &ddb_regs.tf_ecx, FCN_NULL, "edx", &ddb_regs.tf_edx, FCN_NULL, "ebx", &ddb_regs.tf_ebx, FCN_NULL, "esp", &ddb_regs.tf_esp, FCN_NULL, "ebp", &ddb_regs.tf_ebp, FCN_NULL, "esi", &ddb_regs.tf_esi, FCN_NULL, "edi", &ddb_regs.tf_edi, FCN_NULL, "eip", &ddb_regs.tf_eip, FCN_NULL, "efl", &ddb_regs.tf_eflags, FCN_NULL, }; struct db_variable *db_eregs = db_regs + sizeof(db_regs)/sizeof(db_regs[0]); #else typedef int db_strategy_t; /* search strategy */ #define DB_STGY_ANY 0 /* anything goes */ #define DB_STGY_XTRN 1 /* only external symbols */ #define DB_STGY_PROC 2 /* only procedures */ typedef ulong db_addr_t; /* address - unsigned */ typedef int db_expr_t; /* expression - signed */ /* * Symbol representation is specific to the symtab style: * BSD compilers use dbx' nlist, other compilers might use * a different one */ typedef char * db_sym_t; /* opaque handle on symbols */ #define DB_SYM_NULL ((db_sym_t)0) typedef uint boolean_t; #endif /* !MCLX */ /* * Stack trace. 
*/ #ifdef MCLX static db_expr_t db_get_value(db_addr_t, int, boolean_t, struct bt_info *); #define INKERNEL(va) (machdep->kvtop(CURRENT_CONTEXT(), va, &phys, 0)) #else #define INKERNEL(va) (((vm_offset_t)(va)) >= USRSTACK) #endif struct i386_frame { struct i386_frame *f_frame; int f_retaddr; int f_arg0; }; #ifdef MCLX #define NORMAL 0 #define IDT_DIRECT_ENTRY 1 #define IDT_JMP_ERROR_CODE 2 #define RET_FROM_INTR 3 #define SIGNAL_RETURN 4 #else #define NORMAL 0 #define TRAP 1 #define INTERRUPT 2 #define SYSCALL 3 #endif #ifndef MCLX typedef vm_offset_t db_addr_t; #endif #ifdef MCLX struct eframe { int eframe_found; int eframe_type; ulong eframe_addr; ulong jmp_error_code_eip; }; static void db_nextframe(struct i386_frame **, db_addr_t *, struct eframe *, struct bt_info *); static int dump_eframe(struct eframe *, int, struct bt_info *); static int eframe_numargs(ulong eip, struct bt_info *); static int check_for_eframe(char *, struct bt_info *); static void x86_user_eframe(struct bt_info *); static ulong x86_next_eframe(ulong addr, struct bt_info *bt); static void x86_cmd_mach(void); static int x86_get_smp_cpus(void); static void x86_display_machine_stats(void); static void x86_display_cpu_data(unsigned int); static void x86_display_memmap(void); static int x86_omit_frame_pointer(void); static void x86_back_trace_cmd(struct bt_info *); static int is_rodata_text(ulong); static int mach_CRASHDEBUG(ulong); static db_sym_t db_search_symbol(db_addr_t, db_strategy_t,db_expr_t *); static void db_symbol_values(db_sym_t, char **, db_expr_t *); static int db_sym_numargs(db_sym_t, int *, char **); static void x86_dump_line_number(ulong); static void x86_clear_machdep_cache(void); static void x86_parse_cmdline_args(void); static ulong mach_debug = 0; static int mach_CRASHDEBUG(ulong dval) { if (CRASHDEBUG(dval)) return TRUE; return (mach_debug >= dval); } #else static void db_nextframe(struct i386_frame **, db_addr_t *); #endif #ifdef MCLX static int db_numargs(struct i386_frame 
*, struct bt_info *bt); static void db_print_stack_entry(char *, int, char **, int *, db_addr_t, struct bt_info *, struct eframe *, int, struct i386_frame *); #else static void db_print_stack_entry (char *, int, char **, int *, db_addr_t); #endif /* * Figure out how many arguments were passed into the frame at "fp". */ static int db_numargs(fp, bt) struct i386_frame *fp; struct bt_info *bt; { int *argp; int inst; int args; argp = (int *)db_get_value((int)&fp->f_retaddr, 4, FALSE, bt); /* * etext is wrong for LKMs. We should attempt to interpret * the instruction at the return address in all cases. This * may require better fault handling. */ #ifdef MCLX if (!is_kernel_text((ulong)argp)) { #else if (argp < (int *)btext || argp >= (int *)etext) { #endif args = 5; } else { inst = db_get_value((int)argp, 4, FALSE, bt); if ((inst & 0xff) == 0x59) /* popl %ecx */ args = 1; else if ((inst & 0xffff) == 0xc483) /* addl $Ibs, %esp */ args = ((inst >> 16) & 0xff) / 4; else args = 5; } return (args); } #ifdef MCLX static int eframe_numargs(ulong eip, struct bt_info *bt) { int inst; int args; if (!is_kernel_text(eip)) args = 5; else { inst = db_get_value((int)eip, 4, FALSE, bt); if ((inst & 0xff) == 0x59) /* popl %ecx */ args = 1; else if ((inst & 0xffff) == 0xc483) /* addl $Ibs, %esp */ args = ((inst >> 16) & 0xff) / 4; else args = 5; } return args; } #endif static void #ifdef MCLX db_print_stack_entry(name, narg, argnp, argp, callpc, bt, ep, fnum, frame) #else db_print_stack_entry(name, narg, argnp, argp, callpc) #endif char *name; int narg; char **argnp; int *argp; db_addr_t callpc; #ifdef MCLX struct bt_info *bt; struct eframe *ep; int fnum; struct i386_frame *frame; #endif { #ifdef MCLX int i; db_expr_t arg; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char *sp; if (!name) { if (IS_MODULE_VADDR(callpc) && module_symbol(callpc, NULL, NULL, buf1, *gdb_output_radix)) { sprintf(buf2, "(%s)", buf1); name = buf2; } else name = "(unknown module)"; } if 
(strstr(name, "_MODULE_START_")) { sprintf(buf3, "(%s module)", name + strlen("_MODULE_START_")); name = buf3; } if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (ep->eframe_found && ep->jmp_error_code_eip) { if (STREQ(closest_symbol(ep->jmp_error_code_eip), bt->ref->str) || STREQ(closest_symbol(callpc), bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } else if (STREQ(name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if (ep->eframe_found && ep->jmp_error_code_eip && (bt->ref->hexval == ep->jmp_error_code_eip)) bt->ref->cmdflags |= BT_REF_FOUND; else if (bt->ref->hexval == callpc) bt->ref->cmdflags |= BT_REF_FOUND; break; } return; } else { fprintf(fp, "%s#%d [%08lx] ", fnum < 10 ? " " : "", fnum, (ulong)frame); if (ep->eframe_found && ep->jmp_error_code_eip) fprintf(fp, "%s (via %s)", closest_symbol(callpc), closest_symbol(ep->jmp_error_code_eip)); else fprintf(fp, "%s", name); fprintf(fp, " at %lx\n", callpc); } if (ep->eframe_found) goto done_entry; if (STREQ(name, "L6")) goto done_entry; fprintf(fp, " ("); if ((i = get_function_numargs(callpc)) >= 0) narg = i; while (narg) { if (argnp) fprintf(fp, "%s=", *argnp++); arg = db_get_value((int)argp, 4, FALSE, bt); if ((sp = value_symbol(arg))) fprintf(fp, "%s", sp); else if ((bt->flags & BT_SYMBOLIC_ARGS) && strlen(value_to_symstr(arg, buf1, 0))) fprintf(fp, "%s", buf1); else fprintf(fp, "%x", arg); argp++; if (--narg != 0) fprintf(fp, ", "); } if (i == 0) fprintf(fp, "void"); fprintf(fp, ")\n"); done_entry: if (bt->flags & BT_LINE_NUMBERS) x86_dump_line_number(callpc); return; #else db_printf("%s(", name); while (narg) { if (argnp) db_printf("%s=", *argnp++); db_printf("%r", db_get_value((int)argp, 4, FALSE, bt)); argp++; if (--narg != 0) db_printf(","); } db_printf(") at "); db_printsym(callpc, DB_STGY_PROC); db_printf("\n"); return; #endif } #ifdef MCLX static db_sym_t db_search_symbol(db_addr_t val, db_strategy_t 
strategy, db_expr_t *offp) { struct syment *sp; ulong offset; if ((sp = value_search(val, &offset))) { *offp = (db_expr_t)offset; return(sp->name); } else return DB_SYM_NULL; } /* * Return name and value of a symbol */ static void db_symbol_values(db_sym_t sym, char **namep, db_expr_t *valuep) { struct syment *sp; if (sym == DB_SYM_NULL) { *namep = 0; return; } if ((sp = symbol_search(sym)) == NULL) { error(INFO, "db_symbol_values: cannot find symbol: %s\n", sym); *namep = 0; return; } *namep = sp->name; if (valuep) *valuep = sp->value; #ifndef MCLX X_db_symbol_values(db_last_symtab, sym, namep, &value); if (db_symbol_is_ambiguous(sym)) *namep = db_qualify(sym, db_last_symtab->name); if (valuep) *valuep = value; #endif } static unsigned db_extend[] = { /* table for sign-extending */ 0, 0xFFFFFF80U, 0xFFFF8000U, 0xFF800000U }; static db_expr_t db_get_value(addr, size, is_signed, bt) db_addr_t addr; int size; boolean_t is_signed; struct bt_info * bt; { char data[sizeof(int)]; db_expr_t value; int i; #ifndef MCLX db_read_bytes(addr, size, data); #else BZERO(data, sizeof(int)); if (INSTACK(addr, bt)) { if (size == sizeof(ulong)) return (db_expr_t)GET_STACK_ULONG(addr); else GET_STACK_DATA(addr, data, size); } else { if (!readmem(addr, KVADDR, &value, size, "db_get_value", RETURN_ON_ERROR)) error(FATAL, "db_get_value: read error: address: %lx\n", addr); } #endif value = 0; #if BYTE_MSF for (i = 0; i < size; i++) #else /* BYTE_LSF */ for (i = size - 1; i >= 0; i--) #endif { value = (value << 8) + (data[i] & 0xFF); } if (size < 4) { if (is_signed && (value & db_extend[size]) != 0) value |= db_extend[size]; } return (value); } static int db_sym_numargs(db_sym_t sym, int *nargp, char **argnames) { return FALSE; } #endif /* * Figure out the next frame up in the call stack. 
*/ #ifdef MCLX static void db_nextframe(fp, ip, ep, bt) struct i386_frame **fp; /* in/out */ db_addr_t *ip; /* out */ struct eframe *ep; struct bt_info *bt; #else static void db_nextframe(fp, ip) struct i386_frame **fp; /* in/out */ db_addr_t *ip; /* out */ #endif { int eip, ebp; db_expr_t offset; char *sym, *name; #ifdef MCLX static int last_ebp; static int last_eip; struct syment *sp; #endif eip = db_get_value((int) &(*fp)->f_retaddr, 4, FALSE, bt); ebp = db_get_value((int) &(*fp)->f_frame, 4, FALSE, bt); /* * Figure out frame type, presuming normal. */ BZERO(ep, sizeof(struct eframe)); ep->eframe_type = NORMAL; sym = db_search_symbol(eip, DB_STGY_ANY, &offset); db_symbol_values(sym, &name, NULL); if (name != NULL) { ep->eframe_type = check_for_eframe(name, bt); #ifndef MCLX if (!strcmp(name, "calltrap")) { frame_type = TRAP; } else if (!strncmp(name, "Xresume", 7)) { frame_type = INTERRUPT; } else if (!strcmp(name, "_Xsyscall")) { frame_type = SYSCALL; } #endif } switch (ep->eframe_type) { case NORMAL: ep->eframe_found = FALSE; break; case IDT_DIRECT_ENTRY: case RET_FROM_INTR: case SIGNAL_RETURN: ep->eframe_found = TRUE; ep->eframe_addr = x86_next_eframe(last_ebp + sizeof(ulong)*2, bt); break; case IDT_JMP_ERROR_CODE: ep->eframe_found = TRUE; ep->eframe_addr = x86_next_eframe(last_ebp + sizeof(ulong) * 4, bt); if ((sp = x86_jmp_error_code(last_eip))) ep->jmp_error_code_eip = sp->value; break; default: error(FATAL, "unknown exception frame type?\n"); } *ip = (db_addr_t) eip; *fp = (struct i386_frame *) ebp; last_ebp = ebp; last_eip = eip; return; #ifndef MCLX db_print_stack_entry(name, 0, 0, 0, eip); /* * Point to base of trapframe which is just above the * current frame. */ tf = (struct trapframe *) ((int)*fp + 8); esp = (ISPL(tf->tf_cs) == SEL_UPL) ? 
tf->tf_esp : (int)&tf->tf_esp; switch (frame_type) { case TRAP: if (INKERNEL((int) tf)) { eip = tf->tf_eip; ebp = tf->tf_ebp; db_printf( "--- trap %#r, eip = %#r, esp = %#r, ebp = %#r ---\n", tf->tf_trapno, eip, esp, ebp); } break; case SYSCALL: if (INKERNEL((int) tf)) { eip = tf->tf_eip; ebp = tf->tf_ebp; db_printf( "--- syscall %#r, eip = %#r, esp = %#r, ebp = %#r ---\n", tf->tf_eax, eip, esp, ebp); } break; case INTERRUPT: tf = (struct trapframe *)((int)*fp + 16); if (INKERNEL((int) tf)) { eip = tf->tf_eip; ebp = tf->tf_ebp; db_printf( "--- interrupt, eip = %#r, esp = %#r, ebp = %#r ---\n", eip, esp, ebp); } break; default: break; } *ip = (db_addr_t) eip; *fp = (struct i386_frame *) ebp; #endif } #ifdef MCLX void x86_back_trace_cmd(struct bt_info *bt) #else ulong db_stack_trace_cmd(addr, have_addr, count, modif, task, flags) db_expr_t addr; boolean_t have_addr; db_expr_t count; char *modif; ulong task; ulong flags; #endif /* MCLX */ { struct i386_frame *frame; int *argp; db_addr_t callpc; boolean_t first; #ifdef MCLX db_expr_t addr; boolean_t have_addr; db_expr_t count; char *modif; db_addr_t last_callpc; ulong lastframe; physaddr_t phys; int frame_number; int forced; struct eframe eframe, *ep; char dbuf[BUFSIZE]; if (!(bt->flags & BT_USER_SPACE) && (!bt->stkptr || !accessible(bt->stkptr))) { error(INFO, "cannot determine starting stack pointer\n"); if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, fp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, fp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, fp); return; } if (bt->flags & BT_USER_SPACE) { if (KVMDUMP_DUMPFILE()) kvmdump_display_regs(bt->tc->processor, fp); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) diskdump_display_regs(bt->tc->processor, fp); else if (SADUMP_DUMPFILE()) sadump_display_regs(bt->tc->processor, fp); fprintf(fp, " #0 [user space]\n"); return; } else if ((bt->flags & BT_KERNEL_SPACE)) { if 
(KVMDUMP_DUMPFILE())
			kvmdump_display_regs(bt->tc->processor, fp);
		else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE())
			diskdump_display_regs(bt->tc->processor, fp);
		else if (SADUMP_DUMPFILE())
			sadump_display_regs(bt->tc->processor, fp);
	}

	/* seed the ddb-style walk from the bt_info request */
	addr = bt->stkptr;
	have_addr = TRUE;
	count = 50;
	modif = (char *)bt->instptr;
	mach_debug = bt->debug;

	/*
	 * Without frame pointers (or when explicitly requested), use the
	 * lkcd-style stack scan instead of the EBP-chain walk below.
	 */
	if ((machdep->flags & OMIT_FRAME_PTR) ||
	    bt->debug ||
	    (bt->flags & BT_FRAMESIZE_DEBUG) ||
	    !(bt->flags & BT_OLD_BACK_TRACE)) {
		bt->flags &= ~BT_OLD_BACK_TRACE;
		lkcd_x86_back_trace(bt, 0, fp);
		return;
	}

	if (mach_CRASHDEBUG(2)) {
		fprintf(fp, "--> stkptr: %lx instptr: %lx (%s)\n",
			bt->stkptr, bt->instptr,
			closest_symbol(bt->instptr));
	}
#endif

	if (count == -1)
		count = 65535;

	if (!have_addr) {
#ifndef MCLX
		frame = (struct i386_frame *)ddb_regs.tf_ebp;
		if (frame == NULL)
			frame = (struct i386_frame *)(ddb_regs.tf_esp - 4);
		callpc = (db_addr_t)ddb_regs.tf_eip;
#endif
	} else {
		frame = (struct i386_frame *)addr;
		lastframe = (ulong)frame;
		ep = &eframe;
		BZERO(ep, sizeof(struct eframe));
		ep->eframe_found = FALSE;
		/* first callpc comes off the stack unless the caller
		 * supplied one ("forced") via bt->instptr */
		callpc = (db_addr_t)db_get_value((int)&frame->f_retaddr,
			4, FALSE, bt);
		if (modif) {
			frame_number = 0;
			forced = TRUE;
			callpc = (db_addr_t)modif;
		} else {
			frame_number = 1;
			forced = FALSE;
			if (!is_kernel_text(callpc))
				error(INFO,
			    "callpc from stack is not a text address\n");
		}
	}

	first = TRUE;
	while (count--) {
		struct i386_frame *actframe;
		int narg;
		char *	name;
		db_expr_t offset;
		db_sym_t sym;
#define MAXNARG	16
		char *argnames[MAXNARG], **argnp = NULL;

		sym = db_search_symbol(callpc, DB_STGY_ANY, &offset);
		db_symbol_values(sym, &name, NULL);

		/*
		 * Attempt to determine a (possibly fake) frame that gives
		 * the caller's pc.  It may differ from `frame' if the
		 * current function never sets up a standard frame or hasn't
		 * set one up yet or has just discarded one.  The last two
		 * cases can be guessed fairly reliably for code generated
		 * by gcc.  The first case is too much trouble to handle in
		 * general because the amount of junk on the stack depends
		 * on the pc (the special handling of "calltrap", etc. in
		 * db_nextframe() works because the `next' pc is special).
		 */
		actframe = frame;
		if (first && !have_addr) {
#ifdef MCLX
			error(FATAL, "cannot handle \"!have_addr\" path #2\n");
#else
			int instr;

			instr = db_get_value(callpc, 4, FALSE);
			if ((instr & 0x00ffffff) == 0x00e58955) {
				/* pushl %ebp; movl %esp, %ebp */
				actframe = (struct i386_frame *)
					   (ddb_regs.tf_esp - 4);
			} else if ((instr & 0x0000ffff) == 0x0000e589) {
				/* movl %esp, %ebp */
				actframe = (struct i386_frame *)
					   ddb_regs.tf_esp;
				if (ddb_regs.tf_ebp == 0) {
					/* Fake the caller's frame better. */
					frame = actframe;
				}
			} else if ((instr & 0x000000ff) == 0x000000c3) {
				/* ret */
				actframe = (struct i386_frame *)
					   (ddb_regs.tf_esp - 4);
			} else if (offset == 0) {
				/* Probably a symbol in assembler code. */
				actframe = (struct i386_frame *)
					   (ddb_regs.tf_esp - 4);
			}
#endif
		}
		first = FALSE;

		argp = &actframe->f_arg0;
		narg = MAXNARG;
		if (sym != NULL && db_sym_numargs(sym, &narg, argnames)) {
			argnp = argnames;
		} else {
			narg = db_numargs(frame, bt);
		}

#ifdef MCLX
		if (is_kernel_text(callpc) || IS_MODULE_VADDR(callpc)) {
			if (mach_CRASHDEBUG(2))
				fprintf(fp,
				    "--> (1) lastframe: %lx => frame: %lx\n",
					lastframe, (ulong)frame);
			db_print_stack_entry(name, narg, argnp, argp, callpc,
				bt, ep, frame_number++, frame);
			if (STREQ(closest_symbol(callpc), "start_secondary"))
				break;
			if (BT_REFERENCE_FOUND(bt))
				return;
			/* frame pointers must move up-stack; otherwise quit */
			if ((ulong)frame < lastframe) {
				break;
			}
			if (INSTACK(frame, bt) && ((ulong)frame > lastframe))
				lastframe = (ulong)frame;
		} else {
			/* a forced first callpc is allowed to be non-text */
			if (!(forced && frame_number == 1)) {
				if (is_kernel_data(callpc)) {
					if (mach_CRASHDEBUG(2))
						fprintf(fp,
				    "--> break(1): callpc %lx is data?\n",
							callpc);
					if (!is_rodata_text(callpc))
						break;
				}
				if (mach_CRASHDEBUG(2))
					fprintf(fp,
				    "--> (2) lastframe: %lx => frame: %lx\n",
						lastframe, (ulong)frame);
				db_print_stack_entry(name, narg, argnp, argp,
					callpc, bt, ep, frame_number++, frame);
				if (BT_REFERENCE_FOUND(bt))
					return;
				if ((ulong)frame < lastframe) {
					break;
				}
				if (INSTACK(frame, bt) &&
				    ((ulong)frame > lastframe))
					lastframe = (ulong)frame;
			}
		}

		if (!INSTACK(frame, bt)) {
			if (mach_CRASHDEBUG(2))
				fprintf(fp,
			    "--> break: !INSTACK(frame: %lx, task: %lx)\n",
					(ulong)frame, bt->task);
			break;
		}
#else
		db_print_stack_entry(name, narg, argnp, argp, callpc);
#endif

		if (actframe != frame) {
			/* `frame' belongs to caller. */
			callpc = (db_addr_t)
			    db_get_value((int)&actframe->f_retaddr, 4,
				FALSE, bt);
			continue;
		}

		if (ep->eframe_found)
			frame_number = dump_eframe(ep, frame_number, bt);

		last_callpc = callpc;
skip_frame:
		db_nextframe(&frame, &callpc, ep, bt);

		if (mach_CRASHDEBUG(2)) {
			fprintf(fp,
			    "--> db_nextframe: frame: %lx callpc: %lx [%s]\n",
				(ulong)frame, callpc,
				value_to_symstr(callpc, dbuf,0));
			if (callpc == last_callpc)
				fprintf(fp, "last callpc == callpc!\n");
		}

		/* skip the repeated frame left by smp_stop_cpu_interrupt */
		if ((callpc == last_callpc) && STREQ(closest_symbol(callpc),
		    "smp_stop_cpu_interrupt"))
			goto skip_frame;

		if (INSTACK(frame, bt) && ((ulong)frame < lastframe))
			if (mach_CRASHDEBUG(2))
				fprintf(fp,
				    "--> frame pointer reversion?\n");

		if (INKERNEL((int) callpc) && !INKERNEL((int) frame)) {
			/* kernel text but no kernel frame: final entry */
			sym = db_search_symbol(callpc, DB_STGY_ANY, &offset);
			db_symbol_values(sym, &name, NULL);

			if (is_kernel_data(callpc)) {
				if (mach_CRASHDEBUG(2))
					fprintf(fp,
				    "--> break(2): callpc %lx is data?\n",
						callpc);
				if (!is_rodata_text(callpc))
					break;
			}

			if (mach_CRASHDEBUG(2))
				fprintf(fp,
				    "--> (3) lastframe: %lx => frame: %lx\n",
					lastframe, (ulong)frame);
			db_print_stack_entry(name, 0, 0, 0, callpc,
				bt, ep, frame_number++, frame);
			if (BT_REFERENCE_FOUND(bt))
				return;
			if ((ulong)frame < lastframe) {
				if (STREQ(closest_symbol(callpc),
				    "reschedule"))
					x86_user_eframe(bt);
				break;
			}
			if (INSTACK(frame, bt) && ((ulong)frame > lastframe))
				lastframe = (ulong)frame;
			if (mach_CRASHDEBUG(2))
				fprintf(fp,
	    "--> break: INKERNEL(callpc: %lx [%s]) && !INKERNEL(frame: %lx)\n",
					callpc,
					value_to_symstr(callpc, dbuf, 0),
					(ulong)frame);
			break;
		}
		if (!INKERNEL((int) frame)) {
			if (mach_CRASHDEBUG(2))
				fprintf(fp,
				    "--> break: !INKERNEL(frame: %lx)\n",
					(ulong)frame);
			break;
		}
	}

	if (mach_CRASHDEBUG(2)) {
		fprintf(fp, "--> returning lastframe: %lx\n", lastframe);
	}

	/* flush any exception frame discovered on the final iteration */
	if (ep->eframe_found)
		frame_number = dump_eframe(ep, frame_number, bt);

#ifndef MCLX
	return(lastframe);
#endif
}

/*
 *  The remainder of this file was generated at MCL to segregate
 *  x86-specific needs.
 */

/* forward declarations of the x86 machine-dependent service functions */
static int x86_uvtop(struct task_context *, ulong, physaddr_t *, int);
static int x86_kvtop(struct task_context *, ulong, physaddr_t *, int);
static int x86_uvtop_PAE(struct task_context *, ulong, physaddr_t *, int);
static int x86_kvtop_PAE(struct task_context *, ulong, physaddr_t *, int);
static int x86_uvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
static int x86_kvtop_xen_wpt(struct task_context *, ulong, physaddr_t *, int);
static int x86_uvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int);
static int x86_kvtop_xen_wpt_PAE(struct task_context *, ulong, physaddr_t *, int);
static int x86_kvtop_remap(ulong, physaddr_t *);
static ulong x86_get_task_pgd(ulong);
static ulong x86_processor_speed(void);
static ulong x86_get_pc(struct bt_info *);
static ulong x86_get_sp(struct bt_info *);
static void x86_get_stack_frame(struct bt_info *, ulong *, ulong *);
static int x86_translate_pte(ulong, void *, ulonglong);
static uint64_t x86_memory_size(void);
static ulong x86_vmalloc_start(void);
static ulong *read_idt_table(int);
static void eframe_init(void);
static int remap_init(void);
#define READ_IDT_INIT     1
#define READ_IDT_RUNTIME  2
static char *extract_idt_function(ulong *, char *, ulong *);
static int x86_is_task_addr(ulong);
static int x86_verify_symbol(const char *, ulong, char);
static int x86_eframe_search(struct bt_info *);
static ulong x86_in_irqstack(ulong);
static int x86_dis_filter(ulong, char *, unsigned int);
static struct line_number_hook x86_line_number_hooks[];
static int
x86_is_uvaddr(ulong, struct task_context *);
static void x86_init_kernel_pgd(void);
static ulong xen_m2p_nonPAE(ulong);
static int x86_xendump_p2m_create(struct xendump_data *);
static int x86_pvops_xendump_p2m_create(struct xendump_data *);
static int x86_pvops_xendump_p2m_l2_create(struct xendump_data *);
static int x86_pvops_xendump_p2m_l3_create(struct xendump_data *);
static void x86_debug_dump_page(FILE *, char *, char *);
static int x86_xen_kdump_p2m_create(struct xen_kdump_data *);
static char *x86_xen_kdump_load_page(ulong, char *);
static char *x86_xen_kdump_load_page_PAE(ulong, char *);
static ulong x86_xen_kdump_page_mfn(ulong);
static ulong x86_xen_kdump_page_mfn_PAE(ulong);
static ulong x86_xendump_panic_task(struct xendump_data *);
static void x86_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *);
static char *x86_xendump_load_page(ulong, char *);
static char *x86_xendump_load_page_PAE(ulong, char *);
static int x86_xendump_page_index(ulong);
static int x86_xendump_page_index_PAE(ulong);
static void x86_init_hyper(int);
static ulong x86_get_stackbase_hyper(ulong);
static ulong x86_get_stacktop_hyper(ulong);

/*
 *  Word indices of each register within an x86 pt_regs exception frame
 *  as laid down on the kernel stack (-1 means "not present").  These
 *  are variables, not macros, so eframe_init() can presumably adjust
 *  them per kernel version -- TODO confirm against eframe_init().
 */
int INT_EFRAME_SS = 14;
int INT_EFRAME_ESP = 13;
int INT_EFRAME_EFLAGS = 12;
int INT_EFRAME_CS = 11;      /* CS lcall7 */
int INT_EFRAME_EIP = 10;     /* EIP lcall7 */
int INT_EFRAME_ERR = 9;      /* EFLAGS lcall7 */
int INT_EFRAME_ES = 8;
int INT_EFRAME_DS = 7;
int INT_EFRAME_EAX = 6;
int INT_EFRAME_EBP = 5;
int INT_EFRAME_EDI = 4;
int INT_EFRAME_ESI = 3;
int INT_EFRAME_EDX = 2;
int INT_EFRAME_ECX = 1;
int INT_EFRAME_EBX = 0;
int INT_EFRAME_GS = -1;

#define MAX_USER_EFRAME_SIZE   (17)
#define KERNEL_EFRAME_SIZE     (INT_EFRAME_EFLAGS+1)
#define EFRAME_USER            (1)
#define EFRAME_KERNEL          (2)
#define DPL_BITS               (0x3)

/*
 *  Display the exception frame at ep->eframe_addr: dump the register
 *  set, and for kernel-mode frames also print a backtrace line for the
 *  interrupted EIP plus a best-effort argument list.  Returns the
 *  updated frame number.
 */
static int
dump_eframe(struct eframe *ep, int frame_number, struct bt_info *bt)
{
	int i;
	char buf[BUFSIZE], *sp;
	ulong int_eframe[MAX_USER_EFRAME_SIZE];
	int eframe_type, args;
	ulong value, *argp;

	eframe_type = 0;
	if (STACK_OFFSET_TYPE(ep->eframe_addr) > STACKSIZE())
		return(frame_number);

	GET_STACK_DATA(ep->eframe_addr, (char *)int_eframe, SIZE(pt_regs));

	/* a non-zero RPL in the saved CS means a user-mode frame */
	if (int_eframe[INT_EFRAME_CS] & DPL_BITS) {
		if (!INSTACK(ep->eframe_addr + SIZE(pt_regs) - 1, bt))
			return(frame_number);
			/* error(FATAL, "read of exception frame would go beyond stack\n"); */
		eframe_type = EFRAME_USER;
	} else {
		if (!INSTACK(ep->eframe_addr +
		    (KERNEL_EFRAME_SIZE*sizeof(ulong)) - 1, bt))
			return(frame_number);
			/* error(FATAL, "read of exception frame would go beyond stack\n"); */
		eframe_type = EFRAME_KERNEL;
	}

	x86_dump_eframe_common(bt, int_eframe,
		(eframe_type == EFRAME_KERNEL));

	if (bt->flags & BT_EFRAME_SEARCH)
		return 0;

	if (eframe_type == EFRAME_USER)
		return(frame_number);

	if (BT_REFERENCE_CHECK(bt))
		return(++frame_number);

	/*
	 *  The exception occurred while executing in kernel mode.
	 *  Pull out the EIP from the exception frame and display
	 *  the frame line.  Then figure out whether it's possible to
	 *  show any arguments.
	 */
	fprintf(fp, "%s#%d [%08lx] %s at %08lx\n",
		frame_number < 10 ? " " : "",
		frame_number,
		int_eframe[INT_EFRAME_EBP],
		value_to_symstr(int_eframe[INT_EFRAME_EIP], buf, 0),
		int_eframe[INT_EFRAME_EIP]);
	frame_number++;

	if ((sp = closest_symbol(int_eframe[INT_EFRAME_EIP])) == NULL)
		return(frame_number);

	value = symbol_value(sp);
	/* args start above the saved EBP/return-address pair */
	argp = (ulong *)(int_eframe[INT_EFRAME_EBP] + (sizeof(long)*2));
	args = is_system_call(NULL, value) ? 4 :
		eframe_numargs(int_eframe[INT_EFRAME_EIP], bt);
	fprintf(fp, " (");
	for (i = 0; i < args; i++, argp++) {
		if (INSTACK(argp, bt))
			value = GET_STACK_ULONG((ulong)argp);
		else /* impossible! */
			readmem((ulong)argp, KVADDR, &value,
				sizeof(ulong), "syscall arg",
				FAULT_ON_ERROR);
		if (i)
			fprintf(fp, ", ");
		if ((sp = value_symbol(value)))
			fprintf(fp, "%s", sp);
		else if ((bt->flags & BT_SYMBOLIC_ARGS) &&
		    strlen(value_to_symstr(value, buf, 0)))
			fprintf(fp, "%s", buf);
		else
			fprintf(fp, "%lx", value);
	}
	fprintf(fp, ")\n");

	if (bt->flags & BT_LINE_NUMBERS)
		x86_dump_line_number(int_eframe[INT_EFRAME_EIP]);

	return(frame_number);
}

/*
 *  Dump an exception frame, coming from either source of stack trace code.
 *  (i.e., -fomit-frame-pointer or not)
 *
 *  In BT_REFERENCE_CHECK mode no output is produced; the register set is
 *  only compared against the requested hex value.
 */
void
x86_dump_eframe_common(struct bt_info *bt, ulong *int_eframe, int kernel)
{
	struct syment *sp;
	ulong offset;

	if (bt && BT_REFERENCE_CHECK(bt)) {
		if (!(bt->ref->cmdflags & BT_REF_HEXVAL))
			return;
		if ((int_eframe[INT_EFRAME_EAX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EBX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_ECX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EDX] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EBP] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_ESI] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EDI] == bt->ref->hexval) ||
		    ((short)int_eframe[INT_EFRAME_ES] ==
			(short)bt->ref->hexval) ||
		    ((short)int_eframe[INT_EFRAME_DS] ==
			(short)bt->ref->hexval) ||
		    ((short)int_eframe[INT_EFRAME_CS] ==
			(short)bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EIP] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_ERR] == bt->ref->hexval) ||
		    (int_eframe[INT_EFRAME_EFLAGS] == bt->ref->hexval))
			bt->ref->cmdflags |= BT_REF_FOUND;
		if (!kernel) {
			/* SS/ESP only exist in user-mode frames */
			if ((int_eframe[INT_EFRAME_ESP] == bt->ref->hexval) ||
			    ((short)int_eframe[INT_EFRAME_SS] ==
				(short)bt->ref->hexval))
				bt->ref->cmdflags |= BT_REF_FOUND;
		}
		return;
	}

	if (kernel) {
		if (bt && (bt->flags & BT_EFRAME_SEARCH)) {
			fprintf(fp, " [exception EIP: ");
			if ((sp = value_search(int_eframe[INT_EFRAME_EIP],
			    &offset))) {
				fprintf(fp, "%s", sp->name);
				if (offset)
					fprintf(fp,
					    (*gdb_output_radix == 16) ?
"+0x%lx" : "+%ld", offset);
			} else
				fprintf(fp, "unknown or invalid address");
			fprintf(fp, "]\n");
		}
		fprintf(fp,
		    " EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx EBP: %08lx \n",
			int_eframe[INT_EFRAME_EAX],
			int_eframe[INT_EFRAME_EBX],
			int_eframe[INT_EFRAME_ECX],
			int_eframe[INT_EFRAME_EDX],
			int_eframe[INT_EFRAME_EBP]);
	} else
		/* user-mode frames print EBP with SS/ESP further below */
		fprintf(fp,
		    " EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx \n",
			int_eframe[INT_EFRAME_EAX],
			int_eframe[INT_EFRAME_EBX],
			int_eframe[INT_EFRAME_ECX],
			int_eframe[INT_EFRAME_EDX]);

	fprintf(fp, " DS: %04x ESI: %08lx ES: %04x EDI: %08lx",
		(short)int_eframe[INT_EFRAME_DS],
		int_eframe[INT_EFRAME_ESI],
		(short)int_eframe[INT_EFRAME_ES],
		int_eframe[INT_EFRAME_EDI]);
	if (kernel && (INT_EFRAME_GS != -1))
		fprintf(fp, " GS: %04x", (short)int_eframe[INT_EFRAME_GS]);
	fprintf(fp, "\n");

	if (!kernel) {
		fprintf(fp, " SS: %04x ESP: %08lx EBP: %08lx",
			(short)int_eframe[INT_EFRAME_SS],
			int_eframe[INT_EFRAME_ESP],
			int_eframe[INT_EFRAME_EBP]);
		if (INT_EFRAME_GS != -1)
			fprintf(fp, " GS: %04x",
				(short)int_eframe[INT_EFRAME_GS]);
		fprintf(fp, "\n");
	}

	fprintf(fp, " CS: %04x EIP: %08lx ERR: %08lx EFLAGS: %08lx \n",
		(short)int_eframe[INT_EFRAME_CS],
		int_eframe[INT_EFRAME_EIP],
		int_eframe[INT_EFRAME_ERR],
		int_eframe[INT_EFRAME_EFLAGS]);
}

/*
 *  Catch a few functions that show up as rodata but really are
 *  functions.
 */
int
is_rodata_text(ulong callpc)
{
	struct syment *sp;

	if (!is_rodata(callpc, &sp))
		return FALSE;

	/* e.g. the "call_*_interrupt" trampolines land in rodata */
	if (strstr(sp->name, "interrupt") ||
	    strstr(sp->name, "call_"))
		return TRUE;

	return FALSE;
}

/*
 *  Classify a return-address symbol: IDT_DIRECT_ENTRY if it matches
 *  any function installed in the runtime IDT, otherwise one of the
 *  well-known interrupt/error/signal return paths, or FALSE (NORMAL).
 */
static int
check_for_eframe(char *name, struct bt_info *bt)
{
	int i;
	ulong *ip;
	char buf[BUFSIZE];

	/* each IDT gate occupies two ulongs, hence ip += 2 */
	ip = read_idt_table(READ_IDT_RUNTIME);
	for (i = 0; i < 256; i++, ip += 2) {
		if (STREQ(name, extract_idt_function(ip, buf, NULL)))
			return IDT_DIRECT_ENTRY;
	}

	if (STREQ(name, "ret_from_intr") ||
	    STREQ(name, "call_call_function_interrupt") ||
	    STREQ(name, "call_reschedule_interrupt") ||
	    STREQ(name, "call_invalidate_interrupt"))
		return RET_FROM_INTR;

	if (STREQ(name, "error_code"))
		return IDT_JMP_ERROR_CODE;

	if (STREQ(name, "signal_return"))
		return SIGNAL_RETURN;

	return FALSE;
}

/*
 *  Return the syment of the function that did the "jmp error_code".
 */
struct syment *
x86_jmp_error_code(ulong callpc)
{
	struct syment *sp;

	/* map e.g. "do_divide_error" back to "divide_error" */
	if (!(sp = value_search(callpc, NULL)) ||
	    !STRNEQ(sp->name, "do_"))
		return NULL;

	return (symbol_search(sp->name + strlen("do_")));
}

/* source files referenced by the line-number hook table below */
static const char *hook_files[] = {
	"arch/i386/kernel/entry.S",
	"arch/i386/kernel/head.S",
	"arch/i386/kernel/semaphore.c"
};

#define ENTRY_S      ((char **)&hook_files[0])
#define HEAD_S       ((char **)&hook_files[1])
#define SEMAPHORE_C  ((char **)&hook_files[2])

/*
 *  Assembler symbols whose file/line data gdb cannot resolve, mapped
 *  to the source file they live in (used on kernels < 2.6.24).
 */
static struct line_number_hook x86_line_number_hooks[] = {
	{"lcall7", ENTRY_S},
	{"lcall27", ENTRY_S},
	{"ret_from_fork", ENTRY_S},
	{"system_call", ENTRY_S},
	{"ret_from_sys_call", ENTRY_S},
	{"ret_from_intr", ENTRY_S},
	{"divide_error", ENTRY_S},
	{"coprocessor_error", ENTRY_S},
	{"simd_coprocessor_error", ENTRY_S},
	{"device_not_available", ENTRY_S},
	{"debug", ENTRY_S},
	{"nmi", ENTRY_S},
	{"int3", ENTRY_S},
	{"overflow", ENTRY_S},
	{"bounds", ENTRY_S},
	{"invalid_op", ENTRY_S},
	{"coprocessor_segment_overrun", ENTRY_S},
	{"double_fault", ENTRY_S},
	{"invalid_TSS", ENTRY_S},
	{"segment_not_present", ENTRY_S},
	{"stack_segment", ENTRY_S},
	{"general_protection", ENTRY_S},
	{"alignment_check", ENTRY_S},
	{"page_fault", ENTRY_S},
	{"machine_check", ENTRY_S},
	{"spurious_interrupt_bug", ENTRY_S},
	{"v86_signal_return", ENTRY_S},
	{"tracesys", ENTRY_S},
	{"tracesys_exit", ENTRY_S},
	{"badsys", ENTRY_S},
	{"ret_from_exception", ENTRY_S},
	{"reschedule", ENTRY_S},
	{"error_code", ENTRY_S},
	{"device_not_available_emulate", ENTRY_S},
	{"restore_all", ENTRY_S},
	{"signal_return", ENTRY_S},
	{"L6", HEAD_S},
	{"_text", HEAD_S},
	{"startup_32", HEAD_S},
	{"checkCPUtype", HEAD_S},
	{"is486", HEAD_S},
	{"is386", HEAD_S},
	{"ready", HEAD_S},
	{"check_x87", HEAD_S},
	{"setup_idt", HEAD_S},
	{"rp_sidt", HEAD_S},
	{"stack_start", HEAD_S},
	{"int_msg", HEAD_S},
	{"ignore_int", HEAD_S},
	{"idt_descr", HEAD_S},
	{"idt", HEAD_S},
	{"gdt_descr", HEAD_S},
	{"gdt", HEAD_S},
	{"swapper_pg_dir", HEAD_S},
	{"pg0", HEAD_S},
	{"pg1", HEAD_S},
	{"empty_zero_page", HEAD_S},
	{"__down_failed", SEMAPHORE_C},
	{"__down_failed_interruptible", SEMAPHORE_C},
	{"__down_failed_trylock", SEMAPHORE_C},
	{"__up_wakeup", SEMAPHORE_C},
	{"__write_lock_failed", SEMAPHORE_C},
	{"__read_lock_failed", SEMAPHORE_C},
	{NULL, NULL}    /* list must be NULL-terminated */
};

/*
 *  Print the file-and-line location for callpc; on a first miss, retry
 *  once using the closest symbol's value (dropping the trailing ": "
 *  portion of the result in that case).
 */
static void
x86_dump_line_number(ulong callpc)
{
	int retries;
	char buf[BUFSIZE], *p;

	retries = 0;
try_closest:
	get_line_number(callpc, buf, FALSE);

	if (strlen(buf)) {
		if (retries) {
			p = strstr(buf, ": ");
			if (p)
				*p = NULLCHAR;
		}
		fprintf(fp, " %s\n", buf);
	} else {
		if (retries) {
			fprintf(fp, GDB_PATCHED() ?
			  "" : " (cannot determine file and line number)\n");
		} else {
			retries++;
			callpc = closest_symbol_value(callpc);
			goto try_closest;
		}
	}
}

/*
 *  Look for likely exception frames in a stack.
 */
struct x86_pt_regs {
	ulong reg_value[MAX_USER_EFRAME_SIZE];
};

/*
 *  Searches from addr within the stackframe defined by bt
 *  for the next set of bytes that matches an exception frame pattern.
 *  Returns either the address of the frame or 0.
 */
static ulong
x86_next_eframe(ulong addr, struct bt_info *bt)
{
	ulong *first, *last;
	struct x86_pt_regs *pt;
	ulong *stack;
	ulong rv;

	stack = (ulong *)bt->stackbuf;

	if (!INSTACK(addr, bt)) {
		return(0);
	}

	rv = 0;
	/* scan word-by-word; stop where a full pt_regs no longer fits */
	first = stack + ((addr - bt->stackbase) / sizeof(ulong));
	last = stack +
	    (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) /
	    sizeof(ulong));

	for ( ; first <= last; first++) {
		pt = (struct x86_pt_regs *)first;

		/* check for kernel exception frame */

		/* CS:DS/ES selector pairs 0x10/0x18, 0x60/0x68, 0x60/0x7b
		 * (and 0x61/0x7b under Xen) with a kernel-virtual EIP;
		 * when frame pointers are in use, EBP must also point
		 * into this stack */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x10) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x18) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x18) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x60) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x68) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x68) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x60) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		if (XEN() &&
		    ((short)pt->reg_value[INT_EFRAME_CS] == 0x61) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    IS_KVADDR(pt->reg_value[INT_EFRAME_EIP])) {
			if (!(machdep->flags & OMIT_FRAME_PTR) &&
			    !INSTACK(pt->reg_value[INT_EFRAME_EBP], bt))
				continue;
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/* check for user exception frame */

		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x2b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x2b) &&
		    ((short)pt->reg_value[INT_EFRAME_SS] == 0x2b) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_EIP], bt->tc) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_EIP], bt->tc) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}

		/*
		 *  2.6 kernels using sysenter_entry instead of system_call
		 *  have a funky trampoline EIP address.
		 */
		if (((short)pt->reg_value[INT_EFRAME_CS] == 0x73) &&
		    ((short)pt->reg_value[INT_EFRAME_DS] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_ES] == 0x7b) &&
		    ((short)pt->reg_value[INT_EFRAME_SS] == 0x7b) &&
		    (pt->reg_value[INT_EFRAME_EFLAGS] == 0x246) &&
		    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc)) {
			rv = bt->stackbase + sizeof(ulong) * (first - stack);
			break;
		}
	}
	return(rv);
}

/*
 *  Search a task's stack -- or, with BT_EFRAME_SEARCH2, every per-cpu
 *  hard/soft IRQ stack -- for exception frame patterns, dumping (or
 *  with BT_EFRAME_COUNT merely counting) each one found.  Returns the
 *  number of frames found.
 */
static int
x86_eframe_search(struct bt_info *bt_in)
{
	ulong addr;
	struct x86_pt_regs *pt;
	struct eframe eframe, *ep;
	struct bt_info bt_local, *bt;
	ulong flagsave;
	ulong irqstack;
	short cs;
	char *mode, *ibuf;
	int c, cnt;

	bt = bt_in;
	ibuf = NULL;
	cnt = 0;

	if (bt->flags & BT_EFRAME_SEARCH2) {
		if (!(tt->flags & IRQSTACKS)) {
			error(FATAL,
			    "this kernel does not have IRQ stacks\n");
			return 0;
		}
		/* work on a local copy and recurse once per IRQ stack */
		BCOPY(bt_in, &bt_local, sizeof(struct bt_info));
		bt = &bt_local;
		bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH2;

		for (c = 0; c < NR_CPUS; c++) {
			if (tt->hardirq_ctx[c]) {
				if ((bt->flags & BT_CPUMASK) &&
				    !(NUM_IN_BITMAP(bt->cpumask, c)))
					continue;
				bt->hp->esp = tt->hardirq_ctx[c];
				fprintf(fp, "CPU %d HARD IRQ STACK:\n", c);
				if ((cnt = x86_eframe_search(bt)))
					fprintf(fp, "\n");
				else
					fprintf(fp, "(none found)\n\n");
			}
		}
		for (c = 0; c < NR_CPUS; c++)
{
			if (tt->softirq_ctx[c]) {
				if ((bt->flags & BT_CPUMASK) &&
				    !(NUM_IN_BITMAP(bt->cpumask, c)))
					continue;
				bt->hp->esp = tt->softirq_ctx[c];
				fprintf(fp, "CPU %d SOFT IRQ STACK:\n", c);
				if ((cnt = x86_eframe_search(bt)))
					fprintf(fp, "\n");
				else
					fprintf(fp, "(none found)\n\n");
			}
		}
		return 0;
	}

	if (bt->hp && bt->hp->esp) {
		/* caller supplied a starting address: it must be in an
		 * IRQ stack or in the task's own stack */
		BCOPY(bt_in, &bt_local, sizeof(struct bt_info));
		bt = &bt_local;
		addr = bt->hp->esp;
		if ((irqstack = x86_in_irqstack(addr))) {
			bt->stackbase = irqstack;
			bt->stacktop = irqstack + SIZE(irq_ctx);
			if (SIZE(irq_ctx) > STACKSIZE()) {
				ibuf = (char *)GETBUF(SIZE(irq_ctx));
				bt->stackbuf = ibuf;
			}
			alter_stackbuf(bt);
		} else if (!INSTACK(addr, bt))
			error(FATAL,
			    "unrecognized stack address for this task: %lx\n",
				bt->hp->esp);
	} else if (tt->flags & THREAD_INFO)
		/* start the scan just above the thread_info/task_struct */
		addr = bt->stackbase +
			roundup(SIZE(thread_info), sizeof(ulong));
	else
		addr = bt->stackbase +
			roundup(SIZE(task_struct), sizeof(ulong));

	ep = &eframe;
	BZERO(ep, sizeof(struct eframe));

	while ((addr = x86_next_eframe(addr, bt)) != 0) {
		cnt++;
		if (bt->flags & BT_EFRAME_COUNT) {
			addr += 4;
			continue;
		}
		pt = (struct x86_pt_regs *)
			(bt->stackbuf + (addr - bt->stackbase));
		ep->eframe_addr = addr;
		/* classify the frame by its saved CS selector */
		cs = pt->reg_value[INT_EFRAME_CS];
		if ((cs == 0x23) || (cs == 0x73)) {
			mode = "USER-MODE";
		} else if ((cs == 0x10) || (cs == 0x60)) {
			mode = "KERNEL-MODE";
		} else if (XEN() && (cs == 0x61)) {
			mode = "KERNEL-MODE";
		} else {
			mode = "UNKNOWN-MODE";
		}
		fprintf(fp, "%s %s EXCEPTION FRAME AT %lx:\n",
			bt->flags & BT_EFRAME_SEARCH ? "\n" : "",
			mode, ep->eframe_addr);
		flagsave = bt->flags;
		bt->flags |= BT_EFRAME_SEARCH;
		dump_eframe(ep, 0, bt);
		bt->flags = flagsave;
		addr += 4;
	}

	if (ibuf)
		FREEBUF(ibuf);

	return cnt;
}

/*
 *  If addr falls within any cpu's hard or soft IRQ stack, return that
 *  stack's base address; otherwise return 0.
 */
static ulong
x86_in_irqstack(ulong addr)
{
	int c;

	if (!(tt->flags & IRQSTACKS))
		return 0;

	for (c = 0; c < NR_CPUS; c++) {
		if (tt->hardirq_ctx[c]) {
			if ((addr >= tt->hardirq_ctx[c]) &&
			    (addr < (tt->hardirq_ctx[c] + SIZE(irq_ctx))))
				return(tt->hardirq_ctx[c]);
		}
		if (tt->softirq_ctx[c]) {
			if ((addr >= tt->softirq_ctx[c]) &&
			    (addr < (tt->softirq_ctx[c] + SIZE(irq_ctx))))
				return(tt->softirq_ctx[c]);
		}
	}

	return 0;
}

/*
 *  Dump the kernel-entry user-mode exception frame.
 */
static void
x86_user_eframe(struct bt_info *bt)
{
	struct eframe eframe, *ep;
	struct x86_pt_regs x86_pt_regs, *pt;
	ulong pt_regs_addr;

	pt_regs_addr = USER_EFRAME_ADDR(bt->task);

	readmem(pt_regs_addr, KVADDR, &x86_pt_regs,
		sizeof(struct x86_pt_regs),
		"x86 pt_regs", FAULT_ON_ERROR);

	pt = &x86_pt_regs;

	/* only dump it if it looks like a sane user-mode register set */
	if (((short)pt->reg_value[INT_EFRAME_CS] == 0x23) &&
	    ((short)pt->reg_value[INT_EFRAME_DS] == 0x2b) &&
	    ((short)pt->reg_value[INT_EFRAME_ES] == 0x2b) &&
	    ((short)pt->reg_value[INT_EFRAME_SS] == 0x2b) &&
	    IS_UVADDR(pt->reg_value[INT_EFRAME_EIP], bt->tc) &&
	    IS_UVADDR(pt->reg_value[INT_EFRAME_ESP], bt->tc) &&
	    IS_UVADDR(pt->reg_value[INT_EFRAME_EBP], bt->tc)) {
		ep = &eframe;
		BZERO(ep, sizeof(struct eframe));
		ep->eframe_addr = pt_regs_addr;
		bt->flags |= BT_EFRAME_SEARCH;
		dump_eframe(ep, 0, bt);
		bt->flags &= ~(ulonglong)BT_EFRAME_SEARCH;
	}
}

/*
 *  Do all necessary machine-specific setup here.  This is called three times,
 *  during symbol table initialization, and before and after GDB has been
 *  initialized.
*/

struct machine_specific x86_machine_specific = { 0 };

/* page-table geometry, set at PRE_GDB time per 2-level vs PAE layout */
static int PGDIR_SHIFT;
static int PTRS_PER_PTE;
static int PTRS_PER_PGD;

void
x86_init(int when)
{
	struct syment *sp, *spn;

	if (XEN_HYPER_MODE()) {
		x86_init_hyper(when);
		return;
	}

	switch (when)
	{
	case SETUP_ENV:
		machdep->process_elf_notes = x86_process_elf_notes;
		break;

	case PRE_SYMTAB:
		/* page geometry and scratch buffers for pgd/pmd/ptbl reads */
		machdep->verify_symbol = x86_verify_symbol;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 2;
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->machspec = &x86_machine_specific;
		machdep->verify_paddr = generic_verify_paddr;
		x86_parse_cmdline_args();
		break;

	case PRE_GDB:
		/*
		 * Detect PAE either directly via pae_pgd_cachep or by the
		 * 512-entry pkmap_count array that a PAE kernel exhibits.
		 */
		if (symbol_exists("pae_pgd_cachep") ||
		    ((sp = symbol_search("pkmap_count")) &&
		    (spn = next_symbol(NULL, sp)) &&
		    (((spn->value - sp->value)/sizeof(int)) == 512))) {
			machdep->flags |= PAE;
			PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL;
			PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL;
			PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL;
			machdep->uvtop = x86_uvtop_PAE;
			machdep->kvtop = x86_kvtop_PAE;
		} else {
			PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL;
			PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL;
			PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL;
			machdep->uvtop = x86_uvtop;
			machdep->kvtop = x86_kvtop;
			/* 2-level paging has no separate pmd */
			free(machdep->pmd);
			machdep->pmd = machdep->pgd;
		}
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		if (!machdep->kvbase) {
			if (kernel_symbol_exists("module_kaslr_mutex"))
				machdep->kvbase = 0xc0000000;
			else
				machdep->kvbase =
					symbol_value("_stext") & ~KVBASE_MASK;
		}
		/* a kvbase below 0x80000000 implies a common (4G/4G-style)
		 * user/kernel address space */
		if (machdep->kvbase & 0x80000000)
			machdep->is_uvaddr = generic_is_uvaddr;
		else {
			vt->flags |= COMMON_VADDR;
			machdep->is_uvaddr = x86_is_uvaddr;
		}
		machdep->identity_map_base = machdep->kvbase;
		machdep->is_kvaddr = generic_is_kvaddr;
		machdep->eframe_search = x86_eframe_search;
		machdep->back_trace = x86_back_trace_cmd;
		machdep->processor_speed = x86_processor_speed;
		machdep->get_task_pgd = x86_get_task_pgd;
		machdep->dump_irq = generic_dump_irq;
		machdep->get_irq_affinity = generic_get_irq_affinity;
		machdep->show_interrupts = generic_show_interrupts;
		machdep->get_stack_frame = x86_get_stack_frame;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = x86_translate_pte;
		machdep->memory_size = x86_memory_size;
		machdep->vmalloc_start = x86_vmalloc_start;
		machdep->is_task_addr = x86_is_task_addr;
		machdep->dis_filter = x86_dis_filter;
		machdep->cmd_mach = x86_cmd_mach;
		machdep->get_smp_cpus = x86_get_smp_cpus;
		machdep->flags |= FRAMESIZE_DEBUG;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->init_kernel_pgd = x86_init_kernel_pgd;
		machdep->xendump_p2m_create = x86_xendump_p2m_create;
		machdep->xen_kdump_p2m_create = x86_xen_kdump_p2m_create;
		machdep->xendump_panic_task = x86_xendump_panic_task;
		machdep->get_xendump_regs = x86_get_xendump_regs;
		machdep->clear_machdep_cache = x86_clear_machdep_cache;
		break;

	case POST_GDB:
		if (x86_omit_frame_pointer())
			machdep->flags |= OMIT_FRAME_PTR;
		STRUCT_SIZE_INIT(user_regs_struct, "user_regs_struct");
		/*
		 * Newer kernels renamed the user_regs_struct members from
		 * e-prefixed (ebp) to plain (bp); probe both spellings.
		 */
		if (MEMBER_EXISTS("user_regs_struct", "ebp"))
			MEMBER_OFFSET_INIT(user_regs_struct_ebp,
				"user_regs_struct", "ebp");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_ebp,
				"user_regs_struct", "bp");
		if (MEMBER_EXISTS("user_regs_struct", "esp"))
			MEMBER_OFFSET_INIT(user_regs_struct_esp,
				"user_regs_struct", "esp");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_esp,
				"user_regs_struct", "sp");
		if (MEMBER_EXISTS("user_regs_struct", "eip"))
			MEMBER_OFFSET_INIT(user_regs_struct_eip,
				"user_regs_struct", "eip");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_eip,
				"user_regs_struct", "ip");
		if (MEMBER_EXISTS("user_regs_struct", "eax"))
			MEMBER_OFFSET_INIT(user_regs_struct_eax,
				"user_regs_struct", "eax");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_eax,
				"user_regs_struct", "ax");
		if (MEMBER_EXISTS("user_regs_struct", "ebx"))
			MEMBER_OFFSET_INIT(user_regs_struct_ebx,
				"user_regs_struct", "ebx");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_ebx,
				"user_regs_struct", "bx");
		if (MEMBER_EXISTS("user_regs_struct", "ecx"))
			MEMBER_OFFSET_INIT(user_regs_struct_ecx,
				"user_regs_struct", "ecx");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_ecx,
				"user_regs_struct", "cx");
		if (MEMBER_EXISTS("user_regs_struct", "edx"))
			MEMBER_OFFSET_INIT(user_regs_struct_edx,
				"user_regs_struct", "edx");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_edx,
				"user_regs_struct", "dx");
		if (MEMBER_EXISTS("user_regs_struct", "esi"))
			MEMBER_OFFSET_INIT(user_regs_struct_esi,
				"user_regs_struct", "esi");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_esi,
				"user_regs_struct", "si");
		if (MEMBER_EXISTS("user_regs_struct", "edi"))
			MEMBER_OFFSET_INIT(user_regs_struct_edi,
				"user_regs_struct", "edi");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_edi,
				"user_regs_struct", "di");
		if (MEMBER_EXISTS("user_regs_struct", "eflags"))
			MEMBER_OFFSET_INIT(user_regs_struct_eflags,
				"user_regs_struct", "eflags");
		else
			MEMBER_OFFSET_INIT(user_regs_struct_eflags,
				"user_regs_struct", "flags");
		MEMBER_OFFSET_INIT(user_regs_struct_cs,
			"user_regs_struct", "cs");
		MEMBER_OFFSET_INIT(user_regs_struct_ds,
			"user_regs_struct", "ds");
		MEMBER_OFFSET_INIT(user_regs_struct_es,
			"user_regs_struct", "es");
		MEMBER_OFFSET_INIT(user_regs_struct_fs,
			"user_regs_struct", "fs");
		MEMBER_OFFSET_INIT(user_regs_struct_gs,
			"user_regs_struct", "gs");
		MEMBER_OFFSET_INIT(user_regs_struct_ss,
			"user_regs_struct", "ss");
		if (!VALID_STRUCT(user_regs_struct)) {
			/*  Use this hardwired version -- sometimes the
			 *  debuginfo doesn't pick this up even though
			 *  it exists in the kernel; it shouldn't change.
			 */
			struct x86_user_regs_struct {
				long ebx, ecx, edx, esi, edi, ebp, eax;
				unsigned short ds, __ds, es, __es;
				unsigned short fs, __fs, gs, __gs;
				long orig_eax, eip;
				unsigned short cs, __cs;
				long eflags, esp;
				unsigned short ss, __ss;
			};
			ASSIGN_SIZE(user_regs_struct) =
				sizeof(struct x86_user_regs_struct);
			ASSIGN_OFFSET(user_regs_struct_ebp) =
				offsetof(struct x86_user_regs_struct, ebp);
			ASSIGN_OFFSET(user_regs_struct_esp) =
				offsetof(struct x86_user_regs_struct, esp);
			ASSIGN_OFFSET(user_regs_struct_eip) =
				offsetof(struct x86_user_regs_struct, eip);
			ASSIGN_OFFSET(user_regs_struct_eax) =
				offsetof(struct x86_user_regs_struct, eax);
			ASSIGN_OFFSET(user_regs_struct_ebx) =
				offsetof(struct x86_user_regs_struct, ebx);
			ASSIGN_OFFSET(user_regs_struct_ecx) =
				offsetof(struct x86_user_regs_struct, ecx);
			ASSIGN_OFFSET(user_regs_struct_edx) =
				offsetof(struct x86_user_regs_struct, edx);
			ASSIGN_OFFSET(user_regs_struct_esi) =
				offsetof(struct x86_user_regs_struct, esi);
			ASSIGN_OFFSET(user_regs_struct_edi) =
				offsetof(struct x86_user_regs_struct, edi);
			ASSIGN_OFFSET(user_regs_struct_eflags) =
				offsetof(struct x86_user_regs_struct, eflags);
			ASSIGN_OFFSET(user_regs_struct_cs) =
				offsetof(struct x86_user_regs_struct, cs);
			ASSIGN_OFFSET(user_regs_struct_ds) =
				offsetof(struct x86_user_regs_struct, ds);
			ASSIGN_OFFSET(user_regs_struct_es) =
				offsetof(struct x86_user_regs_struct, es);
			ASSIGN_OFFSET(user_regs_struct_fs) =
				offsetof(struct x86_user_regs_struct, fs);
			ASSIGN_OFFSET(user_regs_struct_gs) =
				offsetof(struct x86_user_regs_struct, gs);
			ASSIGN_OFFSET(user_regs_struct_ss) =
				offsetof(struct x86_user_regs_struct, ss);
		}
		MEMBER_OFFSET_INIT(thread_struct_cr3, "thread_struct", "cr3");
		STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
		STRUCT_SIZE_INIT(irq_ctx, "irq_ctx");
		/* e820map/e820entry were renamed e820_table/e820_entry */
		if (STRUCT_EXISTS("e820map")) {
			STRUCT_SIZE_INIT(e820map, "e820map");
			MEMBER_OFFSET_INIT(e820map_nr_map,
				"e820map", "nr_map");
		} else {
			STRUCT_SIZE_INIT(e820map, "e820_table");
			MEMBER_OFFSET_INIT(e820map_nr_map,
				"e820_table", "nr_entries");
		}
		if (STRUCT_EXISTS("e820entry")) {
			STRUCT_SIZE_INIT(e820entry, "e820entry");
			MEMBER_OFFSET_INIT(e820entry_addr,
				"e820entry", "addr");
			MEMBER_OFFSET_INIT(e820entry_size,
				"e820entry", "size");
			MEMBER_OFFSET_INIT(e820entry_type,
				"e820entry", "type");
		} else {
			STRUCT_SIZE_INIT(e820entry, "e820_entry");
			MEMBER_OFFSET_INIT(e820entry_addr,
				"e820_entry", "addr");
			MEMBER_OFFSET_INIT(e820entry_size,
				"e820_entry", "size");
			MEMBER_OFFSET_INIT(e820entry_type,
				"e820_entry", "type");
		}
		if (!VALID_STRUCT(irq_ctx))
			STRUCT_SIZE_INIT(irq_ctx, "irq_stack");
		if (KVMDUMP_DUMPFILE())
			set_kvm_iohole(NULL);
		if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
				"irq_desc", NULL, 0);
		else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
				&machdep->nr_irqs);
		else
			machdep->nr_irqs = 224; /* NR_IRQS */
		if (!machdep->hz) {
			machdep->hz = HZ;
			if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
				machdep->hz = 1000;
		}

		if (machdep->flags & PAE) {
			if (THIS_KERNEL_VERSION < LINUX(2,6,26))
				machdep->section_size_bits =
					_SECTION_SIZE_BITS_PAE_ORIG;
			else
				machdep->section_size_bits =
					_SECTION_SIZE_BITS_PAE_2_6_26;
			machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_PAE;
		} else {
			machdep->section_size_bits = _SECTION_SIZE_BITS;
			machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;
		}

		if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) {
			if (machdep->flags & PAE)
				machdep->uvtop = x86_uvtop_xen_wpt_PAE;
			else
				machdep->uvtop = x86_uvtop_xen_wpt;
		}
		if (XEN()) {
			MEMBER_OFFSET_INIT(vcpu_guest_context_user_regs,
				"vcpu_guest_context", "user_regs");
			MEMBER_OFFSET_INIT(cpu_user_regs_esp,
				"cpu_user_regs", "esp");
			MEMBER_OFFSET_INIT(cpu_user_regs_eip,
				"cpu_user_regs", "eip");
		}
		if (THIS_KERNEL_VERSION < LINUX(2,6,24))
			machdep->line_number_hooks = x86_line_number_hooks;
		eframe_init();
		if (THIS_KERNEL_VERSION >= LINUX(2,6,28))
			machdep->machspec->page_protnone = _PAGE_GLOBAL;
		else
			machdep->machspec->page_protnone = _PAGE_PSE;
		STRUCT_SIZE_INIT(note_buf, "note_buf_t");
		STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus");
		MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus",
			"pr_reg");
		STRUCT_SIZE_INIT(percpu_data, "percpu_data");
		if (!remap_init())
			machdep->machspec->max_numnodes = -1;
		MEMBER_OFFSET_INIT(inactive_task_frame_ret_addr,
			"inactive_task_frame", "ret_addr");
		break;

	case POST_INIT:
		read_idt_table(READ_IDT_INIT);
		break;

	case LOG_ONLY:
		machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL &
			~KVBASE_MASK;
		break;
	}
}

/*
 *  Handle non-default (c0000000) values of CONFIG_PAGE_OFFSET
 *  with "--machdep page_offset=
" */ static void x86_parse_cmdline_args(void) { int index, i, c, err; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *p; ulong value = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %x\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { err = 0; if (STRNEQ(arglist[i], "page_offset=")) { int flags = RETURN_ON_ERROR | QUIET; p = arglist[i] + strlen("page_offset="); if (strlen(p)) value = htol(p, flags, &err); if (!err) { machdep->kvbase = value; error(NOTE, "setting PAGE_OFFSET to: 0x%lx\n\n", machdep->kvbase); continue; } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } } } /* * Account for addition of pt_regs.xgs field in 2.6.20+ kernels. */ static void eframe_init(void) { if (INVALID_SIZE(pt_regs)) { if (THIS_KERNEL_VERSION < LINUX(2,6,20)) ASSIGN_SIZE(pt_regs) = (MAX_USER_EFRAME_SIZE-2)*sizeof(ulong); else { ASSIGN_SIZE(pt_regs) = MAX_USER_EFRAME_SIZE*sizeof(ulong); INT_EFRAME_SS = 15; INT_EFRAME_ESP = 14; INT_EFRAME_EFLAGS = 13; INT_EFRAME_CS = 12; INT_EFRAME_EIP = 11; INT_EFRAME_ERR = 10; INT_EFRAME_GS = 9; } return; } if (MEMBER_EXISTS("pt_regs", "esp")) { INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "xss") / 4; INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "esp") / 4; INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "eflags") / 4; INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "xcs") / 4; INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "eip") / 4; INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_eax") / 4; if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "xgs")) != -1) INT_EFRAME_GS /= 4; INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "xes") / 4; INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "xds") / 4; INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "eax") / 4; INT_EFRAME_EBP = 
MEMBER_OFFSET("pt_regs", "ebp") / 4; INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "edi") / 4; INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "esi") / 4; INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "edx") / 4; INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "ecx") / 4; INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "ebx") / 4; } else { INT_EFRAME_SS = MEMBER_OFFSET("pt_regs", "ss") / 4; INT_EFRAME_ESP = MEMBER_OFFSET("pt_regs", "sp") / 4; INT_EFRAME_EFLAGS = MEMBER_OFFSET("pt_regs", "flags") / 4; INT_EFRAME_CS = MEMBER_OFFSET("pt_regs", "cs") / 4; INT_EFRAME_EIP = MEMBER_OFFSET("pt_regs", "ip") / 4; INT_EFRAME_ERR = MEMBER_OFFSET("pt_regs", "orig_ax") / 4; if ((INT_EFRAME_GS = MEMBER_OFFSET("pt_regs", "gs")) != -1) INT_EFRAME_GS /= 4; INT_EFRAME_ES = MEMBER_OFFSET("pt_regs", "es") / 4; INT_EFRAME_DS = MEMBER_OFFSET("pt_regs", "ds") / 4; INT_EFRAME_EAX = MEMBER_OFFSET("pt_regs", "ax") / 4; INT_EFRAME_EBP = MEMBER_OFFSET("pt_regs", "bp") / 4; INT_EFRAME_EDI = MEMBER_OFFSET("pt_regs", "di") / 4; INT_EFRAME_ESI = MEMBER_OFFSET("pt_regs", "si") / 4; INT_EFRAME_EDX = MEMBER_OFFSET("pt_regs", "dx") / 4; INT_EFRAME_ECX = MEMBER_OFFSET("pt_regs", "cx") / 4; INT_EFRAME_EBX = MEMBER_OFFSET("pt_regs", "bx") / 4; } } /* * Locate regions remapped by the remap allocator */ static int remap_init(void) { ulong start_vaddr, end_vaddr, start_pfn; int max_numnodes; struct machine_specific *ms; struct syment *sp; if (! (sp = symbol_search("node_remap_start_vaddr")) ) return FALSE; start_vaddr = sp->value; if (! (sp = symbol_search("node_remap_end_vaddr")) ) return FALSE; end_vaddr = sp->value; if (! 
(sp = symbol_search("node_remap_start_pfn")) ) return FALSE; start_pfn = sp->value; max_numnodes = get_array_length("node_remap_start_pfn", NULL, sizeof(ulong)); if (max_numnodes < 1) max_numnodes = 1; ms = machdep->machspec; ms->remap_start_vaddr = calloc(3 * max_numnodes, sizeof(ulong)); if (!ms->remap_start_vaddr) error(FATAL, "cannot malloc remap array"); ms->remap_end_vaddr = ms->remap_start_vaddr + max_numnodes; ms->remap_start_pfn = ms->remap_end_vaddr + max_numnodes; readmem(start_vaddr, KVADDR, ms->remap_start_vaddr, max_numnodes * sizeof(ulong), "node_remap_start_vaddr", FAULT_ON_ERROR); readmem(end_vaddr, KVADDR, ms->remap_end_vaddr, max_numnodes * sizeof(ulong), "node_remap_end_vaddr", FAULT_ON_ERROR); readmem(start_pfn, KVADDR, ms->remap_start_pfn, max_numnodes * sizeof(ulong), "node_remap_end_vaddr", FAULT_ON_ERROR); ms->max_numnodes = max_numnodes; return TRUE; } static int x86_kvtop_remap(ulong kvaddr, physaddr_t *paddr) { struct machine_specific *ms; int i; ms = machdep->machspec; /* ms->max_numnodes is -1 when unused. */ for (i = 0; i < ms->max_numnodes; ++i) { if (kvaddr >= ms->remap_start_vaddr[i] && kvaddr < ms->remap_end_vaddr[i]) { *paddr = PTOB(ms->remap_start_pfn[i]) + kvaddr - ms->remap_start_vaddr[i]; return TRUE; } } return FALSE; } /* * Needs to be done this way because of potential 4G/4G split. */ static int x86_is_uvaddr(ulong vaddr, struct task_context *tc) { return IN_TASK_VMA(tc->task, vaddr); } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. 
*/ #define _4MB_PAGE_MASK (~((MEGABYTES(4))-1)) #define _2MB_PAGE_MASK (~((MEGABYTES(2))-1)) static int x86_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { if (VALID_MEMBER(thread_struct_cr3)) pgd = (ulong *)machdep->get_task_pgd(tc->task); else { if (INVALID_MEMBER(task_struct_active_mm)) error(FATAL, "no cr3 or active_mm?\n"); readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + (vaddr >> PGDIR_SHIFT); FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_dir)), pgd_pte); if (!(pgd_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) goto no_upage; if (pgd_pte & _PAGE_4M) { if (verbose) { fprintf(fp, " PAGE: %s (4MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pgd_pte)))); x86_translate_pte(pgd_pte, 0, 0); } *paddr = NONPAE_PAGEBASE(pgd_pte) + (vaddr & ~_4MB_PAGE_MASK); return TRUE; } page_middle = page_dir; FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %s => 
%lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_middle)), pmd_pte); if (!pmd_pte) goto no_upage; #ifdef PTES_IN_LOWMEM page_table = (ulong *)(PTOV(NONPAE_PAGEBASE(pmd_pte)) + ((vaddr>>10) & ((PTRS_PER_PTE-1)<<2))); FILL_PTBL(NONPAE_PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); #else page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) + ((vaddr>>10) & ((PTRS_PER_PTE-1)<<2))); FILL_PTBL(NONPAE_PAGEBASE(page_table), PHYSADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); #endif if (verbose) fprintf(fp, " PTE: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_table)), pte); if (!(pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); x86_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = NONPAE_PAGEBASE(pte) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pte)))); x86_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_uvtop_xen_wpt(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *machine_page_table, *pseudo_page_table; ulong pgd_pte, pseudo_pgd_pte; ulong pmd_pte; ulong machine_pte, pseudo_pte; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { if (VALID_MEMBER(thread_struct_cr3)) pgd = (ulong *)machdep->get_task_pgd(tc->task); else { if (INVALID_MEMBER(task_struct_active_mm)) error(FATAL, "no cr3 or active_mm?\n"); readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } 
} else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + (vaddr >> PGDIR_SHIFT); FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_dir)), pgd_pte); if (!pgd_pte) goto no_upage; if (pgd_pte & _PAGE_4M) { if (verbose) fprintf(fp, " PAGE: %s (4MB) [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pgd_pte)))); pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte)); if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pgd_pte |= PAGEOFFSET(pgd_pte); if (verbose) { fprintf(fp, " PAGE: %s (4MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte)))); x86_translate_pte(pseudo_pgd_pte, 0, 0); } *paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + (vaddr & ~_4MB_PAGE_MASK); return TRUE; } page_middle = page_dir; FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_middle)), pmd_pte); if (!pmd_pte) goto no_upage; machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) + ((vaddr>>10) & ((PTRS_PER_PTE-1)<<2))); pseudo_page_table = (ulong *) xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table)); FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE()); machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table)); if (verbose) { fprintf(fp, " PTE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)machine_page_table))); 
fprintf(fp, " PTE: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pseudo_page_table + PAGEOFFSET(machine_page_table))), machine_pte); } if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = machine_pte; if (machine_pte && verbose) { fprintf(fp, "\n"); x86_translate_pte(machine_pte, 0, 0); } goto no_upage; } pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte)); pseudo_pte |= PAGEOFFSET(machine_pte); *paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(machine_pte)))); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pseudo_pte)))); x86_translate_pte(pseudo_pte, 0, 0); } return TRUE; no_upage: return FALSE; } static int x86_uvtop_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulonglong *pgd; ulonglong page_dir_entry; ulonglong page_middle; ulonglong page_middle_entry; ulonglong page_table; ulonglong page_table_entry; ulonglong physpage; ulonglong ull; ulong offset; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { if (VALID_MEMBER(thread_struct_cr3)) pgd = (ulonglong *)machdep->get_task_pgd(tc->task); else { if (INVALID_MEMBER(task_struct_active_mm)) error(FATAL, "no cr3 or active_mm?\n"); readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } else { if ((mm = task_mm(tc->task, TRUE))) pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd))); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if 
(verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong); page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); if (verbose) fprintf(fp, " PGD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), page_dir_entry); if (!(page_dir_entry & _PAGE_PRESENT)) { goto no_upage; } page_middle = PAE_PAGEBASE(page_dir_entry); FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_middle_entry); } if (!(page_middle_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { goto no_upage; } if (page_middle_entry & _PAGE_PSE) { if (verbose) { ull = PAE_PAGEBASE(page_middle_entry); fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_middle_entry); } physpage = PAE_PAGEBASE(page_middle_entry) + (vaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } page_table = PAE_PAGEBASE(page_middle_entry); FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_table_entry); } if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = page_table_entry; if (page_table_entry && verbose) { fprintf(fp, "\n"); x86_translate_pte(0, 0, page_table_entry); } goto no_upage; } physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr); *paddr = physpage; if (verbose) { ull = PAE_PAGEBASE(page_table_entry); fprintf(fp, " PAGE: %s\n\n", 
mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_table_entry); } return TRUE; no_upage: return FALSE; } static int x86_uvtop_xen_wpt_PAE(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulonglong *pgd; ulonglong page_dir_entry; ulonglong page_middle, pseudo_page_middle; ulonglong page_middle_entry; ulonglong page_table, pseudo_page_table; ulonglong page_table_entry, pte; ulonglong physpage, pseudo_physpage; ulonglong ull; ulong offset; char buf[BUFSIZE]; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { if (VALID_MEMBER(thread_struct_cr3)) pgd = (ulonglong *)machdep->get_task_pgd(tc->task); else { if (INVALID_MEMBER(task_struct_active_mm)) error(FATAL, "no cr3 or active_mm?\n"); readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } else { if ((mm = task_mm(tc->task, TRUE))) pgd = (ulonglong *)(ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd))); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); offset = ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong); page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); if (verbose) fprintf(fp, " PGD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), page_dir_entry); if (!(page_dir_entry & _PAGE_PRESENT)) { goto no_upage; } page_middle = PAE_PAGEBASE(page_dir_entry); pseudo_page_middle = xen_m2p(page_middle); if (verbose) fprintf(fp, " PGD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, 
RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), pseudo_page_middle | PAGEOFFSET(page_dir_entry) | (page_dir_entry & _PAGE_NX)); FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_middle_entry); } if (!(page_middle_entry & _PAGE_PRESENT)) { goto no_upage; } if (page_middle_entry & _PAGE_PSE) { error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ if (verbose) { ull = PAE_PAGEBASE(page_middle_entry); fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_middle_entry); } physpage = PAE_PAGEBASE(page_middle_entry) + (vaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } page_table = PAE_PAGEBASE(page_middle_entry); pseudo_page_table = xen_m2p(page_table); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), pseudo_page_table | PAGEOFFSET(page_middle_entry) | (page_middle_entry & _PAGE_NX)); } FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); offset = ((vaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_table_entry); } if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { *paddr = page_table_entry; if (page_table_entry && verbose) { fprintf(fp, "\n"); x86_translate_pte(0, 0, page_table_entry); } goto no_upage; } physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(vaddr); pseudo_physpage = xen_m2p(physpage); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx\n", 
mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX)); } *paddr = pseudo_physpage + PAGEOFFSET(vaddr); if (verbose) { physpage = PAE_PAGEBASE(physpage); fprintf(fp, " PAGE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&physpage))); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&pseudo_physpage))); pte = pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX); x86_translate_pte(0, 0, pte); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. */ static int x86_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; char buf[BUFSIZE]; if (!IS_KVADDR(kvaddr)) return FALSE; if (XEN_HYPER_MODE()) { if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } pgd = (ulong *)symbol_value("idle_pg_table_l2"); } else { if (x86_kvtop_remap(kvaddr, paddr)) { if (!verbose) return TRUE; } else if (!vt->vmalloc_start) { *paddr = VTOP(kvaddr); return TRUE; } else if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); if (!verbose) return TRUE; } if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) return (x86_kvtop_xen_wpt(tc, kvaddr, paddr, verbose)); pgd = (ulong *)vt->kernel_pgd[0]; } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + (kvaddr >> PGDIR_SHIFT); FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_dir)), pgd_pte); if (!pgd_pte) goto no_kpage; if (pgd_pte & _PAGE_4M) { if 
(verbose) { fprintf(fp, " PAGE: %s (4MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pgd_pte)))); x86_translate_pte(pgd_pte, 0, 0); } *paddr = NONPAE_PAGEBASE(pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK); return TRUE; } page_middle = page_dir; FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_middle)), pmd_pte); if (!pmd_pte) goto no_kpage; #ifdef PTES_IN_LOWMEM page_table = (ulong *)(PTOV(NONPAE_PAGEBASE(pmd_pte)) + ((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2))); FILL_PTBL(NONPAE_PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); #else page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) + ((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2))); FILL_PTBL(NONPAE_PAGEBASE(page_table), PHYSADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); #endif if (verbose) fprintf(fp, " PTE: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_table)), pte); if (!(pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { if (pte && verbose) { fprintf(fp, "\n"); x86_translate_pte(pte, 0, 0); } goto no_kpage; } if (verbose) { fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pte)))); x86_translate_pte(pte, 0, 0); } *paddr = NONPAE_PAGEBASE(pte) + PAGEOFFSET(kvaddr); return TRUE; no_kpage: return FALSE; } static int x86_kvtop_xen_wpt(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *machine_page_table, *pseudo_page_table; ulong pgd_pte, pseudo_pgd_pte; ulong pmd_pte; ulong machine_pte, pseudo_pte; char buf[BUFSIZE]; pgd = (ulong *)vt->kernel_pgd[0]; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + (kvaddr >> PGDIR_SHIFT); FILL_PGD(NONPAE_PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = 
ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_dir)), pgd_pte); if (!pgd_pte) goto no_kpage; if (pgd_pte & _PAGE_4M) { if (verbose) fprintf(fp, " PAGE: %s (4MB) [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pgd_pte)))); pseudo_pgd_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(pgd_pte)); if (pseudo_pgd_pte == XEN_MFN_NOT_FOUND) { if (verbose) fprintf(fp, " PAGE: page not available\n"); *paddr = PADDR_NOT_AVAILABLE; return FALSE; } pseudo_pgd_pte |= PAGEOFFSET(pgd_pte); if (verbose) { fprintf(fp, " PAGE: %s (4MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pseudo_pgd_pte)))); x86_translate_pte(pseudo_pgd_pte, 0, 0); } *paddr = NONPAE_PAGEBASE(pseudo_pgd_pte) + (kvaddr & ~_4MB_PAGE_MASK); return TRUE; } page_middle = page_dir; FILL_PMD(NONPAE_PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)page_middle)), pmd_pte); if (!pmd_pte) goto no_kpage; machine_page_table = (ulong *)((NONPAE_PAGEBASE(pmd_pte)) + ((kvaddr>>10) & ((PTRS_PER_PTE-1)<<2))); pseudo_page_table = (ulong *) xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_page_table)); FILL_PTBL(NONPAE_PAGEBASE(pseudo_page_table), PHYSADDR, PAGESIZE()); machine_pte = ULONG(machdep->ptbl + PAGEOFFSET(machine_page_table)); if (verbose) { fprintf(fp, " PTE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)machine_page_table))); fprintf(fp, " PTE: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pseudo_page_table + PAGEOFFSET(machine_page_table))), machine_pte); } if (!(machine_pte & (_PAGE_PRESENT | _PAGE_PROTNONE))) { if (machine_pte && verbose) { fprintf(fp, "\n"); x86_translate_pte(machine_pte, 0, 0); } goto no_kpage; } pseudo_pte = xen_m2p_nonPAE(NONPAE_PAGEBASE(machine_pte)); 
pseudo_pte |= PAGEOFFSET(machine_pte); if (verbose) { fprintf(fp, " PAGE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(machine_pte)))); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(NONPAE_PAGEBASE(pseudo_pte)))); x86_translate_pte(pseudo_pte, 0, 0); } *paddr = NONPAE_PAGEBASE(pseudo_pte) + PAGEOFFSET(kvaddr); return TRUE; no_kpage: return FALSE; } static int x86_kvtop_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulonglong *pgd; ulonglong page_dir_entry; ulonglong page_middle; ulonglong page_middle_entry; ulonglong page_table; ulonglong page_table_entry; ulonglong physpage; ulonglong ull; char buf[BUFSIZE]; ulong offset; if (!IS_KVADDR(kvaddr)) return FALSE; if (XEN_HYPER_MODE()) { if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } if (symbol_exists("idle_pg_table_l3")) pgd = (ulonglong *)symbol_value("idle_pg_table_l3"); else pgd = (ulonglong *)symbol_value("idle_pg_table"); } else { if (x86_kvtop_remap(kvaddr, paddr)) { if (!verbose) return TRUE; } else if (!vt->vmalloc_start) { *paddr = VTOP(kvaddr); return TRUE; } else if (!IS_VMALLOC_ADDR(kvaddr)) { *paddr = VTOP(kvaddr); if (!verbose) return TRUE; } if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) return (x86_kvtop_xen_wpt_PAE(tc, kvaddr, paddr, verbose)); pgd = (ulonglong *)vt->kernel_pgd[0]; } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong); page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); if (verbose) fprintf(fp, " PGD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), page_dir_entry); if (!(page_dir_entry & _PAGE_PRESENT)) { goto no_kpage; } page_middle = PAE_PAGEBASE(page_dir_entry); FILL_PMD_PAE(page_middle, PHYSADDR, PAGESIZE()); offset = ((kvaddr >> PMD_SHIFT) & 
(PTRS_PER_PMD-1)) * sizeof(ulonglong); page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_middle_entry); } if (!(page_middle_entry & _PAGE_PRESENT)) { goto no_kpage; } if (page_middle_entry & _PAGE_PSE) { if (verbose) { ull = PAE_PAGEBASE(page_middle_entry); fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_middle_entry); } physpage = PAE_PAGEBASE(page_middle_entry) + (kvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } page_table = PAE_PAGEBASE(page_middle_entry); FILL_PTBL_PAE(page_table, PHYSADDR, PAGESIZE()); offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); page_table_entry = *((ulonglong *)&machdep->ptbl[offset]); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_table_entry); } if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { if (page_table_entry && verbose) { fprintf(fp, "\n"); x86_translate_pte(0, 0, page_table_entry); } goto no_kpage; } physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr); *paddr = physpage; if (verbose) { ull = PAE_PAGEBASE(page_table_entry); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_table_entry); } return TRUE; no_kpage: return FALSE; } static int x86_kvtop_xen_wpt_PAE(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulonglong *pgd; ulonglong page_dir_entry; ulonglong page_middle, pseudo_page_middle; ulonglong page_middle_entry; ulonglong page_table, pseudo_page_table; ulonglong page_table_entry, pte; ulonglong physpage, pseudo_physpage; ulonglong ull; ulong offset; char buf[BUFSIZE]; pgd = (ulonglong *)vt->kernel_pgd[0]; if (verbose) fprintf(fp, "PAGE DIRECTORY: 
%lx\n", (ulong)pgd); FILL_PGD(pgd, KVADDR, PTRS_PER_PGD * sizeof(ulonglong)); offset = ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) * sizeof(ulonglong); page_dir_entry = *((ulonglong *)&machdep->pgd[offset]); if (verbose) fprintf(fp, " PGD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), page_dir_entry); if (!(page_dir_entry & _PAGE_PRESENT)) { goto no_kpage; } page_middle = PAE_PAGEBASE(page_dir_entry); pseudo_page_middle = xen_m2p(page_middle); if (verbose) fprintf(fp, " PGD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR((ulong)pgd + offset)), pseudo_page_middle | PAGEOFFSET(page_dir_entry) | (page_dir_entry & _PAGE_NX)); FILL_PMD_PAE(pseudo_page_middle, PHYSADDR, PAGESIZE()); offset = ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)) * sizeof(ulonglong); page_middle_entry = *((ulonglong *)&machdep->pmd[offset]); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_middle_entry); } if (!(page_middle_entry & _PAGE_PRESENT)) { goto no_kpage; } if (page_middle_entry & _PAGE_PSE) { error(FATAL, "_PAGE_PSE in an mfn not supported\n"); /* XXX */ if (verbose) { ull = PAE_PAGEBASE(page_middle_entry); fprintf(fp, " PAGE: %s (2MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull))); x86_translate_pte(0, 0, page_middle_entry); } physpage = PAE_PAGEBASE(page_middle_entry) + (kvaddr & ~_2MB_PAGE_MASK); *paddr = physpage; return TRUE; } page_table = PAE_PAGEBASE(page_middle_entry); pseudo_page_table = xen_m2p(page_table); if (verbose) { ull = page_middle + offset; fprintf(fp, " PMD: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), pseudo_page_table | PAGEOFFSET(page_middle_entry) | (page_middle_entry & _PAGE_NX)); } FILL_PTBL_PAE(pseudo_page_table, PHYSADDR, PAGESIZE()); offset = ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)) * sizeof(ulonglong); page_table_entry = *((ulonglong 
*)&machdep->ptbl[offset]); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), page_table_entry); } if (!(page_table_entry & (_PAGE_PRESENT | _PAGE_PROTNONE))) { if (page_table_entry && verbose) { fprintf(fp, "\n"); x86_translate_pte(0, 0, page_table_entry); } goto no_kpage; } physpage = PAE_PAGEBASE(page_table_entry) + PAGEOFFSET(kvaddr); pseudo_physpage = xen_m2p(physpage); if (verbose) { ull = page_table + offset; fprintf(fp, " PTE: %s => %llx\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&ull)), pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX)); } *paddr = pseudo_physpage + PAGEOFFSET(kvaddr); if (verbose) { physpage = PAE_PAGEBASE(physpage); fprintf(fp, " PAGE: %s [machine]\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&physpage))); fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&pseudo_physpage))); pte = pseudo_physpage | PAGEOFFSET(page_table_entry) | (page_table_entry & _PAGE_NX); x86_translate_pte(0, 0, pte); } return TRUE; no_kpage: return FALSE; } void x86_clear_machdep_cache(void) { machdep->machspec->last_pmd_read_PAE = 0; machdep->machspec->last_ptbl_read_PAE = 0; } /* * Get the relevant page directory pointer from a task structure. */ static ulong x86_get_task_pgd(ulong task) { long offset; ulong cr3; offset = OFFSET_OPTION(task_struct_thread, task_struct_tss); if (INVALID_MEMBER(thread_struct_cr3)) error(FATAL, "cr3 does not exist in this kernel's thread_struct\n"); offset += OFFSET(thread_struct_cr3); readmem(task + offset, KVADDR, &cr3, sizeof(ulong), "task thread cr3", FAULT_ON_ERROR); return(PTOV(cr3)); } /* * Calculate and return the speed of the processor. 
*/ ulong x86_processor_speed(void) { unsigned long cpu_hz, cpu_khz; if (machdep->mhz) return (machdep->mhz); if (symbol_exists("cpu_hz")) { get_symbol_data("cpu_hz", sizeof(long), &cpu_hz); if (cpu_hz) return (machdep->mhz = cpu_hz/1000000); } if (symbol_exists("cpu_khz")) { get_symbol_data("cpu_khz", sizeof(long), &cpu_khz); if (cpu_khz) return(machdep->mhz = cpu_khz/1000); } return 0; } void x86_dump_machdep_table(ulong arg) { int others; ulong xen_wpt; char buf[BUFSIZE]; struct machine_specific *ms; int i, max_numnodes; switch (arg) { default: break; } others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PAE) fprintf(fp, "%sPAE", others++ ? "|" : ""); if (machdep->flags & OMIT_FRAME_PTR) fprintf(fp, "%sOMIT_FRAME_PTR", others++ ? "|" : ""); if (machdep->flags & FRAMESIZE_DEBUG) fprintf(fp, "%sFRAMESIZE_DEBUG", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: x86_eframe_search()\n"); fprintf(fp, " back_trace: x86_back_trace_cmd()\n"); fprintf(fp, "get_processor_speed: x86_processor_speed()\n"); xen_wpt = XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES); if (machdep->flags & PAE) { fprintf(fp, " uvtop: %s()\n", xen_wpt ? 
"x86_uvtop_xen_wpt_PAE" : "x86_uvtop_PAE"); fprintf(fp, " kvtop: x86_kvtop_PAE()%s\n", xen_wpt ? " -> x86_kvtop_xen_wpt_PAE()" : ""); } else { fprintf(fp, " uvtop: %s()\n", xen_wpt ? "x86_uvtop_xen_wpt" : "x86_uvtop"); fprintf(fp, " kvtop: x86_kvtop()%s\n", xen_wpt ? " -> x86_kvtop_xen_wpt()" : ""); } fprintf(fp, " get_task_pgd: x86_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_stack_frame: x86_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: x86_translate_pte()\n"); fprintf(fp, " memory_size: x86_memory_size()\n"); fprintf(fp, " vmalloc_start: x86_vmalloc_start()\n"); fprintf(fp, " is_task_addr: x86_is_task_addr()\n"); fprintf(fp, " verify_symbol: x86_verify_symbol()\n"); fprintf(fp, " dis_filter: x86_dis_filter()\n"); fprintf(fp, " cmd_mach: x86_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: x86_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: %s\n", COMMON_VADDR_SPACE() ? "x86_is_uvaddr()" : "generic_is_uvaddr()"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: x86_init_kernel_pgd()\n"); fprintf(fp, " value_to_symbol: %s\n", machdep->value_to_symbol == generic_machdep_value_to_symbol ? "generic_machdep_value_to_symbol()" : "x86_is_entry_tramp_address()"); fprintf(fp, " line_number_hooks: %s\n", machdep->line_number_hooks ? 
"x86_line_number_hooks" : "(not used)"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " xendump_p2m_create: x86_xendump_p2m_create()\n"); fprintf(fp, " xendump_p2m_create: %s\n", PVOPS_XEN() ? "x86_pvops_xendump_p2m_create()" : "x86_xendump_p2m_create()"); fprintf(fp, " xendump_panic_task: x86_xendump_panic_task()\n"); fprintf(fp, " get_xendump_regs: x86_get_xendump_regs()\n"); fprintf(fp, "xen_kdump_p2m_create: x86_xen_kdump_p2m_create()\n"); fprintf(fp, "clear_machdep_cache: x86_clear_machdep_cache()\n"); fprintf(fp, " INT_EFRAME_[reg]:\n"); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "SS: "), INT_EFRAME_SS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ESP: "), INT_EFRAME_ESP); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EFLAGS: "), INT_EFRAME_EFLAGS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "CS: "), INT_EFRAME_CS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "IP: "), INT_EFRAME_EIP); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ERR: "), INT_EFRAME_ERR); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ES: "), INT_EFRAME_ES); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "DS: "), INT_EFRAME_DS); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EAX: "), INT_EFRAME_EAX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EBP: "), INT_EFRAME_EBP); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EDI: "), INT_EFRAME_EDI); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ESI: 
"), INT_EFRAME_ESI); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EDX: "), INT_EFRAME_EDX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "ECX: "), INT_EFRAME_ECX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "EBX: "), INT_EFRAME_EBX); fprintf(fp, "%s %d\n", mkstring(buf, 21, RJUST, "GS: "), INT_EFRAME_GS); fprintf(fp, " machspec: x86_machine_specific\n"); fprintf(fp, " idt_table: %lx\n", (ulong)machdep->machspec->idt_table); fprintf(fp, " entry_tramp_start: %lx\n", machdep->machspec->entry_tramp_start); fprintf(fp, " entry_tramp_end: %lx\n", machdep->machspec->entry_tramp_end); fprintf(fp, " entry_tramp_start_phys: %llx\n", machdep->machspec->entry_tramp_start_phys); fprintf(fp, " last_pmd_read_PAE: %llx\n", machdep->machspec->last_pmd_read_PAE); fprintf(fp, " last_ptbl_read_PAE: %llx\n", machdep->machspec->last_ptbl_read_PAE); fprintf(fp, " page_protnone: %lx\n", machdep->machspec->page_protnone); ms = machdep->machspec; max_numnodes = ms->max_numnodes; fprintf(fp, " MAX_NUMNODES: "); if (max_numnodes < 0) { fprintf(fp, "(unused)\n"); } else { fprintf(fp, "%d\n", max_numnodes); fprintf(fp, " remap_start_vaddr:"); for (i = 0; i < max_numnodes; ++i) { if ((i % 8) == 0) fprintf(fp, "\n "); fprintf(fp, "%08lx ", ms->remap_start_vaddr[i]); } fprintf(fp, "\n"); fprintf(fp, " remap_end_vaddr:"); for (i = 0; i < max_numnodes; ++i) { if ((i % 8) == 0) fprintf(fp, "\n "); fprintf(fp, "%08lx ", ms->remap_end_vaddr[i]); } fprintf(fp, "\n"); fprintf(fp, " remap_start_pfn:"); for (i = 0; i < max_numnodes; ++i) { if ((i % 8) == 0) fprintf(fp, "\n "); fprintf(fp, "%08lx ", ms->remap_start_pfn[i]); } fprintf(fp, "\n"); } } /* * Get a stack frame combination of pc and ra from the most relevent spot. */ static void x86_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (pcp) *pcp = x86_get_pc(bt); if (spp) *spp = x86_get_sp(bt); } /* * Get the saved PC from a user-space copy of the kernel stack. 
*/ static ulong x86_get_pc(struct bt_info *bt) { ulong offset; ulong eip, inactive_task_frame; if (tt->flags & THREAD_INFO) { if (VALID_MEMBER(task_struct_thread_eip)) readmem(bt->task + OFFSET(task_struct_thread_eip), KVADDR, &eip, sizeof(void *), "thread_struct eip", FAULT_ON_ERROR); else if (VALID_MEMBER(inactive_task_frame_ret_addr)) { readmem(bt->task + OFFSET(task_struct_thread_esp), KVADDR, &inactive_task_frame, sizeof(void *), "task_struct.inactive_task_frame", FAULT_ON_ERROR); readmem(inactive_task_frame + OFFSET(inactive_task_frame_ret_addr), KVADDR, &eip, sizeof(void *), "inactive_task_frame.ret_addr", FAULT_ON_ERROR); } else error(FATAL, "cannot determine ip address\n"); return eip; } offset = OFFSET_OPTION(task_struct_thread_eip, task_struct_tss_eip); return GET_STACK_ULONG(offset); } /* * Get the saved SP from a user-space copy of the kernel stack if it * cannot be found in the panic_ksp array. */ static ulong x86_get_sp(struct bt_info *bt) { ulong offset, ksp; if (get_panic_ksp(bt, &ksp)) return ksp; if (tt->flags & THREAD_INFO) { readmem(bt->task + OFFSET(task_struct_thread_esp), KVADDR, &ksp, sizeof(void *), "thread_struct esp", FAULT_ON_ERROR); if (VALID_MEMBER(inactive_task_frame_ret_addr)) ksp += OFFSET(inactive_task_frame_ret_addr); return ksp; } offset = OFFSET_OPTION(task_struct_thread_esp, task_struct_tss_esp); return GET_STACK_ULONG(offset); } /* * Translate a PTE, returning TRUE if the page is _PAGE_PRESENT. * If a physaddr pointer is passed in, don't print anything. 
*/ static int x86_translate_pte(ulong pte, void *physaddr, ulonglong pae_pte) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; ulonglong paddr; int nx_bit_set; nx_bit_set = FALSE; if (machdep->flags & PAE) { paddr = PAE_PAGEBASE(pae_pte); sprintf(ptebuf, "%llx", pae_pte); if (pae_pte & _PAGE_NX) nx_bit_set = TRUE; pte = (ulong)pae_pte; } else { paddr = NONPAE_PAGEBASE(pte); sprintf(ptebuf, "%lx", pte); } page_present = (pte & (_PAGE_PRESENT|_PAGE_PROTNONE)); if (physaddr) { if (machdep->flags & PAE) *((ulonglong *)physaddr) = paddr; else *((ulong *)physaddr) = (ulong)paddr; return page_present; } len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(machdep->flags & PAE ? pae_pte : pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte & _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (pte & _PAGE_USER) fprintf(fp, "%sUSER", others++ ? 
"|" : ""); if (pte & _PAGE_PWT) fprintf(fp, "%sPWT", others++ ? "|" : ""); if (pte & _PAGE_PCD) fprintf(fp, "%sPCD", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if ((pte & _PAGE_PSE) && (pte && _PAGE_PRESENT)) fprintf(fp, "%sPSE", others++ ? "|" : ""); if (pte & _PAGE_GLOBAL) fprintf(fp, "%sGLOBAL", others++ ? "|" : ""); if (pte & _PAGE_PROTNONE && !(pte && _PAGE_PRESENT)) fprintf(fp, "%sPROTNONE", others++ ? "|" : ""); if (nx_bit_set) fprintf(fp, "%sNX", others++ ? "|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return page_present; } /* * For the time being, walk through the kernel page directory looking * for the 4MB PTEs. Zones might make this common code in the future. */ static uint64_t x86_memory_size(void) { int i, j; ulong *pp; ulong kpgd[PTRS_PER_PGD]; uint64_t vm_total; uint64_t pgd_total; if (machdep->memsize) return machdep->memsize; if (!(machdep->flags & PAE)) { readmem(vt->kernel_pgd[0], KVADDR, kpgd, sizeof(ulong) * PTRS_PER_PGD, "kernel page directory", FAULT_ON_ERROR); for (i = j = 0, pp = &kpgd[0]; i < PTRS_PER_PGD; i++, pp++) { if ((*pp & (_PAGE_PRESENT|_PAGE_4M)) == (_PAGE_PRESENT|_PAGE_4M) ) { j++; } } pgd_total = (uint64_t)j * (uint64_t)(MEGABYTES(4)); } else pgd_total = 0; /* * Use the memory node data (or its equivalent) if it's larger than * the page directory total. */ vm_total = total_node_memory(); machdep->memsize = MAX(pgd_total, vm_total); return (machdep->memsize); } /* * Determine where vmalloc'd memory starts. */ static ulong x86_vmalloc_start(void) { return (first_vmalloc_address()); } /* * Do the work for cmd_irq() -d option. 
*/ void x86_display_idt_table(void) { int i; ulong *ip; char buf[BUFSIZE]; ip = read_idt_table(READ_IDT_RUNTIME); for (i = 0; i < 256; i++, ip += 2) { if (i < 10) fprintf(fp, " "); else if (i < 100) fprintf(fp, " "); fprintf(fp, "[%d] %s\n", i, extract_idt_function(ip, buf, NULL)); } } /* * Extract the function name out of the IDT entry. */ static char * extract_idt_function(ulong *ip, char *buf, ulong *retaddr) { ulong i1, i2, addr; char locbuf[BUFSIZE]; physaddr_t phys; if (buf) BZERO(buf, BUFSIZE); i1 = *ip; i2 = *(ip+1); i1 &= 0x0000ffff; i2 &= 0xffff0000; addr = i1 | i2; if (retaddr) *retaddr = addr; if (!buf) return NULL; value_to_symstr(addr, locbuf, 0); if (strlen(locbuf)) sprintf(buf, "%s", locbuf); else { sprintf(buf, "%08lx", addr); if (kvtop(NULL, addr, &phys, 0)) { addr = machdep->kvbase + (ulong)phys; if (value_to_symstr(addr, locbuf, 0)) { strcat(buf, " <"); strcat(buf, locbuf); strcat(buf, ">"); } } } return buf; } /* * Read the IDT table into a (hopefully) malloc'd buffer. 
*/ static ulong * read_idt_table(int flag) { ulong *idt, addr, offset; physaddr_t phys; long desc_struct_size; struct syment *sp; struct machine_specific *ms; idt = NULL; ms = machdep->machspec; if (ms->idt_table) return ms->idt_table; desc_struct_size = SIZE(desc_struct) * 256; switch (flag) { case READ_IDT_INIT: if (!symbol_exists("idt_table")) return NULL; if (!(idt = (ulong *)malloc(desc_struct_size))) { error(WARNING, "cannot malloc idt_table\n\n"); return NULL; } if (!readmem(symbol_value("idt_table"), KVADDR, idt, desc_struct_size, "idt_table", RETURN_ON_ERROR)) { error(WARNING, "cannot read idt_table\n\n"); return NULL; } ms->idt_table = idt; addr = 0; extract_idt_function(idt, NULL, &addr); if (addr) { if (symbol_exists("__entry_tramp_start") && symbol_exists("__entry_tramp_end") && symbol_exists("__start___entry_text")) { ms->entry_tramp_start = symbol_value("__start___entry_text"); ms->entry_tramp_end = ms->entry_tramp_start + (symbol_value("__entry_tramp_end") - symbol_value("__entry_tramp_start")); ms->entry_tramp_start_phys = 0; machdep->value_to_symbol = x86_is_entry_tramp_address; } else if (!(sp = value_search(addr, &offset))) { addr = VIRTPAGEBASE(addr); if (kvtop(NULL, addr, &phys, 0) && (sp = value_search(PTOV(phys), &offset)) && STREQ(sp->name, "entry_tramp_start")) { ms->entry_tramp_start = addr; ms->entry_tramp_start_phys = phys; ms->entry_tramp_end = addr + (symbol_value("entry_tramp_end") - symbol_value("entry_tramp_start")); machdep->value_to_symbol = x86_is_entry_tramp_address; } } } break; case READ_IDT_RUNTIME: if (!symbol_exists("idt_table")) error(FATAL, "idt_table does not exist on this architecture\n"); idt = (ulong *)GETBUF(desc_struct_size); readmem(symbol_value("idt_table"), KVADDR, idt, desc_struct_size, "idt_table", FAULT_ON_ERROR); break; } return idt; } /* * If the address fits in the entry_tramp_start page, find the syment * associated with it. 
*/ struct syment * x86_is_entry_tramp_address(ulong vaddr, ulong *retoffset) { struct syment *sp; struct machine_specific *ms; ulong addr, offset; ms = machdep->machspec; if (!ms->entry_tramp_start || !((vaddr >= ms->entry_tramp_start) && (vaddr <= ms->entry_tramp_end))) return NULL; /* * Check new vs. old style handling of entry_tramp addresses: * * - The old way requires creation of the real symbol address from * the entry_tramp address passed in. * - The new way just uses the absolute (A) symbols that are built * in using the entry_tramp addresses, w/no phys address required. */ if (ms->entry_tramp_start_phys) /* old */ addr = machdep->kvbase + (ulong)ms->entry_tramp_start_phys + PAGEOFFSET(vaddr); else /* new */ addr = vaddr; if ((sp = value_search_base_kernel(addr, &offset))) { if (retoffset) *retoffset = offset; if (CRASHDEBUG(4)) console("x86_is_entry_tramp_address: %lx: %s %lx+%ld\n", vaddr, sp->name, sp->value, offset); if (STREQ(sp->name, "entry_tramp_start")) sp++; } return sp; } /* * X86 tasks are all stacksize-aligned, except when split from the stack. */ static int x86_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * Keep or reject a symbol from the namelist. 
*/ static int x86_verify_symbol(const char *name, ulong value, char type) { if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift")) return TRUE; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_text") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (STREQ(name, "Letext") || STREQ(name, "gcc2_compiled.")) return FALSE; return TRUE; } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int x86_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on x86) so this routine both fixes the * references as well as imposing the current output radix on the translations. */ if (CRASHDEBUG(1)) console("IN: %s", inbuf); colon = (inbuf[0] != ' ') ? strstr(inbuf, ":") : NULL; if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } else if (STREQ(argv[argc-2], "call") && hexadecimal(argv[argc-1], 0)) { /* * Update module code of the form: * * call 0xe081e1e0 * * to show a bracketed direct call target. 
*/ p1 = &LASTCHAR(inbuf); if (extract_hex(argv[argc-1], &value, NULLCHAR, TRUE)) { sprintf(buf1, " <%s>\n", value_to_symstr(value, buf2, output_radix)); if (IS_MODULE_VADDR(value) && !strstr(buf2, "+")) sprintf(p1, "%s", buf1); } } else if (STREQ(argv[2], "ud2a")) pc->curcmd_flags |= UD2A_INSTRUCTION; else if (STREQ(argv[2], "(bad)")) pc->curcmd_flags |= BAD_INSTRUCTION; if (CRASHDEBUG(1)) console(" %s", inbuf); return TRUE; } /* * Override smp_num_cpus if possible and necessary. */ int x86_get_smp_cpus(void) { int count, cpucount; if ((count = get_cpus_online()) == 0) { count = kt->cpus; if (symbol_exists("cpucount")) { get_symbol_data("cpucount", sizeof(int), &cpucount); cpucount++; count = MAX(cpucount, kt->cpus); } } if (XEN() && (count == 1) && symbol_exists("cpu_present_map")) { ulong cpu_present_map; get_symbol_data("cpu_present_map", sizeof(ulong), &cpu_present_map); cpucount = count_bits_long(cpu_present_map); count = MAX(cpucount, kt->cpus); } if (KVMDUMP_DUMPFILE() && (count < get_cpus_present())) return(get_highest_cpu_present()+1); return MAX(count, get_highest_cpu_online()+1); } /* * Machine dependent command. */ void x86_cmd_mach(void) { int c, cflag, mflag; unsigned int radix; cflag = mflag = radix = 0; while ((c = getopt(argcnt, args, "cmxd")) != EOF) { switch(c) { case 'c': cflag++; break; case 'm': mflag++; x86_display_memmap(); break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (cflag) x86_display_cpu_data(radix); if (!cflag && !mflag) x86_display_machine_stats(); } /* * "mach" command output. 
*/ static void x86_display_machine_stats(void) { int c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); if (!STREQ(kt->hypervisor, "(undetermined)") && !STREQ(kt->hypervisor, "bare hardware")) fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (tt->flags & IRQSTACKS) { fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " HARD IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->hardirq_ctx[c]) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]); } fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " SOFT IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->softirq_ctx) break; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]); } } } static void x86_display_cpu_data(unsigned int radix) { int cpu; ulong cpu_data = 0; if (symbol_exists("cpu_data")) cpu_data = symbol_value("cpu_data"); else if (symbol_exists("boot_cpu_data")) cpu_data = symbol_value("boot_cpu_data"); for (cpu = 0; cpu < kt->cpus; cpu++) { fprintf(fp, "%sCPU %d:\n", cpu ? 
"\n" : "", cpu); dump_struct("cpuinfo_x86", cpu_data, radix); cpu_data += SIZE(cpuinfo_x86); } } static char *e820type[] = { "(invalid type)", "E820_RAM", "E820_RESERVED", "E820_ACPI", "E820_NVS", "E820_UNUSABLE", }; static void x86_display_memmap(void) { ulong e820; int nr_map, i; char *buf, *e820entry_ptr; ulonglong addr, size; uint type; if (kernel_symbol_exists("e820")) { if (get_symbol_type("e820", NULL, NULL) == TYPE_CODE_PTR) get_symbol_data("e820", sizeof(void *), &e820); else e820 = symbol_value("e820"); } else if (kernel_symbol_exists("e820_table")) get_symbol_data("e820_table", sizeof(void *), &e820); else error(FATAL, "neither e820 or e820_table symbols exist\n"); if (CRASHDEBUG(1)) { if (STRUCT_EXISTS("e820map")) dump_struct("e820map", e820, RADIX(16)); else if (STRUCT_EXISTS("e820_table")) dump_struct("e820_table", e820, RADIX(16)); } buf = (char *)GETBUF(SIZE(e820map)); readmem(e820, KVADDR, &buf[0], SIZE(e820map), "e820map", FAULT_ON_ERROR); nr_map = INT(buf + OFFSET(e820map_nr_map)); fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); for (i = 0; i < nr_map; i++) { e820entry_ptr = buf + sizeof(int) + (SIZE(e820entry) * i); addr = ULONGLONG(e820entry_ptr + OFFSET(e820entry_addr)); size = ULONGLONG(e820entry_ptr + OFFSET(e820entry_size)); type = UINT(e820entry_ptr + OFFSET(e820entry_type)); fprintf(fp, "%016llx - %016llx ", addr, addr+size); if (type >= (sizeof(e820type)/sizeof(char *))) { if (type == 12) fprintf(fp, "E820_PRAM\n"); else if (type == 128) fprintf(fp, "E820_RESERVED_KERN\n"); else fprintf(fp, "type %d\n", type); } else fprintf(fp, "%s\n", e820type[type]); } } /* * Check a few functions to determine whether the kernel was built * with the -fomit-frame-pointer flag. 
*/ #define PUSH_BP_MOV_ESP_BP 0xe58955 #define PUSH_BP_CLR_EAX_MOV_ESP_BP 0xe589c03155ULL static int x86_omit_frame_pointer(void) { ulonglong push_bp_mov_esp_bp; int i; char *checkfuncs[] = {"sys_open", "sys_fork", "sys_read"}; if (pc->flags & KERNEL_DEBUG_QUERY) return FALSE; for (i = 0; i < 2; i++) { if (!readmem(symbol_value(checkfuncs[i]), KVADDR, &push_bp_mov_esp_bp, sizeof(ulonglong), "x86_omit_frame_pointer", RETURN_ON_ERROR)) return TRUE; if (!(((push_bp_mov_esp_bp & 0x0000ffffffULL) == PUSH_BP_MOV_ESP_BP) || ((push_bp_mov_esp_bp & 0xffffffffffULL) == PUSH_BP_CLR_EAX_MOV_ESP_BP))) return TRUE; } return FALSE; } /* * Disassemble an address and determine whether the instruction calls * a function; if so, return a pointer to the name of the called function. */ char * x86_function_called_by(ulong eip) { struct syment *sp; char buf[BUFSIZE], *p1, *p2, *funcname; ulong value, offset; unsigned char byte; funcname = NULL; if (!readmem(eip, KVADDR, &byte, sizeof(unsigned char), "call byte", RETURN_ON_ERROR)) return funcname; if (byte != 0xe8) return funcname; sprintf(buf, "x/i 0x%lx", eip); open_tmpfile2(); if (gdb_pass_through(buf, pc->tmpfile2, GNU_RETURN_ON_ERROR)) { rewind(pc->tmpfile2); while (fgets(buf, BUFSIZE, pc->tmpfile2)) { if ((p1 = strstr(buf, "call "))) { p1 += strlen("call "); if ((p2 = strstr(p1, " <"))) { p2 += strlen(" <"); if ((p1 = strstr(p2, ">"))) *p1 = NULLCHAR; if ((sp = symbol_search(p2))) funcname = sp->name; } else if ((p2 = strstr(p1, "0x"))) { if (!extract_hex(strip_linefeeds(p2), &value, NULLCHAR, TRUE)) continue; if ((sp = value_search(value, &offset)) && !offset) funcname = sp->name; } } } } close_tmpfile2(); return funcname; } struct syment * x86_text_lock_jmp(ulong eip, ulong *offset) { int i, c; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *arglist[MAXARGS]; struct syment *sp; ulong value; sprintf(buf1, "x/10i 0x%lx", eip); buf2[0] = NULLCHAR; value = 0; open_tmpfile2(); if (gdb_pass_through(buf1, pc->tmpfile2, 
GNU_RETURN_ON_ERROR)) { rewind(pc->tmpfile2); while (fgets(buf1, BUFSIZE, pc->tmpfile2)) { if (!(c = parse_line(buf1, arglist))) continue; for (i = 0; i < c; i++) { if (STREQ(arglist[i], "jmp") && ((i+1)kernel_pgd[i] = value; } static ulong xen_m2p_nonPAE(ulong machine) { ulonglong pseudo; pseudo = xen_m2p((ulonglong)machine); if (pseudo == XEN_MACHADDR_NOT_FOUND) return XEN_MFN_NOT_FOUND; return ((ulong)pseudo); } #include "netdump.h" #include "xen_dom0.h" /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. */ #define MAX_X86_FRAMES (16) #define MFNS_PER_FRAME (PAGESIZE()/sizeof(ulong)) static int x86_xen_kdump_p2m_create(struct xen_kdump_data *xkd) { int i, j; ulong kvaddr; ulong *up; ulonglong *ulp; ulong frames; ulong frame_mfn[MAX_X86_FRAMES] = { 0 }; int mfns[MAX_X86_FRAMES] = { 0 }; /* * Temporarily read physical (machine) addresses from vmcore. */ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); if (xkd->flags & KDUMP_CR3) goto use_cr3; xkd->p2m_frames = 0; if (CRASHDEBUG(1)) fprintf(fp, "x86_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->page, PAGESIZE(), "xen kdump p2m mfn page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn page\n"); if (CRASHDEBUG(1)) { up = (ulong *)xkd->page; for (i = 0; i < 4; i++) { fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", (ulong)((i * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } fprintf(fp, "\n"); } for (i = 0, up = (ulong *)xkd->page; i < MAX_X86_FRAMES; i++, up++) frame_mfn[i] = *up; for (i = 0; i < MAX_X86_FRAMES; i++) { if (!frame_mfn[i]) break; if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, xkd->page, PAGESIZE(), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn list page\n"); for (j = 0, up = (ulong *)xkd->page; j < 
MFNS_PER_FRAME; j++, up++) if (*up) mfns[i]++; xkd->p2m_frames += mfns[i]; if (CRASHDEBUG(7)) { up = (ulong *)xkd->page; for (j = 0; j < 256; j++) { fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", (ulong)((j * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } } } if (CRASHDEBUG(1)) fprintf(fp, "p2m_frames: %d\n", xkd->p2m_frames); if ((xkd->p2m_mfn_frame_list = (ulong *) malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) error(FATAL, "cannot malloc p2m_frame_index_list"); for (i = 0, frames = xkd->p2m_frames; frames; i++) { if (!readmem(PTOB(frame_mfn[i]), PHYSADDR, &xkd->p2m_mfn_frame_list[i * MFNS_PER_FRAME], mfns[i] * sizeof(ulong), "xen kdump p2m mfn list page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn list page\n"); frames -= mfns[i]; } if (CRASHDEBUG(2)) { for (i = 0; i < xkd->p2m_frames; i++) fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); fprintf(fp, "\n"); } pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; use_cr3: if (CRASHDEBUG(1)) fprintf(fp, "x86_xen_kdump_p2m_create: cr3: %lx\n", xkd->cr3); if (!readmem(PTOB(xkd->cr3), PHYSADDR, machdep->pgd, PAGESIZE(), "xen kdump cr3 page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump cr3 page\n"); if (CRASHDEBUG(7)) { fprintf(fp, "contents of page directory page:\n"); if (machdep->flags & PAE) { ulp = (ulonglong *)machdep->pgd; fprintf(fp, "%016llx %016llx %016llx %016llx\n", *ulp, *(ulp+1), *(ulp+2), *(ulp+3)); } else { up = (ulong *)machdep->pgd; for (i = 0; i < 256; i++) { fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", (ulong)((i * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } } } kvaddr = symbol_value("max_pfn"); if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) return FALSE; up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); xkd->p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) + ((*up%(PAGESIZE()/sizeof(ulong))) ? 
1 : 0); if (CRASHDEBUG(1)) fprintf(fp, "max_pfn at %lx: %lx (%ld) -> %d p2m_frames\n", kvaddr, *up, *up, xkd->p2m_frames); if ((xkd->p2m_mfn_frame_list = (ulong *) malloc(xkd->p2m_frames * sizeof(ulong))) == NULL) error(FATAL, "cannot malloc p2m_frame_index_list"); kvaddr = symbol_value("phys_to_machine_mapping"); if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) return FALSE; up = (ulong *)(xkd->page + PAGEOFFSET(kvaddr)); kvaddr = *up; if (CRASHDEBUG(1)) fprintf(fp, "phys_to_machine_mapping: %lx\n", kvaddr); if (CRASHDEBUG(7)) { fprintf(fp, "contents of first phys_to_machine_mapping page:\n"); if (!x86_xen_kdump_load_page(kvaddr, xkd->page)) error(INFO, "cannot read first phys_to_machine_mapping page\n"); up = (ulong *)xkd->page; for (i = 0; i < 256; i++) { fprintf(fp, "%08lx: %08lx %08lx %08lx %08lx\n", (ulong)((i * 4) * sizeof(ulong)), *up, *(up+1), *(up+2), *(up+3)); up += 4; } } machdep->last_ptbl_read = BADADDR; machdep->last_pmd_read = BADADDR; machdep->last_pgd_read = BADADDR; for (i = 0; i < xkd->p2m_frames; i++) { xkd->p2m_mfn_frame_list[i] = x86_xen_kdump_page_mfn(kvaddr); kvaddr += PAGESIZE(); } if (CRASHDEBUG(1)) { for (i = 0; i < xkd->p2m_frames; i++) fprintf(fp, "%lx ", xkd->p2m_mfn_frame_list[i]); fprintf(fp, "\n"); } machdep->last_ptbl_read = 0; machdep->last_pmd_read = 0; machdep->last_pgd_read = 0; pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; } /* * Find the page associate with the kvaddr, and read its contents * into the passed-in buffer. 
 */
/*
 *  Walk the (non-PAE) 2-level x86 page tables stored in the kdump to
 *  find the page backing kvaddr, and read it into pgbuf.  Returns pgbuf
 *  on success, NULL on a failed readmem.  NOTE: pgbuf is reused as
 *  scratch for the intermediate page-table page before the final read.
 */
static char *
x86_xen_kdump_load_page(ulong kvaddr, char *pgbuf)
{
	ulong *entry;
	ulong *up;
	ulong mfn;

	/* PAE kernels need the 3-level walk with 64-bit entries. */
	if (machdep->flags & PAE)
		return x86_xen_kdump_load_page_PAE(kvaddr, pgbuf);

	/* Level 1: pgd entry (cached in machdep->pgd) -> page-table mfn. */
	up = (ulong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (*entry) >> PAGESHIFT();

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump pgd entry", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return NULL;
	}

	/* Level 2: pte -> target page mfn. */
	up = (ulong *)pgbuf;
	entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1));
	mfn = (*entry) >> PAGESHIFT();

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen page table page", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find page table page\n");
		return NULL;
	}

	return pgbuf;
}

/*
 *  PAE variant of the above: 3-level walk with 64-bit entries
 *  (pgd -> pmd -> pte).
 */
static char *
x86_xen_kdump_load_page_PAE(ulong kvaddr, char *pgbuf)
{
	ulonglong *entry;
	ulonglong *up;
	ulong mfn;

	up = (ulonglong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump pgd entry", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return NULL;
	}

	up = (ulonglong *)pgbuf;
	entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump pmd entry", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find pmd entry from pgd\n");
		return NULL;
	}

	up = (ulonglong *)pgbuf;
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	if (!readmem(PTOB(mfn), PHYSADDR, pgbuf, PAGESIZE(),
	    "xen kdump page table page", RETURN_ON_ERROR)) {
		error(INFO, "cannot read/find page table page from pmd\n");
		return NULL;
	}

	return pgbuf;
}

/*
 *  Return the mfn value associated with a virtual address.
 */
/*
 *  Translate kvaddr to the mfn of the page that backs it, using the
 *  kdump's (non-PAE) 2-level page tables.  The last page-table page
 *  read is cached in machdep->ptbl (keyed by machdep->last_ptbl_read)
 *  to avoid re-reading it on consecutive lookups.
 */
static ulong
x86_xen_kdump_page_mfn(ulong kvaddr)
{
	ulong *entry;
	ulong *up;
	ulong mfn;

	if (machdep->flags & PAE)
		return x86_xen_kdump_page_mfn_PAE(kvaddr);

	up = (ulong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (*entry) >> PAGESHIFT();
	if ((mfn != machdep->last_ptbl_read) &&
	    !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(),
	    "xen kdump pgd entry", RETURN_ON_ERROR))
		error(FATAL,
		    "cannot read/find pgd entry from cr3 page (mfn: %lx)\n",
		    mfn);
	machdep->last_ptbl_read = mfn;

	up = (ulong *)machdep->ptbl;
	entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1));
	mfn = (*entry) >> PAGESHIFT();

	return mfn;
}

/*
 *  PAE variant: 3-level walk caching the pmd page in machdep->pmd and
 *  the page-table page in machdep->ptbl.
 */
static ulong
x86_xen_kdump_page_mfn_PAE(ulong kvaddr)
{
	ulonglong *entry;
	ulonglong *up;
	ulong mfn;

	up = (ulonglong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((mfn != machdep->last_pmd_read) &&
	    !readmem(PTOB(mfn), PHYSADDR, machdep->pmd, PAGESIZE(),
	    "xen kdump pgd entry", RETURN_ON_ERROR))
		error(FATAL,
		    "cannot read/find pgd entry from cr3 page (mfn: %lx)\n",
		    mfn);
	machdep->last_pmd_read = mfn;

	up = (ulonglong *)machdep->pmd;
	entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((mfn != machdep->last_ptbl_read) &&
	    !readmem(PTOB(mfn), PHYSADDR, machdep->ptbl, PAGESIZE(),
	    "xen kdump pmd entry", RETURN_ON_ERROR))
		error(FATAL,
		    "cannot read/find pmd entry from pgd (mfn: %lx)\n",
		    mfn);
	machdep->last_ptbl_read = mfn;

	up = (ulonglong *)machdep->ptbl;
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());

	return mfn;
}

#include "xendump.h"

/*
 *  Create an index of mfns for each page that makes up the
 *  kernel's complete phys_to_machine_mapping[max_pfn] array.
 */
static int
x86_xendump_p2m_create(struct xendump_data *xd)
{
	int i, idx;
	ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset;
	ulong *up;
	ulonglong *ulp;
	off_t offset;

	/*
	 *  Check for pvops Xen kernel before presuming it's HVM.
	 */
	if (symbol_exists("pv_init_ops") &&
	    (symbol_exists("xen_patch") ||
	     symbol_exists("paravirt_patch_default")) &&
	    (xd->xc_core.header.xch_magic == XC_CORE_MAGIC))
		return x86_pvops_xendump_p2m_create(xd);

	/* No p2m array at all: flag it and let readmem cope. */
	if (!symbol_exists("phys_to_machine_mapping")) {
		xd->flags |= XC_CORE_NO_P2M;
		return TRUE;
	}

	if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) ==
	    INVALID_OFFSET)
		error(FATAL,
		    "cannot determine vcpu_guest_context.ctrlreg offset\n");
	else if (CRASHDEBUG(1))
		fprintf(xd->ofp,
		    "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
		    ctrlreg_offset);

	/* Read the saved control registers from the dumpfile header. */
	offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset;
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to xch_ctxt_offset\n");
	if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg))
		error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n");

	/* cr3 stores the pgd mfn rotated; rotate it back. */
	mfn = (ctrlreg[3] >> PAGESHIFT()) |
	    (ctrlreg[3] << (BITS()-PAGESHIFT()));

	for (i = 0; CRASHDEBUG(1) && (i < 8); i++) {
		fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]);
		if (i == 3)
			fprintf(xd->ofp, " -> mfn: %lx", mfn);
		fprintf(xd->ofp, "\n");
	}

	if (!xc_core_mfn_to_page(mfn, machdep->pgd))
		error(FATAL, "cannot read/find cr3 page\n");
	machdep->last_pgd_read = mfn;

	if (CRASHDEBUG(1)) {
		fprintf(xd->ofp, "contents of page directory page:\n");
		if (machdep->flags & PAE) {
			ulp = (ulonglong *)machdep->pgd;
			fprintf(xd->ofp,
			    "%016llx %016llx %016llx %016llx\n",
			    *ulp, *(ulp+1), *(ulp+2), *(ulp+3));
		} else {
			up = (ulong *)machdep->pgd;
			for (i = 0; i < 256; i++) {
				fprintf(xd->ofp,
				    "%08lx: %08lx %08lx %08lx %08lx\n",
				    (ulong)((i * 4) * sizeof(ulong)),
				    *up, *(up+1), *(up+2), *(up+3));
				up += 4;
			}
		}
	}

	kvaddr = symbol_value("max_pfn");
	if (!x86_xendump_load_page(kvaddr, xd->page))
		return FALSE;
	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (CRASHDEBUG(1))
		fprintf(xd->ofp, "max_pfn: %lx\n", *up);

	/* Number of p2m pages: ceil(max_pfn / ulongs-per-page). */
	xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
	    ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);

	/* NOTE(review): allocates sizeof(int) per entry for a ulong list;
	   only safe because sizeof(int) == sizeof(ulong) on 32-bit x86 --
	   confirm before reusing elsewhere. */
	if ((xd->xc_core.p2m_frame_index_list = (ulong *)
	    malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_index_list");

	kvaddr = symbol_value("phys_to_machine_mapping");
	if (!x86_xendump_load_page(kvaddr, xd->page))
		return FALSE;
	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (CRASHDEBUG(1))
		/* NOTE(review): writes to global fp, unlike the xd->ofp
		   used everywhere else in this function -- verify intent. */
		fprintf(fp, "phys_to_machine_mapping: %lx\n", *up);
	kvaddr = *up;

	/* Invalidate the caches while we walk with XEN_MACHINE-less reads. */
	machdep->last_ptbl_read = BADADDR;
	machdep->last_pmd_read = BADADDR;

	for (i = 0; i < xd->xc_core.p2m_frames; i++) {
		if ((idx = x86_xendump_page_index(kvaddr)) == MFN_NOT_FOUND)
			return FALSE;
		xd->xc_core.p2m_frame_index_list[i] = idx;
		kvaddr += PAGESIZE();
	}

	machdep->last_ptbl_read = 0;
	machdep->last_pmd_read = 0;

	return TRUE;
}

/*
 *  pvops kernel variant: locate cr3, size the p2m list from max_pfn,
 *  then dispatch to the 2-level or 3-level p2m layout builder depending
 *  upon whether the kernel has p2m_mid_missing.
 */
static int
x86_pvops_xendump_p2m_create(struct xendump_data *xd)
{
	int i;
	ulong mfn, kvaddr, ctrlreg[8], ctrlreg_offset;
	ulong *up;
	ulonglong *ulp;
	off_t offset;

	if ((ctrlreg_offset = MEMBER_OFFSET("vcpu_guest_context", "ctrlreg")) ==
	    INVALID_OFFSET)
		error(FATAL,
		    "cannot determine vcpu_guest_context.ctrlreg offset\n");
	else if (CRASHDEBUG(1))
		fprintf(xd->ofp,
		    "MEMBER_OFFSET(vcpu_guest_context, ctrlreg): %ld\n",
		    ctrlreg_offset);

	offset = xd->xc_core.header.xch_ctxt_offset + (off_t)ctrlreg_offset;
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		error(FATAL, "cannot lseek to xch_ctxt_offset\n");
	if (read(xd->xfd, &ctrlreg, sizeof(ctrlreg)) != sizeof(ctrlreg))
		error(FATAL, "cannot read vcpu_guest_context ctrlreg[8]\n");

	mfn = (ctrlreg[3] >> PAGESHIFT()) |
	    (ctrlreg[3] << (BITS()-PAGESHIFT()));

	for (i = 0; CRASHDEBUG(1) && (i < 8); i++) {
		fprintf(xd->ofp, "ctrlreg[%d]: %lx", i, ctrlreg[i]);
		if (i == 3)
			fprintf(xd->ofp, " -> mfn: %lx", mfn);
		fprintf(xd->ofp, "\n");
	}

	if (!xc_core_mfn_to_page(mfn, machdep->pgd))
		error(FATAL, "cannot read/find cr3 page\n");
	machdep->last_pgd_read = mfn;

	if (CRASHDEBUG(1)) {
		fprintf(xd->ofp, "contents of page directory page:\n");
		if (machdep->flags & PAE) {
			ulp = (ulonglong *)machdep->pgd;
			fprintf(xd->ofp,
			    "%016llx %016llx %016llx %016llx\n",
			    *ulp, *(ulp+1), *(ulp+2), *(ulp+3));
		} else {
			up = (ulong *)machdep->pgd;
			for (i = 0; i < 256; i++) {
				fprintf(xd->ofp,
				    "%08lx: %08lx %08lx %08lx %08lx\n",
				    (ulong)((i * 4) * sizeof(ulong)),
				    *up, *(up+1), *(up+2), *(up+3));
				up += 4;
			}
		}
	}

	kvaddr = symbol_value("max_pfn");
	if (!x86_xendump_load_page(kvaddr, xd->page))
		return FALSE;
	up = (ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (CRASHDEBUG(1))
		fprintf(xd->ofp, "max_pfn: %lx\n", *up);

	xd->xc_core.p2m_frames = (*up/(PAGESIZE()/sizeof(ulong))) +
	    ((*up%(PAGESIZE()/sizeof(ulong))) ? 1 : 0);

	/* NOTE(review): sizeof(int) per ulong entry -- see note in
	   x86_xendump_p2m_create(); 32-bit-x86-only assumption. */
	if ((xd->xc_core.p2m_frame_index_list = (ulong *)
	    malloc(xd->xc_core.p2m_frames * sizeof(int))) == NULL)
		error(FATAL, "cannot malloc p2m_frame_index_list");

	if (symbol_exists("p2m_mid_missing"))
		return x86_pvops_xendump_p2m_l3_create(xd);
	else
		return x86_pvops_xendump_p2m_l2_create(xd);
}

/*
 *  2-level p2m layout: p2m_top is a flat array of pointers to the
 *  p2m pages; resolve each one to its dumpfile page index.
 */
static int
x86_pvops_xendump_p2m_l2_create(struct xendump_data *xd)
{
	int i, idx, p;
	ulong kvaddr, *up;

	machdep->last_ptbl_read = BADADDR;
	machdep->last_pmd_read = BADADDR;

	kvaddr = symbol_value("p2m_top");

	for (p = 0; p < xd->xc_core.p2m_frames; p += XEN_PFNS_PER_PAGE) {
		if (!x86_xendump_load_page(kvaddr, xd->page))
			return FALSE;

		if (CRASHDEBUG(7))
			x86_debug_dump_page(xd->ofp, xd->page,
			    "contents of page:");

		up = (ulong *)(xd->page);

		for (i = 0; i < XEN_PFNS_PER_PAGE; i++, up++) {
			if ((p+i) >= xd->xc_core.p2m_frames)
				break;
			if ((idx = x86_xendump_page_index(*up)) ==
			    MFN_NOT_FOUND)
				return FALSE;
			xd->xc_core.p2m_frame_index_list[p+i] = idx;
		}

		kvaddr += PAGESIZE();
	}

	machdep->last_ptbl_read = 0;
	machdep->last_pmd_read = 0;

	return TRUE;
}

/*
 *  3-level p2m layout: p2m_top -> p2m_mid pages -> p2m pages, with
 *  p2m_mid_missing/p2m_missing sentinels marking holes.  Returns TRUE
 *  on success; FALSE (via err) on any failed page load or lookup.
 */
static int
x86_pvops_xendump_p2m_l3_create(struct xendump_data *xd)
{
	int i, idx, j, p2m_frame, ret = FALSE;
	ulong kvaddr, *p2m_mid, p2m_mid_missing, p2m_missing, *p2m_top;

	p2m_top = NULL;

	machdep->last_ptbl_read = BADADDR;
	machdep->last_pmd_read = BADADDR;

	/* Read both sentinel page pointers first. */
	kvaddr = symbol_value("p2m_missing");
	if (!x86_xendump_load_page(kvaddr, xd->page))
		goto err;
	p2m_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr));

	kvaddr = symbol_value("p2m_mid_missing");
	if (!x86_xendump_load_page(kvaddr, xd->page))
		goto err;
	p2m_mid_missing = *(ulong *)(xd->page + PAGEOFFSET(kvaddr));

	/* Dereference p2m_top and load the top-level page it points at. */
	kvaddr = symbol_value("p2m_top");
	if (!x86_xendump_load_page(kvaddr, xd->page))
		goto err;
	kvaddr = *(ulong *)(xd->page + PAGEOFFSET(kvaddr));
	if (!x86_xendump_load_page(kvaddr, xd->page))
		goto err;

	if (CRASHDEBUG(7))
		x86_debug_dump_page(xd->ofp, xd->page,
		    "contents of p2m_top page:");

	/* Copy out of xd->page -- the loop below reuses it for mids. */
	p2m_top = (ulong *)GETBUF(PAGESIZE());
	memcpy(p2m_top, xd->page, PAGESIZE());

	for (i = 0; i < XEN_P2M_TOP_PER_PAGE; ++i) {
		p2m_frame = i * XEN_P2M_MID_PER_PAGE;
		if (p2m_frame >= xd->xc_core.p2m_frames)
			break;
		if (p2m_top[i] == p2m_mid_missing)
			continue;
		if (!x86_xendump_load_page(p2m_top[i], xd->page))
			goto err;

		if (CRASHDEBUG(7))
			x86_debug_dump_page(xd->ofp, xd->page,
			    "contents of p2m_mid page:");

		p2m_mid = (ulong *)xd->page;

		for (j = 0; j < XEN_P2M_MID_PER_PAGE; ++j, ++p2m_frame) {
			if (p2m_frame >= xd->xc_core.p2m_frames)
				break;
			if (p2m_mid[j] == p2m_missing)
				continue;
			idx = x86_xendump_page_index(p2m_mid[j]);
			if (idx == MFN_NOT_FOUND)
				goto err;
			xd->xc_core.p2m_frame_index_list[p2m_frame] = idx;
		}
	}

	machdep->last_ptbl_read = 0;
	machdep->last_pmd_read = 0;

	ret = TRUE;
err:
	if (p2m_top)
		FREEBUF(p2m_top);
	return ret;
}

/*
 *  Hex-dump one page (256 rows of 4 ulongs) to ofp under a banner.
 */
static void
x86_debug_dump_page(FILE *ofp, char *page, char *name)
{
	int i;
	ulong *up;

	fprintf(ofp, "%s\n", name);

	up = (ulong *)page;
	for (i = 0; i < 256; i++) {
		fprintf(ofp, "%016lx: %08lx %08lx %08lx %08lx\n",
		    (ulong)((i * 4) * sizeof(ulong)),
		    *up, *(up+1), *(up+2), *(up+3));
		up += 4;
	}
}

/*
 *  Find the page associated with the kvaddr, and read its contents
 *  into the passed-in buffer.
*/ static char * x86_xendump_load_page(ulong kvaddr, char *pgbuf) { ulong *entry; ulong *up; ulong mfn; if (machdep->flags & PAE) return x86_xendump_load_page_PAE(kvaddr, pgbuf); up = (ulong *)machdep->pgd; entry = up + (kvaddr >> PGDIR_SHIFT); mfn = (*entry) >> PAGESHIFT(); if (!xc_core_mfn_to_page(mfn, pgbuf)) { error(INFO, "cannot read/find pgd entry from cr3 page\n"); return NULL; } up = (ulong *)pgbuf; entry = up + ((kvaddr >> 12) & (PTRS_PER_PTE-1)); mfn = (*entry) >> PAGESHIFT(); if (!xc_core_mfn_to_page(mfn, pgbuf)) { error(INFO, "cannot read/find page table page\n"); return NULL; } return pgbuf; } static char * x86_xendump_load_page_PAE(ulong kvaddr, char *pgbuf) { ulonglong *entry; ulonglong *up; ulong mfn; up = (ulonglong *)machdep->pgd; entry = up + (kvaddr >> PGDIR_SHIFT); mfn = (ulong)((*entry) >> PAGESHIFT()); if (!xc_core_mfn_to_page(mfn, pgbuf)) { error(INFO, "cannot read/find pgd entry from cr3 page\n"); return NULL; } up = (ulonglong *)pgbuf; entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1)); mfn = (ulong)((*entry) >> PAGESHIFT()); if (!xc_core_mfn_to_page(mfn, pgbuf)) { error(INFO, "cannot read/find pmd entry from pgd\n"); return NULL; } up = (ulonglong *)pgbuf; entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1)); mfn = (ulong)((*entry) >> PAGESHIFT()); if (!xc_core_mfn_to_page(mfn, pgbuf)) { error(INFO, "cannot read/find page table page from pmd\n"); return NULL; } return pgbuf; } /* * Find the dumpfile page index associated with the kvaddr. 
 */
/*
 *  Translate kvaddr to the xendump page index of the page backing it,
 *  walking the guest's (non-PAE) page tables.  The page-table page is
 *  cached in machdep->ptbl (keyed by machdep->last_ptbl_read).
 *  Returns MFN_NOT_FOUND on any failure.
 */
static int
x86_xendump_page_index(ulong kvaddr)
{
	int idx;
	ulong *entry;
	ulong *up;
	ulong mfn;

	if (machdep->flags & PAE)
		return x86_xendump_page_index_PAE(kvaddr);

	up = (ulong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (*entry) >> PAGESHIFT();
	if ((mfn != machdep->last_ptbl_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->ptbl)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return MFN_NOT_FOUND;
	}
	machdep->last_ptbl_read = mfn;

	up = (ulong *)machdep->ptbl;
	entry = up + ((kvaddr>>12) & (PTRS_PER_PTE-1));
	mfn = (*entry) >> PAGESHIFT();
	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
		error(INFO, "cannot determine page index for %lx\n",
		    kvaddr);

	return idx;
}

/*
 *  PAE variant: 3-level walk, caching the pmd page in machdep->pmd
 *  and the page-table page in machdep->ptbl.
 */
static int
x86_xendump_page_index_PAE(ulong kvaddr)
{
	int idx;
	ulonglong *entry;
	ulonglong *up;
	ulong mfn;

	up = (ulonglong *)machdep->pgd;
	entry = up + (kvaddr >> PGDIR_SHIFT);
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((mfn != machdep->last_pmd_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->pmd)) {
		error(INFO, "cannot read/find pgd entry from cr3 page\n");
		return MFN_NOT_FOUND;
	}
	machdep->last_pmd_read = mfn;

	up = (ulonglong *)machdep->pmd;
	entry = up + ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PMD-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((mfn != machdep->last_ptbl_read) &&
	    !xc_core_mfn_to_page(mfn, machdep->ptbl)) {
		error(INFO, "cannot read/find pmd entry from pgd\n");
		return MFN_NOT_FOUND;
	}
	machdep->last_ptbl_read = mfn;

	up = (ulonglong *)machdep->ptbl;
	entry = up + ((kvaddr >> PAGESHIFT()) & (PTRS_PER_PTE-1));
	mfn = (ulong)((*entry) >> PAGESHIFT());
	if ((idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND)
		error(INFO, "cannot determine page index for %lx\n",
		    kvaddr);

	return idx;
}

/*
 *  Pull the esp from the cpu_user_regs struct in the header,
 *  turn it into a task, and match it with the active_set.
 *  Unfortunately, the registers in the vcpu_guest_context
 *  are not necessarily those of the panic task, so for now
 *  let get_active_set_panic_task() get the right task.
*/ static ulong x86_xendump_panic_task(struct xendump_data *xd) { return NO_TASK; #ifdef TO_BE_REVISITED int i; ulong esp; off_t offset; ulong task; if (INVALID_MEMBER(vcpu_guest_context_user_regs) || INVALID_MEMBER(cpu_user_regs_esp)) return NO_TASK; offset = xd->xc_core.header.xch_ctxt_offset + (off_t)OFFSET(vcpu_guest_context_user_regs) + (off_t)OFFSET(cpu_user_regs_esp); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return NO_TASK; if (read(xd->xfd, &esp, sizeof(ulong)) != sizeof(ulong)) return NO_TASK; if (IS_KVADDR(esp) && (task = stkptr_to_task(esp))) { for (i = 0; i < NR_CPUS; i++) { if (task == tt->active_set[i]) { if (CRASHDEBUG(0)) error(INFO, "x86_xendump_panic_task: esp: %lx -> task: %lx\n", esp, task); return task; } } error(WARNING, "x86_xendump_panic_task: esp: %lx -> task: %lx (not active)\n", esp); } return NO_TASK; #endif } /* * Because of an off-by-one vcpu bug in early xc_domain_dumpcore() * instantiations, the registers in the vcpu_guest_context are not * necessarily those of the panic task. If not, the eip/esp will be * in stop_this_cpu, as a result of the IP interrupt in panic(), * but the trace is strange because it comes out of the hypervisor * at least if the vcpu had been idle. 
 */
/*
 *  Supply the backtrace starting hooks (eip/esp) for a xendump task,
 *  preferring the registers saved in the header's vcpu_guest_context
 *  when they demonstrably belong to bt->task; otherwise fall back to
 *  the generic machdep stack-frame getter.
 */
static void
x86_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *eip, ulong *esp)
{
	ulong task, xeip, xesp;
	off_t offset;

	if (INVALID_MEMBER(vcpu_guest_context_user_regs) ||
	    INVALID_MEMBER(cpu_user_regs_eip) ||
	    INVALID_MEMBER(cpu_user_regs_esp))
		goto generic;

	/* Read the saved esp from the dumpfile header. */
	offset = xd->xc_core.header.xch_ctxt_offset +
	    (off_t)OFFSET(vcpu_guest_context_user_regs) +
	    (off_t)OFFSET(cpu_user_regs_esp);
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		goto generic;
	if (read(xd->xfd, &xesp, sizeof(ulong)) != sizeof(ulong))
		goto generic;

	/* And the saved eip. */
	offset = xd->xc_core.header.xch_ctxt_offset +
	    (off_t)OFFSET(vcpu_guest_context_user_regs) +
	    (off_t)OFFSET(cpu_user_regs_eip);
	if (lseek(xd->xfd, offset, SEEK_SET) == -1)
		goto generic;
	if (read(xd->xfd, &xeip, sizeof(ulong)) != sizeof(ulong))
		goto generic;

	/* Only trust the saved registers if the esp maps to bt->task. */
	if (IS_KVADDR(xesp) && (task = stkptr_to_task(xesp)) &&
	    (task == bt->task)) {
		if (CRASHDEBUG(1))
			fprintf(xd->ofp,
			    "hooks from vcpu_guest_context: eip: %lx esp: %lx\n",
			    xeip, xesp);
		*eip = xeip;
		*esp = xesp;
		return;
	}

generic:
	return machdep->get_stack_frame(bt, eip, esp);
}

/* for Xen Hypervisor analysis */

/*
 *  Kernel-virtual address check for the Xen hypervisor address space.
 */
static int
x86_xenhyper_is_kvaddr(ulong addr)
{
	if (machdep->flags & PAE) {
		return (addr >= HYPERVISOR_VIRT_START_PAE);
	}
	return (addr >= HYPERVISOR_VIRT_START);
}

/*
 *  Derive the stack base for a hypervisor vcpu from its pcpu's
 *  init_tss esp0, rounded down to the stack size boundary.
 */
static ulong
x86_get_stackbase_hyper(ulong task)
{
	struct xen_hyper_vcpu_context *vcc;
	int pcpu;
	ulong init_tss;
	ulong esp, base;
	char *buf;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(task);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	pcpu = vcc->processor;
	if (!xen_hyper_test_pcpu_id(pcpu)) {
		error(FATAL, "invalid pcpu number\n");
	}

	/* Older hypervisors keep a static array; newer ones use per-cpu. */
	if (symbol_exists("init_tss")) {
		init_tss = symbol_value("init_tss");
		init_tss += XEN_HYPER_SIZE(tss) * pcpu;
	} else {
		init_tss = symbol_value("per_cpu__init_tss");
		init_tss = xen_hyper_per_cpu(init_tss, pcpu);
	}

	buf = GETBUF(XEN_HYPER_SIZE(tss));
	if (!readmem(init_tss, KVADDR, buf,
	    XEN_HYPER_SIZE(tss), "init_tss", RETURN_ON_ERROR)) {
		error(FATAL, "cannot read init_tss.\n");
	}

	esp = ULONG(buf + XEN_HYPER_OFFSET(tss_esp0));
	FREEBUF(buf);
	base = esp & (~(STACKSIZE() - 1));

	return base;
}

/*
 *  Stack top is one STACKSIZE() above the base.
 */
static ulong
x86_get_stacktop_hyper(ulong task)
{
	return x86_get_stackbase_hyper(task) + STACKSIZE();
}

/*
 *  Supply starting eip/esp hooks for a hypervisor vcpu backtrace,
 *  taken from the pcpu's crash-note register dump, sanity-checked
 *  against the vcpu's stack bounds and kernel text.
 */
static void
x86_get_stack_frame_hyper(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	struct xen_hyper_vcpu_context *vcc;
	int pcpu;
	ulong *regs;
	ulong esp, eip;

	/* task means vcpu here */
	vcc = xen_hyper_vcpu_to_vcpu_context(bt->task);
	if (!vcc)
		error(FATAL, "invalid vcpu\n");

	pcpu = vcc->processor;
	if (!xen_hyper_test_pcpu_id(pcpu)) {
		error(FATAL, "invalid pcpu number\n");
	}

	/* "bt -t"-style requests just want the stack base. */
	if (bt->flags & BT_TEXT_SYMBOLS_ALL) {
		if (spp)
			*spp = x86_get_stackbase_hyper(bt->task);
		if (pcp)
			*pcp = 0;
		bt->flags &= ~BT_TEXT_SYMBOLS_ALL;
		return;
	}

	regs = (ulong *)xen_hyper_id_to_dumpinfo_context(pcpu)->pr_reg_ptr;
	esp = XEN_HYPER_X86_NOTE_ESP(regs);
	eip = XEN_HYPER_X86_NOTE_EIP(regs);

	if (spp) {
		/* Fall back to the stack base if esp is out of bounds. */
		if (esp < x86_get_stackbase_hyper(bt->task) ||
		    esp >= x86_get_stacktop_hyper(bt->task))
			*spp = x86_get_stackbase_hyper(bt->task);
		else
			*spp = esp;
	}
	if (pcp) {
		if (is_kernel_text(eip))
			*pcp = eip;
		else
			*pcp = 0;
	}
}

/*
 *  machdep initialization for Xen hypervisor analysis, staged by the
 *  same PRE_SYMTAB/PRE_GDB/POST_GDB/POST_INIT phases as x86_init().
 */
static void
x86_init_hyper(int when)
{
	switch (when) {
	case PRE_SYMTAB:
		machdep->verify_symbol = x86_verify_symbol;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;
		machdep->pagesize = memory_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize * 4; /* ODA: magic num */
		if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pgd space.");
		if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc pmd space.");
		if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL)
			error(FATAL, "cannot malloc ptbl space.");
		machdep->last_pgd_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;
		machdep->machspec = &x86_machine_specific; /* some members used */
		break;

	case PRE_GDB:
		if (symbol_exists("create_pae_xen_mappings") ||
		    symbol_exists("idle_pg_table_l3")) {
			machdep->flags |= PAE;
			PGDIR_SHIFT = PGDIR_SHIFT_3LEVEL;
			PTRS_PER_PTE = PTRS_PER_PTE_3LEVEL;
			PTRS_PER_PGD = PTRS_PER_PGD_3LEVEL;
			machdep->kvtop = x86_kvtop_PAE;
			machdep->kvbase = HYPERVISOR_VIRT_START_PAE;
		} else {
			PGDIR_SHIFT = PGDIR_SHIFT_2LEVEL;
			PTRS_PER_PTE = PTRS_PER_PTE_2LEVEL;
			PTRS_PER_PGD = PTRS_PER_PGD_2LEVEL;
			machdep->kvtop = x86_kvtop;
			/* 2-level: no separate pmd; alias it to the pgd. */
			free(machdep->pmd);
			machdep->pmd = machdep->pgd;
			machdep->kvbase = HYPERVISOR_VIRT_START;
		}
		machdep->ptrs_per_pgd = PTRS_PER_PGD;
		machdep->identity_map_base = DIRECTMAP_VIRT_START;
		machdep->is_kvaddr = x86_xenhyper_is_kvaddr;
		machdep->eframe_search = x86_eframe_search;
		machdep->back_trace = x86_back_trace_cmd;
		machdep->processor_speed = x86_processor_speed; /* ODA: check */
		machdep->dump_irq = generic_dump_irq; /* ODA: check */
		machdep->get_stack_frame = x86_get_stack_frame_hyper;
		machdep->get_stackbase = x86_get_stackbase_hyper;
		machdep->get_stacktop = x86_get_stacktop_hyper;
		machdep->translate_pte = x86_translate_pte;
		machdep->memory_size = xen_hyper_x86_memory_size;
		machdep->dis_filter = x86_dis_filter;
//		machdep->cmd_mach = x86_cmd_mach; /* ODA: check */
		machdep->get_smp_cpus = xen_hyper_x86_get_smp_cpus;
//		machdep->line_number_hooks = x86_line_number_hooks; /* ODA: check */
		machdep->flags |= FRAMESIZE_DEBUG; /* ODA: check */
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->clear_machdep_cache = x86_clear_machdep_cache;

		/* machdep table for Xen Hypervisor */
		xhmachdep->pcpu_init = xen_hyper_x86_pcpu_init;
		break;

	case POST_GDB:
#if 0 /* ODA: need this ? */
		if (x86_omit_frame_pointer()) {
			machdep->flags |= OMIT_FRAME_PTR;
#endif
		XEN_HYPER_STRUCT_SIZE_INIT(cpu_time, "cpu_time");
		XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_x86, "cpuinfo_x86");
		XEN_HYPER_STRUCT_SIZE_INIT(tss, "tss_struct");
		XEN_HYPER_MEMBER_OFFSET_INIT(tss_esp0, "tss_struct", "esp0");
		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_local_tsc_stamp, "cpu_time", "local_tsc_stamp");
		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_local_stamp, "cpu_time", "stime_local_stamp");
		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_stime_master_stamp, "cpu_time", "stime_master_stamp");
		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_tsc_scale, "cpu_time", "tsc_scale");
		XEN_HYPER_MEMBER_OFFSET_INIT(cpu_time_calibration_timer, "cpu_time", "calibration_timer");
		if (symbol_exists("cpu_data")) {
			xht->cpu_data_address = symbol_value("cpu_data");
		}
		/* KAK Can this be calculated? */
		if (!machdep->hz) {
			machdep->hz = XEN_HYPER_HZ;
		}
		break;

	case POST_INIT:
		break;
	}
}
#endif /* X86 */
crash-utility-crash-9cd43f5/unwind.h0000664000372000037200000005305615107550337017062 0ustar juerghjuergh/*
 * Copyright (C) 1999-2000 Hewlett-Packard Co
 * Copyright (C) 1999-2000 David Mosberger-Tang
 */
/*
 * Copyright (C) 1998, 1999 Hewlett-Packard Co
 * Copyright (C) 1998, 1999 David Mosberger-Tang
 */
/*
 * unwind.h
 *
 * Copyright (C) 2002, 2003, 2004, 2005 David Anderson
 * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Adapted from:
 *
 *  include/asm-ia64/fpu.h (kernel-2.4.18-6.23)
 *  include/asm-ia64/unwind.h (kernel-2.4.18-6.23)
 */

#ifndef _ASM_IA64_FPU_H
#define _ASM_IA64_FPU_H

/* IA-64 floating-point register image: 128 bits, 16-byte aligned. */
struct ia64_fpreg {
	union {
		unsigned long bits[2];
	} u;
} __attribute__ ((aligned (16)));

#endif /* _ASM_IA64_FPU_H */

#ifndef _ASM_IA64_UNWIND_H
#define _ASM_IA64_UNWIND_H

/*
 * A simple API for unwinding kernel stacks.  This is used for
 * debugging and error reporting purposes.  The kernel doesn't need
 * full-blown stack unwinding with all the bells and whistles, so there
 * is not much point in implementing the full IA-64 unwind API (though
 * it would of course be possible to implement the kernel API on top
 * of it).
 */

struct task_struct;	/* forward declaration */
struct switch_stack;	/* forward declaration */

/* Application registers reachable through unw_access_ar(). */
enum unw_application_register {
	UNW_AR_BSP,
	UNW_AR_BSPSTORE,
	UNW_AR_PFS,
	UNW_AR_RNAT,
	UNW_AR_UNAT,
	UNW_AR_LC,
	UNW_AR_EC,
	UNW_AR_FPSR,
	UNW_AR_RSC,
	UNW_AR_CCV,
	UNW_AR_CSD,
	UNW_AR_SSD
};

/*
 * The following declarations are private to the unwind
 * implementation:
 */
struct unw_stack {
	unsigned long limit;
	unsigned long top;
};

#define UNW_FLAG_INTERRUPT_FRAME	(1UL << 0)

/*
 * No user of this module should ever access this structure directly
 * as it is subject to change.  It is declared here solely so we can
 * use automatic variables.
 */
struct unw_frame_info {
	struct unw_stack regstk;
	struct unw_stack memstk;
	unsigned int flags;
	short hint;
	short prev_script;

	/* current frame info: */
	unsigned long bsp;		/* backing store pointer value */
	unsigned long sp;		/* stack pointer value */
	unsigned long psp;		/* previous sp value */
	unsigned long ip;		/* instruction pointer value */
	unsigned long pr;		/* current predicate values */
	unsigned long *cfm_loc;		/* cfm save location (or NULL) */
#if defined(UNWIND_V2) || defined(UNWIND_V3)
	unsigned long pt;		/* struct pt_regs location */
#endif
	struct task_struct *task;
	struct switch_stack *sw;

	/* preserved state: */
	unsigned long *bsp_loc;		/* previous bsp save location */
	unsigned long *bspstore_loc;
	unsigned long *pfs_loc;
	unsigned long *rnat_loc;
	unsigned long *rp_loc;
	unsigned long *pri_unat_loc;
	unsigned long *unat_loc;
	unsigned long *pr_loc;
	unsigned long *lc_loc;
	unsigned long *fpsr_loc;
	struct unw_ireg {
		unsigned long *loc;
		struct unw_ireg_nat {
			long type : 3;		/* enum unw_nat_type */
			signed long off : 61;	/* NaT word is at loc+nat.off */
		} nat;
	} r4, r5, r6, r7;
	unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc;
	struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16];
};

/*
 * The official API follows below:
 */

/*
 * Initialize unwind support.
 */
extern void unw_init (void);
extern void unw_create_gate_table (void);

extern void *unw_add_unwind_table (const char *name,
	unsigned long segment_base, unsigned long gp,
	const void *table_start, const void *table_end);
extern void unw_remove_unwind_table (void *handle);

/*
 * Prepare to unwind blocked task t.
 */
#ifndef REDHAT
extern void unw_init_from_blocked_task (struct unw_frame_info *info,
	struct task_struct *t);

extern void unw_init_frame_info (struct unw_frame_info *info,
	struct task_struct *t, struct switch_stack *sw);
#endif /* !REDHAT */

/*
 * Prepare to unwind the currently running thread.
 */
extern void unw_init_running (void (*callback)(struct unw_frame_info *info,
	void *arg), void *arg);

/*
 * Unwind to previous frame.  Returns 0 if successful, negative
 * number in case of an error.
 */
#ifndef REDHAT
extern int unw_unwind (struct unw_frame_info *info);
#endif /* !REDHAT */

/*
 * Unwind until the return pointer is in user-land (or until an error
 * occurs).  Returns 0 if successful, negative number in case of
 * error.
 */
extern int unw_unwind_to_user (struct unw_frame_info *info);

#define unw_is_intr_frame(info)	(((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0)

/* Trivial accessors for the "current frame" members above. */
static inline int
unw_get_ip (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->ip;
	return 0;
}

static inline int
unw_get_sp (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->sp;
	return 0;
}

static inline int
unw_get_psp (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->psp;
	return 0;
}

static inline int
unw_get_bsp (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = (info)->bsp;
	return 0;
}

static inline int
unw_get_cfm (struct unw_frame_info *info, unsigned long *valp)
{
	*valp = *(info)->cfm_loc;
	return 0;
}

static inline int
unw_set_cfm (struct unw_frame_info *info, unsigned long val)
{
	*(info)->cfm_loc = val;
	return 0;
}

static inline int
unw_get_rp (struct unw_frame_info *info, unsigned long *val)
{
	if (!info->rp_loc)
		return -1;
	*val = *info->rp_loc;
	return 0;
}

/*
 * Versioned register accessors: exactly one UNWIND_Vn is selected per
 * compilation, mapping the generic unw_access_* names to that version.
 */
#ifdef UNWIND_V1
extern int unw_access_gr_v1 (struct unw_frame_info *, int, unsigned long *, char *, int);
extern int unw_access_br_v1 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_fr_v1 (struct unw_frame_info *, int, struct ia64_fpreg *, int);
extern int unw_access_ar_v1 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_pr_v1 (struct unw_frame_info *, unsigned long *, int);
#define unw_access_gr unw_access_gr_v1
#define unw_access_br unw_access_br_v1
#define unw_access_fr unw_access_fr_v1
#define unw_access_ar unw_access_ar_v1
#define unw_access_pr unw_access_pr_v1
#endif

#ifdef UNWIND_V2
extern int unw_access_gr_v2 (struct unw_frame_info *, int, unsigned long *, char *, int);
extern int unw_access_br_v2 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_fr_v2 (struct unw_frame_info *, int, struct ia64_fpreg *, int);
extern int unw_access_ar_v2 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_pr_v2 (struct unw_frame_info *, unsigned long *, int);
#define unw_access_gr unw_access_gr_v2
#define unw_access_br unw_access_br_v2
#define unw_access_fr unw_access_fr_v2
#define unw_access_ar unw_access_ar_v2
#define unw_access_pr unw_access_pr_v2
#endif

#ifdef UNWIND_V3
extern int unw_access_gr_v3 (struct unw_frame_info *, int, unsigned long *, char *, int);
extern int unw_access_br_v3 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_fr_v3 (struct unw_frame_info *, int, struct ia64_fpreg *, int);
extern int unw_access_ar_v3 (struct unw_frame_info *, int, unsigned long *, int);
extern int unw_access_pr_v3 (struct unw_frame_info *, unsigned long *, int);
#define unw_access_gr unw_access_gr_v3
#define unw_access_br unw_access_br_v3
#define unw_access_fr unw_access_fr_v3
#define unw_access_ar unw_access_ar_v3
#define unw_access_pr unw_access_pr_v3
#endif

/* Write-side wrappers (write flag = 1). */
static inline int
unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat)
{
	return unw_access_gr(i, n, &v, &nat, 1);
}

static inline int
unw_set_br (struct unw_frame_info *i, int n, unsigned long v)
{
	return unw_access_br(i, n, &v, 1);
}

static inline int
unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v)
{
	return unw_access_fr(i, n, &v, 1);
}

static inline int
unw_set_ar (struct unw_frame_info *i, int n, unsigned long v)
{
	return unw_access_ar(i, n, &v, 1);
}

static inline int
unw_set_pr (struct unw_frame_info *i, unsigned long v)
{
	return unw_access_pr(i, &v, 1);
}

/* Read-side wrappers (write flag = 0). */
#define unw_get_gr(i,n,v,nat)	unw_access_gr(i,n,v,nat,0)
#define unw_get_br(i,n,v)	unw_access_br(i,n,v,0)
#define unw_get_fr(i,n,v)	unw_access_fr(i,n,v,0)
#define unw_get_ar(i,n,v)	unw_access_ar(i,n,v,0)
#define unw_get_pr(i,v)		unw_access_pr(i,v,0)

/*
 * Kernel-version-specific copies of the ia64 switch_stack and pt_regs
 * layouts.  Field order and membership must exactly match the target
 * kernel's ABI — do not reorder or "clean up" these structures.
 */
#ifdef UNWIND_V1
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	struct ia64_fpreg f10;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f11;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};

struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */

	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	unsigned long cr_ifs;		/* interrupted task's function state */

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b6;		/* scratch */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */

	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */

	unsigned long r14;		/* scratch */
	unsigned long r15;		/* scratch */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	/* The following registers are saved by SAVE_REST: */

	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
	unsigned long ar_fpsr;		/* floating point status (preserved) */

	unsigned long b0;		/* return pointer (bp) */
	unsigned long b7;		/* scratch */

	/*
	 * Floating point registers that the kernel considers
	 * scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
};
#endif /* UNWIND_V1 */

/* UNWIND_V2: layouts identical to V1; kept separate per kernel version. */
#ifdef UNWIND_V2
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	struct ia64_fpreg f10;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f11;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};

struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */

	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	unsigned long cr_ifs;		/* interrupted task's function state */

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b6;		/* scratch */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */

	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */

	unsigned long r14;		/* scratch */
	unsigned long r15;		/* scratch */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	/* The following registers are saved by SAVE_REST: */

	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */
	unsigned long ar_fpsr;		/* floating point status (preserved) */

	unsigned long b0;		/* return pointer (bp) */
	unsigned long b7;		/* scratch */

	/*
	 * Floating point registers that the kernel considers
	 * scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
};
#endif /* UNWIND_V2 */

/* UNWIND_V3: pt_regs was rearranged (scratch regs first) in later kernels. */
#ifdef UNWIND_V3
struct pt_regs {
	/* The following registers are saved by SAVE_MIN: */

	unsigned long b6;		/* scratch */
	unsigned long b7;		/* scratch */

	unsigned long ar_csd;		/* used by cmp8xchg16 (scratch) */
	unsigned long ar_ssd;		/* reserved for future use (scratch) */

	unsigned long r8;		/* scratch (return value register 0) */
	unsigned long r9;		/* scratch (return value register 1) */
	unsigned long r10;		/* scratch (return value register 2) */
	unsigned long r11;		/* scratch (return value register 3) */

	unsigned long cr_ipsr;		/* interrupted task's psr */
	unsigned long cr_iip;		/* interrupted task's instruction pointer */
	unsigned long cr_ifs;		/* interrupted task's function state */

	unsigned long ar_unat;		/* interrupted task's NaT register (preserved) */
	unsigned long ar_pfs;		/* prev function state */
	unsigned long ar_rsc;		/* RSE configuration */
	/* The following two are valid only if cr_ipsr.cpl > 0: */
	unsigned long ar_rnat;		/* RSE NaT */
	unsigned long ar_bspstore;	/* RSE bspstore */

	unsigned long pr;		/* 64 predicate registers (1 bit each) */
	unsigned long b0;		/* return pointer (bp) */
	unsigned long loadrs;		/* size of dirty partition << 16 */

	unsigned long r1;		/* the gp pointer */
	unsigned long r12;		/* interrupted task's memory stack pointer */
	unsigned long r13;		/* thread pointer */

	unsigned long ar_fpsr;		/* floating point status (preserved) */
	unsigned long r15;		/* scratch */

	/* The remaining registers are NOT saved for system calls. */

	unsigned long r14;		/* scratch */
	unsigned long r2;		/* scratch */
	unsigned long r3;		/* scratch */

	/* The following registers are saved by SAVE_REST: */

	unsigned long r16;		/* scratch */
	unsigned long r17;		/* scratch */
	unsigned long r18;		/* scratch */
	unsigned long r19;		/* scratch */
	unsigned long r20;		/* scratch */
	unsigned long r21;		/* scratch */
	unsigned long r22;		/* scratch */
	unsigned long r23;		/* scratch */
	unsigned long r24;		/* scratch */
	unsigned long r25;		/* scratch */
	unsigned long r26;		/* scratch */
	unsigned long r27;		/* scratch */
	unsigned long r28;		/* scratch */
	unsigned long r29;		/* scratch */
	unsigned long r30;		/* scratch */
	unsigned long r31;		/* scratch */

	unsigned long ar_ccv;		/* compare/exchange value (scratch) */

	/*
	 * Floating point registers that the kernel considers scratch:
	 */
	struct ia64_fpreg f6;		/* scratch */
	struct ia64_fpreg f7;		/* scratch */
	struct ia64_fpreg f8;		/* scratch */
	struct ia64_fpreg f9;		/* scratch */
	struct ia64_fpreg f10;		/* scratch */
	struct ia64_fpreg f11;		/* scratch */
};

/*
 * This structure contains the additional registers that need to be
 * preserved across a context switch.  This generally consists of
 * "preserved" registers.
*/
struct switch_stack {
	unsigned long caller_unat;	/* user NaT collection register (preserved) */
	unsigned long ar_fpsr;		/* floating-point status register */

	struct ia64_fpreg f2;		/* preserved */
	struct ia64_fpreg f3;		/* preserved */
	struct ia64_fpreg f4;		/* preserved */
	struct ia64_fpreg f5;		/* preserved */

	/* NOTE: unlike V1/V2, the V3 switch_stack does not save f10/f11. */
	struct ia64_fpreg f12;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f13;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f14;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f15;		/* scratch, but untouched by kernel */
	struct ia64_fpreg f16;		/* preserved */
	struct ia64_fpreg f17;		/* preserved */
	struct ia64_fpreg f18;		/* preserved */
	struct ia64_fpreg f19;		/* preserved */
	struct ia64_fpreg f20;		/* preserved */
	struct ia64_fpreg f21;		/* preserved */
	struct ia64_fpreg f22;		/* preserved */
	struct ia64_fpreg f23;		/* preserved */
	struct ia64_fpreg f24;		/* preserved */
	struct ia64_fpreg f25;		/* preserved */
	struct ia64_fpreg f26;		/* preserved */
	struct ia64_fpreg f27;		/* preserved */
	struct ia64_fpreg f28;		/* preserved */
	struct ia64_fpreg f29;		/* preserved */
	struct ia64_fpreg f30;		/* preserved */
	struct ia64_fpreg f31;		/* preserved */

	unsigned long r4;		/* preserved */
	unsigned long r5;		/* preserved */
	unsigned long r6;		/* preserved */
	unsigned long r7;		/* preserved */

	unsigned long b0;		/* so we can force a direct return in copy_thread */
	unsigned long b1;
	unsigned long b2;
	unsigned long b3;
	unsigned long b4;
	unsigned long b5;

	unsigned long ar_pfs;		/* previous function state */
	unsigned long ar_lc;		/* loop counter (preserved) */
	unsigned long ar_unat;		/* NaT bits for r4-r7 */
	unsigned long ar_rnat;		/* RSE NaT collection register */
	unsigned long ar_bspstore;	/* RSE dirty base (preserved) */
	unsigned long pr;		/* 64 predicate registers (1 bit each) */
};
#endif /* UNWIND_V3 */

#endif /* _ASM_UNWIND_H */
crash-utility-crash-9cd43f5/unwind_x86_64.h0000664000372000037200000000604015107550337020067 0ustar  juerghjuergh
/*
 * This program is free
software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define CONFIG_64BIT 1

/* NOTE(review): redefining NULL and size_t shadows the C library's
 * definitions; this header is compiled in a freestanding context where
 * the kernel's definitions are reproduced — verify before including it
 * alongside standard headers. */
#define NULL ((void *)0)

typedef unsigned long size_t;
typedef unsigned char u8;
typedef signed short s16;
typedef unsigned short u16;
typedef signed int s32;
typedef unsigned int u32;
typedef unsigned long long u64;

/* Mirror of the x86_64 kernel pt_regs layout; field order is ABI. */
struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long rbp;
	unsigned long rbx;
/* arguments: non interrupts/non tracing syscalls only save upto here*/
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long rax;
	unsigned long rcx;
	unsigned long rdx;
	unsigned long rsi;
	unsigned long rdi;
	unsigned long orig_rax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long rip;
	unsigned long cs;
	unsigned long eflags;
	unsigned long rsp;
	unsigned long ss;
/* top of stack page */
};

/* Unwinder state: just a register snapshot for x86_64. */
struct unwind_frame_info
{
	struct pt_regs regs;
};

extern int unwind(struct unwind_frame_info *, int);
extern void init_unwind_table(void);
extern void free_unwind_table(void);

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
/* Compile-time assertions: negative array size / bitfield on failure. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define get_unaligned(ptr) (*(ptr))
//#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))

#define THREAD_ORDER 1
#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)

#define UNW_PC(frame)        (frame)->regs.rip
#define UNW_SP(frame)        (frame)->regs.rsp
#ifdef CONFIG_FRAME_POINTER
#define UNW_FP(frame)        (frame)->regs.rbp
#define FRAME_RETADDR_OFFSET 8
#define FRAME_LINK_OFFSET    0
#define STACK_BOTTOM(tsk)    (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1))
#define STACK_TOP(tsk)       ((tsk)->thread.rsp0)
#endif

/* Encodes a register's slot index and width within unwind_frame_info;
 * the BUILD_BUG_ON_ZERO term rejects fields not aligned to their size. */
#define EXTRA_INFO(f) { BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) % FIELD_SIZEOF(struct unwind_frame_info, f)) + offsetof(struct unwind_frame_info, f)/ FIELD_SIZEOF(struct unwind_frame_info, f), FIELD_SIZEOF(struct unwind_frame_info, f) }
#define PTREGS_INFO(f) EXTRA_INFO(regs.f)

/* DWARF register-number order expected by the unwinder. */
#define UNW_REGISTER_INFO \
	PTREGS_INFO(rax),\
	PTREGS_INFO(rdx),\
	PTREGS_INFO(rcx),\
	PTREGS_INFO(rbx), \
	PTREGS_INFO(rsi), \
	PTREGS_INFO(rdi), \
	PTREGS_INFO(rbp), \
	PTREGS_INFO(rsp), \
	PTREGS_INFO(r8), \
	PTREGS_INFO(r9), \
	PTREGS_INFO(r10),\
	PTREGS_INFO(r11), \
	PTREGS_INFO(r12), \
	PTREGS_INFO(r13), \
	PTREGS_INFO(r14), \
	PTREGS_INFO(r15), \
	PTREGS_INFO(rip)
crash-utility-crash-9cd43f5/vmcore.h0000664000372000037200000000201715107550337017040 0ustar  juerghjuergh
/*
 * vmcore.h
 *
 * Copyright (C) 2019 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/

#ifndef _VMCORE_H
#define _VMCORE_H

/* NOTE(review): the include target was lost in extraction (angle-bracketed
 * header stripped) — restore the original system header from upstream
 * before building; this file uses the __u32/__u8 kernel types. */
#include

#ifndef NT_VMCOREDD
#define NT_VMCOREDD 0x700
#endif

#define VMCOREDD_NOTE_NAME "LINUX"
#define VMCOREDD_MAX_NAME_BYTES 44

/*
 * Header of an NT_VMCOREDD (vmcore device dump) ELF note, matching the
 * kernel's layout: a standard note header followed by the fixed 8-byte
 * note name and the device dump's name.
 */
struct vmcoredd_header {
	__u32 n_namesz; /* Name size */
	__u32 n_descsz; /* Content size */
	__u32 n_type;   /* NT_VMCOREDD */
	__u8 name[8];   /* LINUX\0\0\0 */
	__u8 dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Device dump's name */
};

#endif /* _VMCORE_H */
crash-utility-crash-9cd43f5/task.c0000664000372000037200000111134415107550337016507 0ustar  juerghjuergh
/* task.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2018 David Anderson
 * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
*/

#include "defs.h"

/* File-local forward declarations. */

/* panic-task discovery and task-table maintenance */
static ulong get_panic_context(void);
static int sort_by_pid(const void *, const void *);
static void show_ps(ulong, struct psinfo *);
static struct task_context *panic_search(void);
static void allocate_task_space(int);
static void refresh_fixed_task_table(void);
static void refresh_unlimited_task_table(void);
static void refresh_pidhash_task_table(void);
static void refresh_pid_hash_task_table(void);
static void refresh_hlist_task_table(void);
static void refresh_hlist_task_table_v2(void);
static void refresh_hlist_task_table_v3(void);
static void refresh_active_task_table(void);
static int radix_tree_task_callback(ulong);
static void refresh_radix_tree_task_table(void);
static void refresh_xarray_task_table(void);
static struct task_context *add_context(ulong, char *);
static void refresh_context(ulong, ulong);
static ulong parent_of(ulong);
static void parent_list(ulong);
static void child_list(ulong);

/* task-state and "ps" display helpers */
static void initialize_task_state(void);
static void dump_task_states(void);
static void show_ps_data(ulong, struct task_context *, struct psinfo *);
static void show_task_times(struct task_context *, ulong);
static void show_task_args(struct task_context *);
static void show_task_rlimit(struct task_context *);
static void show_tgid_list(ulong);
static int compare_start_time(const void *, const void *);
static int start_time_timespec(void);
static ulonglong convert_start_time(ulonglong, ulonglong);

/* panic-task identification */
static ulong search_panic_task_by_cpu(char *);
static ulong search_panic_task_by_keywords(char *, int *);
static ulong get_log_panic_task(void);
static ulong get_dumpfile_panic_task(void);
static ulong get_active_set_panic_task(void);
static void populate_panic_threads(void);
static int verify_task(struct task_context *, int);
static ulong get_idle_task(int, char *);
static ulong get_curr_task(int, char *);

/* run-queue and scheduler display */
static long rq_idx(int);
static long cpu_idx(int);
static void dump_runq(void);
static void dump_on_rq_timestamp(void);
static void dump_on_rq_lag(void);
static void dump_on_rq_milliseconds(void);
static void dump_runqueues(void);
static void dump_prio_array(int, ulong, char *);
static void dump_task_runq_entry(struct task_context *, int);
static void print_group_header_fair(int, ulong, void *);
static void print_parent_task_group_fair(void *, int);
static int dump_tasks_in_lower_dequeued_cfs_rq(int, ulong, int, struct task_context *);
static int dump_tasks_in_cfs_rq(ulong);
static int dump_tasks_in_task_group_cfs_rq(int, ulong, int, struct task_context *);
static void dump_on_rq_tasks(void);
static void cfs_rq_offset_init(void);
static void task_group_offset_init(void);
static void dump_CFS_runqueues(void);
static void print_group_header_rt(ulong, void *);
static void print_parent_task_group_rt(void *, int);
static int dump_tasks_in_lower_dequeued_rt_rq(int, ulong, int);
static int dump_RT_prio_array(ulong, char *);
static void dump_tasks_in_task_group_rt_rq(int, ulong, int);
static char *get_task_group_name(ulong);
static void sort_task_group_info_array(void);
static void print_task_group_info_array(void);
static void reuse_task_group_info_array(void);
static void free_task_group_info_array(void);
static void fill_task_group_info_array(int, ulong, char *, int);
static void dump_tasks_by_task_group(void);

/* signal and member display */
static void task_struct_member(struct task_context *,unsigned int, struct reference *);
static void signal_reference(struct task_context *, ulong, struct reference *);
static void do_sig_thread_group(ulong);
static void dump_signal_data(struct task_context *, ulong);

/* flags for dump_signal_data() display levels */
#define TASK_LEVEL         (0x1)
#define THREAD_GROUP_LEVEL (0x2)
#define TASK_INDENT        (0x4)

static int sigrt_minmax(int *, int *);
static void signame_list(void);
static void sigqueue_list(ulong);
static ulonglong task_signal(ulong, ulong*);
static ulonglong task_blocked(ulong);
static void translate_sigset(ulonglong);
static ulonglong sigaction_mask(ulong);
static int task_has_cpu(ulong, char *);
static int is_foreach_keyword(char *, int *);
static void foreach_cleanup(void *);
static void ps_cleanup(void *);
static char *task_pointer_string(struct task_context *, ulong, char *);
static int panic_context_adjusted(struct task_context *tc);
static void show_last_run(struct task_context *, struct psinfo *);
static void show_milliseconds(struct task_context *, struct psinfo *);
static char *translate_nanoseconds(ulonglong, char *);
static int sort_by_last_run(const void *arg1, const void *arg2);
static void sort_context_array_by_last_run(void);
static void show_ps_summary(ulong);
static void irqstacks_init(void);
static void parse_task_thread(int argcnt, char *arglist[], struct task_context *);
static void stack_overflow_check_init(void);
static int has_sched_policy(ulong, ulong);
static ulong task_policy(ulong);
static ulong sched_policy_bit_from_str(const char *);
static ulong make_sched_policy(const char *);

void crash_get_current_task_info(unsigned long *, char **);

/* Scheduling-policy value/name pairs; terminated by a ULONG_MAX sentinel. */
static struct sched_policy_info {
	ulong value;
	char *name;
} sched_policy_info[] = {
	{ SCHED_NORMAL, "NORMAL" },
	{ SCHED_FIFO, "FIFO" },
	{ SCHED_RR, "RR" },
	{ SCHED_BATCH, "BATCH" },
	{ SCHED_ISO, "ISO" },
	{ SCHED_IDLE, "IDLE" },
	{ SCHED_DEADLINE, "DEADLINE" },
	{ ULONG_MAX, NULL }
};

/* Result of scanning the kernel log for panic keywords. */
enum PANIC_TASK_FOUND_RESULT {
	FOUND_NO_PANIC_KEYWORD,
	FOUND_PANIC_KEYWORD,
	FOUND_PANIC_TASK
};

/* Log substrings that identify a crashing context; NULL-terminated. */
const char *panic_keywords[] = {
	"Unable to handle kernel",
	"BUG: unable to handle kernel",
	"Kernel BUG at",
	"kernel BUG at",
	"Bad mode in",
	"Oops",
	"Kernel panic",
	NULL,
};

/*
 * Figure out how much space will be required to hold the task context
 * data, malloc() it, and call refresh_task_table() to fill it up.
 * Gather a few key offset and size values.  Lastly, get, and then set,
 * the initial context.
*/ void task_init(void) { long len; int dim, task_struct_size; struct syment *nsp; long tss_offset, thread_offset; long eip_offset, esp_offset, ksp_offset; struct gnu_request req; ulong active_pid; if (!(tt->idle_threads = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc idle_threads array"); if (DUMPFILE() && !(tt->panic_threads = (ulong *)calloc(NR_CPUS, sizeof(ulong)))) error(FATAL, "cannot malloc panic_threads array"); if (kernel_symbol_exists("nr_tasks")) { /* * Figure out what maximum NR_TASKS would be by getting the * address of the next symbol after "task". */ tt->task_start = symbol_value("task"); if ((nsp = next_symbol("task", NULL)) == NULL) error(FATAL, "cannot determine size of task table\n"); tt->flags |= TASK_ARRAY_EXISTS; tt->task_end = nsp->value; tt->max_tasks = (tt->task_end-tt->task_start) / sizeof(void *); allocate_task_space(tt->max_tasks); tss_offset = MEMBER_OFFSET_INIT(task_struct_tss, "task_struct", "tss"); eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, "thread_struct", "eip"); esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, "thread_struct", "esp"); ksp_offset = MEMBER_OFFSET_INIT(thread_struct_ksp, "thread_struct", "ksp"); ASSIGN_OFFSET(task_struct_tss_eip) = (eip_offset == INVALID_OFFSET) ? INVALID_OFFSET : tss_offset + eip_offset; ASSIGN_OFFSET(task_struct_tss_esp) = (esp_offset == INVALID_OFFSET) ? INVALID_OFFSET : tss_offset + esp_offset; ASSIGN_OFFSET(task_struct_tss_ksp) = (ksp_offset == INVALID_OFFSET) ? INVALID_OFFSET : tss_offset + ksp_offset; tt->flags |= TASK_REFRESH; tt->refresh_task_table = refresh_fixed_task_table; readmem(tt->task_start, KVADDR, &tt->idle_threads[0], kt->cpus * sizeof(void *), "idle threads", FAULT_ON_ERROR); } else { /* * Make the task table big enough to hold what's running. * It can be realloc'd later if it grows on a live system. 
*/ get_symbol_data("nr_threads", sizeof(int), &tt->nr_threads); tt->max_tasks = tt->nr_threads + NR_CPUS + TASK_SLUSH; allocate_task_space(tt->max_tasks); thread_offset = MEMBER_OFFSET_INIT(task_struct_thread, "task_struct", "thread"); eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, "thread_struct", "eip"); esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, "thread_struct", "esp"); /* * Handle x86/x86_64 merger. */ if (eip_offset == INVALID_OFFSET) eip_offset = MEMBER_OFFSET_INIT(thread_struct_eip, "thread_struct", "ip"); if (esp_offset == INVALID_OFFSET) esp_offset = MEMBER_OFFSET_INIT(thread_struct_esp, "thread_struct", "sp"); ksp_offset = MEMBER_OFFSET_INIT(thread_struct_ksp, "thread_struct", "ksp"); ASSIGN_OFFSET(task_struct_thread_eip) = (eip_offset == INVALID_OFFSET) ? INVALID_OFFSET : thread_offset + eip_offset; ASSIGN_OFFSET(task_struct_thread_esp) = (esp_offset == INVALID_OFFSET) ? INVALID_OFFSET : thread_offset + esp_offset; ASSIGN_OFFSET(task_struct_thread_ksp) = (ksp_offset == INVALID_OFFSET) ? 
INVALID_OFFSET : thread_offset + ksp_offset; tt->flags |= TASK_REFRESH; tt->refresh_task_table = refresh_unlimited_task_table; get_idle_threads(&tt->idle_threads[0], kt->cpus); } /* * Handle CONFIG_THREAD_INFO_IN_TASK changes */ MEMBER_OFFSET_INIT(task_struct_stack, "task_struct", "stack"); MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", "thread_info"); if (VALID_MEMBER(task_struct_thread_info)) { switch (MEMBER_TYPE("task_struct", "thread_info")) { case TYPE_CODE_PTR: break; case TYPE_CODE_STRUCT: tt->flags |= THREAD_INFO_IN_TASK; break; default: error(FATAL, "unexpected type code for task_struct.thread_info: %ld\n", MEMBER_TYPE("task_struct", "thread_info")); break; } } else if (VALID_MEMBER(task_struct_stack)) MEMBER_OFFSET_INIT(task_struct_thread_info, "task_struct", "stack"); MEMBER_OFFSET_INIT(task_struct_cpu, "task_struct", "cpu"); if (VALID_MEMBER(task_struct_thread_info)) { if (tt->flags & THREAD_INFO_IN_TASK && VALID_MEMBER(task_struct_cpu)) { MEMBER_OFFSET_INIT(thread_info_flags, "thread_info", "flags"); /* (unnecessary) reminders */ ASSIGN_OFFSET(thread_info_task) = INVALID_OFFSET; ASSIGN_OFFSET(thread_info_cpu) = INVALID_OFFSET; ASSIGN_OFFSET(thread_info_previous_esp) = INVALID_OFFSET; } else { MEMBER_OFFSET_INIT(thread_info_task, "thread_info", "task"); MEMBER_OFFSET_INIT(thread_info_cpu, "thread_info", "cpu"); MEMBER_OFFSET_INIT(thread_info_flags, "thread_info", "flags"); MEMBER_OFFSET_INIT(thread_info_previous_esp, "thread_info", "previous_esp"); } STRUCT_SIZE_INIT(thread_info, "thread_info"); tt->flags |= THREAD_INFO; } MEMBER_OFFSET_INIT(task_struct_state, "task_struct", "state"); MEMBER_SIZE_INIT(task_struct_state, "task_struct", "state"); if (INVALID_MEMBER(task_struct_state)) { MEMBER_OFFSET_INIT(task_struct_state, "task_struct", "__state"); MEMBER_SIZE_INIT(task_struct_state, "task_struct", "__state"); } MEMBER_OFFSET_INIT(task_struct_exit_state, "task_struct", "exit_state"); MEMBER_SIZE_INIT(task_struct_exit_state, "task_struct", 
"exit_state"); MEMBER_OFFSET_INIT(task_struct_pid, "task_struct", "pid"); MEMBER_OFFSET_INIT(task_struct_comm, "task_struct", "comm"); MEMBER_OFFSET_INIT(task_struct_next_task, "task_struct", "next_task"); MEMBER_OFFSET_INIT(task_struct_processor, "task_struct", "processor"); MEMBER_OFFSET_INIT(task_struct_p_pptr, "task_struct", "p_pptr"); MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", "parent"); if (INVALID_MEMBER(task_struct_parent)) MEMBER_OFFSET_INIT(task_struct_parent, "task_struct", "real_parent"); MEMBER_OFFSET_INIT(task_struct_has_cpu, "task_struct", "has_cpu"); MEMBER_OFFSET_INIT(task_struct_cpus_runnable, "task_struct", "cpus_runnable"); MEMBER_OFFSET_INIT(task_struct_active_mm, "task_struct", "active_mm"); MEMBER_OFFSET_INIT(task_struct_next_run, "task_struct", "next_run"); MEMBER_OFFSET_INIT(task_struct_flags, "task_struct", "flags"); MEMBER_SIZE_INIT(task_struct_flags, "task_struct", "flags"); MEMBER_OFFSET_INIT(task_struct_policy, "task_struct", "policy"); MEMBER_SIZE_INIT(task_struct_policy, "task_struct", "policy"); MEMBER_OFFSET_INIT(task_struct_pidhash_next, "task_struct", "pidhash_next"); MEMBER_OFFSET_INIT(task_struct_pgrp, "task_struct", "pgrp"); MEMBER_OFFSET_INIT(task_struct_tgid, "task_struct", "tgid"); MEMBER_OFFSET_INIT(task_struct_pids, "task_struct", "pids"); MEMBER_OFFSET_INIT(task_struct_last_run, "task_struct", "last_run"); MEMBER_OFFSET_INIT(task_struct_timestamp, "task_struct", "timestamp"); MEMBER_OFFSET_INIT(task_struct_sched_info, "task_struct", "sched_info"); if (VALID_MEMBER(task_struct_sched_info)) MEMBER_OFFSET_INIT(sched_info_last_arrival, "sched_info", "last_arrival"); if (VALID_MEMBER(task_struct_last_run) || VALID_MEMBER(task_struct_timestamp) || VALID_MEMBER(sched_info_last_arrival)) { char buf[BUFSIZE]; strcpy(buf, "alias last ps -l"); alias_init(buf); } MEMBER_OFFSET_INIT(task_struct_pid_links, "task_struct", "pid_links"); MEMBER_OFFSET_INIT(pid_link_pid, "pid_link", "pid"); MEMBER_OFFSET_INIT(pid_hash_chain, 
"pid", "hash_chain"); STRUCT_SIZE_INIT(pid_link, "pid_link"); STRUCT_SIZE_INIT(upid, "upid"); if (VALID_STRUCT(upid)) { MEMBER_OFFSET_INIT(upid_nr, "upid", "nr"); MEMBER_OFFSET_INIT(upid_ns, "upid", "ns"); MEMBER_OFFSET_INIT(upid_pid_chain, "upid", "pid_chain"); MEMBER_OFFSET_INIT(pid_numbers, "pid", "numbers"); ARRAY_LENGTH_INIT(len, pid_numbers, "pid.numbers", NULL, 0); MEMBER_OFFSET_INIT(pid_tasks, "pid", "tasks"); tt->init_pid_ns = symbol_value("init_pid_ns"); } MEMBER_OFFSET_INIT(pid_pid_chain, "pid", "pid_chain"); STRUCT_SIZE_INIT(task_struct, "task_struct"); if (kernel_symbol_exists("arch_task_struct_size") && readmem(symbol_value("arch_task_struct_size"), KVADDR, &task_struct_size, sizeof(int), "arch_task_struct_size", RETURN_ON_ERROR)) { ASSIGN_SIZE(task_struct) = task_struct_size; if (STRUCT_SIZE("task_struct") != SIZE(task_struct)) add_to_downsized("task_struct"); if (CRASHDEBUG(1)) fprintf(fp, "downsize task_struct: %ld to %ld\n", STRUCT_SIZE("task_struct"), SIZE(task_struct)); } MEMBER_OFFSET_INIT(task_struct_sig, "task_struct", "sig"); MEMBER_OFFSET_INIT(task_struct_signal, "task_struct", "signal"); MEMBER_OFFSET_INIT(task_struct_blocked, "task_struct", "blocked"); MEMBER_OFFSET_INIT(task_struct_sigpending, "task_struct", "sigpending"); MEMBER_OFFSET_INIT(task_struct_pending, "task_struct", "pending"); MEMBER_OFFSET_INIT(task_struct_sigqueue, "task_struct", "sigqueue"); MEMBER_OFFSET_INIT(task_struct_sighand, "task_struct", "sighand"); MEMBER_OFFSET_INIT(signal_struct_count, "signal_struct", "count"); MEMBER_OFFSET_INIT(signal_struct_nr_threads, "signal_struct", "nr_threads"); MEMBER_OFFSET_INIT(signal_struct_action, "signal_struct", "action"); MEMBER_OFFSET_INIT(signal_struct_shared_pending, "signal_struct", "shared_pending"); MEMBER_OFFSET_INIT(k_sigaction_sa, "k_sigaction", "sa"); MEMBER_OFFSET_INIT(sigaction_sa_handler, "sigaction", "sa_handler"); MEMBER_OFFSET_INIT(sigaction_sa_mask, "sigaction", "sa_mask"); MEMBER_OFFSET_INIT(sigaction_sa_flags, 
"sigaction", "sa_flags"); MEMBER_OFFSET_INIT(sigpending_head, "sigpending", "head"); if (INVALID_MEMBER(sigpending_head)) MEMBER_OFFSET_INIT(sigpending_list, "sigpending", "list"); MEMBER_OFFSET_INIT(sigpending_signal, "sigpending", "signal"); MEMBER_SIZE_INIT(sigpending_signal, "sigpending", "signal"); STRUCT_SIZE_INIT(sigqueue, "sigqueue"); STRUCT_SIZE_INIT(signal_queue, "signal_queue"); STRUCT_SIZE_INIT(sighand_struct, "sighand_struct"); if (VALID_STRUCT(sighand_struct)) MEMBER_OFFSET_INIT(sighand_struct_action, "sighand_struct", "action"); MEMBER_OFFSET_INIT(siginfo_si_signo, "siginfo", "si_signo"); STRUCT_SIZE_INIT(signal_struct, "signal_struct"); STRUCT_SIZE_INIT(k_sigaction, "k_sigaction"); MEMBER_OFFSET_INIT(task_struct_start_time, "task_struct", "start_time"); MEMBER_SIZE_INIT(task_struct_start_time, "task_struct", "start_time"); MEMBER_SIZE_INIT(task_struct_utime, "task_struct", "utime"); MEMBER_SIZE_INIT(task_struct_stime, "task_struct", "stime"); MEMBER_OFFSET_INIT(task_struct_times, "task_struct", "times"); MEMBER_OFFSET_INIT(tms_tms_utime, "tms", "tms_utime"); MEMBER_OFFSET_INIT(tms_tms_stime, "tms", "tms_stime"); MEMBER_OFFSET_INIT(task_struct_utime, "task_struct", "utime"); MEMBER_OFFSET_INIT(task_struct_stime, "task_struct", "stime"); STRUCT_SIZE_INIT(cputime_t, "cputime_t"); if ((THIS_KERNEL_VERSION < LINUX(4,8,0)) && symbol_exists("cfq_slice_async")) { uint cfq_slice_async; get_symbol_data("cfq_slice_async", sizeof(int), &cfq_slice_async); if (cfq_slice_async) { machdep->hz = cfq_slice_async * 25; if (CRASHDEBUG(2)) fprintf(fp, "cfq_slice_async exists: setting hz to %d\n", machdep->hz); } } else if ((symbol_exists("dd_init_queue") && gdb_set_crash_scope(symbol_value("dd_init_queue"), "dd_init_queue")) || (symbol_exists("dd_init_sched") && gdb_set_crash_scope(symbol_value("dd_init_sched"), "dd_init_sched")) || (symbol_exists("deadline_init_queue") && gdb_set_crash_scope(symbol_value("deadline_init_queue"), "deadline_init_queue"))) { char 
buf[BUFSIZE]; uint write_expire = 0; open_tmpfile(); sprintf(buf, "printf \"%%d\", write_expire"); if (gdb_pass_through(buf, pc->tmpfile, GNU_RETURN_ON_ERROR)) { rewind(pc->tmpfile); if (fgets(buf, BUFSIZE, pc->tmpfile)) sscanf(buf, "%d", &write_expire); } close_tmpfile(); if (write_expire) { machdep->hz = write_expire / 5; if (CRASHDEBUG(2)) fprintf(fp, "write_expire exists: setting hz to %d\n", machdep->hz); } gdb_set_crash_scope(0, NULL); } if (VALID_MEMBER(runqueue_arrays)) MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct", "run_list"); MEMBER_OFFSET_INIT(task_struct_rss_stat, "task_struct", "rss_stat"); MEMBER_OFFSET_INIT(task_rss_stat_count, "task_rss_stat", "count"); if ((tt->task_struct = (char *)malloc(SIZE(task_struct))) == NULL) error(FATAL, "cannot malloc task_struct space."); if ((tt->mm_struct = (char *)malloc(SIZE(mm_struct))) == NULL) error(FATAL, "cannot malloc mm_struct space."); if ((tt->flags & THREAD_INFO) && ((tt->thread_info = (char *)malloc(SIZE(thread_info))) == NULL)) error(FATAL, "cannot malloc thread_info space."); STRUCT_SIZE_INIT(task_union, "task_union"); STRUCT_SIZE_INIT(thread_union, "thread_union"); if (VALID_SIZE(task_union) && (SIZE(task_union) != STACKSIZE())) { error(WARNING, "\nnon-standard stack size: %ld\n", len = SIZE(task_union)); machdep->stacksize = len; } else if (VALID_SIZE(thread_union) && ((len = SIZE(thread_union)) != STACKSIZE())) { machdep->stacksize = len; } else if (!VALID_SIZE(thread_union) && !VALID_SIZE(task_union)) { len = 0; if (kernel_symbol_exists("__start_init_stack") && kernel_symbol_exists("__end_init_stack")) { len = symbol_value("__end_init_stack"); len -= symbol_value("__start_init_stack"); } else if (kernel_symbol_exists("__start_init_task") && kernel_symbol_exists("__end_init_task")) { len = symbol_value("__end_init_task"); len -= symbol_value("__start_init_task"); } if (len) { ASSIGN_SIZE(thread_union) = len; machdep->stacksize = len; } } MEMBER_OFFSET_INIT(pid_namespace_idr, 
"pid_namespace", "idr"); MEMBER_OFFSET_INIT(idr_idr_rt, "idr", "idr_rt"); if (symbol_exists("height_to_maxindex") || symbol_exists("height_to_maxnodes")) { int newver = symbol_exists("height_to_maxnodes"); int tmp ATTRIBUTE_UNUSED; if (!newver) { if (LKCD_KERNTYPES()) ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxindex", "radix_tree_preload.nodes", NULL, 0); else ARRAY_LENGTH_INIT(tmp, height_to_maxindex, "height_to_maxindex", NULL, 0); } else { if (LKCD_KERNTYPES()) ARRAY_LENGTH_INIT_ALT(tmp, "height_to_maxnodes", "radix_tree_preload.nodes", NULL, 0); else ARRAY_LENGTH_INIT(tmp, height_to_maxnodes, "height_to_maxnodes", NULL, 0); } STRUCT_SIZE_INIT(radix_tree_root, "radix_tree_root"); STRUCT_SIZE_INIT(radix_tree_node, "radix_tree_node"); MEMBER_OFFSET_INIT(radix_tree_root_height, "radix_tree_root","height"); MEMBER_OFFSET_INIT(radix_tree_root_rnode, "radix_tree_root","rnode"); MEMBER_OFFSET_INIT(radix_tree_node_slots, "radix_tree_node","slots"); MEMBER_OFFSET_INIT(radix_tree_node_height, "radix_tree_node","height"); MEMBER_OFFSET_INIT(radix_tree_node_shift, "radix_tree_node","shift"); } STRUCT_SIZE_INIT(xarray, "xarray"); STRUCT_SIZE_INIT(xa_node, "xa_node"); MEMBER_OFFSET_INIT(xarray_xa_head, "xarray","xa_head"); MEMBER_OFFSET_INIT(xa_node_slots, "xa_node","slots"); MEMBER_OFFSET_INIT(xa_node_shift, "xa_node","shift"); if (symbol_exists("pidhash") && symbol_exists("pid_hash") && !symbol_exists("pidhash_shift")) error(FATAL, "pidhash and pid_hash both exist -- cannot distinquish between them\n"); if (VALID_MEMBER(pid_namespace_idr)) { STRUCT_SIZE_INIT(pid, "pid"); if (STREQ(MEMBER_TYPE_NAME("idr", "idr_rt"), "xarray")) { tt->refresh_task_table = refresh_xarray_task_table; tt->pid_xarray = symbol_value("init_pid_ns") + OFFSET(pid_namespace_idr) + OFFSET(idr_idr_rt); tt->flags |= PID_XARRAY; } else if STREQ(MEMBER_TYPE_NAME("idr", "idr_rt"), "radix_tree_root") { if (MEMBER_EXISTS("radix_tree_root", "rnode")) { tt->refresh_task_table = refresh_radix_tree_task_table; 
tt->pid_radix_tree = symbol_value("init_pid_ns") + OFFSET(pid_namespace_idr) + OFFSET(idr_idr_rt); tt->flags |= PID_RADIX_TREE; } else if (MEMBER_EXISTS("radix_tree_root", "xa_head")) { tt->refresh_task_table = refresh_xarray_task_table; tt->pid_xarray = symbol_value("init_pid_ns") + OFFSET(pid_namespace_idr) + OFFSET(idr_idr_rt); tt->flags |= PID_XARRAY; } } else error(FATAL, "unknown pid_namespace.idr type: %s\n", MEMBER_TYPE_NAME("idr", "idr_rt")); } else if (symbol_exists("pid_hash") && symbol_exists("pidhash_shift")) { int pidhash_shift; if (get_symbol_type("PIDTYPE_PID", NULL, &req) != TYPE_CODE_ENUM) error(FATAL, "cannot determine PIDTYPE_PID pid_hash dimension\n"); get_symbol_data("pidhash_shift", sizeof(int), &pidhash_shift); tt->pidhash_len = 1 << pidhash_shift; get_symbol_data("pid_hash", sizeof(ulong), &tt->pidhash_addr); if (VALID_MEMBER(pid_link_pid) && VALID_MEMBER(pid_hash_chain)) { get_symbol_data("pid_hash", sizeof(ulong), &tt->pidhash_addr); tt->refresh_task_table = refresh_pid_hash_task_table; } else { tt->pidhash_addr = symbol_value("pid_hash"); if (LKCD_KERNTYPES()) { if (VALID_STRUCT(pid_link)) { if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers)) tt->refresh_task_table = refresh_hlist_task_table_v3; else tt->refresh_task_table = refresh_hlist_task_table_v2; } else tt->refresh_task_table = refresh_hlist_task_table; builtin_array_length("pid_hash", tt->pidhash_len, NULL); } else { if (!get_array_length("pid_hash", NULL, sizeof(void *)) && VALID_STRUCT(pid_link)) { if (VALID_STRUCT(upid) && VALID_MEMBER(pid_numbers)) tt->refresh_task_table = refresh_hlist_task_table_v3; else tt->refresh_task_table = refresh_hlist_task_table_v2; } else tt->refresh_task_table = refresh_hlist_task_table; } } tt->flags |= PID_HASH; } else if (symbol_exists("pid_hash")) { if (get_symbol_type("PIDTYPE_PGID", NULL, &req) != TYPE_CODE_ENUM) error(FATAL, "cannot determine PIDTYPE_PID pid_hash dimension\n"); if (!(tt->pidhash_len = get_array_length("pid_hash", &dim, 
SIZE(list_head)))) error(FATAL, "cannot determine pid_hash array dimensions\n"); tt->pidhash_addr = symbol_value("pid_hash"); tt->refresh_task_table = refresh_pid_hash_task_table; tt->flags |= PID_HASH; } else if (symbol_exists("pidhash")) { tt->pidhash_addr = symbol_value("pidhash"); tt->pidhash_len = get_array_length("pidhash", NULL, 0); if (tt->pidhash_len == 0) { if (!(nsp = next_symbol("pidhash", NULL))) error(FATAL, "cannot determine pidhash length\n"); tt->pidhash_len = (nsp->value-tt->pidhash_addr) / sizeof(void *); } if (ACTIVE()) tt->refresh_task_table = refresh_pidhash_task_table; tt->flags |= PIDHASH; } tt->pf_kthread = UNINITIALIZED; get_active_set(); if (tt->flags & ACTIVE_ONLY) tt->refresh_task_table = refresh_active_task_table; tt->refresh_task_table(); if (tt->flags & TASK_REFRESH_OFF) tt->flags &= ~(TASK_REFRESH|TASK_REFRESH_OFF); /* * Get the IRQ stacks info if it's configured. */ if (VALID_STRUCT(irq_ctx)) irqstacks_init(); if (ACTIVE()) { active_pid = REMOTE() ? pc->server_pid : LOCAL_ACTIVE() ? pc->program_pid : 1; set_context(NO_TASK, active_pid, FALSE); tt->this_task = pid_to_task(active_pid); } else { if (INVALID_SIZE(note_buf)) STRUCT_SIZE_INIT(note_buf, "note_buf_t"); if (KDUMP_DUMPFILE()) map_cpus_to_prstatus(); else if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE()) map_cpus_to_prstatus_kdump_cmprs(); please_wait("determining panic task"); set_context(get_panic_context(), NO_PID, TRUE); please_wait_done(); } sort_context_array(); sort_tgid_array(); if (pc->flags & SILENT) initialize_task_state(); stack_overflow_check_init(); if (machdep->hz) { ulonglong uptime_jiffies; ulong uptime_sec; get_uptime(NULL, &uptime_jiffies); uptime_sec = (uptime_jiffies)/(ulonglong)machdep->hz; kt->boot_date.tv_sec = kt->date.tv_sec - uptime_sec; kt->boot_date.tv_nsec = 0; } tt->flags |= TASK_INIT_DONE; } /* * Store the pointers to the hard and soft irq_ctx arrays as well as * the task pointers contained within each of them. 
*/
static void
irqstacks_init(void)
{
	int i;
	char *thread_info_buf;
	struct syment *hard_sp, *soft_sp;
	ulong ptr, hardirq_next_sp = 0;

	if (!(tt->hardirq_ctx = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
		error(FATAL, "cannot malloc hardirq_ctx space.");
	if (!(tt->hardirq_tasks = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
		error(FATAL, "cannot malloc hardirq_tasks space.");
	if (!(tt->softirq_ctx = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
		error(FATAL, "cannot malloc softirq_ctx space.");
	if (!(tt->softirq_tasks = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
		error(FATAL, "cannot malloc softirq_tasks space.");

	thread_info_buf = GETBUF(SIZE(irq_ctx));

	/*
	 * Locate the hardirq stacks: try the per-cpu symbols first,
	 * then fall back to the old-style hardirq_ctx[] array.
	 */
	if ((hard_sp = per_cpu_symbol_search("per_cpu__hardirq_ctx")) ||
	    (hard_sp = per_cpu_symbol_search("per_cpu__hardirq_stack"))) {
		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!kt->__per_cpu_offset[i])
					continue;
				ptr = hard_sp->value + kt->__per_cpu_offset[i];
				if (!readmem(ptr, KVADDR, &ptr,
				    sizeof(void *), "hardirq ctx",
				    RETURN_ON_ERROR)) {
					error(INFO,
					    "cannot read hardirq_ctx[%d] at %lx\n",
					    i, ptr);
					continue;
				}
				tt->hardirq_ctx[i] = ptr;
			}
		} else
			tt->hardirq_ctx[0] = hard_sp->value;
	} else if (symbol_exists("hardirq_ctx")) {
		i = get_array_length("hardirq_ctx", NULL, 0);
		get_symbol_data("hardirq_ctx",
			sizeof(long)*(i <= NR_CPUS ? i : NR_CPUS),
			&tt->hardirq_ctx[0]);
	} else
		error(WARNING, "cannot determine hardirq_ctx addresses\n");

	/* TODO: Use multithreading to update irq_tasks in parallel. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!(tt->hardirq_ctx[i]))
			continue;
		if (!readmem(tt->hardirq_ctx[i], KVADDR, thread_info_buf,
		    SIZE(irq_ctx), "hardirq thread_union",
		    RETURN_ON_ERROR)) {
			error(INFO, "cannot read hardirq_ctx[%d] at %lx\n",
				i, tt->hardirq_ctx[i]);
			continue;
		}
		if (MEMBER_EXISTS("irq_ctx", "tinfo"))
			tt->hardirq_tasks[i] =
				ULONG(thread_info_buf+OFFSET(thread_info_task));
		else {
			hardirq_next_sp = ULONG(thread_info_buf);
			tt->hardirq_tasks[i] =
				stkptr_to_task(hardirq_next_sp);
		}
	}

	/*
	 * Same procedure for the softirq stacks.
	 */
	if ((soft_sp = per_cpu_symbol_search("per_cpu__softirq_ctx")) ||
	    (soft_sp = per_cpu_symbol_search("per_cpu__softirq_stack"))) {
		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!kt->__per_cpu_offset[i])
					continue;
				ptr = soft_sp->value + kt->__per_cpu_offset[i];
				if (!readmem(ptr, KVADDR, &ptr,
				    sizeof(void *), "softirq ctx",
				    RETURN_ON_ERROR)) {
					error(INFO,
					    "cannot read softirq_ctx[%d] at %lx\n",
					    i, ptr);
					continue;
				}
				tt->softirq_ctx[i] = ptr;
			}
		} else
			tt->softirq_ctx[0] = soft_sp->value;
	} else if (symbol_exists("softirq_ctx")) {
		i = get_array_length("softirq_ctx", NULL, 0);
		get_symbol_data("softirq_ctx",
			sizeof(long)*(i <= NR_CPUS ? i : NR_CPUS),
			&tt->softirq_ctx[0]);
	} else
		error(WARNING, "cannot determine softirq_ctx addresses\n");

	for (i = 0; i < NR_CPUS; i++) {
		if (!(tt->softirq_ctx[i]))
			continue;
		if (!readmem(tt->softirq_ctx[i], KVADDR, thread_info_buf,
		    SIZE(irq_ctx), "softirq thread_union",
		    RETURN_ON_ERROR)) {
			/*
			 * Fix: report the softirq_ctx address that failed to
			 * be read; this previously printed hardirq_ctx[i].
			 */
			error(INFO, "cannot read softirq_ctx[%d] at %lx\n",
				i, tt->softirq_ctx[i]);
			continue;
		}
		if (MEMBER_EXISTS("irq_ctx", "tinfo"))
			tt->softirq_tasks[i] =
				ULONG(thread_info_buf+OFFSET(thread_info_task));
		else {
			tt->softirq_tasks[i] =
				stkptr_to_task(ULONG(thread_info_buf));
			/* Checking if softirq => hardirq nested stack */
			if ((tt->softirq_tasks[i] != NO_TASK) &&
			    hardirq_next_sp) {
				if ((tt->softirq_ctx[i] <= hardirq_next_sp) &&
				    (hardirq_next_sp < tt->softirq_ctx[i] +
				    STACKSIZE()))
					tt->hardirq_tasks[i] =
						tt->softirq_tasks[i];
			}
		}
	}

	tt->flags |= IRQSTACKS;

	FREEBUF(thread_info_buf);
}

/*
 * Return TRUE if addr falls within cpu's hardirq or softirq stack,
 * as selected by the BT_HARDIRQ/BT_SOFTIRQ type argument.
 */
int
in_irq_ctx(ulonglong type, int cpu, ulong addr)
{
	if (!(tt->flags & IRQSTACKS))
		return FALSE;

	switch (type)
	{
	case BT_SOFTIRQ:
		if (tt->softirq_ctx[cpu] &&
		    (addr >= tt->softirq_ctx[cpu]) &&
		    (addr < (tt->softirq_ctx[cpu] + STACKSIZE())))
			return TRUE;
		break;

	case BT_HARDIRQ:
		if (tt->hardirq_ctx[cpu] &&
		    (addr >= tt->hardirq_ctx[cpu]) &&
		    (addr < (tt->hardirq_ctx[cpu] + STACKSIZE())))
			return TRUE;
		break;
	}

	return FALSE;
}

/*
 * Allocate or re-allocated space for the task_context array and task list.
*/

/*
 * Size -- or grow on subsequent calls -- the four per-task bookkeeping
 * arrays so that they can hold cnt tasks: the raw task-address list, the
 * task_context array, the context_by_task pointer array and the
 * tgid_context array.  Any allocation failure is fatal.
 */
static void
allocate_task_space(int cnt)
{
	if (tt->context_array != NULL) {
		/* Already initialized: resize each array in place. */
		if (!(tt->task_local = (void *)
		    realloc(tt->task_local, cnt * sizeof(void *))))
			error(FATAL,
			    "%scannot realloc kernel task array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);

		if (!(tt->context_array = (struct task_context *)
		    realloc(tt->context_array,
		    cnt * sizeof(struct task_context))))
			error(FATAL,
			    "%scannot realloc context array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);

		if (!(tt->context_by_task = (struct task_context **)
		    realloc(tt->context_by_task,
		    cnt * sizeof(struct task_context*))))
			error(FATAL,
			    "%scannot realloc context_by_task array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);

		if (!(tt->tgid_array = (struct tgid_context *)
		    realloc(tt->tgid_array,
		    cnt * sizeof(struct tgid_context))))
			error(FATAL,
			    "%scannot realloc tgid array (%d tasks)",
			    (pc->flags & RUNTIME) ? "" : "\n", cnt);
	} else {
		/* First call: create the arrays from scratch. */
		if (!(tt->task_local = (void *)
		    malloc(cnt * sizeof(void *))))
			error(FATAL,
			    "cannot malloc kernel task array (%d tasks)", cnt);

		if (!(tt->context_array = (struct task_context *)
		    malloc(cnt * sizeof(struct task_context))))
			error(FATAL,
			    "cannot malloc context array (%d tasks)", cnt);

		if (!(tt->context_by_task = (struct task_context **)
		    malloc(cnt * sizeof(struct task_context*))))
			error(FATAL,
			    "cannot malloc context_by_task array (%d tasks)",
			    cnt);

		if (!(tt->tgid_array = (struct tgid_context *)
		    malloc(cnt * sizeof(struct tgid_context))))
			error(FATAL,
			    "cannot malloc tgid array (%d tasks)", cnt);
	}
}

/*
 * This routine runs one time on dumpfiles, and constantly on live systems.
 * It walks through the kernel task array looking for active tasks, and
 * populates the local task table with their essential data.
*/
static void
refresh_fixed_task_table(void)
{
	int i;
	ulong *tlp;
	ulong curtask;
	ulong retries;
	ulong curpid;
	char *tp;

/*
 * A slot is "free" when it is zero or points back into the
 * [tt->task_start, tt->task_end) region of the fixed task array itself;
 * anything else is taken to be a live task_struct pointer.
 */
#define TASK_FREE(x) ((x == 0) || (((ulong)(x) >= tt->task_start) && \
	((ulong)(x) < tt->task_end)))
#define TASK_IN_USE(x) (!TASK_FREE(x))

	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))
		return;

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" :
			"%splease wait... (gathering task table data)",
			GDB_PATCHED() ? "" : "\n");
		fflush(fp);
		if (!symbol_exists("panic_threads"))
			tt->flags |= POPULATE_PANIC;
	}

	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
		return;

	curpid = NO_PID;
	curtask = NO_TASK;

	/*
	 * The current task's task_context entry may change,
	 * or the task may not even exist anymore.
	 */
	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
		curtask = CURRENT_TASK();
		curpid = CURRENT_PID();
	}

	retries = 0;
retry:
	if (!readmem(tt->task_start, KVADDR, tt->task_local,
	    tt->max_tasks * sizeof(void *),
	    "kernel task array", RETURN_ON_ERROR))
		error(FATAL, "cannot read kernel task array");

	clear_task_cache();

	for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0;
	    i < tt->max_tasks; i++, tlp++) {
		if (TASK_IN_USE(*tlp)) {
			/*
			 * On a live system a task may disappear while being
			 * read; start the whole pass over.  On a dumpfile
			 * just skip the unreadable entry.
			 */
			if (!(tp = fill_task_struct(*tlp))) {
				if (DUMPFILE())
					continue;
				retries++;
				goto retry;
			}
			add_context(*tlp, tp);
		}
	}

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "\r \r");
		fflush(fp);
	}

	if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
		refresh_context(curtask, curpid);

	tt->retries = MAX(tt->retries, retries);
}

/*
 * Verify that a task_context's data makes sense enough to include
 * in the task_context array.  Level 1 additionally checks the task's
 * next_task link and raw processor bits; both levels validate ptask
 * and the processor range.
 */
static int
verify_task(struct task_context *tc, int level)
{
	int i;
	ulong next_task;
	ulong readflag;

	/* Suppress read-error noise on live systems, where tasks can exit
	 * between the list walk and this verification. */
	readflag = ACTIVE() ? (RETURN_ON_ERROR|QUIET) : (RETURN_ON_ERROR);

	switch (level)
	{
	case 1:
		if (!readmem(tc->task + OFFSET(task_struct_next_task),
		    KVADDR, &next_task, sizeof(void *), "next_task",
		    readflag)) {
			return FALSE;
		}
		if (!IS_TASK_ADDR(next_task))
			return FALSE;
		if (tc->processor & ~NO_PROC_ID)
			return FALSE;
		/* fall through */
	case 2:
		if (!IS_TASK_ADDR(tc->ptask))
			return FALSE;
		if ((tc->processor < 0) || (tc->processor >= NR_CPUS)) {
			/*
			 * A corrupt cpu value can be repaired if the task
			 * is known to be the active task on some cpu.
			 */
			for (i = 0; i < NR_CPUS; i++) {
				if (tc->task == tt->active_set[i]) {
					error(WARNING,
					    "active task %lx on cpu %d: corrupt cpu value: %u\n\n",
					    tc->task, i, tc->processor);
					tc->processor = i;
					return TRUE;
				}
			}
			if (CRASHDEBUG(1))
				error(INFO,
				    "verify_task: task: %lx invalid processor: %u",
				    tc->task, tc->processor);
			return FALSE;
		}
		break;
	}

	return TRUE;
}

/*
 * This routine runs one time on dumpfiles, and constantly on live systems.
 * It walks through the kernel task array looking for active tasks, and
 * populates the local task table with their essential data.
 */

#define MAX_UNLIMITED_TASK_RETRIES (500)

void
refresh_unlimited_task_table(void)
{
	int i;
	ulong *tlp;
	ulong curtask;
	ulong curpid;
	struct list_data list_data, *ld;
	ulong init_tasks[NR_CPUS];
	ulong retries;
	char *tp;
	int cnt;

	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))
		return;

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" :
			"%splease wait... (gathering task table data)",
			GDB_PATCHED() ? "" : "\n");
		fflush(fp);
		if (!symbol_exists("panic_threads"))
			tt->flags |= POPULATE_PANIC;
	}

	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
		return;

	curpid = NO_PID;
	curtask = NO_TASK;
	tp = NULL;

	/*
	 * The current task's task_context entry may change,
	 * or the task may not even exist anymore.
	 */
	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
		curtask = CURRENT_TASK();
		curpid = CURRENT_PID();
	}

	retries = 0;
retry:
	/* On a dumpfile a retry means the list itself is corrupt; fall back
	 * to the pidhash walk when one is available. */
	if (retries && DUMPFILE()) {
		if (tt->flags & PIDHASH) {
			error(WARNING,
			    "\ncannot gather a stable task list -- trying pidhash\n");
			refresh_pidhash_task_table();
			return;
		}
		error(FATAL, "\ncannot gather a stable task list\n");
	}

	if ((retries == MAX_UNLIMITED_TASK_RETRIES) &&
	    !(tt->flags & TASK_INIT_DONE))
		error(FATAL, "cannot gather a stable task list\n");

	/*
	 * Populate the task_local array with a quick walk-through.
	 * If there's not enough room in the local array, realloc() it.
	 */
	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	ld->flags |= RETURN_ON_LIST_ERROR;
	ld->start = symbol_value("init_task_union");
	ld->member_offset = OFFSET(task_struct_next_task);

	if (!hq_open()) {
		error(INFO, "cannot hash task_struct entries\n");
		if (!(tt->flags & TASK_INIT_DONE))
			clean_exit(1);
		error(INFO, "using stale task_structs\n");
		/* NOTE(review): tp is still NULL on the first pass through
		 * here -- confirm FREEBUF(NULL) is benign. */
		FREEBUF(tp);
		return;
	}

	if ((cnt = do_list(ld)) < 0) {
		retries++;
		goto retry;
	}

	if ((cnt+NR_CPUS+1) > tt->max_tasks) {
		tt->max_tasks = cnt + NR_CPUS + TASK_SLUSH;
		allocate_task_space(tt->max_tasks);
		hq_close();
		if (!DUMPFILE())
			retries++;
		goto retry;
	}

	BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
	cnt = retrieve_list((ulong *)tt->task_local, cnt);
	hq_close();

	/*
	 * If SMP, add in the other idle tasks.
	 */
	if (kt->flags & SMP) {
		/*
		 * Now get the rest of the init_task[] entries, starting
		 * at offset 1 since we've got the init_task already.
		 */
		BZERO(&init_tasks[0], sizeof(ulong) * NR_CPUS);
		get_idle_threads(&init_tasks[0], kt->cpus);

		tlp = (ulong *)tt->task_local;
		tlp += cnt;
		for (i = 1; i < kt->cpus; i++) {
			if (init_tasks[i]) {
				*tlp = init_tasks[i];
				tlp++;
			}
		}
	}

	clear_task_cache();

	for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0;
	    i < tt->max_tasks; i++, tlp++) {
		if (!(*tlp))
			continue;

		if (!IS_TASK_ADDR(*tlp)) {
			error(INFO,
			    "\ninvalid task address in task list: %lx\n",
			    *tlp);
			retries++;
			goto retry;
		}

		if (!(tp = fill_task_struct(*tlp))) {
			if (DUMPFILE())
				continue;
			retries++;
			goto retry;
		}

		add_context(*tlp, tp);
	}

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "\r \r");
		fflush(fp);
	}

	if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
		refresh_context(curtask, curpid);

	tt->retries = MAX(tt->retries, retries);
}

/*
 * This routine runs one time on dumpfiles, and constantly on live systems.
 * It walks through the kernel pidhash array looking for active tasks, and
 * populates the local task table with their essential data.
 *
 * The following manner of refreshing the task table can be used for all
 * kernels that have a pidhash[] array, whether or not they still
 * have a fixed task[] array or an unlimited list.
 */
static void
refresh_pidhash_task_table(void)
{
	int i;
	char *pidhash, *tp;
	ulong *pp, next, pnext;
	int len, cnt;
	ulong curtask;
	ulong curpid;
	ulong retries;
	ulong *tlp;

	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))  /* impossible */
		return;

	if (DUMPFILE()) {  /* impossible */
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ? "" :
			"\rplease wait... (gathering task table data)");
		fflush(fp);
		if (!symbol_exists("panic_threads"))
			tt->flags |= POPULATE_PANIC;
	}

	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
		return;

	curpid = NO_PID;
	curtask = NO_TASK;

	/*
	 * The current task's task_context entry may change,
	 * or the task may not even exist anymore.
	 */
	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
		curtask = CURRENT_TASK();
		curpid = CURRENT_PID();
	}

	len = tt->pidhash_len;
	pidhash = GETBUF(len * sizeof(ulong));
	retries = 0;

retry_pidhash:
	if (retries && DUMPFILE())
		error(FATAL,
		    "\ncannot gather a stable task list via pidhash\n");

	/* NOTE(review): retries is a ulong; %d should arguably be %ld */
	if ((retries == MAX_UNLIMITED_TASK_RETRIES) &&
	    !(tt->flags & TASK_INIT_DONE))
		error(FATAL,
		    "\ncannot gather a stable task list via pidhash (%d retries)\n",
		    retries);

	if (!readmem(tt->pidhash_addr, KVADDR, pidhash,
	    len * sizeof(ulong), "pidhash contents", RETURN_ON_ERROR))
		error(FATAL, "\ncannot read pidhash array\n");

	if (!hq_open()) {
		error(INFO, "cannot hash task_struct entries\n");
		if (!(tt->flags & TASK_INIT_DONE))
			clean_exit(1);
		error(INFO, "using stale task_structs\n");
		FREEBUF(pidhash);
		return;
	}

	/*
	 * Get the idle threads first.
	 */
	cnt = 0;
	for (i = 0; i < kt->cpus; i++) {
		if (hq_enter(tt->idle_threads[i]))
			cnt++;
		else
			error(WARNING, "%sduplicate idle tasks?\n",
				DUMPFILE() ? "\n" : "");
	}

	/*
	 * Then dump the pidhash contents.
	 */
	for (i = 0, pp = (ulong *)pidhash; i < len; i++, pp++) {
		if (!(*pp) || !IS_KVADDR(*pp))
			continue;
		/*
		 * Minimum verification here -- make sure that a task address
		 * and its pidhash_next entry (if any) both appear to be
		 * properly aligned before accepting the task.
		 */
		next = *pp;
		while (next) {
			if (!IS_TASK_ADDR(next)) {
				error(INFO,
				    "%sinvalid task address in pidhash: %lx\n",
				    DUMPFILE() ? "\n" : "", next);
				if (DUMPFILE())
					break;
				hq_close();
				retries++;
				goto retry_pidhash;
			}

			if (!readmem(next + OFFSET(task_struct_pidhash_next),
			    KVADDR, &pnext, sizeof(void *),
			    "pidhash_next entry", QUIET|RETURN_ON_ERROR)) {
				error(INFO,
				    "%scannot read from task: %lx\n",
				    DUMPFILE() ? "\n" : "", next);
				if (DUMPFILE())
					break;
				hq_close();
				retries++;
				goto retry_pidhash;
			}

			if (!hq_enter(next)) {
				error(INFO,
				    "%sduplicate task in pidhash: %lx\n",
				    DUMPFILE() ? "\n" : "", next);
				if (DUMPFILE())
					break;
				hq_close();
				retries++;
				goto retry_pidhash;
			}

			next = pnext;
			cnt++;
		}
	}

	if ((cnt+1) > tt->max_tasks) {
		tt->max_tasks = cnt + NR_CPUS + TASK_SLUSH;
		allocate_task_space(tt->max_tasks);
		hq_close();
		if (!DUMPFILE())
			retries++;
		goto retry_pidhash;
	}

	BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
	cnt = retrieve_list((ulong *)tt->task_local, cnt);
	hq_close();

	clear_task_cache();

	for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0;
	    i < tt->max_tasks; i++, tlp++) {
		if (!(*tlp))
			continue;

		if (!IS_TASK_ADDR(*tlp)) {
			error(WARNING,
			    "%sinvalid task address found in task list: %lx\n",
			    DUMPFILE() ? "\n" : "", *tlp);
			if (DUMPFILE())
				continue;
			retries++;
			goto retry_pidhash;
		}

		if (!(tp = fill_task_struct(*tlp))) {
			if (DUMPFILE())
				continue;
			retries++;
			goto retry_pidhash;
		}

		add_context(*tlp, tp);
	}

	FREEBUF(pidhash);

	if (DUMPFILE()) {
		fprintf(fp, (pc->flags & SILENT) || !(pc->flags & TTY) ?
			"" : "\r \r");
		fflush(fp);
	}

	if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
		refresh_context(curtask, curpid);

	tt->retries = MAX(tt->retries, retries);
}

/*
 * The following manner of refreshing the task table is used for all
 * kernels that have a pid_hash[][] array.
 *
 * This routine runs one time on dumpfiles, and constantly on live systems.
 * It walks through the kernel pid_hash[PIDTYPE_PID] array looking for active
 * tasks, and populates the local task table with their essential data.
*/

/*
 * Convert between a task_struct address and the address of its embedded
 * pids[...].pid.pid_chain list_head, which is what the kernel pid_hash
 * buckets actually link together.
 */
#define HASH_TO_TASK(X) ((ulong)(X) - (OFFSET(task_struct_pids) + \
	OFFSET(pid_link_pid) + OFFSET(pid_hash_chain)))

#define TASK_TO_HASH(X) ((ulong)(X) + (OFFSET(task_struct_pids) + \
	OFFSET(pid_link_pid) + OFFSET(pid_hash_chain)))

static void
refresh_pid_hash_task_table(void)
{
	int i;
	struct kernel_list_head *pid_hash, *pp, *kpp;
	char *tp;
	ulong next, pnext;
	int len, cnt;
	ulong curtask;
	ulong curpid;
	ulong retries;
	ulong *tlp;

	if (DUMPFILE() && (tt->flags & TASK_INIT_DONE))  /* impossible */
		return;

	if (DUMPFILE()) {  /* impossible */
		please_wait("gathering task table data");
		if (!symbol_exists("panic_threads"))
			tt->flags |= POPULATE_PANIC;
	}

	if (ACTIVE() && !(tt->flags & TASK_REFRESH))
		return;

	curpid = NO_PID;
	curtask = NO_TASK;

	/*
	 * The current task's task_context entry may change,
	 * or the task may not even exist anymore.
	 */
	if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) {
		curtask = CURRENT_TASK();
		curpid = CURRENT_PID();
	}

	len = tt->pidhash_len;
	pid_hash = (struct kernel_list_head *)GETBUF(len * SIZE(list_head));
	retries = 0;

retry_pid_hash:
	if (retries && DUMPFILE())
		error(FATAL,
		    "\ncannot gather a stable task list via pid_hash\n");

	/* NOTE(review): retries is a ulong; %d should arguably be %ld */
	if ((retries == MAX_UNLIMITED_TASK_RETRIES) &&
	    !(tt->flags & TASK_INIT_DONE))
		error(FATAL,
		    "\ncannot gather a stable task list via pid_hash (%d retries)\n",
		    retries);

	if (!readmem(tt->pidhash_addr, KVADDR, pid_hash,
	    len * SIZE(list_head), "pid_hash contents", RETURN_ON_ERROR))
		error(FATAL, "\ncannot read pid_hash array\n");

	if (!hq_open()) {
		error(INFO, "cannot hash task_struct entries\n");
		if (!(tt->flags & TASK_INIT_DONE))
			clean_exit(1);
		error(INFO, "using stale task_structs\n");
		FREEBUF(pid_hash);
		return;
	}

	/*
	 * Get the idle threads first.
	 */
	cnt = 0;
	for (i = 0; i < kt->cpus; i++) {
		if (hq_enter(tt->idle_threads[i]))
			cnt++;
		else
			error(WARNING, "%sduplicate idle tasks?\n",
				DUMPFILE() ? "\n" : "");
	}

	for (i = 0; i < len; i++) {
		pp = &pid_hash[i];
		/* kernel address of this bucket: an empty bucket's next
		 * pointer refers back to the bucket itself */
		kpp = (struct kernel_list_head *)(tt->pidhash_addr +
			i * SIZE(list_head));

		if (pp->next == kpp)
			continue;

		if (CRASHDEBUG(7))
			console("%lx: pid_hash[%d]: %lx (%lx) %lx (%lx)\n",
				kpp, i, pp->next, HASH_TO_TASK(pp->next),
				pp->prev, HASH_TO_TASK(pp->prev));

		next = (ulong)HASH_TO_TASK(pp->next);
		while (next) {
			if (!IS_TASK_ADDR(next)) {
				error(INFO,
				    "%sinvalid task address in pid_hash: %lx\n",
				    DUMPFILE() ? "\n" : "", next);
				if (DUMPFILE())
					break;
				hq_close();
				retries++;
				goto retry_pid_hash;
			}

			if (!readmem(TASK_TO_HASH(next),
			    KVADDR, &pnext, sizeof(void *),
			    "pid_hash entry", QUIET|RETURN_ON_ERROR)) {
				error(INFO,
				    "%scannot read from task: %lx\n",
				    DUMPFILE() ? "\n" : "", next);
				if (DUMPFILE())
					break;
				hq_close();
				retries++;
				goto retry_pid_hash;
			}

			if (!is_idle_thread(next) && !hq_enter(next)) {
				error(INFO,
				    "%sduplicate task in pid_hash: %lx\n",
				    DUMPFILE() ? "\n" : "", next);
				if (DUMPFILE())
					break;
				hq_close();
				retries++;
				goto retry_pid_hash;
			}

			cnt++;

			/* chain wrapped back to the bucket head: done */
			if (pnext == (ulong)kpp)
				break;

			next = HASH_TO_TASK(pnext);
		}
	}

	/*
	 * NOTE(review): unlike the sibling refresh routines, there is no
	 * (cnt > tt->max_tasks) re-allocation check here before the
	 * retrieve_list() below -- confirm max_tasks is always large
	 * enough on this code path.
	 */
	BZERO(tt->task_local, tt->max_tasks * sizeof(void *));
	cnt = retrieve_list((ulong *)tt->task_local, cnt);
	hq_close();

	clear_task_cache();

	for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0;
	    i < tt->max_tasks; i++, tlp++) {
		if (!(*tlp))
			continue;

		if (!IS_TASK_ADDR(*tlp)) {
			error(WARNING,
			    "%sinvalid task address found in task list: %lx\n",
			    DUMPFILE() ? "\n" : "", *tlp);
			if (DUMPFILE())
				continue;
			retries++;
			goto retry_pid_hash;
		}

		if (!(tp = fill_task_struct(*tlp))) {
			if (DUMPFILE())
				continue;
			retries++;
			goto retry_pid_hash;
		}

		add_context(*tlp, tp);
	}

	FREEBUF(pid_hash);

	please_wait_done();

	if (ACTIVE() && (tt->flags & TASK_INIT_DONE))
		refresh_context(curtask, curpid);

	tt->retries = MAX(tt->retries, retries);
}

/*
 * Adapt to yet another scheme, using later 2.6 hlist_head and hlist_nodes.
*/ #define HLIST_TO_TASK(X) ((ulong)(X) - (OFFSET(task_struct_pids) + \ OFFSET(pid_pid_chain))) static void refresh_hlist_task_table(void) { int i; ulong *pid_hash; struct syment *sp; ulong pidhash_array; ulong kpp; char *tp; ulong next, pnext, pprev; char *nodebuf; int plen, len, cnt; long value; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } if (!(plen = get_array_length("pid_hash", NULL, sizeof(void *)))) { /* * Workaround for gcc omitting debuginfo data for pid_hash. */ if (enumerator_value("PIDTYPE_MAX", &value)) { if ((sp = next_symbol("pid_hash", NULL)) && (((sp->value - tt->pidhash_addr) / sizeof(void *)) < value)) error(WARNING, "possible pid_hash array mis-handling\n"); plen = (int)value; } else { error(WARNING, "cannot determine pid_hash array dimensions\n"); plen = 1; } } pid_hash = (ulong *)GETBUF(plen * sizeof(void *)); if (!readmem(tt->pidhash_addr, KVADDR, pid_hash, plen * SIZE(hlist_head), "pid_hash[] contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (CRASHDEBUG(7)) for (i = 0; i < plen; i++) console("pid_hash[%d]: %lx\n", i, pid_hash[i]); /* * The zero'th (PIDTYPE_PID) entry is the hlist_head array * that we want. 
*/ if (CRASHDEBUG(1)) { if (!enumerator_value("PIDTYPE_PID", &value)) error(WARNING, "possible pid_hash array mis-handling: PIDTYPE_PID: (unknown)\n"); else if (value != 0) error(WARNING, "possible pid_hash array mis-handling: PIDTYPE_PID: %d \n", value); } pidhash_array = pid_hash[0]; FREEBUF(pid_hash); len = tt->pidhash_len; pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); nodebuf = GETBUF(SIZE(hlist_node)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(pidhash_array, KVADDR, pid_hash, len * SIZE(hlist_head), "pid_hash[0] contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash[0] array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } for (i = 0; i < len; i++) { if (!pid_hash[i]) continue; if (!readmem(pid_hash[i], KVADDR, nodebuf, SIZE(hlist_node), "pid_hash node", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read pid_hash node\n"); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } kpp = pid_hash[i]; next = (ulong)HLIST_TO_TASK(kpp); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", i, pid_hash[i], next, kpp, pnext, pprev); while (next) { if (!IS_TASK_ADDR(next)) { error(INFO, "%sinvalid task address in pid_hash: %lx\n", DUMPFILE() ? 
"\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%sduplicate task in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } cnt++; if (!pnext) break; if (!readmem((ulonglong)pnext, KVADDR, nodebuf, SIZE(hlist_node), "task hlist_node", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read hlist_node from task\n"); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } kpp = (ulong)pnext; next = (ulong)HLIST_TO_TASK(kpp); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console(" chained task: %lx (node: %lx) next: %lx pprev: %lx\n", (ulong)HLIST_TO_TASK(kpp), kpp, pnext, pprev); } } if (cnt > tt->max_tasks) { tt->max_tasks = cnt + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pid_hash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } add_context(*tlp, tp); } FREEBUF(pid_hash); FREEBUF(nodebuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * 2.6.17 replaced: * static struct hlist_head *pid_hash[PIDTYPE_MAX]; * with * static struct hlist_head *pid_hash; */ static void refresh_hlist_task_table_v2(void) { int i; ulong *pid_hash; ulong pidhash_array; ulong kpp; char *tp; ulong next, pnext, pprev; char *nodebuf; int len, cnt; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. 
*/ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); len = tt->pidhash_len; pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); nodebuf = GETBUF(SIZE(pid_link)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(pidhash_array, KVADDR, pid_hash, len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } for (i = 0; i < len; i++) { if (!pid_hash[i]) continue; if (!readmem(pid_hash[i], KVADDR, nodebuf, SIZE(pid_link), "pid_hash node pid_link", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read pid_hash node pid_link\n"); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } kpp = pid_hash[i]; next = ULONG(nodebuf + OFFSET(pid_link_pid)); if (next) next -= OFFSET(task_struct_pids); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console("pid_hash[%d]: %lx task: %lx (node: %lx) next: %lx pprev: %lx\n", i, pid_hash[i], next, kpp, pnext, pprev); while (1) { if (next) { if (!IS_TASK_ADDR(next)) { error(INFO, "%sinvalid task address in pid_hash: %lx\n", DUMPFILE() ? 
"\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%sduplicate task in pid_hash: %lx\n", DUMPFILE() ? "\n" : "", next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } } cnt++; if (!pnext) break; if (!readmem((ulonglong)pnext, KVADDR, nodebuf, SIZE(pid_link), "task hlist_node pid_link", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read hlist_node pid_link from node next\n"); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } kpp = (ulong)pnext; next = ULONG(nodebuf + OFFSET(pid_link_pid)); if (next) next -= OFFSET(task_struct_pids); pnext = ULONG(nodebuf + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(hlist_node_pprev)); if (CRASHDEBUG(1)) console(" chained task: %lx (node: %lx) next: %lx pprev: %lx\n", next, kpp, pnext, pprev); } } if (cnt > tt->max_tasks) { tt->max_tasks = cnt + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pid_hash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } add_context(*tlp, tp); } FREEBUF(pid_hash); FREEBUF(nodebuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * 2.6.24: The pid_hash[] hlist_head entries were changed to point * to the hlist_node structure embedded in a upid structure. 
*/ static void refresh_hlist_task_table_v3(void) { int i; ulong *pid_hash; ulong pidhash_array; ulong kpp; char *tp; ulong next, pnext, pprev; ulong upid; char *nodebuf; int len, cnt; ulong curtask; ulong curpid; ulong retries; ulong *tlp; uint upid_nr; ulong upid_ns; int chained; ulong pid; ulong pid_tasks_0; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } get_symbol_data("pid_hash", sizeof(void *), &pidhash_array); len = tt->pidhash_len; pid_hash = (ulong *)GETBUF(len * SIZE(hlist_head)); nodebuf = GETBUF(SIZE(upid)); retries = 0; retry_pid_hash: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via pid_hash\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via pid_hash (%d retries)\n", retries); if (!readmem(pidhash_array, KVADDR, pid_hash, len * SIZE(hlist_head), "pid_hash contents", RETURN_ON_ERROR)) error(FATAL, "\ncannot read pid_hash array\n"); if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); FREEBUF(pid_hash); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (!tt->idle_threads[i]) continue; if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? 
"\n" : ""); } for (i = 0; i < len; i++) { if (!pid_hash[i]) continue; kpp = pid_hash[i]; upid = pid_hash[i] - OFFSET(upid_pid_chain); chained = 0; do_chained: if (!readmem(upid, KVADDR, nodebuf, SIZE(upid), "pid_hash upid", RETURN_ON_ERROR|QUIET)) { error(INFO, "\npid_hash[%d]: cannot read pid_hash upid\n", i); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } pnext = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_next)); pprev = ULONG(nodebuf + OFFSET(upid_pid_chain) + OFFSET(hlist_node_pprev)); upid_nr = UINT(nodebuf + OFFSET(upid_nr)); upid_ns = ULONG(nodebuf + OFFSET(upid_ns)); /* * Use init_pid_ns level 0 (PIDTYPE_PID). */ if (upid_ns != tt->init_pid_ns) { if (!accessible(upid_ns)) { error(INFO, "%spid_hash[%d]: invalid upid.ns: %lx\n", DUMPFILE() ? "\n" : "", i, upid_ns); continue; } goto chain_next; } pid = upid - OFFSET(pid_numbers); if (!readmem(pid + OFFSET(pid_tasks), KVADDR, &pid_tasks_0, sizeof(void *), "pid tasks", RETURN_ON_ERROR|QUIET)) { error(INFO, "\npid_hash[%d]: cannot read pid.tasks[0]\n", i); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_pid_hash; } if (pid_tasks_0 == 0) goto chain_next; next = pid_tasks_0 - OFFSET(task_struct_pids); if (CRASHDEBUG(1)) { if (chained) console(" %lx upid: %lx nr: %d pid: %lx\n" " pnext/pprev: %.*lx/%lx task: %lx\n", kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next); else console("pid_hash[%4d]: %lx upid: %lx nr: %d pid: %lx\n" " pnext/pprev: %.*lx/%lx task: %lx\n", i, kpp, upid, upid_nr, pid, VADDR_PRLEN, pnext, pprev, next); } if (!IS_TASK_ADDR(next)) { error(INFO, "%spid_hash[%d]: invalid task address: %lx\n", DUMPFILE() ? "\n" : "", i, next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } if (!is_idle_thread(next) && !hq_enter(next)) { error(INFO, "%spid_hash[%d]: duplicate task: %lx\n", DUMPFILE() ? 
"\n" : "", i, next); if (DUMPFILE()) break; hq_close(); retries++; goto retry_pid_hash; } cnt++; chain_next: if (pnext) { if (chained >= tt->max_tasks) { error(INFO, "%spid_hash[%d]: corrupt/invalid upid chain\n", DUMPFILE() ? "\n" : "", i); continue; } kpp = pnext; upid = pnext - OFFSET(upid_pid_chain); chained++; goto do_chained; } } if (cnt > tt->max_tasks) { tt->max_tasks = cnt + TASK_SLUSH; allocate_task_space(tt->max_tasks); hq_close(); if (!DUMPFILE()) retries++; goto retry_pid_hash; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? "\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_pid_hash; } add_context(*tlp, tp); } FREEBUF(pid_hash); FREEBUF(nodebuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * Linux 4.15: pid_hash[] replaced by IDR/radix_tree */ static int radix_tree_task_callback(ulong task) { ulong *tlp; if (tt->callbacks < tt->max_tasks) { tlp = (ulong *)tt->task_local; tlp += tt->callbacks++; *tlp = task; } return TRUE; } static void refresh_radix_tree_task_table(void) { int i, cnt; ulong count, retries, next, curtask, curpid, upid_ns, pid_tasks_0, task; ulong *tlp; char *tp; struct list_pair rtp; char *pidbuf; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The 
current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } count = do_radix_tree(tt->pid_radix_tree, RADIX_TREE_COUNT, NULL); if (CRASHDEBUG(1)) console("do_radix_tree: count: %ld\n", count); retries = 0; pidbuf = GETBUF(SIZE(pid)); retry_radix_tree: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via radix tree\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via radix tree (%d retries)\n", retries); if (count > tt->max_tasks) { tt->max_tasks = count + TASK_SLUSH; allocate_task_space(tt->max_tasks); } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); tt->callbacks = 0; rtp.index = 0; rtp.value = (void *)&radix_tree_task_callback; count = do_radix_tree(tt->pid_radix_tree, RADIX_TREE_DUMP_CB, &rtp); if (CRASHDEBUG(1)) console("do_radix_tree: count: %ld tt->callbacks: %d\n", count, tt->callbacks); if (count > tt->max_tasks) { retries++; goto retry_radix_tree; } if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (!tt->idle_threads[i]) continue; if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } for (i = 0; i < tt->max_tasks; i++) { tlp = (ulong *)tt->task_local; tlp += i; if ((next = *tlp) == 0) break; /* * Translate radix tree contents to PIDTYPE_PID task. 
* - the radix tree contents are struct pid pointers * - upid is contained in pid.numbers[0] * - upid.ns should point to init->init_pid_ns * - pid->tasks[0] is first hlist_node in task->pids[3] * - get task from address of task->pids[0] */ if (!readmem(next, KVADDR, pidbuf, SIZE(pid), "pid", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read pid struct from radix tree\n"); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_radix_tree; } upid_ns = ULONG(pidbuf + OFFSET(pid_numbers) + OFFSET(upid_ns)); if (upid_ns != tt->init_pid_ns) continue; pid_tasks_0 = ULONG(pidbuf + OFFSET(pid_tasks)); if (!pid_tasks_0) continue; if (VALID_MEMBER(task_struct_pids)) task = pid_tasks_0 - OFFSET(task_struct_pids); else task = pid_tasks_0 - OFFSET(task_struct_pid_links); if (CRASHDEBUG(1)) console("pid: %lx ns: %lx tasks[0]: %lx task: %lx\n", next, upid_ns, pid_tasks_0, task); if (is_idle_thread(task)) continue; if (!IS_TASK_ADDR(task)) { error(INFO, "%s: IDR radix tree: invalid task address: %lx\n", DUMPFILE() ? "\n" : "", task); if (DUMPFILE()) break; hq_close(); retries++; goto retry_radix_tree; } if (!hq_enter(task)) { error(INFO, "%s: IDR radix tree: duplicate task: %lx\n", DUMPFILE() ? "\n" : "", task); if (DUMPFILE()) break; hq_close(); retries++; goto retry_radix_tree; } cnt++; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_radix_tree; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_radix_tree; } add_context(*tlp, tp); } FREEBUF(pidbuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * Linux 4.20: pid_hash[] IDR changed from radix tree to xarray */ static int xarray_task_callback(ulong task) { ulong *tlp; if (tt->callbacks < tt->max_tasks) { tlp = (ulong *)tt->task_local; tlp += tt->callbacks++; *tlp = task; } return TRUE; } static void refresh_xarray_task_table(void) { int i, cnt; ulong count, retries, next, curtask, curpid, upid_ns, pid_tasks_0, task; ulong *tlp; char *tp; struct list_pair xp; char *pidbuf; long pid_size = SIZE(pid); if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { /* impossible */ please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curpid = NO_PID; curtask = NO_TASK; /* * The current task's task_context entry may change, * or the task may not even exist anymore. 
*/ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } count = do_xarray(tt->pid_xarray, XARRAY_COUNT, NULL); if (CRASHDEBUG(1)) console("xarray: count: %ld\n", count); /* 6.5: b69f0aeb0689 changed pid.numbers[1] to numbers[] */ if (ARRAY_LENGTH(pid_numbers) == 0) pid_size += SIZE(upid); retries = 0; pidbuf = GETBUF(pid_size); retry_xarray: if (retries && DUMPFILE()) error(FATAL, "\ncannot gather a stable task list via xarray\n"); if ((retries == MAX_UNLIMITED_TASK_RETRIES) && !(tt->flags & TASK_INIT_DONE)) error(FATAL, "\ncannot gather a stable task list via xarray (%d retries)\n", retries); if (count > tt->max_tasks) { tt->max_tasks = count + TASK_SLUSH; allocate_task_space(tt->max_tasks); } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); tt->callbacks = 0; xp.index = 0; xp.value = (void *)&xarray_task_callback; count = do_xarray(tt->pid_xarray, XARRAY_DUMP_CB, &xp); if (CRASHDEBUG(1)) console("do_xarray: count: %ld tt->callbacks: %d\n", count, tt->callbacks); if (count > tt->max_tasks) { retries++; goto retry_xarray; } if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); return; } /* * Get the idle threads first. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (!tt->idle_threads[i]) continue; if (hq_enter(tt->idle_threads[i])) cnt++; else error(WARNING, "%sduplicate idle tasks?\n", DUMPFILE() ? "\n" : ""); } for (i = 0; i < tt->max_tasks; i++) { tlp = (ulong *)tt->task_local; tlp += i; if ((next = *tlp) == 0) break; /* * Translate xarray contents to PIDTYPE_PID task. 
* - the xarray contents are struct pid pointers * - upid is contained in pid.numbers[0] * - upid.ns should point to init->init_pid_ns * - pid->tasks[0] is first hlist_node in task->pids[3] * - get task from address of task->pids[0] */ if (!readmem(next, KVADDR, pidbuf, pid_size, "pid", RETURN_ON_ERROR|QUIET)) { error(INFO, "\ncannot read pid struct from xarray\n"); if (DUMPFILE()) continue; hq_close(); retries++; goto retry_xarray; } upid_ns = ULONG(pidbuf + OFFSET(pid_numbers) + OFFSET(upid_ns)); if (upid_ns != tt->init_pid_ns) continue; pid_tasks_0 = ULONG(pidbuf + OFFSET(pid_tasks)); if (!pid_tasks_0) continue; if (VALID_MEMBER(task_struct_pids)) task = pid_tasks_0 - OFFSET(task_struct_pids); else task = pid_tasks_0 - OFFSET(task_struct_pid_links); if (CRASHDEBUG(1)) console("pid: %lx ns: %lx tasks[0]: %lx task: %lx\n", next, upid_ns, pid_tasks_0, task); if (is_idle_thread(task)) continue; if (!IS_TASK_ADDR(task)) { error(INFO, "%s: IDR xarray: invalid task address: %lx\n", DUMPFILE() ? "\n" : "", task); if (DUMPFILE()) break; hq_close(); retries++; goto retry_xarray; } if (!hq_enter(task)) { error(INFO, "%s: IDR xarray: duplicate task: %lx\n", DUMPFILE() ? "\n" : "", task); if (DUMPFILE()) break; hq_close(); retries++; goto retry_xarray; } cnt++; } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_xarray; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_xarray; } add_context(*tlp, tp); } FREEBUF(pidbuf); please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } static void refresh_active_task_table(void) { int i; char *tp; int cnt; ulong curtask; ulong curpid; ulong retries; ulong *tlp; if (DUMPFILE() && (tt->flags & TASK_INIT_DONE)) /* impossible */ return; if (DUMPFILE()) { please_wait("gathering task table data"); if (!symbol_exists("panic_threads")) tt->flags |= POPULATE_PANIC; } if (ACTIVE() && !(tt->flags & TASK_REFRESH)) return; curtask = NO_TASK; curpid = NO_PID; retries = 0; get_active_set(); /* * The current task's task_context entry may change, * or the task may not even exist anymore. */ if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) { curtask = CURRENT_TASK(); curpid = CURRENT_PID(); } retry_active: if (!hq_open()) { error(INFO, "cannot hash task_struct entries\n"); if (!(tt->flags & TASK_INIT_DONE)) clean_exit(1); error(INFO, "using stale task_structs\n"); return; } /* * Get the active tasks. */ cnt = 0; for (i = 0; i < kt->cpus; i++) { if (hq_enter(tt->active_set[i])) cnt++; else error(WARNING, "%sduplicate active tasks?\n", DUMPFILE() ? "\n" : ""); } BZERO(tt->task_local, tt->max_tasks * sizeof(void *)); cnt = retrieve_list((ulong *)tt->task_local, cnt); hq_close(); clear_task_cache(); for (i = 0, tlp = (ulong *)tt->task_local, tt->running_tasks = 0; i < tt->max_tasks; i++, tlp++) { if (!(*tlp)) continue; if (!IS_TASK_ADDR(*tlp)) { error(WARNING, "%sinvalid task address found in task list: %lx\n", DUMPFILE() ? 
"\n" : "", *tlp); if (DUMPFILE()) continue; retries++; goto retry_active; } if (!(tp = fill_task_struct(*tlp))) { if (DUMPFILE()) continue; retries++; goto retry_active; } if (!add_context(*tlp, tp) && DUMPFILE()) error(WARNING, "corrupt/invalid active task: %lx\n", *tlp); } if (!tt->running_tasks) { if (DUMPFILE()) error(FATAL, "cannot determine any active tasks!\n"); retries++; goto retry_active; } please_wait_done(); if (ACTIVE() && (tt->flags & TASK_INIT_DONE)) refresh_context(curtask, curpid); tt->retries = MAX(tt->retries, retries); } /* * Initialize and return a new task_context structure with data from a task. * NULL is returned on error. */ static struct task_context * add_context(ulong task, char *tp) { pid_t *pid_addr, *tgid_addr; char *comm_addr; int *processor_addr; ulong *parent_addr; ulong *mm_addr; int has_cpu; int do_verify; struct task_context *tc; struct tgid_context *tg; processor_addr = NULL; if (tt->refresh_task_table == refresh_fixed_task_table) do_verify = 1; else if (tt->refresh_task_table == refresh_pid_hash_task_table) do_verify = 2; else if (tt->refresh_task_table == refresh_hlist_task_table) do_verify = 2; else if (tt->refresh_task_table == refresh_hlist_task_table_v2) do_verify = 2; else if (tt->refresh_task_table == refresh_hlist_task_table_v3) do_verify = 2; else if (tt->refresh_task_table == refresh_active_task_table) do_verify = 2; else do_verify = 0; tc = tt->context_array + tt->running_tasks; pid_addr = (pid_t *)(tp + OFFSET(task_struct_pid)); tgid_addr = (pid_t *)(tp + OFFSET(task_struct_tgid)); comm_addr = (char *)(tp + OFFSET(task_struct_comm)); if (tt->flags & THREAD_INFO) { if (tt->flags & THREAD_INFO_IN_TASK) tc->thread_info = task + OFFSET(task_struct_thread_info); else tc->thread_info = ULONG(tp + OFFSET(task_struct_thread_info)); fill_thread_info(tc->thread_info); if (tt->flags & THREAD_INFO_IN_TASK && VALID_MEMBER(task_struct_cpu)) processor_addr = (int *) (tp + OFFSET(task_struct_cpu)); else processor_addr = (int *) 
(tt->thread_info + OFFSET(thread_info_cpu)); } else if (VALID_MEMBER(task_struct_processor)) processor_addr = (int *) (tp + OFFSET(task_struct_processor)); else if (VALID_MEMBER(task_struct_cpu)) processor_addr = (int *) (tp + OFFSET(task_struct_cpu)); if (VALID_MEMBER(task_struct_p_pptr)) parent_addr = (ulong *)(tp + OFFSET(task_struct_p_pptr)); else parent_addr = (ulong *)(tp + OFFSET(task_struct_parent)); mm_addr = (ulong *)(tp + OFFSET(task_struct_mm)); has_cpu = task_has_cpu(task, tp); tc->pid = (ulong)(*pid_addr); strlcpy(tc->comm, comm_addr, TASK_COMM_LEN); if (machine_type("SPARC64")) tc->processor = *(unsigned short *)processor_addr; else tc->processor = *processor_addr; tc->ptask = *parent_addr; tc->mm_struct = *mm_addr; tc->task = task; tc->tc_next = NULL; /* * Fill a tgid_context structure with the data from * the incoming task. */ tg = tt->tgid_array + tt->running_tasks; tg->tgid = *tgid_addr; tg->task = task; tg->rss_cache = UNINITIALIZED; if (do_verify && !verify_task(tc, do_verify)) { error(INFO, "invalid task address: %lx\n", tc->task); BZERO(tc, sizeof(struct task_context)); return NULL; } if (has_cpu && (tt->flags & POPULATE_PANIC)) tt->panic_threads[tc->processor] = tc->task; tt->flags &= ~INDEXED_CONTEXTS; tt->running_tasks++; return tc; } /* * The current context may have moved to a new spot in the task table * or have exited since the last command. If it still exists, reset its * new position. If it doesn't exist, set the context back to the initial * crash context. If necessary, complain and show the restored context. 
*/
static void
refresh_context(ulong curtask, ulong curpid)
{
	ulong value, complain;
	struct task_context *tc;

	if (task_exists(curtask) && pid_exists(curpid)) {
		set_context(curtask, NO_PID, FALSE);
	} else {
		/* previous context is gone: fall back to the initial task */
		set_context(tt->this_task, NO_PID, FALSE);

		/* a "set <pid|task>" command targeting a new context
		   is expected to change it, so don't complain then */
		complain = TRUE;
		if (STREQ(args[0], "set") && (argcnt == 2) &&
		    IS_A_NUMBER(args[1])) {
			switch (str_to_context(args[optind], &value, &tc))
			{
			case STR_PID:
			case STR_TASK:
				complain = FALSE;
				break;
			case STR_INVALID:
				complain = TRUE;
				break;
			}
		}

		if (complain) {
			error(INFO, "current context no longer exists -- "
				"restoring \"%s\" context:\n\n",
				pc->program_name);
			show_context(CURRENT_CONTEXT());
			fprintf(fp, "\n");
		}
	}
}

/*
 *  qsort comparison callback for sort_context_by_task(): orders
 *  task_context pointers by ascending task address.
 */
static int
sort_by_task(const void *arg1, const void *arg2)
{
	const struct task_context *t1, *t2;

	t1 = *(const struct task_context **)arg1;
	t2 = *(const struct task_context **)arg2;

	if (t1->task == t2->task)
		return 0;

	return (t1->task < t2->task) ? -1 : 1;
}

/* sort context_by_task by task address */
static void
sort_context_by_task(void)
{
	int i;

	for (i = 0; i < tt->running_tasks; i++)
		tt->context_by_task[i] = &tt->context_array[i];
	qsort(tt->context_by_task, tt->running_tasks,
		sizeof(*tt->context_by_task), sort_by_task);
	tt->flags |= INDEXED_CONTEXTS;
}

/*
 *  Sort the task_context array by PID number; for PID 0, sort by processor.
 */
void
sort_context_array(void)
{
	ulong curtask;

	curtask = CURRENT_TASK();
	qsort((void *)tt->context_array, (size_t)tt->running_tasks,
		sizeof(struct task_context), sort_by_pid);
	/* the current context moved with the sort; re-establish it */
	set_context(curtask, NO_PID, FALSE);

	sort_context_by_task();
}

/*
 *  qsort comparison callback for sort_context_array(): ascending PID,
 *  with the per-cpu swappers (PID 0) ordered by processor number.
 */
static int
sort_by_pid(const void *arg1, const void *arg2)
{
	struct task_context *t1, *t2;

	t1 = (struct task_context *)arg1;
	t2 = (struct task_context *)arg2;

	if ((t1->pid == 0) && (t2->pid == 0))
		return (t1->processor < t2->processor ? -1 :
			t1->processor == t2->processor ? 0 : 1);
	else
		return (t1->pid < t2->pid ? -1 :
			t1->pid == t2->pid ? 0 : 1);
}

/*
 *  qsort comparison callback: orders tasks by descending last-run
 *  timestamp (most recently run first).
 */
static int
sort_by_last_run(const void *arg1, const void *arg2)
{
	/* NOTE(review): this local prototype appears vestigial — confirm */
	ulong task_last_run_stamp(ulong);
	struct task_context *t1, *t2;
	ulonglong lr1, lr2;

	t1 = (struct task_context *)arg1;
	t2 = (struct task_context *)arg2;

	lr1 = task_last_run(t1->task);
	lr2 = task_last_run(t2->task);

	return (lr2 < lr1 ? -1 : lr2 == lr1 ? 0 : 1);
}

/*
 *  Sort the task_context array by last-run timestamp, newest first,
 *  then restore the current context and rebuild the by-task index.
 */
static void
sort_context_array_by_last_run(void)
{
	ulong curtask;

	curtask = CURRENT_TASK();
	qsort((void *)tt->context_array, (size_t)tt->running_tasks,
		sizeof(struct task_context), sort_by_last_run);
	set_context(curtask, NO_PID, FALSE);

	sort_context_by_task();
}

/*
 *  Set the tgid_context array by tgid number.
 */
void
sort_tgid_array(void)
{
	/* only needed when per-thread rss_stat accounting is in use */
	if (VALID_MEMBER(mm_struct_rss) || (!VALID_MEMBER(task_struct_rss_stat)))
		return;

	qsort((void *)tt->tgid_array, (size_t)tt->running_tasks,
		sizeof(struct tgid_context), sort_by_tgid);

	tt->last_tgid = tt->tgid_array;
}

/*
 *  qsort comparison callback for sort_tgid_array(): ascending tgid.
 */
int
sort_by_tgid(const void *arg1, const void *arg2)
{
	struct tgid_context *t1, *t2;

	t1 = (struct tgid_context *)arg1;
	t2 = (struct tgid_context *)arg2;

	return (t1->tgid < t2->tgid ? -1 :
		t1->tgid == t2->tgid ? 0 : 1);
}

/*
 *  Keep a stash of the last task_struct accessed.  Chances are it will
 *  be hit several times before the next task is accessed.
 */
char *
fill_task_struct(ulong task)
{
	if (XEN_HYPER_MODE())
		return NULL;

	if (!IS_LAST_TASK_READ(task)) {
		if (!readmem(task, KVADDR, tt->task_struct,
		    SIZE(task_struct), "fill_task_struct",
		    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
			tt->last_task_read = 0;
			return NULL;
		}
	}

	tt->last_task_read = task;
	return(tt->task_struct);
}

/*
 *  Keep a stash of the last thread_info struct accessed.  Chances are it will
 *  be hit several times before the next task is accessed.
 */
char *
fill_thread_info(ulong thread_info)
{
	if (!IS_LAST_THREAD_INFO_READ(thread_info)) {
		if (!readmem(thread_info, KVADDR, tt->thread_info,
		    SIZE(thread_info), "fill_thread_info",
		    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
			tt->last_thread_info_read = 0;
			return NULL;
		}
	}

	tt->last_thread_info_read = thread_info;
	return(tt->thread_info);
}

/*
 *  Used by back_trace(), copy the complete kernel stack into a local buffer
 *  and fill the task_struct buffer, dealing with possible future separation
 *  of task_struct and stack and/or cache coloring of stack top.
 */
void
fill_stackbuf(struct bt_info *bt)
{
	if (!bt->stackbuf) {
		bt->stackbuf = GETBUF(bt->stacktop - bt->stackbase);

		if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
		    bt->stacktop - bt->stackbase,
		    "stack contents", RETURN_ON_ERROR))
			error(FATAL, "read of stack at %lx failed\n",
				bt->stackbase);
	}

	if (XEN_HYPER_MODE())
		return;

	if (!IS_LAST_TASK_READ(bt->task)) {
		/* when task_struct lives at the stack base, reuse the
		   bytes just read instead of a second readmem */
		if (bt->stackbase == bt->task) {
			BCOPY(bt->stackbuf, tt->task_struct,
				SIZE(task_struct));
			tt->last_task_read = bt->task;
		} else
			fill_task_struct(bt->task);
	}
}

/*
 *  Keeping the task_struct info intact, alter the contents of the already
 *  allocated local copy of a kernel stack, for things like IRQ stacks or
 *  non-standard eframe searches.  The caller must change the stackbase
 *  and stacktop values.
 */
void
alter_stackbuf(struct bt_info *bt)
{
	if (!readmem(bt->stackbase, KVADDR, bt->stackbuf,
	    bt->stacktop - bt->stackbase, "stack contents", RETURN_ON_ERROR))
		error(FATAL, "read of stack at %lx failed\n", bt->stackbase);
}

/*
 *  In the same vein as fill_task_struct(), keep a stash of the mm_struct
 *  of a task.
 */
char *fill_mm_struct(ulong mm)
{
	if (!IS_LAST_MM_READ(mm)) {
		if (!readmem(mm, KVADDR, tt->mm_struct,
		    SIZE(mm_struct), "fill_mm_struct",
		    ACTIVE() ? (RETURN_ON_ERROR|QUIET) : RETURN_ON_ERROR)) {
			tt->last_mm_read = 0;
			return NULL;
		}
	}

	tt->last_mm_read = mm;
	return(tt->mm_struct);
}

/*
 *  If active, clear out references to the last task and mm_struct read.
*/
void
clear_task_cache(void)
{
	if (ACTIVE())
		tt->last_task_read = tt->last_mm_read = 0;
}

/*
 *  Shorthand command to dump the current context's task_struct, or if
 *  pid or task arguments are entered, the task_structs of the targets.
 *  References to structure members can be given to pare down the output,
 *  which are put in a comma-separated list.
 */
void
cmd_task(void)
{
	int c, tcnt, bogus;
	unsigned int radix;
	ulong value;
	struct reference *ref;
	struct task_context *tc;
	ulong *tasklist;
	char *memberlist;

	tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong));
	ref = (struct reference *)GETBUF(sizeof(struct reference));
	memberlist = GETBUF(BUFSIZE);
	ref->str = memberlist;
	radix = 0;

	while ((c = getopt(argcnt, args, "xdhR:")) != EOF) {
		switch(c)
		{
		case 'h':
		case 'x':
			if (radix == 10)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 16;
			break;

		case 'd':
			if (radix == 16)
				error(FATAL,
				    "-d and -x are mutually exclusive\n");
			radix = 10;
			break;

		case 'R':
			/* accumulate -R members as a comma-separated list */
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, optarg);
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	tcnt = bogus = 0;

	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			switch (str_to_context(args[optind], &value, &tc))
			{
			case STR_PID:
				/* a PID may map to several threads */
				for (tc = pid_to_context(value); tc;
				     tc = tc->tc_next)
					tasklist[tcnt++] = tc->task;
				break;

			case STR_TASK:
				tasklist[tcnt++] = value;
				break;

			case STR_INVALID:
				bogus++;
				error(INFO,
				    "invalid task or pid value: %s\n\n",
					args[optind]);
				break;
			}
		} else if (strstr(args[optind], ",") ||
			MEMBER_EXISTS("task_struct", args[optind])) {
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, args[optind]);
		} else if (strstr(args[optind], ".") ||
			strstr(args[optind], "[")) {
			/* embedded-member or array reference */
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, args[optind]);
		} else
			error(INFO,
			    "invalid task, pid, or task_struct member: %s\n\n",
				args[optind]);
		optind++;
	}

	if (!tcnt && !bogus)
		tasklist[tcnt++] = CURRENT_TASK();

	for (c = 0; c < tcnt; c++)
		do_task(tasklist[c], 0, strlen(ref->str) ? ref : NULL, radix);
}

/*
 *  Do the work for the task command.
 */
void
do_task(ulong task, ulong flags, struct reference *ref, unsigned int radix)
{
	struct task_context *tc;

	tc = task_to_context(task);

	if (ref) {
		print_task_header(fp, tc, 0);
		task_struct_member(tc, radix, ref);
	} else {
		if (!(flags & FOREACH_TASK))
			print_task_header(fp, tc, 0);
		dump_struct("task_struct", task, radix);
		if (tt->flags & THREAD_INFO) {
			fprintf(fp, "\n");
			dump_struct("thread_info", tc->thread_info, radix);
		}
	}

	fprintf(fp, "\n");
}

/*
 *  Search the task_struct for the referenced field.
 */
static void
task_struct_member(struct task_context *tc, unsigned int radix, struct reference *ref)
{
	int i;
	int argcnt;
	char *arglist[MAXARGS];
	char *refcopy;
	struct datatype_member dm;

	if ((count_chars(ref->str, ',')+1) > MAXARGS) {
		error(INFO,
		    "too many -R arguments in comma-separated list!\n");
		return;
	}

	/* split the comma-separated member list into arglist[] */
	refcopy = GETBUF(strlen(ref->str)+1);
	strcpy(refcopy, ref->str);
	replace_string(refcopy, ",", ' ');

	argcnt = parse_line(refcopy, arglist);

	/* dump the full struct(s) to a tmpfile, then filter below */
	open_tmpfile();
	dump_struct("task_struct", tc->task, radix);
	if (tt->flags & THREAD_INFO)
		dump_struct("thread_info", tc->thread_info, radix);

	for (i = 0; i < argcnt; i++) {
		if (count_chars(arglist[i], '.') ||
		    count_chars(arglist[i], '[')) {
			dm.member = arglist[i];
			parse_for_member_extended(&dm, 0);
		} else {
			if (!MEMBER_EXISTS("task_struct", arglist[i]) &&
			    !MEMBER_EXISTS("thread_info", arglist[i]))
				error(INFO, "%s: not a task_struct or "
				    "thread_info member\n", arglist[i]);
			parse_task_thread(1, &arglist[i], tc);
		}
	}
	close_tmpfile();

	FREEBUF(refcopy);
}

/*
 *  Scan the tmpfile produced by task_struct_member() and echo only the
 *  lines belonging to the requested members, tracking gdb's brace
 *  indentation (including randomized-layout sub-blocks) to find where
 *  each member's output ends.
 */
static void
parse_task_thread(int argcnt, char *arglist[], struct task_context *tc) {
	char buf[BUFSIZE];
	char lookfor1[BUFSIZE];
	char lookfor2[BUFSIZE];
	char lookfor3[BUFSIZE];
	int i, cnt, randomized;

	rewind(pc->tmpfile);

	BZERO(lookfor1, BUFSIZE);
	BZERO(lookfor2, BUFSIZE);
	BZERO(lookfor3, BUFSIZE);
	randomized = FALSE;

	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if
(STREQ(buf, " {\n")) randomized = TRUE; else if (randomized && (STREQ(buf, " }, \n") || STREQ(buf, " },\n"))) randomized = FALSE; if (strlen(lookfor2)) { fprintf(pc->saved_fp, "%s", buf); if (STRNEQ(buf, lookfor2)) BZERO(lookfor2, BUFSIZE); continue; } if (strlen(lookfor3)) { fprintf(pc->saved_fp, "%s", buf); if (strstr(buf, lookfor3)) BZERO(lookfor3, BUFSIZE); continue; } for (i = 0; i < argcnt; i++) { BZERO(lookfor1, BUFSIZE); BZERO(lookfor2, BUFSIZE); BZERO(lookfor3, BUFSIZE); sprintf(lookfor1, "%s %s = ", randomized ? " " : "", arglist[i]); if (STRNEQ(buf, lookfor1)) { fprintf(pc->saved_fp, "%s", buf); if (strstr(buf, "{{\n")) sprintf(lookfor2, "%s }},", randomized ? " " : ""); else if (strstr(buf, " = {\n")) { cnt = count_leading_spaces(buf); sprintf(lookfor2, "%s}", space(cnt)); } else if (strstr(buf, "{")) sprintf(lookfor3, "},"); break; } } } } static char *ps_exclusive = "-a, -t, -c, -p, -g, -l, -m, -S, -r and -A flags are all mutually-exclusive\n"; static void check_ps_exclusive(ulong flag, ulong thisflag) { if (flag & (PS_EXCLUSIVE & ~thisflag)) error(FATAL, ps_exclusive); } /* * Display ps-like data for all tasks, or as specified by pid, task, or * command-name arguments. */ void cmd_ps(void) { int c, ac; ulong flag; ulong value; static struct psinfo psinfo; struct task_context *tc; char *cpuspec, *p; BZERO(&psinfo, sizeof(struct psinfo)); cpuspec = NULL; flag = 0; while ((c = getopt(argcnt, args, "HASgstcpkuGlmarC:y:")) != EOF) { switch(c) { case 'k': if (flag & PS_USER) error(FATAL, "-u and -k are mutually exclusive\n"); flag |= PS_KERNEL; break; case 'u': if (flag & PS_KERNEL) error(FATAL, "-u and -k are mutually exclusive\n"); flag |= PS_USER; break; case 'G': if (flag & PS_GROUP) break; else if (hq_open()) flag |= PS_GROUP; else error(INFO, "cannot hash thread group tasks\n"); break; /* * The a, t, c, p, g, l and r flags are all mutually-exclusive. 
*/ case 'g': check_ps_exclusive(flag, PS_TGID_LIST); flag |= PS_TGID_LIST; break; case 'a': check_ps_exclusive(flag, PS_ARGV_ENVP); flag |= PS_ARGV_ENVP; break; case 't': check_ps_exclusive(flag, PS_TIMES); flag |= PS_TIMES; break; case 'c': check_ps_exclusive(flag, PS_CHILD_LIST); flag |= PS_CHILD_LIST; break; case 'p': check_ps_exclusive(flag, PS_PPID_LIST); flag |= PS_PPID_LIST; break; case 'm': if (INVALID_MEMBER(task_struct_last_run) && INVALID_MEMBER(task_struct_timestamp) && INVALID_MEMBER(sched_info_last_arrival)) { error(INFO, "last-run timestamps do not exist in this kernel\n"); argerrs++; break; } if (INVALID_MEMBER(rq_timestamp)) option_not_supported(c); check_ps_exclusive(flag, PS_MSECS); flag |= PS_MSECS; break; case 'l': if (INVALID_MEMBER(task_struct_last_run) && INVALID_MEMBER(task_struct_timestamp) && INVALID_MEMBER(sched_info_last_arrival)) { error(INFO, "last-run timestamps do not exist in this kernel\n"); argerrs++; break; } check_ps_exclusive(flag, PS_LAST_RUN); flag |= PS_LAST_RUN; break; case 's': flag |= PS_KSTACKP; break; case 'r': check_ps_exclusive(flag, PS_RLIMIT); flag |= PS_RLIMIT; break; case 'S': check_ps_exclusive(flag, PS_SUMMARY); flag |= PS_SUMMARY; break; case 'C': cpuspec = optarg; psinfo.cpus = get_cpumask_buf(); make_cpumask(cpuspec, psinfo.cpus, FAULT_ON_ERROR, NULL); break; case 'y': flag |= PS_POLICY; psinfo.policy = make_sched_policy(optarg); break; case 'A': check_ps_exclusive(flag, PS_ACTIVE); flag |= PS_ACTIVE; break; case 'H': flag |= PS_NO_HEADER; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (flag & (PS_LAST_RUN|PS_MSECS)) sort_context_array_by_last_run(); else if (psinfo.cpus) { error(INFO, "-C option is only applicable with -l and -m\n"); goto bailout; } if (!args[optind]) { show_ps(PS_SHOW_ALL|flag, &psinfo); return; } if (flag & PS_SUMMARY) error(FATAL, "-S option takes no arguments\n"); if (psinfo.cpus) error(INFO, "-C option is not applicable with specified tasks\n"); 
ac = 0; while (args[optind]) { if (IS_A_NUMBER(args[optind])) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: psinfo.pid[ac] = value; psinfo.task[ac] = NO_TASK; psinfo.type[ac] = PS_BY_PID; flag |= PS_BY_PID; break; case STR_TASK: psinfo.task[ac] = value; psinfo.pid[ac] = NO_PID; psinfo.type[ac] = PS_BY_TASK; flag |= PS_BY_TASK; break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n\n", args[optind]); break; } ac++; } else if (SINGLE_QUOTED_STRING(args[optind])) { /* * Regular expression is exclosed within "'" character. * The args[optind] string may not be modified, so a copy * is duplicated. */ if (psinfo.regexs == MAX_PS_ARGS) error(INFO, "too many expressions specified!\n"); else { p = strdup(&args[optind][1]); LASTCHAR(p) = NULLCHAR; if (regcomp(&psinfo.regex_data[psinfo.regexs].regex, p, REG_EXTENDED|REG_NOSUB)) { error(INFO, "invalid regular expression: %s\n", p); free(p); goto bailout; } psinfo.regex_data[psinfo.regexs].pattern = p; if (psinfo.regexs++ == 0) { pc->cmd_cleanup_arg = (void *)&psinfo; pc->cmd_cleanup = ps_cleanup; } psinfo.type[ac] = PS_BY_REGEX; flag |= PS_BY_REGEX; ac++; } optind++; continue; } else { psinfo.pid[ac] = NO_PID; psinfo.task[ac] = NO_TASK; p = args[optind][0] == '\\' ? &args[optind][1] : args[optind]; strlcpy(psinfo.comm[ac], p, TASK_COMM_LEN); psinfo.type[ac] = PS_BY_CMD; flag |= PS_BY_CMD; ac++; } optind++; } psinfo.argc = ac; show_ps(flag, &psinfo); bailout: ps_cleanup((void *)&psinfo); } /* * Clean up regex buffers and pattern strings. */ static void ps_cleanup(void *arg) { int i; struct psinfo *ps; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; ps = (struct psinfo *)arg; for (i = 0; i < ps->regexs; i++) { regfree(&ps->regex_data[i].regex); free(ps->regex_data[i].pattern); } if (ps->cpus) FREEBUF(ps->cpus); } /* * Do the work requested by cmd_ps(). 
 */
static void
show_ps_data(ulong flag, struct task_context *tc, struct psinfo *psi)
{
	struct task_mem_usage task_mem_usage, *tm;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	ulong tgid;
	int task_active;

	/* user/kernel/policy filters */
	if ((flag & PS_USER) && is_kernel_thread(tc->task))
		return;
	if ((flag & PS_KERNEL) && !is_kernel_thread(tc->task))
		return;
	if ((flag & PS_POLICY) && !has_sched_policy(tc->task, psi->policy))
		return;
	if (flag & PS_GROUP) {
		if (flag & (PS_LAST_RUN|PS_MSECS))
			error(FATAL, "-G not supported with -%c option\n",
				flag & PS_LAST_RUN ? 'l' : 'm');

		/*
		 * Only the thread-group leader is shown; the hash queue
		 * suppresses duplicates.
		 */
		tgid = task_tgid(tc->task);
		if (tc->pid != tgid) {
			if (pc->curcmd_flags & TASK_SPECIFIED) {
				if (!(tc = tgid_to_context(tgid)))
					return;
				if (hq_entry_exists((ulong)tc))
					return;
				hq_enter((ulong)tc);
			} else
				return;
		} else {
			if (hq_entry_exists((ulong)tc))
				return;
			hq_enter((ulong)tc);
		}
	}

	/* each PS_EXCLUSIVE mode delegates to its own display routine */
	if (flag & PS_PPID_LIST) {
		parent_list(tc->task);
		fprintf(fp, "\n");
		return;
	}

	if (flag & PS_CHILD_LIST) {
		child_list(tc->task);
		fprintf(fp, "\n");
		return;
	}

	if (flag & (PS_LAST_RUN)) {
		show_last_run(tc, psi);
		return;
	}

	if (flag & (PS_MSECS)) {
		show_milliseconds(tc, psi);
		return;
	}

	if (flag & PS_ARGV_ENVP) {
		show_task_args(tc);
		return;
	}

	if (flag & PS_RLIMIT) {
		show_task_rlimit(tc);
		return;
	}

	if (flag & PS_TGID_LIST) {
		show_tgid_list(tc->task);
		return;
	}

	/* default one-line ps display */
	tm = &task_mem_usage;
	get_task_mem_usage(tc->task, tm);

	task_active = is_task_active(tc->task);

	if ((flag & PS_ACTIVE) && (flag & PS_SHOW_ALL) && !task_active)
		return;

	/* '>' marks active tasks, '-' active on an offline cpu */
	if (task_active) {
		if (hide_offline_cpu(tc->processor))
			fprintf(fp, "- ");
		else
			fprintf(fp, "> ");
	} else
		fprintf(fp, " ");

	fprintf(fp, "%7ld %7ld %3s %s %3s",
		tc->pid, task_to_pid(tc->ptask),
		task_cpu(tc->processor, buf2, !VERBOSE),
		task_pointer_string(tc, flag & PS_KSTACKP, buf3),
		task_state_string(tc->task, buf1, !VERBOSE));

	pad_line(fp, strlen(buf1) > 3 ? 1 : 2, ' ');

	sprintf(buf1, "%.1f", tm->pct_physmem);
	if (strlen(buf1) == 3)
		mkstring(buf1, 4, CENTER|RJUST, NULL);

	fprintf(fp, "%s ", buf1);
	fprintf(fp, "%8ld ", (tm->total_vm * PAGESIZE())/1024);
	fprintf(fp, "%8ld ", (tm->rss * PAGESIZE())/1024);
	if (is_kernel_thread(tc->task))
		fprintf(fp, "[%s]\n", tc->comm);
	else
		fprintf(fp, "%s\n", tc->comm);
}

/*
 *  Top-level dispatcher for cmd_ps(): either walk every task context,
 *  or match each context against the collected pid/task/comm/regex
 *  selectors in psi.
 */
static void
show_ps(ulong flag, struct psinfo *psi)
{
	int i, ac;
	struct task_context *tc;
	int print;
	char buf[BUFSIZE];

	if (!(flag & ((PS_EXCLUSIVE & ~PS_ACTIVE)|PS_NO_HEADER)))
		fprintf(fp,
		    " PID PPID CPU %s ST %%MEM VSZ RSS COMM\n",
			flag & PS_KSTACKP ?
			mkstring(buf, VADDR_PRLEN, CENTER|RJUST, "KSTACKP") :
			mkstring(buf, VADDR_PRLEN, CENTER, "TASK"));

	if (flag & PS_SHOW_ALL) {

		if (flag & PS_TIMES) {
			show_task_times(NULL, flag);
			return;
		}

		if (flag & PS_SUMMARY) {
			show_ps_summary(flag);
			return;
		}

		if (psi->cpus) {
			/* -C: per-cpu display handles iteration itself */
			show_ps_data(flag, NULL, psi);
			return;
		}

		tc = FIRST_CONTEXT();
		for (i = 0; i < RUNNING_TASKS(); i++, tc++)
			show_ps_data(flag, tc, psi);

		return;
	}

	pc->curcmd_flags |= TASK_SPECIFIED;

	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		for (ac = 0; ac < psi->argc; ac++) {
			print = FALSE;

			switch(psi->type[ac])
			{
			case PS_BY_PID:
				if (tc->pid == psi->pid[ac])
					print = TRUE;
				break;

			case PS_BY_TASK:
				if ((tc->task == psi->task[ac]))
					print = TRUE;
				break;

			case PS_BY_CMD:
				if (STREQ(tc->comm, psi->comm[ac])) {
					/* group modes: leaders only */
					if (flag & (PS_TGID_LIST|PS_GROUP)) {
						if (tc->pid ==
						    task_tgid(tc->task))
							print = TRUE;
						else
							print = FALSE;
					} else
						print = TRUE;
				}
				break;

			case PS_BY_REGEX:
				if (regexec(&psi->regex_data[ac].regex,
				    tc->comm, 0, NULL, 0) == 0) {
					if (flag & (PS_TGID_LIST|PS_GROUP)) {
						if (tc->pid ==
						    task_tgid(tc->task))
							print = TRUE;
						else
							print = FALSE;
					} else
						print = TRUE;
				}
				break;
			}

			if (print) {
				if (flag & PS_TIMES)
					show_task_times(tc, flag);
				else
					show_ps_data(flag, tc, psi);
			}
		}
	}
}

/*
 *  -S: count tasks per state string and print the totals.
 */
static void
show_ps_summary(ulong flag)
{
	int i, s;
	struct task_context *tc;
	char buf[BUFSIZE];
#define MAX_STATES 20
	/* small fixed table: state string -> occurrence count */
	struct ps_state {
		long cnt;
		char string[3];
	} ps_state[MAX_STATES];

	if (flag & (PS_USER|PS_KERNEL|PS_GROUP))
		error(FATAL,
		    "-S option cannot be used with other options\n");

	for (s = 0; s < MAX_STATES; s++)
		ps_state[s].cnt = 0;

	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		task_state_string(tc->task, buf, !VERBOSE);
		for (s = 0; s < MAX_STATES; s++) {
			/* bump an existing entry, or claim a free slot */
			if (ps_state[s].cnt &&
			    STREQ(ps_state[s].string, buf)) {
				ps_state[s].cnt++;
				break;
			}
			if (ps_state[s].cnt == 0) {
				strcpy(ps_state[s].string, buf);
				ps_state[s].cnt++;
				break;
			}
		}
	}
	for (s = 0; s < MAX_STATES; s++) {
		if (ps_state[s].cnt)
			fprintf(fp, " %s: %ld\n",
				ps_state[s].string, ps_state[s].cnt);
	}
}

/*
 *  Display the task preceded by the last_run stamp and its
 *  current state.
 */
static void
show_last_run(struct task_context *tc, struct psinfo *psi)
{
	int i, c, others;
	struct task_context *tcp;
	char format[15];
	char buf[BUFSIZE];

	/*
	 * Size the timestamp field from the first (most recently run)
	 * context so the column lines up for all tasks.
	 */
	tcp = FIRST_CONTEXT();
	sprintf(buf, pc->output_radix == 10 ? "%lld" : "%llx",
		task_last_run(tcp->task));
	c = strlen(buf);
	sprintf(format, "[%c%dll%c] ", '%', c,
		pc->output_radix == 10 ? 'u' : 'x');

	if (psi && psi->cpus) {
		/* -C: group output per selected cpu */
		for (c = others = 0; c < kt->cpus; c++) {
			if (!NUM_IN_BITMAP(psi->cpus, c))
				continue;

			fprintf(fp, "%sCPU: %d", others++ ? "\n" : "", c);

			if (hide_offline_cpu(c)) {
				fprintf(fp, " [OFFLINE]\n");
				continue;
			} else
				fprintf(fp, "\n");

			tcp = FIRST_CONTEXT();
			for (i = 0; i < RUNNING_TASKS(); i++, tcp++) {
				if (tcp->processor != c)
					continue;

				fprintf(fp, format,
					task_last_run(tcp->task));
				fprintf(fp, "[%s] ",
					task_state_string(tcp->task, buf,
					!VERBOSE));
				print_task_header(fp, tcp, FALSE);
			}
		}
	} else if (tc) {
		/* single task */
		fprintf(fp, format, task_last_run(tc->task));
		fprintf(fp, "[%s] ",
			task_state_string(tc->task, buf, !VERBOSE));
		print_task_header(fp, tc, FALSE);
	} else {
		/* all tasks */
		tcp = FIRST_CONTEXT();
		for (i = 0; i < RUNNING_TASKS(); i++, tcp++) {
			fprintf(fp, format, task_last_run(tcp->task));
			fprintf(fp, "[%s] ",
				task_state_string(tcp->task, buf, !VERBOSE));
			print_task_header(fp, tcp, FALSE);
		}
	}
}

/*
 *  Translate a value in nanoseconds into a string showing days,
 *  hours, minutes, seconds and milliseconds.
 */
static char *
translate_nanoseconds(ulonglong value, char *buf)
{
	ulong days, hours, mins, secs, ms;

	value = value / 1000000L;	/* ns -> ms */
	ms = value % 1000L;
	value = value / 1000L;		/* ms -> s */
	secs = value % 60L;
	value = value / 60L;
	mins = value % 60L;
	value = value / 60L;
	hours = value % 24L;
	value = value / 24L;
	days = value;

	sprintf(buf, "%ld %02ld:%02ld:%02ld.%03ld",
		days, hours, mins, secs, ms);

	return buf;
}

/*
 *  Display the task preceded by a per-rq translation of the
 *  sched_info.last_arrival and its current state.
 */
static void
show_milliseconds(struct task_context *tc, struct psinfo *psi)
{
	int i, c, others, days, max_days;
	struct task_context *tcp;
	char format[15];
	char buf[BUFSIZE];
	struct syment *rq_sp;
	ulong runq;
	ulonglong rq_clock;
	long long delta;

	if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
		error(FATAL, "cannot determine per-cpu runqueue address\n");

	/* column sizing, as in show_last_run() */
	tcp = FIRST_CONTEXT();
	sprintf(buf, pc->output_radix == 10 ? "%lld" : "%llx",
		task_last_run(tcp->task));
	c = strlen(buf);
	sprintf(format, "[%c%dll%c] ", '%', c,
		pc->output_radix == 10 ? 'u' : 'x');

	if (psi && psi->cpus) {
		for (c = others = 0; c < kt->cpus; c++) {
			if (!NUM_IN_BITMAP(psi->cpus, c))
				continue;

			fprintf(fp, "%sCPU: %d", others++ ? "\n" : "", c);

			if (hide_offline_cpu(c)) {
				fprintf(fp, " [OFFLINE]\n");
				continue;
			} else
				fprintf(fp, "\n");

			/* locate this cpu's runqueue */
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value +
					kt->__per_cpu_offset[c];
			else
				runq = rq_sp->value;

			readmem(runq + OFFSET(rq_timestamp), KVADDR,
				&rq_clock, sizeof(ulonglong),
				"per-cpu rq clock", FAULT_ON_ERROR);

			/* width of the days field, for alignment */
			translate_nanoseconds(rq_clock, buf);
			max_days = first_space(buf) - buf;

			tcp = FIRST_CONTEXT();
			for (i = 0; i < RUNNING_TASKS(); i++, tcp++) {
				if (tcp->processor != c)
					continue;

				delta = rq_clock -
					task_last_run(tcp->task);
				if (delta < 0)
					delta = 0;
				translate_nanoseconds(delta, buf);
				days = first_space(buf) - buf;
				fprintf(fp, "[%s%s] ",
					space(max_days - days), buf);
				fprintf(fp, "[%s] ",
					task_state_string(tcp->task, buf,
					!VERBOSE));
				print_task_header(fp, tcp, FALSE);
			}
		}
	} else if (tc) {
		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
			runq = rq_sp->value +
				kt->__per_cpu_offset[tc->processor];
		else
			runq = rq_sp->value;

		readmem(runq + OFFSET(rq_timestamp), KVADDR, &rq_clock,
			sizeof(ulonglong), "per-cpu rq clock",
			FAULT_ON_ERROR);

		translate_nanoseconds(rq_clock, buf);
		max_days = first_space(buf) - buf;

		delta = rq_clock - task_last_run(tc->task);
		if (delta < 0)
			delta = 0;
		translate_nanoseconds(delta, buf);
		days = first_space(buf) - buf;
		fprintf(fp, "[%s%s] ", space(max_days - days), buf);
		fprintf(fp, "[%s] ",
			task_state_string(tc->task, buf, !VERBOSE));
		print_task_header(fp, tc, FALSE);
	} else {
		tcp = FIRST_CONTEXT();
		for (i = 0; i < RUNNING_TASKS(); i++, tcp++) {
			if ((kt->flags & SMP) &&
			    (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value +
					kt->__per_cpu_offset[tcp->processor];
			else
				runq = rq_sp->value;

			readmem(runq + OFFSET(rq_timestamp), KVADDR,
				&rq_clock, sizeof(ulonglong),
				"per-cpu rq clock", FAULT_ON_ERROR);

			delta = rq_clock - task_last_run(tcp->task);
			if (delta < 0)
				delta = 0;
			fprintf(fp, "[%s] ",
				translate_nanoseconds(delta, buf));
			fprintf(fp, "[%s] ",
				task_state_string(tcp->task, buf, !VERBOSE));
			print_task_header(fp, tcp, FALSE);
		}
	}
}

/*
 *  Copy the [start, end) user-space range into buf, translating and
 *  reading page by page.  Returns a pointer just past the last byte
 *  copied, or NULL if any page cannot be accessed.
 */
static char *
read_arg_string(struct task_context *tc, char *buf, ulong start, ulong end)
{
	physaddr_t paddr;
	ulong uvaddr, size, cnt;
	char *bufptr;

	uvaddr = start;
	size = end - start;
	bufptr = buf;

	while (size > 0) {
		if (!uvtop(tc, uvaddr, &paddr, 0)) {
			error(INFO,
			    "cannot access user stack address: %lx\n\n",
				uvaddr);
			return NULL;
		}

		/* clamp to the end of the current page */
		cnt = PAGESIZE() - PAGEOFFSET(uvaddr);

		if (cnt > size)
			cnt = size;

		if (!readmem(paddr, PHYSADDR, bufptr, cnt,
		    "user stack contents", RETURN_ON_ERROR|QUIET)) {
			error(INFO,
			    "cannot access user stack address: %lx\n\n",
				uvaddr);
			return NULL;
		}

		uvaddr += cnt;
		bufptr += cnt;
		size -= cnt;
	}

	return bufptr;
}

/*
 *  Show the argv and envp strings pointed to by mm_struct->arg_start
 *  and mm_struct->env_start.  The user addresses need to be broken up
 *  into physical on a page-per-page basis because we typically are
 *  not going to be working in the context of the target task.
 */
static void
show_task_args(struct task_context *tc)
{
	ulong arg_start, arg_end, env_start, env_end;
	char *buf, *p1, *end;
	int c, d;

	print_task_header(fp, tc, 0);

	if (!tc || !tc->mm_struct) {	/* probably a kernel thread */
		error(INFO, "no user stack\n\n");
		return;
	}

	/* cache the mm_struct in tt->mm_struct */
	if (!task_mm(tc->task, TRUE))
		return;

	/* lazily resolve the arg/env member offsets */
	if (INVALID_MEMBER(mm_struct_arg_start)) {
		MEMBER_OFFSET_INIT(mm_struct_arg_start, "mm_struct",
			"arg_start");
		MEMBER_OFFSET_INIT(mm_struct_arg_end, "mm_struct",
			"arg_end");
		MEMBER_OFFSET_INIT(mm_struct_env_start, "mm_struct",
			"env_start");
		MEMBER_OFFSET_INIT(mm_struct_env_end, "mm_struct",
			"env_end");
	}

	arg_start = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_start));
	arg_end = ULONG(tt->mm_struct + OFFSET(mm_struct_arg_end));
	env_start = ULONG(tt->mm_struct + OFFSET(mm_struct_env_start));
	env_end = ULONG(tt->mm_struct + OFFSET(mm_struct_env_end));

	if (CRASHDEBUG(1)) {
		fprintf(fp, "arg_start: %lx arg_end: %lx (%ld)\n",
			arg_start, arg_end, arg_end - arg_start);
		fprintf(fp, "env_start: %lx env_end: %lx (%ld)\n",
			env_start, env_end, env_end - env_start);
	}

	buf = GETBUF(arg_end - arg_start + 1);

	end = read_arg_string(tc, buf, arg_start, arg_end);
	if (!end)
		goto bailout;

	/* argv strings are NUL-separated; join them with spaces */
	fprintf(fp, "ARG: ");
	for (p1 = buf, c = 0; p1 < end; p1++) {
		if (*p1 == NULLCHAR) {
			if (c)
				fprintf(fp, " ");
			c = 0;
		} else {
			fprintf(fp, "%c", *p1);
			c++;
		}
	}

	FREEBUF(buf);
	buf = GETBUF(env_end - env_start + 1);

	end = read_arg_string(tc, buf, env_start, env_end);
	if (!end)
		goto bailout;

	/* envp strings: one per line */
	fprintf(fp, "\nENV: ");
	for (p1 = buf, c = d = 0; p1 < end; p1++) {
		if (*p1 == NULLCHAR) {
			if (c)
				fprintf(fp, "\n");
			c = 0;
		} else {
			fprintf(fp, "%s%c",
				!c && (p1 != buf) ? " " : "", *p1);
			c++, d++;
		}
	}
	fprintf(fp, "\n%s", d ? "" : "\n");

bailout:
	FREEBUF(buf);
}

/* Resource-limit names, indexed by RLIMIT_* number. */
char *rlim_names[] = {
	/* 0 */	 "CPU",
	/* 1 */	 "FSIZE",
	/* 2 */	 "DATA",
	/* 3 */	 "STACK",
	/* 4 */	 "CORE",
	/* 5 */	 "RSS",
	/* 6 */	 "NPROC",
	/* 7 */	 "NOFILE",
	/* 8 */	 "MEMLOCK",
	/* 9 */	 "AS",
	/* 10 */ "LOCKS",
	/* 11 */ "SIGPENDING",
	/* 12 */ "MSGQUEUE",
	/* 13 */ "NICE",
	/* 14 */ "RTPRIO",
	/* 15 */ "RTTIME",
	NULL,
};

#ifndef RLIM_INFINITY
#define RLIM_INFINITY (~0UL)
#endif

/*
 *  Show the current and maximum rlimit values.  The rlim array lives
 *  either in the task_struct (older kernels) or in the signal_struct.
 */
static void
show_task_rlimit(struct task_context *tc)
{
	int i, j, len1, len2, rlimit_index;
	int in_task_struct, in_signal_struct;
	char *rlimit_buffer;
	ulong *p1, rlim_addr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];

	rlimit_index = 0;

	/* first call: resolve the rlim array location and element size */
	if (!VALID_MEMBER(task_struct_rlim) &&
	    !VALID_MEMBER(signal_struct_rlim)) {
		MEMBER_OFFSET_INIT(task_struct_rlim, "task_struct", "rlim");
		MEMBER_OFFSET_INIT(signal_struct_rlim, "signal_struct",
			"rlim");
		STRUCT_SIZE_INIT(rlimit, "rlimit");
		if (!VALID_MEMBER(task_struct_rlim) &&
		    !VALID_MEMBER(signal_struct_rlim))
			error(FATAL,
			    "cannot determine rlimit array location\n");
	} else if (!VALID_STRUCT(rlimit))
		error(FATAL,
		    "cannot determine rlimit structure definition\n");

	in_task_struct = in_signal_struct = FALSE;

	if (VALID_MEMBER(task_struct_rlim)) {
		rlimit_index = (i = ARRAY_LENGTH(task_struct_rlim)) ?
			i : get_array_length("task_struct.rlim", NULL, 0);
		in_task_struct = TRUE;
	} else if (VALID_MEMBER(signal_struct_rlim)) {
		if (!VALID_MEMBER(task_struct_signal))
			error(FATAL,
			    "cannot determine rlimit array location\n");
		rlimit_index = (i = ARRAY_LENGTH(signal_struct_rlim)) ?
			i : get_array_length("signal_struct.rlim", NULL, 0);
		in_signal_struct = TRUE;
	}

	if (!rlimit_index)
		error(FATAL, "cannot determine rlimit array size\n");

	/* widest name, for the RLIMIT column */
	for (i = len1 = 0; i < rlimit_index; i++) {
		if (rlim_names[i] == NULL)
			continue;
		if ((j = strlen(rlim_names[i])) > len1)
			len1 = j;
	}
	len2 = strlen("(unlimited)");

	rlimit_buffer = GETBUF(rlimit_index * SIZE(rlimit));

	print_task_header(fp, tc, 0);

	fill_task_struct(tc->task);
	if (in_task_struct) {
		BCOPY(tt->task_struct + OFFSET(task_struct_rlim),
			rlimit_buffer, rlimit_index * SIZE(rlimit));
	} else if (in_signal_struct) {
		rlim_addr = ULONG(tt->task_struct +
			OFFSET(task_struct_signal));
		if (!readmem(rlim_addr + OFFSET(signal_struct_rlim),
		    KVADDR, rlimit_buffer, rlimit_index * SIZE(rlimit),
		    "signal_struct rlimit array", RETURN_ON_ERROR)) {
			FREEBUF(rlimit_buffer);
			return;
		}
	}

	fprintf(fp, " %s %s %s\n",
		mkstring(buf1, len1, RJUST, "RLIMIT"),
		mkstring(buf2, len2, CENTER|RJUST, "CURRENT"),
		mkstring(buf3, len2, CENTER|RJUST, "MAXIMUM"));

	/* each struct rlimit is a (current, maximum) pair of ulongs */
	for (p1 = (ulong *)rlimit_buffer, i = 0; i < rlimit_index; i++) {
		fprintf(fp, " %s ",
			mkstring(buf1, len1, RJUST,
			rlim_names[i] ? rlim_names[i] : "(unknown)"));
		if (*p1 == (ulong)RLIM_INFINITY)
			fprintf(fp, "(unlimited) ");
		else
			fprintf(fp, "%s ", mkstring(buf1, len2,
				CENTER|LJUST|LONG_DEC, MKSTR(*p1)));
		p1++;
		if (*p1 == (ulong)RLIM_INFINITY)
			fprintf(fp, "(unlimited)\n");
		else
			fprintf(fp, "%s\n", mkstring(buf1, len2,
				CENTER|LJUST|LONG_DEC, MKSTR(*p1)));
		p1++;
	}

	fprintf(fp, "\n");

	FREEBUF(rlimit_buffer);
}

/*
 *  Put either the task_struct address or kernel stack pointer into a
 *  string.  If the kernel stack pointer is requested, piggy-back on top
 *  of the back trace code to avoid having to deal with machine
 *  dependencies, live active tasks, and dumpfile panic tasks.
*/ static char * task_pointer_string(struct task_context *tc, ulong do_kstackp, char *buf) { struct bt_info bt_info, *bt; char buf1[BUFSIZE]; if (do_kstackp) { bt = &bt_info; BZERO(bt, sizeof(struct bt_info));; if (is_task_active(tc->task)) { bt->stkptr = 0; } else if (VALID_MEMBER(task_struct_thread_esp)) { readmem(tc->task + OFFSET(task_struct_thread_esp), KVADDR, &bt->stkptr, sizeof(void *), "thread_struct esp", FAULT_ON_ERROR); } else if (VALID_MEMBER(task_struct_thread_ksp)) { readmem(tc->task + OFFSET(task_struct_thread_ksp), KVADDR, &bt->stkptr, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } else if (VALID_MEMBER(task_struct_thread_context_sp)) { readmem(tc->task + OFFSET(task_struct_thread_context_sp), KVADDR, &bt->stkptr, sizeof(void *), "cpu_context sp", FAULT_ON_ERROR); } else { if ((bt->stackbase = GET_STACKBASE(tc->task))) { bt->stacktop = GET_STACKTOP(tc->task); bt->task = tc->task; bt->tc = tc; bt->flags |= BT_KSTACKP; back_trace(bt); if (bt->stackbuf) FREEBUF(bt->stackbuf); } else bt->stkptr = 0; } if (bt->stkptr) sprintf(buf, "%s", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST|LONG_HEX, MKSTR(bt->stkptr))); else sprintf(buf, "%s", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "--")); } else sprintf(buf, "%s", mkstring(buf1, VADDR_PRLEN, CENTER|RJUST|LONG_HEX, MKSTR(tc->task))); return buf; } /* * Dump the task list ordered by start_time. 
 */
/* On-disk layout of the legacy kernel_timeval structure. */
struct kernel_timeval {
	unsigned int tv_sec;
	unsigned int tv_usec;
};

/* Per-task snapshot collected by show_task_times() before sorting. */
struct task_start_time {
	struct task_context *tc;
	ulonglong start_time;
	ulong tms_utime;
	ulong tms_stime;
	struct timeval old_utime;
	struct timeval old_stime;
	struct kernel_timeval kutime;
	struct kernel_timeval kstime;
	ulonglong utime;
	ulonglong stime;
};

/*
 *  -t: collect start_time and user/system times for the selected task
 *  (or all tasks when tcp is NULL), sort by start_time, and display.
 *  The utime/stime extraction branches on which task_struct layout the
 *  running kernel uses.
 */
static void
show_task_times(struct task_context *tcp, ulong flags)
{
	int i, tasks, use_kernel_timeval, use_utime_stime;
	struct task_context *tc;
	struct task_start_time *task_start_times, *tsp;
	ulong jiffies, tgid;
	ulonglong jiffies_64;
	char buf1[BUFSIZE];

	task_start_times = (struct task_start_time *)
		GETBUF(RUNNING_TASKS() * sizeof(struct task_start_time));

	use_kernel_timeval = STRUCT_EXISTS("kernel_timeval");
	/* modern kernels: utime/stime are plain integer tick counts */
	if (VALID_MEMBER(task_struct_utime) &&
	    (SIZE(task_struct_utime) ==
	    (BITS32() ? sizeof(uint32_t) : sizeof(uint64_t))))
		use_utime_stime = TRUE;
	else
		use_utime_stime = FALSE;
	get_symbol_data("jiffies", sizeof(long), &jiffies);
	if (symbol_exists("jiffies_64"))
		get_uptime(NULL, &jiffies_64);
	tsp = task_start_times;
	tc = tcp ? tcp : FIRST_CONTEXT();

	for (i = tasks = 0; i < RUNNING_TASKS(); i++, tc++) {

		if ((flags & PS_USER) && is_kernel_thread(tc->task))
			continue;
		if ((flags & PS_KERNEL) && !is_kernel_thread(tc->task))
			continue;
		if (flags & PS_GROUP) {
			/* group mode: fold threads onto their leader */
			tgid = task_tgid(tc->task);
			if (tc->pid != tgid) {
				if (tcp) {
					if (!(tc = tgid_to_context(tgid)))
						return;
				} else
					continue;
			}
			if (hq_entry_exists((ulong)tc))
				return;
			hq_enter((ulong)tc);
		}

		fill_task_struct(tc->task);
		if (!tt->last_task_read) {
			if (tcp)
				return;
			continue;
		}

		tsp->tc = tc;

		/* start_time width/format depends on the kernel era */
		if (BITS32() && (SIZE(task_struct_start_time) == 8)) {
			if (start_time_timespec())
				tsp->start_time = ULONG(tt->task_struct +
					OFFSET(task_struct_start_time));
			else
				tsp->start_time =
					ULONGLONG(tt->task_struct +
					OFFSET(task_struct_start_time));
		} else {
			/* called for its flag-setting side effect */
			start_time_timespec();
			tsp->start_time = ULONG(tt->task_struct +
				OFFSET(task_struct_start_time));
		}

		if (VALID_MEMBER(task_struct_times)) {
			/* oldest layout: struct tms embedded in the task */
			tsp->tms_utime = ULONG(tt->task_struct +
				OFFSET(task_struct_times) +
				OFFSET(tms_tms_utime));
			tsp->tms_stime = ULONG(tt->task_struct +
				OFFSET(task_struct_times) +
				OFFSET(tms_tms_stime));
		} else if (VALID_MEMBER(task_struct_utime)) {
			if (use_utime_stime) {
				tsp->utime = ULONG(tt->task_struct +
					OFFSET(task_struct_utime));
				tsp->stime = ULONG(tt->task_struct +
					OFFSET(task_struct_stime));
			} else if (use_kernel_timeval) {
				BCOPY(tt->task_struct +
					OFFSET(task_struct_utime),
					&tsp->kutime,
					sizeof(struct kernel_timeval));
				BCOPY(tt->task_struct +
					OFFSET(task_struct_stime),
					&tsp->kstime,
					sizeof(struct kernel_timeval));
			} else if (VALID_STRUCT(cputime_t)) {
				/* since linux 2.6.11 */
				if (SIZE(cputime_t) == 8) {
					uint64_t utime_64, stime_64;
					BCOPY(tt->task_struct +
						OFFSET(task_struct_utime),
						&utime_64, 8);
					BCOPY(tt->task_struct +
						OFFSET(task_struct_stime),
						&stime_64, 8);
					/* convert from micro-sec. to sec. */
					tsp->old_utime.tv_sec =
						utime_64 / 1000000;
					tsp->old_stime.tv_sec =
						stime_64 / 1000000;
				} else {
					uint32_t utime_32, stime_32;
					BCOPY(tt->task_struct +
						OFFSET(task_struct_utime),
						&utime_32, 4);
					BCOPY(tt->task_struct +
						OFFSET(task_struct_stime),
						&stime_32, 4);
					tsp->old_utime.tv_sec = utime_32;
					tsp->old_stime.tv_sec = stime_32;
				}
			} else {
				/*
				 * NOTE(review): this fallback copies a
				 * struct timeval into tsp->utime/stime,
				 * yet the display path below reads
				 * old_utime/old_stime for this case —
				 * verify against upstream whether these
				 * targets should be old_utime/old_stime.
				 */
				BCOPY(tt->task_struct +
					OFFSET(task_struct_utime),
					&tsp->utime,
					sizeof(struct timeval));
				BCOPY(tt->task_struct +
					OFFSET(task_struct_stime),
					&tsp->stime,
					sizeof(struct timeval));
			}
		}

		tasks++;
		tsp++;

		if (tcp)
			break;
	}

	qsort((void *)task_start_times, (size_t)tasks,
		sizeof(struct task_start_time), compare_start_time);

	for (i = 0, tsp = task_start_times; i < tasks; i++, tsp++) {
		print_task_header(fp, tsp->tc, 0);
		fprintf(fp, " RUN TIME: %s\n",
			symbol_exists("jiffies_64") ?
			convert_time(convert_start_time(tsp->start_time,
			jiffies_64), buf1) :
			convert_time(jiffies - tsp->start_time, buf1));
		fprintf(fp, " START TIME: %llu\n", tsp->start_time);
		if (VALID_MEMBER(task_struct_times)) {
			fprintf(fp, " USER TIME: %ld\n", tsp->tms_utime);
			fprintf(fp, " SYSTEM TIME: %ld\n\n",
				tsp->tms_stime);
		} else if (VALID_MEMBER(task_struct_utime)) {
			if (use_utime_stime) {
				fprintf(fp, " UTIME: %lld\n",
					(ulonglong)tsp->utime);
				fprintf(fp, " STIME: %lld\n\n",
					(ulonglong)tsp->stime);
			} else if (use_kernel_timeval) {
				fprintf(fp, " USER TIME: %d\n",
					tsp->kutime.tv_sec);
				fprintf(fp, " SYSTEM TIME: %d\n\n",
					tsp->kstime.tv_sec);
			} else {
				fprintf(fp, " USER TIME: %ld\n",
					tsp->old_utime.tv_sec);
				fprintf(fp, " SYSTEM TIME: %ld\n\n",
					tsp->old_stime.tv_sec);
			}
		}
	}
	FREEBUF(task_start_times);
}

/*
 *  Determine (once) how task_struct.start_time is represented, caching
 *  the answer in tt->flags: TIMESPEC (struct timespec), START_TIME_NSECS
 *  (64-bit nanoseconds), or NO_TIMESPEC (jiffies).  Returns TRUE for
 *  the timespec representation.
 */
static int
start_time_timespec(void)
{
	switch(tt->flags & (TIMESPEC | NO_TIMESPEC | START_TIME_NSECS))
	{
	case TIMESPEC:
		return TRUE;
	case NO_TIMESPEC:
	case START_TIME_NSECS:
		return FALSE;
	default:
		break;
	}

	tt->flags |= NO_TIMESPEC;

	if (VALID_MEMBER(task_struct_start_time) &&
	    STREQ(MEMBER_TYPE_NAME("task_struct", "start_time"),
	    "timespec")) {
		tt->flags &= ~NO_TIMESPEC;
		tt->flags |= TIMESPEC;
	}

	/* an 8-byte non-timespec start_time holds nanoseconds */
	if ((tt->flags & NO_TIMESPEC) &&
	    (SIZE(task_struct_start_time) == 8)) {
		tt->flags &= ~NO_TIMESPEC;
		tt->flags |= START_TIME_NSECS;
	}

	return (tt->flags & TIMESPEC ? TRUE : FALSE);
}

/*
 *  Convert a raw start_time into elapsed jiffies relative to "current",
 *  according to the representation determined by start_time_timespec().
 *  The NO_TIMESPEC case compensates for the 2.6 kernel's initial
 *  jiffies wrap offset (INITIAL_JIFFIES, -300*HZ).
 */
static ulonglong
convert_start_time(ulonglong start_time, ulonglong current)
{
	ulong tmp1, tmp2;
	ulonglong wrapped;

	switch(tt->flags & (TIMESPEC | NO_TIMESPEC | START_TIME_NSECS))
	{
	case START_TIME_NSECS:
		start_time /= 1000000000ULL;	/* ns -> seconds */
		/* FALLTHROUGH */
	case TIMESPEC:
		if ((start_time * (ulonglong)machdep->hz) > current)
			return 0;
		else
			return current -
				(start_time * (ulonglong)machdep->hz);

	case NO_TIMESPEC:
		if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
			wrapped = (start_time & 0xffffffff00000000ULL);
			if (wrapped) {
				wrapped -= 0x100000000ULL;
				start_time &= 0x00000000ffffffffULL;
				start_time |= wrapped;
				start_time +=
					(ulonglong)(300*machdep->hz);
			} else {
				tmp1 = (ulong)(uint)(-300*machdep->hz);
				tmp2 = (ulong)start_time;
				start_time = (ulonglong)(tmp2 - tmp1);
			}
		}
		break;

	default:
		break;
	}

	return start_time;
}

/*
 *  qsort() comparator: ascending by start_time.
 *
 *  The comparison function must return an integer less than,
 *  equal to, or greater than zero if the first argument is
 *  considered to be respectively less than, equal to, or
 *  greater than the second.  If two members compare as equal,
 *  their order in the sorted array is undefined.
 */
static int
compare_start_time(const void *v1, const void *v2)
{
	struct task_start_time *t1, *t2;

	t1 = (struct task_start_time *)v1;
	t2 = (struct task_start_time *)v2;

	return (t1->start_time < t2->start_time ? -1 :
		t1->start_time == t2->start_time ? 0 : 1);
}

/*
 *  Read a task's parent pointer, handling the parent/p_pptr member
 *  rename across kernel versions.
 */
static ulong
parent_of(ulong task)
{
	long offset;
	ulong parent;

	if (VALID_MEMBER(task_struct_parent))
		offset = OFFSET(task_struct_parent);
	else
		offset = OFFSET(task_struct_p_pptr);

	readmem(task+offset, KVADDR, &parent,
	    sizeof(void *), "task parent", FAULT_ON_ERROR);

	return parent;
}

/*
 *  Dump the parental hierarchy of a task.
 */
static void
parent_list(ulong task)
{
	int i, j, cnt;
	struct task_context *tc;
	char *buffer;
	long reserved;
	ulong *task_list, child, parent;

	/* start with room for 100 ancestors; grow on demand */
	reserved = 100 * sizeof(ulong);
	buffer = GETBUF(reserved);
	task_list = (ulong *)buffer;
	child = task_list[0] = task;
	parent = parent_of(child);
	cnt = 1;

	/* walk up until a task is its own parent (init/swapper) */
	while (child != parent) {
		child = task_list[cnt++] = parent;
		parent = parent_of(child);

		if ((cnt * sizeof(ulong)) == reserved) {
			RESIZEBUF(buffer, reserved, reserved * 2);
			reserved *= 2;
			task_list = (ulong *)buffer;
		}
	}

	/* print root first, indenting one level per generation */
	for (i = cnt-1, j = 0; i >= 0; i--, j++) {
		INDENT(j);
		tc = task_to_context(task_list[i]);
		if (tc)
			print_task_header(fp, tc, 0);
	}

	FREEBUF(task_list);
}

/*
 *  Dump the children of a task.
 */
static void
child_list(ulong task)
{
	int i;
	int cnt;
	struct task_context *tc;

	tc = task_to_context(task);
	print_task_header(fp, tc, 0);

	tc = FIRST_CONTEXT();
	for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (tc->ptask == task) {
			INDENT(2);
			print_task_header(fp, tc, 0);
			cnt++;
		}
	}

	if (!cnt)
		fprintf(fp, " (no children)\n");
}

/*
 *  Dump the other threads in a task's thread group, headed by the
 *  thread group leader.
 */
static void
show_tgid_list(ulong task)
{
	int i;
	int cnt;
	struct task_context *tc;
	ulong tgid;

	tc = task_to_context(task);
	tgid = task_tgid(task);

	/* switch to the thread-group leader if given a member thread */
	if (tc->pid != tgid) {
		if (pc->curcmd_flags & TASK_SPECIFIED) {
			if (!(tc = tgid_to_context(tgid)))
				return;
			task = tc->task;
		} else
			return;
	}

	/* avoid re-displaying the idle group */
	if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN))
		return;

	print_task_header(fp, tc, 0);

	tc = FIRST_CONTEXT();
	for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (tc->task == task)
			continue;
		if (task_tgid(tc->task) == tgid) {
			INDENT(2);
			print_task_header(fp, tc, 0);
			cnt++;
			if (tc->pid == 0)
				pc->curcmd_flags |= IDLE_TASK_SHOWN;
		}
	}

	if (!cnt)
		fprintf(fp, " (no threads)\n");

	fprintf(fp, "\n");
}

/*
 *  Return the first task found that belongs to a pid.
*/ ulong pid_to_task(ulong pid) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->pid == pid) return(tc->task); return((ulong)NULL); } /* * Return the pid of a task. */ ulong task_to_pid(ulong task) { struct task_context *tc; tc = task_to_context(task); if (tc != NULL) return tc->pid; return(NO_PID); } /* * Verify whether a task exists. */ int task_exists(ulong task) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->task == task) return TRUE; return FALSE; } /* * Return the task_context structure of a task. */ struct task_context * task_to_context(ulong task) { struct task_context key, *tc, **found; int i; /* Binary search the context_by_task array. */ if (tt->flags & INDEXED_CONTEXTS) { key.task = task; tc = &key; found = bsearch(&tc, tt->context_by_task, tt->running_tasks, sizeof(*tt->context_by_task), sort_by_task); return found ? *found : NULL; } tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) if (tc->task == task) return tc; return NULL; } /* * Return a tgid's parent task_context structure. */ struct task_context * tgid_to_context(ulong parent_tgid) { int i; struct task_context *tc; ulong tgid; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { tgid = task_tgid(tc->task); if ((tgid == parent_tgid) && (tgid == tc->pid)) return tc; } return NULL; } /* * Return the task_context structure of the first task found with a pid, * while linking all tasks that have that pid. */ struct task_context * pid_to_context(ulong pid) { int i; struct task_context *tc, *firsttc, *lasttc; tc = FIRST_CONTEXT(); firsttc = lasttc = NULL; for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (tc->pid == pid) { if (!firsttc) firsttc = tc; if (lasttc) lasttc->tc_next = tc; tc->tc_next = NULL; lasttc = tc; } } return firsttc; } /* * Verify whether a pid exists, and if found, linking all tasks having the pid. 
*/ int pid_exists(ulong pid) { int i; struct task_context *tc, *lasttc; int count; tc = FIRST_CONTEXT(); count = 0; lasttc = NULL; for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (tc->pid == pid) { count++; if (lasttc) lasttc->tc_next = tc; tc->tc_next = NULL; lasttc = tc; } } return(count); } /* * Translate a stack pointer to a task, dealing with possible split. * If that doesn't work, check the hardirq_stack and softirq_stack. * * TODO: This function can be optimized by getting min & max of the * stack range in first pass and use these values against the * given SP to decide whether or not to proceed with stack lookup. */ ulong stkptr_to_task(ulong sp) { int i, c; struct task_context *tc; struct bt_info bt_info, *bt; if (!sp) return NO_TASK; bt = &bt_info; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { bt->stackbase = GET_STACKBASE(tc->task); bt->stacktop = GET_STACKTOP(tc->task); if (INSTACK(sp, bt)) return tc->task; } if (!(tt->flags & IRQSTACKS)) return NO_TASK; bt = &bt_info; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { for (c = 0; c < NR_CPUS; c++) { if (tt->hardirq_ctx[c]) { bt->stackbase = tt->hardirq_ctx[c]; bt->stacktop = bt->stackbase + SIZE(irq_ctx); if (INSTACK(sp, bt) && (tt->hardirq_tasks[c] == tc->task)) return tc->task; } if (tt->softirq_ctx[c]) { bt->stackbase = tt->softirq_ctx[c]; bt->stacktop = bt->stackbase + SIZE(irq_ctx); if (INSTACK(sp, bt) && (tt->softirq_tasks[c] == tc->task)) return tc->task; } } } return NO_TASK; } /* * Translate a task pointer to its thread_info. */ ulong task_to_thread_info(ulong task) { int i; struct task_context *tc; if (!(tt->flags & THREAD_INFO)) error(FATAL, "task_to_thread_info: thread_info struct does not exist!\n"); tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (tc->task == task) return tc->thread_info; } return(error(FATAL, "task does not exist: %lx\n", task)); } /* * Translate a task address to its stack base, dealing with potential split. 
*/ ulong task_to_stackbase(ulong task) { ulong stackbase; if (tt->flags & THREAD_INFO_IN_TASK) { readmem(task + OFFSET(task_struct_stack), KVADDR, &stackbase, sizeof(void *), "task_struct.stack", FAULT_ON_ERROR); return stackbase; } else if (tt->flags & THREAD_INFO) return task_to_thread_info(task); else return (task & ~(STACKSIZE()-1)); } /* * Try to translate a decimal or hexadecimal string into a task or pid, * failing if no task or pid exists, or if there is ambiguity between * the decimal and hexadecimal translations. However, if the value could * be a decimal PID and a hexadecimal PID of two different processes, then * default to the decimal value. * * This was added in preparation for overlapping, zero-based, user and kernel * virtual addresses on s390 and s390x, allowing for the entry of ambiguous * decimal/hexadecimal task address values without the leading "0x". * It should be used in lieu of "stol" when parsing for task/pid arguments. */ int str_to_context(char *string, ulong *value, struct task_context **tcp) { ulong dvalue, hvalue; int found, type; char *s; struct task_context *tc_dp, *tc_dt, *tc_hp, *tc_ht; if (string == NULL) { error(INFO, "received NULL string\n"); return STR_INVALID; } s = string; dvalue = hvalue = BADADDR; if (decimal(s, 0)) dvalue = dtol(s, RETURN_ON_ERROR, NULL); if (hexadecimal(s, 0)) { if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) <= MAX_HEXADDR_STRLEN) hvalue = htol(s, RETURN_ON_ERROR, NULL); } found = 0; tc_dp = tc_dt = tc_hp = tc_ht = NULL; type = STR_INVALID; if (dvalue != BADADDR) { if ((tc_dp = pid_to_context(dvalue))) found++; if ((tc_dt = task_to_context(dvalue))) found++; } if ((hvalue != BADADDR) && (dvalue != hvalue)) { if ((tc_hp = pid_to_context(hvalue))) found++; if ((tc_ht = task_to_context(hvalue))) found++; } switch (found) { case 2: if (tc_dp && tc_hp) { *tcp = tc_dp; *value = dvalue; type = STR_PID; } break; case 1: if (tc_dp) { *tcp = tc_dp; *value = dvalue; type = STR_PID; } if (tc_dt) { 
*tcp = tc_dt; *value = dvalue; type = STR_TASK; } if (tc_hp) { *tcp = tc_hp; *value = hvalue; type = STR_PID; } if (tc_ht) { *tcp = tc_ht; *value = hvalue; type = STR_TASK; } break; } return type; } /* * Return the task if the vaddr is part of a task's task_struct. */ ulong vaddr_in_task_struct(ulong vaddr) { int i; struct task_context *tc; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if ((vaddr >= tc->task) && (vaddr < (tc->task + SIZE(task_struct)))) return tc->task; } return NO_TASK; } /* * Verify whether any task is running a command. */ int comm_exists(char *s) { int i, cnt; struct task_context *tc; char buf[TASK_COMM_LEN]; strlcpy(buf, s, TASK_COMM_LEN); tc = FIRST_CONTEXT(); for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) if (STREQ(tc->comm, buf)) cnt++; return cnt; } /* * Set a new context. If only a pid is passed, the first task found with * that pid is selected. */ int set_context(ulong task, ulong pid, uint update_gdb_thread) { int i; struct task_context *tc; int found; if (CURRENT_CONTEXT() && (CURRENT_TASK() == task || CURRENT_PID() == pid)) return TRUE; tc = FIRST_CONTEXT(); for (i = 0, found = FALSE; i < RUNNING_TASKS(); i++, tc++) { if (task && (tc->task == task)) { found = TRUE; break; } else if (pid == tc->pid) { found = TRUE; break; } } if (found) { CURRENT_CONTEXT() = tc; /* change the selected thread in gdb, according to current context */ if (update_gdb_thread) return gdb_change_thread_context(); else return TRUE; } else { if (task) error(INFO, "cannot set context for task: %lx\n", task); else error(INFO, "cannot set context for pid: %d\n", pid); return FALSE; } } /* * Check whether the panic was determined to be caused by a "sys -panic" * command. If so, fix the task_context's pid despite what the task_struct * says. 
*/ #define CONTEXT_ADJUSTED (1) #define CONTEXT_ERRONEOUS (2) static int panic_context_adjusted(struct task_context *tc) { pid_t pgrp, tgid; char buf[BUFSIZE]; if (!(DUMPFILE() && (tc == task_to_context(tt->panic_task)) && (tc->pid == 0) && STRNEQ(tc->comm, pc->program_name) && strstr(get_panicmsg(buf), "Attempted to kill the idle task"))) return 0; if (INVALID_MEMBER(task_struct_pgrp) || INVALID_MEMBER(task_struct_tgid)) return CONTEXT_ERRONEOUS; fill_task_struct(tc->task); pgrp = tt->last_task_read ? UINT(tt->task_struct + OFFSET(task_struct_pgrp)) : 0; tgid = tt->last_task_read ? UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0; if (pgrp && tgid && (pgrp == tgid) && !pid_exists((ulong)pgrp)) { tc->pid = (ulong)pgrp; return CONTEXT_ADJUSTED; } return CONTEXT_ERRONEOUS; } /* * Display a task context. */ void show_context(struct task_context *tc) { char buf[BUFSIZE]; char *p1; int adjusted, cnt, indent; adjusted = pc->flags & RUNTIME ? 0 : panic_context_adjusted(tc); indent = pc->flags & RUNTIME ? 
0 : 5; INDENT(indent); fprintf(fp, " PID: %ld\n", tc->pid); INDENT(indent); fprintf(fp, "COMMAND: \"%s\"\n", tc->comm); INDENT(indent); fprintf(fp, " TASK: %lx ", tc->task); if ((machdep->flags & (INIT|MCA)) && (tc->pid == 0)) cnt = comm_exists(tc->comm); else cnt = TASKS_PER_PID(tc->pid); if (cnt > 1) fprintf(fp, "(1 of %d) ", cnt); if (tt->flags & THREAD_INFO) fprintf(fp, "[THREAD_INFO: %lx]", tc->thread_info); fprintf(fp, "\n"); INDENT(indent); fprintf(fp, " CPU: %s\n", task_cpu(tc->processor, buf, VERBOSE)); INDENT(indent); fprintf(fp, " STATE: %s ", task_state_string(tc->task, buf, VERBOSE)); if (is_task_active(tc->task)) { if (machdep->flags & HWRESET) fprintf(fp, "(HARDWARE RESET)"); else if ((pc->flags & SYSRQ) && (tc->task == tt->panic_task)) fprintf(fp, "(SYSRQ)"); else if (machdep->flags & INIT) fprintf(fp, "(INIT)"); else if ((machdep->flags & MCA) && (tc->task == tt->panic_task)) fprintf(fp, "(MCA)"); else if ((tc->processor >= 0) && (tc->processor < NR_CPUS) && (kt->cpu_flags[tc->processor] & NMI)) fprintf(fp, "(NMI)"); else if ((tc->task == tt->panic_task) && XENDUMP_DUMPFILE() && (kt->xen_flags & XEN_SUSPEND)) fprintf(fp, "(SUSPEND)"); else if ((tc->task == tt->panic_task) && !(pc->flags2 & SNAP)) fprintf(fp, "(PANIC)"); else fprintf(fp, "(ACTIVE)"); } if (!(pc->flags & RUNTIME) && !ACTIVE() && (tt->flags & PANIC_TASK_NOT_FOUND) && !SYSRQ_TASK(tc->task)) { fprintf(fp, "\n"); INDENT(indent); if (machine_type("S390") || machine_type("S390X")) fprintf(fp, " INFO: no panic task found"); else if (tt->panic_processor >= 0) fprintf(fp, "WARNING: reported panic task %lx not found", tt->panic_threads[tt->panic_processor]); else fprintf(fp, "WARNING: panic task not found"); } fprintf(fp, "\n"); if (pc->flags & RUNTIME) return; /* * Dump any pre-first-prompt messages here. 
*/ cnt = 0; if (pc->flags & NAMELIST_UNLINKED) { strcpy(buf, pc->namelist); if ((p1 = strstr(buf, "@"))) *p1 = NULLCHAR; fprintf(fp, "%sNOTE: To save the remote \"%s\" locally,\n enter: \"save kernel\"\n", cnt++ ? "" : "\n", buf); } if (REMOTE_DUMPFILE()) fprintf(fp, "%sNOTE: To save the remote \"%s\" locally,\n enter: \"save dumpfile\"\n", cnt++ ? "" : "\n", basename(pc->server_memsrc)); /* * If this panic was caused by a "sys -panic" command, issue the * proper warning message. */ switch (adjusted) { case CONTEXT_ADJUSTED: fprintf(fp, "%sNOTE: The \"%s\" task_struct will erroneously show a p_pid of 0\n", cnt++ ? "" : "\n", tc->comm); break; case CONTEXT_ERRONEOUS: fprintf(fp, "%sWARNING: The \"%s\" context will erroneously show a PID of 0\n", cnt++ ? "" : "\n", tc->comm); break; } if (!(pc->flags & RUNTIME) && (tt->flags & ACTIVE_ONLY)) error(WARNING, "\nonly the active tasks on each cpu are being tracked\n"); } /* * Translate a task_struct state value into a long (verbose), or short string, * or if requested, just pass back the state value. 
*/ #define TASK_STATE_UNINITIALIZED (-1) static long _RUNNING_ = TASK_STATE_UNINITIALIZED; static long _INTERRUPTIBLE_ = TASK_STATE_UNINITIALIZED; static long _UNINTERRUPTIBLE_ = TASK_STATE_UNINITIALIZED; static long _STOPPED_ = TASK_STATE_UNINITIALIZED; static long _TRACING_STOPPED_ = TASK_STATE_UNINITIALIZED; long _ZOMBIE_ = TASK_STATE_UNINITIALIZED; /* also used by IS_ZOMBIE() */ static long _DEAD_ = TASK_STATE_UNINITIALIZED; static long _SWAPPING_ = TASK_STATE_UNINITIALIZED; static long _EXCLUSIVE_ = TASK_STATE_UNINITIALIZED; static long _WAKEKILL_ = TASK_STATE_UNINITIALIZED; static long _WAKING_ = TASK_STATE_UNINITIALIZED; static long _NONINTERACTIVE_ = TASK_STATE_UNINITIALIZED; static long _PARKED_ = TASK_STATE_UNINITIALIZED; static long _NOLOAD_ = TASK_STATE_UNINITIALIZED; static long _NEW_ = TASK_STATE_UNINITIALIZED; #define valid_task_state(X) ((X) != TASK_STATE_UNINITIALIZED) static void dump_task_states(void) { int hi, lo; fprintf(fp, " RUNNING: %3ld (0x%lx)\n", _RUNNING_, _RUNNING_); fprintf(fp, " INTERRUPTIBLE: %3ld (0x%lx)\n", _INTERRUPTIBLE_, _INTERRUPTIBLE_); fprintf(fp, " UNINTERRUPTIBLE: %3ld (0x%lx)\n", _UNINTERRUPTIBLE_, _UNINTERRUPTIBLE_); fprintf(fp, " STOPPED: %3ld (0x%lx)\n", _STOPPED_, _STOPPED_); if (valid_task_state(_TRACING_STOPPED_)) { if (count_bits_long(_TRACING_STOPPED_) > 1) { lo = lowest_bit_long(_TRACING_STOPPED_); hi = highest_bit_long(_TRACING_STOPPED_); fprintf(fp, " TRACING_STOPPED: %3d and %d (0x%x and 0x%x)\n", 1< 1) { lo = lowest_bit_long(_DEAD_); hi = highest_bit_long(_DEAD_); fprintf(fp, " DEAD: %3d and %d (0x%x and 0x%x)\n", 1< 0) && read_string(symbol_value("stat_nam"), buf, BUFSIZE-1) && ascii_string(buf) && (strlen(buf) > strlen("RSDTtZX"))) { for (i = 0; i < strlen(buf); i++) { switch (buf[i]) { case 'R': _RUNNING_ = i; break; case 'S': _INTERRUPTIBLE_ = i; break; case 'D': _UNINTERRUPTIBLE_ = (1 << (i-1)); break; case 'T': _STOPPED_ = (1 << (i-1)); break; case 't': _TRACING_STOPPED_ = (1 << (i-1)); break; case 'X': 
if (_DEAD_ == UNINITIALIZED) _DEAD_ = (1 << (i-1)); else _DEAD_ |= (1 << (i-1)); break; case 'Z': _ZOMBIE_ = (1 << (i-1)); break; case 'x': if (_DEAD_ == UNINITIALIZED) _DEAD_ = (1 << (i-1)); else _DEAD_ |= (1 << (i-1)); break; case 'K': _WAKEKILL_ = (1 << (i-1)); break; case 'W': _WAKING_ = (1 << (i-1)); break; case 'P': _PARKED_ = (1 << (i-1)); break; case 'N': _NOLOAD_ = (1 << (i-1)); break; case 'n': _NEW_ = (1 << (i-1)); break; } } goto done_states; } if ((len = get_array_length("task_state_array", NULL, 0)) <= 0) goto old_defaults; bitpos = 0; for (i = 0; i < len; i++) { if (!read_string(str, buf, BUFSIZE-1)) break; if (CRASHDEBUG(3)) fprintf(fp, "%s%s[%d][%s]\n", bitpos ? "" : "\n", i < 10 ? " " : "", i, buf); if (strstr(buf, "(running)")) _RUNNING_ = bitpos; else if (strstr(buf, "(sleeping)")) _INTERRUPTIBLE_ = bitpos; else if (strstr(buf, "(disk sleep)")) _UNINTERRUPTIBLE_ = bitpos; else if (strstr(buf, "(stopped)")) _STOPPED_ = bitpos; else if (strstr(buf, "(zombie)")) _ZOMBIE_ = bitpos; else if (strstr(buf, "(dead)")) { if (_DEAD_ == TASK_STATE_UNINITIALIZED) _DEAD_ = bitpos; else _DEAD_ |= bitpos; } else if (strstr(buf, "(swapping)")) /* non-existent? 
*/ _SWAPPING_ = bitpos; else if (strstr(buf, "(tracing stop)")) { if (_TRACING_STOPPED_ == TASK_STATE_UNINITIALIZED) _TRACING_STOPPED_ = bitpos; else _TRACING_STOPPED_ |= bitpos; } else if (strstr(buf, "(wakekill)")) _WAKEKILL_ = bitpos; else if (strstr(buf, "(waking)")) _WAKING_ = bitpos; else if (strstr(buf, "(parked)")) _PARKED_ = bitpos; if (!bitpos) bitpos = 1; else bitpos = bitpos << 1; task_state_array += sizeof(void *); if (!readmem(task_state_array, KVADDR, &str, sizeof(void *), "task_state_array", RETURN_ON_ERROR)) break; } if ((THIS_KERNEL_VERSION >= LINUX(2,6,16)) && (THIS_KERNEL_VERSION < LINUX(2,6,24))) { _NONINTERACTIVE_ = 64; } if (THIS_KERNEL_VERSION >= LINUX(4,14,0)) { if (valid_task_state(_PARKED_)) { bitpos = _PARKED_; _DEAD_ |= (bitpos << 1); /* TASK_DEAD */ _WAKEKILL_ = (bitpos << 2); /* TASK_WAKEKILL */ _WAKING_ = (bitpos << 3); /* TASK_WAKING */ _NOLOAD_ = (bitpos << 4); /* TASK_NOLOAD */ _NEW_ = (bitpos << 5); /* TASK_NEW */ } } else if (THIS_KERNEL_VERSION >= LINUX(2,6,32)) { /* * Account for states not listed in task_state_array[] */ if (count_bits_long(_DEAD_) == 1) { bitpos = 1<< lowest_bit_long(_DEAD_); _DEAD_ |= (bitpos<<1); /* TASK_DEAD */ _WAKEKILL_ = (bitpos<<2); /* TASK_WAKEKILL */ _WAKING_ = (bitpos<<3); /* TASK_WAKING */ } } done_states: if (CRASHDEBUG(3)) dump_task_states(); if (!valid_task_state(_RUNNING_) || !valid_task_state(_INTERRUPTIBLE_) || !valid_task_state(_UNINTERRUPTIBLE_) || !valid_task_state(_ZOMBIE_) || !valid_task_state(_STOPPED_)) { if (CRASHDEBUG(3)) fprintf(fp, "initialize_task_state: using old defaults\n"); goto old_defaults; } } /* * Print multiple state strings if appropriate. */ static char * task_state_string_verbose(ulong task, char *buf) { long state, both; int count; state = task_state(task); buf[0] = NULLCHAR; count = 0; if (state == _RUNNING_) { sprintf(buf, "TASK_RUNNING"); return buf; } if (state & _INTERRUPTIBLE_) sprintf(&buf[strlen(buf)], "%sTASK_INTERRUPTIBLE", count++ ? 
"|" : ""); if (state & _UNINTERRUPTIBLE_) sprintf(&buf[strlen(buf)], "%sTASK_UNINTERRUPTIBLE", count++ ? "|" : ""); if (state & _STOPPED_) sprintf(&buf[strlen(buf)], "%sTASK_STOPPED", count++ ? "|" : ""); if (state & _TRACING_STOPPED_) sprintf(&buf[strlen(buf)], "%sTASK_TRACED", count++ ? "|" : ""); if ((both = (state & _DEAD_))) { if (count_bits_long(both) > 1) sprintf(&buf[strlen(buf)], "%sEXIT_DEAD|TASK_DEAD", count++ ? "|" : ""); else sprintf(&buf[strlen(buf)], "%sEXIT_DEAD", count++ ? "|" : ""); } if (state & _ZOMBIE_) sprintf(&buf[strlen(buf)], "%sEXIT_ZOMBIE", count++ ? "|" : ""); if (valid_task_state(_WAKING_) && (state & _WAKING_)) sprintf(&buf[strlen(buf)], "%sTASK_WAKING", count++ ? "|" : ""); if (valid_task_state(_WAKEKILL_) && (state & _WAKEKILL_)) sprintf(&buf[strlen(buf)], "%sTASK_WAKEKILL", count++ ? "|" : ""); if (valid_task_state(_NOLOAD_) && (state & _NOLOAD_)) sprintf(&buf[strlen(buf)], "%sTASK_NOLOAD", count++ ? "|" : ""); if (valid_task_state(_NEW_) && (state & _NEW_)) sprintf(&buf[strlen(buf)], "%sTASK_NEW", count++ ? "|" : ""); if (valid_task_state(_NONINTERACTIVE_) && (state & _NONINTERACTIVE_)) sprintf(&buf[strlen(buf)], "%sTASK_NONINTERACTIVE", count++ ? "|" : ""); if (state == _PARKED_) { sprintf(buf, "TASK_PARKED"); return buf; } return buf; } char * task_state_string(ulong task, char *buf, int verbose) { long state; int exclusive; int valid, set; if (_RUNNING_ == TASK_STATE_UNINITIALIZED) initialize_task_state(); if (verbose) return task_state_string_verbose(task, buf); if (buf) sprintf(buf, verbose ? 
"(unknown)" : "??"); state = task_state(task); set = valid = exclusive = 0; if (valid_task_state(_EXCLUSIVE_)) { exclusive = state & _EXCLUSIVE_; state &= ~(_EXCLUSIVE_); } if (state == _RUNNING_) { sprintf(buf, "RU"); valid++; } if (state & _INTERRUPTIBLE_) { sprintf(buf, "IN"); valid++; set++; } if (state & _UNINTERRUPTIBLE_) { if (valid_task_state(_NOLOAD_) && (state & _NOLOAD_)) sprintf(buf, "ID"); else sprintf(buf, "UN"); valid++; set++; } if (state & _ZOMBIE_) { sprintf(buf, "ZO"); valid++; set++; } if (state & _STOPPED_) { sprintf(buf, "ST"); valid++; set++; } if (valid_task_state(_TRACING_STOPPED_) && (state & _TRACING_STOPPED_)) { sprintf(buf, "TR"); valid++; set++; } if (state == _SWAPPING_) { sprintf(buf, "SW"); valid++; set++; } if ((state & _DEAD_) && !set) { sprintf(buf, "DE"); valid++; set++; } if (state == _PARKED_) { sprintf(buf, "PA"); valid++; } if (state == _WAKING_) { sprintf(buf, "WA"); valid++; } if (state == _NEW_) { sprintf(buf, "NE"); valid++; } if (valid && exclusive) strcat(buf, "EX"); return buf; } /* * Return a task's state and exit_state together. */ ulong task_state(ulong task) { ulong state, exit_state; fill_task_struct(task); if (!tt->last_task_read) return 0; if (SIZE(task_struct_state) == sizeof(ulong)) state = ULONG(tt->task_struct + OFFSET(task_struct_state)); else state = UINT(tt->task_struct + OFFSET(task_struct_state)); if (VALID_MEMBER(task_struct_exit_state) && SIZE(task_struct_exit_state) == sizeof(ulong)) exit_state = ULONG(tt->task_struct + OFFSET(task_struct_exit_state)); else if (VALID_MEMBER(task_struct_exit_state)) exit_state = UINT(tt->task_struct + OFFSET(task_struct_exit_state)); else exit_state = 0; return (state | exit_state); } /* * Return a task's flags. 
*/ ulong task_flags(ulong task) { ulong flags; fill_task_struct(task); if (tt->last_task_read) { if (SIZE(task_struct_flags) == sizeof(unsigned int)) flags = UINT(tt->task_struct + OFFSET(task_struct_flags)); else flags = ULONG(tt->task_struct + OFFSET(task_struct_flags)); } else flags = 0; return flags; } /* * Return task's policy as bitmask bit. */ static ulong task_policy(ulong task) { ulong policy = 0; fill_task_struct(task); if (!tt->last_task_read) return policy; if (SIZE(task_struct_policy) == sizeof(unsigned int)) policy = 1 << UINT(tt->task_struct + OFFSET(task_struct_policy)); else policy = 1 << ULONG(tt->task_struct + OFFSET(task_struct_policy)); return policy; } /* * Return a task's tgid. */ ulong task_tgid(ulong task) { uint tgid; fill_task_struct(task); tgid = tt->last_task_read ? UINT(tt->task_struct + OFFSET(task_struct_tgid)) : 0; return (ulong)tgid; } ulonglong task_last_run(ulong task) { ulong last_run; ulonglong timestamp; timestamp = 0; fill_task_struct(task); if (VALID_MEMBER(task_struct_last_run)) { last_run = tt->last_task_read ? ULONG(tt->task_struct + OFFSET(task_struct_last_run)) : 0; timestamp = (ulonglong)last_run; } else if (VALID_MEMBER(task_struct_timestamp)) timestamp = tt->last_task_read ? ULONGLONG(tt->task_struct + OFFSET(task_struct_timestamp)) : 0; else if (VALID_MEMBER(sched_info_last_arrival)) timestamp = tt->last_task_read ? ULONGLONG(tt->task_struct + OFFSET(task_struct_sched_info) + OFFSET(sched_info_last_arrival)) : 0; return timestamp; } /* * Return a task's mm_struct address. If "fill" is set, the mm_struct * cache is loaded. */ ulong task_mm(ulong task, int fill) { ulong mm_struct; fill_task_struct(task); if (!tt->last_task_read) return 0; mm_struct = ULONG(tt->task_struct + OFFSET(task_struct_mm)); if (fill && mm_struct) fill_mm_struct(mm_struct); return mm_struct; } /* * Translate a processor number into a string, taking NO_PROC_ID into account. 
*/
/*
 * Format a processor number into buf; out-of-range values render as
 * "(unknown)" (verbose) or "?".
 */
char *
task_cpu(int processor, char *buf, int verbose)
{
	if (processor < NR_CPUS)
		sprintf(buf, "%d", processor);
	else
		sprintf(buf, verbose ? "(unknown)" : "?");

	return buf;
}

/*
 * Check either the panic_threads[] array on a dump, or the has_cpu flag
 * of a task_struct on a live system.  Also account for deprecation of
 * usage of has_cpu on non-SMP systems.
 */
int
is_task_active(ulong task)
{
	int active;

	/* The locally-running crash session's task is always active. */
	if (LOCAL_ACTIVE() && (task == tt->this_task))
		return TRUE;
	if (DUMPFILE() && is_panic_thread(task))
		return TRUE;

	fill_task_struct(task);

	active = tt->last_task_read ?
		task_has_cpu(task, tt->task_struct) : 0;

	return active;
}

/*
 * Return TRUE if a task is the panic_task or is contained within the
 * panic_threads[] array.
 */
int
is_panic_thread(ulong task)
{
	int i;

	if (DUMPFILE()) {
		if (tt->panic_task == task)
			return TRUE;

		for (i = 0; i < NR_CPUS; i++)
			if (tt->panic_threads[i] == task)
				return TRUE;
	}

	return FALSE;
}

/*
 * Depending upon the kernel, check the task_struct's has_cpu or cpus_runnable
 * field if either exist, or the global runqueues[].curr via get_active_set()
 * to determine whether a task is running on a cpu.
*/
/*
 * Determine whether a task is running on a cpu, via whichever source
 * this kernel provides: task_struct.has_cpu, task_struct.cpus_runnable,
 * or the active set derived from runqueues[].curr.  local_task, when
 * non-NULL, is a previously-read task_struct image to consult instead
 * of reading dumpfile/kernel memory.
 */
static int
task_has_cpu(ulong task, char *local_task)
{
	int i, has_cpu;
	ulong cpus_runnable;

	/* The panic task is on a cpu by definition. */
	if (DUMPFILE() && (task == tt->panic_task))
		return TRUE;

	if (VALID_MEMBER(task_struct_has_cpu)) {
		if (local_task)
			has_cpu = INT(local_task +
				OFFSET(task_struct_has_cpu));
		else if (!readmem((ulong)(task +
		    OFFSET(task_struct_has_cpu)), KVADDR, &has_cpu,
		    sizeof(int), "task_struct has_cpu", RETURN_ON_ERROR))
			has_cpu = FALSE;
	} else if (VALID_MEMBER(task_struct_cpus_runnable)) {
		if (local_task)
			cpus_runnable = ULONG(local_task +
				OFFSET(task_struct_cpus_runnable));
		else if (!readmem((ulong)(task +
		    OFFSET(task_struct_cpus_runnable)), KVADDR,
		    &cpus_runnable, sizeof(ulong),
		    "task_struct cpus_runnable", RETURN_ON_ERROR))
			cpus_runnable = ~0UL;
		/* ~0UL means "not on any cpu". */
		has_cpu = (cpus_runnable != ~0UL);
	} else if (get_active_set()) {
		for (i = 0, has_cpu = FALSE; i < NR_CPUS; i++) {
			if (task == tt->active_set[i]) {
				has_cpu = TRUE;
				break;
			}
		}
	} else
		error(FATAL,
	"task_struct has no has_cpu, or cpus_runnable; runqueues[] not defined?\n");

	return has_cpu;
}

/*
 * If a task is in the panic_threads array and has an associated panic_ksp
 * array entry, return it.
 */
int
get_panic_ksp(struct bt_info *bt, ulong *ksp)
{
	int i;

	if (tt->flags & PANIC_KSP) {
		for (i = 0; i < NR_CPUS; i++) {
			if ((tt->panic_threads[i] == bt->task) &&
			    tt->panic_ksp[i] &&
			    INSTACK(tt->panic_ksp[i], bt)) {
				*ksp = tt->panic_ksp[i];
				return TRUE;
			}
		}
	}

	return FALSE;
}

/*
 * Look for kcore's storage information for the system's panic state.
 * If it's not there (somebody else's dump format?), look through all
 * the stack traces or the log buffer for evidence of panic.
*/
/*
 * Determine and cache the panic task (tt->panic_task), preferring the
 * kernel's own panic bookkeeping symbols, then a log/stack search, and
 * finally falling back to the first context (task 0).
 */
static ulong
get_panic_context(void)
{
	int i;
	struct task_context *tc;
	ulong panic_threads_addr;
	ulong task;
	char *tp;

	/* Make sure every active task has a context entry. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!(task = tt->active_set[i]))
			continue;

		if (!task_exists(task)) {
			error(WARNING,
			    "active task %lx on cpu %d not found in PID hash\n\n",
				task, i);
			if ((tp = fill_task_struct(task)))
				add_context(task, tp);
		}
	}

	/*
	 * --no_panic command line option
	 */
	if (tt->flags & PANIC_TASK_NOT_FOUND)
		goto use_task_0;

	tt->panic_processor = -1;
	task = NO_TASK;
	tc = FIRST_CONTEXT();

	if (symbol_exists("panic_threads") &&
	    symbol_exists("panicmsg") &&
	    symbol_exists("panic_processor")) {
		panic_threads_addr = symbol_value("panic_threads");
		get_symbol_data("panic_processor", sizeof(int),
			&tt->panic_processor);
		get_symbol_data("panicmsg", sizeof(char *), &tt->panicmsg);

		if (!readmem(panic_threads_addr, KVADDR, tt->panic_threads,
		    sizeof(void *)*NR_CPUS, "panic_processor array",
		    RETURN_ON_ERROR))
			goto use_task_0;

		task = tt->panic_threads[tt->panic_processor];

		if (symbol_exists("panic_ksp")) {
			if (!(tt->panic_ksp = (ulong *)
			    calloc(NR_CPUS, sizeof(void *))))
				error(FATAL,
				    "cannot malloc panic_ksp array.\n");
			readmem(symbol_value("panic_ksp"), KVADDR,
				tt->panic_ksp, sizeof(void *)*NR_CPUS,
				"panic_ksp array", RETURN_ON_ERROR);
			tt->flags |= PANIC_KSP;
		}

		if (machdep->flags & HWRESET) {
			populate_panic_threads();
			task = tt->panic_threads[0];
		}
	}

	if (task && task_exists(task))
		return (tt->panic_task = task);

	if (task)
		error(INFO,
		    "reported panic task %lx does not exist!\n\n", task);

	/* Fall back to searching the stacks/log for panic evidence. */
	if ((tc = panic_search())) {
		tt->panic_processor = tc->processor;
		return (tt->panic_task = tc->task);
	}

use_task_0:
	if (CRASHDEBUG(1))
		error(INFO, "get_panic_context: panic task not found\n");

	tt->flags |= PANIC_TASK_NOT_FOUND;
	tc = FIRST_CONTEXT();
	return (tc->task);
}

/*
 * Get the active task on a cpu -- from a dumpfile only.
*/ ulong get_active_task(int cpu) { int i; ulong task; struct task_context *tc; if (DUMPFILE() && (task = tt->panic_threads[cpu])) return task; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if ((tc->processor == cpu) && is_task_active(tc->task)) return(tc->task); } return NO_TASK; } /* * Arrange the panic strings based on the severity of the panic * events. */ static const char* panic_msg[] = { "SysRq : Crash", "SysRq : Trigger a crash", "SysRq : Netdump", "Kernel panic: ", "Kernel panic - ", "Kernel BUG at", "kernel BUG at", "Unable to handle kernel paging request", "Unable to handle kernel NULL pointer dereference", "BUG: unable to handle kernel ", "general protection fault: ", "double fault: ", "divide error: ", "stack segment: ", "[Hardware Error]: ", "Bad mode in ", "Oops: ", }; #define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0])) /* * Read the panic string. */ char * get_panicmsg(char *buf) { int msg_found, i; BZERO(buf, BUFSIZE); msg_found = FALSE; if (tt->panicmsg) { read_string(tt->panicmsg, buf, BUFSIZE-1); msg_found = TRUE; } else if (LKCD_DUMPFILE()) { get_lkcd_panicmsg(buf); msg_found = TRUE; } if (msg_found == TRUE) return(buf); open_tmpfile(); dump_log(SHOW_LOG_TEXT); /* * First check for a SYSRQ-generated crash, and set the * active-task flag appropriately. The message may or * may not be used as the panic message. 
*/ for (i = 0; i < ARRAY_SIZE(panic_msg); i++) { rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, panic_msg[i])) { msg_found = TRUE; if (strstr(buf, "SysRq :")) pc->flags |= SYSRQ; goto found; } } } rewind(pc->tmpfile); while (!msg_found && fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "sysrq") && symbol_exists("sysrq_pressed")) { get_symbol_data("sysrq_pressed", sizeof(int), &msg_found); break; } /* * Try to search panic string in panic keywords */ search_panic_task_by_keywords(buf, &msg_found); } found: close_tmpfile(); if (!msg_found) BZERO(buf, BUFSIZE); return(buf); } /* * This command allows the running of a set of commands on any or all * tasks running on a system. The target tasks may be designated by * pid, task or command name. The available command set is designated by * the FOREACH_xxx definitions below. If a running command name string * conflicts with a foreach command, the command name string may be * prefixed with a \ character. */ void cmd_foreach(void) { int a, c, k, t, p; ulong value; static struct foreach_data foreach_data; struct foreach_data *fd; struct task_context *tc; char *p1; int key; BZERO(&foreach_data, sizeof(struct foreach_data)); fd = &foreach_data; while ((c = getopt(argcnt, args, "R:vomlgersStTpukcfFxhdaGy:")) != EOF) { switch(c) { case 'R': fd->reference = optarg; break; case 'h': case 'x': fd->flags |= FOREACH_x_FLAG; break; case 'd': fd->flags |= FOREACH_d_FLAG; break; case 'v': fd->flags |= FOREACH_v_FLAG; break; case 'm': fd->flags |= FOREACH_m_FLAG; break; case 'l': fd->flags |= FOREACH_l_FLAG; break; case 'o': fd->flags |= FOREACH_o_FLAG; break; case 'g': fd->flags |= FOREACH_g_FLAG; break; case 'e': fd->flags |= FOREACH_e_FLAG; break; case 's': fd->flags |= FOREACH_s_FLAG; break; case 'S': fd->flags |= FOREACH_S_FLAG; break; case 'r': fd->flags |= FOREACH_r_FLAG; break; case 'T': fd->flags |= FOREACH_T_FLAG; break; case 't': fd->flags |= FOREACH_t_FLAG; break; case 'p': fd->flags |= 
FOREACH_p_FLAG; break; case 'u': fd->flags |= FOREACH_u_FLAG; break; case 'k': fd->flags |= FOREACH_k_FLAG; break; case 'c': fd->flags |= FOREACH_c_FLAG; break; case 'f': fd->flags |= FOREACH_f_FLAG; break; case 'F': if (fd->flags & FOREACH_F_FLAG) fd->flags |= FOREACH_F_FLAG2; else fd->flags |= FOREACH_F_FLAG; break; case 'a': fd->flags |= FOREACH_a_FLAG; break; case 'G': fd->flags |= FOREACH_G_FLAG; break; case 'y': fd->flags |= FOREACH_y_FLAG; fd->policy = make_sched_policy(optarg); break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); a = c = k = t = p = 0; while (args[optind]) { /* * Once a keyword has been entered, then only accept * command arguments. */ if (k) { p1 = args[optind]; goto command_argument; } /* * If it's a keyword, grab it and check no further. */ if (is_foreach_keyword(args[optind], &key)) { if (k == MAX_FOREACH_KEYWORDS) error(INFO, "too many keywords!\n"); else fd->keyword_array[k++] = key; optind++; continue; } /* * If it's a task pointer or pid, take it. */ if (IS_A_NUMBER(args[optind])) { if (STREQ(args[optind], "DE") && pid_exists(0xde)) { error(INFO, "ambiguous task-identifying argument: %s\n", args[optind]); error(CONT, "for a \"state\" argument, use: \\DE\n"); error(CONT, "for a \"pid\" argument, use: 0xDE, 0xde, de or 222\n\n"); cmd_usage(pc->curcmd, SYNOPSIS); return; } switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: if (p == MAX_FOREACH_PIDS) error(INFO, "too many pids specified!\n"); else { fd->pid_array[p++] = value; fd->flags |= FOREACH_SPECIFIED; } optind++; continue; case STR_TASK: if (t == MAX_FOREACH_TASKS) error(INFO, "too many tasks specified!\n"); else { fd->task_array[t++] = value; fd->flags |= FOREACH_SPECIFIED; } optind++; continue; case STR_INVALID: break; } } /* * Select all kernel threads. 
*/ if (STREQ(args[optind], "kernel")) { if (fd->flags & FOREACH_USER) error(FATAL, "user and kernel are mutually exclusive!\n"); fd->flags |= FOREACH_KERNEL; optind++; continue; } if ((args[optind][0] == '\\') && STREQ(&args[optind][1], "DE")) shift_string_left(args[optind], 1); if (STREQ(args[optind], "RU") || STREQ(args[optind], "IN") || STREQ(args[optind], "UN") || STREQ(args[optind], "ST") || STREQ(args[optind], "TR") || STREQ(args[optind], "ZO") || STREQ(args[optind], "DE") || STREQ(args[optind], "PA") || STREQ(args[optind], "WA") || STREQ(args[optind], "ID") || STREQ(args[optind], "NE") || STREQ(args[optind], "SW")) { ulong state = TASK_STATE_UNINITIALIZED; if (fd->flags & FOREACH_STATE) error(FATAL, "only one task state allowed\n"); if (STREQ(args[optind], "RU")) state = _RUNNING_; else if (STREQ(args[optind], "IN")) state = _INTERRUPTIBLE_; else if (STREQ(args[optind], "UN")) state = _UNINTERRUPTIBLE_; else if (STREQ(args[optind], "ST")) state = _STOPPED_; else if (STREQ(args[optind], "TR")) state = _TRACING_STOPPED_; else if (STREQ(args[optind], "ZO")) state = _ZOMBIE_; else if (STREQ(args[optind], "DE")) state = _DEAD_; else if (STREQ(args[optind], "SW")) state = _SWAPPING_; else if (STREQ(args[optind], "PA")) state = _PARKED_; else if (STREQ(args[optind], "WA")) state = _WAKING_; else if (STREQ(args[optind], "ID")) state = _UNINTERRUPTIBLE_|_NOLOAD_; else if (STREQ(args[optind], "NE")) state = _NEW_; if (state == TASK_STATE_UNINITIALIZED) error(FATAL, "invalid task state for this kernel: %s\n", args[optind]); fd->state = args[optind]; fd->flags |= FOREACH_STATE; optind++; continue; } /* * Select only user threads. 
*/ if (STREQ(args[optind], "user")) { if (fd->flags & FOREACH_KERNEL) error(FATAL, "user and kernel are mutually exclusive!\n"); fd->flags |= FOREACH_USER; optind++; continue; } /* * Select only user-space thread group leaders */ if (STREQ(args[optind], "gleader")) { if (fd->flags & FOREACH_KERNEL) error(FATAL, "gleader and kernel are mutually exclusive!\n"); fd->flags |= (FOREACH_USER|FOREACH_GLEADER); optind++; continue; } /* * Select only active tasks (dumpfile only) */ if (STREQ(args[optind], "active")) { if (!DUMPFILE()) error(FATAL, "active option not allowed on live systems\n"); fd->flags |= FOREACH_ACTIVE; optind++; continue; } /* * Regular expression is exclosed within "'" character. * The args[optind] string may not be modified, so a copy * is duplicated. */ if (SINGLE_QUOTED_STRING(args[optind])) { if (fd->regexs == MAX_REGEX_ARGS) error(INFO, "too many expressions specified!\n"); else { p1 = strdup(&args[optind][1]); LASTCHAR(p1) = NULLCHAR; if (regcomp(&fd->regex_info[fd->regexs].regex, p1, REG_EXTENDED|REG_NOSUB)) { error(INFO, "invalid regular expression: %s\n", p1); free(p1); goto bailout; } fd->regex_info[fd->regexs].pattern = p1; if (fd->regexs++ == 0) { pc->cmd_cleanup_arg = (void *)fd; pc->cmd_cleanup = foreach_cleanup; } } optind++; continue; } /* * If it's a command name, prefixed or otherwise, take it. */ p1 = (args[optind][0] == '\\') ? &args[optind][1] : args[optind]; if (comm_exists(p1)) { if (c == MAX_FOREACH_COMMS) error(INFO, "too many commands specified!\n"); else { fd->comm_array[c++] = p1; fd->flags |= FOREACH_SPECIFIED; } optind++; continue; } command_argument: /* * If no keyword has been entered, we don't know what this * is -- most likely it's a bogus command specifier. We set * FOREACH_SPECIFIED in case it was a bad specifier and no * other task selectors exist -- which in turn would causes * the command to be erroneously run on all tasks. 
*/ if (!k) { fd->flags |= FOREACH_SPECIFIED; error(INFO, "unknown argument: \"%s\"\n", args[optind]); optind++; continue; } /* * Must be an command argument -- so store it and let * the command deal with it... */ if (a == MAX_FOREACH_ARGS) error(INFO, "too many arguments specified!\n"); else fd->arg_array[a++] = (ulong)p1; optind++; } fd->flags |= FOREACH_CMD; fd->pids = p; fd->keys = k; fd->comms = c; fd->tasks = t; fd->args = a; if (fd->keys) foreach(fd); else error(INFO, "no keywords specified\n"); bailout: foreach_cleanup((void *)fd); } /* * Do the work for cmd_foreach(). */ void foreach(struct foreach_data *fd) { int i, j, k, a; struct task_context *tc, *tgc; int specified; int doit; int subsequent; unsigned int radix; ulong cmdflags; ulong tgid; struct reference reference, *ref; int print_header; struct bt_info bt_info, *bt; char buf[TASK_COMM_LEN]; struct psinfo psinfo; /* * Filter out any command/option issues. */ if (CRASHDEBUG(1)) { fprintf(fp, " flags: %lx\n", fd->flags); fprintf(fp, " task_array: %s", fd->tasks ? "" : "(none)"); for (j = 0; j < fd->tasks; j++) fprintf(fp, "[%lx] ", fd->task_array[j]); fprintf(fp, "\n"); fprintf(fp, " pid_array: %s", fd->pids ? "" : "(none)"); for (j = 0; j < fd->pids; j++) fprintf(fp, "[%ld] ", fd->pid_array[j]); fprintf(fp, "\n"); fprintf(fp, " comm_array: %s", fd->comms ? "" : "(none)"); for (j = 0; j < fd->comms; j++) fprintf(fp, "[%s] ", fd->comm_array[j]); fprintf(fp, "\n"); fprintf(fp, " regex_info: %s", fd->regexs ? "" : "(none)\n"); for (j = 0; j < fd->regexs; j++) { fprintf(fp, "%s[%d] pattern: [%s] ", j ? " " : "", j, fd->regex_info[j].pattern); fprintf(fp, "regex: [%lx]\n", (ulong)&fd->regex_info[j].regex); } fprintf(fp, "\n"); fprintf(fp, "keyword_array: %s", fd->keys ? "" : "(none)"); for (k = 0; k < fd->keys; k++) fprintf(fp, "[%d] ", fd->keyword_array[k]); fprintf(fp, "\n"); fprintf(fp, " arg_array: %s", fd->args ? 
"" : "(none)"); for (a = 0; a < fd->args; a++) fprintf(fp, "[%lx (%s)] ", fd->arg_array[a], (char *)fd->arg_array[a]); fprintf(fp, "\n"); fprintf(fp, " reference: \"%s\"\n", fd->reference ? fd->reference : ""); } print_header = TRUE; bt = NULL; for (k = 0; k < fd->keys; k++) { switch(fd->keyword_array[k]) { case FOREACH_NET: switch (fd->flags & (FOREACH_s_FLAG|FOREACH_S_FLAG)) { case (FOREACH_s_FLAG|FOREACH_S_FLAG): error(WARNING, "net -s and -S options are mutually exclusive!\n"); fd->flags = FOREACH_s_FLAG; break; case 0: error(WARNING, "net command requires -s or -S option\n\n"); fd->flags |= FOREACH_s_FLAG; break; } if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "net: -x and -d options are mutually exclusive\n"); break; case FOREACH_VTOP: if (!fd->args) error(FATAL, "foreach command requires address argument\n"); if (fd->reference) error(FATAL, "vtop command does not support -R option\n"); if ((fd->flags & (FOREACH_u_FLAG|FOREACH_k_FLAG)) == (FOREACH_u_FLAG|FOREACH_k_FLAG)) error(FATAL, "vtop: -u and -k options are mutually exclusive\n"); break; case FOREACH_VM: if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "vm: -x and -d options are mutually exclusive\n"); if (count_bits_long(fd->flags & (FOREACH_i_FLAG|FOREACH_p_FLAG| FOREACH_m_FLAG|FOREACH_v_FLAG)) > 1) error(FATAL, "vm command accepts only one of -p, -m or -v flags\n"); if (fd->reference) { if (fd->flags & FOREACH_i_FLAG) error(FATAL, "vm: -i is not applicable to the -R option\n"); if (fd->flags & FOREACH_m_FLAG) error(FATAL, "vm: -m is not applicable to the -R option\n"); if (fd->flags & FOREACH_v_FLAG) error(FATAL, "vm: -v is not applicable to the -R option\n"); } break; case FOREACH_BT: if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "bt: -x and -d options are mutually exclusive\n"); if ((fd->flags & FOREACH_l_FLAG) && NO_LINE_NUMBERS()) { error(INFO, 
"line numbers are not available\n"); fd->flags &= ~FOREACH_l_FLAG; } #ifndef GDB_5_3 if ((fd->flags & FOREACH_g_FLAG)) error(FATAL, "bt -g option is not supported when issued from foreach\n"); #endif bt = &bt_info; break; case FOREACH_TASK: if ((fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) == (FOREACH_x_FLAG|FOREACH_d_FLAG)) error(FATAL, "task: -x and -d options are mutually exclusive\n"); if (count_bits_long(fd->flags & (FOREACH_x_FLAG|FOREACH_d_FLAG)) > 1) error(FATAL, "task command accepts -R member[,member]," " and either -x or -d flags\n"); break; case FOREACH_SET: if (fd->reference) error(FATAL, "set command does not support -R option\n"); break; case FOREACH_SIG: if (fd->flags & (FOREACH_l_FLAG|FOREACH_s_FLAG)) error(FATAL, "sig: -l and -s options are not applicable\n"); if (fd->flags & FOREACH_g_FLAG) { if (!hq_open()) { error(INFO, "cannot hash thread group tasks\n"); fd->flags &= ~FOREACH_g_FLAG; } else print_header = FALSE; } break; case FOREACH_PS: if (count_bits_long(fd->flags & FOREACH_PS_EXCLUSIVE) > 1) error(FATAL, ps_exclusive); if ((fd->flags & (FOREACH_l_FLAG|FOREACH_m_FLAG)) && (fd->flags & FOREACH_G_FLAG)) error(FATAL, "-G not supported with -%c option\n", fd->flags & FOREACH_l_FLAG ? 
'l' : 'm'); BZERO(&psinfo, sizeof(struct psinfo)); if (fd->flags & FOREACH_G_FLAG) { if (!hq_open()) { error(INFO, "cannot hash thread group tasks\n"); fd->flags &= ~FOREACH_G_FLAG; } } if (fd->flags & (FOREACH_l_FLAG|FOREACH_m_FLAG)) sort_context_array_by_last_run(); if ((fd->flags & FOREACH_m_FLAG) && INVALID_MEMBER(rq_timestamp)) option_not_supported('m'); print_header = FALSE; break; case FOREACH_FILES: if (fd->flags & FOREACH_p_FLAG) error(FATAL, "files command does not support -p option\n"); break; case FOREACH_TEST: break; } } subsequent = FALSE; specified = (fd->tasks || fd->pids || fd->comms || fd->regexs || (fd->flags & FOREACH_SPECIFIED)); ref = &reference; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { doit = FALSE; if ((fd->flags & FOREACH_ACTIVE) && !is_task_active(tc->task)) continue; if ((fd->flags & FOREACH_USER) && is_kernel_thread(tc->task)) continue; if ((fd->flags & FOREACH_GLEADER) && tc->pid != task_tgid(tc->task)) continue; if ((fd->flags & FOREACH_KERNEL) && !is_kernel_thread(tc->task)) continue; if ((fd->flags & FOREACH_STATE) && (!STRNEQ(task_state_string(tc->task, buf, 0), fd->state))) continue; if (specified) { for (j = 0; j < fd->tasks; j++) { if (fd->task_array[j] == tc->task) { doit = TRUE; break; } } for (j = 0; !doit && (j < fd->pids); j++) { if (fd->pid_array[j] == tc->pid) { doit = TRUE; break; } } for (j = 0; !doit && (j < fd->comms); j++) { strlcpy(buf, fd->comm_array[j], TASK_COMM_LEN); if (STREQ(buf, tc->comm)) { doit = TRUE; break; } } for (j = 0; !doit && (j < fd->regexs); j++) { if (regexec(&fd->regex_info[j].regex, tc->comm, 0, NULL, 0) == 0) { doit = TRUE; break; } } } else doit = TRUE; if (!doit) continue; if (output_closed() || received_SIGINT()) { free_all_bufs(); goto foreach_bailout; } if (setjmp(pc->foreach_loop_env)) { free_all_bufs(); continue; } pc->flags |= IN_FOREACH; if (fd->reference) { BZERO(ref, sizeof(struct reference)); ref->str = fd->reference; } else if (print_header) 
print_task_header(fp, tc, subsequent++); for (k = 0; k < fd->keys; k++) { free_all_bufs(); switch(fd->keyword_array[k]) { case FOREACH_BT: pc->curcmd = "bt"; BZERO(bt, sizeof(struct bt_info));; bt->task = tc->task; bt->tc = tc; bt->stackbase = GET_STACKBASE(tc->task); bt->stacktop = GET_STACKTOP(tc->task); if (fd->flags & FOREACH_r_FLAG) bt->flags |= BT_RAW; if (fd->flags & FOREACH_s_FLAG) bt->flags |= BT_SYMBOL_OFFSET; if (fd->flags & FOREACH_t_FLAG) bt->flags |= BT_TEXT_SYMBOLS; if (fd->flags & FOREACH_T_FLAG) { bt->flags |= BT_TEXT_SYMBOLS; bt->flags |= BT_TEXT_SYMBOLS_ALL; } if ((fd->flags & FOREACH_o_FLAG) || (kt->flags & USE_OPT_BT)) bt->flags |= BT_OPT_BACK_TRACE; if (fd->flags & FOREACH_e_FLAG) bt->flags |= BT_EFRAME_SEARCH; #ifdef GDB_5_3 if (fd->flags & FOREACH_g_FLAG) bt->flags |= BT_USE_GDB; #endif if (fd->flags & FOREACH_l_FLAG) bt->flags |= BT_LINE_NUMBERS; if (fd->flags & FOREACH_f_FLAG) bt->flags |= BT_FULL; if (fd->flags & FOREACH_F_FLAG) bt->flags |= (BT_FULL|BT_FULL_SYM_SLAB); if (fd->flags & FOREACH_F_FLAG2) bt->flags |= BT_FULL_SYM_SLAB2; if (fd->flags & FOREACH_x_FLAG) bt->radix = 16; if (fd->flags & FOREACH_d_FLAG) bt->radix = 10; if (fd->reference) bt->ref = ref; back_trace(bt); break; case FOREACH_VM: pc->curcmd = "vm"; cmdflags = 0; if (fd->flags & FOREACH_x_FLAG) cmdflags = PRINT_RADIX_16; else if (fd->flags & FOREACH_d_FLAG) cmdflags = PRINT_RADIX_10; if (fd->flags & FOREACH_i_FLAG) vm_area_dump(tc->task, PRINT_INODES, 0, NULL); else if (fd->flags & FOREACH_p_FLAG) vm_area_dump(tc->task, PHYSADDR, 0, fd->reference ? ref : NULL); else if (fd->flags & FOREACH_m_FLAG) vm_area_dump(tc->task, PRINT_MM_STRUCT|cmdflags, 0, NULL); else if (fd->flags & FOREACH_v_FLAG) vm_area_dump(tc->task, PRINT_VMA_STRUCTS|cmdflags, 0, NULL); else vm_area_dump(tc->task, 0, 0, fd->reference ? 
ref : NULL); break; case FOREACH_TASK: pc->curcmd = "task"; if (fd->flags & FOREACH_x_FLAG) radix = 16; else if (fd->flags & FOREACH_d_FLAG) radix = 10; else radix = pc->output_radix; do_task(tc->task, FOREACH_TASK, fd->reference ? ref : NULL, radix); break; case FOREACH_SIG: pc->curcmd = "sig"; if (fd->flags & FOREACH_g_FLAG) { tgid = task_tgid(tc->task); tgc = tgid_to_context(tgid); if (hq_enter(tgc->task)) do_sig_thread_group(tgc->task); } else do_sig(tc->task, FOREACH_SIG, fd->reference ? ref : NULL); break; case FOREACH_SET: pc->curcmd = "set"; show_context(tc); break; case FOREACH_PS: pc->curcmd = "ps"; psinfo.task[0] = tc->task; psinfo.pid[0] = NO_PID; psinfo.type[0] = PS_BY_TASK; psinfo.argc = 1; cmdflags = PS_BY_TASK; if (subsequent++) cmdflags |= PS_NO_HEADER; if (fd->flags & FOREACH_G_FLAG) cmdflags |= PS_GROUP; if (fd->flags & FOREACH_s_FLAG) cmdflags |= PS_KSTACKP; if (fd->flags & FOREACH_y_FLAG) { cmdflags |= PS_POLICY; psinfo.policy = fd->policy; } /* * mutually exclusive flags */ if (fd->flags & FOREACH_a_FLAG) cmdflags |= PS_ARGV_ENVP; else if (fd->flags & FOREACH_c_FLAG) cmdflags |= PS_CHILD_LIST; else if (fd->flags & FOREACH_p_FLAG) cmdflags |= PS_PPID_LIST; else if (fd->flags & FOREACH_t_FLAG) cmdflags |= PS_TIMES; else if (fd->flags & FOREACH_l_FLAG) cmdflags |= PS_LAST_RUN; else if (fd->flags & FOREACH_m_FLAG) cmdflags |= PS_MSECS; else if (fd->flags & FOREACH_r_FLAG) cmdflags |= PS_RLIMIT; else if (fd->flags & FOREACH_g_FLAG) cmdflags |= PS_TGID_LIST; show_ps(cmdflags, &psinfo); break; case FOREACH_FILES: pc->curcmd = "files"; cmdflags = 0; if (fd->flags & FOREACH_i_FLAG) cmdflags |= PRINT_INODES; if (fd->flags & FOREACH_c_FLAG) cmdflags |= PRINT_NRPAGES; open_files_dump(tc->task, cmdflags, fd->reference ? ref : NULL); break; case FOREACH_NET: pc->curcmd = "net"; if (fd->flags & (FOREACH_s_FLAG|FOREACH_S_FLAG)) dump_sockets_workhorse(tc->task, fd->flags, fd->reference ? 
ref : NULL); break; case FOREACH_VTOP: pc->curcmd = "vtop"; cmdflags = 0; if (fd->flags & FOREACH_c_FLAG) cmdflags |= USE_USER_PGD; if (fd->flags & FOREACH_u_FLAG) cmdflags |= UVADDR; if (fd->flags & FOREACH_k_FLAG) cmdflags |= KVADDR; for (a = 0; a < fd->args; a++) { do_vtop(htol((char *)fd->arg_array[a], FAULT_ON_ERROR, NULL), tc, cmdflags); } break; case FOREACH_TEST: pc->curcmd = "test"; foreach_test(tc->task, 0); break; } pc->curcmd = "foreach"; } } /* * Post-process any commands requiring it. */ for (k = 0; k < fd->keys; k++) { switch(fd->keyword_array[k]) { case FOREACH_SIG: if (fd->flags & FOREACH_g_FLAG) hq_close(); break; } } foreach_bailout: pc->flags &= ~IN_FOREACH; } /* * Clean up regex buffers and pattern strings. */ static void foreach_cleanup(void *arg) { int i; struct foreach_data *fd; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; fd = (struct foreach_data *)arg; for (i = 0; i < fd->regexs; i++) { regfree(&fd->regex_info[i].regex); free(fd->regex_info[i].pattern); } } /* * The currently available set of foreach commands. */ static int is_foreach_keyword(char *s, int *key) { if (STREQ(args[optind], "bt")) { *key = FOREACH_BT; return TRUE; } if (STREQ(args[optind], "vm")) { *key = FOREACH_VM; return TRUE; } if (STREQ(args[optind], "task")) { *key = FOREACH_TASK; return TRUE; } if (STREQ(args[optind], "set")) { *key = FOREACH_SET; return TRUE; } if (STREQ(args[optind], "files")) { *key = FOREACH_FILES; return TRUE; } if (STREQ(args[optind], "net")) { *key = FOREACH_NET; return TRUE; } if (STREQ(args[optind], "vtop")) { *key = FOREACH_VTOP; return TRUE; } if (STREQ(args[optind], "sig")) { *key = FOREACH_SIG; return TRUE; } if (STREQ(args[optind], "test")) { *key = FOREACH_TEST; return TRUE; } if (STREQ(args[optind], "ps")) { *key = FOREACH_PS; return TRUE; } return FALSE; } /* * Try the dumpfile-specific manner of finding the panic task first. 
If
 * that fails, find the panic task the hard way -- do a "foreach bt" in the
 * background, and look for the only one that has "panic" embedded in it.
 *
 * Returns the panic task's context, or NULL if it cannot be determined.
 */
static struct task_context *
panic_search(void)
{
	struct foreach_data foreach_data, *fd;
	char *p1, *p2, *tp;
	ulong lasttask, dietask, found;
	char buf[BUFSIZE];
	struct task_context *tc;

	/* First preference: let the dumpfile handler identify the task. */
	if ((lasttask = get_dumpfile_panic_task())) {
		found = TRUE;
		goto found_panic_task;
	}

	if (pc->flags2 & LIVE_DUMP)
		return NULL;

	/*
	 * Fall back to running "foreach bt" into a tmpfile and scanning
	 * its output for panic/crash_kexec/die frames.
	 */
	BZERO(&foreach_data, sizeof(struct foreach_data));
	fd = &foreach_data;
	fd->keys = 1;
	fd->keyword_array[0] = FOREACH_BT;
	if (machine_type("S390X"))
		fd->flags |= FOREACH_o_FLAG;
	else if (machine_type("ARM64"))
		fd->flags |= FOREACH_t_FLAG;
	else
		fd->flags |= (FOREACH_t_FLAG|FOREACH_o_FLAG);

	dietask = lasttask = NO_TASK;
	found = FALSE;

	open_tmpfile();

	foreach(fd);

	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		/* Remember the task of the backtrace currently being read. */
		if ((p1 = strstr(buf, " TASK: "))) {
			p1 += strlen(" TASK: ");
			p2 = p1;
			while (!whitespace(*p2))
				p2++;
			*p2 = NULLCHAR;
			lasttask = htol(p1, RETURN_ON_ERROR, NULL);
		}

		if (strstr(buf, " panic at ")) {
			found = TRUE;
			break;
		}

		if (strstr(buf, " crash_kexec at ") ||
		    strstr(buf, " .crash_kexec at ")) {
			found = TRUE;
			break;
		}

		/*
		 * Track a unique task that called die; NO_TASK+1 marks
		 * the ambiguous case of multiple die callers.
		 */
		if (strstr(buf, " die at ")) {
			switch (dietask)
			{
			case NO_TASK:
				dietask = lasttask;
				break;
			default:
				if (dietask != lasttask)
					dietask = NO_TASK+1;
				break;
			}
		}
	}

	close_tmpfile();

	pc->curcmd = pc->program_name;

	/* Settle for a unique die caller that is still on a cpu. */
	if (!found && (dietask > (NO_TASK+1)) && task_has_cpu(dietask, NULL)) {
		lasttask = dietask;
		found = TRUE;
	}

	if (dietask == (NO_TASK+1))
		error(WARNING, "multiple active tasks have called die\n\n");

	if (CRASHDEBUG(1) && found)
		error(INFO, "panic_search: %lx (via foreach bt)\n",
			lasttask);

	/* Last resort: scan the kernel log buffer. */
	if (!found) {
		if (CRASHDEBUG(1))
			error(INFO, "panic_search: failed (via foreach bt)\n");
		if ((lasttask = get_log_panic_task()))
			found = TRUE;
	}

found_panic_task:
	populate_panic_threads();

	if (found) {
		if ((tc = task_to_context(lasttask)))
			return tc;

		/*
		 * If the task list was corrupted, add this
one in.
		 */
		if ((tp = fill_task_struct(lasttask))) {
			if ((tc = add_context(lasttask, tp)))
				return tc;
		}
	}

	if (CRASHDEBUG(1))
		error(INFO, "panic_search: failed\n");

	return NULL;
}

/*
 * Parse a "CPU: N" or "CPU N" substring in buf; if N is a valid, online
 * cpu, return the active task on that cpu, otherwise NO_TASK.
 * NOTE(review): NUL-terminates the cpu number in place, so buf is modified.
 */
static ulong
search_panic_task_by_cpu(char *buf)
{
	int crashing_cpu;
	char *p1, *p2;
	ulong task = NO_TASK;

	p1 = NULL;
	if ((p1 = strstr(buf, "CPU: ")))
		p1 += strlen("CPU: ");
	else if (STRNEQ(buf, "CPU "))
		p1 = buf + strlen("CPU ");

	if (p1) {
		p2 = p1;
		while (!whitespace(*p2) && (*p2 != '\n'))
			p2++;
		*p2 = NULLCHAR;
		crashing_cpu = dtol(p1, RETURN_ON_ERROR, NULL);
		if ((crashing_cpu >= 0) &&
		    in_cpu_map(ONLINE_MAP, crashing_cpu)) {
			task = tt->active_set[crashing_cpu];
			if (CRASHDEBUG(1))
				error(WARNING,
					"get_log_panic_task: active_set[%d]: %lx\n",
					crashing_cpu,
					tt->active_set[crashing_cpu]);
		}
	}

	return task;
}

/*
 * Scan buf for any entry of panic_keywords[].  Sets *found_flag to
 * FOUND_PANIC_TASK when a task could also be derived from a trailing
 * "CPU" string, FOUND_PANIC_KEYWORD when only the keyword was seen,
 * or FOUND_NO_PANIC_KEYWORD.  Returns the task or NO_TASK.
 */
static ulong
search_panic_task_by_keywords(char *buf, int *found_flag)
{
	char *p;
	int i = 0;
	ulong task;

	while (panic_keywords[i]) {
		if ((p = strstr(buf, panic_keywords[i]))) {
			if ((task = search_panic_task_by_cpu(p))) {
				*found_flag = FOUND_PANIC_TASK;
				return task;
			} else {
				*found_flag = FOUND_PANIC_KEYWORD;
				return NO_TASK;
			}
		}
		i++;
	}

	*found_flag = FOUND_NO_PANIC_KEYWORD;
	return NO_TASK;
}

/*
 * Search for the panic task by seeking panic keywords from kernel log buffer.
 * The panic keyword is generally followed by printing out the stack trace info
 * of the panicking task. We can determine the panic task by finding the first
 * instance of "CPU: " or "CPU " following the panic keywords.
 */
static ulong
get_log_panic_task(void)
{
	int found_flag = FOUND_NO_PANIC_KEYWORD;
	int found_panic_keyword = FALSE;
	ulong task = NO_TASK;
	char buf[BUFSIZE];

	/* Without the active set there is no cpu -> task mapping to use. */
	if (!get_active_set())
		goto fail;

	BZERO(buf, BUFSIZE);
	open_tmpfile();
	dump_log(SHOW_LOG_TEXT);
	rewind(pc->tmpfile);

	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (!found_panic_keyword) {
			/* Phase 1: look for a panic keyword (and maybe the cpu). */
			task = search_panic_task_by_keywords(buf, &found_flag);
			switch (found_flag)
			{
			case FOUND_PANIC_TASK:
				goto found_panic_task;
			case FOUND_PANIC_KEYWORD:
				found_panic_keyword = TRUE;
				continue;
			default:
				continue;
			}
		} else {
			/* Phase 2: keyword seen -- now find the "CPU" line. */
			task = search_panic_task_by_cpu(buf);
			if (task)
				goto found_panic_task;
		}
	}

found_panic_task:
	close_tmpfile();
fail:
	if (CRASHDEBUG(1) && !task)
		error(WARNING,
			"cannot determine the panic task from kernel log buffer\n");

	return task;
}

/*
 * Get the panic task from the appropriate dumpfile handler.
 * Returns NO_TASK if no handler can identify one.
 */
static ulong
get_dumpfile_panic_task(void)
{
	ulong task;

	if (NETDUMP_DUMPFILE()) {
		task = pc->flags & REM_NETDUMP ?
			tt->panic_task : get_netdump_panic_task();
		if (task)
			return task;
	} else if (KDUMP_DUMPFILE()) {
		task = get_kdump_panic_task();
		if (task)
			return task;
	} else if (DISKDUMP_DUMPFILE()) {
		task = get_diskdump_panic_task();
		if (task)
			return task;
	} else if (KVMDUMP_DUMPFILE()) {
		task = get_kvmdump_panic_task();
		if (task)
			return task;
	} else if (XENDUMP_DUMPFILE()) {
		task = get_xendump_panic_task();
		if (task)
			return task;
	} else if (LKCD_DUMPFILE())
		return(get_lkcd_panic_task());

	if (pc->flags2 & LIVE_DUMP)
		return NO_TASK;

	/* Generic fallback: derive the panic task from the active set. */
	if (get_active_set())
		return(get_active_set_panic_task());

	return NO_TASK;
}

/*
 * If runqueues is defined in the kernel, get the panic threads from the
 * active set.
 *
 * If it's an LKCD dump, or for some other reason the active threads cannot
 * be determined, do it the hard way.
 *
 * NOTE: this function should be deprecated -- the work should have been
 * done in the initial task table refresh.
 */
static void
populate_panic_threads(void)
{
	int i;
	int found;
	struct task_context *tc;

	/* Preferred path: copy the per-cpu active set wholesale. */
	if (get_active_set()) {
		for (i = 0; i < NR_CPUS; i++)
			tt->panic_threads[i] = tt->active_set[i];
		return;
	}

	found = 0;

	/* If every cpu already has a panic thread recorded, nothing to do. */
	if (!(machdep->flags & HWRESET)) {
		for (i = 0; i < kt->cpus; i++) {
			if (tt->panic_threads[i]) {
				if (++found == kt->cpus)
					return;
			}
		}
	}

	/* The hard way: scan all tasks for those currently on a cpu. */
	tc = FIRST_CONTEXT();
	for (i = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (task_has_cpu(tc->task, NULL) &&
		    (tc->processor >= 0) &&
		    (tc->processor < NR_CPUS)) {
			tt->panic_threads[tc->processor] = tc->task;
			found++;
		}
	}

	/* Single-cpu dumpfile last resort: ask the dumpfile handler. */
	if (!found && !(kt->flags & SMP) &&
	    (LKCD_DUMPFILE() || NETDUMP_DUMPFILE() ||
	     KDUMP_DUMPFILE() || DISKDUMP_DUMPFILE() || KVMDUMP_DUMPFILE()))
		tt->panic_threads[0] = get_dumpfile_panic_task();
}

/*
 * Separate the foreach command's output on a task-by-task basis by
 * displaying this header string.  A leading newline is emitted for every
 * header after the first (newline != 0).
 */
void
print_task_header(FILE *out, struct task_context *tc, int newline)
{
	char buf[BUFSIZE];
	char buf1[BUFSIZE];

	fprintf(out, "%sPID: %-7ld TASK: %s CPU: %-3s COMMAND: \"%s\"\n",
		newline ?
"\n" : "", tc->pid, mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(tc->task)), task_cpu(tc->processor, buf, !VERBOSE), tc->comm); } /* * "help -t" output */ void dump_task_table(int verbose) { int i, j, more, nr_cpus; struct task_context *tc; struct tgid_context *tg; char buf[BUFSIZE]; int others, wrap, flen; tc = tt->current; others = 0; more = FALSE; fprintf(fp, " current: %lx [%ld]\n", (ulong)tt->current, (ulong)(tt->current - tt->context_array)); if (tt->current) { fprintf(fp, " .pid: %ld\n", tc->pid); fprintf(fp, " .comm: \"%s\"\n", tc->comm); fprintf(fp, " .task: %lx\n", tc->task); fprintf(fp, " .thread_info: %lx\n", tc->thread_info); fprintf(fp, " .processor: %d\n", tc->processor); fprintf(fp, " .ptask: %lx\n", tc->ptask); fprintf(fp, " .mm_struct: %lx\n", tc->mm_struct); fprintf(fp, " .tc_next: %lx\n", (ulong)tc->tc_next); } fprintf(fp, " context_array: %lx\n", (ulong)tt->context_array); fprintf(fp, " context_by_task: %lx\n", (ulong)tt->context_by_task); fprintf(fp, " tgid_array: %lx\n", (ulong)tt->tgid_array); fprintf(fp, " tgid_searches: %ld\n", tt->tgid_searches); fprintf(fp, " tgid_cache_hits: %ld (%ld%%)\n", tt->tgid_cache_hits, tt->tgid_searches ? 
tt->tgid_cache_hits * 100 / tt->tgid_searches : 0); fprintf(fp, " last_tgid: %lx\n", (ulong)tt->last_tgid); fprintf(fp, "refresh_task_table: "); if (tt->refresh_task_table == refresh_fixed_task_table) fprintf(fp, "refresh_fixed_task_table()\n"); else if (tt->refresh_task_table == refresh_unlimited_task_table) fprintf(fp, "refresh_unlimited_task_table()\n"); else if (tt->refresh_task_table == refresh_pidhash_task_table) fprintf(fp, "refresh_pidhash_task_table()\n"); else if (tt->refresh_task_table == refresh_pid_hash_task_table) fprintf(fp, "refresh_pid_hash_task_table()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table) fprintf(fp, "refresh_hlist_task_table()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table_v2) fprintf(fp, "refresh_hlist_task_table_v2()\n"); else if (tt->refresh_task_table == refresh_hlist_task_table_v3) fprintf(fp, "refresh_hlist_task_table_v3()\n"); else if (tt->refresh_task_table == refresh_active_task_table) fprintf(fp, "refresh_active_task_table()\n"); else if (tt->refresh_task_table == refresh_radix_tree_task_table) fprintf(fp, "refresh_radix_tree_task_table()\n"); else if (tt->refresh_task_table == refresh_xarray_task_table) fprintf(fp, "refresh_xarray_task_table()\n"); else fprintf(fp, "%lx\n", (ulong)tt->refresh_task_table); buf[0] = NULLCHAR; fprintf(fp, " flags: %lx ", tt->flags); sprintf(buf, "("); if (tt->flags & TASK_INIT_DONE) sprintf(&buf[strlen(buf)], "%sTASK_INIT_DONE", others++ ? "|" : ""); if (tt->flags & TASK_ARRAY_EXISTS) sprintf(&buf[strlen(buf)], "%sTASK_ARRAY_EXISTS", others++ ? "|" : ""); if (tt->flags & PANIC_TASK_NOT_FOUND) sprintf(&buf[strlen(buf)], "%sPANIC_TASK_NOT_FOUND", others++ ? "|" : ""); if (tt->flags & TASK_REFRESH) sprintf(&buf[strlen(buf)], "%sTASK_REFRESH", others++ ? "|" : ""); if (tt->flags & TASK_REFRESH_OFF) sprintf(&buf[strlen(buf)], "%sTASK_REFRESH_OFF", others++ ? "|" : ""); if (tt->flags & PANIC_KSP) sprintf(&buf[strlen(buf)], "%sPANIC_KSP", others++ ? 
"|" : ""); if (tt->flags & POPULATE_PANIC) sprintf(&buf[strlen(buf)], "%sPOPULATE_PANIC", others++ ? "|" : ""); if (tt->flags & ACTIVE_SET) sprintf(&buf[strlen(buf)], "%sACTIVE_SET", others++ ? "|" : ""); if (tt->flags & PIDHASH) sprintf(&buf[strlen(buf)], "%sPIDHASH", others++ ? "|" : ""); if (tt->flags & PID_HASH) sprintf(&buf[strlen(buf)], "%sPID_HASH", others++ ? "|" : ""); if (tt->flags & PID_RADIX_TREE) sprintf(&buf[strlen(buf)], "%sPID_RADIX_TREE", others++ ? "|" : ""); if (tt->flags & PID_XARRAY) sprintf(&buf[strlen(buf)], "%sPID_XARRAY", others++ ? "|" : ""); if (tt->flags & THREAD_INFO) sprintf(&buf[strlen(buf)], "%sTHREAD_INFO", others++ ? "|" : ""); if (tt->flags & THREAD_INFO_IN_TASK) sprintf(&buf[strlen(buf)], "%sTHREAD_INFO_IN_TASK", others++ ? "|" : ""); if (tt->flags & IRQSTACKS) sprintf(&buf[strlen(buf)], "%sIRQSTACKS", others++ ? "|" : ""); if (tt->flags & TIMESPEC) sprintf(&buf[strlen(buf)], "%sTIMESPEC", others++ ? "|" : ""); if (tt->flags & NO_TIMESPEC) sprintf(&buf[strlen(buf)], "%sNO_TIMESPEC", others++ ? "|" : ""); if (tt->flags & START_TIME_NSECS) sprintf(&buf[strlen(buf)], "%sSTART_TIME_NSECS", others++ ? "|" : ""); if (tt->flags & ACTIVE_ONLY) sprintf(&buf[strlen(buf)], "%sACTIVE_ONLY", others++ ? "|" : ""); if (tt->flags & INDEXED_CONTEXTS) sprintf(&buf[strlen(buf)], "%sINDEXED_CONTEXTS", others++ ? 
"|" : ""); sprintf(&buf[strlen(buf)], ")"); if (strlen(buf) > 54) fprintf(fp, "\n%s\n", mkstring(buf, 80, CENTER|LJUST, NULL)); else fprintf(fp, "%s\n", buf); fprintf(fp, " task_start: %lx\n", tt->task_start); fprintf(fp, " task_end: %lx\n", tt->task_end); fprintf(fp, " task_local: %lx\n", (ulong)tt->task_local); fprintf(fp, " max_tasks: %d\n", tt->max_tasks); fprintf(fp, " pid_radix_tree: %lx\n", tt->pid_radix_tree); fprintf(fp, " pid_xarray: %lx\n", tt->pid_xarray); fprintf(fp, " callbacks: %d\n", tt->callbacks); fprintf(fp, " nr_threads: %d\n", tt->nr_threads); fprintf(fp, " running_tasks: %ld\n", tt->running_tasks); fprintf(fp, " retries: %ld\n", tt->retries); fprintf(fp, " panicmsg: \"%s\"\n", strip_linefeeds(get_panicmsg(buf))); fprintf(fp, " panic_processor: %d\n", tt->panic_processor); fprintf(fp, " panic_task: %lx\n", tt->panic_task); fprintf(fp, " this_task: %lx\n", tt->this_task); fprintf(fp, " pidhash_len: %d\n", tt->pidhash_len); fprintf(fp, " pidhash_addr: %lx\n", tt->pidhash_addr); fprintf(fp, " last_task_read: %lx\n", tt->last_task_read); fprintf(fp, " last_mm_read: %lx\n", tt->last_mm_read); fprintf(fp, " task_struct: %lx\n", (ulong)tt->task_struct); fprintf(fp, " mm_struct: %lx\n", (ulong)tt->mm_struct); fprintf(fp, " init_pid_ns: %lx\n", tt->init_pid_ns); fprintf(fp, " filepages: %ld\n", tt->filepages); fprintf(fp, " anonpages: %ld\n", tt->anonpages); fprintf(fp, " shmempages: %ld\n", tt->shmempages); fprintf(fp, " stack_end_magic: %lx\n", tt->stack_end_magic); fprintf(fp, " pf_kthread: %lx ", tt->pf_kthread); switch (tt->pf_kthread) { case UNINITIALIZED: fprintf(fp, "(UNINITIALIZED)\n"); break; case 0: fprintf(fp, "(n/a)\n"); break; default: fprintf(fp, "(PF_KTHREAD)\n"); break; } wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; flen = sizeof(void *) == SIZEOF_32BIT ? 8 : 16; nr_cpus = kt->kernel_NR_CPUS ? 
kt->kernel_NR_CPUS : NR_CPUS; fprintf(fp, " idle_threads:"); for (i = 0; i < nr_cpus; i++) { if (!tt->idle_threads) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->idle_threads[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->idle_threads[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " active_set:"); for (i = 0; i < nr_cpus; i++) { if (!tt->active_set) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->active_set[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->active_set[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " panic_threads:"); for (i = 0; i < nr_cpus; i++) { if (!tt->panic_threads) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->panic_threads[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->panic_threads[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " panic_ksp:"); for (i = 0; i < nr_cpus; i++) { if (!tt->panic_ksp) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->panic_ksp[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->panic_ksp[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " hardirq_ctx:"); for (i = 0; i < nr_cpus; i++) { if (!tt->hardirq_ctx) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->hardirq_ctx[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->hardirq_ctx[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " hardirq_tasks:"); for (i = 0; i < nr_cpus; i++) { if (!tt->hardirq_tasks) { 
fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->hardirq_tasks[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->hardirq_tasks[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " softirq_ctx:"); for (i = 0; i < nr_cpus; i++) { if (!tt->softirq_ctx) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->softirq_ctx[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->softirq_ctx[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); fprintf(fp, " softirq_tasks:"); for (i = 0; i < nr_cpus; i++) { if (!tt->softirq_tasks) { fprintf(fp, " (unused)"); break; } if ((i % wrap) == 0) { fprintf(fp, "\n "); for (j = i, more = FALSE; j < nr_cpus; j++) { if (tt->softirq_tasks[j]) { more = TRUE; break; } } } fprintf(fp, "%.*lx ", flen, tt->softirq_tasks[i]); if (!more) { fprintf(fp, "..."); break; } } fprintf(fp, "\n"); dump_task_states(); if (!verbose) return; if (tt->flags & THREAD_INFO) fprintf(fp, "\nINDEX TASK/THREAD_INFO PID CPU PTASK MM_STRUCT COMM\n"); else fprintf(fp, "\nINDEX TASK PID CPU PTASK MM_STRUCT COMM\n"); tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (tt->flags & THREAD_INFO) fprintf(fp, "[%3d] %08lx/%08lx %5ld %d %08lx %016lx %s\n", i, tc->task, tc->thread_info, tc->pid, tc->processor, tc->ptask, (ulong)tc->mm_struct, tc->comm); else fprintf(fp, "[%3d] %08lx %5ld %d %08lx %08lx %s\n", i, tc->task, tc->pid, tc->processor, tc->ptask, (ulong)tc->mm_struct, tc->comm); } fprintf(fp, "\nINDEX TASK TGID (COMM)\n"); for (i = 0; i < RUNNING_TASKS(); i++) { tg = &tt->tgid_array[i]; tc = task_to_context(tg->task); fprintf(fp, "[%3d] %lx %ld (%s)\n", i, tg->task, tg->tgid, tc->comm); } fprintf(fp, "\nINDEX TASK (COMM)\n"); for (i = 0; i < RUNNING_TASKS(); i++) { tc = tt->context_by_task[i]; fprintf(fp, "[%3d] %lx 
(%s)\n", i, tc->task, tc->comm); } } /* * Determine whether a task is a kernel thread. This would seem easier than * it looks, but on live systems it's easy to get faked out. */ int is_kernel_thread(ulong task) { struct task_context *tc; ulong mm; if (tt->pf_kthread == UNINITIALIZED) { if (THIS_KERNEL_VERSION >= LINUX(2,6,27)) { tt->pf_kthread = PF_KTHREAD; if ((tc = pid_to_context(0)) && !(task_flags(tc->task) & PF_KTHREAD)) { error(WARNING, "pid 0: PF_KTHREAD not set?\n"); tt->pf_kthread = 0; } if ((tc = pid_to_context(1)) && task_mm(tc->task, FALSE) && (task_flags(tc->task) & PF_KTHREAD)) { error(WARNING, "pid 1: PF_KTHREAD set?\n"); tt->pf_kthread = 0; } } else tt->pf_kthread = 0; } if (tt->pf_kthread) return (task_flags(task) & tt->pf_kthread ? TRUE : FALSE); tc = task_to_context(task); if ((tc->pid == 0) && !STREQ(tc->comm, pc->program_name)) return TRUE; if (_ZOMBIE_ == TASK_STATE_UNINITIALIZED) initialize_task_state(); if (IS_ZOMBIE(task) || IS_EXITING(task)) return FALSE; /* * Check for shifting sands on a live system. */ mm = task_mm(task, TRUE); if (ACTIVE() && (mm != tc->mm_struct)) return FALSE; /* * Later version Linux kernel threads have no mm_struct at all. * Earlier version kernel threads point to common init_mm. */ if (!tc->mm_struct) { if (IS_EXITING(task)) return FALSE; if (!task_state(task) && !task_flags(task)) return FALSE; return TRUE; } else if (tc->mm_struct == symbol_value("init_mm")) return TRUE; return FALSE; } /* * Checks if task policy corresponds to given mask. */ static int has_sched_policy(ulong task, ulong policy) { return !!(task_policy(task) & policy); } /* * Converts sched policy name into mask bit. 
 */
static ulong
sched_policy_bit_from_str(const char *policy_str)
{
	struct sched_policy_info *info = NULL;
	ulong policy = 0;
	int found = 0;
	char *upper = NULL;
	/*
	 * Once kernel gets more than 10 scheduling policies,
	 * sizes of these arrays should be adjusted
	 */
	char digit[2] = { 0, 0 };
	char hex[4] = { 0, 0, 0, 0 };

	upper = GETBUF(strlen(policy_str) + 1);
	upper_case(policy_str, upper);

	/* Accept a policy name, a decimal digit, or a "0X"-prefixed
	   hex value, matched case-insensitively via the upper copy. */
	for (info = sched_policy_info; info->name; info++) {
		snprintf(digit, sizeof digit, "%lu", info->value);
		/*
		 * Not using %#lX format here since "0X" prefix
		 * is not prepended if 0 value is given
		 */
		snprintf(hex, sizeof hex, "0X%lX", info->value);
		if (strncmp(upper, info->name, strlen(info->name)) == 0 ||
		    strncmp(upper, digit, sizeof digit) == 0 ||
		    strncmp(upper, hex, sizeof hex) == 0) {
			policy = 1 << info->value;
			found = 1;
			break;
		}
	}

	FREEBUF(upper);

	if (!found)
		error(FATAL, "%s: invalid scheduling policy\n", policy_str);

	return policy;
}

/*
 * Converts sched policy string set into bitmask.
 */
static ulong
make_sched_policy(const char *policy_str)
{
	ulong policy = 0;
	char *iter = NULL;
	char *orig = NULL;
	char *cur = NULL;

	iter = STRDUPBUF(policy_str);
	orig = iter;

	/* Comma-separated list: OR each component's bit into the mask. */
	while ((cur = strsep(&iter, ",")))
		policy |= sched_policy_bit_from_str(cur);

	FREEBUF(orig);

	return policy;
}

/*
 * Gather an array of pointers to the per-cpu idle tasks.  The tasklist
 * argument must be at least the size of ulong[NR_CPUS].  There may be
 * junk in everything after the first entry on a single CPU box, so the
 * data gathered may be throttled by kt->cpus.
 */
void
get_idle_threads(ulong *tasklist, int nr_cpus)
{
	int i, cnt;
	ulong runq, runqaddr;
	char *runqbuf;
	struct syment *rq_sp;

	BZERO(tasklist, sizeof(ulong) * NR_CPUS);
	runqbuf = NULL;
	cnt = 0;

	/* Newest layout: per-cpu "runqueues" symbol with an "idle" member. */
	if ((rq_sp = per_cpu_symbol_search("per_cpu__runqueues")) &&
	    VALID_MEMBER(runqueue_idle)) {
		runqbuf = GETBUF(SIZE(runqueue));
		for (i = 0; i < nr_cpus; i++) {
			if (machine_type("SPARC64") &&
			    cpu_map_addr("possible") &&
			    !(in_cpu_map(POSSIBLE, i)))
				continue;
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value + kt->__per_cpu_offset[i];
			else
				runq = rq_sp->value;
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
			    "runqueues entry (per_cpu)", FAULT_ON_ERROR);
			tasklist[i] = ULONG(runqbuf + OFFSET(runqueue_idle));
			if (IS_KVADDR(tasklist[i]))
				cnt++;
		}
	} else if (symbol_exists("runqueues") &&
	    VALID_MEMBER(runqueue_idle)) {
		/* Older layout: static runqueues[] array with "idle". */
		runq = symbol_value("runqueues");
		runqbuf = GETBUF(SIZE(runqueue));
		for (i = 0; i < nr_cpus; i++, runq += SIZE(runqueue)) {
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
			    "runqueues entry (old)", FAULT_ON_ERROR);
			tasklist[i] = ULONG(runqbuf + OFFSET(runqueue_idle));
			if (IS_KVADDR(tasklist[i]))
				cnt++;
		}
	} else if (symbol_exists("runqueues") && VALID_MEMBER(runqueue_cpu)) {
		/* runqueues[] with per-cpu cpu_s sub-structures. */
		runq = symbol_value("runqueues");
		runqbuf = GETBUF(SIZE(runqueue));
		for (i = 0; i < nr_cpus; i++) {
			runqaddr = runq + (SIZE(runqueue) * rq_idx(i));
			readmem(runqaddr, KVADDR, runqbuf, SIZE(runqueue),
			    "runqueues entry", FAULT_ON_ERROR);
			if ((tasklist[i] = get_idle_task(i, runqbuf)))
				cnt++;
		}
	} else if (symbol_exists("init_tasks")) {
		/* Very old kernels: init_tasks[] array of task pointers. */
		readmem(symbol_value("init_tasks"), KVADDR, tasklist,
		    sizeof(void *) * nr_cpus, "init_tasks array",
		    FAULT_ON_ERROR);
		if (IS_KVADDR(tasklist[0]))
			cnt++;
		else
			BZERO(tasklist, sizeof(ulong) * NR_CPUS);
	} else if (OPENVZ()) {
		/* OpenVZ kernels: the idle task hangs off pcpu_info. */
		runq = symbol_value("pcpu_info");
		runqbuf = GETBUF(SIZE(pcpu_info));
		for (i = 0; i < nr_cpus; i++, runq += SIZE(pcpu_info)) {
			readmem(runq, KVADDR, runqbuf, SIZE(pcpu_info),
			    "pcpu info", FAULT_ON_ERROR);
			tasklist[i] = ULONG(runqbuf +
			    OFFSET(pcpu_info_idle));
			if (IS_KVADDR(tasklist[i]))
				cnt++;
		}
	}

	if (runqbuf)
		FREEBUF(runqbuf);

	/* Last resort: fall back to the statically-linked init task. */
	if (!cnt) {
		error(INFO,
		    "cannot determine idle task addresses from init_tasks[] or runqueues[]\n");
		tasklist[0] = symbol_value("init_task_union");
	}
}

/*
 * Emulate the kernel rq_idx() macro.
 */
static long
rq_idx(int cpu)
{
	if (kt->runq_siblings == 1)
		return cpu;
	else if (!(kt->__rq_idx))
		return 0;
	else
		return kt->__rq_idx[cpu];
}

/*
 * Emulate the kernel cpu_idx() macro.
 */
static long
cpu_idx(int cpu)
{
	if (kt->runq_siblings == 1)
		return 0;
	else if (!(kt->__cpu_idx))
		return 0;
	else
		return kt->__cpu_idx[cpu];
}

/*
 * Dig out the idle task data from a runqueue structure.
 */
static ulong
get_idle_task(int cpu, char *runqbuf)
{
	ulong idle_task;

	idle_task = ULONG(runqbuf + OFFSET(runqueue_cpu) +
	    (SIZE(cpu_s) * cpu_idx(cpu)) + OFFSET(cpu_s_idle));

	if (IS_KVADDR(idle_task))
		return idle_task;
	else {
		/* Only complain about cpus the kernel actually has. */
		if (cpu < kt->cpus)
			error(INFO,
			    "cannot determine idle task for cpu %d\n", cpu);
		return NO_TASK;
	}
}

/*
 * Dig out the current task data from a runqueue structure.
 */
static ulong
get_curr_task(int cpu, char *runqbuf)
{
	ulong curr_task;

	curr_task = ULONG(runqbuf + OFFSET(runqueue_cpu) +
	    (SIZE(cpu_s) * cpu_idx(cpu)) + OFFSET(cpu_s_curr));

	if (IS_KVADDR(curr_task))
		return curr_task;
	else
		return NO_TASK;
}

/*
 * On kernels with runqueue[] array, store the active set of tasks.
 */
int
get_active_set(void)
{
	int i, cnt;
	ulong runq, runqaddr;
	char *runqbuf;
	struct syment *rq_sp;

	/* Already gathered and cached. */
	if (tt->flags & ACTIVE_SET)
		return TRUE;

	runq = 0;
	rq_sp = per_cpu_symbol_search("per_cpu__runqueues");
	if (!rq_sp) {
		if (symbol_exists("runqueues"))
			runq = symbol_value("runqueues");
		else if (OPENVZ())
			runq = symbol_value("pcpu_info");
		else
			return FALSE;
	} else
		runq = rq_sp->value;

	if (!tt->active_set &&
	    !(tt->active_set = (ulong *)calloc(NR_CPUS, sizeof(ulong))))
		error(FATAL, "cannot malloc active_set array");

	/* NOTE(review): runqbuf from GETBUF() is not FREEBUF()'d on
	   either return path below — presumably reclaimed by the
	   per-command buffer cleanup; verify. */
	runqbuf = GETBUF(SIZE(runqueue));
	cnt = 0;

	if (OPENVZ()) {
		/* OpenVZ: pcpu_info -> vcpu_struct -> embedded rq.curr */
		ulong vcpu_struct;
		char *pcpu_info_buf, *vcpu_struct_buf;

		pcpu_info_buf = GETBUF(SIZE(pcpu_info));
		vcpu_struct_buf = GETBUF(SIZE(vcpu_struct));

		for (i = 0; i < kt->cpus; i++, runq += SIZE(pcpu_info)) {
			readmem(runq, KVADDR, pcpu_info_buf,
			    SIZE(pcpu_info), "pcpu_info", FAULT_ON_ERROR);
			vcpu_struct= ULONG(pcpu_info_buf +
			    OFFSET(pcpu_info_vcpu));
			readmem(vcpu_struct, KVADDR, vcpu_struct_buf,
			    SIZE(vcpu_struct), "pcpu_info->vcpu",
			    FAULT_ON_ERROR);
			tt->active_set[i] = ULONG(vcpu_struct_buf +
			    OFFSET(vcpu_struct_rq) + OFFSET(runqueue_curr));
			if (IS_KVADDR(tt->active_set[i]))
				cnt++;
		}
		FREEBUF(pcpu_info_buf);
		FREEBUF(vcpu_struct_buf);
	} else if (VALID_MEMBER(runqueue_curr) && rq_sp) {
		/* Per-cpu runqueues: read each cpu's rq.curr. */
		for (i = 0; i < kt->cpus; i++) {
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value +
				    kt->__per_cpu_offset[i];
			else
				runq = rq_sp->value;
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
			    "active runqueues entry (per_cpu)",
			    FAULT_ON_ERROR);
			tt->active_set[i] = ULONG(runqbuf +
			    OFFSET(runqueue_curr));
			if (IS_KVADDR(tt->active_set[i]))
				cnt++;
		}
	} else if (VALID_MEMBER(runqueue_curr)) {
		/* Old static runqueues[] array. */
		for (i = 0; i < MAX(kt->cpus, kt->kernel_NR_CPUS);
		     i++, runq += SIZE(runqueue)) {
			readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
			    "(old) runqueues curr", FAULT_ON_ERROR);
			tt->active_set[i] = ULONG(runqbuf +
			    OFFSET(runqueue_curr));
			if (IS_KVADDR(tt->active_set[i]))
				cnt++;
		}
	} else if (VALID_MEMBER(runqueue_cpu)) {
		/* runqueues[] with cpu_s sub-structures. */
		for (i = 0; i < kt->cpus; i++) {
			runqaddr = runq + (SIZE(runqueue) * rq_idx(i));
			readmem(runqaddr, KVADDR, runqbuf, SIZE(runqueue),
			    "runqueues curr", FAULT_ON_ERROR);
			if ((tt->active_set[i] = get_curr_task(i, runqbuf)))
				cnt++;
		}
	}

	if (cnt) {
		tt->flags |= ACTIVE_SET;
		return TRUE;
	} else {
		error(INFO, "get_active_set: no tasks found?\n");
		return FALSE;
	}
}

/*
 * Clear the ACTIVE_SET flag on a live system, forcing a re-read of the
 * runqueues[] array the next time get_active_set() is called above.
 */
void
clear_active_set(void)
{
	if (ACTIVE() && (tt->flags & TASK_REFRESH))
		tt->flags &= ~ACTIVE_SET;
}

/*
 * Expands inside get_active_set_panic_task(); examines the panic/die/kexec
 * candidates collected so far and returns from the enclosing function (or
 * jumps to its no_panic_task_found label) when a winner can be declared.
 * NO_TASK+1 acts as a "multiple distinct callers seen" sentinel.
 */
#define RESOLVE_PANIC_AND_DIE_CALLERS() \
	if (xen_panic_task) { \
		if (CRASHDEBUG(1)) \
			error(INFO, \
			    "get_active_set_panic_task: %lx (xen_panic_event)\n", \
			    xen_panic_task); \
		return xen_panic_task; \
	} \
	if (crash_kexec_task) { \
		if (CRASHDEBUG(1)) \
			error(INFO, \
			    "get_active_set_panic_task: %lx (crash_kexec)\n", \
			    crash_kexec_task); \
		return crash_kexec_task; \
	} \
	if (crash_fadump_task) { \
		if (CRASHDEBUG(1)) \
			error(INFO, \
			    "get_active_set_panic_task: %lx (crash_fadump)\n", \
			    crash_fadump_task); \
		return crash_fadump_task; \
	} \
	if ((panic_task > (NO_TASK+1)) && !die_task) { \
		if (CRASHDEBUG(1)) \
			fprintf(fp, \
			    "get_active_set_panic_task: %lx (panic)\n", \
			    panic_task); \
		return panic_task; \
	} \
	\
	if (panic_task && die_task) { \
		if ((panic_task > (NO_TASK+1)) && \
		    (panic_task == die_task)) { \
			if (CRASHDEBUG(1)) \
				fprintf(fp, \
				    "get_active_set_panic_task: %lx (panic)\n", \
				    panic_task); \
			return panic_task; \
		} \
		error(WARNING, \
		    "multiple active tasks have called die and/or panic\n\n"); \
		goto no_panic_task_found; \
	} \
	\
	if (die_task > (NO_TASK+1)) { \
		if (CRASHDEBUG(1)) \
			fprintf(fp, \
			    "get_active_set_panic_task: %lx (die)\n", \
			    die_task); \
		return die_task; \
	} \
	else if (die_task == (NO_TASK+1)) \
		error(WARNING, \
		    "multiple active tasks have called die\n\n");

/*
 * Expands inside get_active_set_panic_task(); greps a raw stack dump in
 * pc->tmpfile for die/panic/kexec/fadump/xen symbols and records "task"
 * as the respective caller.  Seeing two different tasks for the same
 * symbol degrades the candidate to the NO_TASK+1 ambiguity sentinel.
 */
#define SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS() \
	while (fgets(buf, BUFSIZE, pc->tmpfile)) { \
		if (strstr(buf, " die+")) { \
			switch (die_task) \
			{ \
			case NO_TASK: \
				die_task = task; \
				break; \
			default: \
				if (die_task != task) \
					die_task = NO_TASK+1; \
				break; \
			} \
		} \
		if (strstr(buf, " panic+")) { \
			switch (panic_task) \
			{ \
			case NO_TASK: \
				panic_task = task; \
				if (XENDUMP_DUMPFILE()) \
					xendump_panic_hook(buf); \
				break; \
			default: \
				if (panic_task != task) \
					panic_task = NO_TASK+1; \
				break; \
			} \
		} \
		if (strstr(buf, " crash_kexec+") || \
		    strstr(buf, " .crash_kexec+")) { \
			crash_kexec_task = task; \
		} \
		if (strstr(buf, " .crash_fadump+")) \
			crash_fadump_task = task; \
		if (strstr(buf, " machine_kexec+") || \
		    strstr(buf, " .machine_kexec+")) { \
			crash_kexec_task = task; \
		} \
		if (strstr(buf, " xen_panic_event+") || \
		    strstr(buf, " .xen_panic_event+")){ \
			xen_panic_task = task; \
			xendump_panic_hook(buf); \
		} \
		if (machine_type("IA64") && XENDUMP_DUMPFILE() && !xen_panic_task && \
		    strstr(buf, " sysrq_handle_crashdump+")) \
			xen_sysrq_task = task; \
	}

/*
 * Search the active set tasks for instances of die or panic calls.
 */
static ulong
get_active_set_panic_task()
{
	int i, j, found;
	ulong task;
	char buf[BUFSIZE];
	ulong panic_task, die_task, crash_kexec_task, crash_fadump_task;
	ulong xen_panic_task;
	ulong xen_sysrq_task;

	panic_task = die_task = crash_kexec_task = xen_panic_task = NO_TASK;
	xen_sysrq_task = NO_TASK;
	crash_fadump_task = NO_TASK;

	/* Pass 1: scan each active task's process stack. */
	for (i = 0; i < NR_CPUS; i++) {
		if (!(task = tt->active_set[i]) || !task_exists(task))
			continue;
		open_tmpfile();
		raw_stack_dump(GET_STACKBASE(task), STACKSIZE());
		rewind(pc->tmpfile);
		SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS();
		close_tmpfile();
	}

	RESOLVE_PANIC_AND_DIE_CALLERS();

	if (tt->flags & IRQSTACKS) {
		/* Pass 2: hard IRQ stacks of tasks in the active set. */
		panic_task = die_task = NO_TASK;
		for (i = 0; i < NR_CPUS; i++) {
			if (!(task = tt->hardirq_tasks[i]))
				continue;
			for (j = found = 0; j < NR_CPUS; j++) {
				if (task == tt->active_set[j]) {
					found++;
					break;
				}
			}
			if (!found)
				continue;
			open_tmpfile();
			raw_stack_dump(tt->hardirq_ctx[i],
			    SIZE(thread_union));
			rewind(pc->tmpfile);
			SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS();
			close_tmpfile();
		}
		RESOLVE_PANIC_AND_DIE_CALLERS();

		/* Pass 3: soft IRQ stacks, with the same filtering. */
		panic_task = die_task = NO_TASK;
		for (i = 0; i < NR_CPUS; i++) {
			if (!(task = tt->softirq_tasks[i]))
				continue;
			for (j = found = 0; j < NR_CPUS; j++) {
				if (task == tt->active_set[j]) {
					found++;
					break;
				}
			}
			if (!found)
				continue;
			open_tmpfile();
			raw_stack_dump(tt->softirq_ctx[i],
			    SIZE(thread_union));
			rewind(pc->tmpfile);
			SEARCH_STACK_FOR_PANIC_DIE_AND_KEXEC_CALLERS();
			close_tmpfile();
		}
		RESOLVE_PANIC_AND_DIE_CALLERS();
	}

	if (crash_kexec_task) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "get_active_set_panic_task: %lx (crash_kexec)\n",
			    crash_kexec_task);
		return crash_kexec_task;
	}
	if (crash_fadump_task) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "get_active_set_panic_task: %lx (crash_fadump)\n",
			    crash_fadump_task);
		return crash_fadump_task;
	}

	if (xen_sysrq_task) {
		if (CRASHDEBUG(1))
			error(INFO,
			    "get_active_set_panic_task: %lx (sysrq_handle_crashdump)\n",
			    xen_sysrq_task);
		return xen_sysrq_task;
	}

no_panic_task_found:

	if (CRASHDEBUG(1))
		error(INFO,
		    "get_active_set_panic_task: failed\n");

	return NO_TASK;
}

/*
 * Determine whether a task is one of the idle threads.
 */
int
is_idle_thread(ulong task)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (task == tt->idle_threads[i])
			return TRUE;

	return FALSE;
}

/*
 * Dump the current run queue task list.  This command should be expanded
 * to deal with timer queues, bottom halves, etc...
 */
void
cmd_runq(void)
{
	int c;
	char arg_buf[BUFSIZE];
	ulong *cpus = NULL;
	int sched_debug = 0;
	int dump_timestamp_flag = 0;
	int dump_lag_flag = 0;
	int dump_task_group_flag = 0;
	int dump_milliseconds_flag = 0;

	while ((c = getopt(argcnt, args, "dtTgmc:")) != EOF) {
		switch(c)
		{
		case 'd':
			sched_debug = 1;
			break;
		case 't':
			dump_timestamp_flag = 1;
			break;
		case 'T':
			dump_lag_flag = 1;
			break;
		case 'm':
			dump_milliseconds_flag = 1;
			break;
		case 'g':
			if ((INVALID_MEMBER(task_group_cfs_rq) &&
			    INVALID_MEMBER(task_group_rt_rq)) ||
			    INVALID_MEMBER(task_group_parent))
				option_not_supported(c);
			dump_task_group_flag = 1;
			break;
		case 'c':
			/* -c takes a cpu list/mask; only one allowed. */
			if (pc->curcmd_flags & CPUMASK) {
				error(INFO, "only one -c option allowed\n");
				argerrs++;
			} else {
				pc->curcmd_flags |= CPUMASK;
				BZERO(arg_buf, BUFSIZE);
				strcpy(arg_buf, optarg);
				cpus = get_cpumask_buf();
				make_cpumask(arg_buf, cpus, FAULT_ON_ERROR, NULL);
				pc->curcmd_private = (ulong)cpus;
			}
			break;
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* Dispatch to the requested display mode; default is dump_runq(). */
	if (dump_timestamp_flag)
		dump_on_rq_timestamp();
	else if (dump_lag_flag)
		dump_on_rq_lag();
	else if (dump_milliseconds_flag)
		dump_on_rq_milliseconds();
	else if (sched_debug)
		dump_on_rq_tasks();
	else if (dump_task_group_flag)
		dump_tasks_by_task_group();
	else
		dump_runq();

	if (cpus)
		FREEBUF(cpus);
}

/*
 * Displays the runqueue and active task timestamps of each cpu.
 */
static void
dump_on_rq_timestamp(void)
{
	ulong runq;
	char buf[BUFSIZE];
	char format[15];
	struct syment *rq_sp;
	struct task_context *tc;
	int cpu, len, indent;
	ulonglong timestamp;
	ulong *cpus;

	indent = runq = 0;
	cpus = pc->curcmd_flags & CPUMASK ?
	    (ulong *)(ulong)pc->curcmd_private : NULL;

	if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
		error(FATAL, "per-cpu runqueues do not exist\n");

	if (INVALID_MEMBER(rq_timestamp))
		option_not_supported('t');

	for (cpu = 0; cpu < kt->cpus; cpu++) {
		if (cpus && !NUM_IN_BITMAP(cpus, cpu))
			continue;

		if ((kt->flags & SMP) && (kt->flags &PER_CPU_OFF))
			runq = rq_sp->value + kt->__per_cpu_offset[cpu];
		else
			runq = rq_sp->value;

		readmem(runq + OFFSET(rq_timestamp), KVADDR, &timestamp,
		    sizeof(ulonglong), "per-cpu rq timestamp",
		    FAULT_ON_ERROR);

		sprintf(buf, pc->output_radix == 10 ? "%llu" : "%llx",
		    timestamp);
		fprintf(fp, "%sCPU %d: ", cpu < 10 ? " " : "", cpu);

		if (hide_offline_cpu(cpu)) {
			fprintf(fp, "[OFFLINE]\n");
			continue;
		} else
			fprintf(fp, "%s\n", buf);

		len = strlen(buf);

		if ((tc = task_to_context(tt->active_set[cpu]))){
			if (cpu < 10)
				indent = 7;
			else if (cpu < 100)
				indent = 8;
			else if (cpu < 1000)
				indent = 9;
			if (cpu < 10)
				indent++;

			timestamp = task_last_run(tc->task);
			/* Build "%0<len>llu"/"%0<len>llx" so the task
			   timestamp aligns under the rq timestamp. */
			sprintf(format, "%c0%dll%c", '%', len,
			    pc->output_radix == 10 ? 'u' : 'x');
			sprintf(buf, format, timestamp);

			fprintf(fp, "%s%s PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
			    space(indent), buf, tc->pid, tc->task, tc->comm);
		} else
			fprintf(fp, "\n");
	}
}

/*
 * Runqueue timestamp struct for dump_on_rq_lag().
 */
struct runq_ts_info {
	int cpu;
	ulonglong ts;
};

/*
 * Comparison function for dump_on_rq_lag().
 * Sorts runqueue timestamps in a descending order.
 */
static int
compare_runq_ts(const void *p1, const void *p2)
{
	const struct runq_ts_info *ts1 = p1;
	const struct runq_ts_info *ts2 = p2;

	if (ts1->ts > ts2->ts)
		return -1;

	if (ts1->ts < ts2->ts)
		return 1;

	return 0;
}

/*
 * Calculates integer log10
 */
static ulong
__log10ul(ulong x)
{
	ulong ret = 1;

	while (x > 9) {
		ret++;
		x /= 10;
	}
	return ret;
}

/*
 * Displays relative CPU lag.
*/ static void dump_on_rq_lag(void) { struct syment *rq_sp; int cpu; ulong runq; ulonglong timestamp; struct runq_ts_info runq_ts[kt->cpus]; if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues"))) error(FATAL, "per-cpu runqueues do not exist\n"); if (INVALID_MEMBER(rq_timestamp)) option_not_supported('T'); for (cpu = 0; cpu < kt->cpus; cpu++) { if ((kt->flags & SMP) && (kt->flags &PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[cpu]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, ×tamp, sizeof(ulonglong), "per-cpu rq timestamp", FAULT_ON_ERROR); runq_ts[cpu].cpu = cpu; runq_ts[cpu].ts = timestamp; } qsort(runq_ts, (size_t)kt->cpus, sizeof(struct runq_ts_info), compare_runq_ts); for (cpu = 0; cpu < kt->cpus; cpu++) { fprintf(fp, "%sCPU %d: %.2lf secs\n", space(2 + __log10ul(kt->cpus) - __log10ul(runq_ts[cpu].cpu)), runq_ts[cpu].cpu, ((double)runq_ts[0].ts - (double)runq_ts[cpu].ts) / 1000000000.0); } } /* * Displays the runqueue and active task timestamps of each cpu. */ static void dump_on_rq_milliseconds(void) { ulong runq; char buf[BUFSIZE]; struct syment *rq_sp; struct task_context *tc; int cpu, max_indent, indent, max_days, days; long long delta; ulonglong task_timestamp, rq_timestamp; ulong *cpus; if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues"))) error(FATAL, "per-cpu runqueues do not exist\n"); if (INVALID_MEMBER(rq_timestamp)) option_not_supported('m'); if (kt->cpus < 10) max_indent = 1; else if (kt->cpus < 100) max_indent = 2; else if (kt->cpus < 1000) max_indent = 3; else max_indent = 4; max_days = days = 0; cpus = pc->curcmd_flags & CPUMASK ? 
(ulong *)(ulong)pc->curcmd_private : NULL; for (cpu = 0; cpu < kt->cpus; cpu++) { if (cpus && !NUM_IN_BITMAP(cpus, cpu)) continue; if ((kt->flags & SMP) && (kt->flags &PER_CPU_OFF)) runq = rq_sp->value + kt->__per_cpu_offset[cpu]; else runq = rq_sp->value; readmem(runq + OFFSET(rq_timestamp), KVADDR, &rq_timestamp, sizeof(ulonglong), "per-cpu rq timestamp", FAULT_ON_ERROR); if (!max_days) { translate_nanoseconds(rq_timestamp, buf); max_days = first_space(buf) - buf; } if (cpu < 10) indent = max_indent; else if (cpu < 100) indent = max_indent - 1; else if (cpu < 1000) indent = max_indent - 2; else indent = max_indent - 4; if (hide_offline_cpu(cpu)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", space(indent), cpu); continue; } if ((tc = task_to_context(tt->active_set[cpu]))) task_timestamp = task_last_run(tc->task); else { fprintf(fp, "%sCPU %d: [unknown]\n", space(indent), cpu); continue; } delta = rq_timestamp - task_timestamp; if (delta < 0) delta = 0; translate_nanoseconds(delta, buf); days = first_space(buf) - buf; fprintf(fp, "%sCPU %d: [%s%s] PID: %-5ld TASK: %lx COMMAND: \"%s\"\n", space(indent), cpu, space(max_days - days), buf, tc->pid, tc->task, tc->comm); } } /* * Dump the task run queue on behalf cmd_runq(). 
 */
static void
dump_runq(void)
{
	int i;
	ulong next, runqueue_head;
	long offs;
	int qlen, cnt;
	ulong *tlist;
	struct task_context *tc;

	/* CFS kernels and O(1)-scheduler kernels are handled by their
	   own dedicated dump routines. */
	if (VALID_MEMBER(rq_cfs)) {
		dump_CFS_runqueues();
		return;
	}

	if (VALID_MEMBER(runqueue_arrays)) {
		dump_runqueues();
		return;
	}

	offs = runqueue_head = 0;
	qlen = 1000;

start_again:
	tlist = (ulong *)GETBUF(qlen * sizeof(void *));

	if (symbol_exists("runqueue_head")) {
		next = runqueue_head = symbol_value("runqueue_head");
		offs = 0;
	} else if (VALID_MEMBER(task_struct_next_run)) {
		offs = OFFSET(task_struct_next_run);
		next = runqueue_head = symbol_value("init_task_union");
	} else
		error(FATAL,
		    "cannot determine run queue structures\n");

	cnt = 0;

	/* Walk the singly-linked run list, growing the buffer and
	   restarting if it overflows. */
	do {
		if (cnt == qlen) {
			FREEBUF(tlist);
			qlen += 1000;
			goto start_again;
		}

		tlist[cnt++] = next;

		readmem(next+offs, KVADDR, &next, sizeof(void *),
		    "run queue entry", FAULT_ON_ERROR);

		if (next == runqueue_head)
			break;
	} while (next);

	for (i = 0; i < cnt; i++) {
		if (tlist[i] == runqueue_head)
			continue;

		if (!(tc = task_to_context(VIRTPAGEBASE(tlist[i])))) {
			fprintf(fp, "PID: ? TASK: %lx CPU: ? COMMAND: ?\n",
			    tlist[i]);
			continue;
		}

		if (!is_idle_thread(tc->task))
			print_task_header(fp, tc, 0);
	}
}

#define RUNQ_ACTIVE (1)
#define RUNQ_EXPIRED (2)

static void
dump_runqueues(void)
{
	int cpu, displayed;
	ulong runq, offset;
	char *runqbuf;
	ulong active, expired, arrays;
	struct task_context *tc;
	struct syment *rq_sp;
	ulong *cpus;

	runq = 0;
	rq_sp = per_cpu_symbol_search("per_cpu__runqueues");
	if (!rq_sp) {
		if (symbol_exists("runqueues"))
			runq = symbol_value("runqueues");
		else
			error(FATAL, "cannot determine run queue structures\n");
	}

	get_active_set();
	runqbuf = GETBUF(SIZE(runqueue));
	cpus = pc->curcmd_flags & CPUMASK ?
	    (ulong *)(ulong)pc->curcmd_private : NULL;

	for (cpu = displayed = 0; cpu < kt->cpus;
	     cpu++, runq += SIZE(runqueue)) {
		if (cpus && !NUM_IN_BITMAP(cpus, cpu))
			continue;

		/* Per-cpu symbol overrides the array-stride iteration. */
		if (rq_sp) {
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				runq = rq_sp->value + kt->__per_cpu_offset[cpu];
			else
				runq = rq_sp->value;
		}

		fprintf(fp, "%sCPU %d ", displayed++ ? "\n" : "", cpu);

		if (hide_offline_cpu(cpu)) {
			fprintf(fp, "[OFFLINE]\n");
			continue;
		} else
			fprintf(fp, "RUNQUEUE: %lx\n", runq);

		fprintf(fp, " CURRENT: ");
		if ((tc = task_to_context(tt->active_set[cpu])))
			fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
			    tc->pid, tc->task, tc->comm);
		else
			fprintf(fp, "%lx\n", tt->active_set[cpu]);

		readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
		    "runqueues array entry", FAULT_ON_ERROR);
		active = ULONG(runqbuf + OFFSET(runqueue_active));
		expired = ULONG(runqbuf + OFFSET(runqueue_expired));
		arrays = runq + OFFSET(runqueue_arrays);

		console("active: %lx\n", active);
		console("expired: %lx\n", expired);
		console("arrays: %lx\n", arrays);

		/* NOTE(review): the conditional result below is a dead
		   store — it is immediately overwritten by
		   "offset = active - runq"; verify which computation
		   is intended.  Same for the expired case. */
		offset = active == arrays ? OFFSET(runqueue_arrays) :
		    OFFSET(runqueue_arrays) + SIZE(prio_array);
		offset = active - runq;
		dump_prio_array(RUNQ_ACTIVE, active, &runqbuf[offset]);

		offset = expired == arrays ? OFFSET(runqueue_arrays) :
		    OFFSET(runqueue_arrays) + SIZE(prio_array);
		offset = expired - runq;
		dump_prio_array(RUNQ_EXPIRED, expired, &runqbuf[offset]);
	}
}

static void
dump_prio_array(int which, ulong k_prio_array, char *u_prio_array)
{
	int i, c, cnt, tot, nr_active;
	int qheads ATTRIBUTE_UNUSED;
	ulong offset, kvaddr, uvaddr;
	ulong list_head[2];
	struct list_data list_data, *ld;
	struct task_context *tc;
	ulong *tlist;

	qheads = (i = ARRAY_LENGTH(prio_array_queue)) ?
i : get_array_length("prio_array.queue", NULL, SIZE(list_head)); console("dump_prio_array[%d]: %lx %lx\n", which, k_prio_array, (ulong)u_prio_array); nr_active = INT(u_prio_array + OFFSET(prio_array_nr_active)); console("nr_active: %d\n", nr_active); fprintf(fp, " %s PRIO_ARRAY: %lx\n", which == RUNQ_ACTIVE ? "ACTIVE" : "EXPIRED", k_prio_array); if (CRASHDEBUG(1)) fprintf(fp, "nr_active: %d\n", nr_active); ld = &list_data; for (i = tot = 0; i < 140; i++) { offset = OFFSET(prio_array_queue) + (i * SIZE(list_head)); kvaddr = k_prio_array + offset; uvaddr = (ulong)u_prio_array + offset; BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2); if (CRASHDEBUG(1)) fprintf(fp, "prio_array[%d] @ %lx => %lx/%lx %s\n", i, kvaddr, list_head[0], list_head[1], (list_head[0] == list_head[1]) && (list_head[0] == kvaddr) ? "(empty)" : ""); if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr)) continue; console("[%d] %lx => %lx-%lx ", i, kvaddr, list_head[0], list_head[1]); fprintf(fp, " [%3d] ", i); BZERO(ld, sizeof(struct list_data)); ld->start = list_head[0]; ld->list_head_offset = OFFSET(task_struct_run_list); ld->end = kvaddr; hq_open(); cnt = do_list(ld); hq_close(); console("%d entries\n", cnt); tlist = (ulong *)GETBUF((cnt) * sizeof(ulong)); cnt = retrieve_list(tlist, cnt); for (c = 0; c < cnt; c++) { if (!(tc = task_to_context(tlist[c]))) continue; if (c) INDENT(11); fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n", tc->pid, tc->task, tc->comm); } tot += cnt; FREEBUF(tlist); } if (!tot) { INDENT(5); fprintf(fp, "[no tasks queued]\n"); } } #define MAX_GROUP_NUM 200 struct task_group_info { int use; int depth; char *name; ulong task_group; struct task_group_info *parent; }; static struct task_group_info **tgi_array; static int tgi_p = 0; static int tgi_p_max = 0; static void sort_task_group_info_array(void) { int i, j; struct task_group_info *tmp; for (i = 0; i < tgi_p - 1; i++) { for (j = 0; j < tgi_p - i - 1; j++) { if (tgi_array[j]->depth > 
tgi_array[j+1]->depth) { tmp = tgi_array[j+1]; tgi_array[j+1] = tgi_array[j]; tgi_array[j] = tmp; } } } } static void print_task_group_info_array(void) { int i; for (i = 0; i < tgi_p; i++) { fprintf(fp, "%d : use=%d, depth=%d, group=%lx, ", i, tgi_array[i]->use, tgi_array[i]->depth, tgi_array[i]->task_group); fprintf(fp, "name=%s, ", tgi_array[i]->name ? tgi_array[i]->name : "NULL"); if (tgi_array[i]->parent) fprintf(fp, "parent=%lx", tgi_array[i]->parent->task_group); fprintf(fp, "\n"); } } static void free_task_group_info_array(void) { int i; for (i = 0; i < tgi_p; i++) { if (tgi_array[i]->name) FREEBUF(tgi_array[i]->name); FREEBUF(tgi_array[i]); } tgi_p = 0; FREEBUF(tgi_array); } static void reuse_task_group_info_array(void) { int i; for (i = 0; i < tgi_p; i++) { if (tgi_array[i]->depth == 0) tgi_array[i]->use = 0; else tgi_array[i]->use = 1; } } static void dump_task_runq_entry(struct task_context *tc, int current) { int prio; readmem(tc->task + OFFSET(task_struct_prio), KVADDR, &prio, sizeof(int), "task prio", FAULT_ON_ERROR); fprintf(fp, "[%3d] ", prio); fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"", tc->pid, tc->task, tc->comm); if (current) fprintf(fp, " [CURRENT]\n"); else fprintf(fp, "\n"); } static void print_group_header_fair(int depth, ulong cfs_rq, void *t) { int throttled; struct task_group_info *tgi = (struct task_group_info *)t; INDENT(2 + 3 * depth); fprintf(fp, "TASK_GROUP: %lx CFS_RQ: %lx ", tgi->task_group, cfs_rq); if (tgi->name) fprintf(fp, " <%s>", tgi->name); if (VALID_MEMBER(cfs_rq_throttled)) { readmem(cfs_rq + OFFSET(cfs_rq_throttled), KVADDR, &throttled, sizeof(int), "cfs_rq throttled", FAULT_ON_ERROR); if (throttled) fprintf(fp, " (THROTTLED)"); } fprintf(fp, "\n"); } static void print_parent_task_group_fair(void *t, int cpu) { struct task_group_info *tgi; ulong cfs_rq_c, cfs_rq_p; tgi = ((struct task_group_info *)t)->parent; if (tgi && tgi->use) print_parent_task_group_fair(tgi, cpu); else return; readmem(tgi->task_group + 
	    OFFSET(task_group_cfs_rq), KVADDR, &cfs_rq_c,
	    sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
	readmem(cfs_rq_c + cpu * sizeof(ulong), KVADDR, &cfs_rq_p,
	    sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);

	print_group_header_fair(tgi->depth, cfs_rq_p, tgi);
	tgi->use = 0;
}

/*
 * Find and dump child task groups one level below "depth" whose per-cpu
 * cfs_rq was dequeued (not reachable from the parent's rb tree), so
 * their queued tasks still get reported.  Returns the task count.
 */
static int
dump_tasks_in_lower_dequeued_cfs_rq(int depth, ulong cfs_rq, int cpu,
    struct task_context *ctc)
{
	int i, total, nr_running;
	ulong group, cfs_rq_c, cfs_rq_p;

	total = 0;
	for (i = 0; i < tgi_p; i++) {
		/* Only unprinted groups exactly one level deeper. */
		if (tgi_array[i]->use == 0 ||
		    tgi_array[i]->depth - depth != 1)
			continue;

		readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR, &group,
		    sizeof(ulong), "cfs_rq tg", FAULT_ON_ERROR);
		if (group != tgi_array[i]->parent->task_group)
			continue;

		readmem(tgi_array[i]->task_group +
		    OFFSET(task_group_cfs_rq), KVADDR, &cfs_rq_c,
		    sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
		readmem(cfs_rq_c + cpu * sizeof(ulong), KVADDR, &cfs_rq_p,
		    sizeof(ulong), "task_group cfs_rq", FAULT_ON_ERROR);
		if (cfs_rq == cfs_rq_p)
			continue;

		readmem(cfs_rq_p + OFFSET(cfs_rq_nr_running), KVADDR,
		    &nr_running, sizeof(int), "cfs_rq nr_running",
		    FAULT_ON_ERROR);
		if (nr_running == 0) {
			/* Empty at this level; recurse one level down. */
			total += dump_tasks_in_lower_dequeued_cfs_rq(depth + 1,
			    cfs_rq_p, cpu, ctc);
			continue;
		}

		print_parent_task_group_fair(tgi_array[i], cpu);

		total++;
		total += dump_tasks_in_task_group_cfs_rq(depth + 1,
		    cfs_rq_p, cpu, ctc);
	}

	return total;
}

/*
 * Dump the tasks queued on a cfs_rq, recursing into group-scheduling
 * sub-runqueues (sched_entity.my_q).  Returns the task count.
 */
static int
dump_tasks_in_cfs_rq(ulong cfs_rq)
{
	struct task_context *tc;
	struct rb_root *root;
	struct rb_node *node;
	ulong my_q, leftmost, curr, curr_my_q;
	int total;

	total = 0;

	/* The currently-running entity is not in the rb tree; if it is
	   a group, descend into its sub-runqueue explicitly. */
	if (VALID_MEMBER(sched_entity_my_q)) {
		readmem(cfs_rq + OFFSET(cfs_rq_curr), KVADDR, &curr,
		    sizeof(ulong), "curr", FAULT_ON_ERROR);
		if (curr) {
			readmem(curr + OFFSET(sched_entity_my_q), KVADDR,
			    &curr_my_q, sizeof(ulong), "curr->my_q",
			    FAULT_ON_ERROR);
			if (curr_my_q)
				total += dump_tasks_in_cfs_rq(curr_my_q);
		}
	}

	readmem(cfs_rq + OFFSET(cfs_rq_rb_leftmost), KVADDR, &leftmost,
	    sizeof(ulong), "rb_leftmost", FAULT_ON_ERROR);
	root = (struct rb_root *)(cfs_rq +
	    OFFSET(cfs_rq_tasks_timeline));

	for (node = rb_first(root); leftmost && node;
	     node = rb_next(node)) {
		if (VALID_MEMBER(sched_entity_my_q)) {
			readmem((ulong)node - OFFSET(sched_entity_run_node) +
			    OFFSET(sched_entity_my_q), KVADDR, &my_q,
			    sizeof(ulong), "my_q", FAULT_ON_ERROR);
			if (my_q) {
				total += dump_tasks_in_cfs_rq(my_q);
				continue;
			}
		}

		tc = task_to_context((ulong)node -
		    OFFSET(task_struct_se) -
		    OFFSET(sched_entity_run_node));
		if (!tc)
			continue;
		/* hq_enter() guards against rb tree corruption loops. */
		if (hq_enter((ulong)tc)) {
			INDENT(5);
			dump_task_runq_entry(tc, 0);
		} else {
			error(WARNING,
			    "duplicate CFS runqueue node: task %lx\n",
			    tc->task);
			return total;
		}
		total++;
	}

	return total;
}

/*
 * Dump the tasks queued on a task group's cfs_rq at the given depth,
 * printing the group header, handling the not-in-tree "curr" entity,
 * and recursing into nested groups and dequeued children.
 */
static int
dump_tasks_in_task_group_cfs_rq(int depth, ulong cfs_rq, int cpu,
    struct task_context *ctc)
{
	struct task_context *tc;
	struct rb_root *root;
	struct rb_node *node;
	ulong my_q, leftmost, curr, curr_my_q, tg;
	int total, i;

	total = 0;
	curr_my_q = curr = 0;

	/* Print this group's header (depth 0 is the root group). */
	if (depth) {
		readmem(cfs_rq + OFFSET(cfs_rq_tg), KVADDR,
		    &tg, sizeof(ulong), "cfs_rq tg", FAULT_ON_ERROR);
		for (i = 0; i < tgi_p; i++) {
			if (tgi_array[i]->task_group == tg) {
				print_group_header_fair(depth,
				    cfs_rq, tgi_array[i]);
				tgi_array[i]->use = 0;
				break;
			}
		}
	}

	if (VALID_MEMBER(sched_entity_my_q)) {
		readmem(cfs_rq + OFFSET(cfs_rq_curr), KVADDR, &curr,
		    sizeof(ulong), "curr", FAULT_ON_ERROR);
		if (curr) {
			readmem(curr + OFFSET(sched_entity_my_q), KVADDR,
			    &curr_my_q, sizeof(ulong), "curr->my_q",
			    FAULT_ON_ERROR);
			if (curr_my_q) {
				total++;
				total += dump_tasks_in_task_group_cfs_rq(
				    depth + 1, curr_my_q, cpu, ctc);
			}
		}
	}

	/*
	 * check if "curr" is the task that is current running task
	 */
	if (!curr_my_q && ctc &&
	    (curr - OFFSET(task_struct_se)) == ctc->task) {
		/* curr is not in the rb tree, so let's print it here */
		total++;
		INDENT(5 + 3 * depth);
		dump_task_runq_entry(ctc, 1);
	}

	readmem(cfs_rq + OFFSET(cfs_rq_rb_leftmost), KVADDR, &leftmost,
	    sizeof(ulong), "rb_leftmost", FAULT_ON_ERROR);
	root = (struct rb_root *)(cfs_rq +
	    OFFSET(cfs_rq_tasks_timeline));

	for (node = rb_first(root); leftmost && node;
	     node = rb_next(node)) {
		if (VALID_MEMBER(sched_entity_my_q)) {
			readmem((ulong)node - OFFSET(sched_entity_run_node) +
			    OFFSET(sched_entity_my_q), KVADDR, &my_q,
			    sizeof(ulong), "my_q", FAULT_ON_ERROR);
			if (my_q) {
				total++;
				total += dump_tasks_in_task_group_cfs_rq(
				    depth + 1, my_q, cpu, ctc);
				continue;
			}
		}

		tc = task_to_context((ulong)node -
		    OFFSET(task_struct_se) -
		    OFFSET(sched_entity_run_node));
		if (!tc)
			continue;
		if (hq_enter((ulong)tc)) {
			INDENT(5 + 3 * depth);
			dump_task_runq_entry(tc, 0);
		} else {
			error(WARNING,
			    "duplicate CFS runqueue node: task %lx\n",
			    tc->task);
			return total;
		}
		total++;
	}

	total += dump_tasks_in_lower_dequeued_cfs_rq(depth, cfs_rq,
	    cpu, ctc);

	if (!total) {
		INDENT(5 + 3 * depth);
		fprintf(fp, "[no tasks queued]\n");
	}
	return total;
}

/*
 * "runq -d": list, per cpu, every task whose on_rq flag is set.
 */
static void
dump_on_rq_tasks(void)
{
	char buf[BUFSIZE];
	struct task_context *tc;
	int i, cpu, on_rq, tot;
	ulong *cpus;

	/* Late-bind offsets; older kernels keep on_rq in sched_entity
	   rather than directly in task_struct. */
	if (!VALID_MEMBER(task_struct_on_rq)) {
		MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se");
		STRUCT_SIZE_INIT(sched_entity, "sched_entity");
		MEMBER_OFFSET_INIT(sched_entity_on_rq, "sched_entity", "on_rq");
		MEMBER_OFFSET_INIT(task_struct_on_rq, "task_struct", "on_rq");
		MEMBER_OFFSET_INIT(task_struct_prio, "task_struct", "prio");

		if (INVALID_MEMBER(task_struct_on_rq)) {
			if (INVALID_MEMBER(task_struct_se) ||
			    INVALID_SIZE(sched_entity))
				option_not_supported('d');
		}
	}

	cpus = pc->curcmd_flags & CPUMASK ?
	    (ulong *)(ulong)pc->curcmd_private : NULL;

	for (cpu = 0; cpu < kt->cpus; cpu++) {
		if (cpus && !NUM_IN_BITMAP(cpus, cpu))
			continue;

		fprintf(fp, "%sCPU %d", cpu ?
"\n" : "", cpu); if (hide_offline_cpu(cpu)) { fprintf(fp, " [OFFLINE]\n"); continue; } else fprintf(fp, "\n"); tc = FIRST_CONTEXT(); tot = 0; for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (VALID_MEMBER(task_struct_on_rq)) { readmem(tc->task + OFFSET(task_struct_on_rq), KVADDR, &on_rq, sizeof(int), "task on_rq", FAULT_ON_ERROR); } else { readmem(tc->task + OFFSET(task_struct_se), KVADDR, buf, SIZE(sched_entity), "task se", FAULT_ON_ERROR); on_rq = INT(buf + OFFSET(sched_entity_on_rq)); } if (!on_rq || tc->processor != cpu) continue; INDENT(5); dump_task_runq_entry(tc, 0); tot++; } if (!tot) { INDENT(5); fprintf(fp, "[no tasks queued]\n"); } } } static void cfs_rq_offset_init(void) { if (!VALID_STRUCT(cfs_rq)) { STRUCT_SIZE_INIT(cfs_rq, "cfs_rq"); STRUCT_SIZE_INIT(rt_rq, "rt_rq"); MEMBER_OFFSET_INIT(rq_rt, "rq", "rt"); MEMBER_OFFSET_INIT(rq_nr_running, "rq", "nr_running"); MEMBER_OFFSET_INIT(task_struct_se, "task_struct", "se"); STRUCT_SIZE_INIT(sched_entity, "sched_entity"); MEMBER_OFFSET_INIT(sched_entity_run_node, "sched_entity", "run_node"); MEMBER_OFFSET_INIT(sched_entity_cfs_rq, "sched_entity", "cfs_rq"); MEMBER_OFFSET_INIT(sched_entity_my_q, "sched_entity", "my_q"); MEMBER_OFFSET_INIT(sched_rt_entity_my_q, "sched_rt_entity", "my_q"); MEMBER_OFFSET_INIT(sched_entity_on_rq, "sched_entity", "on_rq"); MEMBER_OFFSET_INIT(cfs_rq_tasks_timeline, "cfs_rq", "tasks_timeline"); MEMBER_OFFSET_INIT(cfs_rq_rb_leftmost, "cfs_rq", "rb_leftmost"); if (INVALID_MEMBER(cfs_rq_rb_leftmost) && VALID_MEMBER(cfs_rq_tasks_timeline) && MEMBER_EXISTS("rb_root_cached", "rb_leftmost")) ASSIGN_OFFSET(cfs_rq_rb_leftmost) = OFFSET(cfs_rq_tasks_timeline) + MEMBER_OFFSET("rb_root_cached", "rb_leftmost"); MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_running"); if (INVALID_MEMBER(cfs_rq_nr_running)) { MEMBER_OFFSET_INIT(cfs_rq_nr_running, "cfs_rq", "nr_queued"); } MEMBER_OFFSET_INIT(cfs_rq_curr, "cfs_rq", "curr"); MEMBER_OFFSET_INIT(rt_rq_active, "rt_rq", "active"); 
		MEMBER_OFFSET_INIT(task_struct_run_list, "task_struct",
			"run_list");
		MEMBER_OFFSET_INIT(task_struct_on_rq, "task_struct", "on_rq");
		MEMBER_OFFSET_INIT(task_struct_prio, "task_struct", "prio");
		MEMBER_OFFSET_INIT(task_struct_rt, "task_struct", "rt");
		MEMBER_OFFSET_INIT(sched_rt_entity_run_list, "sched_rt_entity",
			"run_list");
		MEMBER_OFFSET_INIT(rt_prio_array_queue, "rt_prio_array",
			"queue");
	}
}

/*
 *  One-time initialization of the task_group related structure sizes
 *  and member offsets used when dumping tasks by task group.
 */
static void
task_group_offset_init(void)
{
	if (!VALID_STRUCT(task_group)) {
		STRUCT_SIZE_INIT(task_group, "task_group");
		MEMBER_OFFSET_INIT(rt_rq_rt_nr_running, "rt_rq",
			"rt_nr_running");
		MEMBER_OFFSET_INIT(cfs_rq_tg, "cfs_rq", "tg");
		MEMBER_OFFSET_INIT(rt_rq_tg, "rt_rq", "tg");
		MEMBER_OFFSET_INIT(rt_rq_highest_prio, "rt_rq", "highest_prio");
		MEMBER_OFFSET_INIT(task_group_css, "task_group", "css");
		MEMBER_OFFSET_INIT(cgroup_subsys_state_cgroup,
			"cgroup_subsys_state", "cgroup");
		/* cgroup naming: older kernels hang a dentry off the cgroup,
		 * newer ones a kernfs_node. */
		MEMBER_OFFSET_INIT(cgroup_dentry, "cgroup", "dentry");
		MEMBER_OFFSET_INIT(cgroup_kn, "cgroup", "kn");
		MEMBER_OFFSET_INIT(kernfs_node_name, "kernfs_node", "name");
		MEMBER_OFFSET_INIT(kernfs_node_parent, "kernfs_node", "parent");
		MEMBER_OFFSET_INIT(task_group_siblings, "task_group",
			"siblings");
		MEMBER_OFFSET_INIT(task_group_children, "task_group",
			"children");
		MEMBER_OFFSET_INIT(task_group_cfs_bandwidth, "task_group",
			"cfs_bandwidth");
		MEMBER_OFFSET_INIT(cfs_rq_throttled, "cfs_rq", "throttled");
		MEMBER_OFFSET_INIT(task_group_rt_bandwidth, "task_group",
			"rt_bandwidth");
		MEMBER_OFFSET_INIT(rt_rq_rt_throttled, "rt_rq", "rt_throttled");
	}
}

/*
 *  Display the RT prio array and CFS rb-tree runqueues for each
 *  (selected) cpu.
 */
static void
dump_CFS_runqueues(void)
{
	int cpu, tot, displayed;
	ulong runq, cfs_rq, prio_array;
	char *runqbuf, *cfs_rq_buf;
	ulong tasks_timeline ATTRIBUTE_UNUSED;
	struct task_context *tc;
	struct rb_root *root;
	struct syment *rq_sp, *init_sp;
	ulong *cpus;

	cfs_rq_offset_init();

	if (!(rq_sp = per_cpu_symbol_search("per_cpu__runqueues")))
		error(FATAL, "per-cpu runqueues do not exist\n");

	runqbuf = GETBUF(SIZE(runqueue));
	if ((init_sp =
	    per_cpu_symbol_search("per_cpu__init_cfs_rq")))
		cfs_rq_buf = GETBUF(SIZE(cfs_rq));
	else
		cfs_rq_buf = NULL;

	get_active_set();
	cpus = pc->curcmd_flags & CPUMASK ?
		(ulong *)(ulong)pc->curcmd_private : NULL;

	for (cpu = displayed = 0; cpu < kt->cpus; cpu++) {
		if (cpus && !NUM_IN_BITMAP(cpus, cpu))
			continue;

		/* Resolve this cpu's rq address. */
		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
			runq = rq_sp->value + kt->__per_cpu_offset[cpu];
		else
			runq = rq_sp->value;

		fprintf(fp, "%sCPU %d ", displayed++ ? "\n" : "", cpu);

		if (hide_offline_cpu(cpu)) {
			fprintf(fp, "[OFFLINE]\n");
			continue;
		} else
			fprintf(fp, "RUNQUEUE: %lx\n", runq);

		fprintf(fp, " CURRENT: ");
		if ((tc = task_to_context(tt->active_set[cpu])))
			fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
				tc->pid, tc->task, tc->comm);
		else
			fprintf(fp, "%lx\n", tt->active_set[cpu]);

		readmem(runq, KVADDR, runqbuf, SIZE(runqueue),
			"per-cpu rq", FAULT_ON_ERROR);

		if (cfs_rq_buf) {
			/*
			 * Use default task group's cfs_rq on each cpu.
			 */
			if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
				cfs_rq = init_sp->value +
					kt->__per_cpu_offset[cpu];
			else
				cfs_rq = init_sp->value;

			readmem(cfs_rq, KVADDR, cfs_rq_buf, SIZE(cfs_rq),
				"per-cpu cfs_rq", FAULT_ON_ERROR);
			root = (struct rb_root *)(cfs_rq +
				OFFSET(cfs_rq_tasks_timeline));
		} else {
			/* cfs_rq is embedded directly in the rq. */
			cfs_rq = runq + OFFSET(rq_cfs);
			root = (struct rb_root *)(runq + OFFSET(rq_cfs) +
				OFFSET(cfs_rq_tasks_timeline));
		}

		/* RT runqueue first... */
		prio_array = runq + OFFSET(rq_rt) + OFFSET(rt_rq_active);
		fprintf(fp, " RT PRIO_ARRAY: %lx\n", prio_array);
		tot = dump_RT_prio_array(prio_array,
			&runqbuf[OFFSET(rq_rt) + OFFSET(rt_rq_active)]);
		if (!tot) {
			INDENT(5);
			fprintf(fp, "[no tasks queued]\n");
		}

		/* ...then the CFS rb-tree. */
		fprintf(fp, " CFS RB_ROOT: %lx\n", (ulong)root);
		hq_open();
		tot = dump_tasks_in_cfs_rq(cfs_rq);
		hq_close();
		if (!tot) {
			INDENT(5);
			fprintf(fp, "[no tasks queued]\n");
		}
	}

	FREEBUF(runqbuf);
	if (cfs_rq_buf)
		FREEBUF(cfs_rq_buf);
}

/*
 *  Print the banner line for a task group's per-cpu rt_rq, noting
 *  whether the group is currently throttled.
 */
static void
print_group_header_rt(ulong rt_rq, void *t)
{
	int throttled;
	struct task_group_info *tgi = (struct task_group_info *)t;

	fprintf(fp,
"TASK_GROUP: %lx RT_RQ: %lx", tgi->task_group, rt_rq);

	if (tgi->name)
		fprintf(fp, " <%s>", tgi->name);

	if (VALID_MEMBER(task_group_rt_bandwidth)) {
		readmem(rt_rq + OFFSET(rt_rq_rt_throttled), KVADDR,
			&throttled, sizeof(int), "rt_rq rt_throttled",
			FAULT_ON_ERROR);
		if (throttled)
			fprintf(fp, " (THROTTLED)");
	}
	fprintf(fp, "\n");
}

/*
 *  Print the not-yet-shown ancestor group headers of the group in *t,
 *  then this group's own rt_rq header with its highest queued priority.
 *  Each group is printed at most once (tracked via tgi->use).
 */
static void
print_parent_task_group_rt(void *t, int cpu)
{
	int prio;
	struct task_group_info *tgi;
	ulong rt_rq_c, rt_rq_p;

	tgi = ((struct task_group_info *)t)->parent;
	/* NOTE(review): ancestors are rendered via
	 * print_parent_task_group_fair() even on this RT path -- verify
	 * that is intentional and not a copy of the fair-group variant. */
	if (tgi && tgi->use)
		print_parent_task_group_fair(tgi, cpu);
	else
		return;

	/* task_group.rt_rq is an array of per-cpu rt_rq pointers. */
	readmem(tgi->task_group + OFFSET(task_group_rt_rq), KVADDR,
		&rt_rq_c, sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
	readmem(rt_rq_c + cpu * sizeof(ulong), KVADDR, &rt_rq_p,
		sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);

	readmem(rt_rq_p + OFFSET(rt_rq_highest_prio), KVADDR, &prio,
		sizeof(int), "rt_rq highest prio", FAULT_ON_ERROR);
	INDENT(-1 + 6 * tgi->depth);
	fprintf(fp, "[%3d] ", prio);
	print_group_header_rt(rt_rq_p, tgi);
	tgi->use = 0;
}

/*
 *  Show tasks queued in descendant rt_rqs whose parent rt_rq itself has
 *  nothing runnable on this cpu; returns the number of tasks shown.
 */
static int
dump_tasks_in_lower_dequeued_rt_rq(int depth, ulong rt_rq, int cpu)
{
	int i, prio, tot, delta, nr_running;
	ulong rt_rq_c, rt_rq_p, group;

	tot = 0;
	for (i = 0; i < tgi_p; i++) {
		delta = tgi_array[i]->depth - depth;
		if (delta > 1)
			break;
		if (tgi_array[i]->use == 0 || delta < 1)
			continue;

		/* Only consider direct children of this rt_rq's group. */
		readmem(rt_rq + OFFSET(rt_rq_tg), KVADDR, &group,
			sizeof(ulong), "rt_rq tg", FAULT_ON_ERROR);
		if (group != tgi_array[i]->parent->task_group)
			continue;

		readmem(tgi_array[i]->task_group + OFFSET(task_group_rt_rq),
			KVADDR, &rt_rq_c, sizeof(ulong), "task_group rt_rq",
			FAULT_ON_ERROR);
		readmem(rt_rq_c + cpu * sizeof(ulong), KVADDR, &rt_rq_p,
			sizeof(ulong), "task_group rt_rq", FAULT_ON_ERROR);
		if (rt_rq == rt_rq_p)
			continue;

		readmem(rt_rq_p + OFFSET(rt_rq_rt_nr_running), KVADDR,
			&nr_running, sizeof(int), "rt_rq rt_nr_running",
			FAULT_ON_ERROR);
		if (nr_running == 0) {
			/* Nothing queued here either: keep descending. */
			tot += dump_tasks_in_lower_dequeued_rt_rq(depth + 1,
				rt_rq_p, cpu);
			continue;
		}
		print_parent_task_group_rt(tgi_array[i], cpu);

		readmem(rt_rq_p + OFFSET(rt_rq_highest_prio), KVADDR, &prio,
			sizeof(int), "rt_rq highest_prio", FAULT_ON_ERROR);
		INDENT(5 + 6 * depth);
		fprintf(fp, "[%3d] ", prio);
		tot++;
		dump_tasks_in_task_group_rt_rq(depth + 1, rt_rq_p, cpu);
	}

	return tot;
}

/*
 *  Dump an rt_prio_array: one circular list head per RT priority level.
 *  k_prio_array is the kernel address of the array, u_prio_array points
 *  at an already-read copy of it; returns the number of tasks shown.
 */
static int
dump_RT_prio_array(ulong k_prio_array, char *u_prio_array)
{
	int i, c, tot, cnt, qheads;
	ulong offset, kvaddr, uvaddr;
	ulong list_head[2];
	struct list_data list_data, *ld;
	struct task_context *tc;
	ulong my_q, task_addr;
	char *rt_rq_buf;

	qheads = (i = ARRAY_LENGTH(rt_prio_array_queue)) ?
		i : get_array_length("rt_prio_array.queue", NULL,
			SIZE(list_head));

	ld = &list_data;

	for (i = tot = 0; i < qheads; i++) {
		offset = OFFSET(rt_prio_array_queue) + (i * SIZE(list_head));
		kvaddr = k_prio_array + offset;
		uvaddr = (ulong)u_prio_array + offset;
		BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

		if (CRASHDEBUG(1))
			fprintf(fp, "rt_prio_array[%d] @ %lx => %lx/%lx\n",
				i, kvaddr, list_head[0], list_head[1]);

		/* Empty priority level: the list head points to itself. */
		if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
			continue;

		BZERO(ld, sizeof(struct list_data));
		ld->start = list_head[0];
		ld->flags |= LIST_ALLOCATE;
		if (VALID_MEMBER(task_struct_rt) &&
		    VALID_MEMBER(sched_rt_entity_run_list))
			ld->list_head_offset = OFFSET(sched_rt_entity_run_list);
		else
			ld->list_head_offset = OFFSET(task_struct_run_list);
		ld->end = kvaddr;
		cnt = do_list(ld);

		for (c = 0; c < cnt; c++) {
			task_addr = ld->list_ptr[c];
			if (VALID_MEMBER(sched_rt_entity_my_q)) {
				/* Group-scheduled entity: recurse into the
				 * child rt_rq's own prio array. */
				readmem(ld->list_ptr[c] +
					OFFSET(sched_rt_entity_my_q), KVADDR,
					&my_q, sizeof(ulong), "my_q",
					FAULT_ON_ERROR);
				if (my_q) {
					rt_rq_buf = GETBUF(SIZE(rt_rq));
					readmem(my_q, KVADDR, rt_rq_buf,
						SIZE(rt_rq), "rt_rq",
						FAULT_ON_ERROR);
					tot += dump_RT_prio_array(
						my_q + OFFSET(rt_rq_active),
						&rt_rq_buf[OFFSET(rt_rq_active)]);
					FREEBUF(rt_rq_buf);
					continue;
				}
			}
			/* Back up from the embedded list node to the
			 * containing task_struct. */
			if (VALID_MEMBER(task_struct_rt))
				task_addr -= OFFSET(task_struct_rt);
			else
				task_addr -= OFFSET(task_struct_run_list);

			if (!(tc =
			    task_to_context(task_addr)))
				continue;

			INDENT(5);
			fprintf(fp, "[%3d] ", i);
			fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
				tc->pid, tc->task, tc->comm);
			tot++;
		}
		FREEBUF(ld->list_ptr);
	}

	return tot;
}

/*
 *  Recursively dump the tasks queued in a task group's per-cpu rt_rq,
 *  one priority level at a time, descending into child groups.
 */
static void
dump_tasks_in_task_group_rt_rq(int depth, ulong rt_rq, int cpu)
{
	int i, c, tot, cnt, qheads;
	ulong offset, kvaddr, uvaddr;
	ulong list_head[2];
	struct list_data list_data, *ld;
	struct task_context *tc;
	ulong my_q, task_addr, tg, k_prio_array;
	char *rt_rq_buf, *u_prio_array;

	k_prio_array = rt_rq + OFFSET(rt_rq_active);
	rt_rq_buf = GETBUF(SIZE(rt_rq));
	readmem(rt_rq, KVADDR, rt_rq_buf, SIZE(rt_rq), "rt_rq",
		FAULT_ON_ERROR);
	u_prio_array = &rt_rq_buf[OFFSET(rt_rq_active)];

	if (depth) {
		/* Identify this group in tgi_array and print its header,
		 * marking it shown. */
		readmem(rt_rq + OFFSET(rt_rq_tg), KVADDR, &tg,
			sizeof(ulong), "rt_rq tg", FAULT_ON_ERROR);
		for (i = 0; i < tgi_p; i++) {
			if (tgi_array[i]->task_group == tg) {
				print_group_header_rt(rt_rq, tgi_array[i]);
				tgi_array[i]->use = 0;
				break;
			}
		}
	}

	qheads = (i = ARRAY_LENGTH(rt_prio_array_queue)) ?
		i : get_array_length("rt_prio_array.queue", NULL,
			SIZE(list_head));

	ld = &list_data;

	for (i = tot = 0; i < qheads; i++) {
		offset = OFFSET(rt_prio_array_queue) + (i * SIZE(list_head));
		kvaddr = k_prio_array + offset;
		uvaddr = (ulong)u_prio_array + offset;
		BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

		if (CRASHDEBUG(1))
			fprintf(fp, "rt_prio_array[%d] @ %lx => %lx/%lx\n",
				i, kvaddr, list_head[0], list_head[1]);

		/* Empty priority level. */
		if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
			continue;

		BZERO(ld, sizeof(struct list_data));
		ld->start = list_head[0];
		ld->flags |= LIST_ALLOCATE;
		if (VALID_MEMBER(task_struct_rt) &&
		    VALID_MEMBER(sched_rt_entity_run_list))
			ld->list_head_offset = OFFSET(sched_rt_entity_run_list);
		else
			ld->list_head_offset = OFFSET(task_struct_run_list);
		ld->end = kvaddr;
		cnt = do_list(ld);

		for (c = 0; c < cnt; c++) {
			task_addr = ld->list_ptr[c];
			if (INVALID_MEMBER(sched_rt_entity_my_q))
				goto is_task;

			readmem(ld->list_ptr[c] + OFFSET(sched_rt_entity_my_q),
				KVADDR, &my_q,
				sizeof(ulong), "my_q", FAULT_ON_ERROR);
			if (!my_q) {
				task_addr -= OFFSET(task_struct_rt);
				goto is_task;
			}

			/* Child task group: print its slot and recurse. */
			INDENT(5 + 6 * depth);
			fprintf(fp, "[%3d] ", i);
			tot++;
			dump_tasks_in_task_group_rt_rq(depth + 1, my_q, cpu);
			continue;

is_task:
			if (!(tc = task_to_context(task_addr)))
				continue;

			INDENT(5 + 6 * depth);
			fprintf(fp, "[%3d] ", i);
			fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
				tc->pid, tc->task, tc->comm);
			tot++;
		}
		FREEBUF(ld->list_ptr);
	}

	tot += dump_tasks_in_lower_dequeued_rt_rq(depth, rt_rq, cpu);

	if (!tot) {
		INDENT(5 + 6 * depth);
		fprintf(fp, "[no tasks queued]\n");
	}
	FREEBUF(rt_rq_buf);
}

/*
 *  Return a task group's cgroup name in a GETBUF()'d string, or NULL
 *  if it cannot be determined.
 */
static char *
get_task_group_name(ulong group)
{
	ulong cgroup, dentry, kernfs_node, parent, name;
	char *dentry_buf, *tmp;
	char buf[BUFSIZE];
	int len;

	tmp = NULL;
	readmem(group + OFFSET(task_group_css) +
		OFFSET(cgroup_subsys_state_cgroup), KVADDR, &cgroup,
		sizeof(ulong), "task_group css cgroup", FAULT_ON_ERROR);
	if (cgroup == 0)
		return NULL;

	if (VALID_MEMBER(cgroup_dentry)) {
		/* Older kernels: the name comes from the cgroup's dentry. */
		readmem(cgroup + OFFSET(cgroup_dentry), KVADDR, &dentry,
			sizeof(ulong), "cgroup dentry", FAULT_ON_ERROR);
		if (dentry == 0)
			return NULL;

		dentry_buf = GETBUF(SIZE(dentry));
		readmem(dentry, KVADDR, dentry_buf, SIZE(dentry),
			"dentry", FAULT_ON_ERROR);
		len = UINT(dentry_buf + OFFSET(dentry_d_name) +
			OFFSET(qstr_len));
		tmp = GETBUF(len + 1);
		name = ULONG(dentry_buf + OFFSET(dentry_d_name) +
			OFFSET(qstr_name));
		readmem(name, KVADDR, tmp, len, "qstr name", FAULT_ON_ERROR);

		FREEBUF(dentry_buf);
		return tmp;
	}

	/*
	 * Emulate kernfs_name() and kernfs_name_locked()
	 */
	if (INVALID_MEMBER(cgroup_kn) || INVALID_MEMBER(kernfs_node_name) ||
	    INVALID_MEMBER(kernfs_node_parent))
		return NULL;

	readmem(cgroup + OFFSET(cgroup_kn), KVADDR, &kernfs_node,
		sizeof(ulong), "cgroup kn", FAULT_ON_ERROR);
	if (kernfs_node == 0)
		return NULL;

	readmem(kernfs_node + OFFSET(kernfs_node_parent), KVADDR, &parent,
		sizeof(ulong), "kernfs_node parent", FAULT_ON_ERROR);
	if (!parent) {
		/* No parent: this is the root, named "/". */
		tmp = GETBUF(2);
		strcpy(tmp, "/");
		return tmp;
	}
	readmem(kernfs_node + OFFSET(kernfs_node_name), KVADDR, &name,
		sizeof(ulong), "kernfs_node name", FAULT_ON_ERROR);
	if (!name || !read_string(name, buf, BUFSIZE-1))
		return NULL;

	tmp = GETBUF(strlen(buf)+1);
	strcpy(tmp, buf);
	return tmp;
}

/*
 *  Depth-first walk of the task_group tree rooted at "group", recording
 *  each group's depth, name and parent in tgi_array (growing the array
 *  as needed).  "i" is the tgi_array index of the parent, or -1 for the
 *  root group.
 */
static void
fill_task_group_info_array(int depth, ulong group, char *group_buf, int i)
{
	int d;
	ulong kvaddr, uvaddr, offset;
	ulong list_head[2], next;
	struct task_group_info **tgi_array_new;

	d = tgi_p;
	tgi_array[tgi_p] = (struct task_group_info *)
		GETBUF(sizeof(struct task_group_info));
	/* Only non-root groups start out marked "in use" (not yet shown). */
	if (depth)
		tgi_array[tgi_p]->use = 1;
	else
		tgi_array[tgi_p]->use = 0;
	tgi_array[tgi_p]->depth = depth;
	tgi_array[tgi_p]->name = get_task_group_name(group);
	tgi_array[tgi_p]->task_group = group;
	if (i >= 0)
		tgi_array[tgi_p]->parent = tgi_array[i];
	else
		tgi_array[tgi_p]->parent = NULL;

	tgi_p++;
	if (tgi_p == tgi_p_max) {
		/* Array full: grow by another MAX_GROUP_NUM slots. */
		tgi_p_max += MAX_GROUP_NUM;
		tgi_array_new = (struct task_group_info **)
			GETBUF(sizeof(void *) * tgi_p_max);
		BCOPY(tgi_array, tgi_array_new, sizeof(void *) * tgi_p);
		FREEBUF(tgi_array);
		tgi_array = tgi_array_new;
	}

	offset = OFFSET(task_group_children);
	kvaddr = group + offset;
	uvaddr = (ulong)(group_buf + offset);
	BCOPY((char *)uvaddr, (char *)&list_head[0], sizeof(ulong)*2);

	/* No children: the list head points to itself. */
	if ((list_head[0] == kvaddr) && (list_head[1] == kvaddr))
		return;

	next = list_head[0];
	while (next != kvaddr) {
		group = next - OFFSET(task_group_siblings);
		readmem(group, KVADDR, group_buf, SIZE(task_group),
			"task_group", FAULT_ON_ERROR);
		next = ULONG(group_buf + OFFSET(task_group_siblings) +
			OFFSET(list_head_next));
		fill_task_group_info_array(depth + 1, group, group_buf, d);
	}
}

/*
 *  Display tasks grouped by task group, per cpu: build the task_group
 *  tree, then dump each cpu's RT and CFS group runqueues.
 */
static void
dump_tasks_by_task_group(void)
{
	int cpu, displayed;
	ulong root_task_group, cfs_rq = 0, cfs_rq_p;
	ulong rt_rq = 0, rt_rq_p;
	char *buf;
	struct task_context *tc;
	char *task_group_name;
	ulong *cpus;

	cfs_rq_offset_init();
	task_group_offset_init();

	/* The root group symbol name varies with kernel version. */
	root_task_group = 0;
	task_group_name = NULL;
	if (symbol_exists("init_task_group")) {
		root_task_group = symbol_value("init_task_group");
		task_group_name = "INIT";
	} else if (symbol_exists("root_task_group")) {
		root_task_group = symbol_value("root_task_group");
		task_group_name = "ROOT";
	} else
		error(FATAL, "cannot determine root task_group\n");

	tgi_p_max = MAX_GROUP_NUM;
	tgi_array = (struct task_group_info **)GETBUF(sizeof(void *)
		* tgi_p_max);
	buf = GETBUF(SIZE(task_group));
	readmem(root_task_group, KVADDR, buf, SIZE(task_group),
		"task_group", FAULT_ON_ERROR);
	/* Per-cpu rt_rq/cfs_rq pointer arrays hanging off the root group. */
	if (VALID_MEMBER(task_group_rt_rq))
		rt_rq = ULONG(buf + OFFSET(task_group_rt_rq));
	if (VALID_MEMBER(task_group_cfs_rq))
		cfs_rq = ULONG(buf + OFFSET(task_group_cfs_rq));

	fill_task_group_info_array(0, root_task_group, buf, -1);
	sort_task_group_info_array();
	if (CRASHDEBUG(1))
		print_task_group_info_array();

	get_active_set();
	cpus = pc->curcmd_flags & CPUMASK ?
		(ulong *)(ulong)pc->curcmd_private : NULL;

	for (cpu = displayed = 0; cpu < kt->cpus; cpu++) {
		if (cpus && !NUM_IN_BITMAP(cpus, cpu))
			continue;

		if (rt_rq)
			readmem(rt_rq + cpu * sizeof(ulong), KVADDR,
				&rt_rq_p, sizeof(ulong), "task_group rt_rq",
				FAULT_ON_ERROR);
		if (cfs_rq)
			readmem(cfs_rq + cpu * sizeof(ulong), KVADDR,
				&cfs_rq_p, sizeof(ulong), "task_group cfs_rq",
				FAULT_ON_ERROR);

		fprintf(fp, "%sCPU %d", displayed++ ?
"\n" : "", cpu);

		if (hide_offline_cpu(cpu)) {
			fprintf(fp, " [OFFLINE]\n");
			continue;
		} else
			fprintf(fp, "\n");

		fprintf(fp, " CURRENT: ");
		if ((tc = task_to_context(tt->active_set[cpu])))
			fprintf(fp, "PID: %-5ld TASK: %lx COMMAND: \"%s\"\n",
				tc->pid, tc->task, tc->comm);
		else
			fprintf(fp, "%lx\n", tt->active_set[cpu]);

		if (rt_rq) {
			fprintf(fp, " %s_TASK_GROUP: %lx RT_RQ: %lx\n",
				task_group_name, root_task_group, rt_rq_p);
			reuse_task_group_info_array();
			dump_tasks_in_task_group_rt_rq(0, rt_rq_p, cpu);
		}

		if (cfs_rq) {
			fprintf(fp, " %s_TASK_GROUP: %lx CFS_RQ: %lx\n",
				task_group_name, root_task_group, cfs_rq_p);
			reuse_task_group_info_array();
			dump_tasks_in_task_group_cfs_rq(0, cfs_rq_p, cpu, tc);
		}
	}

	FREEBUF(buf);
	free_task_group_info_array();
}

/*
 *  Signal name table.  _NSIG/_NSIG_BPW/_NSIG_WORDS are redefined here
 *  in terms of the target kernel (word size from machdep->bits) rather
 *  than the host.
 */
#undef _NSIG
#define _NSIG		64
#define _NSIG_BPW	machdep->bits
#define _NSIG_WORDS	(_NSIG / _NSIG_BPW)

#undef SIGRTMIN
#define SIGRTMIN	32

static struct signame {
	char *name;	/* canonical signal name */
	char *altname;	/* historical alias, if any */
} signame[_NSIG] = {
	/* 0 */   {NULL, NULL},
	/* 1 */   {"SIGHUP", NULL},
	/* 2 */   {"SIGINT", NULL},
	/* 3 */   {"SIGQUIT", NULL},
	/* 4 */   {"SIGILL", NULL},
	/* 5 */   {"SIGTRAP", NULL},
	/* 6 */   {"SIGABRT", "SIGIOT"},
	/* 7 */   {"SIGBUS", NULL},
	/* 8 */   {"SIGFPE", NULL},
	/* 9 */   {"SIGKILL", NULL},
	/* 10 */  {"SIGUSR1", NULL},
	/* 11 */  {"SIGSEGV", NULL},
	/* 12 */  {"SIGUSR2", NULL},
	/* 13 */  {"SIGPIPE", NULL},
	/* 14 */  {"SIGALRM", NULL},
	/* 15 */  {"SIGTERM", NULL},
	/* 16 */  {"SIGSTKFLT", NULL},
	/* 17 */  {"SIGCHLD", "SIGCLD"},
	/* 18 */  {"SIGCONT", NULL},
	/* 19 */  {"SIGSTOP", NULL},
	/* 20 */  {"SIGTSTP", NULL},
	/* 21 */  {"SIGTTIN", NULL},
	/* 22 */  {"SIGTTOU", NULL},
	/* 23 */  {"SIGURG", NULL},
	/* 24 */  {"SIGXCPU", NULL},
	/* 25 */  {"SIGXFSZ", NULL},
	/* 26 */  {"SIGVTALRM", NULL},
	/* 27 */  {"SIGPROF", NULL},
	/* 28 */  {"SIGWINCH", NULL},
	/* 29 */  {"SIGIO", "SIGPOLL"},
	/* 30 */  {"SIGPWR", NULL},
	/* 31 */  {"SIGSYS", "SIGUNUSED"},
	{NULL, NULL},	/* Real time signals start here.
	*/
};

/*
 *  Compute the counts used to label the signals between SIGRTMIN and
 *  SIGRTMAX as "SIGRTMIN+n"/"SIGRTMAX-n", and return the highest valid
 *  signal number (kernels prior to 2.5 topped out at _NSIG - 1).
 */
static int
sigrt_minmax(int *min, int *max)
{
	int sigrtmax, j;

	sigrtmax = THIS_KERNEL_VERSION < LINUX(2,5,0) ?
		_NSIG - 1 : _NSIG;

	if (min && max) {
		j = sigrtmax-SIGRTMIN-1;
		*max = j / 2;
		*min = j - *max;
	}

	return sigrtmax;
}

/*
 *  List all signal names, one per line.
 */
static void
signame_list(void)
{
	int i, sigrtmax, j, min, max;

	sigrtmax = sigrt_minmax(&min, &max);
	j = 1;

	for (i = 1; i <= sigrtmax; i++) {
		if ((i == SIGRTMIN) || (i == sigrtmax)) {
			fprintf(fp, "[%d] %s", i,
				(i== SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX");
		} else if (i > SIGRTMIN) {
			/* Real-time range: the lower half is labeled relative
			 * to SIGRTMIN, the upper half relative to SIGRTMAX. */
			if (j <= min){
				fprintf(fp, "[%d] %s%d", i , "SIGRTMIN+", j);
				j++;
			} else if (max >= 1) {
				fprintf(fp, "[%d] %s%d", i , "SIGRTMAX-",max);
				max--;
			}
		} else {
			if (!signame[i].name)
				continue;

			fprintf(fp, "%s[%d] %s",
				i < 10 ? " " : "", i, signame[i].name);
			if (signame[i].altname)
				fprintf(fp, "/%s", signame[i].altname);
		}
		fprintf(fp, "\n");
	}
}

/*
 * Translate the bits in a signal set into their name strings.
 */
static void
translate_sigset(ulonglong sigset)
{
	int sigrtmax, min, max, i, j, c, len;
	char buf[BUFSIZE];

	if (!sigset) {
		fprintf(fp, "(none)\n");
		return;
	}

	len = 0;
	sigrtmax= sigrt_minmax(&min, &max);
	j = 1;

	for (i = 1, c = 0; i <= sigrtmax; i++) {
		if (sigset & (ulonglong)1) {
			/* Build this signal's name, prefixed with a space
			 * when it is not the first one emitted. */
			if (i == SIGRTMIN || i == sigrtmax)
				sprintf(buf, "%s%s", c++ ? " " : "",
					(i==SIGRTMIN) ? "SIGRTMIN" : "SIGRTMAX");
			else if (i > SIGRTMIN) {
				if (j <= min)
					sprintf(buf, "%s%s%d", c++ ? " " : "",
						"SIGRTMIN+", j);
				else if (max >= 1)
					sprintf(buf, "%s%s%d", c++ ? " " : "",
						"SIGRTMAX-", max);
			} else
				sprintf(buf, "%s%s", c++ ? " " : "",
					signame[i].name);

			/* Wrap output lines at ~80 columns. */
			if ((len + strlen(buf)) > 80) {
				shift_string_left(buf, 1);
				fprintf(fp, "\n");
				len = 0;
			}
			len += strlen(buf);
			fprintf(fp, "%s", buf);
		}

		sigset >>= 1;

		/* Keep the SIGRTMIN+/SIGRTMAX- counters in step even for
		 * bits that are not set. */
		if (i > SIGRTMIN) {
			if (j <= min)
				j++;
			else if (max >= 1)
				max--;
		}
	}
	fprintf(fp, "\n");
}

/*
 * Machine dependent interface to modify signame struct contents.
 */
void
modify_signame(int sig, char *name, char *altname)
{
	signame[sig].name = name;
	signame[sig].altname = altname;
}

/*
 * Display all signal-handling data for a task.
 *
 * Reference handling framework is here, but not used as of yet.
 */
void
cmd_sig(void)
{
	int c, tcnt, bogus;
	ulong value;
	ulonglong sigset;
	struct reference *ref;
	struct task_context *tc;
	ulong *tasklist;
	char *siglist;
	int thread_group = FALSE;

	tasklist = (ulong *)GETBUF((MAXARGS+NR_CPUS)*sizeof(ulong));
	ref = (struct reference *)GETBUF(sizeof(struct reference));
	siglist = GETBUF(BUFSIZE);
	ref->str = siglist;

	while ((c = getopt(argcnt, args, "lR:s:g")) != EOF) {
		switch(c)
		{
		case 's':
			/* Stand-alone translation of a sigset value. */
			sigset = htoll(optarg, FAULT_ON_ERROR, NULL);
			translate_sigset(sigset);
			return;

		case 'R':
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, optarg);
			break;

		case 'l':
			signame_list();
			return;

		case 'g':
			/* Thread-group mode. */
			pc->curcmd_flags |= TASK_SPECIFIED;
			thread_group = TRUE;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	tcnt = bogus = 0;

	/* Gather the target task list from PID/task arguments; non-numeric
	 * arguments naming task_struct members become -R references. */
	while (args[optind]) {
		if (IS_A_NUMBER(args[optind])) {
			switch (str_to_context(args[optind], &value, &tc))
			{
			case STR_PID:
				for (tc = pid_to_context(value); tc;
				     tc = tc->tc_next)
					tasklist[tcnt++] = tc->task;
				break;

			case STR_TASK:
				tasklist[tcnt++] = value;
				break;

			case STR_INVALID:
				bogus++;
				error(INFO,
					"invalid task or pid value: %s\n\n",
					args[optind]);
				break;
			}
		} else if (strstr(args[optind], ",") ||
			MEMBER_EXISTS("task_struct", args[optind])) {
			if (strlen(ref->str))
				strcat(ref->str, ",");
			strcat(ref->str, args[optind]);
		} else
			error(INFO, "invalid task or pid value: %s\n\n",
				args[optind]);
		optind++;
	}

	/* Default to the current context when no task was specified. */
	if (!tcnt && !bogus)
		tasklist[tcnt++] = CURRENT_TASK();

	for (c = 0; c < tcnt; c++) {
		if (thread_group)
			do_sig_thread_group(tasklist[c]);
		else {
			do_sig(tasklist[c], 0, strlen(ref->str) ? ref : NULL);
			fprintf(fp, "\n");
		}
	}
}

/*
 * Do the work for the "sig -g" command option, coming from sig or foreach.
 */
static void
do_sig_thread_group(ulong task)
{
	int i;
	int cnt;
	struct task_context *tc;
	ulong tgid;

	tc = task_to_context(task);
	tgid = task_tgid(task);
	/* When a non-leader thread was specified, redirect to the thread
	 * group leader (only if TASK_SPECIFIED was set by "sig -g"). */
	if (tc->pid != tgid) {
		if (pc->curcmd_flags & TASK_SPECIFIED) {
			if (!(tc = tgid_to_context(tgid)))
				return;
			task = tc->task;
		} else
			return;
	}

	/* Show idle (pid 0) groups at most once per command invocation. */
	if ((tc->pid == 0) && (pc->curcmd_flags & IDLE_TASK_SHOWN))
		return;

	/* Group-level data, then the leader's task-level data... */
	print_task_header(fp, tc, 0);
	dump_signal_data(tc, THREAD_GROUP_LEVEL);
	fprintf(fp, "\n ");
	print_task_header(fp, tc, 0);
	dump_signal_data(tc, TASK_LEVEL|TASK_INDENT);

	/* ...then every other thread in the same thread group. */
	tc = FIRST_CONTEXT();
	for (i = cnt = 0; i < RUNNING_TASKS(); i++, tc++) {
		if (tc->task == task)
			continue;

		if (task_tgid(tc->task) == tgid) {
			fprintf(fp, "\n ");
			print_task_header(fp, tc, 0);
			dump_signal_data(tc, TASK_LEVEL|TASK_INDENT);
			cnt++;
			if (tc->pid == 0)
				pc->curcmd_flags |= IDLE_TASK_SHOWN;
		}
	}

	fprintf(fp, "\n");
}

/*
 * Do the work for the sig command, coming from sig or foreach.
 */
void
do_sig(ulong task, ulong flags, struct reference *ref)
{
	struct task_context *tc;

	tc = task_to_context(task);

	if (ref)
		signal_reference(tc, flags, ref);
	else {
		if (!(flags & FOREACH_SIG))
			print_task_header(fp, tc, 0);
		dump_signal_data(tc, TASK_LEVEL|THREAD_GROUP_LEVEL);
	}
}

/*
 * Implementation for -R reference for the sig command.
 * (Currently a placeholder: always fails with an error.)
 */
static void
signal_reference(struct task_context *tc, ulong flags, struct reference *ref)
{
	if (flags & FOREACH_SIG)
		error(FATAL, "sig: -R not supported yet\n");
	else
		error(FATAL, "-R not supported yet\n");
}

/*
 * Dump all signal-handling data for a task.
 */
static void
dump_signal_data(struct task_context *tc, ulong flags)
{
	int i, sigrtmax, others, use_sighand;
	int translate, sigpending;
	uint ti_flags;
	ulonglong sigset, blocked, mask;
	ulong signal_struct, kaddr, handler, sa_flags, sigqueue;
	ulong sighand_struct;
	long size;
	char *signal_buf, *uaddr;
	ulong shared_pending, signal;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

	sigpending = sigqueue = 0;
	sighand_struct = signal_struct = 0;

	/* Late offset initialization: kernels with "sigqueue" vs the
	 * older "signal_queue" structure. */
	if (VALID_STRUCT(sigqueue) && !VALID_MEMBER(sigqueue_next)) {
		MEMBER_OFFSET_INIT(sigqueue_next, "sigqueue", "next");
		MEMBER_OFFSET_INIT(sigqueue_list, "sigqueue", "list");
		MEMBER_OFFSET_INIT(sigqueue_info, "sigqueue", "info");
	} else if (!VALID_MEMBER(signal_queue_next)) {
		MEMBER_OFFSET_INIT(signal_queue_next, "signal_queue", "next");
		MEMBER_OFFSET_INIT(signal_queue_info, "signal_queue", "info");
	}

	sigset = task_signal(tc->task, 0);
	if (!tt->last_task_read)
		return;

	if (VALID_MEMBER(task_struct_sig))
		signal_struct = ULONG(tt->task_struct +
			OFFSET(task_struct_sig));
	else if (VALID_MEMBER(task_struct_signal))
		signal_struct = ULONG(tt->task_struct +
			OFFSET(task_struct_signal));

	/* One scratch buffer sized for the largest structure read here. */
	size = MAX(SIZE(signal_struct), VALID_SIZE(signal_queue) ?
		SIZE(signal_queue) : SIZE(sigqueue));
	if (VALID_SIZE(sighand_struct))
		size = MAX(size, SIZE(sighand_struct));
	signal_buf = GETBUF(size);

	if (signal_struct)
		readmem(signal_struct, KVADDR, signal_buf,
			SIZE(signal_struct), "signal_struct buffer",
			FAULT_ON_ERROR);

	/*
	 * Signal dispositions (thread group level).
	 */
	if (flags & THREAD_GROUP_LEVEL) {
		if (flags & TASK_INDENT)
			INDENT(2);
		fprintf(fp, "SIGNAL_STRUCT: %lx ", signal_struct);
		if (!signal_struct) {
			fprintf(fp, "\n");
			return;
		}
		/* signal_struct.count was renamed nr_threads over time. */
		if (VALID_MEMBER(signal_struct_count))
			fprintf(fp, "COUNT: %d\n",
				INT(signal_buf + OFFSET(signal_struct_count)));
		else if (VALID_MEMBER(signal_struct_nr_threads))
			fprintf(fp, "NR_THREADS: %d\n", INT(signal_buf +
				OFFSET(signal_struct_nr_threads)));
		else
			fprintf(fp, "\n");

		if (flags & TASK_INDENT)
			INDENT(2);
		fprintf(fp, " SIG %s %s %s %s\n",
			mkstring(buf1, VADDR_PRLEN == 8 ? 9 : VADDR_PRLEN,
				CENTER, "SIGACTION"),
			mkstring(buf2, UVADDR_PRLEN, RJUST, "HANDLER"),
			mkstring(buf3, 16, CENTER, "MASK"),
			mkstring(buf4, VADDR_PRLEN, LJUST, "FLAGS"));

		/* Kernels with task_struct.sighand keep the k_sigaction
		 * array in a separate sighand_struct. */
		if (VALID_MEMBER(task_struct_sighand)) {
			sighand_struct = ULONG(tt->task_struct +
				OFFSET(task_struct_sighand));
			readmem(sighand_struct, KVADDR, signal_buf,
				SIZE(sighand_struct),
				"sighand_struct buffer", FAULT_ON_ERROR);
			use_sighand = TRUE;
		} else
			use_sighand = FALSE;

		/* One output line per signal disposition. */
		sigrtmax = sigrt_minmax(NULL, NULL);
		for (i = 1; i <= sigrtmax; i++) {
			if (flags & TASK_INDENT)
				INDENT(2);
			fprintf(fp, "%s[%d] ", i < 10 ?
" " : "", i);
			if (use_sighand) {
				kaddr = sighand_struct +
					OFFSET(sighand_struct_action) +
					((i-1) * SIZE(k_sigaction));
				uaddr = signal_buf +
					OFFSET(sighand_struct_action) +
					((i-1) * SIZE(k_sigaction));
			} else {
				kaddr = signal_struct +
					OFFSET(signal_struct_action) +
					((i-1) * SIZE(k_sigaction));
				uaddr = signal_buf +
					OFFSET(signal_struct_action) +
					((i-1) * SIZE(k_sigaction));
			}

			/* Special-case the magic handler values. */
			handler = ULONG(uaddr + OFFSET(sigaction_sa_handler));
			switch ((long)handler)
			{
			case -1:
				mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_ERR");
				break;
			case 0:
				mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_DFL");
				break;
			case 1:
				mkstring(buf1, UVADDR_PRLEN, RJUST, "SIG_IGN");
				break;
			default:
				mkstring(buf1, UVADDR_PRLEN, RJUST|LONG_HEX,
					MKSTR(handler));
				break;
			}

			mask = sigaction_mask((ulong)uaddr);
			sa_flags = ULONG(uaddr + OFFSET(sigaction_sa_flags));

			fprintf(fp, "%s%s %s %016llx %lx ",
				space(MINSPACE-1),
				mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX,
					MKSTR(kaddr)),
				buf1, mask, sa_flags);

			/* Symbolically decode sa_flags: "translate" opens the
			 * parenthesis once, "others" inserts separators. */
			if (sa_flags) {
				others = 0; translate = 1;
				if (sa_flags & SA_NOCLDSTOP)
					fprintf(fp, "%s%sSA_NOCLDSTOP",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
#ifdef SA_RESTORER
				if (sa_flags & SA_RESTORER)
					fprintf(fp, "%s%sSA_RESTORER",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
#endif
#ifdef SA_NOCLDWAIT
				if (sa_flags & SA_NOCLDWAIT)
					fprintf(fp, "%s%sSA_NOCLDWAIT",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
#endif
				if (sa_flags & SA_SIGINFO)
					fprintf(fp, "%s%sSA_SIGINFO",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
				if (sa_flags & SA_ONSTACK)
					fprintf(fp, "%s%sSA_ONSTACK",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
				if (sa_flags & SA_RESTART)
					fprintf(fp, "%s%sSA_RESTART",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
				if (sa_flags & SA_NODEFER)
					fprintf(fp, "%s%sSA_NODEFER",
						translate-- > 0 ? "(" : "",
						others++ ? "|" : "");
				if (sa_flags & SA_RESETHAND)
					fprintf(fp, "%s%sSA_RESETHAND",
						translate-- > 0 ? "(" : "",
						others++ ?
"|" : "");
				if (translate < 1)
					fprintf(fp, ")");
			}
			fprintf(fp, "\n");
		}
	}

	if (flags & TASK_LEVEL) {
		/*
		 * Pending signals (task level).
		 */
		if (VALID_MEMBER(task_struct_sigpending))
			sigpending = INT(tt->task_struct +
				OFFSET(task_struct_sigpending));
		else if (VALID_MEMBER(thread_info_flags)) {
			fill_thread_info(tc->thread_info);
			ti_flags = UINT(tt->thread_info +
				OFFSET(thread_info_flags));
			/* NOTE(review): the next statement appears corrupted
			 * in this copy of the file -- text seems to have been
			 * lost (likely a "(1 << TIF_...)" mask, the closing
			 * brace of this else-if, and intervening statements
			 * such as an assignment to "blocked").  Restore from
			 * the upstream crash sources before building. */
			sigpending = ti_flags & (1<task);
		if (flags & TASK_INDENT)
			INDENT(2);
		fprintf(fp, " BLOCKED: %016llx\n", blocked);

		/*
		 * Pending queue (task level).
		 */
		if (flags & TASK_INDENT)
			INDENT(2);
		if (VALID_MEMBER(signal_struct_shared_pending)) {
			fprintf(fp, "PRIVATE_PENDING\n");
			if (flags & TASK_INDENT)
				INDENT(2);
		}
		fprintf(fp, " SIGNAL: %016llx\n", sigset);

		if (VALID_MEMBER(task_struct_sigqueue))
			sigqueue = ULONG(tt->task_struct +
				OFFSET(task_struct_sigqueue));
		else if (VALID_MEMBER(task_struct_pending))
			sigqueue = ULONG(tt->task_struct +
				OFFSET(task_struct_pending) +
				OFFSET_OPTION(sigpending_head,
				sigpending_list));

		if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue))
			sigqueue = 0;

		if (flags & TASK_INDENT)
			INDENT(2);
		if (sigqueue) {
			fprintf(fp, " SIGQUEUE: SIG %s\n",
				mkstring(buf1, VADDR_PRLEN, CENTER|LJUST,
				"SIGINFO"));
			sigqueue_list(sigqueue);
		} else
			fprintf(fp, " SIGQUEUE: (empty)\n");
	}

	/*
	 * Pending queue (thread group level).
	 */
	if ((flags & THREAD_GROUP_LEVEL) &&
	    VALID_MEMBER(signal_struct_shared_pending)) {
		fprintf(fp, "SHARED_PENDING\n");
		shared_pending = signal_struct +
			OFFSET(signal_struct_shared_pending);
		signal = shared_pending + OFFSET(sigpending_signal);
		readmem(signal, KVADDR, signal_buf, SIZE(sigpending_signal),
			"signal", FAULT_ON_ERROR);
		sigset = task_signal(0, (ulong*)signal_buf);
		if (flags & TASK_INDENT)
			INDENT(2);
		fprintf(fp, " SIGNAL: %016llx\n", sigset);

		/* The first queued entry hangs off the shared_pending list
		 * head. */
		sigqueue = (shared_pending +
			OFFSET_OPTION(sigpending_head, sigpending_list) +
			OFFSET(list_head_next));
		readmem(sigqueue, KVADDR, signal_buf, SIZE(sigqueue),
			"sigqueue", FAULT_ON_ERROR);
		sigqueue = ULONG(signal_buf);

		if (VALID_MEMBER(sigqueue_list) && empty_list(sigqueue))
			sigqueue = 0;
		if (flags & TASK_INDENT)
			INDENT(2);
		if (sigqueue) {
			fprintf(fp, " SIGQUEUE: SIG %s\n",
				mkstring(buf1, VADDR_PRLEN, CENTER|LJUST,
				"SIGINFO"));
			sigqueue_list(sigqueue);
		} else
			fprintf(fp, " SIGQUEUE: (empty)\n");
	}
	FREEBUF(signal_buf);
}

/*
 * Dump a pending signal queue (private/shared).
 */
static void
sigqueue_list(ulong sigqueue)
{
	ulong sigqueue_save, next;
	int sig;
	char *signal_buf;
	long size;

	size = VALID_SIZE(signal_queue) ?
		SIZE(signal_queue) : SIZE(sigqueue);
	signal_buf = GETBUF(size);

	sigqueue_save = sigqueue;
	while (sigqueue) {
		readmem(sigqueue, KVADDR, signal_buf,
			SIZE_OPTION(signal_queue, sigqueue),
			"signal_queue/sigqueue", FAULT_ON_ERROR);

		/* Member names differ between the older signal_queue and
		 * the newer sigqueue structures. */
		if (VALID_MEMBER(signal_queue_next) &&
		    VALID_MEMBER(signal_queue_info)) {
			next = ULONG(signal_buf + OFFSET(signal_queue_next));
			sig = INT(signal_buf + OFFSET(signal_queue_info) +
				OFFSET(siginfo_si_signo));
		} else {
			next = ULONG(signal_buf +
				OFFSET_OPTION(sigqueue_next, sigqueue_list));
			sig = INT(signal_buf + OFFSET(sigqueue_info) +
				OFFSET(siginfo_si_signo));
		}

		/* Stop when the circular list wraps back to the head. */
		if (sigqueue_save == next)
			break;

		fprintf(fp, " %3d %lx\n", sig, sigqueue +
			OFFSET_OPTION(signal_queue_info, sigqueue_info));

		sigqueue = next;
	}
	FREEBUF(signal_buf);
}

/*
 * Return the current set of signals sent to a task, in the form of
 * a long long data type form that can be easily masked regardless
 * of its size.
 */
static ulonglong
task_signal(ulong task, ulong *signal)
{
	ulong *sigset_ptr;
	ulonglong sigset = 0;

	if (task) {
		/* Read the sigset out of the cached task_struct. */
		fill_task_struct(task);
		if (!tt->last_task_read)
			return 0;
		if (VALID_MEMBER(sigpending_signal)) {
			sigset_ptr = (ulong *)(tt->task_struct +
				OFFSET(task_struct_pending) +
				OFFSET(sigpending_signal));
		} else if (VALID_MEMBER(task_struct_signal)) {
			sigset_ptr = (ulong *)(tt->task_struct +
				OFFSET(task_struct_signal));
		} else
			return 0;
	} else if (signal) {
		/* Caller supplied a raw sigset buffer instead of a task. */
		sigset_ptr = signal;
	} else
		return 0;

	/* Fold one or two sigset words into a single 64-bit value. */
	switch (_NSIG_WORDS)
	{
	case 1:
		sigset = (ulonglong)sigset_ptr[0];
		break;

	case 2:
		sigset = (ulonglong)(sigset_ptr[1]) << 32;
		sigset |= (ulonglong)(sigset_ptr[0]);
		break;
	}

	return sigset;
}

/*
 * Return the current set of signals that a task has blocked, in the form
 * of a long long data type form that can be easily masked regardless
 * of its size.
 */
static ulonglong
task_blocked(ulong task)
{
	ulonglong sigset;
	ulong *sigset_ptr;

	fill_task_struct(task);
	if (!tt->last_task_read)
		return 0;

	sigset_ptr = (ulong *)(tt->task_struct +
		OFFSET(task_struct_blocked));
	/* NOTE(review): unlike task_signal()/sigaction_mask(), this always
	 * folds two words regardless of _NSIG_WORDS -- confirm intended. */
	sigset = (ulonglong)(sigset_ptr[1]) << 32;
	sigset |= (ulonglong)(sigset_ptr[0]);

	return sigset;
}

/*
 * Fold a sigaction's sa_mask into a single 64-bit value.
 */
static ulonglong
sigaction_mask(ulong sigaction)
{
	ulonglong sigset;
	ulong *sigset_ptr;

	sigset = 0;
	sigset_ptr = (ulong *)(sigaction + OFFSET(sigaction_sa_mask));

	switch (_NSIG_WORDS)
	{
	case 1:
		sigset = (ulonglong)sigset_ptr[0];
		break;

	case 2:
		sigset = (ulonglong)(sigset_ptr[1]) << 32;
		sigset |= (ulonglong)(sigset_ptr[0]);
		break;
	}

	return sigset;
}

/*
 * Deal with potential separation of task_struct and kernel stack.
 */
ulong
generic_get_stackbase(ulong task)
{
	return task_to_stackbase(task);
}

ulong
generic_get_stacktop(ulong task)
{
	return task_to_stackbase(task) + STACKSIZE();
}

#define STACK_END_MAGIC 0x57AC6E9D

/*
 * Probe a few low-PID tasks for the kernel's STACK_END_MAGIC marker;
 * when found, record it so stack-overflow checking can use it.
 */
static void
stack_overflow_check_init(void)
{
	int pid;
	struct task_context *tc;
	ulong location, magic;

	if (!(tt->flags & THREAD_INFO))
		return;

	for (pid = 1; pid < 10; pid++) {
		if (!(tc = pid_to_context(pid)))
			continue;

		/* The magic word sits at the stack base, which is just past
		 * the thread_info unless thread_info lives in task_struct. */
		if (tt->flags & THREAD_INFO_IN_TASK)
			location = task_to_stackbase(tc->task);
		else
			location = tc->thread_info + SIZE(thread_info);

		if (!readmem(location, KVADDR, &magic, sizeof(long),
		    "stack magic", RETURN_ON_ERROR|QUIET))
			continue;

		if (magic == STACK_END_MAGIC) {
			tt->stack_end_magic = STACK_END_MAGIC;
			break;
		}
	}
}

/*
 * Check thread_info.task and thread_info.cpu members,
 * and the STACK_END_MAGIC location.
 */
void
check_stack_overflow(void)
{
	int i, overflow, cpu_size, cpu, total;
	char buf[BUFSIZE];
	ulong magic, task, stackbase, location;
	struct task_context *tc;

	if (!tt->stack_end_magic &&
	    INVALID_MEMBER(thread_info_task) &&
	    INVALID_MEMBER(thread_info_cpu))
		option_not_supported('v');

	cpu_size = VALID_MEMBER(thread_info_cpu) ?
MEMBER_SIZE("thread_info", "cpu") : 0; tc = FIRST_CONTEXT(); for (i = total = 0; i < RUNNING_TASKS(); i++, tc++) { overflow = 0; if (tt->flags & THREAD_INFO_IN_TASK) { if (!readmem(task_to_stackbase(tc->task), KVADDR, &stackbase, sizeof(ulong), "stack overflow check", RETURN_ON_ERROR)) continue; goto check_stack_end_magic; } else { if (!readmem(tc->thread_info, KVADDR, buf, SIZE(thread_info) + sizeof(ulong), "stack overflow check", RETURN_ON_ERROR)) continue; } if (VALID_MEMBER(thread_info_task)) { task = ULONG(buf + OFFSET(thread_info_task)); if (task != tc->task) { print_task_header(fp, tc, 0); fprintf(fp, " possible stack overflow: thread_info.task: %lx != %lx\n", task, tc->task); overflow++; total++; } } if (VALID_MEMBER(thread_info_cpu)) { int cpus = get_cpus_present(); switch (cpu_size) { case 1: cpu = UCHAR(buf + OFFSET(thread_info_cpu)); break; case 2: cpu = USHORT(buf + OFFSET(thread_info_cpu)); break; case 4: cpu = UINT(buf + OFFSET(thread_info_cpu)); break; default: cpu = 0; break; } if (cpu >= cpus) { if (!overflow) print_task_header(fp, tc, 0); fprintf(fp, " possible stack overflow: thread_info.cpu: %d >= %d\n", cpu, cpus); overflow++; total++; } } check_stack_end_magic: if (!tt->stack_end_magic) continue; if (tt->flags & THREAD_INFO_IN_TASK) magic = stackbase; else magic = ULONG(buf + SIZE(thread_info)); if (tc->pid == 0) { if (kernel_symbol_exists("init_task")) { if (tc->task == symbol_value("init_task")) continue; } else continue; } if (magic != STACK_END_MAGIC) { if (!overflow) print_task_header(fp, tc, 0); if (tt->flags & THREAD_INFO_IN_TASK) location = task_to_stackbase(tc->task); else location = tc->thread_info + SIZE(thread_info); fprintf(fp, " possible stack overflow: %lx: %lx != STACK_END_MAGIC\n", location, magic); overflow++, total++; } if (overflow) fprintf(fp, "\n"); } if (!total) fprintf(fp, "No stack overflows detected\n"); } void crash_get_current_task_info(unsigned long *pid, char **comm) { struct task_context *tc = CURRENT_CONTEXT(); 
*pid = tc->pid; *comm = tc->comm; } crash-utility-crash-9cd43f5/lkcd_dump_v5.h0000664000372000037200000002206515107550337020126 0ustar juerghjuergh/* lkcd_dump_v5.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net) * Copyright 2001 Matt D. Robinson. All rights reserved. * * Most of this is the same old stuff from vmdump.h, except now we're * actually a stand-alone driver plugged into the block layer interface, * with the exception that we now allow for compression modes externally * loaded (e.g., someone can come up with their own). */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _DUMP_H #define _DUMP_H //#include /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #ifndef MCLX /* * MCLX NOTE: the architecture-specific headers are being ignored until * deemed necessary; crash has never used them functionally, and only * referencing them in the dump_sgi_environment() helper routines. 
*/ /* necessary header files */ #include /* for architecture-specific header */ #endif #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) 
*/ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ void *dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. 
*/ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; #ifdef __KERNEL__ /* * Structure: dump_compress_t * Function: This is what an individual compression mechanism can use * to plug in their own compression techniques. It's always * best to build these as individual modules so that people * can put in whatever they want. */ typedef struct dump_compress_s { /* the list_head structure for list storage */ struct list_head list; /* the type of compression to use (DUMP_COMPRESS_XXX) */ int compress_type; /* the compression function to call */ int (*compress_func)(char *, int, char *, int); } dump_compress_t; extern int dump_init(void); extern void dump_execute(char *, struct pt_regs *); extern int page_is_ram(unsigned long); #endif /* __KERNEL__ */ #endif /* _DUMP_H */ crash-utility-crash-9cd43f5/lkcd_v7.c0000664000372000037200000003513615107550337017101 0ustar juerghjuergh/* lkcd_v7.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002 Silicon Graphics, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #define LKCD_COMMON #include "defs.h" #include "lkcd_dump_v5.h" /* REMIND */ static dump_header_t dump_header_v7 = { 0 }; static dump_page_t dump_page = { 0 }; static void mclx_cache_page_headers_v7(void); /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. */ int lkcd_dump_init_v7(FILE *fp, int fd, char *dumpfile) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_page_t *dp; int dump_index_size ATTRIBUTE_UNUSED; int dump_index_created ATTRIBUTE_UNUSED; static char dumpfile_index_name[128]; int ifd ATTRIBUTE_UNUSED; lkcd->fd = fd; lkcd->fp = fp; dump_index_created = 0; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v7; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; lkcd->dump_page = dp; lkcd->dump_header = dh; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); #ifdef IA64 if ( (fix_addr_v7(fd) == -1) ) return FALSE; #endif /* * Allocate and clear the benchmark offsets, one per megabyte. */ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->total_pages = dh->dh_num_pages; /* * REMIND: dh_memory_size should be in physical pages and seems to be wrong. * pad by two for now; 3DFE8 should be 40000. 
*/ lkcd->memory_pages = dh->dh_memory_size; lkcd->page_offsets = 0; lkcd->ifd = -1; lkcd->dumpfile_index = NULL; /* Keep from getting unused warnings */ dump_index_size = 0; dump_index_created = 0; strcpy(dumpfile_index_name, dumpfile); ifd = 0; #ifdef LKCD_INDEX_FILE if (dh->dh_memory_end < 0x1000000000LL) { lkcd->memory_pages = dh->dh_memory_end / lkcd->page_size + 1; } else { lkcd->memory_pages = (dh->dh_memory_size * (getpagesize()/lkcd->page_size)) * 2; } dump_index_size = (lkcd->memory_pages * sizeof(off_t)); lkcd->page_offsets = 0; strcpy(dumpfile_index_name, dumpfile); lkcd->dumpfile_index = strcat(dumpfile_index_name, ".index"); ifd = open(lkcd->dumpfile_index, O_RDWR, 0644); if( ifd < 0 ) { int err; ifd = open(lkcd->dumpfile_index, (O_RDWR | O_CREAT), 0644); if (ifd > 0) { err = ftruncate(ifd, dump_index_size); if (err == -1) { lkcd->dumpfile_index = NULL; close(ifd); ifd = -1; } else { dump_index_created++; } } } if (ifd >= 0) { /* MAP_SHARED so we can sync the file */ lkcd->page_offsets = mmap( (void *)0, dump_index_size, (PROT_READ | PROT_WRITE), MAP_SHARED, ifd, (off_t)0); if (lkcd->page_offsets == MAP_FAILED) { close(ifd); ifd = -1; lkcd->dumpfile_index = NULL; lkcd->page_offsets = 0; } } lkcd->ifd = ifd; #endif lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->zoned_offsets = 0; lkcd->get_dp_flags = get_dp_flags_v7; lkcd->get_dp_address = get_dp_address_v7; lkcd->get_dp_size = get_dp_size_v7; lkcd->compression = dh->dh_dump_compress; lkcd->page_header_size = sizeof(dump_page_t); lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); /* * Read all of the pages and save the page offsets for lkcd_lseek(). 
*/ for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: lkcd_print("reached EOF\n"); eof = TRUE; continue; } if (dp->dp_flags & ~(DUMP_DH_COMPRESSED|DUMP_DH_RAW|DUMP_DH_END|LKCD_DUMP_MCLX_V0)) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1)) lkcd->flags |= LKCD_MCLX; if (dp->dp_size > 4096) { lkcd_print("dp_size > 4096: %d\n", dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_DH_END) { lkcd_print("found DUMP_DH_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(2)) break; } /* * Allocate space for LKCD_CACHED_PAGES data pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. */ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (dh->dh_version & LKCD_DUMP_MCLX_V1) mclx_cache_page_headers_v7(); if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v7(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. 
*/ uint32_t get_dp_flags_v7(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_flags); } /* * Return the current page's dp_address. */ uint64_t get_dp_address_v7(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_address); } /* * help -S output, or as specified by arg. */ void dump_lkcd_environment_v7(ulong arg) { int others; dump_header_t *dh; dump_page_t *dp; dh = (dump_header_t *)lkcd->dump_header; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else if (dh->dh_magic_number == DUMP_MAGIC_LIVE) lkcd_print("(DUMP_MAGIC_LIVE)\n"); else lkcd_print("(?)\n"); others = 0; lkcd_print(" dh_version: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version); switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) { case LKCD_DUMP_V1: lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : ""); break; case LKCD_DUMP_V2: lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : ""); break; case LKCD_DUMP_V3: lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : ""); break; case LKCD_DUMP_V5: lkcd_print("%sLKCD_DUMP_V5", others++ ? "|" : ""); break; case LKCD_DUMP_V7: lkcd_print("%sLKCD_DUMP_V7", others++ ? "|" : ""); break; case LKCD_DUMP_V8: lkcd_print("%sLKCD_DUMP_V8", others++ ? "|" : ""); break; } if (dh->dh_version & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); if (dh->dh_version & LKCD_DUMP_MCLX_V1) lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_header_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: "); lkcd_print(BITS32() ? 
"%lx (" : "%x (", dh->dh_dump_level); others = 0; if (dh->dh_dump_level & DUMP_LEVEL_HEADER) lkcd_print("%sDUMP_LEVEL_HEADER", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_KERN) lkcd_print("%sDUMP_LEVEL_KERN", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_USED) lkcd_print("%sDUMP_LEVEL_USED", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_ALL) lkcd_print("%sDUMP_LEVEL_ALL", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_page_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: "); lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end); lkcd_print(" dh_num_pages: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && strstr(dh->dh_panic_string, "\n") ? "" : "\n"); lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(dh->dh_time.tv_sec)))); lkcd_print("dh_utsname_sysname: %s\n", dh->dh_utsname_sysname); lkcd_print("dh_utsname_nodename: %s\n", dh->dh_utsname_nodename); lkcd_print("dh_utsname_release: %s\n", dh->dh_utsname_release); lkcd_print("dh_utsname_version: %s\n", dh->dh_utsname_version); lkcd_print("dh_utsname_machine: %s\n", dh->dh_utsname_machine); lkcd_print("dh_utsname_domainname: %s\n", dh->dh_utsname_domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print(" dh_dump_compress: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_compress); others = 0; if (dh->dh_dump_compress == DUMP_COMPRESS_NONE) lkcd_print("%sDUMP_COMPRESS_NONE", others++ ? "|" : ""); if (dh->dh_dump_compress & DUMP_COMPRESS_RLE) lkcd_print("%sDUMP_COMPRESS_RLE", others++ ? "|" : ""); if (dh->dh_dump_compress & DUMP_COMPRESS_GZIP) lkcd_print("%sDUMP_COMPRESS_GZIP", others++ ? 
"|" : ""); lkcd_print(")\n"); lkcd_print(" dh_dump_flags: "); others = 0; lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_flags); if (dh->dh_dump_flags & DUMP_FLAGS_NONDISRUPT) lkcd_print("%sDUMP_FLAGS_NONDISRUPT", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_dump_device: "); lkcd_print(BITS32() ? "%lx\n" : "%x\n", dh->dh_dump_device); if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address); lkcd_print(" dp_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size); lkcd_print(" dp_flags: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_DH_COMPRESSED) lkcd_print("DUMP_DH_COMPRESSED", others++); if (dp->dp_flags & DUMP_DH_RAW) lkcd_print("%sDUMP_DH_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_DH_END) lkcd_print("%sDUMP_DH_END", others++ ? "|" : ""); if (dp->dp_flags & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); lkcd_print(")\n"); } void dump_dump_page_v7(char *s, void *dpp) { dump_page_t *dp; uint32_t flags; int others; console(s); dp = (dump_page_t *)dpp; others = 0; console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ", dp->dp_address); console("dp_size: %ld ", dp->dp_size); console("dp_flags: %lx (", flags = dp->dp_flags); if (flags & DUMP_DH_COMPRESSED) console("DUMP_DH_COMPRESSED", others++); if (flags & DUMP_DH_RAW) console("%sDUMP_DH_RAW", others++ ? "|" : ""); if (flags & DUMP_DH_END) console("%sDUMP_DH_END", others++ ? "|" : ""); if (flags & LKCD_DUMP_MCLX_V0) console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); console(")\n"); } /* * Read the MCLX-enhanced page header cache. Verify the first one, which * is a pointer to the page header for address 1MB, and take the rest at * blind faith. Note that the page headers do not include the 64K dump * header offset, which must be added to the values found. 
*/ static void mclx_cache_page_headers_v7(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; if (lseek(lkcd->fd, sizeof(dump_header_t), SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. */ if (lseek(lkcd->fd, page_headers[0] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + LKCD_OFFSET_TO_FIRST_PAGE; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-utility-crash-9cd43f5/lkcd_v8.c0000664000372000037200000004645515107550337017110 0ustar juerghjuergh/* lkcd_v8.c - core analysis suite * * Forward ported from lkcd_v7.c by Corey Mineyard * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002 Silicon Graphics, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define LKCD_COMMON #include "defs.h" #include "lkcd_dump_v8.h" /* REMIND */ static dump_header_t dump_header_v8 = { 0 }; #ifndef HAVE_NO_DUMP_HEADER_ASM static dump_header_asm_t dump_header_asm_v8 = { 0 }; #endif static dump_page_t dump_page = { 0 }; static void mclx_cache_page_headers_v8(void); static off_t lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; #if defined(X86_64) int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) { if (eip) *eip = dump_header_asm_v8.dha_smp_regs[cpu].rip; if (esp) *esp = dump_header_asm_v8.dha_smp_regs[cpu].rsp; return 0; } #elif defined(X86) int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) { if (eip) *eip = dump_header_asm_v8.dha_smp_regs[cpu].eip; if (esp) *esp = dump_header_asm_v8.dha_smp_regs[cpu].esp; return 0; } #else int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp) { return -1; } #endif int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp) { int cpu = bt->tc->processor; if (!bt || !bt->tc) { fprintf(stderr, "get_lkcd_regs_for_cpu_v8: invalid tc " "(CPU=%d)\n", cpu); return -EINVAL; } if (cpu >= NR_CPUS) { fprintf(stderr, "get_lkcd_regs_for_cpu_v8, cpu (%d) too high\n", cpu); return -EINVAL; } return get_lkcd_regs_for_cpu_arch(cpu, eip, esp); } #ifndef HAVE_NO_DUMP_HEADER_ASM int lkcd_dump_init_v8_arch(dump_header_t *dh) { off_t ret_of; ssize_t ret_sz; uint32_t hdr_size, offset, nr_cpus; dump_header_asm_t arch_hdr; char *hdr_buf = NULL; ret_of = lseek(lkcd->fd, 
dh->dh_header_size + offsetof(dump_header_asm_t, dha_header_size), SEEK_SET); if (ret_of < 0) { perror("lseek failed in " __FILE__ ":" STR(__LINE__)); goto err; } ret_sz = read(lkcd->fd, (char *)&hdr_size, sizeof(hdr_size)); if (ret_sz != sizeof(hdr_size)) { perror("Reading hdr_size failed in " __FILE__ ":" STR(__LINE__)); goto err; } ret_of = lseek(lkcd->fd, dh->dh_header_size, SEEK_SET); if (ret_of < 0) { perror("lseek failed in " __FILE__ ":" STR(__LINE__)); goto err; } hdr_buf = (char *)malloc(hdr_size); if (!hdr_buf) { perror("Could not allocate memory for dump header\n"); goto err; } ret_sz = read(lkcd->fd, (char *)hdr_buf, hdr_size); if (ret_sz != hdr_size) { perror("Could not read header " __FILE__ ":" STR(__LINE__)); goto err; } /* * Though we have KL_NR_CPUS is 128, the header size is different * CONFIG_NR_CPUS might be different in the kernel. Hence, need * to find out how many CPUs are configured. */ offset = offsetof(dump_header_asm_t, dha_smp_regs[0]); nr_cpus = (hdr_size - offset) / sizeof(dump_CPU_info_t); /* check for CPU overflow */ if (nr_cpus > NR_CPUS) { fprintf(stderr, "CPU number too high %d (%s:%d)\n", nr_cpus, __FILE__, __LINE__); goto err; } /* parts that don't depend on the number of CPUs */ memcpy(&arch_hdr, (void *)hdr_buf, offset); /* registers */ memcpy(&arch_hdr.dha_smp_regs, (void *)&hdr_buf[offset], nr_cpus * sizeof(struct pt_regs)); offset += nr_cpus * sizeof(struct pt_regs); /* current task */ memcpy(&arch_hdr.dha_smp_current_task, (void *)&hdr_buf[offset], nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0])); offset += nr_cpus * sizeof(&arch_hdr.dha_smp_current_task[0]); /* stack */ memcpy(&arch_hdr.dha_stack, (void *)&hdr_buf[offset], nr_cpus * sizeof(&arch_hdr.dha_stack[0])); offset += nr_cpus * sizeof(&arch_hdr.dha_stack[0]); /* stack_ptr */ memcpy(&arch_hdr.dha_stack_ptr, (void *)&hdr_buf[offset], nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0])); offset += nr_cpus * sizeof(&arch_hdr.dha_stack_ptr[0]); if 
(arch_hdr.dha_magic_number != DUMP_ASM_MAGIC_NUMBER) { fprintf(stderr, "Invalid magic number for x86_64\n"); goto err; } /* * read the kernel load address on IA64 -- other architectures have * no relocatable kernel at the lifetime of LKCD */ #ifdef IA64 memcpy(&arch_hdr.dha_kernel_addr, (void *)&hdr_buf[offset], sizeof(uint64_t)); #endif memcpy(&dump_header_asm_v8, &arch_hdr, sizeof(dump_header_asm_t)); free(hdr_buf); return 0; err: free(hdr_buf); return -1; } #else /* architecture that has no lkcd_dump_init_v8 */ int lkcd_dump_init_v8_arch(dump_header_t *dh) { return 0; } #endif /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. */ int lkcd_dump_init_v8(FILE *fp, int fd, char *dumpfile) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_page_t *dp; int dump_index_size ATTRIBUTE_UNUSED; int dump_index_created ATTRIBUTE_UNUSED; static char dumpfile_index_name[128]; int ifd ATTRIBUTE_UNUSED; uint64_t dh_dump_buffer_size; lkcd->fd = fd; lkcd->fp = fp; dump_index_created = 0; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v8; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; if ((dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9){ if (read(lkcd->fd, &dh_dump_buffer_size, sizeof(dh_dump_buffer_size)) != sizeof(dh_dump_buffer_size)) return FALSE; lkcd_offset_to_first_page = dh_dump_buffer_size; } else lkcd_offset_to_first_page = LKCD_OFFSET_TO_FIRST_PAGE; lkcd->dump_page = dp; lkcd->dump_header = dh; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); if (lkcd_dump_init_v8_arch(dh) != 0) { fprintf(stderr, "Warning: Failed to initialise " "arch specific dump code\n"); } #ifdef IA64 if ( (fix_addr_v8(&dump_header_asm_v8) == -1) ) return FALSE; #endif /* * Allocate and clear the benchmark offsets, one per megabyte. 
*/ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->total_pages = dh->dh_num_pages; /* * REMIND: dh_memory_size should be in physical pages and seems to be wrong. * pad by two for now; 3DFE8 should be 40000. */ lkcd->memory_pages = dh->dh_memory_size; lkcd->page_offsets = 0; lkcd->ifd = -1; lkcd->dumpfile_index = NULL; /* Keep from getting unused warnings */ dump_index_size = 0; dump_index_created = 0; strcpy(dumpfile_index_name, dumpfile); ifd = 0; #ifdef LKCD_INDEX_FILE lkcd->memory_pages = dh->dh_memory_size * 2; dump_index_size = (lkcd->memory_pages * sizeof(off_t)); lkcd->page_offsets = 0; strcpy(dumpfile_index_name, dumpfile); lkcd->dumpfile_index = strcat(dumpfile_index_name, ".index"); ifd = open(lkcd->dumpfile_index, O_RDWR, 0644); if( ifd < 0 ) { int err; ifd = open(lkcd->dumpfile_index, (O_RDWR | O_CREAT), 0644); if (ifd > 0) { err = ftruncate(ifd, dump_index_size); if (err == -1) { lkcd->dumpfile_index = NULL; close(ifd); ifd = -1; } else { dump_index_created++; } } } if (ifd >= 0) { /* MAP_SHARED so we can sync the file */ lkcd->page_offsets = mmap( (void *)0, dump_index_size, (PROT_READ | PROT_WRITE), MAP_SHARED, ifd, (off_t)0); if (lkcd->page_offsets == MAP_FAILED) { close(ifd); ifd = -1; lkcd->dumpfile_index = NULL; lkcd->page_offsets = 0; } } lkcd->ifd = ifd; #endif lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->zoned_offsets = 0; lkcd->get_dp_flags = get_dp_flags_v8; lkcd->get_dp_address = get_dp_address_v8; lkcd->get_dp_size = get_dp_size_v8; lkcd->compression = dh->dh_dump_compress; lkcd->page_header_size = sizeof(dump_page_t); lseek(lkcd->fd, lkcd_offset_to_first_page, SEEK_SET); /* * Read all of the pages and save the page offsets for lkcd_lseek(). 
*/ for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: lkcd_print("reached EOF\n"); eof = TRUE; continue; } if (dp->dp_flags & ~(DUMP_DH_COMPRESSED|DUMP_DH_RAW|DUMP_DH_END|LKCD_DUMP_MCLX_V0)) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_flags & (LKCD_DUMP_MCLX_V0|LKCD_DUMP_MCLX_V1)) lkcd->flags |= LKCD_MCLX; if (dp->dp_size > dh->dh_page_size) { lkcd_print("dp_size > %d: %d\n", dh->dh_page_size, dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_DH_END) { lkcd_print("found DUMP_DH_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(2)) break; } /* * Allocate space for LKCD_CACHED_PAGES data pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. */ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (dh->dh_version & LKCD_DUMP_MCLX_V1) mclx_cache_page_headers_v8(); if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v8(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. 
*/ uint32_t get_dp_flags_v8(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_flags); } /* * Return the current page's dp_address. */ uint64_t get_dp_address_v8(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_address); } /* * help -S output, or as specified by arg. */ void dump_lkcd_environment_v8(ulong arg) { int others; dump_header_t *dh; dump_page_t *dp; struct timeval tv; dh = (dump_header_t *)lkcd->dump_header; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: "); lkcd_print(BITS32() ? "%llx " : "%lx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else if (dh->dh_magic_number == DUMP_MAGIC_LIVE) lkcd_print("(DUMP_MAGIC_LIVE)\n"); else lkcd_print("(?)\n"); others = 0; lkcd_print(" dh_version: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_version); switch (dh->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) { case LKCD_DUMP_V1: lkcd_print("%sLKCD_DUMP_V1", others++ ? "|" : ""); break; case LKCD_DUMP_V2: lkcd_print("%sLKCD_DUMP_V2", others++ ? "|" : ""); break; case LKCD_DUMP_V3: lkcd_print("%sLKCD_DUMP_V3", others++ ? "|" : ""); break; case LKCD_DUMP_V5: lkcd_print("%sLKCD_DUMP_V5", others++ ? "|" : ""); break; case LKCD_DUMP_V7: lkcd_print("%sLKCD_DUMP_V7", others++ ? "|" : ""); break; case LKCD_DUMP_V8: lkcd_print("%sLKCD_DUMP_V8", others++ ? "|" : ""); break; case LKCD_DUMP_V9: lkcd_print("%sLKCD_DUMP_V9", others++ ? "|" : ""); break; } if (dh->dh_version & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); if (dh->dh_version & LKCD_DUMP_MCLX_V1) lkcd_print("%sLKCD_DUMP_MCLX_V1", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_header_size: "); lkcd_print(BITS32() ? 
"%ld\n" : "%d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_level); others = 0; if (dh->dh_dump_level & DUMP_LEVEL_HEADER) lkcd_print("%sDUMP_LEVEL_HEADER", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_KERN) lkcd_print("%sDUMP_LEVEL_KERN", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_USED) lkcd_print("%sDUMP_LEVEL_USED", others++ ? "|" : ""); if (dh->dh_dump_level & DUMP_LEVEL_ALL) lkcd_print("%sDUMP_LEVEL_ALL", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_page_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: "); lkcd_print(BITS32() ? "%lld\n" : "%ld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dh->dh_memory_end); lkcd_print(" dh_num_pages: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && strstr(dh->dh_panic_string, "\n") ? "" : "\n"); tv.tv_sec = dh->dh_time.tv_sec; lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(tv.tv_sec)))); lkcd_print("dh_utsname_sysname: %s\n", dh->dh_utsname_sysname); lkcd_print("dh_utsname_nodename: %s\n", dh->dh_utsname_nodename); lkcd_print("dh_utsname_release: %s\n", dh->dh_utsname_release); lkcd_print("dh_utsname_version: %s\n", dh->dh_utsname_version); lkcd_print("dh_utsname_machine: %s\n", dh->dh_utsname_machine); lkcd_print("dh_utsname_domainname: %s\n", dh->dh_utsname_domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print(" dh_dump_compress: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_compress); others = 0; if (dh->dh_dump_compress == DUMP_COMPRESS_NONE) lkcd_print("%sDUMP_COMPRESS_NONE", others++ ? "|" : ""); if (dh->dh_dump_compress & DUMP_COMPRESS_RLE) lkcd_print("%sDUMP_COMPRESS_RLE", others++ ? 
"|" : ""); if (dh->dh_dump_compress & DUMP_COMPRESS_GZIP) lkcd_print("%sDUMP_COMPRESS_GZIP", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_dump_flags: "); others = 0; lkcd_print(BITS32() ? "%lx (" : "%x (", dh->dh_dump_flags); if (dh->dh_dump_flags & DUMP_FLAGS_NONDISRUPT) lkcd_print("%sDUMP_FLAGS_NONDISRUPT", others++ ? "|" : ""); lkcd_print(")\n"); lkcd_print(" dh_dump_device: "); lkcd_print(BITS32() ? "%lx\n" : "%x\n", dh->dh_dump_device); if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: "); lkcd_print(BITS32() ? "%llx\n" : "%lx\n", dp->dp_address); lkcd_print(" dp_size: "); lkcd_print(BITS32() ? "%ld\n" : "%d\n", dp->dp_size); lkcd_print(" dp_flags: "); lkcd_print(BITS32() ? "%lx (" : "%x (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_DH_COMPRESSED) lkcd_print("DUMP_DH_COMPRESSED", others++); if (dp->dp_flags & DUMP_DH_RAW) lkcd_print("%sDUMP_DH_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_DH_END) lkcd_print("%sDUMP_DH_END", others++ ? "|" : ""); if (dp->dp_flags & LKCD_DUMP_MCLX_V0) lkcd_print("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); lkcd_print(")\n"); } void dump_dump_page_v8(char *s, void *dpp) { dump_page_t *dp; uint32_t flags; int others; console(s); dp = (dump_page_t *)dpp; others = 0; console(BITS32() ? "dp_address: %llx " : "dp_address: %lx ", dp->dp_address); console("dp_size: %ld ", dp->dp_size); console("dp_flags: %lx (", flags = dp->dp_flags); if (flags & DUMP_DH_COMPRESSED) console("DUMP_DH_COMPRESSED", others++); if (flags & DUMP_DH_RAW) console("%sDUMP_DH_RAW", others++ ? "|" : ""); if (flags & DUMP_DH_END) console("%sDUMP_DH_END", others++ ? "|" : ""); if (flags & LKCD_DUMP_MCLX_V0) console("%sLKCD_DUMP_MCLX_V0", others++ ? "|" : ""); console(")\n"); } /* * Read the MCLX-enhanced page header cache. Verify the first one, which * is a pointer to the page header for address 1MB, and take the rest at * blind faith. 
Note that the page headers do not include the 64K dump * header offset, which must be added to the values found. */ static void mclx_cache_page_headers_v8(void) { int i; uint64_t physaddr1, physaddr2, page_headers[MCLX_PAGE_HEADERS]; dump_page_t dump_page, *dp; ulong granularity; size_t dh_size; if (LKCD_DEBUG(2)) /* dump headers have all been read */ return; if (lkcd->total_pages > MEGABYTES(1))/* greater than 4G not supported */ return; dh_size = sizeof(dump_header_t); if ((((dump_header_t *)lkcd->dump_header)->dh_version & LKCD_DUMP_VERSION_NUMBER_MASK) == LKCD_DUMP_V9) dh_size += sizeof(uint64_t); if (lseek(lkcd->fd, dh_size, SEEK_SET) == -1) return; if (read(lkcd->fd, page_headers, MCLX_V1_PAGE_HEADER_CACHE) != MCLX_V1_PAGE_HEADER_CACHE) return; dp = &dump_page; /* * Determine the granularity between offsets. */ if (lseek(lkcd->fd, page_headers[0] + lkcd_offset_to_first_page, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr1 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if (lseek(lkcd->fd, page_headers[1] + lkcd_offset_to_first_page, SEEK_SET) == -1) return; if (read(lkcd->fd, dp, lkcd->page_header_size) != lkcd->page_header_size) return; physaddr2 = (dp->dp_address - lkcd->kvbase) << lkcd->page_shift; if ((physaddr1 % MEGABYTES(1)) || (physaddr2 % MEGABYTES(1)) || (physaddr2 < physaddr1)) return; granularity = physaddr2 - physaddr1; for (i = 0; i < (MCLX_PAGE_HEADERS-1); i++) { if (!page_headers[i]) break; lkcd->curhdroffs = page_headers[i] + lkcd_offset_to_first_page; set_mb_benchmark((granularity * (i+1))/lkcd->page_size); } } crash-utility-crash-9cd43f5/s390_dump.c0000664000372000037200000000425415107550337017270 0ustar juerghjuergh/* s390_dump.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. 
* Copyright (C) 2005 Michael Holzheu, IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" //#include #include "ibm_common.h" static FILE * s390_file; int is_s390_dump(char *file) { FILE* fh; long long int magic; size_t items ATTRIBUTE_UNUSED; int rc; fh = fopen(file,"r"); if (fh == NULL) { error(INFO, "is_s390_dump: cannot open %s: %s\n", file); return FALSE; } items = fread(&magic, sizeof(magic), 1,fh); if(magic == 0xa8190173618f23fdLL) rc = TRUE; else rc = FALSE; fclose(fh); return rc; } FILE* s390_dump_init(char *file) { if ((s390_file = fopen(file, "r+")) == NULL) { if ((s390_file = fopen(file, "r")) == NULL) return NULL; } return s390_file; } int read_s390_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { paddr += S390_DUMP_HEADER_SIZE; if (fseek(s390_file, (ulong)paddr, SEEK_SET) != 0) return SEEK_ERROR; if (fread(bufptr, 1 , cnt, s390_file) != cnt) return READ_ERROR; return 0; } int write_s390_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return WRITE_ERROR; } #define S390_PAGE_SHIFT 12 #define S390_PAGE_SIZE (1UL << S390_PAGE_SHIFT) uint s390_page_size(void) { return S390_PAGE_SIZE; } int s390_memory_used(void) { return 0; } int s390_free_memory(void) { return 0; } int s390_memory_dump(FILE *fp) { return 0; } ulong get_s390_panic_task(void) { return BADVAL; } void get_s390_panicmsg(char *buf) { return; } crash-utility-crash-9cd43f5/sadump.c0000664000372000037200000013051615107550337017037 0ustar juerghjuergh/* * sadump.h - core analysis 
suite * * Copyright (c) 2011 FUJITSU LIMITED * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: HATAYAMA Daisuke */ #include "defs.h" #include "sadump.h" #include /* htonl, htons */ #include #include enum { failed = -1 }; static struct sadump_data sadump_data = { 0 }; static struct sadump_data *sd = &sadump_data; static int read_device(void *buf, size_t bytes, ulong *offset); static int read_dump_header(char *file); static int add_disk(char *file); static int open_dump_file(char *file); static int open_disk(char *file); static uint64_t paddr_to_pfn(physaddr_t paddr); static inline int is_set_bit(char *bitmap, uint64_t pfn); static inline int page_is_ram(uint64_t nr); static inline int page_is_dumpable(uint64_t nr); static int lookup_diskset(uint64_t whole_offset, int *diskid, uint64_t *disk_offset); static struct tm *efi_time_t_to_tm(const efi_time_t *e); static char * guid_to_str(efi_guid_t *guid, char *buf, size_t buflen); static int verify_magic_number(uint32_t magicnum[DUMP_PART_HEADER_MAGICNUM_SIZE]); static ulong per_cpu_ptr(ulong ptr, int cpu); static ulong early_per_cpu_ptr(char *symbol, struct syment *sym, int cpu); static ulong legacy_per_cpu_ptr(ulong ptr, int cpu); static int get_prstatus_from_crash_notes(int cpu, char *prstatus); static void display_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *s); static int cpu_to_apicid(int cpu, int *apicid); static int get_sadump_smram_cpu_state(int cpu, struct sadump_smram_cpu_state *smram); static int block_table_init(void); static uint64_t 
pfn_to_block(uint64_t pfn); static void mask_reserved_fields(struct sadump_smram_cpu_state *smram); struct sadump_data * sadump_get_sadump_data(void) { if (!SADUMP_VALID() || !SADUMP_DUMPFILE()) return NULL; return &sadump_data; } int sadump_cleanup_sadump_data(void) { int i; if (!SADUMP_VALID() || !SADUMP_DUMPFILE()) return FALSE; if (sd->flags & SADUMP_DISKSET) { for (i = 1; i < sd->sd_list_len; ++i) { if (sd->sd_list[i]->dfd) close(sd->sd_list[i]->dfd); free(sd->sd_list[i]->header); free(sd->sd_list[i]); } } close(sd->dfd); free(sd->header); free(sd->dump_header); free(sd->diskset_header); free(sd->bitmap); free(sd->dumpable_bitmap); free(sd->page_buf); free(sd->block_table); if (sd->sd_list[0]) free(sd->sd_list[0]); free(sd->sd_list); memset(&sadump_data, 0, sizeof(sadump_data)); pc->flags &= ~SADUMP; pc->dumpfile = NULL; pc->readmem = NULL; pc->writemem = NULL; return TRUE; } static int read_device(void *buf, size_t bytes, ulong *offset) { if (lseek(sd->dfd, *offset, SEEK_SET) == failed) { error(INFO, "sadump: cannot lseek dump device\n"); return FALSE; } if (read(sd->dfd, buf, bytes) < bytes) { error(INFO, "sadump: cannot read dump device\n"); return FALSE; } *offset += bytes; return TRUE; } static int read_dump_header(char *file) { struct sadump_part_header *sph = NULL; struct sadump_header *sh = NULL; struct sadump_disk_set_header *new, *sdh = NULL; struct sadump_media_header *smh = NULL; struct sadump_diskset_data *sd_list_len_0 = NULL; size_t block_size = SADUMP_DEFAULT_BLOCK_SIZE; ulong flags = 0; ulong offset = 0, sub_hdr_offset, data_offset; uint32_t smram_cpu_state_size = 0; ulong bitmap_len, dumpable_bitmap_len; char *bitmap = NULL, *dumpable_bitmap = NULL, *page_buf = NULL; char guid1[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; char guid2[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; sph = malloc(block_size); if (!sph) { error(INFO, "sadump: cannot allocate partition header buffer\n"); goto err; } sdh = malloc(block_size); if (!sdh) { error(INFO, "sadump: cannot allocate 
disk set header buffer\n"); goto err; } sh = malloc(block_size); if (!sh) { error(INFO, "sadump: cannot allocate dump header buffer\n"); goto err; } smh = malloc(block_size); if (!smh) { error(INFO, "sadump: cannot allocate media header buffer\n"); goto err; } restart: if (!read_device(sph, block_size, &offset)) { error(INFO, "sadump: cannot read partition header\n"); goto err; } if (sph->signature1 != SADUMP_SIGNATURE1 || sph->signature2 != SADUMP_SIGNATURE2) { flags |= SADUMP_MEDIA; if (CRASHDEBUG(1)) error(INFO, "sadump: read dump device as media " "format\n"); offset = 0; if (!read_device(smh, block_size, &offset)) { error(INFO, "sadump: cannot read media header\n"); goto err; } if (!read_device(sph, block_size, &offset)) { error(INFO, "sadump: cannot read partition header\n"); goto err; } if (sph->signature1 != SADUMP_SIGNATURE1 || sph->signature2 != SADUMP_SIGNATURE2) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not have partition " "header\n"); goto err; } } if (!verify_magic_number(sph->magicnum)) { error(INFO, "sadump: invalid magic number\n"); goto err; } if (!(flags & SADUMP_MEDIA) && sph->set_disk_set) { uint32_t header_blocks; size_t header_size; flags |= SADUMP_DISKSET; if (CRASHDEBUG(1)) error(INFO, "sadump: read dump device as diskset\n"); if (sph->set_disk_set != 1 || sph->set_disk_set > SADUMP_MAX_DISK_SET_NUM) { if (CRASHDEBUG(1)) error(INFO, "sadump: invalid disk set number: " "%d\n", sph->set_disk_set); goto err; } if (!read_device(&header_blocks, sizeof(uint32_t), &offset)) { error(INFO, "sadump: cannot read disk set header " "size\n"); goto err; } offset -= sizeof(uint32_t); header_size = header_blocks * block_size; if (header_size > block_size) { new = realloc(sdh, header_size); if (!new) { error(INFO, "sadump: cannot re-allocate disk " "set buffer\n"); goto err; } sdh = new; } if (!read_device(sdh, header_size, &offset)) { error(INFO, "sadump: cannot read disk set header\n"); goto err; } } if (!read_device(sh, block_size, &offset)) { 
error(INFO, "sadump: cannot read dump header\n"); goto err; } sub_hdr_offset = offset; if (strncmp(sh->signature, SADUMP_SIGNATURE, 8) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not have dump header\n"); goto err; } if (flags & SADUMP_MEDIA) { if (memcmp(&sph->sadump_id, &smh->sadump_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: system ID mismatch\n" " partition header: %s\n" " media header: %s\n", guid_to_str(&sph->sadump_id, guid1, sizeof(guid1)), guid_to_str(&smh->sadump_id, guid2, sizeof(guid2))); goto err; } if (memcmp(&sph->disk_set_id, &smh->disk_set_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: disk set ID mismatch\n" " partition header: %s\n" " media header: %s\n", guid_to_str(&sph->disk_set_id, guid1, sizeof(guid1)), guid_to_str(&smh->disk_set_id, guid2, sizeof(guid2))); goto err; } if (memcmp(&sph->time_stamp, &smh->time_stamp, sizeof(efi_time_t)) != 0) { if (CRASHDEBUG(1)) { error(INFO, "sadump: time stamp mismatch\n"); error(INFO, "sadump: partition header: %s\n", strip_linefeeds(asctime (efi_time_t_to_tm (&sph->time_stamp)))); error(INFO, "sadump: media header: %s\n", strip_linefeeds(asctime (efi_time_t_to_tm (&smh->time_stamp)))); } } if (smh->sequential_num != 1) { error(INFO, "sadump: first media file has sequential " "number %d\n", smh->sequential_num); goto err; } } if (sh->block_size != block_size) { block_size = sh->block_size; offset = 0; goto restart; } if (CRASHDEBUG(1)) { if (flags & SADUMP_MEDIA) error(INFO, "sadump: media backup file\n"); else if (flags & SADUMP_DISKSET) error(INFO, "sadump: diskset configuration with %d " "disks\n", sdh->disk_num); else error(INFO, "sadump: single partition " "configuration\n"); } flags |= SADUMP_LOCAL; switch (sh->header_version) { case 0: sd->max_mapnr = (uint64_t)sh->max_mapnr; break; default: error(WARNING, "sadump: unsupported header version: %u\n" "sadump: assuming header version: 1\n", sh->header_version); case 1: sd->max_mapnr = 
sh->max_mapnr_64; break; } if (sh->sub_hdr_size > 0) { if (!read_device(&smram_cpu_state_size, sizeof(uint32_t), &offset)) { error(INFO, "sadump: cannot read SMRAM CPU STATE size\n"); goto err; } smram_cpu_state_size /= sh->nr_cpus; offset -= sizeof(uint32_t); offset += sh->sub_hdr_size * block_size; } if (!sh->bitmap_blocks) { error(INFO, "sadump: bitmap_blocks is zero\n"); goto err; } bitmap_len = block_size * sh->bitmap_blocks; bitmap = calloc(bitmap_len, 1); if (!bitmap) { error(INFO, "sadump: cannot allocate memory for bitmap " "buffer\n"); goto err; } if (!read_device(bitmap, bitmap_len, &offset)) { error(INFO, "sadump: cannot read bitmap\n"); goto err; } if (!sh->dumpable_bitmap_blocks) { error(INFO, "sadump: dumpable_bitmap_blocks is zero\n"); goto err; } dumpable_bitmap_len = block_size * sh->dumpable_bitmap_blocks; dumpable_bitmap = calloc(dumpable_bitmap_len, 1); if (!dumpable_bitmap) { error(INFO, "sadump: cannot allocate memory for " "dumpable_bitmap buffer\n"); goto err; } if (!read_device(dumpable_bitmap, dumpable_bitmap_len, &offset)) { error(INFO, "sadump: cannot read dumpable bitmap\n"); goto err; } data_offset = offset; page_buf = malloc(block_size); if (!page_buf) { error(INFO, "sadump: cannot allocate page buffer\n"); goto err; } sd->filename = file; /* * Switch to zero excluded mode by default on sadump-related * formats because some Fujitsu troubleshooting software * assumes the behavior. 
*/ sd->flags = flags | SADUMP_ZERO_EXCLUDED; if (machine_type("X86")) sd->machine_type = EM_386; else if (machine_type("X86_64")) sd->machine_type = EM_X86_64; else { error(INFO, "sadump: unsupported machine type: %s\n", MACHINE_TYPE); goto err; } sd->data_offset = data_offset; sd->block_size = block_size; sd->block_shift = ffs(sd->block_size) - 1; sd->bitmap = bitmap; sd->dumpable_bitmap = dumpable_bitmap; sd->sub_hdr_offset = sub_hdr_offset; sd->smram_cpu_state_size = smram_cpu_state_size; sd->header = sph; sd->dump_header = sh; if (flags & SADUMP_DISKSET) sd->diskset_header = sdh; if (flags & SADUMP_MEDIA) sd->media_header = smh; sd->page_buf = page_buf; if (flags & SADUMP_DISKSET) { sd_list_len_0 = malloc(sizeof(struct sadump_diskset_data)); if (!sd_list_len_0) { error(INFO, "sadump: cannot allocate diskset data buffer\n"); goto err; } sd_list_len_0->filename = sd->filename; sd_list_len_0->dfd = sd->dfd; sd_list_len_0->header = sd->header; sd_list_len_0->data_offset = sd->data_offset; sd->sd_list = malloc(sizeof(struct sadump_diskset_data *)); if (!sd->sd_list) { error(INFO, "sadump: cannot allocate diskset list buffer\n"); goto err; } sd->sd_list_len = 1; sd->sd_list[0] = sd_list_len_0; } if (!block_table_init()) { error(INFO, "sadump: cannot initialize block hash table\n"); goto err; } if (!(flags & SADUMP_DISKSET)) free(sdh); if (!(flags & SADUMP_MEDIA)) free(smh); return TRUE; err: close(sd->dfd); free(sph); free(sdh); free(sh); free(smh); free(bitmap); free(dumpable_bitmap); free(page_buf); free(sd_list_len_0); free(sd->sd_list); return FALSE; } static int add_disk(char *file) { struct sadump_part_header *ph; struct sadump_diskset_data *this_disk; int diskid; char guid1[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; char guid2[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; diskid = sd->sd_list_len - 1; this_disk = sd->sd_list[diskid]; if (CRASHDEBUG(1)) error(INFO, "sadump: add disk #%d\n", diskid+1); ph = malloc(sd->block_size); if (!ph) { error(INFO, "sadump: cannot malloc 
block_size buffer\n"); return FALSE; } if (lseek(this_disk->dfd, 0, SEEK_SET) == failed) { error(INFO, "sadump: cannot lseek dump partition header\n"); free(ph); return FALSE; } if (read(this_disk->dfd, ph, sd->block_size) < sd->block_size) { error(INFO, "sadump: cannot read dump partition header\n"); free(ph); return FALSE; } if (ph->signature1 != SADUMP_SIGNATURE1 || ph->signature2 != SADUMP_SIGNATURE2) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not have partition header\n"); free(ph); return FALSE; } if (memcmp(&sd->header->sadump_id, &ph->sadump_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: system ID mismatch\n" " partition header on disk #1: %s\n" " partition header on disk #%d: %s\n", guid_to_str(&sd->header->sadump_id, guid1, sizeof(guid1)), diskid+1, guid_to_str(&ph->sadump_id, guid2, sizeof(guid2))); free(ph); return FALSE; } if (memcmp(&sd->header->disk_set_id, &ph->disk_set_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: disk set ID mismatch\n" " partition header on disk #1: %s\n" " partition header on disk #%d: %s\n", guid_to_str(&sd->header->disk_set_id, guid1, sizeof(guid1)), diskid+1, guid_to_str(&ph->disk_set_id, guid2, sizeof(guid2))); free(ph); return FALSE; } if (memcmp(&sd->diskset_header->vol_info[diskid - 1].id, &ph->vol_id, sizeof(efi_guid_t)) != 0) { if (CRASHDEBUG(1)) error(INFO, "sadump: volume ID mismatch\n" " disk set header on disk #1: %s\n" " partition header on disk #%d: %s\n", guid_to_str(&sd->diskset_header->vol_info[diskid-1].id, guid1, sizeof(guid1)), diskid+1, guid_to_str(&ph->vol_id, guid2, sizeof(guid2))); free(ph); return FALSE; } if (memcmp(&sd->header->time_stamp, &ph->time_stamp, sizeof(efi_time_t)) != 0) { if (CRASHDEBUG(1)) { error(INFO, "sadump: time stamp mismatch\n"); error(INFO, "sadump: partition header on disk #1: %s\n", strip_linefeeds(asctime (efi_time_t_to_tm (&sd->header->time_stamp)))); error(INFO, "sadump: partition header on disk #%d: %s\n", diskid+1, 
strip_linefeeds(asctime (efi_time_t_to_tm (&ph->time_stamp)))); } } if (diskid != ph->set_disk_set - 1) { if (CRASHDEBUG(1)) error(INFO, "sadump: wrong disk order; " "#%d expected but #%d given\n", diskid+1, ph->set_disk_set); free(ph); return FALSE; } this_disk->header = ph; this_disk->data_offset = sd->block_size; this_disk->filename = file; return TRUE; } static int open_dump_file(char *file) { int fd; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "sadump: unable to open dump file %s", file); return FALSE; } sd->dfd = fd; return TRUE; } static int open_disk(char *file) { struct sadump_diskset_data *this_disk; sd->sd_list_len++; if (CRASHDEBUG(1)) error(INFO, "sadump: open disk #%d\n", sd->sd_list_len); if (sd->sd_list_len > sd->diskset_header->disk_num) { error(INFO, "sadump: too many diskset arguments; " "this diskset consists of %d disks\n", sd->diskset_header->disk_num); return FALSE; } sd->sd_list = realloc(sd->sd_list, sd->sd_list_len * sizeof(struct sadump_diskset_data *)); if (!sd->sd_list) { if (CRASHDEBUG(1)) { error(INFO, "sadump: cannot malloc diskset list buffer\n"); } return FALSE; } this_disk = malloc(sizeof(struct sadump_diskset_data)); if (!this_disk) { if (CRASHDEBUG(1)) { error(INFO, "sadump: cannot malloc diskset data buffer\n"); } return FALSE; } memset(this_disk, 0, sizeof(*this_disk)); sd->sd_list[sd->sd_list_len - 1] = this_disk; this_disk->dfd = open(file, O_RDONLY); if (!this_disk->dfd) { free(this_disk); error(INFO, "sadump: unable to open dump file %s", file); return FALSE; } return TRUE; } int is_sadump(char *file) { if (SADUMP_VALID()) { if (!(sd->flags & SADUMP_DISKSET)) { if (CRASHDEBUG(1)) error(INFO, "sadump: does not support multiple" " file formats\n"); (void) sadump_cleanup_sadump_data(); return FALSE; } if (!open_disk(file) || !add_disk(file)) { (void) sadump_cleanup_sadump_data(); return FALSE; } return TRUE; } if (!open_dump_file(file) || !read_dump_header(file)) return FALSE; return TRUE; } int 
sadump_is_diskset(void) { if (!SADUMP_VALID()) return FALSE; return !!(sd->flags & SADUMP_DISKSET); } uint sadump_page_size(void) { return sd->dump_header->block_size; } /* * Translate physical address in paddr to PFN number. This means normally that * we just shift paddr by some constant. */ static uint64_t paddr_to_pfn(physaddr_t paddr) { return paddr >> sd->block_shift; } static inline int is_set_bit(char *bitmap, uint64_t pfn) { ulong index, bit; index = pfn >> 3; bit = 7 - (pfn & 7); return !!(bitmap[index] & (1UL << bit)); } static inline int page_is_ram(uint64_t nr) { return is_set_bit(sd->bitmap, nr); } static inline int page_is_dumpable(uint64_t nr) { return is_set_bit(sd->dumpable_bitmap, nr); } static int lookup_diskset(uint64_t whole_offset, int *diskid, uint64_t *disk_offset) { uint64_t offset = whole_offset; int i; for (i = 0; i < sd->sd_list_len; ++i) { uint64_t used_device_i, ram_size; ulong data_offset_i; used_device_i = sd->sd_list[i]->header->used_device; data_offset_i = sd->sd_list[i]->data_offset; ram_size = used_device_i - data_offset_i; if (offset < ram_size) break; offset -= ram_size; } if (i == sd->sd_list_len) return FALSE; *diskid = i; *disk_offset = offset; return TRUE; } int read_sadump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { physaddr_t curpaddr ATTRIBUTE_UNUSED; uint64_t pfn, whole_offset, perdisk_offset, block; ulong page_offset; int dfd; if (sd->flags & SADUMP_KDUMP_BACKUP && paddr >= sd->backup_src_start && paddr < sd->backup_src_start + sd->backup_src_size) { ulong orig_paddr; orig_paddr = paddr; paddr += sd->backup_offset - sd->backup_src_start; if (CRASHDEBUG(1)) error(INFO, "sadump: kdump backup region: %#llx => %#llx\n", orig_paddr, paddr); } pfn = paddr_to_pfn(paddr); curpaddr = paddr & ~((physaddr_t)(sd->block_size-1)); page_offset = paddr & ((physaddr_t)(sd->block_size-1)); if ((pfn >= sd->max_mapnr) || !page_is_ram(pfn)) return SEEK_ERROR; if (!page_is_dumpable(pfn)) { if (!(sd->flags & 
SADUMP_ZERO_EXCLUDED)) return PAGE_EXCLUDED; memset(bufptr, 0, cnt); return cnt; } block = pfn_to_block(pfn); whole_offset = block * sd->block_size; if (sd->flags & SADUMP_DISKSET) { int diskid; if (!lookup_diskset(whole_offset, &diskid, &perdisk_offset)) return SEEK_ERROR; dfd = sd->sd_list[diskid]->dfd; perdisk_offset += sd->sd_list[diskid]->data_offset; } else { dfd = sd->dfd; perdisk_offset = whole_offset + sd->data_offset; } if (lseek(dfd, perdisk_offset, SEEK_SET) == failed) return SEEK_ERROR; if (read(dfd, sd->page_buf, sd->block_size) != sd->block_size) return READ_ERROR; memcpy(bufptr, sd->page_buf + page_offset, cnt); return cnt; } int write_sadump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return 0; } int sadump_init(char *unused, FILE *fptr) { if (!SADUMP_VALID()) return FALSE; return TRUE; } ulong get_sadump_panic_task(void) { return NO_TASK; } ulong get_sadump_switch_stack(ulong task) { return 0; } static struct tm * efi_time_t_to_tm(const efi_time_t *e) { static struct tm t; time_t ti; memset(&t, 0, sizeof(t)); t.tm_sec = e->second; t.tm_min = e->minute; t.tm_hour = e->hour; t.tm_mday = e->day; t.tm_mon = e->month - 1; t.tm_year = e->year - 1900; if (e->timezone != EFI_UNSPECIFIED_TIMEZONE) t.tm_hour += e->timezone; else if (CRASHDEBUG(1)) error(INFO, "sadump: timezone information is missing\n"); ti = mktime(&t); if (ti == (time_t)-1) return &t; return localtime_r(&ti, &t); } static char * guid_to_str(efi_guid_t *guid, char *buf, size_t buflen) { snprintf(buf, buflen, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", htonl(guid->data1), htons(guid->data2), htons(guid->data3), guid->data4[0], guid->data4[1], guid->data4[2], guid->data4[3], guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]); return buf; } static int verify_magic_number(uint32_t magicnum[DUMP_PART_HEADER_MAGICNUM_SIZE]) { int i; for (i = 1; i < DUMP_PART_HEADER_MAGICNUM_SIZE; ++i) if (magicnum[i] != (magicnum[i - 1] + 7) * 11) return FALSE; return 
TRUE; } int sadump_memory_used(void) { return 0; } int sadump_free_memory(void) { return 0; } /* * This function is dump-type independent, and could be used to dump * the diskdump_data structure contents and perhaps the sadump header * data. */ int sadump_memory_dump(FILE *fp) { struct sadump_part_header *sph; struct sadump_disk_set_header *sdh; struct sadump_header *sh; struct sadump_media_header *smh; int i, others; char guid[SADUMP_EFI_GUID_TEXT_REPR_LEN+1]; fprintf(fp, "sadump_data: \n"); fprintf(fp, " filename: %s\n", sd->filename); fprintf(fp, " flags: %lx (", sd->flags); others = 0; if (sd->flags & SADUMP_LOCAL) fprintf(fp, "%sSADUMP_LOCAL", others++ ? "|" : ""); if (sd->flags & SADUMP_DISKSET) fprintf(fp, "%sSADUMP_DISKSET", others++ ? "|" : ""); if (sd->flags & SADUMP_MEDIA) fprintf(fp, "%sSADUMP_MEDIA", others++ ? "|" : ""); if (sd->flags & SADUMP_ZERO_EXCLUDED) fprintf(fp, "%sSADUMP_ZERO_EXCLUDED", others++ ? "|" : ""); if (sd->flags & SADUMP_KDUMP_BACKUP) fprintf(fp, "%sSADUMP_KDUMP_BACKUP", others++ ? 
"|" : ""); fprintf(fp, ") \n"); fprintf(fp, " dfd: %d\n", sd->dfd); fprintf(fp, " machine_type: %d ", sd->machine_type); switch (sd->machine_type) { case EM_386: fprintf(fp, "(EM_386)\n"); break; case EM_X86_64: fprintf(fp, "(EM_X86_64)\n"); break; default: fprintf(fp, "(unknown)\n"); break; } fprintf(fp, "\n header: %lx\n", (ulong)sd->header); sph = sd->header; fprintf(fp, " signature1: %x\n", sph->signature1); fprintf(fp, " signature2: %x\n", sph->signature2); fprintf(fp, " enable: %u\n", sph->enable); fprintf(fp, " reboot: %u\n", sph->reboot); fprintf(fp, " compress: %u\n", sph->compress); fprintf(fp, " recycle: %u\n", sph->recycle); fprintf(fp, " label: (unused)\n"); fprintf(fp, " sadump_id: %s\n", guid_to_str(&sph->sadump_id, guid, sizeof(guid))); fprintf(fp, " disk_set_id: %s\n", guid_to_str(&sph->disk_set_id, guid, sizeof(guid))); fprintf(fp, " vol_id: %s\n", guid_to_str(&sph->vol_id, guid, sizeof(guid))); fprintf(fp, " time_stamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&sph->time_stamp)))); fprintf(fp, " set_disk_set: %u\n", sph->set_disk_set); fprintf(fp, " reserve: %u\n", sph->reserve); fprintf(fp, " used_device: %llu\n", (ulonglong)sph->used_device); fprintf(fp, " magicnum: %s\n", verify_magic_number(sph->magicnum) ? 
"(valid)" : "(invalid)"); fprintf(fp, "\n dump header: %lx\n", (ulong)sd->dump_header); sh = sd->dump_header; fprintf(fp, " signature: %s\n", sh->signature); fprintf(fp, " header_version: %u\n", sh->header_version); fprintf(fp, " reserve: %u\n", sh->reserve); fprintf(fp, " timestamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&sh->timestamp)))); fprintf(fp, " status: %u\n", sh->status); fprintf(fp, " compress: %u\n", sh->compress); fprintf(fp, " block_size: %u\n", sh->block_size); fprintf(fp, " extra_hdr_size: %u\n", sh->extra_hdr_size); fprintf(fp, " sub_hdr_size: %u\n", sh->sub_hdr_size); fprintf(fp, " bitmap_blocks: %u\n", sh->bitmap_blocks); fprintf(fp, "dumpable_bitmap_blocks: %u\n", sh->dumpable_bitmap_blocks); fprintf(fp, " max_mapnr: %u\n", sh->max_mapnr); fprintf(fp, " total_ram_blocks: %u\n", sh->total_ram_blocks); fprintf(fp, " device_blocks: %u\n", sh->device_blocks); fprintf(fp, " written_blocks: %u\n", sh->written_blocks); fprintf(fp, " current_cpu: %u\n", sh->current_cpu); fprintf(fp, " nr_cpus: %u\n", sh->nr_cpus); if (sh->header_version >= 1) { fprintf(fp, " max_mapnr_64: %" PRIu64 "\n" " total_ram_blocks_64: %" PRIu64 "\n" " device_blocks_64: %" PRIu64 "\n" " written_blocks_64: %" PRIu64 "\n", sh->max_mapnr_64, sh->total_ram_blocks_64, sh->device_blocks_64, sh->written_blocks_64); } fprintf(fp, "\n dump sub heaer: "); if (sh->sub_hdr_size > 0) { ulong offset = sd->sub_hdr_offset; struct sadump_apic_state as; struct sadump_smram_cpu_state scs, zero; uint32_t size; uint aid; memset(&zero, 0, sizeof(zero)); if (!read_device(&size, sizeof(uint32_t), &offset)) { error(INFO, "sadump: cannot read sub header size\n"); return FALSE; } fprintf(fp, "\n size: %u\n", size); for (aid = 0; aid < sh->nr_cpus; ++aid) { if (!read_device(&as, sizeof(as), &offset)) { error(INFO, "sadump: cannot read sub header " "apic_id\n"); return FALSE; } fprintf(fp, " " "apic_id[%u]: ApicId %llu: Ldr: %llu\n", aid, (ulonglong)as.ApicId, (ulonglong)as.Ldr); } for (aid = 0; 
aid < sh->nr_cpus; ++aid) { if (!read_device(&scs, sizeof(scs), &offset)) { error(INFO, "sadump: cannot read sub header " "cpu_state\n"); return FALSE; } /* * Reserved fields in SMRAM CPU states could * be non-zero even if the corresponding APICs * are NOT used. This breaks the assumption * that SMRAM CPU state is zero cleared if and * only if the APIC corresponding to the entry * is NOT used. */ mask_reserved_fields(&scs); if (memcmp(&scs, &zero, sizeof(scs)) != 0) { fprintf(fp, "\n"); display_smram_cpu_state(aid, &scs); } } } else fprintf(fp, "(n/a)\n"); fprintf(fp, "\n disk set header: %lx ", (ulong)sd->diskset_header); if ((sdh = sd->diskset_header)) { fprintf(fp, "\ndisk_set_header_size: %u\n", sdh->disk_set_header_size); fprintf(fp, " disk_num: %u\n", sdh->disk_num); fprintf(fp, " disk_set_size: %llu\n", (ulonglong)sdh->disk_set_size); for (i = 0; i < sdh->disk_num - 1; ++i) { struct sadump_volume_info *vol = &sdh->vol_info[i]; fprintf(fp, " vol_info[%d]: \n", i); fprintf(fp, " id: %s\n", guid_to_str(&vol->id, guid, sizeof(guid))); fprintf(fp, " vol_size: %llu\n", (ulonglong)vol->vol_size); fprintf(fp, " status: %u\n", vol->status); fprintf(fp, " cache_size: %u\n", vol->cache_size); } } else fprintf(fp, "(n/a)\n"); fprintf(fp, "\n media header: %lx ", (ulong)sd->media_header); if ((smh = sd->media_header)) { fprintf(fp, "\n sadump_id: %s\n", guid_to_str(&smh->sadump_id, guid, sizeof(guid))); fprintf(fp, " disk_set_id: %s\n", guid_to_str(&smh->disk_set_id, guid, sizeof(guid))); fprintf(fp, " time_stamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&smh->time_stamp)))); fprintf(fp, " sequential_num: %d\n", smh->sequential_num); fprintf(fp, " term_cord: %d\n", smh->term_cord); fprintf(fp, "disk_set_header_size: %d\n", smh->disk_set_header_size); fprintf(fp, " disks_in_use: %d\n", smh->disks_in_use); fprintf(fp, " reserve: (not displayed) \n"); } else fprintf(fp, "(n/a)\n"); fprintf(fp, "\n bitmap: %lx\n", (ulong)sd->bitmap); fprintf(fp, " dumpable_bitmap: 
%lx\n", (ulong)sd->dumpable_bitmap); fprintf(fp, " sub_hdr_offset: %lx\n", (ulong)sd->sub_hdr_offset); fprintf(fp, "smram_cpu_state_size: %lx\n", (ulong)sd->smram_cpu_state_size); fprintf(fp, " data_offset: %lx\n", sd->data_offset); fprintf(fp, " block_size: %d\n", sd->block_size); fprintf(fp, " block_shift: %d\n", sd->block_shift); fprintf(fp, " page_buf: %lx\n", (ulong)sd->page_buf); fprintf(fp, " block_table: %lx\n", (ulong)sd->block_table); fprintf(fp, " sd_list_len: %d\n", sd->sd_list_len); fprintf(fp, " sd_list: %lx\n", (ulong)sd->sd_list); fprintf(fp, " backup_src_start: %llx\n", sd->backup_src_start); fprintf(fp, " backup_src_size: %lx\n", sd->backup_src_size); fprintf(fp, " backup_offset: %llx\n", (ulonglong)sd->backup_src_size); for (i = 0; i < sd->sd_list_len; ++i) { struct sadump_diskset_data *sdd = sd->sd_list[i]; fprintf(fp, "\n sd_list[%d]: \n", i); fprintf(fp, " filename: %s\n", sdd->filename); fprintf(fp, " dfd: %d\n", sdd->dfd); fprintf(fp, " header: %lx\n", (ulong)sdd->header); sph = sdd->header; fprintf(fp, " signature1: %x\n", sph->signature1); fprintf(fp, " signature2: %x\n", sph->signature2); fprintf(fp, " enable: %u\n", sph->enable); fprintf(fp, " reboot: %u\n", sph->reboot); fprintf(fp, " compress: %u\n", sph->compress); fprintf(fp, " recycle: %u\n", sph->recycle); fprintf(fp, " label: (unused)\n"); fprintf(fp, " sadump_id: %s\n", guid_to_str(&sph->sadump_id, guid, sizeof(guid))); fprintf(fp, " disk_set_id: %s\n", guid_to_str(&sph->disk_set_id, guid, sizeof(guid))); fprintf(fp, " vol_id: %s\n", guid_to_str(&sph->vol_id, guid, sizeof(guid))); fprintf(fp, " time_stamp: %s\n", strip_linefeeds(asctime(efi_time_t_to_tm(&sph->time_stamp)))); fprintf(fp, " set_disk_set: %u\n", sph->set_disk_set); fprintf(fp, " reserve: %u\n", sph->reserve); fprintf(fp, " used_device: %llu\n", (ulonglong)sph->used_device); fprintf(fp, " magicnum: %s\n", verify_magic_number(sph->magicnum) ? 
"(valid)" : "(invalid)"); fprintf(fp, " data_offset: %lx\n", sdd->data_offset); } return TRUE; } static ulong per_cpu_ptr(ulong ptr, int cpu) { if (cpu < 0 || cpu >= kt->cpus) return 0UL; if (kt->cpus == 1) return ptr; if (!(kt->flags & PER_CPU_OFF)) return 0UL; if (machine_type("X86_64")) { ulong __per_cpu_load; readmem(symbol_value("__per_cpu_load"), KVADDR, &__per_cpu_load, sizeof(__per_cpu_load), "__per_cpu_load", FAULT_ON_ERROR); if (kt->__per_cpu_offset[cpu] == __per_cpu_load) return 0UL; } else if (machine_type("X86")) { if (kt->__per_cpu_offset[cpu] == 0) return 0UL; } return ptr + kt->__per_cpu_offset[cpu]; } static ulong early_per_cpu_ptr(char *symbol, struct syment *sym, int cpu) { char sym_early_ptr[BUFSIZE], sym_early_map[BUFSIZE]; ulong early_ptr; if (cpu < 0 || cpu >= kt->cpus) return 0UL; if (!sym && !(sym = per_cpu_symbol_search(symbol))) return 0UL; if (!(kt->flags & SMP)) return per_cpu_ptr(sym->value, cpu); snprintf(sym_early_ptr, BUFSIZE, "%s_early_ptr", symbol); snprintf(sym_early_map, BUFSIZE, "%s_early_map", symbol); if (!symbol_exists(sym_early_ptr) || !symbol_exists(sym_early_map)) return 0UL; readmem(symbol_value(sym_early_ptr), KVADDR, &early_ptr, sizeof(early_ptr), sym_early_ptr, FAULT_ON_ERROR); return early_ptr ? symbol_value(sym_early_map)+cpu*sizeof(uint16_t) : per_cpu_ptr(sym->value, cpu); } static ulong legacy_per_cpu_ptr(ulong ptr, int cpu) { ulong addr; if (!(kt->flags & SMP)) return ptr; if (cpu < 0 || cpu >= kt->cpus) return 0UL; if (!readmem(~ptr + cpu * sizeof(ulong), KVADDR, &addr, sizeof(ulong), "search percpu_data", FAULT_ON_ERROR)) return 0UL; return addr; } /** * Retrieve eip and esp register values from crash_notes saved by * kdump at crash. If register values has not been saved yet, set 0 to * eip and esp instead. 
*/ static int get_prstatus_from_crash_notes(int cpu, char *prstatus) { ulong crash_notes, crash_notes_ptr, percpu_addr; char *prstatus_ptr, *note_buf, *zero_buf, *name; uint32_t *buf; if (cpu < 0 || kt->cpus <= cpu) { error(INFO, "sadump: given cpu is invalid: %d\n", cpu); return FALSE; } if (!symbol_exists("crash_notes")) { error(INFO, "sadump: symbol crash_notes doesn't exist\n"); return FALSE; } crash_notes = symbol_value("crash_notes"); readmem(crash_notes, KVADDR, &crash_notes_ptr, sizeof(ulong), "dereference crash_notes", FAULT_ON_ERROR); if (!crash_notes_ptr) { if (CRASHDEBUG(1)) error(INFO, "sadump: buffer for crash_notes is NULL\n"); return FALSE; } percpu_addr = VALID_STRUCT(percpu_data) ? legacy_per_cpu_ptr(crash_notes_ptr, cpu) : per_cpu_ptr(crash_notes_ptr, cpu); zero_buf = GETBUF(SIZE(note_buf)); BZERO(zero_buf, SIZE(note_buf)); note_buf = GETBUF(SIZE(note_buf)); readmem(percpu_addr, KVADDR, note_buf, SIZE(note_buf), "read crash_notes", FAULT_ON_ERROR); if (memcmp(note_buf, zero_buf, SIZE(note_buf)) == 0) return FALSE; if (BITS64()) { Elf64_Nhdr *note64; note64 = (Elf64_Nhdr *)note_buf; buf = (uint32_t *)note_buf; name = (char *)(note64 + 1); if (note64->n_type != NT_PRSTATUS || note64->n_namesz != strlen("CORE") + 1 || strncmp(name, "CORE", note64->n_namesz) || note64->n_descsz != SIZE(elf_prstatus)) return FALSE; prstatus_ptr = (char *)(buf + (sizeof(*note64) + 3) / 4 + (note64->n_namesz + 3) / 4); } else { Elf32_Nhdr *note32; note32 = (Elf32_Nhdr *)note_buf; buf = (uint32_t *)note_buf; name = (char *)(note32 + 1); if ((note32->n_type != NT_PRSTATUS) && (note32->n_namesz != strlen("CORE") + 1 || strncmp(name, "CORE", note32->n_namesz) || note32->n_descsz != SIZE(elf_prstatus))) return FALSE; prstatus_ptr = (char *)(buf + (sizeof(*note32) + 3) / 4 + (note32->n_namesz + 3) / 4); } memcpy(prstatus, prstatus_ptr, SIZE(elf_prstatus)); return TRUE; } int sadump_get_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *smram) { ulong offset; if 
(!sd->sub_hdr_offset || !sd->smram_cpu_state_size || apicid >= sd->dump_header->nr_cpus) return FALSE; offset = sd->sub_hdr_offset + sizeof(uint32_t) + sd->dump_header->nr_cpus * sizeof(struct sadump_apic_state); if (lseek(sd->dfd, offset + apicid * sd->smram_cpu_state_size, SEEK_SET) == failed) error(FATAL, "sadump: cannot lseek smram cpu state in dump sub header\n"); if (read(sd->dfd, smram, sd->smram_cpu_state_size) != sd->smram_cpu_state_size) error(FATAL, "sadump: cannot read smram cpu state in dump sub " "header\n"); return TRUE; } static void display_smram_cpu_state(int apicid, struct sadump_smram_cpu_state *s) { fprintf(fp, "APIC ID: %d\n" " RIP: %016llx RSP: %08x%08x RBP: %08x%08x\n" " RAX: %08x%08x RBX: %08x%08x RCX: %08x%08x\n" " RDX: %08x%08x RSI: %08x%08x RDI: %08x%08x\n" " R08: %08x%08x R09: %08x%08x R10: %08x%08x\n" " R11: %08x%08x R12: %08x%08x R13: %08x%08x\n" " R14: %08x%08x R15: %08x%08x\n" " SMM REV: %08x SMM BASE %08x\n" " CS : %08x DS: %08x SS: %08x ES: %08x FS: %08x\n" " GS : %08x\n" " CR0: %016llx CR3: %016llx CR4: %08x\n" " GDT: %08x%08x LDT: %08x%08x IDT: %08x%08x\n" " GDTlim: %08x LDTlim: %08x IDTlim: %08x\n" " LDTR: %08x TR: %08x RFLAGS: %016llx\n" " EPTP: %016llx EPTP_SETTING: %08x\n" " DR6: %016llx DR7: %016llx\n" " Ia32Efer: %016llx\n" " IoMemAddr: %08x%08x IoEip: %016llx\n" " IoMisc: %08x LdtInfo: %08x\n" " IoInstructionRestart: %04x AutoHaltRestart: %04x\n", apicid, (ulonglong)s->Rip, s->RspUpper, s->RspLower, s->RbpUpper, s->RbpLower, s->RaxUpper, s->RaxLower, s->RbxUpper, s->RbxLower, s->RcxUpper, s->RcxLower, s->RdxUpper, s->RdxLower, s->RsiUpper, s->RsiLower, s->RdiUpper, s->RdiLower, s->R8Upper, s->R8Lower, s->R9Upper, s->R9Lower, s->R10Upper, s->R10Lower, s->R11Upper, s->R11Lower, s->R12Upper, s->R12Lower, s->R13Upper, s->R13Lower, s->R14Upper, s->R14Lower, s->R15Upper, s->R15Lower, s->SmmRevisionId, s->Smbase, s->Cs, s->Ds, s->Ss, s->Es, s->Fs, s->Gs, (ulonglong)s->Cr0, (ulonglong)s->Cr3, s->Cr4, s->GdtUpper, s->GdtLower, 
s->LdtUpper, s->LdtLower, s->IdtUpper, s->IdtLower, s->GdtLimit, s->LdtLimit, s->IdtLimit, s->Ldtr, s->Tr, (ulonglong)s->Rflags, (ulonglong)s->Eptp, s->EptpSetting, (ulonglong)s->Dr6, (ulonglong)s->Dr7, (ulonglong)s->Ia32Efer, s->IoMemAddrUpper, s->IoMemAddrLower, (ulonglong)s->IoEip, s->IoMisc, s->LdtInfo, s->IoInstructionRestart, s->AutoHaltRestart); } static int cpu_to_apicid(int cpu, int *apicid) { struct syment *sym; if (symbol_exists("bios_cpu_apicid")) { uint8_t apicid_u8; readmem(symbol_value("bios_cpu_apicid") + cpu*sizeof(uint8_t), KVADDR, &apicid_u8, sizeof(uint8_t), "bios_cpu_apicid", FAULT_ON_ERROR); *apicid = (int)apicid_u8; if (CRASHDEBUG(1)) error(INFO, "sadump: apicid %u for cpu %d from " "bios_cpu_apicid\n", apicid_u8, cpu); } else if ((sym = per_cpu_symbol_search("x86_bios_cpu_apicid"))) { uint16_t apicid_u16; readmem(early_per_cpu_ptr("x86_bios_cpu_apicid", sym, cpu), KVADDR, &apicid_u16, sizeof(uint16_t), "x86_bios_cpu_apicid", FAULT_ON_ERROR); *apicid = (int)apicid_u16; if (CRASHDEBUG(1)) error(INFO, "sadump: apicid %u for cpu %d from " "x86_bios_cpu_apicid\n", apicid_u16, cpu); } else { if (CRASHDEBUG(1)) error(INFO, "sadump: no symbols for access to apicid\n"); return FALSE; } return TRUE; } static int get_sadump_smram_cpu_state(int cpu, struct sadump_smram_cpu_state *smram) { int apicid = 0; if (cpu < 0 || kt->cpus <= cpu) { error(INFO, "sadump: given cpu is invalid: %d\n", cpu); return FALSE; } if (!cpu_to_apicid(cpu, &apicid)) return FALSE; sadump_get_smram_cpu_state(apicid, smram); return TRUE; } void get_sadump_regs(struct bt_info *bt, ulong *ipp, ulong *spp) { ulong ip, sp; struct sadump_smram_cpu_state smram; char *prstatus; int cpu = bt->tc->processor; if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, ipp, spp); return; } bt->flags |= BT_DUMPFILE_SEARCH; if (machine_type("X86_64")) machdep->get_stack_frame(bt, ipp, spp); else if (machine_type("X86")) get_netdump_regs_x86(bt, ipp, spp); if (bt->flags & BT_DUMPFILE_SEARCH) 
return; prstatus = GETBUF(SIZE(elf_prstatus)); if (get_prstatus_from_crash_notes(cpu, prstatus)) { ip = ULONG(prstatus + OFFSET(elf_prstatus_pr_reg) + (BITS64() ? OFFSET(user_regs_struct_rip) : OFFSET(user_regs_struct_eip))); sp = ULONG(prstatus + OFFSET(elf_prstatus_pr_reg) + (BITS64() ? OFFSET(user_regs_struct_rsp) : OFFSET(user_regs_struct_eip))); if (ip || sp) { *ipp = ip; *spp = sp; return; } } get_sadump_smram_cpu_state(cpu, &smram); ip = smram.Rip; sp = ((uint64_t)smram.RspUpper << 32) + smram.RspLower; if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { *ipp = ip; *spp = sp; bt->flags |= BT_KERNEL_SPACE; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) bt->flags |= BT_USER_SPACE; } void sadump_display_regs(int cpu, FILE *ofp) { struct sadump_smram_cpu_state smram; if (cpu < 0 || cpu >= kt->cpus) { error(INFO, "sadump: given cpu is invalid: %d\n", cpu); return; } get_sadump_smram_cpu_state(cpu, &smram); if (machine_type("X86_64")) { fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", (ulonglong)(smram.Rip), (ulonglong)(((uint64_t)smram.RspUpper<<32)+smram.RspLower), (ulonglong)(smram.Rflags), (ulonglong)(((uint64_t)smram.RaxUpper<<32)+smram.RaxLower), (ulonglong)(((uint64_t)smram.RbxUpper<<32)+smram.RbxLower), (ulonglong)(((uint64_t)smram.RcxUpper<<32)+smram.RcxLower), (ulonglong)(((uint64_t)smram.RdxUpper<<32)+smram.RdxLower), (ulonglong)(((uint64_t)smram.RsiUpper<<32)+smram.RsiLower), (ulonglong)(((uint64_t)smram.RdiUpper<<32)+smram.RdiLower), (ulonglong)(((uint64_t)smram.RbpUpper<<32)+smram.RbpLower), (ulonglong)(((uint64_t)smram.R8Upper<<32)+smram.R8Lower), 
(ulonglong)(((uint64_t)smram.R9Upper<<32)+smram.R9Lower), (ulonglong)(((uint64_t)smram.R10Upper<<32)+smram.R10Lower), (ulonglong)(((uint64_t)smram.R11Upper<<32)+smram.R11Lower), (ulonglong)(((uint64_t)smram.R12Upper<<32)+smram.R12Lower), (ulonglong)(((uint64_t)smram.R13Upper<<32)+smram.R13Lower), (ulonglong)(((uint64_t)smram.R14Upper<<32)+smram.R14Lower), (ulonglong)(((uint64_t)smram.R15Upper<<32)+smram.R15Lower), smram.Cs, smram.Ss); } if (machine_type("X86")) { fprintf(ofp, " EAX: %08llx EBX: %08llx ECX: %08llx EDX: %08llx\n" " DS: %04x ESI: %08llx ES: %04x EDI: %08llx\n" " SS: %04x ESP: %08llx EBP: %08llx GS: %04x\n" " CS: %04x EIP: %08llx EFLAGS: %08llx\n", (ulonglong)smram.RaxLower, (ulonglong)smram.RbxLower, (ulonglong)smram.RcxLower, (ulonglong)smram.RdxLower, smram.Ds & 0xffff, (ulonglong)smram.RsiLower, smram.Es & 0xffff, (ulonglong)smram.RdiLower, smram.Ss, (ulonglong)smram.RspLower, (ulonglong)smram.RbpLower, smram.Gs, smram.Cs, (ulonglong)smram.Rip, (ulonglong)smram.Rflags); } } /* * sadump does not save phys_base; it must resort to another way. */ int sadump_phys_base(ulong *phys_base) { if (SADUMP_VALID() && !sd->phys_base) { if (CRASHDEBUG(1)) error(NOTE, "sadump: does not save phys_base.\n"); return FALSE; } if (sd->phys_base) { *phys_base = sd->phys_base; return TRUE; } return FALSE; } int sadump_set_phys_base(ulong phys_base) { sd->phys_base = phys_base; return TRUE; } /* * Used by "sys" command to show diskset disk names. */ void sadump_show_diskset(void) { int i; for (i = 0; i < sd->sd_list_len; ++i) { char *filename = sd->sd_list[i]->filename; fprintf(fp, "%s%s", i ? 
" " : "", filename); if ((i+1) < sd->sd_list_len) fprintf(fp, "\n"); } } static int block_table_init(void) { uint64_t pfn, section, max_section, *block_table; max_section = divideup(sd->max_mapnr, SADUMP_PF_SECTION_NUM); block_table = calloc(sizeof(uint64_t), max_section); if (!block_table) { error(INFO, "sadump: cannot allocate memory for block_table\n"); return FALSE; } for (section = 0; section < max_section; ++section) { if (section > 0) block_table[section] = block_table[section-1]; for (pfn = section * SADUMP_PF_SECTION_NUM; pfn < (section + 1) * SADUMP_PF_SECTION_NUM; ++pfn) if (page_is_dumpable(pfn)) block_table[section]++; } sd->block_table = block_table; return TRUE; } static uint64_t pfn_to_block(uint64_t pfn) { uint64_t block, section, p; section = pfn / SADUMP_PF_SECTION_NUM; if (section) block = sd->block_table[section - 1]; else block = 0; for (p = section * SADUMP_PF_SECTION_NUM; p < pfn; ++p) if (page_is_dumpable(p)) block++; return block; } int sadump_is_zero_excluded(void) { return (sd->flags & SADUMP_ZERO_EXCLUDED) ? 
TRUE : FALSE; }

/* Mark the dumpfile as having zero-filled pages excluded. */
void sadump_set_zero_excluded(void)
{
	sd->flags |= SADUMP_ZERO_EXCLUDED;
}

/* Clear the zero-excluded flag. */
void sadump_unset_zero_excluded(void)
{
	sd->flags &= ~SADUMP_ZERO_EXCLUDED;
}

/* Accessor for the file-scope sadump_data descriptor. */
struct sadump_data *
get_sadump_data(void)
{
	return sd;
}

/* Number of apicid entries recorded in the dump header. */
int sadump_get_nr_cpus(void)
{
	/* apicids */
	return sd->dump_header->nr_cpus;
}

#ifdef X86_64
/* Extract CR3/CR4/IDTR for a cpu from its SMRAM-saved state. */
int sadump_get_cr3_cr4_idtr(int cpu, ulong *cr3, ulong *cr4, ulong *idtr)
{
	struct sadump_smram_cpu_state scs;

	memset(&scs, 0, sizeof(scs));
	if (!sadump_get_smram_cpu_state(cpu, &scs))
		return FALSE;

	*cr3 = scs.Cr3;
	*cr4 = scs.Cr4;
	/* IDT base is stored as two 32-bit halves. */
	*idtr = ((uint64_t)scs.IdtUpper)<<32 | (uint64_t)scs.IdtLower;

	return TRUE;
}
#endif /* X86_64 */

/*
 * Zero the reserved fields of an SMRAM cpu state so that unused-APIC
 * entries compare equal to an all-zero buffer (see the caller's note).
 */
static void mask_reserved_fields(struct sadump_smram_cpu_state *smram)
{
	memset(smram->Reserved1, 0, sizeof(smram->Reserved1));
	memset(smram->Reserved2, 0, sizeof(smram->Reserved2));
	memset(smram->Reserved3, 0, sizeof(smram->Reserved3));
	memset(smram->Reserved4, 0, sizeof(smram->Reserved4));
	memset(smram->Reserved5, 0, sizeof(smram->Reserved5));
	memset(smram->Reserved6, 0, sizeof(smram->Reserved6));
	memset(smram->Reserved7, 0, sizeof(smram->Reserved7));
}
crash-utility-crash-9cd43f5/ppc.c0000664000372000037200000016050115107550337016325 0ustar juerghjuergh/* ppc.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2007, 2010-2014 David Anderson
 * Copyright (C) 2002-2007, 2010-2014 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
*/ #ifdef PPC #include "defs.h" #include #define MAX_PLATFORM_LEN 32 /* length for platform string */ /* * This structure was copied from kernel source * in include/asm-ppc/ptrace.h */ struct ppc_pt_regs { long gpr[32]; long nip; long msr; long orig_gpr3; /* Used for restarting system calls */ long ctr; long link; long xer; long ccr; long mq; /* 601 only (not used at present) */ /* Used on APUS to hold IPL value. */ long trap; /* Reason for being here */ long dar; /* Fault registers */ long dsisr; long result; /* Result of a system call */ }; static int ppc_kvtop(struct task_context *, ulong, physaddr_t *, int); static int ppc_uvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ppc_vmalloc_start(void); static int ppc_is_task_addr(ulong); static int ppc_verify_symbol(const char *, ulong, char); static ulong ppc_get_task_pgd(ulong); static int ppc_translate_pte(ulong, void *, ulonglong); static ulong ppc_processor_speed(void); static int ppc_eframe_search(struct bt_info *); static ulong ppc_in_irqstack(ulong); static void ppc_back_trace_cmd(struct bt_info *); static void ppc_back_trace(struct gnu_request *, struct bt_info *); static void get_ppc_frame(struct bt_info *, ulong *, ulong *); static void ppc_print_stack_entry(int,struct gnu_request *, ulong, ulong, struct bt_info *); static char *ppc_check_eframe(struct ppc_pt_regs *); static void ppc_print_eframe(char *, struct ppc_pt_regs *, struct bt_info *); static void ppc_print_regs(struct ppc_pt_regs *); static void ppc_display_full_frame(struct bt_info *, ulong, FILE *); static void ppc_dump_irq(int); static void ppc_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ppc_dis_filter(ulong, char *, unsigned int); static void ppc_cmd_mach(void); static int ppc_get_smp_cpus(void); static void ppc_display_machine_stats(void); static void ppc_dump_line_number(ulong); static struct line_number_hook ppc_line_number_hooks[]; static struct machine_specific ppc_machine_specific = { 0 }; static 
int probe_default_platform(char *); static int probe_ppc44x_platform(char *); static int probe_ppce500_platform(char *); static void ppc_probe_base_platform(void); typedef int (*probe_func_t) (char *); probe_func_t probe_platforms[] = { probe_ppc44x_platform, /* 44x chipsets */ probe_ppce500_platform, /* E500 chipsets */ probe_default_platform, /* This should be at the end */ NULL }; /* Don't forget page flags definitions for each platform */ #define PLATFORM_PAGE_FLAGS_SETUP(PLT) \ do { \ _PAGE_PRESENT = PLT##_PAGE_PRESENT; \ _PAGE_USER = PLT##_PAGE_USER; \ _PAGE_RW = PLT##_PAGE_RW; \ _PAGE_GUARDED = PLT##_PAGE_GUARDED; \ _PAGE_COHERENT = PLT##_PAGE_COHERENT; \ _PAGE_NO_CACHE = PLT##_PAGE_NO_CACHE; \ _PAGE_WRITETHRU = PLT##_PAGE_WRITETHRU; \ _PAGE_DIRTY = PLT##_PAGE_DIRTY; \ _PAGE_ACCESSED = PLT##_PAGE_ACCESSED; \ _PAGE_HWWRITE = PLT##_PAGE_HWWRITE; \ _PAGE_SHARED = PLT##_PAGE_SHARED; \ } while (0) static int probe_ppc44x_platform(char *name) { /* 44x include ppc440* and ppc470 */ if (STRNEQ(name, "ppc440") || STREQ(name, "ppc470")) { PPC_PLATFORM = strdup(name); PLATFORM_PAGE_FLAGS_SETUP(PPC44x); return TRUE; } return FALSE; } struct fsl_booke_tlbcam { #define NUM_TLBCAMS (64) #define LAST_TLBCAM (0x40) uint index; struct { ulong start; ulong limit; physaddr_t phys; } tlbcamrange; struct { uint MAS0; uint MAS1; ulong MAS2; uint MAS3; uint MAS7; } tlbcam; }; static int fsl_booke_vtop(ulong vaddr, physaddr_t *paddr, int verbose) { struct fsl_booke_tlbcam *fsl_mmu; int i, found; if (CRASHDEBUG(1)) fprintf(fp, "[Searching tlbcam address mapping]\n"); fsl_mmu = MMU_SPECIAL; for (i = 0, found = FALSE;;i++, fsl_mmu++) { if (vaddr >= fsl_mmu->tlbcamrange.start && vaddr < fsl_mmu->tlbcamrange.limit) { *paddr = fsl_mmu->tlbcamrange.phys + (vaddr - fsl_mmu->tlbcamrange.start); found = TRUE; break; } if (fsl_mmu->index & LAST_TLBCAM) break; } if (found && verbose) { /* TLBCAM segment attributes */ fprintf(fp, "\n TLBCAM[%u]: MAS0 MAS1 MAS2 " "MAS3 MAS7\n", (fsl_mmu->index & 
~LAST_TLBCAM)); fprintf(fp, " %-8x %-8x %-8lx %-8x %-8x\n", fsl_mmu->tlbcam.MAS0, fsl_mmu->tlbcam.MAS1, fsl_mmu->tlbcam.MAS2, fsl_mmu->tlbcam.MAS3, fsl_mmu->tlbcam.MAS7); /* TLBCAM range */ fprintf(fp, " VIRTUAL RANGE : %lx - %lx\n", fsl_mmu->tlbcamrange.start, fsl_mmu->tlbcamrange.limit); fprintf(fp, " PHYSICAL RANGE: %llx - %llx\n", fsl_mmu->tlbcamrange.phys, fsl_mmu->tlbcamrange.phys + (fsl_mmu->tlbcamrange.limit - fsl_mmu->tlbcamrange.start)); /* translated addr and its tlbcam's offset. */ fprintf(fp, " => VIRTUAL PHYSICAL TLBCAM-OFFSET\n"); fprintf(fp, " %-8lx %-8llx %lu\n", vaddr, *paddr, vaddr - fsl_mmu->tlbcamrange.start); } if (CRASHDEBUG(1)) fprintf(fp, "[tlbcam search end]\n"); return found; } static void fsl_booke_mmu_setup(void) { struct fsl_booke_tlbcam *fsl_mmu; uint i, tlbcam_index; ulong tlbcam_addrs, TLBCAM; readmem(symbol_value("tlbcam_index"), KVADDR, &tlbcam_index, sizeof(uint), "tlbcam_index", FAULT_ON_ERROR); if (tlbcam_index != 0 && tlbcam_index < NUM_TLBCAMS) { fsl_mmu = calloc(tlbcam_index, sizeof(*fsl_mmu)); if (!fsl_mmu) { error(FATAL, "fsl_mmu calloc() failed\n"); return; } tlbcam_addrs = symbol_value("tlbcam_addrs"); TLBCAM = symbol_value("TLBCAM"); for (i = 0; i < tlbcam_index; i++) { fsl_mmu[i].index = i; readmem(tlbcam_addrs + i * sizeof(fsl_mmu[i].tlbcamrange), KVADDR, &fsl_mmu[i].tlbcamrange, sizeof(fsl_mmu[i].tlbcamrange), "tlbcam_addrs", FAULT_ON_ERROR); readmem(TLBCAM + i * sizeof(fsl_mmu[i].tlbcam), KVADDR, &fsl_mmu[i].tlbcam, sizeof(fsl_mmu[i].tlbcam), "TLBCAM", FAULT_ON_ERROR); } fsl_mmu[i - 1].index |= LAST_TLBCAM; MMU_SPECIAL = fsl_mmu; VTOP_SPECIAL = fsl_booke_vtop; } else error(INFO, "[%s]: can't setup tlbcam: tlbcam_index=%u\n", PPC_PLATFORM, tlbcam_index); } static int probe_ppce500_platform(char *name) { if (STRNEQ(name, "ppce500mc")) { PPC_PLATFORM = strdup(name); if (IS_PAE()) { PTE_RPN_SHIFT = BOOKE3E_PTE_RPN_SHIFT; PLATFORM_PAGE_FLAGS_SETUP(BOOK3E); /* Set special flag for book3e */ _PAGE_K_RW = 
BOOK3E_PAGE_KERNEL_RW; } else PLATFORM_PAGE_FLAGS_SETUP(FSL_BOOKE); fsl_booke_mmu_setup(); return TRUE; } return FALSE; } static int probe_default_platform(char *name) { if (IS_PAE()) { error(INFO, "platform \"%s\" 64bit PTE fall through\n", name); error(INFO, "vmalloc translation could not work!\n"); } /* Use the default definitions */ PPC_PLATFORM = strdup(name); PLATFORM_PAGE_FLAGS_SETUP(DEFAULT); return TRUE; } #undef PLATFORM_PAGE_FLAGS_SETUP /* * Find the platform of the crashing system and set the * base_platform accordingly. */ void ppc_probe_base_platform(void) { probe_func_t probe; char platform_name[MAX_PLATFORM_LEN]; ulong ptr; int i; if(!try_get_symbol_data("powerpc_base_platform", sizeof(ulong), &ptr) || read_string(ptr, platform_name, MAX_PLATFORM_LEN - 1) == 0) /* Let us fallback to default definitions */ strcpy(platform_name, "(unknown)"); for (i = 0; probe_platforms[i] != NULL; i++) { probe = probe_platforms[i]; if (probe(platform_name)) break; } } /* * Do all necessary machine-specific setup here. This is called twice, * before and after GDB has been initialized. 
*/ void ppc_init(int when) { uint cpu_features; ulong cur_cpu_spec; struct datatype_member pte = { .name = "pte_t", }; switch (when) { case SETUP_ENV: machdep->machspec = &ppc_machine_specific; machdep->process_elf_notes = process_elf32_notes; break; case PRE_SYMTAB: machdep->verify_symbol = ppc_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = PPC_STACK_SIZE; if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); machdep->pmd = machdep->pgd; if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; break; case PRE_GDB: machdep->kvbase = symbol_value("_stext"); machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ppc_eframe_search; machdep->back_trace = ppc_back_trace_cmd; machdep->processor_speed = ppc_processor_speed; machdep->uvtop = ppc_uvtop; machdep->kvtop = ppc_kvtop; machdep->get_task_pgd = ppc_get_task_pgd; machdep->get_stack_frame = ppc_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = ppc_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = ppc_is_task_addr; machdep->dis_filter = ppc_dis_filter; machdep->cmd_mach = ppc_cmd_mach; machdep->get_smp_cpus = ppc_get_smp_cpus; machdep->line_number_hooks = ppc_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: /* gdb interface got available, resolve PTE right now. 
*/ PTE_SIZE = DATATYPE_SIZE(&pte); if (PTE_SIZE < 0) error(FATAL, "gdb could not handle \"pte_t\" size request\n"); /* Check if we have 64bit PTE on 32bit system */ if (PTE_SIZE == sizeof(ulonglong)) machdep->flags |= PAE; /* Find the platform where we crashed */ ppc_probe_base_platform(); if (!PTE_RPN_SHIFT) PTE_RPN_SHIFT = PAGE_SHIFT; machdep->vmalloc_start = ppc_vmalloc_start; MEMBER_OFFSET_INIT(thread_struct_pg_tables, "thread_struct", "pg_tables"); if (VALID_SIZE(irq_desc_t)) { /* * Use generic irq handlers for recent kernels whose * irq_desc_t have been initialized in kernel_init(). */ machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; } else { machdep->dump_irq = ppc_dump_irq; STRUCT_SIZE_INIT(irqdesc, "irqdesc"); STRUCT_SIZE_INIT(irq_desc_t, "irq_desc_t"); MEMBER_OFFSET_INIT(irqdesc_action, "irqdesc", "action"); MEMBER_OFFSET_INIT(irqdesc_ctl, "irqdesc", "ctl"); MEMBER_OFFSET_INIT(irqdesc_level, "irqdesc", "level"); } MEMBER_OFFSET_INIT(device_node_type, "device_node", "type"); MEMBER_OFFSET_INIT(device_node_allnext, "device_node", "allnext"); MEMBER_OFFSET_INIT(device_node_properties, "device_node", "properties"); MEMBER_OFFSET_INIT(property_name, "property", "name"); MEMBER_OFFSET_INIT(property_value, "property", "value"); MEMBER_OFFSET_INIT(property_next, "property", "next"); MEMBER_OFFSET_INIT(machdep_calls_setup_residual, "machdep_calls", "setup_residual"); MEMBER_OFFSET_INIT(RESIDUAL_VitalProductData, "RESIDUAL", "VitalProductData"); MEMBER_OFFSET_INIT(VPD_ProcessorHz, "VPD", "ProcessorHz"); MEMBER_OFFSET_INIT(bd_info_bi_intfreq, "bd_info", "bi_intfreq"); if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(int), &machdep->nr_irqs); else machdep->nr_irqs = 512; /* NR_IRQS (at least) */ if (!machdep->hz) { machdep->hz = HZ; if (THIS_KERNEL_VERSION 
>= LINUX(2,6,0)) machdep->hz = 1000; } if (symbol_exists("cur_cpu_spec")) { get_symbol_data("cur_cpu_spec", sizeof(void *), &cur_cpu_spec); readmem(cur_cpu_spec + MEMBER_OFFSET("cpu_spec", "cpu_user_features"), KVADDR, &cpu_features, sizeof(uint), "cpu user features", FAULT_ON_ERROR); if (cpu_features & CPU_BOOKE) machdep->flags |= CPU_BOOKE; } else machdep->flags |= CPU_BOOKE; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; /* * IRQ stacks are introduced in 2.6 and also configurable. */ if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && symbol_exists("hardirq_ctx")) STRUCT_SIZE_INIT(irq_ctx, "hardirq_ctx"); STRUCT_SIZE_INIT(note_buf, "note_buf_t"); STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); break; case POST_INIT: break; case LOG_ONLY: machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL; break; } } void ppc_dump_machdep_table(ulong arg) { int others; others = 0; fprintf(fp, " platform: %s\n", PPC_PLATFORM); fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PAE) fprintf(fp, "%sPAE", others++ ? "|" : ""); if (machdep->flags & CPU_BOOKE) fprintf(fp, "%sCPU_BOOKE", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " pgdir_shift: %d\n", PGDIR_SHIFT); fprintf(fp, " ptrs_per_pgd: %d\n", PTRS_PER_PGD); fprintf(fp, " ptrs_per_pte: %d\n", PTRS_PER_PTE); fprintf(fp, " pte_size: %d\n", PTE_SIZE); fprintf(fp, " pte_rpn_shift: %d\n", PTE_RPN_SHIFT); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ppc_eframe_search() [TBD]\n"); fprintf(fp, " back_trace: ppc_back_trace_cmd()\n"); fprintf(fp, " processor_speed: ppc_processor_speed()\n"); fprintf(fp, " uvtop: ppc_uvtop()\n"); fprintf(fp, " kvtop: ppc_kvtop()\n"); fprintf(fp, " get_task_pgd: ppc_get_task_pgd()\n"); if (machdep->dump_irq == generic_dump_irq) fprintf(fp, " dump_irq: generic_dump_irq()\n"); else fprintf(fp, " dump_irq: ppc_dump_irq()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " get_stack_frame: ppc_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: ppc_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ppc_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ppc_is_task_addr()\n"); fprintf(fp, " verify_symbol: ppc_verify_symbol()\n"); fprintf(fp, " dis_filter: ppc_dis_filter()\n"); fprintf(fp, " cmd_mach: 
ppc_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ppc_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: ppc_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } static ulonglong ppc_pte_physaddr(ulonglong pte) { pte = pte >> PTE_RPN_SHIFT; /* pfn */ pte = pte << PAGE_SHIFT; /* physaddr */ return pte; } static int ppc_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *page_dir; ulong pgd_pte, page_table, pte_index; ulonglong pte; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + (vaddr >> PGDIR_SHIFT); /* * Size of a pgd could be more than a PAGE. * So use PAGEBASE(page_dir), instead of * PAGEBASE(pgd) for FILL_PGD() */ FILL_PGD(PAGEBASE((ulong)page_dir), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET((ulong)page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!pgd_pte) { if (VTOP_SPECIAL) /* * This ppc platform have special address mapping * between vaddr and paddr which can not search from * standard page table. 
*/ return VTOP_SPECIAL(vaddr, paddr, verbose); goto no_page; } page_table = pgd_pte; if (IS_BOOKE()) page_table = VTOP(page_table); FILL_PTBL(PAGEBASE((ulong)page_table), PHYSADDR, PAGESIZE()); pte_index = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); if (IS_PAE()) pte = ULONGLONG(machdep->ptbl + PTE_SIZE * pte_index); else pte = ULONG(machdep->ptbl + PTE_SIZE * pte_index); if (verbose) fprintf(fp, " PTE: %lx => %llx\n", pgd_pte, pte); if (!(pte & _PAGE_PRESENT)) { if (pte && verbose) { fprintf(fp, "\n"); ppc_translate_pte((ulong)pte, 0, pte); } goto no_page; } if (verbose) { fprintf(fp, " PAGE: %llx\n\n", PAGEBASE(ppc_pte_physaddr(pte))); ppc_translate_pte((ulong)pte, 0, pte); } *paddr = PAGEBASE(ppc_pte_physaddr(pte)) + PAGEOFFSET(vaddr); return TRUE; no_page: return FALSE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. 
 */
static int
ppc_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong mm, active_mm;
	ulong *pgd;

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	/*
	 * A kernel thread has no mm of its own: borrow the page tables
	 * either from thread_struct.pg_tables (when that member exists)
	 * or from the active_mm the thread last ran with.
	 */
	if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) {
		if (VALID_MEMBER(thread_struct_pg_tables))
			pgd = (ulong *)machdep->get_task_pgd(tc->task);
		else {
			if (INVALID_MEMBER(task_struct_active_mm))
				error(FATAL, "no pg_tables or active_mm?\n");

			readmem(tc->task + OFFSET(task_struct_active_mm),
				KVADDR, &active_mm, sizeof(void *),
				"task active_mm contents", FAULT_ON_ERROR);

			if (!active_mm)
				error(FATAL,
				    "no active_mm for this kernel thread\n");

			readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR,
				&pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
		}
	} else {
		/*
		 * User task: task_mm(..., TRUE) caches the mm_struct in
		 * tt->mm_struct on success; otherwise read the pgd pointer
		 * directly from the task's mm_struct in the dumpfile.
		 */
		if ((mm = task_mm(tc->task, TRUE)))
			pgd = ULONG_PTR(tt->mm_struct +
				OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	return ppc_pgd_vtop(pgd, vaddr, paddr, verbose);
}

/*
 * Translates a kernel virtual address to its physical address.  cmd_vtop()
 * sets the verbose flag so that the pte translation gets displayed; all
 * other callers quietly accept the translation.
 */
static int
ppc_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd;

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	/* Before vmalloc_start is known, treat everything as unity-mapped. */
	if (!vt->vmalloc_start) {
		*paddr = VTOP(kvaddr);
		return TRUE;
	}

	/*
	 * Unity-mapped addresses translate arithmetically; fall through
	 * to the page-table walk only when verbose output was requested.
	 */
	if (!IS_VMALLOC_ADDR(kvaddr)) {
		*paddr = VTOP(kvaddr);
		if (!verbose)
			return TRUE;
	}

	pgd = (ulong *)vt->kernel_pgd[0];

	return ppc_pgd_vtop(pgd, kvaddr, paddr, verbose);
}

/*
 * Determine where vmalloc'd memory starts by looking at the first
 * entry on the vmlist.
 */
static ulong
ppc_vmalloc_start(void)
{
	return (first_vmalloc_address());
}

/*
 * PPC tasks are all stacksize-aligned, except when split from the stack.
 * PPC also allows the idle_task to be non-page aligned, so we have to make
 * an additional check through the idle_threads array.
*/ static int ppc_is_task_addr(ulong task) { int i; if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)) return TRUE; for (i = 0; i < kt->cpus; i++) if (task == tt->idle_threads[i]) return TRUE; return FALSE; } /* * According to kernel source, this should cover all the PPC variants out * There, but since we can't test them all, YMMV. */ static ulong ppc_processor_speed(void) { ulong res, value, ppc_md, md_setup_res; ulong prep_setup_res; ulong node, type, name, properties; char str_buf[32]; ulong len, mhz = 0; if (machdep->mhz) return(machdep->mhz); if(symbol_exists("allnodes")) { get_symbol_data("allnodes", sizeof(void *), &node); while(node) { readmem(node+OFFSET(device_node_type), KVADDR, &type, sizeof(ulong), "node type", FAULT_ON_ERROR); if(type != 0) { len = read_string(type, str_buf, sizeof(str_buf)); if(len && (strcasecmp(str_buf, "cpu") == 0)) break; } readmem(node+OFFSET(device_node_allnext), KVADDR, &node, sizeof(ulong), "node allnext", FAULT_ON_ERROR); } /* now, if we found a CPU node, get the speed property */ if(node) { readmem(node+OFFSET(device_node_properties), KVADDR, &properties, sizeof(ulong), "node properties", FAULT_ON_ERROR); while(properties) { readmem(properties+OFFSET(property_name), KVADDR, &name, sizeof(ulong), "property name", FAULT_ON_ERROR); len = read_string(name, str_buf, sizeof(str_buf)); if (len && (strcasecmp(str_buf, "clock-frequency") == 0)) { /* found the right cpu property */ readmem(properties+ OFFSET(property_value), KVADDR, &value, sizeof(ulong), "clock freqency pointer", FAULT_ON_ERROR); readmem(value, KVADDR, &mhz, sizeof(ulong), "clock frequency value", FAULT_ON_ERROR); mhz /= 1000000; break; } else if(len && (strcasecmp(str_buf, "ibm,extended-clock-frequency") == 0)){ /* found the right cpu property */ readmem(properties+ OFFSET(property_value), KVADDR, &value, sizeof(ulong), "clock freqency pointer", FAULT_ON_ERROR); readmem(value, KVADDR, &mhz, 
sizeof(ulong), "clock frequency value", FAULT_ON_ERROR); mhz /= 1000000; break; } /* keep looking */ readmem(properties+ OFFSET(property_next), KVADDR, &properties, sizeof(ulong), "property next", FAULT_ON_ERROR); } if(!properties) { /* didn't find the cpu speed for some reason */ return (machdep->mhz = 0); } } } /* for machines w/o OF */ /* untested, but in theory this should work on prep machines */ if (symbol_exists("res") && !mhz) { get_symbol_data("res", sizeof(void *), &res); if (symbol_exists("prep_setup_residual")) { get_symbol_data("prep_setup_residual", sizeof(void *), &prep_setup_res); get_symbol_data("ppc_md", sizeof(void *), &ppc_md); readmem(ppc_md + OFFSET(machdep_calls_setup_residual), KVADDR, &md_setup_res, sizeof(ulong), "ppc_md setup_residual", FAULT_ON_ERROR); if(prep_setup_res == md_setup_res) { /* PREP machine */ readmem(res+ OFFSET(RESIDUAL_VitalProductData)+ OFFSET(VPD_ProcessorHz), KVADDR, &mhz, sizeof(ulong), "res VitalProductData", FAULT_ON_ERROR); mhz = (mhz > 1024) ? mhz >> 20 : mhz; } } if(!mhz) { /* everything else seems to do this the same way... */ readmem(res + OFFSET(bd_info_bi_intfreq), KVADDR, &mhz, sizeof(ulong), "bd_info bi_intfreq", FAULT_ON_ERROR); mhz /= 1000000; } } /* else...well, we don't have OF, or a residual structure, so * just print unknown MHz */ return (machdep->mhz = mhz); } /* * Accept or reject a symbol from the kernel namelist. */ static int ppc_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_start")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STREQ(name, "Letext") && !STRNEQ(name, "__func__.")); } /* * Get the relevant page directory pointer from a task structure. */ static ulong ppc_get_task_pgd(ulong task) { long offset; ulong pg_tables; offset = VALID_MEMBER(task_struct_thread) ? 
OFFSET(task_struct_thread) : OFFSET(task_struct_tss); if (INVALID_MEMBER(thread_struct_pg_tables)) error(FATAL, "pg_tables does not exist in this kernel's thread_struct\n"); offset += OFFSET(thread_struct_pg_tables); readmem(task + offset, KVADDR, &pg_tables, sizeof(ulong), "task thread pg_tables", FAULT_ON_ERROR); return(pg_tables); } /* * Translate a PTE, returning TRUE if the page is _PAGE_PRESENT. * If a physaddr pointer is passed in, don't print anything. */ static int ppc_translate_pte(ulong pte32, void *physaddr, ulonglong pte64) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; ulonglong paddr; if (!IS_PAE()) pte64 = pte32; paddr = PAGEBASE(ppc_pte_physaddr(pte64)); page_present = (pte64 & _PAGE_PRESENT); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%llx", pte64); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte64) { swap_location(pte64, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte64) { if (_PAGE_PRESENT && 
(pte64 & _PAGE_PRESENT) == _PAGE_PRESENT) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (_PAGE_USER && (pte64 & _PAGE_USER) == _PAGE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (_PAGE_RW && (pte64 & _PAGE_RW) == _PAGE_RW) fprintf(fp, "%sRW", others++ ? "|" : ""); if (_PAGE_K_RW && ((pte64 & _PAGE_K_RW) == _PAGE_K_RW)) fprintf(fp, "%sK-RW", others++ ? "|" : ""); if (_PAGE_GUARDED && (pte64 & _PAGE_GUARDED) == _PAGE_GUARDED) fprintf(fp, "%sGUARDED", others++ ? "|" : ""); if (_PAGE_COHERENT && (pte64 & _PAGE_COHERENT) == _PAGE_COHERENT) fprintf(fp, "%sCOHERENT", others++ ? "|" : ""); if (_PAGE_NO_CACHE && (pte64 & _PAGE_NO_CACHE) == _PAGE_NO_CACHE) fprintf(fp, "%sNO_CACHE", others++ ? "|" : ""); if (_PAGE_WRITETHRU && (pte64 & _PAGE_WRITETHRU) == _PAGE_WRITETHRU) fprintf(fp, "%sWRITETHRU", others++ ? "|" : ""); if (_PAGE_DIRTY && (pte64 & _PAGE_DIRTY) == _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (_PAGE_ACCESSED && (pte64 & _PAGE_ACCESSED) == _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? "|" : ""); if (_PAGE_HWWRITE && (pte64 & _PAGE_HWWRITE) == _PAGE_HWWRITE) fprintf(fp, "%sHWWRITE", others++ ? "|" : ""); } else fprintf(fp, "no mapping"); fprintf(fp, ")\n"); return page_present; } /* * Look for likely exception frames in a stack. */ static int ppc_eframe_search(struct bt_info *bt) { return (error(FATAL, "ppc_eframe_search: function not written yet!\n")); } static ulong ppc_in_irqstack(ulong addr) { int c; if (!(tt->flags & IRQSTACKS)) return 0; for (c = 0; c < kt->cpus; c++) { if (tt->hardirq_ctx[c]) { if ((addr >= tt->hardirq_ctx[c]) && (addr < (tt->hardirq_ctx[c] + SIZE(irq_ctx)))) return tt->hardirq_ctx[c]; } if (tt->softirq_ctx[c]) { if ((addr >= tt->softirq_ctx[c]) && (addr < (tt->softirq_ctx[c] + SIZE(irq_ctx)))) return tt->softirq_ctx[c]; } } return 0; } /* * Unroll a kernel stack. 
*/ static void ppc_back_trace_cmd(struct bt_info *bt) { char buf[BUFSIZE]; struct gnu_request *req; bt->flags |= BT_EXCEPTION_FRAME; if (CRASHDEBUG(1) || bt->debug) fprintf(fp, " => PC: %lx (%s) FP: %lx \n", bt->instptr, value_to_symstr(bt->instptr, buf, 0), bt->stkptr); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_STACK_TRACE; req->flags = GNU_RETURN_ON_ERROR; req->buf = GETBUF(BUFSIZE); req->debug = bt->debug; req->task = bt->task; req->pc = bt->instptr; req->sp = bt->stkptr; if (bt->flags & BT_USE_GDB) { strcpy(req->buf, "backtrace"); gdb_interface(req); } else ppc_back_trace(req, bt); FREEBUF(req->buf); FREEBUF(req); } /* * Unroll the kernel stack using a minimal amount of gdb services. */ static void ppc_back_trace(struct gnu_request *req, struct bt_info *bt) { int frame = 0; ulong lr = 0; ulong newpc = 0, newsp, marker; int eframe_found; if (!INSTACK(req->sp, bt)) { ulong irqstack; if ((irqstack = ppc_in_irqstack(req->sp))) { bt->stackbase = irqstack; bt->stacktop = bt->stackbase + SIZE(irq_ctx); alter_stackbuf(bt); } else { if (CRASHDEBUG(1)) fprintf(fp, "cannot find the stack info.\n"); return; } } while (INSTACK(req->sp, bt)) { newsp = *(ulong *)&bt->stackbuf[req->sp - bt->stackbase]; if (IS_KVADDR(newsp) && INSTACK(newsp, bt)) newpc = *(ulong *)&bt->stackbuf[newsp + STACK_FRAME_LR_SAVE - bt->stackbase]; if ((req->name = closest_symbol(req->pc)) == NULL) { error(FATAL, "ppc_back_trace hit unknown symbol (%lx).\n", req->pc); break; } bt->flags |= BT_SAVE_LASTSP; ppc_print_stack_entry(frame, req, newsp, lr, bt); bt->flags &= ~(ulonglong)BT_SAVE_LASTSP; lr = 0; if (BT_REFERENCE_FOUND(bt)) return; eframe_found = FALSE; /* * Is this frame an execption one? * In 2.6, 0x72656773 is saved and used * to determine the execption frame. */ if (THIS_KERNEL_VERSION < LINUX(2,6,0)) { if (frame && (newsp - req->sp - STACK_FRAME_OVERHEAD >= sizeof(struct ppc_pt_regs))) /* there might be an exception frame here... 
*/ eframe_found = TRUE; /* also possible ones here... */ else if(!IS_KVADDR(newsp) || (newsp < req->sp)) eframe_found = TRUE; else if (STREQ(req->name, ".ret_from_except")) eframe_found = TRUE; } else if ((newsp - req->sp - STACK_FRAME_OVERHEAD) >= sizeof(struct ppc_pt_regs)){ readmem(req->sp + STACK_FRAME_MARKER, KVADDR, &marker, sizeof(ulong), "frame marker", FAULT_ON_ERROR); if (marker == STACK_FRAME_REGS_MARKER) eframe_found = TRUE; } if (eframe_found) { char *efrm_str; struct ppc_pt_regs regs; readmem(req->sp + STACK_FRAME_OVERHEAD, KVADDR, ®s, sizeof(struct ppc_pt_regs), "exception frame", FAULT_ON_ERROR); efrm_str = ppc_check_eframe(®s); if (efrm_str) { ppc_print_eframe(efrm_str, ®s, bt); lr = regs.link; newpc = regs.nip; newsp = regs.gpr[1]; } } if (STREQ(req->name, "start_kernel")) break; req->pc = newpc; req->sp = newsp; frame++; } return; } static void ppc_display_full_frame(struct bt_info *bt, ulong nextsp, FILE *ofp) { int i, u_idx; ulong *nip; ulong words, addr; char buf[BUFSIZE]; if (!INSTACK(nextsp, bt)) nextsp = bt->stacktop; words = (nextsp - bt->frameptr) / sizeof(ulong); addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? 
"\n" : "", addr); nip = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(ofp, "%s ", format_stack_entry(bt, buf, *nip, 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); } /* * print one entry of a stack trace */ static void ppc_print_stack_entry(int frame, struct gnu_request *req, ulong newsp, ulong lr, struct bt_info *bt) { struct load_module *lm; char *lrname = NULL; ulong offset; struct syment *sp; char *name_plus_offset; char buf[BUFSIZE]; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(req->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; break; case BT_REF_HEXVAL: if (bt->ref->hexval == req->pc) bt->ref->cmdflags |= BT_REF_FOUND; break; } } else { name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(req->pc, &offset); if (sp && offset) name_plus_offset = value_to_symstr(req->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%lx] %s at %lx", frame < 10 ? " " : "", frame, req->sp, name_plus_offset ? name_plus_offset : req->name, req->pc); if (module_symbol(req->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); if (req->ra) { /* * Previous frame is an exception one. If the func * symbol for the current frame is same as with * the previous frame's LR value, print "(unreliable)". */ lrname = closest_symbol(req->ra); req->ra = 0; if (!lrname) { if (CRASHDEBUG(1)) error(FATAL, "ppc_back_trace hit unknown symbol (%lx).\n", req->ra); return; } } if (lr) { /* * Link register value for an expection frame. 
*/ if ((lrname = closest_symbol(lr)) == NULL) { if (CRASHDEBUG(1)) error(FATAL, "ppc_back_trace hit unknown symbol (%lx).\n", lr); return; } if (req->pc != lr) { fprintf(fp, "\n [Link Register ] "); fprintf(fp, " [%lx] %s at %lx", req->sp, lrname, lr); } req->ra = lr; } if (!req->name || STREQ(req->name,lrname)) fprintf(fp, " (unreliable)"); fprintf(fp, "\n"); } if (bt->flags & BT_SAVE_LASTSP) req->lastsp = req->sp; bt->frameptr = req->sp; if (bt->flags & BT_FULL) if (IS_KVADDR(newsp)) ppc_display_full_frame(bt, newsp, fp); if (bt->flags & BT_LINE_NUMBERS) ppc_dump_line_number(req->pc); } /* * Check whether the frame is exception one! */ static char * ppc_check_eframe(struct ppc_pt_regs *regs) { switch(regs->trap & ~0xF) { case 0x200: return "machine check"; case 0x300: return "address error (store)"; case 0x400: return "instruction bus error"; case 0x500: return "interrupt"; case 0x600: return "alingment"; case 0x700: return "breakpoint trap"; case 0x800: return "fpu unavailable"; case 0x900: return "decrementer"; case 0xa00: return "reserved"; case 0xb00: return "reserved"; case 0xc00: return "syscall"; case 0xd00: return "single-step/watch"; case 0xe00: return "fp assist"; } /* No exception frame exists */ return NULL; } static void ppc_print_regs(struct ppc_pt_regs *regs) { int i; /* print out the gprs... */ for(i=0; i<32; i++) { if(!(i % 4)) fprintf(fp, "\n"); fprintf(fp, "R%d:%s %08lx ", i, ((i < 10) ? " " : ""), regs->gpr[i]); /* * In 2.6, some stack frame contains only partial regs set. * For the partial set, only 14 regs will be saved and trap * field will contain 1 in the least significant bit. 
*/ if ((i == 13) && (regs->trap & 1)) break; } fprintf(fp, "\n"); /* print out the rest of the registers */ fprintf(fp, "NIP: %08lx ", regs->nip); fprintf(fp, "MSR: %08lx ", regs->msr); fprintf(fp, "OR3: %08lx ", regs->orig_gpr3); fprintf(fp, "CTR: %08lx\n", regs->ctr); fprintf(fp, "LR: %08lx ", regs->link); fprintf(fp, "XER: %08lx ", regs->xer); fprintf(fp, "CCR: %08lx ", regs->ccr); fprintf(fp, "MQ: %08lx\n", regs->mq); fprintf(fp, "DAR: %08lx ", regs->dar); fprintf(fp, "DSISR: %08lx ", regs->dsisr); fprintf(fp, " Syscall Result: %08lx\n", regs->result); } /* * Print the exception frame information */ static void ppc_print_eframe(char *efrm_str, struct ppc_pt_regs *regs, struct bt_info *bt) { if (BT_REFERENCE_CHECK(bt)) return; fprintf(fp, " %s [%lx] exception frame:", efrm_str, regs->trap); ppc_print_regs(regs); fprintf(fp, "\n"); } static void ppc_kdump_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { struct ppc_pt_regs *pt_regs; unsigned long ip, sp; ip = sp = 0; pt_regs = (struct ppc_pt_regs*)bt->machdep; if (!pt_regs || !(pt_regs->gpr[1])) { fprintf(fp, "0%lx: GPR1 register value(SP) was not saved\n", bt->task); return; } sp = pt_regs->gpr[1]; if (!IS_KVADDR(sp)) { if (IN_TASK_VMA(bt->task, *ksp)) fprintf(fp, "%0lx: Task is running in user space\n", bt->task); else fprintf(fp, "%0lx: Invalid Stack Pointer %0lx\n", bt->task, *ksp); } ip = pt_regs->nip; if(nip) *nip = ip; if (ksp) *ksp = sp; if (bt->flags && ((BT_TEXT_SYMBOLS | BT_TEXT_SYMBOLS_PRINT | BT_TEXT_SYMBOLS_NOPRINT))) return; /* * Print the collected regs for the active task */ ppc_print_regs(pt_regs); if (!IS_KVADDR(sp)) return; fprintf(fp, " NIP [%016lx] %s\n", pt_regs->nip, closest_symbol(pt_regs->nip)); fprintf(fp, " LR [%016lx] %s\n", pt_regs->link, closest_symbol(pt_regs->link)); fprintf(fp, "\n"); return; } static void ppc_dumpfile_stack_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) { struct syment *sp; /* * For KDUMP and compressed KDUMP get the SP, PC from pt_regs * read 
from the Elf Note. */ if (ELF_NOTES_VALID()) { ppc_kdump_stack_frame(bt, getpc, getsp); return; } if (getpc) { if (!(sp = next_symbol("crash_save_current_state", NULL))) *getpc = (symbol_value("crash_save_current_state")+16); else *getpc = (sp->value - 4); } } /* * Get a stack frame combination of pc and ra from the most relevent spot. */ static void ppc_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (DUMPFILE() && is_task_active(bt->task)) ppc_dumpfile_stack_frame(bt, pcp, spp); else get_ppc_frame(bt, pcp, spp); } /* * Do the work for ppc_get_stack_frame() for non-active tasks */ static void get_ppc_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) { ulong ip; ulong sp; ulong *stack; ulong task; struct ppc_pt_regs regs; ip = 0; task = bt->task; stack = (ulong *)bt->stackbuf; if ((tt->flags & THREAD_INFO) && VALID_MEMBER(task_struct_thread_ksp)) readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, &sp, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); else if (VALID_MEMBER(task_struct_tss_ksp)) sp = stack[OFFSET(task_struct_tss_ksp)/sizeof(long)]; else sp = stack[OFFSET(task_struct_thread_ksp)/sizeof(long)]; if (!INSTACK(sp, bt)) goto out; readmem(sp + STACK_FRAME_OVERHEAD, KVADDR, ®s, sizeof(struct ppc_pt_regs), "PPC pt_regs", FAULT_ON_ERROR); ip = regs.nip; if (STREQ(closest_symbol(ip), "__switch_to")) { /* NOTE: _switch_to() calls _switch() which * is asm. _switch leaves pc == lr. * Working through this frame is tricky, * and this mess isn't going to help if we * actually dumped here. Most likely the * analyzer is trying to backtrace a task. * Need to skip 2 frames. 
*/ sp = stack[(sp - bt->stackbase)/sizeof(ulong)]; if (!INSTACK(sp, bt)) goto out; sp = stack[(sp - bt->stackbase)/sizeof(ulong)]; if (!INSTACK(sp + 4, bt)) goto out; ip = stack[(sp + 4 - bt->stackbase)/sizeof(ulong)]; } out: if (DUMPFILE() && getsp && STREQ(closest_symbol(sp), "panic")) { *getsp = sp; return; } if (getsp) *getsp = sp; if (getpc) *getpc = ip; } /* * Do the work for cmd_irq(). */ static void ppc_dump_irq(int irq) { ulong irq_desc_addr, addr; int level, others; ulong action, ctl, value; char typename[32]; int len; irq_desc_addr = symbol_value("irq_desc") + (SIZE(irqdesc) * irq); readmem(irq_desc_addr + OFFSET(irqdesc_level), KVADDR, &level, sizeof(int), "irq_desc entry", FAULT_ON_ERROR); readmem(irq_desc_addr + OFFSET(irqdesc_action), KVADDR, &action, sizeof(long), "irq_desc entry", FAULT_ON_ERROR); readmem(irq_desc_addr + OFFSET(irqdesc_ctl), KVADDR, &ctl, sizeof(long), "irq_desc entry", FAULT_ON_ERROR); fprintf(fp, " IRQ: %d\n", irq); fprintf(fp, " STATUS: 0\n"); fprintf(fp, "HANDLER: "); if (value_symbol(ctl)) { fprintf(fp, "%lx ", ctl); pad_line(fp, VADDR_PRLEN == 8 ? 
VADDR_PRLEN+2 : VADDR_PRLEN-6, ' '); fprintf(fp, "<%s>\n", value_symbol(ctl)); } else fprintf(fp, "%lx\n", ctl); if(ctl) { /* typename */ readmem(ctl + OFFSET(hw_interrupt_type_typename), KVADDR, &addr, sizeof(ulong), "typename pointer", FAULT_ON_ERROR); len = read_string(addr, typename, 32); if(len) fprintf(fp, " typename: %08lx \"%s\"\n", addr, typename); /* startup...I think this is always 0 */ readmem(ctl + OFFSET(hw_interrupt_type_startup), KVADDR, &addr, sizeof(ulong), "interrupt startup", FAULT_ON_ERROR); fprintf(fp, " startup: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); /* shutdown...I think this is always 0 */ readmem(ctl + OFFSET(hw_interrupt_type_shutdown), KVADDR, &addr, sizeof(ulong), "interrupt shutdown", FAULT_ON_ERROR); fprintf(fp, " shutdown: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); if (VALID_MEMBER(hw_interrupt_type_handle)) { /* handle */ readmem(ctl + OFFSET(hw_interrupt_type_handle), KVADDR, &addr, sizeof(ulong), "interrupt handle", FAULT_ON_ERROR); fprintf(fp, " handle: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); } /* enable/disable */ readmem(ctl + OFFSET(hw_interrupt_type_enable), KVADDR, &addr, sizeof(ulong), "interrupt enable", FAULT_ON_ERROR); fprintf(fp, " enable: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "%lx\n", addr); readmem(ctl + OFFSET(hw_interrupt_type_disable), KVADDR, &addr, sizeof(ulong), "interrupt disable", FAULT_ON_ERROR); fprintf(fp, " disable: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "0\n"); } /* next, the action... 
and its submembers */ if(!action) fprintf(fp, " ACTION: (none)\n"); while(action) { fprintf(fp, " ACTION: %08lx\n", action); /* handler */ readmem(action + OFFSET(irqaction_handler), KVADDR, &addr, sizeof(ulong), "action handler", FAULT_ON_ERROR); fprintf(fp, " handler: "); if(value_symbol(addr)) { fprintf(fp, "%08lx <%s>\n", addr, value_symbol(addr)); } else fprintf(fp, "0\n"); /* flags */ readmem(action + OFFSET(irqaction_flags), KVADDR, &value, sizeof(ulong), "action flags", FAULT_ON_ERROR); fprintf(fp, " flags: %lx ", value); if (value) { others = 0; fprintf(fp, "("); if (value & SA_INTERRUPT) fprintf(fp, "%sSA_INTERRUPT", others++ ? "|" : ""); if (value & SA_PROBE) fprintf(fp, "%sSA_PROBE", others++ ? "|" : ""); if (value & SA_SAMPLE_RANDOM) fprintf(fp, "%sSA_SAMPLE_RANDOM", others++ ? "|" : ""); if (value & SA_SHIRQ) fprintf(fp, "%sSA_SHIRQ", others++ ? "|" : ""); fprintf(fp, ")"); if (value & ~ACTION_FLAGS) { fprintf(fp, " (bits %lx not translated)", value & ~ACTION_FLAGS); } } fprintf(fp, "\n"); /* mask */ readmem(action + OFFSET(irqaction_mask), KVADDR, &value, sizeof(ulong), "action mask", FAULT_ON_ERROR); fprintf(fp, " mask: %lx\n", value); /* name */ readmem(action + OFFSET(irqaction_name), KVADDR, &addr, sizeof(ulong), "action name", FAULT_ON_ERROR); len = read_string(addr, typename, 32); if(len) fprintf(fp, " name: %08lx \"%s\"\n", addr, typename); /* dev_id */ readmem(action + OFFSET(irqaction_dev_id), KVADDR, &value, sizeof(ulong), "action dev_id", FAULT_ON_ERROR); fprintf(fp, " dev_id: %08lx\n", value); /* next */ readmem(action + OFFSET(irqaction_next), KVADDR, &value, sizeof(ulong), "action next", FAULT_ON_ERROR); fprintf(fp, " next: %lx\n", value); /* keep going if there are chained interrupts */ action = value; } fprintf(fp, " DEPTH: %x\n\n", level); } /* * Filter disassembly output if the output radix is not gdb's default 10 */ static int ppc_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char 
buf2[BUFSIZE];
	char *colon, *p1;
	int argc;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;
/*
 *  For some reason gdb can go off into the weeds translating text addresses,
 *  (on alpha -- not necessarily seen on ppc) so this routine both fixes the
 *  references as well as imposing the current output radix on the translations.
 */
	console("IN: %s", inbuf);

	/* Rewrite the leading "<addr>:" tag with a symbol-annotated form. */
	colon = strstr(inbuf, ":");

	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	/*
	 * If the final token is a "<symbol>" reference, re-render the hex
	 * address that precedes it in the current output radix.
	 */
	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		/* Back up from the '<' to the " 0x" that starts the address. */
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) && !STRNEQ(p1, " 0x"))
			p1--;

		if (!STRNEQ(p1, " 0x"))
			return FALSE;
		p1++;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>\n", value,
			value_to_symstr(value, buf2, output_radix));

		sprintf(p1, "%s", buf1);
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 *  Override smp_num_cpus if possible and necessary.
 */
int
ppc_get_smp_cpus(void)
{
	/* Prefer the online-cpu count from the dump; fall back to kt->cpus. */
	return (get_cpus_online() > 0) ? get_cpus_online() : kt->cpus;
}

/*
 *  Machine dependent command.
 */
void
ppc_cmd_mach(void)
{
	int c;

	/* No options are accepted; any flag is a usage error. */
	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	ppc_display_machine_stats();
}

/*
 *  "mach" command output.
 */
static void
ppc_display_machine_stats(void)
{
	int c;
	struct new_utsname *uts;
	char buf[BUFSIZE];
	ulong mhz;

	uts = &kt->utsname;

	fprintf(fp, " MACHINE TYPE: %s\n", uts->machine);
	fprintf(fp, " PLATFORM: %s\n", PPC_PLATFORM);
	fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf));
	fprintf(fp, " CPUS: %d\n", kt->cpus);
	fprintf(fp, " PROCESSOR SPEED: ");
	if ((mhz = machdep->processor_speed()))
		fprintf(fp, "%ld Mhz\n", mhz);
	else
		fprintf(fp, "(unknown)\n");
	fprintf(fp, " HZ: %d\n", machdep->hz);
	fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE());
//	fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size());
	fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase);
	fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start);
	fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE());

	/* Per-cpu IRQ stack bases, when the kernel uses separate stacks. */
	if (tt->flags & IRQSTACKS) {
		fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", SIZE(irq_ctx));
		fprintf(fp, " HARD IRQ STACKS:\n");

		for (c = 0; c < kt->cpus; c++) {
			if (!tt->hardirq_ctx[c])
				break;
			sprintf(buf, "CPU %d", c);
			fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]);
		}

		fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", SIZE(irq_ctx));
		fprintf(fp, " SOFT IRQ STACKS:\n");
		for (c = 0; c < kt->cpus; c++) {
			if (!tt->softirq_ctx[c])
				break;
			sprintf(buf, "CPU %d", c);
			fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]);
		}
	}
}

/* Source files used to resolve line numbers for well-known symbols. */
static const char *hook_files[] = {
	"arch/ppc/kernel/entry.S",
	"arch/ppc/kernel/head.S",
};

#define ENTRY_S ((char **)&hook_files[0])
#define HEAD_S ((char **)&hook_files[1])

static struct line_number_hook ppc_line_number_hooks[] = {
	{"DoSyscall", ENTRY_S},
	{"_switch", ENTRY_S},
	{"ret_from_syscall_1", ENTRY_S},
	{"ret_from_syscall_2", ENTRY_S},
	{"ret_from_fork", ENTRY_S},
	{"ret_from_intercept", ENTRY_S},
	{"ret_from_except", ENTRY_S},
	{"do_signal_ret", ENTRY_S},
	{"ret_to_user_hook", ENTRY_S},
	{"enter_rtas", ENTRY_S},
	{"restore", ENTRY_S},
	{"fake_interrupt", ENTRY_S},
	{"lost_irq_ret", ENTRY_S},
	{"do_bottom_half_ret", ENTRY_S},
	{"ret_to_user_hook", ENTRY_S},
	{"signal_return", ENTRY_S},
	{"_stext",
HEAD_S}, {"_start", HEAD_S}, {"__start", HEAD_S}, {"__after_mmu_off", HEAD_S}, {"turn_on_mmu", HEAD_S}, {"__secondary_hold", HEAD_S}, {"DataAccessCont", HEAD_S}, {"DataAccess", HEAD_S}, {"i0x300", HEAD_S}, {"DataSegmentCont", HEAD_S}, {"InstructionAccessCont", HEAD_S}, {"InstructionAccess", HEAD_S}, {"i0x400", HEAD_S}, {"InstructionSegmentCont", HEAD_S}, {"HardwareInterrupt", HEAD_S}, {"do_IRQ_intercept", HEAD_S}, {"i0x600", HEAD_S}, {"ProgramCheck", HEAD_S}, {"i0x700", HEAD_S}, {"FPUnavailable", HEAD_S}, {"i0x800", HEAD_S}, {"Decrementer", HEAD_S}, {"timer_interrupt_intercept", HEAD_S}, {"SystemCall", HEAD_S}, {"trap_0f_cont", HEAD_S}, {"Trap_0f", HEAD_S}, {"InstructionTLBMiss", HEAD_S}, {"InstructionAddressInvalid", HEAD_S}, {"DataLoadTLBMiss", HEAD_S}, {"DataAddressInvalid", HEAD_S}, {"DataStoreTLBMiss", HEAD_S}, {"AltiVecUnavailable", HEAD_S}, {"DataAccess", HEAD_S}, {"InstructionAccess", HEAD_S}, {"DataSegment", HEAD_S}, {"InstructionSegment", HEAD_S}, {"transfer_to_handler", HEAD_S}, {"stack_ovf", HEAD_S}, {"load_up_fpu", HEAD_S}, {"KernelFP", HEAD_S}, {"load_up_altivec", HEAD_S}, {"KernelAltiVec", HEAD_S}, {"giveup_altivec", HEAD_S}, {"giveup_fpu", HEAD_S}, {"relocate_kernel", HEAD_S}, {"copy_and_flush", HEAD_S}, {"fix_mem_constants", HEAD_S}, {"apus_interrupt_entry", HEAD_S}, {"__secondary_start_gemini", HEAD_S}, {"__secondary_start_psurge", HEAD_S}, {"__secondary_start_psurge2", HEAD_S}, {"__secondary_start_psurge3", HEAD_S}, {"__secondary_start_psurge99", HEAD_S}, {"__secondary_start", HEAD_S}, {"setup_common_caches", HEAD_S}, {"setup_604_hid0", HEAD_S}, {"setup_750_7400_hid0", HEAD_S}, {"load_up_mmu", HEAD_S}, {"start_here", HEAD_S}, {"clear_bats", HEAD_S}, {"flush_tlbs", HEAD_S}, {"mmu_off", HEAD_S}, {"initial_bats", HEAD_S}, {"setup_disp_bat", HEAD_S}, {"m8260_gorom", HEAD_S}, {"sdata", HEAD_S}, {"empty_zero_page", HEAD_S}, {"swapper_pg_dir", HEAD_S}, {"cmd_line", HEAD_S}, {"intercept_table", HEAD_S}, {"set_context", HEAD_S}, {NULL, NULL} /* list must 
be NULL-terminated */ };

/*
 * Print the source file and line number for a text address, backing up
 * to the closest symbol value once if gdb cannot resolve the exact PC.
 */
static void
ppc_dump_line_number(ulong callpc)
{
	int retries;
	char buf[BUFSIZE], *p;

	retries = 0;
try_closest:
	get_line_number(callpc, buf, FALSE);

	if (strlen(buf)) {
		if (retries) {
			/* strip the trailing ": <line>" from the retry result */
			p = strstr(buf, ": ");
			if (p)
				*p = NULLCHAR;
		}
		fprintf(fp, " %s\n", buf);
	} else {
		if (retries)
			fprintf(fp, GDB_PATCHED() ?
				"" : " (cannot determine file and line number)\n");
		else {
			/* retry once from the nearest symbol's value */
			retries++;
			callpc = closest_symbol_value(callpc);
			goto try_closest;
		}
	}
}

/*
 * Verify that the per-cpu crash_notes buffer in the kernel contains a
 * well-formed NT_PRSTATUS note for this cpu.  Returns TRUE/FALSE; only
 * called from ppc's get_regs path.
 */
static int
verify_crash_note_in_kernel(int cpu)
{
	int ret;
	Elf32_Nhdr *note32;
	ulong crash_notes_ptr;
	char *buf, *name;

	ret = TRUE;
	/* a missing or NULL crash_notes pointer is treated as "valid" (TRUE) */
	if (!readmem(symbol_value("crash_notes"), KVADDR, &crash_notes_ptr,
	    sizeof(ulong), "crash_notes", QUIET|RETURN_ON_ERROR) ||
	    !crash_notes_ptr)
		goto out;

	buf = GETBUF(SIZE(note_buf));

	/* crash_notes is a per-cpu symbol on SMP kernels */
	if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF))
		crash_notes_ptr += kt->__per_cpu_offset[cpu];

	if (!readmem(crash_notes_ptr, KVADDR, buf, SIZE(note_buf),
	    "cpu crash_notes", QUIET|RETURN_ON_ERROR))
		goto freebuf;

	/* check ELF note type, "CORE" name, and prstatus descriptor size */
	note32 = (Elf32_Nhdr *)buf;
	name = (char *)(note32 + 1);
	if (note32->n_type != NT_PRSTATUS ||
	    note32->n_namesz != strlen("CORE") + 1 ||
	    strncmp(name, "CORE", note32->n_namesz) ||
	    note32->n_descsz != SIZE(elf_prstatus))
		ret = FALSE;

freebuf:
	FREEBUF(buf);
out:
	return ret;
}

/*
 * Re-map the dumpfile's NT_PRSTATUS notes to their cpu slots by checking
 * each online cpu's in-kernel crash_notes.  Performed at most once.
 */
void
ppc_relocate_nt_prstatus_percpu(void **nt_prstatus_percpu,
	uint *num_prstatus_notes)
{
	static int relocated = FALSE;
	void **nt_ptr;
	int i, j, nrcpus;
	size_t size;

	/* relocation is possible only once */
	if (relocated == TRUE)
		return;
	relocated = TRUE;

	if (!symbol_exists("crash_notes") ||
	    !VALID_STRUCT(note_buf) || !VALID_STRUCT(elf_prstatus))
		return;

	/* stash the original note pointers, then redistribute them */
	size = NR_CPUS * sizeof(void *);
	nt_ptr = (void **)GETBUF(size);
	BCOPY(nt_prstatus_percpu, nt_ptr, size);
	BZERO(nt_prstatus_percpu, size);
	*num_prstatus_notes = 0;

	nrcpus = (kt->kernel_NR_CPUS ?
kt->kernel_NR_CPUS : NR_CPUS); for (i = 0, j = 0; i < nrcpus; i++) { if (!in_cpu_map(ONLINE_MAP, i)) continue; if (verify_crash_note_in_kernel(i)) nt_prstatus_percpu[i] = nt_ptr[j++]; else if (CRASHDEBUG(1)) error(WARNING, "cpu#%d: crash_notes not saved\n", i); /* num_prstatus_notes is always equal to online cpus in ppc */ (*num_prstatus_notes)++; } FREEBUF(nt_ptr); } #endif /* PPC */ crash-utility-crash-9cd43f5/dev.c0000664000372000037200000046156315107550337016335 0ustar juerghjuergh/* dev.c - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 */
#include "defs.h"
#include "vmcore.h"

static void dump_blkdevs(ulong);
static void dump_chrdevs(ulong);
static void dump_blkdevs_v2(ulong);
static void dump_blkdevs_v3(ulong);
static ulong search_cdev_map_probes(char *, int, int, ulong *);
static ulong search_bdev_map_probes(char *, int, int, ulong *);
static ulong search_blockdev_inodes(int, ulong *);
static void do_pci(void);
static void do_pci2(void);
static void do_io(void);
static void do_resource_list(ulong, char *, int);
static const char *pci_strclass (uint, char *);
static const char *pci_strvendor(uint, char *);
static const char *pci_strdev(uint, uint, char *);
static void diskio_option(ulong flags);

/* private state for this subsystem; exported only via the dt pointer */
static struct dev_table {
	ulong flags;
} dev_table = { 0 };

struct dev_table *dt = &dev_table;

#define DEV_INIT 0x1
#define DISKIO_INIT 0x2

/* diskio_option() selectors: show all gendisks, or only those with I/O */
#define DIOF_ALL 1 << 0
#define DIOF_NONZERO 1 << 1

/*
 * Cache the structure member offsets/sizes used by the "dev" command.
 * Missing members are tolerated; the display code checks validity.
 */
void
dev_init(void)
{
	MEMBER_OFFSET_INIT(pci_dev_global_list, "pci_dev", "global_list");
	MEMBER_OFFSET_INIT(pci_dev_next, "pci_dev", "next");
	MEMBER_OFFSET_INIT(pci_dev_bus, "pci_dev", "bus");
	MEMBER_OFFSET_INIT(pci_dev_dev, "pci_dev", "dev");
	MEMBER_OFFSET_INIT(pci_dev_devfn, "pci_dev", "devfn");
	MEMBER_OFFSET_INIT(pci_dev_class, "pci_dev", "class");
	MEMBER_OFFSET_INIT(pci_dev_device, "pci_dev", "device");
	MEMBER_OFFSET_INIT(pci_dev_hdr_type, "pci_dev", "hdr_type");
	MEMBER_OFFSET_INIT(pci_dev_pcie_flags_reg, "pci_dev", "pcie_flags_reg");
	MEMBER_OFFSET_INIT(pci_dev_vendor, "pci_dev", "vendor");
	MEMBER_OFFSET_INIT(pci_bus_number, "pci_bus", "number");
	MEMBER_OFFSET_INIT(pci_bus_node, "pci_bus", "node");
	MEMBER_OFFSET_INIT(pci_bus_devices, "pci_bus", "devices");
	MEMBER_OFFSET_INIT(pci_bus_dev, "pci_bus", "dev");
	MEMBER_OFFSET_INIT(pci_bus_children, "pci_bus", "children");
	MEMBER_OFFSET_INIT(pci_bus_parent, "pci_bus", "parent");
	MEMBER_OFFSET_INIT(pci_bus_self, "pci_bus", "self");
	MEMBER_OFFSET_INIT(device_kobj, "device", "kobj");
	MEMBER_OFFSET_INIT(kobject_name, "kobject", "name");
	STRUCT_SIZE_INIT(resource,
"resource");

	/* 2.4-style resource tree vs. the older resource_entry_t list */
	if ((VALID_STRUCT(resource) && symbol_exists("do_resource_list")) ||
	    (VALID_STRUCT(resource) && symbol_exists("iomem_resource") &&
	    symbol_exists("ioport_resource"))) {
		MEMBER_OFFSET_INIT(resource_name, "resource", "name");
		MEMBER_OFFSET_INIT(resource_start, "resource", "start");
		MEMBER_OFFSET_INIT(resource_end, "resource", "end");
		MEMBER_OFFSET_INIT(resource_sibling, "resource", "sibling");
		MEMBER_OFFSET_INIT(resource_child, "resource", "child");
	} else {
		STRUCT_SIZE_INIT(resource_entry_t, "resource_entry_t");
		if (VALID_SIZE(resource_entry_t)) {
			MEMBER_OFFSET_INIT(resource_entry_t_from,
				"resource_entry_t", "from");
			MEMBER_OFFSET_INIT(resource_entry_t_num,
				"resource_entry_t", "num");
			MEMBER_OFFSET_INIT(resource_entry_t_name,
				"resource_entry_t", "name");
			MEMBER_OFFSET_INIT(resource_entry_t_next,
				"resource_entry_t", "next");
		}
	}

	dt->flags |= DEV_INIT;
}

/*
 * Generic command for character and block device data.
 *
 *   dev        character and block device registrations
 *   dev -d/-D  disk I/O statistics (all / non-zero only)
 *   dev -i     I/O port and iomem resource usage
 *   dev -p     PCI device data
 *   dev -V/-v  dumpfile device-dump info / extraction
 */
void
cmd_dev(void)
{
	int c;
	int dd_index = -1;
	char *outputfile = NULL;
	ulong flags;

	flags = 0;

	while ((c = getopt(argcnt, args, "dDpiVv:")) != EOF) {
		switch(c)
		{
		case 'd':
			diskio_option(DIOF_ALL);
			return;

		case 'D':
			diskio_option(DIOF_NONZERO);
			return;

		case 'i':
			if (machine_type("S390X"))
				option_not_supported(c);
			do_io();
			return;

		case 'p':
			if (machine_type("S390X"))
				option_not_supported(c);
			/* older kernels chain pci_dev's; newer hang them off buses */
			if (symbol_exists("pci_devices"))
				do_pci();
			else if (symbol_exists("pci_root_buses"))
				do_pci2();
			else
				option_not_supported(c);
			return;

		case 'V':
			if (KDUMP_DUMPFILE())
				kdump_device_dump_info(fp);
			else if (DISKDUMP_DUMPFILE())
				diskdump_device_dump_info(fp);
			else if (ACTIVE())
				error(INFO,
				    "-V option not supported on a live system\n");
			else
				error(INFO,
				    "-V option not supported on this dumpfile type\n");
			return;

		case 'v':
			dd_index = atoi(optarg);
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* -v takes exactly one optional output file argument */
	while (args[optind]) {
		if (dd_index >= 0) {
			if (!outputfile)
				outputfile = args[optind];
			else
				cmd_usage(pc->curcmd, SYNOPSIS);
		} else
cmd_usage(pc->curcmd, SYNOPSIS); optind++; } if (dd_index >= 0) { if (KDUMP_DUMPFILE()) kdump_device_dump_extract(dd_index, outputfile, fp); else if (DISKDUMP_DUMPFILE()) diskdump_device_dump_extract(dd_index, outputfile, fp); else if (ACTIVE()) error(INFO, "-v option not supported on a live system\n"); else error(INFO, "-v option not supported on this dumpfile type\n"); return; } dump_chrdevs(flags); fprintf(fp, "\n"); dump_blkdevs(flags); } #define MAX_DEV (255) #define MINORBITS 20 #define MINORMASK ((1U << MINORBITS) - 1) #define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) #define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) char *chrdev_hdr = "CHRDEV NAME "; char *blkdev_hdr = "BLKDEV NAME "; /* * Dump the character device data. */ static void dump_chrdevs(ulong flags) { int i; ulong addr, size; char buf[BUFSIZE]; char buf2[BUFSIZE]; struct chrdevs { ulong name; ulong ops; } chrdevs[MAX_DEV], *cp; ulong *cdp; char *char_device_struct_buf; ulong next, savenext, name, fops, cdev; int major, minor; int name_typecode; size_t name_size; if (!symbol_exists("chrdevs")) error(FATAL, "chrdevs: symbol does not exist\n"); addr = symbol_value("chrdevs"); size = VALID_STRUCT(char_device_struct) ? sizeof(void *) : sizeof(struct chrdevs); readmem(addr, KVADDR, &chrdevs[0], size * MAX_DEV, "chrdevs array", FAULT_ON_ERROR); fprintf(fp, "%s %s", chrdev_hdr, VADDR_PRLEN == 8 ? " " : ""); fprintf(fp, "%s ", mkstring(buf, VADDR_PRLEN, CENTER, "CDEV")); fprintf(fp, "%s\n", mkstring(buf, VADDR_PRLEN, LJUST, "OPERATIONS")); if (VALID_STRUCT(char_device_struct)) goto char_device_struct; for (i = 0, cp = &chrdevs[0]; i < MAX_DEV; i++, cp++) { if (!cp->ops) continue; fprintf(fp, " %3d ", i); if (cp->name) { if (read_string(cp->name, buf, BUFSIZE-1)) fprintf(fp, "%-11s ", buf); else fprintf(fp, "%-11s ", "(unknown)"); } else fprintf(fp, "%-11s ", "(unknown)"); sprintf(buf, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? 
" " : " ", VADDR_PRLEN); fprintf(fp, buf, cp->ops); value_to_symstr(cp->ops, buf, 0); if (strlen(buf)) fprintf(fp, "<%s>", buf); fprintf(fp, "\n"); } return; char_device_struct: char_device_struct_buf = GETBUF(SIZE(char_device_struct)); cdp = (ulong *)&chrdevs[0]; name_typecode = MEMBER_TYPE("char_device_struct", "name"); name_size = (size_t)MEMBER_SIZE("char_device_struct", "name"); for (i = 0; i < MAX_DEV; i++, cdp++) { if (!(*cdp)) continue; readmem(*cdp, KVADDR, char_device_struct_buf, SIZE(char_device_struct), "char_device_struct", FAULT_ON_ERROR); next = ULONG(char_device_struct_buf + OFFSET(char_device_struct_next)); name = ULONG(char_device_struct_buf + OFFSET(char_device_struct_name)); switch (name_typecode) { case TYPE_CODE_ARRAY: snprintf(buf, name_size, "%s", char_device_struct_buf + OFFSET(char_device_struct_name)); break; case TYPE_CODE_PTR: default: if (!name || !read_string(name, buf, BUFSIZE-1)) break; } major = INT(char_device_struct_buf + OFFSET(char_device_struct_major)); minor = INT(char_device_struct_buf + OFFSET(char_device_struct_baseminor)); cdev = fops = 0; if (VALID_MEMBER(char_device_struct_cdev) && VALID_STRUCT(cdev)) { cdev = ULONG(char_device_struct_buf + OFFSET(char_device_struct_cdev)); if (cdev) { addr = cdev + OFFSET(cdev_ops); readmem(addr, KVADDR, &fops, sizeof(void *), "cdev ops", FAULT_ON_ERROR); } } else { fops = ULONG(char_device_struct_buf + OFFSET(char_device_struct_fops)); } if (!fops) fops = search_cdev_map_probes(buf, major, minor, &cdev); if (!fops) { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); fprintf(fp, "%s%s\n", VADDR_PRLEN == 8 ? " " : " ", mkstring(buf, VADDR_PRLEN, CENTER, "(none)")); } else { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); sprintf(buf2, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? 
" " : " ", VADDR_PRLEN); fprintf(fp, buf2, cdev); value_to_symstr(fops, buf2, 0); if (strlen(buf2)) fprintf(fp, "%s", buf2); else fprintf(fp, "%lx", fops); fprintf(fp, "\n"); } if (CRASHDEBUG(1)) fprintf(fp, "%lx: major: %d minor: %d name: %s next: %lx cdev: %lx fops: %lx\n", *cdp, major, minor, buf, next, cdev, fops); while (next) { readmem(savenext = next, KVADDR, char_device_struct_buf, SIZE(char_device_struct), "char_device_struct", FAULT_ON_ERROR); next = ULONG(char_device_struct_buf + OFFSET(char_device_struct_next)); name = ULONG(char_device_struct_buf + OFFSET(char_device_struct_name)); switch (name_typecode) { case TYPE_CODE_ARRAY: snprintf(buf, name_size, "%s", char_device_struct_buf + OFFSET(char_device_struct_name)); break; case TYPE_CODE_PTR: default: if (!name || !read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)"); break; } major = INT(char_device_struct_buf + OFFSET(char_device_struct_major)); minor = INT(char_device_struct_buf + OFFSET(char_device_struct_baseminor)); fops = cdev = 0; if (VALID_MEMBER(char_device_struct_cdev) && VALID_STRUCT(cdev)) { cdev = ULONG(char_device_struct_buf + OFFSET(char_device_struct_cdev)); if (cdev) { addr = cdev + OFFSET(cdev_ops); readmem(addr, KVADDR, &fops, sizeof(void *), "cdev ops", FAULT_ON_ERROR); } } else { fops = ULONG(char_device_struct_buf + OFFSET(char_device_struct_fops)); } if (!fops) fops = search_cdev_map_probes(buf, major, minor, &cdev); if (!fops) { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); fprintf(fp, "%s%s\n", VADDR_PRLEN == 8 ? " " : " ", mkstring(buf, VADDR_PRLEN, CENTER, "(none)")); } else { fprintf(fp, " %3d ", major); fprintf(fp, "%-13s ", buf); sprintf(buf2, "%s%%%dlx ", strlen("OPERATIONS") < VADDR_PRLEN ? 
" " : " ", VADDR_PRLEN); fprintf(fp, buf2, cdev); value_to_symstr(fops, buf2, 0); if (strlen(buf2)) fprintf(fp, "%s", buf2); else fprintf(fp, "%lx", fops); fprintf(fp, "\n"); } if (CRASHDEBUG(1)) fprintf(fp, "%lx: major: %d minor: %d name: %s next: %lx cdev: %lx fops: %lx\n", savenext, major, minor, buf, next, cdev, fops); } } FREEBUF(char_device_struct_buf); } /* * Search for a major/minor match by following the list headed * by the kobj_map.probes[major] array entry. The "data" member * points to a cdev structure containing the file_operations * pointer. */ static ulong search_cdev_map_probes(char *name, int major, int minor, ulong *cdev) { char *probe_buf; ulong probes[MAX_DEV]; ulong cdev_map, addr, next, ops, probe_data; uint probe_dev; if (kernel_symbol_exists("cdev_map")) get_symbol_data("cdev_map", sizeof(ulong), &cdev_map); else return 0; addr = cdev_map + OFFSET(kobj_map_probes); if (!readmem(addr, KVADDR, &probes[0], sizeof(void *) * MAX_DEV, "cdev_map.probes[]", QUIET|RETURN_ON_ERROR)) return 0; ops = 0; probe_buf = GETBUF(SIZE(probe)); next = probes[major]; while (next) { if (!readmem(next, KVADDR, probe_buf, SIZE(probe), "struct probe", QUIET|RETURN_ON_ERROR)) break; probe_dev = UINT(probe_buf + OFFSET(probe_dev)); if ((MAJOR(probe_dev) == major) && (MINOR(probe_dev) == minor)) { probe_data = ULONG(probe_buf + OFFSET(probe_data)); addr = probe_data + OFFSET(cdev_ops); if (!readmem(addr, KVADDR, &ops, sizeof(void *), "cdev ops", QUIET|RETURN_ON_ERROR)) ops = 0; else *cdev = probe_data; break; } next = ULONG(probe_buf + OFFSET(probe_next)); } FREEBUF(probe_buf); return ops; } /* * Dump the block device data. 
 */
static void
dump_blkdevs(ulong flags)
{
	int i;
	ulong addr;
	char buf[BUFSIZE];
	struct blkdevs {
		ulong name;
		ulong ops;
	} blkdevs[MAX_DEV], *bp;

	/* v5.x kernels: major_names[] plus bdev_map or blockdev_superblock */
	if (kernel_symbol_exists("major_names") &&
	    (kernel_symbol_exists("bdev_map") ||
	     kernel_symbol_exists("blockdev_superblock"))) {
		dump_blkdevs_v3(flags);
		return;
	}

	/* 2.6-era kernels: all_bdevs list */
	if (symbol_exists("all_bdevs")) {
		dump_blkdevs_v2(flags);
		return;
	}

	if (!symbol_exists("blkdevs"))
		error(FATAL, "blkdevs or all_bdevs: symbols do not exist\n");

	/* pre-2.6: blkdevs[] holds {name, ops} pairs indexed by major */
	addr = symbol_value("blkdevs");

	readmem(addr, KVADDR, &blkdevs[0], sizeof(struct blkdevs) * MAX_DEV,
		"blkdevs array", FAULT_ON_ERROR);

	fprintf(fp, "%s%s\n", blkdev_hdr,
		mkstring(buf, VADDR_PRLEN, CENTER, "OPERATIONS"));

	for (i = 0, bp = &blkdevs[0]; i < MAX_DEV; i++, bp++) {
		if (!bp->ops)
			continue;
		fprintf(fp, " %3d ", i);
		if (bp->name) {
			if (read_string(bp->name, buf, BUFSIZE-1))
				fprintf(fp, "%-11s ", buf);
			else
				fprintf(fp, "%-11s ", "(unknown)");
		} else
			fprintf(fp, "%-11s ", "(unknown)");
		sprintf(buf, "%s%%%dlx ",
			strlen("OPERATIONS") < VADDR_PRLEN ? " " : " ",
			VADDR_PRLEN);
		fprintf(fp, buf, bp->ops);
		value_to_symstr(bp->ops, buf, 0);
		if (strlen(buf))
			fprintf(fp, "<%s>", buf);
		fprintf(fp, "\n");
	}
}

/*
 * block device dump for 2.6
 *
 * Walks the all_bdevs block_device list to map each gendisk to its
 * file_operations, then walks major_names[] hash chains to print one
 * line per registered block major.
 */
static void
dump_blkdevs_v2(ulong flags)
{
	struct list_data list_data, *ld;
	ulong *major_fops, *bdevlist, *gendisklist, *majorlist;
	int i, j, bdevcnt, len;
	char *block_device_buf, *gendisk_buf, *blk_major_name_buf;
	ulong next, savenext, fops;
	int major, total;
	char buf[BUFSIZE];

	if (!symbol_exists("major_names"))
		error(FATAL,
		    "major_names[] array doesn't exist in this kernel\n");

	len = get_array_length("major_names", NULL, 0);

	block_device_buf = GETBUF(SIZE(block_device));
	gendisk_buf = GETBUF(SIZE(gendisk));

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	get_symbol_data("all_bdevs", sizeof(void *), &ld->start);
	ld->end = symbol_value("all_bdevs");
	ld->list_head_offset = OFFSET(block_device_bd_list);

	hq_open();
	bdevcnt = do_list(ld);
	bdevlist = (ulong *)GETBUF(bdevcnt * sizeof(ulong));
	gendisklist = (ulong *)GETBUF(bdevcnt * sizeof(ulong));
	bdevcnt = retrieve_list(bdevlist, bdevcnt);
	hq_close();

	/* major_fops[] must be indexable by any major seen below */
	total = MAX(len, bdevcnt);
	major_fops = (ulong *)GETBUF(sizeof(void *) * total);

	/*
	 * go through the block_device list, emulating:
	 *
	 *   ret += bdev->bd_inode->i_mapping->nrpages;
	 */
	for (i = 0; i < bdevcnt; i++) {
		readmem(bdevlist[i], KVADDR, block_device_buf,
			SIZE(block_device), "block_device buffer",
			FAULT_ON_ERROR);
		gendisklist[i] = ULONG(block_device_buf +
			OFFSET(block_device_bd_disk));
		if (CRASHDEBUG(1))
			fprintf(fp, "[%d] %lx -> %lx\n",
				i, bdevlist[i], gendisklist[i]);
	}

	/* zero out duplicate gendisk references (partitions share a disk) */
	for (i = 1; i < bdevcnt; i++) {
		for (j = 0; j < i; j++) {
			if (gendisklist[i] == gendisklist[j])
				gendisklist[i] = 0;
		}
	}

	/* record each unique gendisk's fops, indexed by its major */
	for (i = 0; i < bdevcnt; i++) {
		if (!gendisklist[i])
			continue;
		readmem(gendisklist[i], KVADDR, gendisk_buf,
			SIZE(gendisk), "gendisk buffer", FAULT_ON_ERROR);
		fops = ULONG(gendisk_buf + OFFSET(gendisk_fops));
		major = UINT(gendisk_buf + OFFSET(gendisk_major));
		strncpy(buf, gendisk_buf +
OFFSET(gendisk_disk_name), 32);
		if (CRASHDEBUG(1))
			fprintf(fp, "%lx: name: [%s] major: %d fops: %lx\n",
				gendisklist[i], buf, major, fops);
		if (fops && (major < total))
			major_fops[major] = fops;
	}

	FREEBUF(bdevlist);
	FREEBUF(gendisklist);
	FREEBUF(block_device_buf);
	FREEBUF(gendisk_buf);

	if (CRASHDEBUG(1))
		fprintf(fp, "major_names[%d]\n", len);

	/* now walk the major_names[] hash chains and print each entry */
	majorlist = (ulong *)GETBUF(len * sizeof(void *));
	blk_major_name_buf = GETBUF(SIZE(blk_major_name));
	readmem(symbol_value("major_names"), KVADDR, &majorlist[0],
		sizeof(void *) * len, "major_names array", FAULT_ON_ERROR);

	fprintf(fp, "%s%s\n", blkdev_hdr,
		mkstring(buf, VADDR_PRLEN, CENTER, "OPERATIONS"));

	for (i = 0; i < len; i++) {
		if (!majorlist[i])
			continue;
		readmem(majorlist[i], KVADDR, blk_major_name_buf,
			SIZE(blk_major_name), "blk_major_name buffer",
			FAULT_ON_ERROR);
		major = UINT(blk_major_name_buf +
			OFFSET(blk_major_name_major));
		buf[0] = NULLCHAR;
		strncpy(buf, blk_major_name_buf +
			OFFSET(blk_major_name_name), 16);
		next = ULONG(blk_major_name_buf +
			OFFSET(blk_major_name_next));
		if (CRASHDEBUG(1))
			fprintf(fp,
			    "[%d] %lx major: %d name: %s next: %lx fops: %lx\n",
				i, majorlist[i], major, buf, next,
				major_fops[major]);
		fprintf(fp, " %3d ", major);
		fprintf(fp, "%-12s ", strlen(buf) ? buf : "(unknown)");
		if (major_fops[major]) {
			sprintf(buf, "%s%%%dlx ",
				strlen("OPERATIONS") < VADDR_PRLEN ?
				" " : " ", VADDR_PRLEN);
			fprintf(fp, buf, major_fops[major]);
			value_to_symstr(major_fops[major], buf, 0);
			if (strlen(buf))
				fprintf(fp, "<%s>", buf);
		} else
			fprintf(fp, " (unknown)");
		fprintf(fp, "\n");

		/* chained entries hash to the same major_names[] slot */
		while (next) {
			readmem(savenext = next, KVADDR, blk_major_name_buf,
				SIZE(blk_major_name), "blk_major_name buffer",
				FAULT_ON_ERROR);
			major = UINT(blk_major_name_buf +
				OFFSET(blk_major_name_major));
			strncpy(buf, blk_major_name_buf +
				OFFSET(blk_major_name_name), 16);
			next = ULONG(blk_major_name_buf +
				OFFSET(blk_major_name_next));
			if (CRASHDEBUG(1))
				fprintf(fp,
				    "[%d] %lx major: %d name: %s next: %lx fops: %lx\n",
					i, savenext, major, buf, next,
					major_fops[major]);
			fprintf(fp, " %3d ", major);
			fprintf(fp, "%-12s ",
				strlen(buf) ? buf : "(unknown)");
			if (major_fops[major]) {
				sprintf(buf, "%s%%%dlx ",
					strlen("OPERATIONS") < VADDR_PRLEN ?
					" " : " ", VADDR_PRLEN);
				fprintf(fp, buf, major_fops[major]);
				value_to_symstr(major_fops[major], buf, 0);
				if (strlen(buf))
					fprintf(fp, "<%s>", buf);
			} else
				fprintf(fp, " (unknown)");
			fprintf(fp, "\n");
		}
	}

	FREEBUF(majorlist);
	FREEBUF(major_fops);
	FREEBUF(blk_major_name_buf);
}

/*
 * Block device dump for v5.x kernels: walk major_names[] and resolve
 * each major's gendisk/fops via bdev_map (pre-5.11) or by scanning the
 * blockdev_superblock inode list (5.11 and later).
 */
static void
dump_blkdevs_v3(ulong flags)
{
	int i, len;
	ulong blk_major_name;
	char *blk_major_name_buf;
	char buf[BUFSIZE];
	uint major;
	ulong gendisk, addr, fops;
	int use_bdev_map = kernel_symbol_exists("bdev_map");

	if (!(len = get_array_length("major_names", NULL, 0)))
		len = MAX_DEV;

	fprintf(fp, "%s %s", blkdev_hdr, VADDR_PRLEN == 8 ?
" " : "");
	fprintf(fp, "%s ",
		mkstring(buf, VADDR_PRLEN, CENTER|RJUST, "GENDISK"));
	fprintf(fp, "%s\n",
		mkstring(buf, VADDR_PRLEN, LJUST, "OPERATIONS"));

	blk_major_name_buf = GETBUF(SIZE(blk_major_name));
	gendisk = 0;

	for (i = 0; i < len; i++) {
		addr = symbol_value("major_names") + (i * sizeof(void *));
		readmem(addr, KVADDR, &blk_major_name, sizeof(void *),
			"major_names[] entry", FAULT_ON_ERROR);
		if (!blk_major_name)
			continue;
		readmem(blk_major_name, KVADDR, blk_major_name_buf,
			SIZE(blk_major_name), "blk_major_name",
			FAULT_ON_ERROR);
		major = UINT(blk_major_name_buf +
			OFFSET(blk_major_name_major));
		buf[0] = NULLCHAR;
		strncpy(buf, blk_major_name_buf +
			OFFSET(blk_major_name_name), 16);
		if (use_bdev_map)
			fops = search_bdev_map_probes(buf,
				major == i ? major : i, UNUSED, &gendisk);
		else /* v5.11 and later */
			fops = search_blockdev_inodes(major, &gendisk);
		if (CRASHDEBUG(1))
			fprintf(fp,
			    "blk_major_name: %lx block major: %d name: %s gendisk: %lx fops: %lx\n",
				blk_major_name, major, buf, gendisk, fops);
		if (!fops) {
			fprintf(fp, " %3d ", major);
			fprintf(fp, "%-13s ",
				strlen(buf) ? buf : "(unknown)");
			fprintf(fp, "%s%s\n", VADDR_PRLEN == 8 ? " " : " ",
				mkstring(buf, VADDR_PRLEN, CENTER, "(none)"));
			continue;
		}
		fprintf(fp, " %3d ", major);
		fprintf(fp, "%-13s ", strlen(buf) ? buf : "(unknown)");
		sprintf(buf, "%s%%%dlx ",
			strlen("OPERATIONS") < VADDR_PRLEN ?
			" " : " ", VADDR_PRLEN);
		fprintf(fp, buf, gendisk);
		value_to_symstr(fops, buf, 0);
		if (strlen(buf))
			fprintf(fp, "%s", buf);
		else
			fprintf(fp, "%lx", fops);
		fprintf(fp, "\n");
	}
}

/*
 * Find a block major's gendisk/fops by walking the bdev_map.probes[major]
 * chain; the probe "data" member points to the gendisk.  Returns the
 * file_operations address, and the gendisk address via *gendisk.
 */
static ulong
search_bdev_map_probes(char *name, int major, int minor, ulong *gendisk)
{
	char *probe_buf, *gendisk_buf;
	ulong probes[MAX_DEV];
	ulong bdev_map, addr, next, probe_data, fops;
	uint probe_dev;

	get_symbol_data("bdev_map", sizeof(ulong), &bdev_map);
	addr = bdev_map + OFFSET(kobj_map_probes);

	if (!readmem(addr, KVADDR, &probes[0], sizeof(void *) * MAX_DEV,
	    "bdev_map.probes[]", QUIET|RETURN_ON_ERROR))
		return 0;

	probe_buf = GETBUF(SIZE(probe));
	gendisk_buf = GETBUF(SIZE(gendisk));
	fops = 0;

	for (next = probes[major]; next;
	     next = ULONG(probe_buf + OFFSET(probe_next))) {
		if (!readmem(next, KVADDR, probe_buf, SIZE(probe),
		    "struct probe", QUIET|RETURN_ON_ERROR))
			break;
		probe_data = ULONG(probe_buf + OFFSET(probe_data));
		if (!probe_data)
			continue;
		probe_dev = UINT(probe_buf + OFFSET(probe_dev));
		if (MAJOR(probe_dev) != major)
			continue;
		if (!readmem(probe_data, KVADDR, gendisk_buf, SIZE(gendisk),
		    "gendisk buffer", QUIET|RETURN_ON_ERROR))
			break;
		fops = ULONG(gendisk_buf + OFFSET(gendisk_fops));
		if (fops) {
			*gendisk = probe_data;
			break;
		}
	}

	FREEBUF(probe_buf);
	FREEBUF(gendisk_buf);

	return fops;
}

/* For bdev_inode.
See block/bdev.c */
#define I_BDEV(inode) (inode - SIZE(block_device))

/*
 * v5.11+ replacement for bdev_map: scan blockdev_superblock's inode
 * list, recover each inode's embedded block_device via I_BDEV(), and
 * return the fops (and gendisk address via *gendisk) of the first
 * gendisk whose major matches.
 */
static ulong
search_blockdev_inodes(int major, ulong *gendisk)
{
	struct list_data list_data, *ld;
	ulong addr, bd_sb, disk, fops = 0;
	int i, inode_count, gendisk_major;
	char *gendisk_buf;

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));

	get_symbol_data("blockdev_superblock", sizeof(void *), &bd_sb);
	addr = bd_sb + OFFSET(super_block_s_inodes);
	if (!readmem(addr, KVADDR, &ld->start, sizeof(ulong),
	    "blockdev_superblock.s_inodes", QUIET|RETURN_ON_ERROR))
		return 0;

	if (empty_list(ld->start))
		return 0;

	ld->flags |= LIST_ALLOCATE;
	ld->end = bd_sb + OFFSET(super_block_s_inodes);
	ld->list_head_offset = OFFSET(inode_i_sb_list);

	inode_count = do_list(ld);

	gendisk_buf = GETBUF(SIZE(gendisk));

	for (i = 0; i < inode_count; i++) {
		/* the bdev_inode embeds block_device just before the inode */
		addr = I_BDEV(ld->list_ptr[i]) +
			OFFSET(block_device_bd_disk);
		if (!readmem(addr, KVADDR, &disk, sizeof(ulong),
		    "block_device.bd_disk", QUIET|RETURN_ON_ERROR))
			continue;
		if (!disk)
			continue;
		if (!readmem(disk, KVADDR, gendisk_buf, SIZE(gendisk),
		    "gendisk buffer", QUIET|RETURN_ON_ERROR))
			continue;
		gendisk_major = INT(gendisk_buf + OFFSET(gendisk_major));
		if (gendisk_major != major)
			continue;
		fops = ULONG(gendisk_buf + OFFSET(gendisk_fops));
		if (fops) {
			*gendisk = disk;
			break;
		}
	}

	FREEBUF(ld->list_ptr);
	FREEBUF(gendisk_buf);

	return fops;
}

/*
 * Display the dev subsystem's internal state ("help -d" support).
 */
void
dump_dev_table(void)
{
	struct dev_table *dt;
	int others;

	dt = &dev_table;
	others = 0;

	fprintf(fp, " flags: %lx (", dt->flags);
	if (dt->flags & DEV_INIT)
		fprintf(fp, "%sDEV_INIT", others++ ? "|" : "");
	if (dt->flags & DISKIO_INIT)
		fprintf(fp, "%sDISKIO_INIT", others++ ? "|" : "");
	fprintf(fp, ")\n");
}

/*
 * Dump the I/O ports.
 */
static void
do_io(void)
{
	int i, c, len, wrap, cnt, size;
	ulong *resource_list, name, start, end;
	char *resource_buf, *p1;
	struct list_data list_data, *ld;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	if (symbol_exists("get_ioport_list")) /* linux 2.2 */
		goto ioport_list;
	if (symbol_exists("do_resource_list")) /* linux 2.4 */
		goto resource_list;
	if (symbol_exists("iomem_resource") &&
	    symbol_exists("ioport_resource"))
		goto resource_list;
	return;

ioport_list:
	/*
	 * ioport
	 */
	fprintf(fp, "%s %s NAME\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "RESOURCE"),
		mkstring(buf2, 9, CENTER|LJUST, "RANGE"));
	/* column width consumed before the NAME field, for line wrapping */
	wrap = VADDR_PRLEN + 2 + 9 + 2;

	resource_buf = GETBUF(SIZE(resource_entry_t));

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	ld->start = 0xc026cf20;
	readmem(symbol_value("iolist") + OFFSET(resource_entry_t_next),
		KVADDR, &ld->start, sizeof(void *), "iolist.next",
		FAULT_ON_ERROR);
	ld->member_offset = OFFSET(resource_entry_t_next);

	hq_open();
	cnt = do_list(ld);
	if (!cnt)
		return;
	resource_list = (ulong *)GETBUF(cnt * sizeof(ulong));
	cnt = retrieve_list(resource_list, cnt);
	hq_close();

	for (i = 0; i < cnt; i++) {
		fprintf(fp, "%lx ", resource_list[i]);
		readmem(resource_list[i], KVADDR, resource_buf,
			SIZE(resource_entry_t), "resource_entry_t",
			FAULT_ON_ERROR);
		/* resource_entry_t stores {from, num}; end = from + num */
		start = ULONG(resource_buf + OFFSET(resource_entry_t_from));
		end = ULONG(resource_buf + OFFSET(resource_entry_t_num));
		end += start;
		fprintf(fp, "%04lx-%04lx ", start, end);
		name = ULONG(resource_buf + OFFSET(resource_entry_t_name));
		if (!read_string(name, buf1, BUFSIZE-1))
			sprintf(buf1, "(unknown)");
		if (wrap + strlen(buf1) <= 80)
			fprintf(fp, "%s\n", buf1);
		else {
			/* split an over-long name at a space near column 80 */
			len = wrap + strlen(buf1) - 80;
			for (c = 0, p1 = &buf1[strlen(buf1)-1];
			     p1 > buf1; p1--, c++) {
				if (*p1 != ' ')
					continue;
				if (c >= len) {
					*p1 = NULLCHAR;
					break;
				}
			}
			fprintf(fp, "%s\n", buf1);
			if (*p1 == NULLCHAR) {
				pad_line(fp, wrap, ' ');
				fprintf(fp, "%s\n", p1+1);
			}
		}
	}
	return;

resource_list:
	resource_buf = GETBUF(SIZE(resource));

	/*
	 * ioport
	 */
	readmem(symbol_value("ioport_resource") + OFFSET(resource_end),
		KVADDR, &end, sizeof(long), "ioport_resource.end",
		FAULT_ON_ERROR);
	/* 4-digit ranges suffice for 16-bit port spaces, 8 otherwise */
	size = (end > 0xffff) ? 8 : 4;
	fprintf(fp, "%s %s NAME\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "RESOURCE"),
		mkstring(buf2, (size*2) + 1, CENTER|LJUST, "RANGE"));
	do_resource_list(symbol_value("ioport_resource"),
		resource_buf, size);

	/*
	 * iomem
	 */
	readmem(symbol_value("iomem_resource") + OFFSET(resource_end),
		KVADDR, &end, sizeof(long), "iomem_resource.end",
		FAULT_ON_ERROR);
	size = (end > 0xffff) ? 8 : 4;
	fprintf(fp, "\n%s %s NAME\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "RESOURCE"),
		mkstring(buf2, (size*2) + 1, CENTER|LJUST, "RANGE"));
	do_resource_list(symbol_value("iomem_resource"),
		resource_buf, size);

	return;
}

/*
 * Recursively display a 2.4-style resource tree: each entry's range and
 * name, then its children (indent-free), then its siblings.
 */
static void
do_resource_list(ulong first_entry, char *resource_buf, int size)
{
	ulong entry, name, start, end, child, sibling;
	int c, wrap, len;
	char buf1[BUFSIZE];
	char *fmt, *p1;

	fmt = NULL;

	switch (size)
	{
	case 4:
		fmt = "%8lx %04lx-%04lx";
		break;
	case 8:
		fmt = "%8lx %08lx-%08lx";
		break;
	}

	wrap = VADDR_PRLEN + 2 + ((size*2)+1) + 2;

	entry = first_entry;

	while (entry) {
		readmem(entry, KVADDR, resource_buf, SIZE(resource),
			"resource", FAULT_ON_ERROR);
		start = ULONG(resource_buf + OFFSET(resource_start));
		end = ULONG(resource_buf + OFFSET(resource_end));
		name = ULONG(resource_buf + OFFSET(resource_name));
		child = ULONG(resource_buf + OFFSET(resource_child));
		sibling = ULONG(resource_buf + OFFSET(resource_sibling));
		if (!read_string(name, buf1, BUFSIZE-1))
			sprintf(buf1, "(unknown)");
		fprintf(fp, fmt, entry, start, end);
		if (wrap + strlen(buf1) <= 80)
			fprintf(fp, " %s\n", buf1);
		else {
			/* split an over-long name at a space near column 80 */
			len = wrap + strlen(buf1) - 80;
			for (c = 0, p1 = &buf1[strlen(buf1)-1];
			     p1 > buf1; p1--, c++) {
				if (*p1 != ' ')
					continue;
				if (c >= len) {
					*p1 = NULLCHAR;
					break;
				}
			}
			fprintf(fp, " %s\n", buf1);
			if (*p1 == NULLCHAR) {
				pad_line(fp, wrap, ' ');
				fprintf(fp, "%s\n", p1+1);
			}
		}
		/* guard against a self-referential child pointer */
		if (child && (child != entry))
			do_resource_list(child,
resource_buf, size); entry = sibling; } } /* * PCI defines taken from 2.2.17 version of pci.h */ #define USE_2_2_17_PCI_H #ifdef USE_2_2_17_PCI_H /* * PCI defines and function prototypes * Copyright 1994, Drew Eckhardt * Copyright 1997--1999 Martin Mares * * For more information, please consult the following manuals (look at * http://www.pcisig.com/ for how to get them): * * PCI BIOS Specification * PCI Local Bus Specification * PCI to PCI Bridge Specification * PCI System Design Guide */ /* * Under PCI, each device has 256 bytes of configuration address space, * of which the first 64 bytes are standardized as follows: */ #define PCI_VENDOR_ID 0x00 /* 16 bits */ #define PCI_DEVICE_ID 0x02 /* 16 bits */ #define PCI_COMMAND 0x04 /* 16 bits */ #define PCI_COMMAND_IO 0x1 /* Enable response in I/O space */ #define PCI_COMMAND_MEMORY 0x2 /* Enable response in Memory space */ #define PCI_COMMAND_MASTER 0x4 /* Enable bus mastering */ #define PCI_COMMAND_SPECIAL 0x8 /* Enable response to special cycles */ #define PCI_COMMAND_INVALIDATE 0x10 /* Use memory write and invalidate */ #define PCI_COMMAND_VGA_PALETTE 0x20 /* Enable palette snooping */ #define PCI_COMMAND_PARITY 0x40 /* Enable parity checking */ #define PCI_COMMAND_WAIT 0x80 /* Enable address/data stepping */ #define PCI_COMMAND_SERR 0x100 /* Enable SERR */ #define PCI_COMMAND_FAST_BACK 0x200 /* Enable back-to-back writes */ #define PCI_STATUS 0x06 /* 16 bits */ #define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ #define PCI_STATUS_66MHZ 0x20 /* Support 66 Mhz PCI 2.1 bus */ #define PCI_STATUS_UDF 0x40 /* Support User Definable Features */ #define PCI_STATUS_FAST_BACK 0x80 /* Accept fast-back to back */ #define PCI_STATUS_PARITY 0x100 /* Detected parity error */ #define PCI_STATUS_DEVSEL_MASK 0x600 /* DEVSEL timing */ #define PCI_STATUS_DEVSEL_FAST 0x000 #define PCI_STATUS_DEVSEL_MEDIUM 0x200 #define PCI_STATUS_DEVSEL_SLOW 0x400 #define PCI_STATUS_SIG_TARGET_ABORT 0x800 /* Set on target abort */ #define 
PCI_STATUS_REC_TARGET_ABORT 0x1000 /* Master ack of " */ #define PCI_STATUS_REC_MASTER_ABORT 0x2000 /* Set on master abort */ #define PCI_STATUS_SIG_SYSTEM_ERROR 0x4000 /* Set when we drive SERR */ #define PCI_STATUS_DETECTED_PARITY 0x8000 /* Set on parity error */ #define PCI_CLASS_REVISION 0x08 /* High 24 bits are class, low 8 revision */ #define PCI_REVISION_ID 0x08 /* Revision ID */ #define PCI_CLASS_PROG 0x09 /* Reg. Level Programming Interface */ #define PCI_CLASS_DEVICE 0x0a /* Device class */ #define PCI_CACHE_LINE_SIZE 0x0c /* 8 bits */ #define PCI_LATENCY_TIMER 0x0d /* 8 bits */ #define PCI_HEADER_TYPE 0x0e /* 8 bits */ #define PCI_HEADER_TYPE_NORMAL 0 #define PCI_HEADER_TYPE_BRIDGE 1 #define PCI_HEADER_TYPE_CARDBUS 2 #define PCI_BIST 0x0f /* 8 bits */ #define PCI_BIST_CODE_MASK 0x0f /* Return result */ #define PCI_BIST_START 0x40 /* 1 to start BIST, 2 secs or less */ #define PCI_BIST_CAPABLE 0x80 /* 1 if BIST capable */ /* * Base addresses specify locations in memory or I/O space. * Decoded size can be determined by writing a value of * 0xffffffff to the register, and reading it back. Only * 1 bits are decoded. */ #define PCI_BASE_ADDRESS_0 0x10 /* 32 bits */ #define PCI_BASE_ADDRESS_1 0x14 /* 32 bits [htype 0,1 only] */ #define PCI_BASE_ADDRESS_2 0x18 /* 32 bits [htype 0 only] */ #define PCI_BASE_ADDRESS_3 0x1c /* 32 bits */ #define PCI_BASE_ADDRESS_4 0x20 /* 32 bits */ #define PCI_BASE_ADDRESS_5 0x24 /* 32 bits */ #define PCI_BASE_ADDRESS_SPACE 0x01 /* 0 = memory, 1 = I/O */ #define PCI_BASE_ADDRESS_SPACE_IO 0x01 #define PCI_BASE_ADDRESS_SPACE_MEMORY 0x00 #define PCI_BASE_ADDRESS_MEM_TYPE_MASK 0x06 #define PCI_BASE_ADDRESS_MEM_TYPE_32 0x00 /* 32 bit address */ #define PCI_BASE_ADDRESS_MEM_TYPE_1M 0x02 /* Below 1M */ #define PCI_BASE_ADDRESS_MEM_TYPE_64 0x04 /* 64 bit address */ #define PCI_BASE_ADDRESS_MEM_PREFETCH 0x08 /* prefetchable? 
*/ #define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL) #define PCI_BASE_ADDRESS_IO_MASK (~0x03UL) /* bit 1 is reserved if address_space = 1 */ /* Header type 0 (normal devices) */ #define PCI_CARDBUS_CIS 0x28 #define PCI_SUBSYSTEM_VENDOR_ID 0x2c #define PCI_SUBSYSTEM_ID 0x2e #define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */ #define PCI_ROM_ADDRESS_ENABLE 0x01 #define PCI_ROM_ADDRESS_MASK (~0x7ffUL) #define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */ /* 0x35-0x3b are reserved */ #define PCI_INTERRUPT_LINE 0x3c /* 8 bits */ #define PCI_INTERRUPT_PIN 0x3d /* 8 bits */ #define PCI_MIN_GNT 0x3e /* 8 bits */ #define PCI_MAX_LAT 0x3f /* 8 bits */ /* Header type 1 (PCI-to-PCI bridges) */ #define PCI_PRIMARY_BUS 0x18 /* Primary bus number */ #define PCI_SECONDARY_BUS 0x19 /* Secondary bus number */ #define PCI_SUBORDINATE_BUS 0x1a /* Highest bus number behind the bridge */ #define PCI_SEC_LATENCY_TIMER 0x1b /* Latency timer for secondary interface */ #define PCI_IO_BASE 0x1c /* I/O range behind the bridge */ #define PCI_IO_LIMIT 0x1d #define PCI_IO_RANGE_TYPE_MASK 0x0f /* I/O bridging type */ #define PCI_IO_RANGE_TYPE_16 0x00 #define PCI_IO_RANGE_TYPE_32 0x01 #define PCI_IO_RANGE_MASK ~0x0f #define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */ #define PCI_MEMORY_BASE 0x20 /* Memory range behind */ #define PCI_MEMORY_LIMIT 0x22 #define PCI_MEMORY_RANGE_TYPE_MASK 0x0f #define PCI_MEMORY_RANGE_MASK ~0x0f #define PCI_PREF_MEMORY_BASE 0x24 /* Prefetchable memory range behind */ #define PCI_PREF_MEMORY_LIMIT 0x26 #define PCI_PREF_RANGE_TYPE_MASK 0x0f #define PCI_PREF_RANGE_TYPE_32 0x00 #define PCI_PREF_RANGE_TYPE_64 0x01 #define PCI_PREF_RANGE_MASK ~0x0f #define PCI_PREF_BASE_UPPER32 0x28 /* Upper half of prefetchable memory range */ #define PCI_PREF_LIMIT_UPPER32 0x2c #define PCI_IO_BASE_UPPER16 0x30 /* Upper half of I/O addresses */ #define PCI_IO_LIMIT_UPPER16 0x32 /* 0x34-0x3b is reserved */ #define 
PCI_ROM_ADDRESS1 0x38 /* Same as PCI_ROM_ADDRESS, but for htype 1 */ /* 0x3c-0x3d are same as for htype 0 */ #define PCI_BRIDGE_CONTROL 0x3e #define PCI_BRIDGE_CTL_PARITY 0x01 /* Enable parity detection on secondary interface */ #define PCI_BRIDGE_CTL_SERR 0x02 /* The same for SERR forwarding */ #define PCI_BRIDGE_CTL_NO_ISA 0x04 /* Disable bridging of ISA ports */ #define PCI_BRIDGE_CTL_VGA 0x08 /* Forward VGA addresses */ #define PCI_BRIDGE_CTL_MASTER_ABORT 0x20 /* Report master aborts */ #define PCI_BRIDGE_CTL_BUS_RESET 0x40 /* Secondary bus reset */ #define PCI_BRIDGE_CTL_FAST_BACK 0x80 /* Fast Back2Back enabled on secondary interface */ /* Header type 2 (CardBus bridges) */ /* 0x14-0x15 reserved */ #define PCI_CB_SEC_STATUS 0x16 /* Secondary status */ #define PCI_CB_PRIMARY_BUS 0x18 /* PCI bus number */ #define PCI_CB_CARD_BUS 0x19 /* CardBus bus number */ #define PCI_CB_SUBORDINATE_BUS 0x1a /* Subordinate bus number */ #define PCI_CB_LATENCY_TIMER 0x1b /* CardBus latency timer */ #define PCI_CB_MEMORY_BASE_0 0x1c #define PCI_CB_MEMORY_LIMIT_0 0x20 #define PCI_CB_MEMORY_BASE_1 0x24 #define PCI_CB_MEMORY_LIMIT_1 0x28 #define PCI_CB_IO_BASE_0 0x2c #define PCI_CB_IO_BASE_0_HI 0x2e #define PCI_CB_IO_LIMIT_0 0x30 #define PCI_CB_IO_LIMIT_0_HI 0x32 #define PCI_CB_IO_BASE_1 0x34 #define PCI_CB_IO_BASE_1_HI 0x36 #define PCI_CB_IO_LIMIT_1 0x38 #define PCI_CB_IO_LIMIT_1_HI 0x3a #define PCI_CB_IO_RANGE_MASK ~0x03 /* 0x3c-0x3d are same as for htype 0 */ #define PCI_CB_BRIDGE_CONTROL 0x3e #define PCI_CB_BRIDGE_CTL_PARITY 0x01 /* Similar to standard bridge control register */ #define PCI_CB_BRIDGE_CTL_SERR 0x02 #define PCI_CB_BRIDGE_CTL_ISA 0x04 #define PCI_CB_BRIDGE_CTL_VGA 0x08 #define PCI_CB_BRIDGE_CTL_MASTER_ABORT 0x20 #define PCI_CB_BRIDGE_CTL_CB_RESET 0x40 /* CardBus reset */ #define PCI_CB_BRIDGE_CTL_16BIT_INT 0x80 /* Enable interrupt for 16-bit cards */ #define PCI_CB_BRIDGE_CTL_PREFETCH_MEM0 0x100 /* Prefetch enable for both memory regions */ #define 
PCI_CB_BRIDGE_CTL_PREFETCH_MEM1 0x200 #define PCI_CB_BRIDGE_CTL_POST_WRITES 0x400 #define PCI_CB_SUBSYSTEM_VENDOR_ID 0x40 #define PCI_CB_SUBSYSTEM_ID 0x42 #define PCI_CB_LEGACY_MODE_BASE 0x44 /* 16-bit PC Card legacy mode base address (ExCa) */ /* 0x48-0x7f reserved */ /* Capability lists */ #define PCI_CAP_LIST_ID 0 /* Capability ID */ #define PCI_CAP_ID_PM 0x01 /* Power Management */ #define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */ #define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ /* Device classes and subclasses */ #define PCI_CLASS_NOT_DEFINED 0x0000 #define PCI_CLASS_NOT_DEFINED_VGA 0x0001 #define PCI_BASE_CLASS_STORAGE 0x01 #define PCI_CLASS_STORAGE_SCSI 0x0100 #define PCI_CLASS_STORAGE_IDE 0x0101 #define PCI_CLASS_STORAGE_FLOPPY 0x0102 #define PCI_CLASS_STORAGE_IPI 0x0103 #define PCI_CLASS_STORAGE_RAID 0x0104 #define PCI_CLASS_STORAGE_OTHER 0x0180 #define PCI_BASE_CLASS_NETWORK 0x02 #define PCI_CLASS_NETWORK_ETHERNET 0x0200 #define PCI_CLASS_NETWORK_TOKEN_RING 0x0201 #define PCI_CLASS_NETWORK_FDDI 0x0202 #define PCI_CLASS_NETWORK_ATM 0x0203 #define PCI_CLASS_NETWORK_OTHER 0x0280 #define PCI_BASE_CLASS_DISPLAY 0x03 #define PCI_CLASS_DISPLAY_VGA 0x0300 #define PCI_CLASS_DISPLAY_XGA 0x0301 #define PCI_CLASS_DISPLAY_OTHER 0x0380 #define PCI_BASE_CLASS_MULTIMEDIA 0x04 #define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400 #define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401 #define PCI_CLASS_MULTIMEDIA_OTHER 0x0480 #define PCI_BASE_CLASS_MEMORY 0x05 #define PCI_CLASS_MEMORY_RAM 0x0500 #define PCI_CLASS_MEMORY_FLASH 0x0501 #define PCI_CLASS_MEMORY_OTHER 0x0580 #define PCI_BASE_CLASS_BRIDGE 0x06 #define PCI_CLASS_BRIDGE_HOST 0x0600 #define PCI_CLASS_BRIDGE_ISA 0x0601 #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 #define PCI_CLASS_BRIDGE_OTHER 0x0680 #define PCI_BASE_CLASS_COMMUNICATION 0x07 
#define PCI_CLASS_COMMUNICATION_SERIAL 0x0700 #define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701 #define PCI_CLASS_COMMUNICATION_OTHER 0x0780 #define PCI_BASE_CLASS_SYSTEM 0x08 #define PCI_CLASS_SYSTEM_PIC 0x0800 #define PCI_CLASS_SYSTEM_DMA 0x0801 #define PCI_CLASS_SYSTEM_TIMER 0x0802 #define PCI_CLASS_SYSTEM_RTC 0x0803 #define PCI_CLASS_SYSTEM_OTHER 0x0880 #define PCI_BASE_CLASS_INPUT 0x09 #define PCI_CLASS_INPUT_KEYBOARD 0x0900 #define PCI_CLASS_INPUT_PEN 0x0901 #define PCI_CLASS_INPUT_MOUSE 0x0902 #define PCI_CLASS_INPUT_OTHER 0x0980 #define PCI_BASE_CLASS_DOCKING 0x0a #define PCI_CLASS_DOCKING_GENERIC 0x0a00 #define PCI_CLASS_DOCKING_OTHER 0x0a01 #define PCI_BASE_CLASS_PROCESSOR 0x0b #define PCI_CLASS_PROCESSOR_386 0x0b00 #define PCI_CLASS_PROCESSOR_486 0x0b01 #define PCI_CLASS_PROCESSOR_PENTIUM 0x0b02 #define PCI_CLASS_PROCESSOR_ALPHA 0x0b10 #define PCI_CLASS_PROCESSOR_POWERPC 0x0b20 #define PCI_CLASS_PROCESSOR_CO 0x0b40 #define PCI_BASE_CLASS_SERIAL 0x0c #define PCI_CLASS_SERIAL_FIREWIRE 0x0c00 #define PCI_CLASS_SERIAL_ACCESS 0x0c01 #define PCI_CLASS_SERIAL_SSA 0x0c02 #define PCI_CLASS_SERIAL_USB 0x0c03 #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 #define PCI_BASE_CLASS_INTELLIGENT 0x0e #define PCI_CLASS_INTELLIGENT_I2O 0x0e00 #define PCI_CLASS_HOT_SWAP_CONTROLLER 0xff00 #define PCI_CLASS_OTHERS 0xff /* * Vendor and card ID's: sort these numerically according to vendor * (and according to card ID within vendor). Send all updates to * . 
*/ #define PCI_VENDOR_ID_COMPAQ 0x0e11 #define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508 #define PCI_DEVICE_ID_COMPAQ_1280 0x3033 #define PCI_DEVICE_ID_COMPAQ_TRIFLEX 0x4000 #define PCI_DEVICE_ID_COMPAQ_6010 0x6010 #define PCI_DEVICE_ID_COMPAQ_SMART2P 0xae10 #define PCI_DEVICE_ID_COMPAQ_NETEL100 0xae32 #define PCI_DEVICE_ID_COMPAQ_NETEL10 0xae34 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3I 0xae35 #define PCI_DEVICE_ID_COMPAQ_NETEL100D 0xae40 #define PCI_DEVICE_ID_COMPAQ_NETEL100PI 0xae43 #define PCI_DEVICE_ID_COMPAQ_NETEL100I 0xb011 #define PCI_DEVICE_ID_COMPAQ_THUNDER 0xf130 #define PCI_DEVICE_ID_COMPAQ_NETFLEX3B 0xf150 #define PCI_VENDOR_ID_NCR 0x1000 #define PCI_DEVICE_ID_NCR_53C810 0x0001 #define PCI_DEVICE_ID_NCR_53C820 0x0002 #define PCI_DEVICE_ID_NCR_53C825 0x0003 #define PCI_DEVICE_ID_NCR_53C815 0x0004 #define PCI_DEVICE_ID_NCR_53C860 0x0006 #define PCI_DEVICE_ID_NCR_53C1510D 0x000a #define PCI_DEVICE_ID_NCR_53C896 0x000b #define PCI_DEVICE_ID_NCR_53C895 0x000c #define PCI_DEVICE_ID_NCR_53C885 0x000d #define PCI_DEVICE_ID_NCR_53C875 0x000f #define PCI_DEVICE_ID_NCR_53C1510 0x0010 #define PCI_DEVICE_ID_NCR_53C875J 0x008f #define PCI_VENDOR_ID_ATI 0x1002 #define PCI_DEVICE_ID_ATI_68800 0x4158 #define PCI_DEVICE_ID_ATI_215CT222 0x4354 #define PCI_DEVICE_ID_ATI_210888CX 0x4358 #define PCI_DEVICE_ID_ATI_215GB 0x4742 #define PCI_DEVICE_ID_ATI_215GD 0x4744 #define PCI_DEVICE_ID_ATI_215GI 0x4749 #define PCI_DEVICE_ID_ATI_215GP 0x4750 #define PCI_DEVICE_ID_ATI_215GQ 0x4751 #define PCI_DEVICE_ID_ATI_215GT 0x4754 #define PCI_DEVICE_ID_ATI_215GTB 0x4755 #define PCI_DEVICE_ID_ATI_210888GX 0x4758 #define PCI_DEVICE_ID_ATI_RAGE128_LE 0x4c45 #define PCI_DEVICE_ID_ATI_RAGE128_LF 0x4c46 #define PCI_DEVICE_ID_ATI_215LG 0x4c47 #define PCI_DEVICE_ID_ATI_264LT 0x4c54 #define PCI_DEVICE_ID_ATI_RAGE128_PF 0x5046 #define PCI_DEVICE_ID_ATI_RAGE128_PR 0x5052 #define PCI_DEVICE_ID_ATI_RAGE128_RE 0x5245 #define PCI_DEVICE_ID_ATI_RAGE128_RF 0x5246 #define PCI_DEVICE_ID_ATI_RAGE128_RK 0x524b 
#define PCI_DEVICE_ID_ATI_RAGE128_RL 0x524c #define PCI_DEVICE_ID_ATI_264VT 0x5654 #define PCI_VENDOR_ID_VLSI 0x1004 #define PCI_DEVICE_ID_VLSI_82C592 0x0005 #define PCI_DEVICE_ID_VLSI_82C593 0x0006 #define PCI_DEVICE_ID_VLSI_82C594 0x0007 #define PCI_DEVICE_ID_VLSI_82C597 0x0009 #define PCI_DEVICE_ID_VLSI_82C541 0x000c #define PCI_DEVICE_ID_VLSI_82C543 0x000d #define PCI_DEVICE_ID_VLSI_82C532 0x0101 #define PCI_DEVICE_ID_VLSI_82C534 0x0102 #define PCI_DEVICE_ID_VLSI_82C535 0x0104 #define PCI_DEVICE_ID_VLSI_82C147 0x0105 #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 #define PCI_VENDOR_ID_ADL 0x1005 #define PCI_DEVICE_ID_ADL_2301 0x2301 #define PCI_VENDOR_ID_NS 0x100b #define PCI_DEVICE_ID_NS_87415 0x0002 #define PCI_DEVICE_ID_NS_87410 0xd001 #define PCI_VENDOR_ID_TSENG 0x100c #define PCI_DEVICE_ID_TSENG_W32P_2 0x3202 #define PCI_DEVICE_ID_TSENG_W32P_b 0x3205 #define PCI_DEVICE_ID_TSENG_W32P_c 0x3206 #define PCI_DEVICE_ID_TSENG_W32P_d 0x3207 #define PCI_DEVICE_ID_TSENG_ET6000 0x3208 #define PCI_VENDOR_ID_WEITEK 0x100e #define PCI_DEVICE_ID_WEITEK_P9000 0x9001 #define PCI_DEVICE_ID_WEITEK_P9100 0x9100 #define PCI_VENDOR_ID_DEC 0x1011 #define PCI_DEVICE_ID_DEC_BRD 0x0001 #define PCI_DEVICE_ID_DEC_TULIP 0x0002 #define PCI_DEVICE_ID_DEC_TGA 0x0004 #define PCI_DEVICE_ID_DEC_TULIP_FAST 0x0009 #define PCI_DEVICE_ID_DEC_TGA2 0x000D #define PCI_DEVICE_ID_DEC_FDDI 0x000F #define PCI_DEVICE_ID_DEC_TULIP_PLUS 0x0014 #define PCI_DEVICE_ID_DEC_21142 0x0019 #define PCI_DEVICE_ID_DEC_21052 0x0021 #define PCI_DEVICE_ID_DEC_21150 0x0022 #define PCI_DEVICE_ID_DEC_21152 0x0024 #define PCI_DEVICE_ID_DEC_21153 0x0025 #define PCI_DEVICE_ID_DEC_21154 0x0026 #define PCI_DEVICE_ID_DEC_21285 0x1065 #define PCI_DEVICE_ID_DEC_21554 0x0046 #define PCI_DEVICE_ID_COMPAQ_42XX 0x0046 #define PCI_VENDOR_ID_CIRRUS 0x1013 #define PCI_DEVICE_ID_CIRRUS_7548 0x0038 #define PCI_DEVICE_ID_CIRRUS_5430 0x00a0 #define PCI_DEVICE_ID_CIRRUS_5434_4 0x00a4 #define PCI_DEVICE_ID_CIRRUS_5434_8 0x00a8 #define 
PCI_DEVICE_ID_CIRRUS_5436 0x00ac #define PCI_DEVICE_ID_CIRRUS_5446 0x00b8 #define PCI_DEVICE_ID_CIRRUS_5480 0x00bc #define PCI_DEVICE_ID_CIRRUS_5464 0x00d4 #define PCI_DEVICE_ID_CIRRUS_5465 0x00d6 #define PCI_DEVICE_ID_CIRRUS_6729 0x1100 #define PCI_DEVICE_ID_CIRRUS_6832 0x1110 #define PCI_DEVICE_ID_CIRRUS_7542 0x1200 #define PCI_DEVICE_ID_CIRRUS_7543 0x1202 #define PCI_DEVICE_ID_CIRRUS_7541 0x1204 #define PCI_VENDOR_ID_IBM 0x1014 #define PCI_DEVICE_ID_IBM_FIRE_CORAL 0x000a #define PCI_DEVICE_ID_IBM_TR 0x0018 #define PCI_DEVICE_ID_IBM_82G2675 0x001d #define PCI_DEVICE_ID_IBM_MCA 0x0020 #define PCI_DEVICE_ID_IBM_82351 0x0022 #define PCI_DEVICE_ID_IBM_PYTHON 0x002d #define PCI_DEVICE_ID_IBM_SERVERAID 0x002e #define PCI_DEVICE_ID_IBM_TR_WAKE 0x003e #define PCI_DEVICE_ID_IBM_MPIC 0x0046 #define PCI_DEVICE_ID_IBM_3780IDSP 0x007d #define PCI_DEVICE_ID_IBM_MPIC_2 0xffff #define PCI_VENDOR_ID_WD 0x101c #define PCI_DEVICE_ID_WD_7197 0x3296 #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 #define PCI_DEVICE_ID_AMD_SCSI 0x2020 #define PCI_VENDOR_ID_TRIDENT 0x1023 #define PCI_DEVICE_ID_TRIDENT_9397 0x9397 #define PCI_DEVICE_ID_TRIDENT_9420 0x9420 #define PCI_DEVICE_ID_TRIDENT_9440 0x9440 #define PCI_DEVICE_ID_TRIDENT_9660 0x9660 #define PCI_DEVICE_ID_TRIDENT_9750 0x9750 #define PCI_VENDOR_ID_AI 0x1025 #define PCI_DEVICE_ID_AI_M1435 0x1435 #define PCI_VENDOR_ID_MATROX 0x102B #define PCI_DEVICE_ID_MATROX_MGA_2 0x0518 #define PCI_DEVICE_ID_MATROX_MIL 0x0519 #define PCI_DEVICE_ID_MATROX_MYS 0x051A #define PCI_DEVICE_ID_MATROX_MIL_2 0x051b #define PCI_DEVICE_ID_MATROX_MIL_2_AGP 0x051f #define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 #define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 #define PCI_DEVICE_ID_MATROX_MGA_IMP 0x0d10 #define PCI_DEVICE_ID_MATROX_G100_MM 0x1000 #define PCI_DEVICE_ID_MATROX_G100_AGP 0x1001 #define PCI_VENDOR_ID_CT 0x102c #define PCI_DEVICE_ID_CT_65545 0x00d8 #define PCI_DEVICE_ID_CT_65548 0x00dc 
#define PCI_DEVICE_ID_CT_65550 0x00e0 #define PCI_DEVICE_ID_CT_65554 0x00e4 #define PCI_DEVICE_ID_CT_65555 0x00e5 #define PCI_VENDOR_ID_MIRO 0x1031 #define PCI_DEVICE_ID_MIRO_36050 0x5601 #define PCI_VENDOR_ID_NEC 0x1033 #define PCI_DEVICE_ID_NEC_PCX2 0x0046 #define PCI_VENDOR_ID_FD 0x1036 #define PCI_DEVICE_ID_FD_36C70 0x0000 #define PCI_VENDOR_ID_SI 0x1039 #define PCI_DEVICE_ID_SI_5591_AGP 0x0001 #define PCI_DEVICE_ID_SI_6202 0x0002 #define PCI_DEVICE_ID_SI_503 0x0008 #define PCI_DEVICE_ID_SI_ACPI 0x0009 #define PCI_DEVICE_ID_SI_5597_VGA 0x0200 #define PCI_DEVICE_ID_SI_6205 0x0205 #define PCI_DEVICE_ID_SI_501 0x0406 #define PCI_DEVICE_ID_SI_496 0x0496 #define PCI_DEVICE_ID_SI_601 0x0601 #define PCI_DEVICE_ID_SI_5107 0x5107 #define PCI_DEVICE_ID_SI_5511 0x5511 #define PCI_DEVICE_ID_SI_5513 0x5513 #define PCI_DEVICE_ID_SI_5571 0x5571 #define PCI_DEVICE_ID_SI_5591 0x5591 #define PCI_DEVICE_ID_SI_5597 0x5597 #define PCI_DEVICE_ID_SI_7001 0x7001 #define PCI_VENDOR_ID_HP 0x103c #define PCI_DEVICE_ID_HP_J2585A 0x1030 #define PCI_DEVICE_ID_HP_J2585B 0x1031 #define PCI_VENDOR_ID_PCTECH 0x1042 #define PCI_DEVICE_ID_PCTECH_RZ1000 0x1000 #define PCI_DEVICE_ID_PCTECH_RZ1001 0x1001 #define PCI_DEVICE_ID_PCTECH_SAMURAI_0 0x3000 #define PCI_DEVICE_ID_PCTECH_SAMURAI_1 0x3010 #define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020 #define PCI_VENDOR_ID_DPT 0x1044 #define PCI_DEVICE_ID_DPT 0xa400 #define PCI_VENDOR_ID_OPTI 0x1045 #define PCI_DEVICE_ID_OPTI_92C178 0xc178 #define PCI_DEVICE_ID_OPTI_82C557 0xc557 #define PCI_DEVICE_ID_OPTI_82C558 0xc558 #define PCI_DEVICE_ID_OPTI_82C621 0xc621 #define PCI_DEVICE_ID_OPTI_82C700 0xc700 #define PCI_DEVICE_ID_OPTI_82C701 0xc701 #define PCI_DEVICE_ID_OPTI_82C814 0xc814 #define PCI_DEVICE_ID_OPTI_82C822 0xc822 #define PCI_DEVICE_ID_OPTI_82C861 0xc861 #define PCI_DEVICE_ID_OPTI_82C825 0xd568 #define PCI_VENDOR_ID_SGS 0x104a #define PCI_DEVICE_ID_SGS_2000 0x0008 #define PCI_DEVICE_ID_SGS_1764 0x0009 #define PCI_VENDOR_ID_BUSLOGIC 0x104B #define 
PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 #define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT 0x8130 #define PCI_VENDOR_ID_TI 0x104c #define PCI_DEVICE_ID_TI_TVP4010 0x3d04 #define PCI_DEVICE_ID_TI_TVP4020 0x3d07 #define PCI_DEVICE_ID_TI_PCI1130 0xac12 #define PCI_DEVICE_ID_TI_PCI1031 0xac13 #define PCI_DEVICE_ID_TI_PCI1131 0xac15 #define PCI_DEVICE_ID_TI_PCI1250 0xac16 #define PCI_DEVICE_ID_TI_PCI1220 0xac17 #define PCI_VENDOR_ID_OAK 0x104e #define PCI_DEVICE_ID_OAK_OTI107 0x0107 /* Winbond have two vendor IDs! See 0x10ad as well */ #define PCI_VENDOR_ID_WINBOND2 0x1050 #define PCI_DEVICE_ID_WINBOND2_89C940 0x0940 #define PCI_VENDOR_ID_MOTOROLA 0x1057 #define PCI_VENDOR_ID_MOTOROLA_OOPS 0x1507 #define PCI_DEVICE_ID_MOTOROLA_MPC105 0x0001 #define PCI_DEVICE_ID_MOTOROLA_MPC106 0x0002 #define PCI_DEVICE_ID_MOTOROLA_RAVEN 0x4801 #define PCI_DEVICE_ID_MOTOROLA_FALCON 0x4802 #define PCI_DEVICE_ID_MOTOROLA_CPX8216 0x4806 #define PCI_VENDOR_ID_PROMISE 0x105a #define PCI_DEVICE_ID_PROMISE_20246 0x4d33 #define PCI_DEVICE_ID_PROMISE_5300 0x5300 #define PCI_VENDOR_ID_N9 0x105d #define PCI_DEVICE_ID_N9_I128 0x2309 #define PCI_DEVICE_ID_N9_I128_2 0x2339 #define PCI_DEVICE_ID_N9_I128_T2R 0x493d #define PCI_VENDOR_ID_UMC 0x1060 #define PCI_DEVICE_ID_UMC_UM8673F 0x0101 #define PCI_DEVICE_ID_UMC_UM8891A 0x0891 #define PCI_DEVICE_ID_UMC_UM8886BF 0x673a #define PCI_DEVICE_ID_UMC_UM8886A 0x886a #define PCI_DEVICE_ID_UMC_UM8881F 0x8881 #define PCI_DEVICE_ID_UMC_UM8886F 0x8886 #define PCI_DEVICE_ID_UMC_UM9017F 0x9017 #define PCI_DEVICE_ID_UMC_UM8886N 0xe886 #define PCI_DEVICE_ID_UMC_UM8891N 0xe891 #define PCI_VENDOR_ID_X 0x1061 #define PCI_DEVICE_ID_X_AGX016 0x0001 #define PCI_VENDOR_ID_PICOP 0x1066 #define PCI_DEVICE_ID_PICOP_PT86C52X 0x0001 #define PCI_DEVICE_ID_PICOP_PT80C524 0x8002 #define PCI_VENDOR_ID_MYLEX 0x1069 #define PCI_DEVICE_ID_MYLEX_DAC960_P 0x0001 #define PCI_DEVICE_ID_MYLEX_DAC960_PD 0x0002 #define PCI_DEVICE_ID_MYLEX_DAC960_PG 
0x0010 #define PCI_DEVICE_ID_MYLEX_DAC960_LA 0x0020 #define PCI_DEVICE_ID_MYLEX_DAC960_LP 0x0050 #define PCI_DEVICE_ID_MYLEX_DAC960_BA 0xBA56 #define PCI_VENDOR_ID_APPLE 0x106b #define PCI_DEVICE_ID_APPLE_BANDIT 0x0001 #define PCI_DEVICE_ID_APPLE_GC 0x0002 #define PCI_DEVICE_ID_APPLE_HYDRA 0x000e #define PCI_VENDOR_ID_NEXGEN 0x1074 #define PCI_DEVICE_ID_NEXGEN_82C501 0x4e78 #define PCI_VENDOR_ID_QLOGIC 0x1077 #define PCI_DEVICE_ID_QLOGIC_ISP1020 0x1020 #define PCI_DEVICE_ID_QLOGIC_ISP1022 0x1022 #define PCI_DEVICE_ID_QLOGIC_ISP2100 0x2100 #define PCI_DEVICE_ID_QLOGIC_ISP2200 0x2200 #define PCI_VENDOR_ID_CYRIX 0x1078 #define PCI_DEVICE_ID_CYRIX_5510 0x0000 #define PCI_DEVICE_ID_CYRIX_PCI_MASTER 0x0001 #define PCI_DEVICE_ID_CYRIX_5520 0x0002 #define PCI_DEVICE_ID_CYRIX_5530_LEGACY 0x0100 #define PCI_DEVICE_ID_CYRIX_5530_SMI 0x0101 #define PCI_DEVICE_ID_CYRIX_5530_IDE 0x0102 #define PCI_DEVICE_ID_CYRIX_5530_AUDIO 0x0103 #define PCI_DEVICE_ID_CYRIX_5530_VIDEO 0x0104 #define PCI_VENDOR_ID_LEADTEK 0x107d #define PCI_DEVICE_ID_LEADTEK_805 0x0000 #define PCI_VENDOR_ID_CONTAQ 0x1080 #define PCI_DEVICE_ID_CONTAQ_82C599 0x0600 #define PCI_DEVICE_ID_CONTAQ_82C693 0xc693 #define PCI_VENDOR_ID_FOREX 0x1083 #define PCI_VENDOR_ID_OLICOM 0x108d #define PCI_DEVICE_ID_OLICOM_OC3136 0x0001 #define PCI_DEVICE_ID_OLICOM_OC2315 0x0011 #define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 #define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 #define PCI_DEVICE_ID_OLICOM_OC6151 0x0021 #define PCI_VENDOR_ID_SUN 0x108e #define PCI_DEVICE_ID_SUN_EBUS 0x1000 #define PCI_DEVICE_ID_SUN_HAPPYMEAL 0x1001 #define PCI_DEVICE_ID_SUN_SIMBA 0x5000 #define PCI_DEVICE_ID_SUN_PBM 0x8000 #define PCI_DEVICE_ID_SUN_SABRE 0xa000 #define PCI_VENDOR_ID_CMD 0x1095 #define PCI_DEVICE_ID_CMD_640 0x0640 #define PCI_DEVICE_ID_CMD_643 0x0643 #define PCI_DEVICE_ID_CMD_646 0x0646 #define PCI_DEVICE_ID_CMD_647 0x0647 #define PCI_DEVICE_ID_CMD_670 0x0670 #define PCI_VENDOR_ID_VISION 0x1098 #define 
PCI_DEVICE_ID_VISION_QD8500 0x0001 #define PCI_DEVICE_ID_VISION_QD8580 0x0002 #define PCI_VENDOR_ID_BROOKTREE 0x109e #define PCI_DEVICE_ID_BROOKTREE_848 0x0350 #define PCI_DEVICE_ID_BROOKTREE_849A 0x0351 #define PCI_DEVICE_ID_BROOKTREE_878_1 0x036e #define PCI_DEVICE_ID_BROOKTREE_878 0x0878 #define PCI_DEVICE_ID_BROOKTREE_8474 0x8474 #define PCI_VENDOR_ID_SIERRA 0x10a8 #define PCI_DEVICE_ID_SIERRA_STB 0x0000 #define PCI_VENDOR_ID_ACC 0x10aa #define PCI_DEVICE_ID_ACC_2056 0x0000 #define PCI_VENDOR_ID_WINBOND 0x10ad #define PCI_DEVICE_ID_WINBOND_83769 0x0001 #define PCI_DEVICE_ID_WINBOND_82C105 0x0105 #define PCI_DEVICE_ID_WINBOND_83C553 0x0565 #define PCI_VENDOR_ID_DATABOOK 0x10b3 #define PCI_DEVICE_ID_DATABOOK_87144 0xb106 #define PCI_VENDOR_ID_PLX 0x10b5 #define PCI_DEVICE_ID_PLX_9050 0x9050 #define PCI_DEVICE_ID_PLX_9060 0x9060 #define PCI_DEVICE_ID_PLX_9060ES 0x906E #define PCI_DEVICE_ID_PLX_9060SD 0x906D #define PCI_DEVICE_ID_PLX_9080 0x9080 #define PCI_VENDOR_ID_MADGE 0x10b6 #define PCI_DEVICE_ID_MADGE_MK2 0x0002 #define PCI_DEVICE_ID_MADGE_C155S 0x1001 #define PCI_VENDOR_ID_3COM 0x10b7 #define PCI_DEVICE_ID_3COM_3C985 0x0001 #define PCI_DEVICE_ID_3COM_3C339 0x3390 #define PCI_DEVICE_ID_3COM_3C590 0x5900 #define PCI_DEVICE_ID_3COM_3C595TX 0x5950 #define PCI_DEVICE_ID_3COM_3C595T4 0x5951 #define PCI_DEVICE_ID_3COM_3C595MII 0x5952 #define PCI_DEVICE_ID_3COM_3C900TPO 0x9000 #define PCI_DEVICE_ID_3COM_3C900COMBO 0x9001 #define PCI_DEVICE_ID_3COM_3C905TX 0x9050 #define PCI_DEVICE_ID_3COM_3C905T4 0x9051 #define PCI_DEVICE_ID_3COM_3C905B_TX 0x9055 #define PCI_VENDOR_ID_SMC 0x10b8 #define PCI_DEVICE_ID_SMC_EPIC100 0x0005 #define PCI_VENDOR_ID_AL 0x10b9 #define PCI_DEVICE_ID_AL_M1445 0x1445 #define PCI_DEVICE_ID_AL_M1449 0x1449 #define PCI_DEVICE_ID_AL_M1451 0x1451 #define PCI_DEVICE_ID_AL_M1461 0x1461 #define PCI_DEVICE_ID_AL_M1489 0x1489 #define PCI_DEVICE_ID_AL_M1511 0x1511 #define PCI_DEVICE_ID_AL_M1513 0x1513 #define PCI_DEVICE_ID_AL_M1521 0x1521 #define 
PCI_DEVICE_ID_AL_M1523 0x1523 #define PCI_DEVICE_ID_AL_M1531 0x1531 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M3307 0x3307 #define PCI_DEVICE_ID_AL_M4803 0x5215 #define PCI_DEVICE_ID_AL_M5219 0x5219 #define PCI_DEVICE_ID_AL_M5229 0x5229 #define PCI_DEVICE_ID_AL_M5237 0x5237 #define PCI_DEVICE_ID_AL_M7101 0x7101 #define PCI_VENDOR_ID_MITSUBISHI 0x10ba #define PCI_VENDOR_ID_SURECOM 0x10bd #define PCI_DEVICE_ID_SURECOM_NE34 0x0e34 #define PCI_VENDOR_ID_NEOMAGIC 0x10c8 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2070 0x0001 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128V 0x0002 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZV 0x0003 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_NM2160 0x0004 #define PCI_DEVICE_ID_NEOMAGIC_MAGICMEDIA_256AV 0x0005 #define PCI_DEVICE_ID_NEOMAGIC_MAGICGRAPH_128ZVPLUS 0x0083 #define PCI_VENDOR_ID_ASP 0x10cd #define PCI_DEVICE_ID_ASP_ABP940 0x1200 #define PCI_DEVICE_ID_ASP_ABP940U 0x1300 #define PCI_DEVICE_ID_ASP_ABP940UW 0x2300 #define PCI_VENDOR_ID_MACRONIX 0x10d9 #define PCI_DEVICE_ID_MACRONIX_MX98713 0x0512 #define PCI_DEVICE_ID_MACRONIX_MX987x5 0x0531 #define PCI_VENDOR_ID_CERN 0x10dc #define PCI_DEVICE_ID_CERN_SPSB_PMC 0x0001 #define PCI_DEVICE_ID_CERN_SPSB_PCI 0x0002 #define PCI_DEVICE_ID_CERN_HIPPI_DST 0x0021 #define PCI_DEVICE_ID_CERN_HIPPI_SRC 0x0022 #define PCI_VENDOR_ID_NVIDIA 0x10de #define PCI_VENDOR_ID_IMS 0x10e0 #define PCI_DEVICE_ID_IMS_8849 0x8849 #define PCI_VENDOR_ID_TEKRAM2 0x10e1 #define PCI_DEVICE_ID_TEKRAM2_690c 0x690c #define PCI_VENDOR_ID_TUNDRA 0x10e3 #define PCI_DEVICE_ID_TUNDRA_CA91C042 0x0000 #define PCI_VENDOR_ID_AMCC 0x10e8 #define PCI_DEVICE_ID_AMCC_MYRINET 0x8043 #define PCI_DEVICE_ID_AMCC_PARASTATION 0x8062 #define PCI_DEVICE_ID_AMCC_S5933 0x807d #define PCI_DEVICE_ID_AMCC_S5933_HEPC3 0x809c #define PCI_VENDOR_ID_INTERG 0x10ea #define PCI_DEVICE_ID_INTERG_1680 0x1680 #define PCI_DEVICE_ID_INTERG_1682 0x1682 #define PCI_VENDOR_ID_REALTEK 0x10ec #define PCI_DEVICE_ID_REALTEK_8029 0x8029 #define 
PCI_DEVICE_ID_REALTEK_8129 0x8129 #define PCI_DEVICE_ID_REALTEK_8139 0x8139 #define PCI_VENDOR_ID_TRUEVISION 0x10fa #define PCI_DEVICE_ID_TRUEVISION_T1000 0x000c #define PCI_VENDOR_ID_INIT 0x1101 #define PCI_DEVICE_ID_INIT_320P 0x9100 #define PCI_DEVICE_ID_INIT_360P 0x9500 #define PCI_VENDOR_ID_TTI 0x1103 #define PCI_DEVICE_ID_TTI_HPT343 0x0003 #define PCI_VENDOR_ID_VIA 0x1106 #define PCI_DEVICE_ID_VIA_82C505 0x0505 #define PCI_DEVICE_ID_VIA_82C561 0x0561 #define PCI_DEVICE_ID_VIA_82C586_1 0x0571 #define PCI_DEVICE_ID_VIA_82C576 0x0576 #define PCI_DEVICE_ID_VIA_82C585 0x0585 #define PCI_DEVICE_ID_VIA_82C586_0 0x0586 #define PCI_DEVICE_ID_VIA_82C595 0x0595 #define PCI_DEVICE_ID_VIA_82C596_0 0x0596 #define PCI_DEVICE_ID_VIA_82C597_0 0x0597 #define PCI_DEVICE_ID_VIA_82C598_0 0x0598 #define PCI_DEVICE_ID_VIA_82C926 0x0926 #define PCI_DEVICE_ID_VIA_82C416 0x1571 #define PCI_DEVICE_ID_VIA_82C595_97 0x1595 #define PCI_DEVICE_ID_VIA_82C586_2 0x3038 #define PCI_DEVICE_ID_VIA_82C586_3 0x3040 #define PCI_DEVICE_ID_VIA_82C686_5 0x3058 #define PCI_DEVICE_ID_VIA_86C100A 0x6100 #define PCI_DEVICE_ID_VIA_82C597_1 0x8597 #define PCI_DEVICE_ID_VIA_82C598_1 0x8598 #define PCI_VENDOR_ID_SMC2 0x1113 #define PCI_DEVICE_ID_SMC2_1211TX 0x1211 #define PCI_VENDOR_ID_VORTEX 0x1119 #define PCI_DEVICE_ID_VORTEX_GDT60x0 0x0000 #define PCI_DEVICE_ID_VORTEX_GDT6000B 0x0001 #define PCI_DEVICE_ID_VORTEX_GDT6x10 0x0002 #define PCI_DEVICE_ID_VORTEX_GDT6x20 0x0003 #define PCI_DEVICE_ID_VORTEX_GDT6530 0x0004 #define PCI_DEVICE_ID_VORTEX_GDT6550 0x0005 #define PCI_DEVICE_ID_VORTEX_GDT6x17 0x0006 #define PCI_DEVICE_ID_VORTEX_GDT6x27 0x0007 #define PCI_DEVICE_ID_VORTEX_GDT6537 0x0008 #define PCI_DEVICE_ID_VORTEX_GDT6557 0x0009 #define PCI_DEVICE_ID_VORTEX_GDT6x15 0x000a #define PCI_DEVICE_ID_VORTEX_GDT6x25 0x000b #define PCI_DEVICE_ID_VORTEX_GDT6535 0x000c #define PCI_DEVICE_ID_VORTEX_GDT6555 0x000d #define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x0100 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x0101 #define 
PCI_DEVICE_ID_VORTEX_GDT6537RP 0x0102 #define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x0103 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x0104 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x0105 #define PCI_DEVICE_ID_VORTEX_GDT6x17RP1 0x0110 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP1 0x0111 #define PCI_DEVICE_ID_VORTEX_GDT6537RP1 0x0112 #define PCI_DEVICE_ID_VORTEX_GDT6557RP1 0x0113 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP1 0x0114 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP1 0x0115 #define PCI_DEVICE_ID_VORTEX_GDT6x17RP2 0x0120 #define PCI_DEVICE_ID_VORTEX_GDT6x27RP2 0x0121 #define PCI_DEVICE_ID_VORTEX_GDT6537RP2 0x0122 #define PCI_DEVICE_ID_VORTEX_GDT6557RP2 0x0123 #define PCI_DEVICE_ID_VORTEX_GDT6x11RP2 0x0124 #define PCI_DEVICE_ID_VORTEX_GDT6x21RP2 0x0125 #define PCI_VENDOR_ID_EF 0x111a #define PCI_DEVICE_ID_EF_ATM_FPGA 0x0000 #define PCI_DEVICE_ID_EF_ATM_ASIC 0x0002 #define PCI_VENDOR_ID_FORE 0x1127 #define PCI_DEVICE_ID_FORE_PCA200PC 0x0210 #define PCI_DEVICE_ID_FORE_PCA200E 0x0300 #define PCI_VENDOR_ID_IMAGINGTECH 0x112f #define PCI_DEVICE_ID_IMAGINGTECH_ICPCI 0x0000 #define PCI_VENDOR_ID_PHILIPS 0x1131 #define PCI_DEVICE_ID_PHILIPS_SAA7145 0x7145 #define PCI_DEVICE_ID_PHILIPS_SAA7146 0x7146 #define PCI_VENDOR_ID_CYCLONE 0x113c #define PCI_DEVICE_ID_CYCLONE_SDK 0x0001 #define PCI_VENDOR_ID_ALLIANCE 0x1142 #define PCI_DEVICE_ID_ALLIANCE_PROMOTIO 0x3210 #define PCI_DEVICE_ID_ALLIANCE_PROVIDEO 0x6422 #define PCI_DEVICE_ID_ALLIANCE_AT24 0x6424 #define PCI_DEVICE_ID_ALLIANCE_AT3D 0x643d #define PCI_VENDOR_ID_SYSKONNECT 0x1148 #define PCI_DEVICE_ID_SYSKONNECT_FP 0x4000 #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 #define PCI_DEVICE_ID_SYSKONNECT_GE 0x4300 #define PCI_VENDOR_ID_VMIC 0x114a #define PCI_DEVICE_ID_VMIC_VME 0x7587 #define PCI_VENDOR_ID_DIGI 0x114f #define PCI_DEVICE_ID_DIGI_EPC 0x0002 #define PCI_DEVICE_ID_DIGI_RIGHTSWITCH 0x0003 #define PCI_DEVICE_ID_DIGI_XEM 0x0004 #define PCI_DEVICE_ID_DIGI_XR 0x0005 #define PCI_DEVICE_ID_DIGI_CX 0x0006 #define PCI_DEVICE_ID_DIGI_XRJ 0x0009 
#define PCI_DEVICE_ID_DIGI_EPCJ 0x000a #define PCI_DEVICE_ID_DIGI_XR_920 0x0027 #define PCI_VENDOR_ID_MUTECH 0x1159 #define PCI_DEVICE_ID_MUTECH_MV1000 0x0001 #define PCI_VENDOR_ID_RENDITION 0x1163 #define PCI_DEVICE_ID_RENDITION_VERITE 0x0001 #define PCI_DEVICE_ID_RENDITION_VERITE2100 0x2000 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 #define PCI_DEVICE_ID_SERVERWORKS_HE 0x0008 #define PCI_DEVICE_ID_SERVERWORKS_LE 0x0009 #define PCI_DEVICE_ID_SERVERWORKS_CIOB30 0x0010 #define PCI_DEVICE_ID_SERVERWORKS_CMIC_HE 0x0011 #define PCI_DEVICE_ID_SERVERWORKS_CSB5 0x0201 #define PCI_VENDOR_ID_SBE 0x1176 #define PCI_DEVICE_ID_SBE_WANXL100 0x0301 #define PCI_DEVICE_ID_SBE_WANXL200 0x0302 #define PCI_DEVICE_ID_SBE_WANXL400 0x0104 #define PCI_VENDOR_ID_TOSHIBA 0x1179 #define PCI_DEVICE_ID_TOSHIBA_601 0x0601 #define PCI_DEVICE_ID_TOSHIBA_TOPIC95 0x060a #define PCI_DEVICE_ID_TOSHIBA_TOPIC97 0x060f #define PCI_VENDOR_ID_RICOH 0x1180 #define PCI_DEVICE_ID_RICOH_RL5C465 0x0465 #define PCI_DEVICE_ID_RICOH_RL5C466 0x0466 #define PCI_DEVICE_ID_RICOH_RL5C475 0x0475 #define PCI_DEVICE_ID_RICOH_RL5C478 0x0478 #define PCI_VENDOR_ID_ARTOP 0x1191 #define PCI_DEVICE_ID_ARTOP_ATP8400 0x0004 #define PCI_DEVICE_ID_ARTOP_ATP850UF 0x0005 #define PCI_VENDOR_ID_ZEITNET 0x1193 #define PCI_DEVICE_ID_ZEITNET_1221 0x0001 #define PCI_DEVICE_ID_ZEITNET_1225 0x0002 #define PCI_VENDOR_ID_OMEGA 0x119b #define PCI_DEVICE_ID_OMEGA_82C092G 0x1221 #define PCI_VENDOR_ID_GALILEO 0x11ab #define PCI_DEVICE_ID_GALILEO_GT64011 0x4146 #define PCI_VENDOR_ID_LITEON 0x11ad #define PCI_DEVICE_ID_LITEON_LNE100TX 0x0002 #define PCI_VENDOR_ID_NP 0x11bc #define PCI_DEVICE_ID_NP_PCI_FDDI 0x0001 #define PCI_VENDOR_ID_ATT 0x11c1 #define PCI_DEVICE_ID_ATT_L56XMF 0x0440 #define PCI_DEVICE_ID_ATT_L56DVP 0x0480 #define PCI_VENDOR_ID_SPECIALIX 0x11cb #define PCI_DEVICE_ID_SPECIALIX_IO8 0x2000 #define PCI_DEVICE_ID_SPECIALIX_XIO 0x4000 #define PCI_DEVICE_ID_SPECIALIX_RIO 0x8000 #define PCI_VENDOR_ID_AURAVISION 0x11d1 #define 
PCI_DEVICE_ID_AURAVISION_VXP524 0x01f7 #define PCI_VENDOR_ID_IKON 0x11d5 #define PCI_DEVICE_ID_IKON_10115 0x0115 #define PCI_DEVICE_ID_IKON_10117 0x0117 #define PCI_VENDOR_ID_ZORAN 0x11de #define PCI_DEVICE_ID_ZORAN_36057 0x6057 #define PCI_DEVICE_ID_ZORAN_36120 0x6120 #define PCI_VENDOR_ID_KINETIC 0x11f4 #define PCI_DEVICE_ID_KINETIC_2915 0x2915 #define PCI_VENDOR_ID_COMPEX 0x11f6 #define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112 #define PCI_DEVICE_ID_COMPEX_RL2000 0x1401 #define PCI_VENDOR_ID_RP 0x11fe #define PCI_DEVICE_ID_RP32INTF 0x0001 #define PCI_DEVICE_ID_RP8INTF 0x0002 #define PCI_DEVICE_ID_RP16INTF 0x0003 #define PCI_DEVICE_ID_RP4QUAD 0x0004 #define PCI_DEVICE_ID_RP8OCTA 0x0005 #define PCI_DEVICE_ID_RP8J 0x0006 #define PCI_DEVICE_ID_RPP4 0x000A #define PCI_DEVICE_ID_RPP8 0x000B #define PCI_DEVICE_ID_RP8M 0x000C #define PCI_VENDOR_ID_CYCLADES 0x120e #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 #define PCI_DEVICE_ID_CYCLOM_Y_Hi 0x0101 #define PCI_DEVICE_ID_CYCLOM_4Y_Lo 0x0102 #define PCI_DEVICE_ID_CYCLOM_4Y_Hi 0x0103 #define PCI_DEVICE_ID_CYCLOM_8Y_Lo 0x0104 #define PCI_DEVICE_ID_CYCLOM_8Y_Hi 0x0105 #define PCI_DEVICE_ID_CYCLOM_Z_Lo 0x0200 #define PCI_DEVICE_ID_CYCLOM_Z_Hi 0x0201 #define PCI_DEVICE_ID_PC300_RX_2 0x0300 #define PCI_DEVICE_ID_PC300_RX_1 0x0301 #define PCI_DEVICE_ID_PC300_TE_2 0x0310 #define PCI_DEVICE_ID_PC300_TE_1 0x0311 #define PCI_VENDOR_ID_ESSENTIAL 0x120f #define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER 0x0001 #define PCI_VENDOR_ID_O2 0x1217 #define PCI_DEVICE_ID_O2_6729 0x6729 #define PCI_DEVICE_ID_O2_6730 0x673a #define PCI_DEVICE_ID_O2_6832 0x6832 #define PCI_DEVICE_ID_O2_6836 0x6836 #define PCI_VENDOR_ID_3DFX 0x121a #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 #define PCI_DEVICE_ID_3DFX_VOODOO2 0x0002 #define PCI_DEVICE_ID_3DFX_BANSHEE 0x0003 #define PCI_VENDOR_ID_SIGMADES 0x1236 #define PCI_DEVICE_ID_SIGMADES_6425 0x6401 #define PCI_VENDOR_ID_CCUBE 0x123f #define PCI_VENDOR_ID_AVM 0x1244 #define PCI_DEVICE_ID_AVM_A1 0x0a00 #define 
PCI_VENDOR_ID_DIPIX 0x1246 #define PCI_VENDOR_ID_STALLION 0x124d #define PCI_DEVICE_ID_STALLION_ECHPCI832 0x0000 #define PCI_DEVICE_ID_STALLION_ECHPCI864 0x0002 #define PCI_DEVICE_ID_STALLION_EIOPCI 0x0003 #define PCI_VENDOR_ID_OPTIBASE 0x1255 #define PCI_DEVICE_ID_OPTIBASE_FORGE 0x1110 #define PCI_DEVICE_ID_OPTIBASE_FUSION 0x1210 #define PCI_DEVICE_ID_OPTIBASE_VPLEX 0x2110 #define PCI_DEVICE_ID_OPTIBASE_VPLEXCC 0x2120 #define PCI_DEVICE_ID_OPTIBASE_VQUEST 0x2130 #define PCI_VENDOR_ID_SATSAGEM 0x1267 #define PCI_DEVICE_ID_SATSAGEM_PCR2101 0x5352 #define PCI_DEVICE_ID_SATSAGEM_TELSATTURBO 0x5a4b #define PCI_VENDOR_ID_HUGHES 0x1273 #define PCI_DEVICE_ID_HUGHES_DIRECPC 0x0002 #define PCI_VENDOR_ID_ENSONIQ 0x1274 #define PCI_DEVICE_ID_ENSONIQ_AUDIOPCI 0x5000 #define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 #define PCI_VENDOR_ID_ALTEON 0x12ae #define PCI_DEVICE_ID_ALTEON_ACENIC 0x0001 #define PCI_VENDOR_ID_PICTUREL 0x12c5 #define PCI_DEVICE_ID_PICTUREL_PCIVST 0x0081 #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 #define PCI_VENDOR_ID_CBOARDS 0x1307 #define PCI_DEVICE_ID_CBOARDS_DAS1602_16 0x0001 #define PCI_VENDOR_ID_SIIG 0x131f #define PCI_DEVICE_ID_SIIG_1S1P_10x_550 0x1010 #define PCI_DEVICE_ID_SIIG_1S1P_10x_650 0x1011 #define PCI_DEVICE_ID_SIIG_1S1P_10x_850 0x1012 #define PCI_DEVICE_ID_SIIG_1P_10x 0x1020 #define PCI_DEVICE_ID_SIIG_2P_10x 0x1021 #define PCI_DEVICE_ID_SIIG_2S1P_10x_550 0x1034 #define PCI_DEVICE_ID_SIIG_2S1P_10x_650 0x1035 #define PCI_DEVICE_ID_SIIG_2S1P_10x_850 0x1036 #define PCI_DEVICE_ID_SIIG_1P_20x 0x2020 #define PCI_DEVICE_ID_SIIG_2P_20x 0x2021 #define PCI_DEVICE_ID_SIIG_2P1S_20x_550 0x2040 #define PCI_DEVICE_ID_SIIG_2P1S_20x_650 0x2041 #define PCI_DEVICE_ID_SIIG_2P1S_20x_850 0x2042 #define PCI_DEVICE_ID_SIIG_1S1P_20x_550 0x2010 #define PCI_DEVICE_ID_SIIG_1S1P_20x_650 0x2011 #define PCI_DEVICE_ID_SIIG_1S1P_20x_850 0x2012 #define PCI_DEVICE_ID_SIIG_2S1P_20x_550 0x2060 #define PCI_DEVICE_ID_SIIG_2S1P_20x_650 0x2061 
#define PCI_DEVICE_ID_SIIG_2S1P_20x_850 0x2062 #define PCI_VENDOR_ID_NETGEAR 0x1385 #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a #define PCI_VENDOR_ID_LAVA 0x1407 #define PCI_DEVICE_ID_LAVA_PARALLEL 0x8000 #define PCI_DEVICE_ID_LAVA_DUAL_PAR_A 0x8002 /* The Lava Dual Parallel is */ #define PCI_DEVICE_ID_LAVA_DUAL_PAR_B 0x8003 /* two PCI devices on a card */ #define PCI_VENDOR_ID_TIMEDIA 0x1409 #define PCI_DEVICE_ID_TIMEDIA_1889 0x7168 #define PCI_DEVICE_ID_TIMEDIA_4008A 0x7268 #define PCI_VENDOR_ID_AFAVLAB 0x14db #define PCI_DEVICE_ID_AFAVLAB_TK9902 0x2120 #define PCI_VENDOR_ID_SYMPHONY 0x1c1c #define PCI_DEVICE_ID_SYMPHONY_101 0x0001 #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 #define PCI_VENDOR_ID_3DLABS 0x3d3d #define PCI_DEVICE_ID_3DLABS_300SX 0x0001 #define PCI_DEVICE_ID_3DLABS_500TX 0x0002 #define PCI_DEVICE_ID_3DLABS_DELTA 0x0003 #define PCI_DEVICE_ID_3DLABS_PERMEDIA 0x0004 #define PCI_DEVICE_ID_3DLABS_MX 0x0006 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 #define PCI_DEVICE_ID_3DLABS_GAMMA 0x0008 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 #define PCI_VENDOR_ID_AVANCE 0x4005 #define PCI_DEVICE_ID_AVANCE_ALG2064 0x2064 #define PCI_DEVICE_ID_AVANCE_2302 0x2302 #define PCI_VENDOR_ID_NETVIN 0x4a14 #define PCI_DEVICE_ID_NETVIN_NV5000SC 0x5000 #define PCI_VENDOR_ID_S3 0x5333 #define PCI_DEVICE_ID_S3_PLATO_PXS 0x0551 #define PCI_DEVICE_ID_S3_ViRGE 0x5631 #define PCI_DEVICE_ID_S3_TRIO 0x8811 #define PCI_DEVICE_ID_S3_AURORA64VP 0x8812 #define PCI_DEVICE_ID_S3_TRIO64UVP 0x8814 #define PCI_DEVICE_ID_S3_ViRGE_VX 0x883d #define PCI_DEVICE_ID_S3_868 0x8880 #define PCI_DEVICE_ID_S3_928 0x88b0 #define PCI_DEVICE_ID_S3_864_1 0x88c0 #define PCI_DEVICE_ID_S3_864_2 0x88c1 #define PCI_DEVICE_ID_S3_964_1 0x88d0 #define PCI_DEVICE_ID_S3_964_2 0x88d1 #define PCI_DEVICE_ID_S3_968 0x88f0 #define PCI_DEVICE_ID_S3_TRIO64V2 0x8901 #define PCI_DEVICE_ID_S3_PLATO_PXG 0x8902 #define PCI_DEVICE_ID_S3_ViRGE_DXGX 0x8a01 #define PCI_DEVICE_ID_S3_ViRGE_GX2 
0x8a10 #define PCI_DEVICE_ID_S3_ViRGE_MX 0x8c01 #define PCI_DEVICE_ID_S3_ViRGE_MXP 0x8c02 #define PCI_DEVICE_ID_S3_ViRGE_MXPMV 0x8c03 #define PCI_DEVICE_ID_S3_SONICVIBES 0xca00 #define PCI_VENDOR_ID_DCI 0x6666 #define PCI_DEVICE_ID_DCI_PCCOM4 0x0001 #define PCI_VENDOR_ID_GENROCO 0x5555 #define PCI_DEVICE_ID_GENROCO_HFP832 0x0003 #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_21145 0x0039 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 #define PCI_DEVICE_ID_INTEL_82430 0x0486 #define PCI_DEVICE_ID_INTEL_82434 0x04a3 #define PCI_DEVICE_ID_INTEL_I960 0x0960 #define PCI_DEVICE_ID_INTEL_I960RN 0x0964 #define PCI_DEVICE_ID_INTEL_82559ER 0x1209 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222 #define PCI_DEVICE_ID_INTEL_7116 0x1223 #define PCI_DEVICE_ID_INTEL_82596 0x1226 #define PCI_DEVICE_ID_INTEL_82865 0x1227 #define PCI_DEVICE_ID_INTEL_82557 0x1229 #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e #define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230 #define PCI_DEVICE_ID_INTEL_82371MX 0x1234 #define PCI_DEVICE_ID_INTEL_82437MX 0x1235 #define PCI_DEVICE_ID_INTEL_82441 0x1237 #define PCI_DEVICE_ID_INTEL_82380FB 0x124b #define PCI_DEVICE_ID_INTEL_82439 0x1250 #define PCI_DEVICE_ID_INTEL_MEGARAID 0x1960 #define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000 #define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010 #define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020 #define PCI_DEVICE_ID_INTEL_82437VX 0x7030 #define PCI_DEVICE_ID_INTEL_82439TX 0x7100 #define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 #define PCI_DEVICE_ID_INTEL_82371AB 0x7111 #define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 #define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 #define PCI_DEVICE_ID_INTEL_82443LX_0 0x7180 #define PCI_DEVICE_ID_INTEL_82443LX_1 0x7181 #define PCI_DEVICE_ID_INTEL_82443BX_0 0x7190 #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 
#define PCI_DEVICE_ID_INTEL_P6 0x84c4 #define PCI_DEVICE_ID_INTEL_82450GX 0x84c4 #define PCI_DEVICE_ID_INTEL_82453GX 0x84c5 #define PCI_DEVICE_ID_INTEL_82451NX 0x84ca #define PCI_DEVICE_ID_INTEL_82454NX 0x84cb #define PCI_VENDOR_ID_COMPUTONE 0x8e0e #define PCI_DEVICE_ID_COMPUTONE_IP2EX 0x0291 #define PCI_VENDOR_ID_KTI 0x8e2e #define PCI_DEVICE_ID_KTI_ET32P2 0x3000 #define PCI_VENDOR_ID_ADAPTEC 0x9004 #define PCI_DEVICE_ID_ADAPTEC_7810 0x1078 #define PCI_DEVICE_ID_ADAPTEC_7821 0x2178 #define PCI_DEVICE_ID_ADAPTEC_38602 0x3860 #define PCI_DEVICE_ID_ADAPTEC_7850 0x5078 #define PCI_DEVICE_ID_ADAPTEC_7855 0x5578 #define PCI_DEVICE_ID_ADAPTEC_5800 0x5800 #define PCI_DEVICE_ID_ADAPTEC_3860 0x6038 #define PCI_DEVICE_ID_ADAPTEC_1480A 0x6075 #define PCI_DEVICE_ID_ADAPTEC_7860 0x6078 #define PCI_DEVICE_ID_ADAPTEC_7861 0x6178 #define PCI_DEVICE_ID_ADAPTEC_7870 0x7078 #define PCI_DEVICE_ID_ADAPTEC_7871 0x7178 #define PCI_DEVICE_ID_ADAPTEC_7872 0x7278 #define PCI_DEVICE_ID_ADAPTEC_7873 0x7378 #define PCI_DEVICE_ID_ADAPTEC_7874 0x7478 #define PCI_DEVICE_ID_ADAPTEC_7895 0x7895 #define PCI_DEVICE_ID_ADAPTEC_7880 0x8078 #define PCI_DEVICE_ID_ADAPTEC_7881 0x8178 #define PCI_DEVICE_ID_ADAPTEC_7882 0x8278 #define PCI_DEVICE_ID_ADAPTEC_7883 0x8378 #define PCI_DEVICE_ID_ADAPTEC_7884 0x8478 #define PCI_DEVICE_ID_ADAPTEC_7885 0x8578 #define PCI_DEVICE_ID_ADAPTEC_7886 0x8678 #define PCI_DEVICE_ID_ADAPTEC_7887 0x8778 #define PCI_DEVICE_ID_ADAPTEC_7888 0x8878 #define PCI_DEVICE_ID_ADAPTEC_1030 0x8b78 #define PCI_VENDOR_ID_ADAPTEC2 0x9005 #define PCI_DEVICE_ID_ADAPTEC2_2940U2 0x0010 #define PCI_DEVICE_ID_ADAPTEC2_2930U2 0x0011 #define PCI_DEVICE_ID_ADAPTEC2_7890B 0x0013 #define PCI_DEVICE_ID_ADAPTEC2_7890 0x001f #define PCI_DEVICE_ID_ADAPTEC2_3940U2 0x0050 #define PCI_DEVICE_ID_ADAPTEC2_3950U2D 0x0051 #define PCI_DEVICE_ID_ADAPTEC2_7896 0x005f #define PCI_DEVICE_ID_ADAPTEC2_7892A 0x0080 #define PCI_DEVICE_ID_ADAPTEC2_7892B 0x0081 #define PCI_DEVICE_ID_ADAPTEC2_7892D 0x0083 #define 
PCI_DEVICE_ID_ADAPTEC2_7892P 0x008f #define PCI_DEVICE_ID_ADAPTEC2_7899A 0x00c0 #define PCI_DEVICE_ID_ADAPTEC2_7899B 0x00c1 #define PCI_DEVICE_ID_ADAPTEC2_7899D 0x00c3 #define PCI_DEVICE_ID_ADAPTEC2_7899P 0x00cf #define PCI_VENDOR_ID_ATRONICS 0x907f #define PCI_DEVICE_ID_ATRONICS_2015 0x2015 #define PCI_VENDOR_ID_HOLTEK 0x9412 #define PCI_DEVICE_ID_HOLTEK_6565 0x6565 #define PCI_VENDOR_ID_TIGERJET 0xe159 #define PCI_DEVICE_ID_TIGERJET_300 0x0001 #define PCI_VENDOR_ID_ARK 0xedd8 #define PCI_DEVICE_ID_ARK_STING 0xa091 #define PCI_DEVICE_ID_ARK_STINGARK 0xa099 #define PCI_DEVICE_ID_ARK_2000MT 0xa0a1 #define PCI_VENDOR_ID_INTERPHASE 0x107e #define PCI_DEVICE_ID_INTERPHASE_5526 0x0004 #define PCI_DEVICE_ID_INTERPHASE_55x6 0x0005 /* * The PCI interface treats multi-function devices as independent * devices. The slot/function address of each device is encoded * in a single byte as follows: * * 7:3 = slot * 2:0 = function */ #define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) #endif /* USE_2_2_17_PCI_H */ #define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ #define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */ #define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */ #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ static void fill_dev_name(ulong pci_dev, char *name) { ulong kobj, value; memset(name, 0, sizeof(*name) * BUFSIZE); kobj = pci_dev + OFFSET(pci_dev_dev) + OFFSET(device_kobj); readmem(kobj + OFFSET(kobject_name), KVADDR, &value, sizeof(void *), "kobject name", 
FAULT_ON_ERROR);	/* tail of fill_dev_name(): "value" holds the kernel address of the kobject name string */
	read_string(value, name, BUFSIZE-1);
}

/*
 * fill_bus_name: copy the kobject name of a pci_bus' embedded struct device
 * into "name", a caller-supplied buffer of at least BUFSIZE characters.
 * The buffer is zeroed first so the result is always NUL-terminated.
 */
static void fill_bus_name(ulong pci_bus, char *name)
{
	ulong kobj, value;

	memset(name, 0, sizeof(*name) * BUFSIZE);
	/* address of pci_bus->dev.kobj */
	kobj = pci_bus + OFFSET(pci_bus_dev) + OFFSET(device_kobj);
	/* kobject.name is a pointer; fetch it, then read the string it points to */
	readmem(kobj + OFFSET(kobject_name), KVADDR, &value, sizeof(void *),
		"kobject name", FAULT_ON_ERROR);
	read_string(value, name, BUFSIZE-1);
}

/*
 * fill_dev_id: format the PCI "vendor:device" ID pair of a pci_dev into "id"
 * (caller-supplied BUFSIZE buffer), e.g. "8086:1229".
 */
static void fill_dev_id(ulong pci_dev, char *id)
{
	unsigned short device, vendor;

	memset(id, 0, sizeof(*id) * BUFSIZE);
	readmem(pci_dev + OFFSET(pci_dev_device), KVADDR, &device,
		sizeof(short), "pci dev device", FAULT_ON_ERROR);
	readmem(pci_dev + OFFSET(pci_dev_vendor), KVADDR, &vendor,
		sizeof(short), "pci dev vendor", FAULT_ON_ERROR);
	sprintf(id, "%x:%x", vendor, device);
}

/*
 * fill_dev_class: format the 16-bit base/sub class code of a pci_dev into
 * "c" (caller-supplied BUFSIZE buffer) as four hex digits.  pci_dev.class
 * holds 24 bits (base/sub/prog-if); the shift discards the prog-if byte.
 */
static void fill_dev_class(ulong pci_dev, char *c)
{
	unsigned int class;

	memset(c, 0, sizeof(*c) * BUFSIZE);
	readmem(pci_dev + OFFSET(pci_dev_class), KVADDR, &class,
		sizeof(int), "pci class", FAULT_ON_ERROR);
	class >>= 8;
	sprintf(c, "%04x", class);
}

/*
 * pci_pcie_type: extract the PCIe device/port type (bits 7:4) from a
 * cached pcie_flags_reg capability value.
 */
static int pci_pcie_type(ulong cap)
{
	return (cap & PCI_EXP_FLAGS_TYPE) >> 4;
}

/*
 * pci_is_bridge: TRUE if the config-space header type identifies a
 * PCI-PCI or CardBus bridge.
 */
static int pci_is_bridge(unsigned char hdr_type)
{
	return hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		hdr_type == PCI_HEADER_TYPE_CARDBUS;
}

/*
 * fill_pcie_type: describe a device's PCIe port type (and bridge-ness) as a
 * short string in "t", a caller-supplied BUFSIZE buffer.  On kernels whose
 * pci_dev lacks pcie_flags_reg, only the [BRIDGE] annotation can be made.
 * (Function continues past this chunk boundary.)
 */
static void fill_pcie_type(ulong pcidev, char *t)
{
	int type, bufidx = 0;
	unsigned short pciecap;
	unsigned char hdr_type;

	memset(t, 0, sizeof(*t) * BUFSIZE);
	readmem(pcidev + OFFSET(pci_dev_hdr_type), KVADDR, &hdr_type,
		sizeof(char), "pci dev hdr_type", FAULT_ON_ERROR);
	/* pcie_flags_reg absent on older kernels: fall through to bridge check */
	if (!VALID_MEMBER(pci_dev_pcie_flags_reg))
		goto bridge_chk;
	readmem(pcidev + OFFSET(pci_dev_pcie_flags_reg), KVADDR, &pciecap,
		sizeof(unsigned short), "pci dev pcie_flags_reg", FAULT_ON_ERROR);
	type = pci_pcie_type(pciecap);
	if (type == PCI_EXP_TYPE_ENDPOINT)
		bufidx = sprintf(t, "ENDPOINT");
	else if (type == PCI_EXP_TYPE_LEG_END)
		bufidx = sprintf(t, "LEG_END");
	else if (type == PCI_EXP_TYPE_ROOT_PORT)
		bufidx = sprintf(t, "ROOT_PORT");
	else if (type == PCI_EXP_TYPE_UPSTREAM)
		bufidx = sprintf(t, "UPSTREAM");
else if (type == PCI_EXP_TYPE_DOWNSTREAM)	/* continuation of fill_pcie_type() if-chain */
		bufidx = sprintf(t, "DOWNSTREAM");
	else if (type == PCI_EXP_TYPE_PCI_BRIDGE)
		bufidx = sprintf(t, "PCI_BRIDGE");
	else if (type == PCI_EXP_TYPE_PCIE_BRIDGE)
		bufidx = sprintf(t, "PCIE_BRIDGE");
	else if (type == PCI_EXP_TYPE_RC_END)
		bufidx = sprintf(t, "RC_END");
	else if (type == PCI_EXP_TYPE_RC_EC)
		bufidx = sprintf(t, "RC_EC");
bridge_chk:
	/* a bridge header type is reported in addition to any PCIe port type */
	if (pci_is_bridge(hdr_type))
		sprintf(t + bufidx, " [BRIDGE]");
}

/*
 * walk_devices: display every pci_dev on one pci_bus' ->devices list,
 * preceded by the bus' own bridge device (pci_bus->self), if any.
 * One header + one line per device: address, domain:bus:slot.function name,
 * class, vendor:device ID, and (when available) PCIe port type.
 * (Function continues past this chunk boundary.)
 */
static void walk_devices(ulong pci_bus)
{
	struct list_data list_data, *ld;
	int devcnt, i;
	ulong *devlist, self;
	char name[BUFSIZE], class[BUFSIZE], id[BUFSIZE], type[BUFSIZE];
	char pcidev_hdr[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	/* head of pci_bus->devices (a list_head of pci_dev.bus_list entries) */
	readmem(pci_bus + OFFSET(pci_bus_devices), KVADDR, &ld->start,
		sizeof(void *), "pci bus devices", FAULT_ON_ERROR);
	/* TYPE column only exists when pci_dev has pcie_flags_reg */
	if (VALID_MEMBER(pci_dev_pcie_flags_reg))
		snprintf(pcidev_hdr, sizeof(pcidev_hdr), "%s %s %s %s %s\n",
			mkstring(buf1, VADDR_PRLEN, CENTER, "PCI DEV"),
			mkstring(buf2, strlen("0000:00:00.0"), CENTER, "DO:BU:SL.FN"),
			mkstring(buf3, strlen("0000") + 2, CENTER, "CLASS"),
			mkstring(buf4, strlen("0000:0000"), CENTER, "PCI_ID"),
			mkstring(buf5, 10, CENTER, "TYPE"));
	else
		snprintf(pcidev_hdr, sizeof(pcidev_hdr), "%s %s %s %s\n",
			mkstring(buf1, VADDR_PRLEN, CENTER, "PCI DEV"),
			mkstring(buf2, strlen("0000:00:00.0"), CENTER, "DO:BU:SL.FN"),
			mkstring(buf3, strlen("0000") + 2, CENTER, "CLASS"),
			mkstring(buf4, strlen("0000:0000"), CENTER, "PCI_ID"));
	fprintf(fp, " %s", pcidev_hdr);
	/* the bridge device that leads to this bus, shown first */
	readmem(pci_bus + OFFSET(pci_bus_self), KVADDR, &self,
		sizeof(void *), "pci bus self", FAULT_ON_ERROR);
	if (self) {
		fill_dev_name(self, name);
		fill_dev_class(self, class);
		fill_dev_id(self, id);
		fill_pcie_type(self, type);
		fprintf(fp, " %s %s %s %s %s\n",
			mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(self)),
			mkstring(buf2, strlen("0000:00:00.0"), CENTER, name),
			mkstring(buf3, strlen("0000") + 2,
CENTER, class),	/* continuation of walk_devices(): finish pci_bus->self output line */
			mkstring(buf4, strlen("0000:0000"), CENTER, id),
			mkstring(buf5, 10, CENTER, type));
	}
	/* empty list: head points back at itself -- no child devices */
	if (ld->start == (pci_bus + OFFSET(pci_bus_devices)))
		return;
	ld->end = pci_bus + OFFSET(pci_bus_devices);
	hq_open();
	devcnt = do_list(ld);
	devlist = (ulong *)GETBUF(devcnt * sizeof(ulong));
	devcnt = retrieve_list(devlist, devcnt);
	hq_close();
	for (i = 0; i < devcnt; i++) {
		fill_dev_name(devlist[i], name);
		fill_dev_class(devlist[i], class);
		fill_dev_id(devlist[i], id);
		fill_pcie_type(devlist[i], type);
		fprintf(fp, " %s %s %s %s %s\n",
			mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(devlist[i])),
			mkstring(buf2, strlen("0000:00:00.0"), CENTER, name),
			mkstring(buf3, strlen("0000") + 2, CENTER, class),
			mkstring(buf4, strlen("0000:0000"), CENTER, id),
			mkstring(buf5, 10, CENTER, type));
	}
	FREEBUF(devlist);
}

/*
 * walk_buses: recursively display the child buses of a pci_bus.  For each
 * entry on pci_bus->children, print the bus/parent addresses, list its
 * devices via walk_devices(), then recurse into its own children.
 */
static void walk_buses(ulong pci_bus)
{
	struct list_data list_data, *ld;
	int buscnt, i;
	ulong *buslist, parent;
	char pcibus_hdr[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	readmem(pci_bus + OFFSET(pci_bus_children), KVADDR, &ld->start,
		sizeof(void *), "pci bus children", FAULT_ON_ERROR);
	/* empty children list: nothing below this bus */
	if (ld->start == (pci_bus + OFFSET(pci_bus_children)))
		return;
	ld->end = pci_bus + OFFSET(pci_bus_children);
	hq_open();
	buscnt = do_list(ld);
	buslist = (ulong *)GETBUF(buscnt * sizeof(ulong));
	buscnt = retrieve_list(buslist, buscnt);
	hq_close();
	snprintf(pcibus_hdr, sizeof(pcibus_hdr), "%s %s\n",
		mkstring(buf1, VADDR_PRLEN, CENTER, "PCI BUS"),
		mkstring(buf2, VADDR_PRLEN, CENTER, "PARENT BUS"));
	for (i = 0; i < buscnt; i++) {
		readmem(buslist[i] + OFFSET(pci_bus_parent), KVADDR, &parent,
			sizeof(void *), "pci bus parent", FAULT_ON_ERROR);
		/* header is repeated before each child bus entry */
		fprintf(fp, " %s", pcibus_hdr);
		fprintf(fp, " %s %s\n",
			mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(buslist[i])),
			mkstring(buf2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(parent)));
		walk_devices(buslist[i]);
		fprintf(fp, "\n");
		walk_buses(buslist[i]);
	}
	FREEBUF(buslist);
}

/*
 * do_pci2: modern "dev -p" implementation -- walk the kernel's
 * pci_root_buses list, printing each root bus and recursing through its
 * devices and child buses.  (Definition continues on the following lines.)
 */
static void
do_pci2(void)
{
	struct list_data list_data, *ld;
	int rootbuscnt, i;
	ulong *rootbuslist;
	unsigned long pci_root_bus_addr = symbol_value("pci_root_buses");
	char name[BUFSIZE];
	char pcirootbus_hdr[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	get_symbol_data("pci_root_buses", sizeof(void *), &ld->start);
	/* list head pointing at itself means no root buses at all */
	if (ld->start == pci_root_bus_addr)
		error(FATAL, "no PCI devices found on this system.\n");
	ld->end = pci_root_bus_addr;
	hq_open();
	rootbuscnt = do_list(ld);
	rootbuslist = (ulong *)GETBUF(rootbuscnt * sizeof(ulong));
	rootbuscnt = retrieve_list(rootbuslist, rootbuscnt);
	hq_close();
	snprintf(pcirootbus_hdr, sizeof(pcirootbus_hdr), "%s %s\n",
		mkstring(buf1, VADDR_PRLEN, CENTER, "ROOT BUS"),
		mkstring(buf2, strlen("0000:00"), CENTER, "BUSNAME"));
	for (i = 0; i < rootbuscnt; i++) {
		fprintf(fp, "%s", pcirootbus_hdr);
		fill_bus_name(rootbuslist[i], name);
		fprintf(fp, "%s %s\n",
			mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(rootbuslist[i])),
			mkstring(buf2, strlen("0000:00"), CENTER, name));
		/* devices directly on the root bus, then its subordinate buses */
		walk_devices(rootbuslist[i]);
		walk_buses(rootbuslist[i]);
		fprintf(fp, "\n");
	}
	FREEBUF(rootbuslist);
}

/*
 * do_pci: legacy "dev -p" implementation for older kernels that keep a
 * global pci_devices list.  Supports both the 2.4-era list_head chaining
 * (pci_dev.global_list) and the 2.2-era singly-linked pci_dev.next chain.
 * (Function continues past this chunk boundary.)
 */
static void do_pci(void)
{
	struct list_data pcilist_data;
	int devcnt, i;
	unsigned int class;
	unsigned short device, vendor;
	unsigned char busno;
	ulong *devlist, bus, devfn, prev, next;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];

	BZERO(&pcilist_data, sizeof(struct list_data));
	if (VALID_MEMBER(pci_dev_global_list)) {
		get_symbol_data("pci_devices", sizeof(void *), &pcilist_data.start);
		pcilist_data.end = symbol_value("pci_devices");
		pcilist_data.list_head_offset = OFFSET(pci_dev_global_list);
		readmem(symbol_value("pci_devices") + OFFSET(list_head_prev),
			KVADDR, &prev, sizeof(void *), "list head prev",
			FAULT_ON_ERROR);
		/*
		 * Check if this system does not have any PCI devices.
*/
		if ((pcilist_data.start == pcilist_data.end) &&
		    (prev == pcilist_data.end))
			error(FATAL, "no PCI devices found on this system.\n");
	} else if (VALID_MEMBER(pci_dev_next)) {
		/* 2.2-era singly-linked chain via pci_dev.next */
		get_symbol_data("pci_devices", sizeof(void *), &pcilist_data.start);
		pcilist_data.member_offset = OFFSET(pci_dev_next);
		/*
		 * Check if this system does not have any PCI devices.
		 */
		readmem(pcilist_data.start + pcilist_data.member_offset,
			KVADDR, &next, sizeof(void *), "pci dev next",
			FAULT_ON_ERROR);
		if (!next)
			error(FATAL, "no PCI devices found on this system.\n");
	} else
		option_not_supported('p');
	hq_open();
	devcnt = do_list(&pcilist_data);
	devlist = (ulong *)GETBUF(devcnt * sizeof(ulong));
	devcnt = retrieve_list(devlist, devcnt);
	hq_close();
	fprintf(fp, "%s BU:SL.FN CLASS: VENDOR-DEVICE\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "PCI_DEV"));
	for (i = 0; i < devcnt; i++) {
		/*
		 * Get the pci bus number
		 */
		readmem(devlist[i] + OFFSET(pci_dev_bus), KVADDR, &bus,
			sizeof(void *), "pci bus", FAULT_ON_ERROR);
		readmem(bus + OFFSET(pci_bus_number), KVADDR, &busno,
			sizeof(char), "pci bus number", FAULT_ON_ERROR);
		/*
		 * NOTE(review): kernel pci_dev.devfn is "unsigned int", but this
		 * reads sizeof(ulong) bytes into a ulong -- over-reads 4 bytes on
		 * 64-bit targets.  PCI_SLOT/PCI_FUNC only use the low bits, so this
		 * is benign on little-endian; verify on big-endian (s390x/ppc64 BE).
		 */
		readmem(devlist[i] + OFFSET(pci_dev_devfn), KVADDR, &devfn,
			sizeof(ulong), "pci devfn", FAULT_ON_ERROR);
		fprintf(fp, "%lx %02x:%02lx.%lx ", devlist[i], busno,
			PCI_SLOT(devfn), PCI_FUNC(devfn));
		/*
		 * Now read in the class, device, and vendor.
*/ readmem(devlist[i] + OFFSET(pci_dev_class), KVADDR, &class, sizeof(int), "pci class", FAULT_ON_ERROR); readmem(devlist[i] + OFFSET(pci_dev_device), KVADDR, &device, sizeof(short), "pci device", FAULT_ON_ERROR); readmem(devlist[i] + OFFSET(pci_dev_vendor),KVADDR, &vendor, sizeof(short), "pci vendor", FAULT_ON_ERROR); fprintf(fp, "%s: %s %s", pci_strclass(class, buf1), pci_strvendor(vendor, buf2), pci_strdev(vendor, device, buf3)); fprintf(fp, "\n"); } FREEBUF(devlist); } /* * Taken from drivers/pci/oldproc.c, kernel ver 2.2.17 */ struct pci_dev_info { unsigned short vendor; /* vendor id */ unsigned short device; /* device id */ const char *name; /* device name */ }; #define DEVICE(vid,did,name) \ {PCI_VENDOR_ID_##vid, PCI_DEVICE_ID_##did, (name)} /* * Sorted in ascending order by vendor and device. * Use binary search for lookup. If you add a device make sure * it is sequential by both vendor and device id. */ struct pci_dev_info dev_info[] = { DEVICE( COMPAQ, COMPAQ_1280, "QVision 1280/p"), DEVICE( COMPAQ, COMPAQ_6010, "Hot Plug PCI Bridge"), DEVICE( COMPAQ, COMPAQ_SMART2P, "Smart-2/P RAID Controller"), DEVICE( COMPAQ, COMPAQ_NETEL100,"Netelligent 10/100"), DEVICE( COMPAQ, COMPAQ_NETEL10, "Netelligent 10"), DEVICE( COMPAQ, COMPAQ_NETFLEX3I,"NetFlex 3"), DEVICE( COMPAQ, COMPAQ_NETEL100D,"Netelligent 10/100 Dual"), DEVICE( COMPAQ, COMPAQ_NETEL100PI,"Netelligent 10/100 ProLiant"), DEVICE( COMPAQ, COMPAQ_NETEL100I,"Netelligent 10/100 Integrated"), DEVICE( COMPAQ, COMPAQ_THUNDER, "ThunderLAN"), DEVICE( COMPAQ, COMPAQ_NETFLEX3B,"NetFlex 3 BNC"), DEVICE( NCR, NCR_53C810, "53c810"), DEVICE( NCR, NCR_53C820, "53c820"), DEVICE( NCR, NCR_53C825, "53c825"), DEVICE( NCR, NCR_53C815, "53c815"), DEVICE( NCR, NCR_53C860, "53c860"), DEVICE( NCR, NCR_53C896, "53c896"), DEVICE( NCR, NCR_53C895, "53c895"), DEVICE( NCR, NCR_53C885, "53c885"), DEVICE( NCR, NCR_53C875, "53c875"), DEVICE( NCR, NCR_53C875J, "53c875J"), DEVICE( ATI, ATI_68800, "68800AX"), DEVICE( ATI, ATI_215CT222, 
"215CT222"), DEVICE( ATI, ATI_210888CX, "210888CX"), DEVICE( ATI, ATI_215GB, "Mach64 GB"), DEVICE( ATI, ATI_215GD, "Mach64 GD (Rage Pro)"), DEVICE( ATI, ATI_215GI, "Mach64 GI (Rage Pro)"), DEVICE( ATI, ATI_215GP, "Mach64 GP (Rage Pro)"), DEVICE( ATI, ATI_215GQ, "Mach64 GQ (Rage Pro)"), DEVICE( ATI, ATI_215GT, "Mach64 GT (Rage II)"), DEVICE( ATI, ATI_215GTB, "Mach64 GT (Rage II)"), DEVICE( ATI, ATI_210888GX, "210888GX"), DEVICE( ATI, ATI_215LG, "Mach64 LG (Rage Pro)"), DEVICE( ATI, ATI_264LT, "Mach64 LT"), DEVICE( ATI, ATI_264VT, "Mach64 VT"), DEVICE( VLSI, VLSI_82C592, "82C592-FC1"), DEVICE( VLSI, VLSI_82C593, "82C593-FC1"), DEVICE( VLSI, VLSI_82C594, "82C594-AFC2"), DEVICE( VLSI, VLSI_82C597, "82C597-AFC2"), DEVICE( VLSI, VLSI_82C541, "82C541 Lynx"), DEVICE( VLSI, VLSI_82C543, "82C543 Lynx ISA"), DEVICE( VLSI, VLSI_82C532, "82C532"), DEVICE( VLSI, VLSI_82C534, "82C534"), DEVICE( VLSI, VLSI_82C535, "82C535"), DEVICE( VLSI, VLSI_82C147, "82C147"), DEVICE( VLSI, VLSI_VAS96011, "VAS96011 (Golden Gate II)"), DEVICE( ADL, ADL_2301, "2301"), DEVICE( NS, NS_87415, "87415"), DEVICE( NS, NS_87410, "87410"), DEVICE( TSENG, TSENG_W32P_2, "ET4000W32P"), DEVICE( TSENG, TSENG_W32P_b, "ET4000W32P rev B"), DEVICE( TSENG, TSENG_W32P_c, "ET4000W32P rev C"), DEVICE( TSENG, TSENG_W32P_d, "ET4000W32P rev D"), DEVICE( TSENG, TSENG_ET6000, "ET6000"), DEVICE( WEITEK, WEITEK_P9000, "P9000"), DEVICE( WEITEK, WEITEK_P9100, "P9100"), DEVICE( DEC, DEC_BRD, "DC21050"), DEVICE( DEC, DEC_TULIP, "DC21040"), DEVICE( DEC, DEC_TGA, "TGA"), DEVICE( DEC, DEC_TULIP_FAST, "DC21140"), DEVICE( DEC, DEC_TGA2, "TGA2"), DEVICE( DEC, DEC_FDDI, "DEFPA"), DEVICE( DEC, DEC_TULIP_PLUS, "DC21041"), DEVICE( DEC, DEC_21142, "DC21142"), DEVICE( DEC, DEC_21052, "DC21052"), DEVICE( DEC, DEC_21150, "DC21150"), DEVICE( DEC, DEC_21152, "DC21152"), DEVICE( DEC, DEC_21153, "DC21153"), DEVICE( DEC, DEC_21154, "DC21154"), DEVICE( DEC, DEC_21285, "DC21285 Footbridge"), DEVICE( DEC, DEC_21554, "DC21554 DrawBridge"), DEVICE( 
CIRRUS, CIRRUS_7548, "GD 7548"), DEVICE( CIRRUS, CIRRUS_5430, "GD 5430"), DEVICE( CIRRUS, CIRRUS_5434_4, "GD 5434"), DEVICE( CIRRUS, CIRRUS_5434_8, "GD 5434"), DEVICE( CIRRUS, CIRRUS_5436, "GD 5436"), DEVICE( CIRRUS, CIRRUS_5446, "GD 5446"), DEVICE( CIRRUS, CIRRUS_5480, "GD 5480"), DEVICE( CIRRUS, CIRRUS_5464, "GD 5464"), DEVICE( CIRRUS, CIRRUS_5465, "GD 5465"), DEVICE( CIRRUS, CIRRUS_6729, "CL 6729"), DEVICE( CIRRUS, CIRRUS_6832, "PD 6832"), DEVICE( CIRRUS, CIRRUS_7542, "CL 7542"), DEVICE( CIRRUS, CIRRUS_7543, "CL 7543"), DEVICE( CIRRUS, CIRRUS_7541, "CL 7541"), DEVICE( IBM, IBM_FIRE_CORAL, "Fire Coral"), DEVICE( IBM, IBM_TR, "Token Ring"), DEVICE( IBM, IBM_82G2675, "82G2675"), DEVICE( IBM, IBM_MCA, "MicroChannel"), DEVICE( IBM, IBM_82351, "82351"), DEVICE( IBM, IBM_PYTHON, "Python"), DEVICE( IBM, IBM_SERVERAID, "ServeRAID"), DEVICE( IBM, IBM_TR_WAKE, "Wake On LAN Token Ring"), DEVICE( IBM, IBM_MPIC, "MPIC-2 Interrupt Controller"), DEVICE( IBM, IBM_3780IDSP, "MWave DSP"), DEVICE( IBM, IBM_MPIC_2, "MPIC-2 ASIC Interrupt Controller"), DEVICE( WD, WD_7197, "WD 7197"), DEVICE( AMD, AMD_LANCE, "79C970"), DEVICE( AMD, AMD_SCSI, "53C974"), DEVICE( TRIDENT, TRIDENT_9397, "Cyber9397"), DEVICE( TRIDENT, TRIDENT_9420, "TG 9420"), DEVICE( TRIDENT, TRIDENT_9440, "TG 9440"), DEVICE( TRIDENT, TRIDENT_9660, "TG 9660 / Cyber9385"), DEVICE( TRIDENT, TRIDENT_9750, "Image 975"), DEVICE( AI, AI_M1435, "M1435"), DEVICE( MATROX, MATROX_MGA_2, "Atlas PX2085"), DEVICE( MATROX, MATROX_MIL, "Millennium"), DEVICE( MATROX, MATROX_MYS, "Mystique"), DEVICE( MATROX, MATROX_MIL_2, "Millennium II"), DEVICE( MATROX, MATROX_MIL_2_AGP,"Millennium II AGP"), DEVICE( MATROX, MATROX_G200_PCI,"Matrox G200 PCI"), DEVICE( MATROX, MATROX_G200_AGP,"Matrox G200 AGP"), DEVICE( MATROX, MATROX_MGA_IMP, "MGA Impression"), DEVICE( MATROX, MATROX_G100_MM, "Matrox G100 multi monitor"), DEVICE( MATROX, MATROX_G100_AGP,"Matrox G100 AGP"), DEVICE( CT, CT_65545, "65545"), DEVICE( CT, CT_65548, "65548"), DEVICE( CT, 
CT_65550, "65550"), DEVICE( CT, CT_65554, "65554"), DEVICE( CT, CT_65555, "65555"), DEVICE( MIRO, MIRO_36050, "ZR36050"), DEVICE( NEC, NEC_PCX2, "PowerVR PCX2"), DEVICE( FD, FD_36C70, "TMC-18C30"), DEVICE( SI, SI_5591_AGP, "5591/5592 AGP"), DEVICE( SI, SI_6202, "6202"), DEVICE( SI, SI_503, "85C503"), DEVICE( SI, SI_ACPI, "ACPI"), DEVICE( SI, SI_5597_VGA, "5597/5598 VGA"), DEVICE( SI, SI_6205, "6205"), DEVICE( SI, SI_501, "85C501"), DEVICE( SI, SI_496, "85C496"), DEVICE( SI, SI_601, "85C601"), DEVICE( SI, SI_5107, "5107"), DEVICE( SI, SI_5511, "85C5511"), DEVICE( SI, SI_5513, "85C5513"), DEVICE( SI, SI_5571, "5571"), DEVICE( SI, SI_5591, "5591/5592 Host"), DEVICE( SI, SI_5597, "5597/5598 Host"), DEVICE( SI, SI_7001, "7001 USB"), DEVICE( HP, HP_J2585A, "J2585A"), DEVICE( HP, HP_J2585B, "J2585B (Lassen)"), DEVICE( PCTECH, PCTECH_RZ1000, "RZ1000 (buggy)"), DEVICE( PCTECH, PCTECH_RZ1001, "RZ1001 (buggy?)"), DEVICE( PCTECH, PCTECH_SAMURAI_0,"Samurai 0"), DEVICE( PCTECH, PCTECH_SAMURAI_1,"Samurai 1"), DEVICE( PCTECH, PCTECH_SAMURAI_IDE,"Samurai IDE"), DEVICE( DPT, DPT, "SmartCache/Raid"), DEVICE( OPTI, OPTI_92C178, "92C178"), DEVICE( OPTI, OPTI_82C557, "82C557 Viper-M"), DEVICE( OPTI, OPTI_82C558, "82C558 Viper-M ISA+IDE"), DEVICE( OPTI, OPTI_82C621, "82C621"), DEVICE( OPTI, OPTI_82C700, "82C700"), DEVICE( OPTI, OPTI_82C701, "82C701 FireStar Plus"), DEVICE( OPTI, OPTI_82C814, "82C814 Firebridge 1"), DEVICE( OPTI, OPTI_82C822, "82C822"), DEVICE( OPTI, OPTI_82C825, "82C825 Firebridge 2"), DEVICE( SGS, SGS_2000, "STG 2000X"), DEVICE( SGS, SGS_1764, "STG 1764X"), DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER_NC, "MultiMaster NC"), DEVICE( BUSLOGIC, BUSLOGIC_MULTIMASTER, "MultiMaster"), DEVICE( BUSLOGIC, BUSLOGIC_FLASHPOINT, "FlashPoint"), DEVICE( TI, TI_TVP4010, "TVP4010 Permedia"), DEVICE( TI, TI_TVP4020, "TVP4020 Permedia 2"), DEVICE( TI, TI_PCI1130, "PCI1130"), DEVICE( TI, TI_PCI1131, "PCI1131"), DEVICE( TI, TI_PCI1250, "PCI1250"), DEVICE( OAK, OAK_OTI107, "OTI107"), DEVICE( 
WINBOND2, WINBOND2_89C940,"NE2000-PCI"), DEVICE( MOTOROLA, MOTOROLA_MPC105,"MPC105 Eagle"), DEVICE( MOTOROLA, MOTOROLA_MPC106,"MPC106 Grackle"), DEVICE( MOTOROLA, MOTOROLA_RAVEN, "Raven"), DEVICE( MOTOROLA, MOTOROLA_FALCON,"Falcon"), DEVICE( MOTOROLA, MOTOROLA_CPX8216,"CPX8216"), DEVICE( PROMISE, PROMISE_20246, "IDE UltraDMA/33"), DEVICE( PROMISE, PROMISE_5300, "DC5030"), DEVICE( N9, N9_I128, "Imagine 128"), DEVICE( N9, N9_I128_2, "Imagine 128v2"), DEVICE( N9, N9_I128_T2R, "Revolution 3D"), DEVICE( UMC, UMC_UM8673F, "UM8673F"), DEVICE( UMC, UMC_UM8891A, "UM8891A"), DEVICE( UMC, UMC_UM8886BF, "UM8886BF"), DEVICE( UMC, UMC_UM8886A, "UM8886A"), DEVICE( UMC, UMC_UM8881F, "UM8881F"), DEVICE( UMC, UMC_UM8886F, "UM8886F"), DEVICE( UMC, UMC_UM9017F, "UM9017F"), DEVICE( UMC, UMC_UM8886N, "UM8886N"), DEVICE( UMC, UMC_UM8891N, "UM8891N"), DEVICE( X, X_AGX016, "ITT AGX016"), DEVICE( PICOP, PICOP_PT86C52X, "PT86C52x Vesuvius"), DEVICE( PICOP, PICOP_PT80C524, "PT80C524 Nile"), DEVICE( MYLEX, MYLEX_DAC960_P, "DAC960 P Series"), DEVICE( MYLEX, MYLEX_DAC960_PD,"DAC960 PD Series"), DEVICE( MYLEX, MYLEX_DAC960_PG,"DAC960 PG Series"), DEVICE( MYLEX, MYLEX_DAC960_LP,"DAC960 LP Series"), DEVICE( MYLEX, MYLEX_DAC960_BA,"DAC960 BA Series"), DEVICE( APPLE, APPLE_BANDIT, "Bandit"), DEVICE( APPLE, APPLE_GC, "Grand Central"), DEVICE( APPLE, APPLE_HYDRA, "Hydra"), DEVICE( NEXGEN, NEXGEN_82C501, "82C501"), DEVICE( QLOGIC, QLOGIC_ISP1020, "ISP1020"), DEVICE( QLOGIC, QLOGIC_ISP1022, "ISP1022"), DEVICE( CYRIX, CYRIX_5510, "5510"), DEVICE( CYRIX, CYRIX_PCI_MASTER,"PCI Master"), DEVICE( CYRIX, CYRIX_5520, "5520"), DEVICE( CYRIX, CYRIX_5530_LEGACY,"5530 Kahlua Legacy"), DEVICE( CYRIX, CYRIX_5530_SMI, "5530 Kahlua SMI"), DEVICE( CYRIX, CYRIX_5530_IDE, "5530 Kahlua IDE"), DEVICE( CYRIX, CYRIX_5530_AUDIO,"5530 Kahlua Audio"), DEVICE( CYRIX, CYRIX_5530_VIDEO,"5530 Kahlua Video"), DEVICE( LEADTEK, LEADTEK_805, "S3 805"), DEVICE( CONTAQ, CONTAQ_82C599, "82C599"), DEVICE( CONTAQ, CONTAQ_82C693, "82C693"), 
DEVICE( OLICOM, OLICOM_OC3136, "OC-3136/3137"), DEVICE( OLICOM, OLICOM_OC2315, "OC-2315"), DEVICE( OLICOM, OLICOM_OC2325, "OC-2325"), DEVICE( OLICOM, OLICOM_OC2183, "OC-2183/2185"), DEVICE( OLICOM, OLICOM_OC2326, "OC-2326"), DEVICE( OLICOM, OLICOM_OC6151, "OC-6151/6152"), DEVICE( SUN, SUN_EBUS, "PCI-EBus Bridge"), DEVICE( SUN, SUN_HAPPYMEAL, "Happy Meal Ethernet"), DEVICE( SUN, SUN_SIMBA, "Advanced PCI Bridge"), DEVICE( SUN, SUN_PBM, "PCI Bus Module"), DEVICE( SUN, SUN_SABRE, "Ultra IIi PCI"), DEVICE( CMD, CMD_640, "640 (buggy)"), DEVICE( CMD, CMD_643, "643"), DEVICE( CMD, CMD_646, "646"), DEVICE( CMD, CMD_670, "670"), DEVICE( VISION, VISION_QD8500, "QD-8500"), DEVICE( VISION, VISION_QD8580, "QD-8580"), DEVICE( BROOKTREE, BROOKTREE_848, "Bt848"), DEVICE( BROOKTREE, BROOKTREE_849A, "Bt849"), DEVICE( BROOKTREE, BROOKTREE_878_1,"Bt878 2nd Contr. (?)"), DEVICE( BROOKTREE, BROOKTREE_878, "Bt878"), DEVICE( BROOKTREE, BROOKTREE_8474, "Bt8474"), DEVICE( SIERRA, SIERRA_STB, "STB Horizon 64"), DEVICE( ACC, ACC_2056, "2056"), DEVICE( WINBOND, WINBOND_83769, "W83769F"), DEVICE( WINBOND, WINBOND_82C105, "SL82C105"), DEVICE( WINBOND, WINBOND_83C553, "W83C553"), DEVICE( DATABOOK, DATABOOK_87144, "DB87144"), DEVICE( PLX, PLX_9050, "PCI9050 I2O"), DEVICE( PLX, PLX_9080, "PCI9080 I2O"), DEVICE( MADGE, MADGE_MK2, "Smart 16/4 BM Mk2 Ringnode"), DEVICE( MADGE, MADGE_C155S, "Collage 155 Server"), DEVICE( 3COM, 3COM_3C339, "3C339 TokenRing"), DEVICE( 3COM, 3COM_3C590, "3C590 10bT"), DEVICE( 3COM, 3COM_3C595TX, "3C595 100bTX"), DEVICE( 3COM, 3COM_3C595T4, "3C595 100bT4"), DEVICE( 3COM, 3COM_3C595MII, "3C595 100b-MII"), DEVICE( 3COM, 3COM_3C900TPO, "3C900 10bTPO"), DEVICE( 3COM, 3COM_3C900COMBO,"3C900 10b Combo"), DEVICE( 3COM, 3COM_3C905TX, "3C905 100bTX"), DEVICE( 3COM, 3COM_3C905T4, "3C905 100bT4"), DEVICE( 3COM, 3COM_3C905B_TX, "3C905B 100bTX"), DEVICE( SMC, SMC_EPIC100, "9432 TX"), DEVICE( AL, AL_M1445, "M1445"), DEVICE( AL, AL_M1449, "M1449"), DEVICE( AL, AL_M1451, "M1451"), DEVICE( 
AL, AL_M1461, "M1461"), DEVICE( AL, AL_M1489, "M1489"), DEVICE( AL, AL_M1511, "M1511"), DEVICE( AL, AL_M1513, "M1513"), DEVICE( AL, AL_M1521, "M1521"), DEVICE( AL, AL_M1523, "M1523"), DEVICE( AL, AL_M1531, "M1531 Aladdin IV"), DEVICE( AL, AL_M1533, "M1533 Aladdin IV"), DEVICE( AL, AL_M3307, "M3307 MPEG-1 decoder"), DEVICE( AL, AL_M4803, "M4803"), DEVICE( AL, AL_M5219, "M5219"), DEVICE( AL, AL_M5229, "M5229 TXpro"), DEVICE( AL, AL_M5237, "M5237 USB"), DEVICE( SURECOM, SURECOM_NE34, "NE-34PCI LAN"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2070, "Magicgraph NM2070"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128V, "MagicGraph 128V"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128ZV, "MagicGraph 128ZV"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_NM2160, "MagicGraph NM2160"), DEVICE( NEOMAGIC, NEOMAGIC_MAGICGRAPH_128ZVPLUS, "MagicGraph 128ZV+"), DEVICE( ASP, ASP_ABP940, "ABP940"), DEVICE( ASP, ASP_ABP940U, "ABP940U"), DEVICE( ASP, ASP_ABP940UW, "ABP940UW"), DEVICE( MACRONIX, MACRONIX_MX98713,"MX98713"), DEVICE( MACRONIX, MACRONIX_MX987x5,"MX98715 / MX98725"), DEVICE( CERN, CERN_SPSB_PMC, "STAR/RD24 SCI-PCI (PMC)"), DEVICE( CERN, CERN_SPSB_PCI, "STAR/RD24 SCI-PCI (PMC)"), DEVICE( CERN, CERN_HIPPI_DST, "HIPPI destination"), DEVICE( CERN, CERN_HIPPI_SRC, "HIPPI source"), DEVICE( IMS, IMS_8849, "8849"), DEVICE( TEKRAM2, TEKRAM2_690c, "DC690c"), DEVICE( TUNDRA, TUNDRA_CA91C042,"CA91C042 Universe"), DEVICE( AMCC, AMCC_MYRINET, "Myrinet PCI (M2-PCI-32)"), DEVICE( AMCC, AMCC_PARASTATION,"ParaStation Interface"), DEVICE( AMCC, AMCC_S5933, "S5933 PCI44"), DEVICE( AMCC, AMCC_S5933_HEPC3,"S5933 Traquair HEPC3"), DEVICE( INTERG, INTERG_1680, "IGA-1680"), DEVICE( INTERG, INTERG_1682, "IGA-1682"), DEVICE( REALTEK, REALTEK_8029, "8029"), DEVICE( REALTEK, REALTEK_8129, "8129"), DEVICE( REALTEK, REALTEK_8139, "8139"), DEVICE( TRUEVISION, TRUEVISION_T1000,"TARGA 1000"), DEVICE( INIT, INIT_320P, "320 P"), DEVICE( INIT, INIT_360P, "360 P"), DEVICE( TTI, TTI_HPT343, "HPT343"), DEVICE( VIA, VIA_82C505, 
"VT 82C505"), DEVICE( VIA, VIA_82C561, "VT 82C561"), DEVICE( VIA, VIA_82C586_1, "VT 82C586 Apollo IDE"), DEVICE( VIA, VIA_82C576, "VT 82C576 3V"), DEVICE( VIA, VIA_82C585, "VT 82C585 Apollo VP1/VPX"), DEVICE( VIA, VIA_82C586_0, "VT 82C586 Apollo ISA"), DEVICE( VIA, VIA_82C595, "VT 82C595 Apollo VP2"), DEVICE( VIA, VIA_82C596_0, "VT 82C596 Apollo Pro"), DEVICE( VIA, VIA_82C597_0, "VT 82C597 Apollo VP3"), DEVICE( VIA, VIA_82C598_0, "VT 82C598 Apollo MVP3"), DEVICE( VIA, VIA_82C926, "VT 82C926 Amazon"), DEVICE( VIA, VIA_82C416, "VT 82C416MV"), DEVICE( VIA, VIA_82C595_97, "VT 82C595 Apollo VP2/97"), DEVICE( VIA, VIA_82C586_2, "VT 82C586 Apollo USB"), DEVICE( VIA, VIA_82C586_3, "VT 82C586B Apollo ACPI"), DEVICE( VIA, VIA_86C100A, "VT 86C100A"), DEVICE( VIA, VIA_82C597_1, "VT 82C597 Apollo VP3 AGP"), DEVICE( VIA, VIA_82C598_1, "VT 82C598 Apollo MVP3 AGP"), DEVICE( SMC2, SMC2_1211TX, "1211 TX"), DEVICE( VORTEX, VORTEX_GDT60x0, "GDT 60x0"), DEVICE( VORTEX, VORTEX_GDT6000B,"GDT 6000b"), DEVICE( VORTEX, VORTEX_GDT6x10, "GDT 6110/6510"), DEVICE( VORTEX, VORTEX_GDT6x20, "GDT 6120/6520"), DEVICE( VORTEX, VORTEX_GDT6530, "GDT 6530"), DEVICE( VORTEX, VORTEX_GDT6550, "GDT 6550"), DEVICE( VORTEX, VORTEX_GDT6x17, "GDT 6117/6517"), DEVICE( VORTEX, VORTEX_GDT6x27, "GDT 6127/6527"), DEVICE( VORTEX, VORTEX_GDT6537, "GDT 6537"), DEVICE( VORTEX, VORTEX_GDT6557, "GDT 6557"), DEVICE( VORTEX, VORTEX_GDT6x15, "GDT 6115/6515"), DEVICE( VORTEX, VORTEX_GDT6x25, "GDT 6125/6525"), DEVICE( VORTEX, VORTEX_GDT6535, "GDT 6535"), DEVICE( VORTEX, VORTEX_GDT6555, "GDT 6555"), DEVICE( VORTEX, VORTEX_GDT6x17RP,"GDT 6117RP/6517RP"), DEVICE( VORTEX, VORTEX_GDT6x27RP,"GDT 6127RP/6527RP"), DEVICE( VORTEX, VORTEX_GDT6537RP,"GDT 6537RP"), DEVICE( VORTEX, VORTEX_GDT6557RP,"GDT 6557RP"), DEVICE( VORTEX, VORTEX_GDT6x11RP,"GDT 6111RP/6511RP"), DEVICE( VORTEX, VORTEX_GDT6x21RP,"GDT 6121RP/6521RP"), DEVICE( VORTEX, VORTEX_GDT6x17RP1,"GDT 6117RP1/6517RP1"), DEVICE( VORTEX, VORTEX_GDT6x27RP1,"GDT 6127RP1/6527RP1"), 
DEVICE( VORTEX, VORTEX_GDT6537RP1,"GDT 6537RP1"), DEVICE( VORTEX, VORTEX_GDT6557RP1,"GDT 6557RP1"), DEVICE( VORTEX, VORTEX_GDT6x11RP1,"GDT 6111RP1/6511RP1"), DEVICE( VORTEX, VORTEX_GDT6x21RP1,"GDT 6121RP1/6521RP1"), DEVICE( VORTEX, VORTEX_GDT6x17RP2,"GDT 6117RP2/6517RP2"), DEVICE( VORTEX, VORTEX_GDT6x27RP2,"GDT 6127RP2/6527RP2"), DEVICE( VORTEX, VORTEX_GDT6537RP2,"GDT 6537RP2"), DEVICE( VORTEX, VORTEX_GDT6557RP2,"GDT 6557RP2"), DEVICE( VORTEX, VORTEX_GDT6x11RP2,"GDT 6111RP2/6511RP2"), DEVICE( VORTEX, VORTEX_GDT6x21RP2,"GDT 6121RP2/6521RP2"), DEVICE( EF, EF_ATM_FPGA, "155P-MF1 (FPGA)"), DEVICE( EF, EF_ATM_ASIC, "155P-MF1 (ASIC)"), DEVICE( FORE, FORE_PCA200PC, "PCA-200PC"), DEVICE( FORE, FORE_PCA200E, "PCA-200E"), DEVICE( IMAGINGTECH, IMAGINGTECH_ICPCI, "MVC IC-PCI"), DEVICE( PHILIPS, PHILIPS_SAA7145,"SAA7145"), DEVICE( PHILIPS, PHILIPS_SAA7146,"SAA7146"), DEVICE( CYCLONE, CYCLONE_SDK, "SDK"), DEVICE( ALLIANCE, ALLIANCE_PROMOTIO, "Promotion-6410"), DEVICE( ALLIANCE, ALLIANCE_PROVIDEO, "Provideo"), DEVICE( ALLIANCE, ALLIANCE_AT24, "AT24"), DEVICE( ALLIANCE, ALLIANCE_AT3D, "AT3D"), DEVICE( SYSKONNECT, SYSKONNECT_FP, "SK-FDDI-PCI"), DEVICE( SYSKONNECT, SYSKONNECT_TR, "SK-TR-PCI"), DEVICE( SYSKONNECT, SYSKONNECT_GE, "SK-98xx"), DEVICE( VMIC, VMIC_VME, "VMIVME-7587"), DEVICE( DIGI, DIGI_EPC, "AccelPort EPC"), DEVICE( DIGI, DIGI_RIGHTSWITCH, "RightSwitch SE-6"), DEVICE( DIGI, DIGI_XEM, "AccelPort Xem"), DEVICE( DIGI, DIGI_XR, "AccelPort Xr"), DEVICE( DIGI, DIGI_CX, "AccelPort C/X"), DEVICE( DIGI, DIGI_XRJ, "AccelPort Xr/J"), DEVICE( DIGI, DIGI_EPCJ, "AccelPort EPC/J"), DEVICE( DIGI, DIGI_XR_920, "AccelPort Xr 920"), DEVICE( MUTECH, MUTECH_MV1000, "MV-1000"), DEVICE( RENDITION, RENDITION_VERITE,"Verite 1000"), DEVICE( RENDITION, RENDITION_VERITE2100,"Verite 2100"), DEVICE( SERVERWORKS, SERVERWORKS_HE, "CNB20HE PCI Bridge"), DEVICE( SERVERWORKS, SERVERWORKS_LE, "CNB30LE PCI Bridge"), DEVICE( SERVERWORKS, SERVERWORKS_CMIC_HE, "CMIC-HE PCI Bridge"), DEVICE( SERVERWORKS, 
SERVERWORKS_CIOB30, "CIOB30 I/O Bridge"), DEVICE( SERVERWORKS, SERVERWORKS_CSB5, "CSB5 PCI Bridge"), DEVICE( TOSHIBA, TOSHIBA_601, "Laptop"), DEVICE( TOSHIBA, TOSHIBA_TOPIC95,"ToPIC95"), DEVICE( TOSHIBA, TOSHIBA_TOPIC97,"ToPIC97"), DEVICE( RICOH, RICOH_RL5C466, "RL5C466"), DEVICE( ARTOP, ARTOP_ATP8400, "ATP8400"), DEVICE( ARTOP, ARTOP_ATP850UF, "ATP850UF"), DEVICE( ZEITNET, ZEITNET_1221, "1221"), DEVICE( ZEITNET, ZEITNET_1225, "1225"), DEVICE( OMEGA, OMEGA_82C092G, "82C092G"), DEVICE( LITEON, LITEON_LNE100TX,"LNE100TX"), DEVICE( NP, NP_PCI_FDDI, "NP-PCI"), DEVICE( ATT, ATT_L56XMF, "L56xMF"), DEVICE( ATT, ATT_L56DVP, "L56DV+P"), DEVICE( SPECIALIX, SPECIALIX_IO8, "IO8+/PCI"), DEVICE( SPECIALIX, SPECIALIX_XIO, "XIO/SIO host"), DEVICE( SPECIALIX, SPECIALIX_RIO, "RIO host"), DEVICE( AURAVISION, AURAVISION_VXP524,"VXP524"), DEVICE( IKON, IKON_10115, "10115 Greensheet"), DEVICE( IKON, IKON_10117, "10117 Greensheet"), DEVICE( ZORAN, ZORAN_36057, "ZR36057"), DEVICE( ZORAN, ZORAN_36120, "ZR36120"), DEVICE( KINETIC, KINETIC_2915, "2915 CAMAC"), DEVICE( COMPEX, COMPEX_ENET100VG4, "Readylink ENET100-VG4"), DEVICE( COMPEX, COMPEX_RL2000, "ReadyLink 2000"), DEVICE( RP, RP32INTF, "RocketPort 32 Intf"), DEVICE( RP, RP8INTF, "RocketPort 8 Intf"), DEVICE( RP, RP16INTF, "RocketPort 16 Intf"), DEVICE( RP, RP4QUAD, "Rocketport 4 Quad"), DEVICE( RP, RP8OCTA, "RocketPort 8 Oct"), DEVICE( RP, RP8J, "RocketPort 8 J"), DEVICE( RP, RPP4, "RocketPort Plus 4 Quad"), DEVICE( RP, RPP8, "RocketPort Plus 8 Oct"), DEVICE( RP, RP8M, "RocketModem 8 J"), DEVICE( CYCLADES, CYCLOM_Y_Lo, "Cyclom-Y below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_Y_Hi, "Cyclom-Y above 1Mbyte"), DEVICE( CYCLADES, CYCLOM_4Y_Lo, "Cyclom-4Y below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_4Y_Hi, "Cyclom-4Y above 1Mbyte"), DEVICE( CYCLADES, CYCLOM_8Y_Lo, "Cyclom-8Y below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_8Y_Hi, "Cyclom-8Y above 1Mbyte"), DEVICE( CYCLADES, CYCLOM_Z_Lo, "Cyclades-Z below 1Mbyte"), DEVICE( CYCLADES, CYCLOM_Z_Hi, "Cyclades-Z 
above 1Mbyte"), DEVICE( CYCLADES, PC300_RX_2, "PC300/RSV or /X21 (2 ports)"), DEVICE( CYCLADES, PC300_RX_1, "PC300/RSV or /X21 (1 port)"), DEVICE( CYCLADES, PC300_TE_2, "PC300/TE (2 ports)"), DEVICE( CYCLADES, PC300_TE_1, "PC300/TE (1 port)"), DEVICE( ESSENTIAL, ESSENTIAL_ROADRUNNER,"Roadrunner serial HIPPI"), DEVICE( O2, O2_6832, "6832"), DEVICE( 3DFX, 3DFX_VOODOO, "Voodoo"), DEVICE( 3DFX, 3DFX_VOODOO2, "Voodoo2"), DEVICE( 3DFX, 3DFX_BANSHEE, "Banshee"), DEVICE( SIGMADES, SIGMADES_6425, "REALmagic64/GX"), DEVICE( AVM, AVM_A1, "A1 (Fritz)"), DEVICE( STALLION, STALLION_ECHPCI832,"EasyConnection 8/32"), DEVICE( STALLION, STALLION_ECHPCI864,"EasyConnection 8/64"), DEVICE( STALLION, STALLION_EIOPCI,"EasyIO"), DEVICE( OPTIBASE, OPTIBASE_FORGE, "MPEG Forge"), DEVICE( OPTIBASE, OPTIBASE_FUSION,"MPEG Fusion"), DEVICE( OPTIBASE, OPTIBASE_VPLEX, "VideoPlex"), DEVICE( OPTIBASE, OPTIBASE_VPLEXCC,"VideoPlex CC"), DEVICE( OPTIBASE, OPTIBASE_VQUEST,"VideoQuest"), DEVICE( SATSAGEM, SATSAGEM_PCR2101,"PCR2101 DVB receiver"), DEVICE( SATSAGEM, SATSAGEM_TELSATTURBO,"Telsat Turbo DVB"), DEVICE( HUGHES, HUGHES_DIRECPC, "DirecPC"), DEVICE( ENSONIQ, ENSONIQ_ES1371, "ES1371"), DEVICE( ENSONIQ, ENSONIQ_AUDIOPCI,"AudioPCI"), DEVICE( ALTEON, ALTEON_ACENIC, "AceNIC"), DEVICE( PICTUREL, PICTUREL_PCIVST,"PCIVST"), DEVICE( NVIDIA_SGS, NVIDIA_SGS_RIVA128, "Riva 128"), DEVICE( CBOARDS, CBOARDS_DAS1602_16,"DAS1602/16"), DEVICE( MOTOROLA_OOPS, MOTOROLA_FALCON,"Falcon"), DEVICE( TIMEDIA, TIMEDIA_4008A, "Noname 4008A"), DEVICE( SYMPHONY, SYMPHONY_101, "82C101"), DEVICE( TEKRAM, TEKRAM_DC290, "DC-290"), DEVICE( 3DLABS, 3DLABS_300SX, "GLINT 300SX"), DEVICE( 3DLABS, 3DLABS_500TX, "GLINT 500TX"), DEVICE( 3DLABS, 3DLABS_DELTA, "GLINT Delta"), DEVICE( 3DLABS, 3DLABS_PERMEDIA,"PERMEDIA"), DEVICE( 3DLABS, 3DLABS_MX, "GLINT MX"), DEVICE( AVANCE, AVANCE_ALG2064, "ALG2064i"), DEVICE( AVANCE, AVANCE_2302, "ALG-2302"), DEVICE( NETVIN, NETVIN_NV5000SC,"NV5000"), DEVICE( S3, S3_PLATO_PXS, "PLATO/PX (system)"), 
DEVICE( S3, S3_ViRGE, "ViRGE"), DEVICE( S3, S3_TRIO, "Trio32/Trio64"), DEVICE( S3, S3_AURORA64VP, "Aurora64V+"), DEVICE( S3, S3_TRIO64UVP, "Trio64UV+"), DEVICE( S3, S3_ViRGE_VX, "ViRGE/VX"), DEVICE( S3, S3_868, "Vision 868"), DEVICE( S3, S3_928, "Vision 928-P"), DEVICE( S3, S3_864_1, "Vision 864-P"), DEVICE( S3, S3_864_2, "Vision 864-P"), DEVICE( S3, S3_964_1, "Vision 964-P"), DEVICE( S3, S3_964_2, "Vision 964-P"), DEVICE( S3, S3_968, "Vision 968"), DEVICE( S3, S3_TRIO64V2, "Trio64V2/DX or /GX"), DEVICE( S3, S3_PLATO_PXG, "PLATO/PX (graphics)"), DEVICE( S3, S3_ViRGE_DXGX, "ViRGE/DX or /GX"), DEVICE( S3, S3_ViRGE_GX2, "ViRGE/GX2"), DEVICE( S3, S3_ViRGE_MX, "ViRGE/MX"), DEVICE( S3, S3_ViRGE_MXP, "ViRGE/MX+"), DEVICE( S3, S3_ViRGE_MXPMV, "ViRGE/MX+MV"), DEVICE( S3, S3_SONICVIBES, "SonicVibes"), DEVICE( DCI, DCI_PCCOM4, "PC COM PCI Bus 4 port serial Adapter"), DEVICE( GENROCO, GENROCO_HFP832, "TURBOstor HFP832"), DEVICE( INTEL, INTEL_82375, "82375EB"), DEVICE( INTEL, INTEL_82424, "82424ZX Saturn"), DEVICE( INTEL, INTEL_82378, "82378IB"), DEVICE( INTEL, INTEL_82430, "82430ZX Aries"), DEVICE( INTEL, INTEL_82434, "82434LX Mercury/Neptune"), DEVICE( INTEL, INTEL_I960, "i960"), DEVICE( INTEL, INTEL_I960RN, "i960 RN"), DEVICE( INTEL, INTEL_82559ER, "82559ER"), DEVICE( INTEL, INTEL_82092AA_0,"82092AA PCMCIA bridge"), DEVICE( INTEL, INTEL_82092AA_1,"82092AA EIDE"), DEVICE( INTEL, INTEL_7116, "SAA7116"), DEVICE( INTEL, INTEL_82596, "82596"), DEVICE( INTEL, INTEL_82865, "82865"), DEVICE( INTEL, INTEL_82557, "82557"), DEVICE( INTEL, INTEL_82437, "82437"), DEVICE( INTEL, INTEL_82371FB_0,"82371FB PIIX ISA"), DEVICE( INTEL, INTEL_82371FB_1,"82371FB PIIX IDE"), DEVICE( INTEL, INTEL_82371MX, "430MX - 82371MX MPIIX"), DEVICE( INTEL, INTEL_82437MX, "430MX - 82437MX MTSC"), DEVICE( INTEL, INTEL_82441, "82441FX Natoma"), DEVICE( INTEL, INTEL_82380FB, "82380FB Mobile"), DEVICE( INTEL, INTEL_82439, "82439HX Triton II"), DEVICE( INTEL, INTEL_MEGARAID, "OEM MegaRAID Controller"), DEVICE( 
INTEL, INTEL_82371SB_0,"82371SB PIIX3 ISA"), DEVICE( INTEL, INTEL_82371SB_1,"82371SB PIIX3 IDE"), DEVICE( INTEL, INTEL_82371SB_2,"82371SB PIIX3 USB"), DEVICE( INTEL, INTEL_82437VX, "82437VX Triton II"), DEVICE( INTEL, INTEL_82439TX, "82439TX"), DEVICE( INTEL, INTEL_82371AB_0,"82371AB PIIX4 ISA"), DEVICE( INTEL, INTEL_82371AB, "82371AB PIIX4 IDE"), DEVICE( INTEL, INTEL_82371AB_2,"82371AB PIIX4 USB"), DEVICE( INTEL, INTEL_82371AB_3,"82371AB PIIX4 ACPI"), DEVICE( INTEL, INTEL_82443LX_0,"440LX - 82443LX PAC Host"), DEVICE( INTEL, INTEL_82443LX_1,"440LX - 82443LX PAC AGP"), DEVICE( INTEL, INTEL_82443BX_0,"440BX - 82443BX Host"), DEVICE( INTEL, INTEL_82443BX_1,"440BX - 82443BX AGP"), DEVICE( INTEL, INTEL_82443BX_2,"440BX - 82443BX Host (no AGP)"), DEVICE( INTEL, INTEL_P6, "Orion P6"), DEVICE( INTEL, INTEL_82450GX, "450KX/GX [Orion] - 82454KX/GX PCI Bridge"), DEVICE( INTEL, INTEL_82453GX, "450KX/GX [Orion] - 82453KX/GX Memory Controller"), DEVICE( INTEL, INTEL_82451NX, "450NX - 82451NX Memory & I/O Controller"), DEVICE( INTEL, INTEL_82454NX, "450NX - 82454NX PCI Expander Bridge"), DEVICE( COMPUTONE, COMPUTONE_IP2EX, "Computone IntelliPort Plus"), DEVICE( KTI, KTI_ET32P2, "ET32P2"), DEVICE( ADAPTEC, ADAPTEC_7810, "AIC-7810 RAID"), DEVICE( ADAPTEC, ADAPTEC_7821, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_38602, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_7850, "AIC-7850"), DEVICE( ADAPTEC, ADAPTEC_7855, "AIC-7855"), DEVICE( ADAPTEC, ADAPTEC_5800, "AIC-5800"), DEVICE( ADAPTEC, ADAPTEC_3860, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_7860, "AIC-7860"), DEVICE( ADAPTEC, ADAPTEC_7861, "AIC-7861"), DEVICE( ADAPTEC, ADAPTEC_7870, "AIC-7870"), DEVICE( ADAPTEC, ADAPTEC_7871, "AIC-7871"), DEVICE( ADAPTEC, ADAPTEC_7872, "AIC-7872"), DEVICE( ADAPTEC, ADAPTEC_7873, "AIC-7873"), DEVICE( ADAPTEC, ADAPTEC_7874, "AIC-7874"), DEVICE( ADAPTEC, ADAPTEC_7895, "AIC-7895U"), DEVICE( ADAPTEC, ADAPTEC_7880, "AIC-7880U"), DEVICE( ADAPTEC, ADAPTEC_7881, "AIC-7881U"), DEVICE( ADAPTEC, ADAPTEC_7882, "AIC-7882U"), 
DEVICE( ADAPTEC,	ADAPTEC_7883,	"AIC-7883U"),
	DEVICE( ADAPTEC,	ADAPTEC_7884,	"AIC-7884U"),
	DEVICE( ADAPTEC,	ADAPTEC_7885,	"AIC-7885U"),
	DEVICE( ADAPTEC,	ADAPTEC_7886,	"AIC-7886U"),
	DEVICE( ADAPTEC,	ADAPTEC_7887,	"AIC-7887U"),
	DEVICE( ADAPTEC,	ADAPTEC_7888,	"AIC-7888U"),
	DEVICE( ADAPTEC,	ADAPTEC_1030,	"ABA-1030 DVB receiver"),
	DEVICE( ADAPTEC2,	ADAPTEC2_2940U2,"AHA-2940U2"),
	DEVICE( ADAPTEC2,	ADAPTEC2_2930U2,"AHA-2930U2"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7890B,	"AIC-7890/1"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7890,	"AIC-7890/1"),
	DEVICE( ADAPTEC2,	ADAPTEC2_3940U2,"AHA-3940U2"),
	DEVICE( ADAPTEC2,	ADAPTEC2_3950U2D,"AHA-3950U2D"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7896,	"AIC-7896/7"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7892A,	"AIC-7892"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7892B,	"AIC-7892"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7892D,	"AIC-7892"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7892P,	"AIC-7892"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7899A,	"AIC-7899"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7899B,	"AIC-7899"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7899D,	"AIC-7899"),
	DEVICE( ADAPTEC2,	ADAPTEC2_7899P,	"AIC-7899"),
	DEVICE( ATRONICS,	ATRONICS_2015,	"IDE-2015PL"),
	DEVICE( TIGERJET,	TIGERJET_300,	"Tiger300 ISDN"),
	DEVICE( ARK,		ARK_STING,	"Stingray"),
	DEVICE( ARK,		ARK_STINGARK,	"Stingray ARK 2000PV"),
	DEVICE( ARK,		ARK_2000MT,	"2000MT")
};

/*
 * dev_info[] is sorted by (vendor, device) so we can use binary search.
 * Returns the matching table entry, or 0 (NULL) when the vendor/device
 * pair is not present.
 */
static struct pci_dev_info *
pci_lookup_dev(unsigned int vendor, unsigned int dev)
{
	int min = 0, max = sizeof(dev_info)/sizeof(dev_info[0]) - 1;

	for ( ; ; ) {
		int i = (min + max) >> 1;	/* midpoint of the search window */
		long order;

		/* order vendor first, then device within a vendor */
		order = dev_info[i].vendor - (long) vendor;
		if (!order)
			order = dev_info[i].device - (long) dev;
		if (order < 0) {
			min = i + 1;
			if ( min > max )
				return 0;	/* window exhausted: not found */
			continue;
		}
		if (order > 0) {
			max = i - 1;
			if ( min > max )
				return 0;	/* window exhausted: not found */
			continue;
		}
		return & dev_info[ i ];		/* exact match */
	}
}

/*
 * Map a PCI class code to a human-readable name.  The low byte of the
 * class word (the programming-interface byte) is discarded by the
 * ">> 8".  Unrecognized classes are formatted into the caller-supplied
 * buf, which is returned instead.
 */
static const char *
pci_strclass (unsigned int class, char *buf)
{
	char *s;

	switch (class >> 8) {
	case PCI_CLASS_NOT_DEFINED:		s = "Non-VGA device"; break;
	case PCI_CLASS_NOT_DEFINED_VGA:		s = "VGA compatible device"; break;
	case PCI_CLASS_STORAGE_SCSI:		s = "SCSI storage controller"; break;
	case PCI_CLASS_STORAGE_IDE:		s = "IDE interface"; break;
	case PCI_CLASS_STORAGE_FLOPPY:		s = "Floppy disk controller"; break;
	case PCI_CLASS_STORAGE_IPI:		s = "IPI storage controller"; break;
	case PCI_CLASS_STORAGE_RAID:		s = "RAID storage controller"; break;
	case PCI_CLASS_STORAGE_OTHER:		s = "Unknown mass storage controller"; break;
	case PCI_CLASS_NETWORK_ETHERNET:	s = "Ethernet controller"; break;
	case PCI_CLASS_NETWORK_TOKEN_RING:	s = "Token ring network controller"; break;
	case PCI_CLASS_NETWORK_FDDI:		s = "FDDI network controller"; break;
	case PCI_CLASS_NETWORK_ATM:		s = "ATM network controller"; break;
	case PCI_CLASS_NETWORK_OTHER:		s = "Network controller"; break;
	case PCI_CLASS_DISPLAY_VGA:		s = "VGA compatible controller"; break;
	case PCI_CLASS_DISPLAY_XGA:		s = "XGA compatible controller"; break;
	case PCI_CLASS_DISPLAY_OTHER:		s = "Display controller"; break;
	case PCI_CLASS_MULTIMEDIA_VIDEO:	s = "Multimedia video controller"; break;
	case PCI_CLASS_MULTIMEDIA_AUDIO:	s = "Multimedia audio controller"; break;
	case PCI_CLASS_MULTIMEDIA_OTHER:	s = "Multimedia controller"; break;
	case PCI_CLASS_MEMORY_RAM:		s = "RAM memory"; break;
	case PCI_CLASS_MEMORY_FLASH:		s = "FLASH memory"; break;
	case PCI_CLASS_MEMORY_OTHER:		s = "Memory"; break;
	case PCI_CLASS_BRIDGE_HOST:		s = "Host bridge"; break;
	case PCI_CLASS_BRIDGE_ISA:		s = "ISA bridge"; break;
	case PCI_CLASS_BRIDGE_EISA:		s = "EISA bridge"; break;
	case PCI_CLASS_BRIDGE_MC:		s = "MicroChannel bridge"; break;
	case PCI_CLASS_BRIDGE_PCI:		s = "PCI bridge"; break;
	case PCI_CLASS_BRIDGE_PCMCIA:		s = "PCMCIA bridge"; break;
	case PCI_CLASS_BRIDGE_NUBUS:		s = "NuBus bridge"; break;
	case PCI_CLASS_BRIDGE_CARDBUS:		s = "CardBus bridge"; break;
	case PCI_CLASS_BRIDGE_OTHER:		s = "Bridge"; break;
	case PCI_CLASS_COMMUNICATION_SERIAL:	s = "Serial controller"; break;
	case PCI_CLASS_COMMUNICATION_PARALLEL:	s = "Parallel controller"; break;
	case PCI_CLASS_COMMUNICATION_OTHER:	s = "Communication controller"; break;
	case PCI_CLASS_SYSTEM_PIC:		s = "PIC"; break;
	case PCI_CLASS_SYSTEM_DMA:		s = "DMA controller"; break;
	case PCI_CLASS_SYSTEM_TIMER:		s = "Timer"; break;
	case PCI_CLASS_SYSTEM_RTC:		s = "RTC"; break;
	case PCI_CLASS_SYSTEM_OTHER:		s = "System peripheral"; break;
	case PCI_CLASS_INPUT_KEYBOARD:		s = "Keyboard controller"; break;
	case PCI_CLASS_INPUT_PEN:		s = "Digitizer Pen"; break;
	case PCI_CLASS_INPUT_MOUSE:		s = "Mouse controller"; break;
	case PCI_CLASS_INPUT_OTHER:		s = "Input device controller"; break;
	case PCI_CLASS_DOCKING_GENERIC:		s = "Generic Docking Station"; break;
	case PCI_CLASS_DOCKING_OTHER:		s = "Docking Station"; break;
	case PCI_CLASS_PROCESSOR_386:		s = "386"; break;
	case PCI_CLASS_PROCESSOR_486:		s = "486"; break;
	case PCI_CLASS_PROCESSOR_PENTIUM:	s = "Pentium"; break;
	case PCI_CLASS_PROCESSOR_ALPHA:		s = "Alpha"; break;
	case PCI_CLASS_PROCESSOR_POWERPC:	s = "Power PC"; break;
	case PCI_CLASS_PROCESSOR_CO:		s = "Co-processor"; break;
	case PCI_CLASS_SERIAL_FIREWIRE:		s = "FireWire (IEEE 1394)"; break;
	case PCI_CLASS_SERIAL_ACCESS:		s = "ACCESS Bus"; break;
	case PCI_CLASS_SERIAL_SSA:		s = "SSA"; break;
	case PCI_CLASS_SERIAL_USB:		s = "USB Controller"; break;
	case PCI_CLASS_SERIAL_FIBER:		s = "Fiber Channel"; break;
	case PCI_CLASS_SERIAL_SMBUS:		s = "SM Bus"; break;
	case PCI_CLASS_HOT_SWAP_CONTROLLER:	s = "Hot Swap Controller"; break;
	default:
		/* unknown class: format the raw value into the caller's buffer */
		sprintf(buf, "[PCI_CLASS %x]", class);
		s = buf;
		break;
	}
	return s;
}

/*
 * Map a PCI vendor ID to the vendor name.  Unrecognized vendors are
 * formatted into the caller-supplied buf, which is returned instead.
 */
static const char *
pci_strvendor(unsigned int vendor, char *buf)
{
	char *s;

	switch (vendor) {
	case PCI_VENDOR_ID_COMPAQ:	s = "Compaq"; break;
	case PCI_VENDOR_ID_NCR:		s = "NCR"; break;
	case PCI_VENDOR_ID_ATI:		s = "ATI"; break;
	case PCI_VENDOR_ID_VLSI:	s = "VLSI"; break;
	case PCI_VENDOR_ID_ADL:		s = "Avance Logic"; break;
	case PCI_VENDOR_ID_NS:		s = "NS"; break;
	case PCI_VENDOR_ID_TSENG:	s = "Tseng'Lab"; break;
	case PCI_VENDOR_ID_WEITEK:	s = "Weitek"; break;
	case PCI_VENDOR_ID_DEC:		s = "DEC"; break;
case PCI_VENDOR_ID_CIRRUS:	s = "Cirrus Logic"; break;
	case PCI_VENDOR_ID_IBM:		s = "IBM"; break;
	case PCI_VENDOR_ID_WD:		s = "Western Digital"; break;
	case PCI_VENDOR_ID_AMD:		s = "AMD"; break;
	case PCI_VENDOR_ID_TRIDENT:	s = "Trident"; break;
	case PCI_VENDOR_ID_AI:		s = "Acer Incorporated"; break;
	case PCI_VENDOR_ID_MATROX:	s = "Matrox"; break;
	case PCI_VENDOR_ID_CT:		s = "Chips & Technologies"; break;
	case PCI_VENDOR_ID_MIRO:	s = "Miro"; break;
	case PCI_VENDOR_ID_NEC:		s = "NEC"; break;
	case PCI_VENDOR_ID_FD:		s = "Future Domain"; break;
	case PCI_VENDOR_ID_SI:		s = "Silicon Integrated Systems"; break;
	case PCI_VENDOR_ID_HP:		s = "Hewlett Packard"; break;
	case PCI_VENDOR_ID_PCTECH:	s = "PCTECH"; break;
	case PCI_VENDOR_ID_DPT:		s = "DPT"; break;
	case PCI_VENDOR_ID_OPTI:	s = "OPTi"; break;
	case PCI_VENDOR_ID_SGS:		s = "SGS Thomson"; break;
	case PCI_VENDOR_ID_BUSLOGIC:	s = "BusLogic"; break;
	case PCI_VENDOR_ID_TI:		s = "Texas Instruments"; break;
	case PCI_VENDOR_ID_OAK:		s = "OAK"; break;
	case PCI_VENDOR_ID_WINBOND2:	s = "Winbond"; break;
	case PCI_VENDOR_ID_MOTOROLA:	s = "Motorola"; break;
	case PCI_VENDOR_ID_MOTOROLA_OOPS: s = "Motorola"; break;
	case PCI_VENDOR_ID_PROMISE:	s = "Promise Technology"; break;
	case PCI_VENDOR_ID_N9:		s = "Number Nine"; break;
	case PCI_VENDOR_ID_UMC:		s = "UMC"; break;
	case PCI_VENDOR_ID_X:		s = "X TECHNOLOGY"; break;
	case PCI_VENDOR_ID_MYLEX:	s = "Mylex"; break;
	case PCI_VENDOR_ID_PICOP:	s = "PicoPower"; break;
	case PCI_VENDOR_ID_APPLE:	s = "Apple"; break;
	case PCI_VENDOR_ID_NEXGEN:	s = "Nexgen"; break;
	case PCI_VENDOR_ID_QLOGIC:	s = "Q Logic"; break;
	case PCI_VENDOR_ID_CYRIX:	s = "Cyrix"; break;
	case PCI_VENDOR_ID_LEADTEK:	s = "Leadtek Research"; break;
	case PCI_VENDOR_ID_CONTAQ:	s = "Contaq"; break;
	case PCI_VENDOR_ID_FOREX:	s = "Forex"; break;
	case PCI_VENDOR_ID_OLICOM:	s = "Olicom"; break;
	case PCI_VENDOR_ID_SUN:		s = "Sun Microsystems"; break;
	case PCI_VENDOR_ID_CMD:		s = "CMD"; break;
	case PCI_VENDOR_ID_VISION:	s = "Vision"; break;
	case PCI_VENDOR_ID_BROOKTREE:	s = "Brooktree"; break;
	case PCI_VENDOR_ID_SIERRA:	s = "Sierra"; break;
	case PCI_VENDOR_ID_ACC:		s = "ACC MICROELECTRONICS"; break;
	case PCI_VENDOR_ID_WINBOND:	s = "Winbond"; break;
	case PCI_VENDOR_ID_DATABOOK:	s = "Databook"; break;
	case PCI_VENDOR_ID_PLX:		s = "PLX"; break;
	case PCI_VENDOR_ID_MADGE:	s = "Madge Networks"; break;
	case PCI_VENDOR_ID_3COM:	s = "3Com"; break;
	case PCI_VENDOR_ID_SMC:		s = "SMC"; break;
	case PCI_VENDOR_ID_AL:		s = "Acer Labs"; break;
	case PCI_VENDOR_ID_MITSUBISHI:	s = "Mitsubishi"; break;
	case PCI_VENDOR_ID_SURECOM:	s = "Surecom"; break;
	case PCI_VENDOR_ID_NEOMAGIC:	s = "Neomagic"; break;
	case PCI_VENDOR_ID_ASP:		s = "Advanced System Products"; break;
	case PCI_VENDOR_ID_MACRONIX:	s = "Macronix"; break;
	case PCI_VENDOR_ID_CERN:	s = "CERN"; break;
	case PCI_VENDOR_ID_NVIDIA:	s = "NVidia"; break;
	case PCI_VENDOR_ID_IMS:		s = "IMS"; break;
	case PCI_VENDOR_ID_TEKRAM2:	s = "Tekram"; break;
	case PCI_VENDOR_ID_TUNDRA:	s = "Tundra"; break;
	case PCI_VENDOR_ID_AMCC:	s = "AMCC"; break;
	case PCI_VENDOR_ID_INTERG:	s = "Intergraphics"; break;
	case PCI_VENDOR_ID_REALTEK:	s = "Realtek"; break;
	case PCI_VENDOR_ID_TRUEVISION:	s = "Truevision"; break;
	case PCI_VENDOR_ID_INIT:	s = "Initio Corp"; break;
	case PCI_VENDOR_ID_TTI:		s = "Triones Technologies, Inc."; break;
	case PCI_VENDOR_ID_VIA:		s = "VIA Technologies"; break;
	case PCI_VENDOR_ID_SMC2:	s = "SMC"; break;
	case PCI_VENDOR_ID_VORTEX:	s = "VORTEX"; break;
	case PCI_VENDOR_ID_EF:		s = "Efficient Networks"; break;
	case PCI_VENDOR_ID_FORE:	s = "Fore Systems"; break;
	case PCI_VENDOR_ID_IMAGINGTECH:	s = "Imaging Technology"; break;
	case PCI_VENDOR_ID_PHILIPS:	s = "Philips"; break;
	case PCI_VENDOR_ID_CYCLONE:	s = "Cyclone"; break;
	case PCI_VENDOR_ID_ALLIANCE:	s = "Alliance"; break;
	case PCI_VENDOR_ID_VMIC:	s = "VMIC"; break;
	case PCI_VENDOR_ID_DIGI:	s = "Digi Intl."; break;
	case PCI_VENDOR_ID_MUTECH:	s = "Mutech"; break;
	case PCI_VENDOR_ID_RENDITION:	s = "Rendition"; break;
	case PCI_VENDOR_ID_TOSHIBA:	s = "Toshiba"; break;
	case PCI_VENDOR_ID_RICOH:	s = "Ricoh"; break;
	case PCI_VENDOR_ID_ARTOP:	s = "Artop Electronics"; break;
	case PCI_VENDOR_ID_ZEITNET:	s = "ZeitNet"; break;
	case PCI_VENDOR_ID_OMEGA:	s = "Omega Micro"; break;
	case PCI_VENDOR_ID_LITEON:	s = "LiteOn"; break;
	case PCI_VENDOR_ID_NP:		s = "Network Peripherals"; break;
	case PCI_VENDOR_ID_ATT:		s = "Lucent (ex-AT&T) Microelectronics"; break;
	case PCI_VENDOR_ID_SPECIALIX:	s = "Specialix"; break;
	case PCI_VENDOR_ID_AURAVISION:	s = "Auravision"; break;
	case PCI_VENDOR_ID_IKON:	s = "Ikon"; break;
	case PCI_VENDOR_ID_ZORAN:	s = "Zoran"; break;
	case PCI_VENDOR_ID_KINETIC:	s = "Kinetic"; break;
	case PCI_VENDOR_ID_COMPEX:	s = "Compex"; break;
	case PCI_VENDOR_ID_RP:		s = "Comtrol"; break;
	case PCI_VENDOR_ID_CYCLADES:	s = "Cyclades"; break;
	case PCI_VENDOR_ID_ESSENTIAL:	s = "Essential Communications"; break;
	case PCI_VENDOR_ID_O2:		s = "O2 Micro"; break;
	case PCI_VENDOR_ID_3DFX:	s = "3Dfx"; break;
	case PCI_VENDOR_ID_SIGMADES:	s = "Sigma Designs"; break;
	case PCI_VENDOR_ID_AVM:		s = "AVM"; break;
	case PCI_VENDOR_ID_CCUBE:	s = "C-Cube"; break;
	case PCI_VENDOR_ID_DIPIX:	s = "Dipix"; break;
	case PCI_VENDOR_ID_STALLION:	s = "Stallion Technologies"; break;
	case PCI_VENDOR_ID_OPTIBASE:	s = "Optibase"; break;
	case PCI_VENDOR_ID_SATSAGEM:	s = "SatSagem"; break;
	case PCI_VENDOR_ID_HUGHES:	s = "Hughes"; break;
	case PCI_VENDOR_ID_ENSONIQ:	s = "Ensoniq"; break;
	case PCI_VENDOR_ID_ALTEON:	s = "Alteon"; break;
	case PCI_VENDOR_ID_PICTUREL:	s = "Picture Elements"; break;
	case PCI_VENDOR_ID_NVIDIA_SGS:	s = "NVidia/SGS Thomson"; break;
	case PCI_VENDOR_ID_CBOARDS:	s = "ComputerBoards"; break;
	case PCI_VENDOR_ID_TIMEDIA:	s = "Timedia Technology"; break;
	case PCI_VENDOR_ID_SYMPHONY:	s = "Symphony"; break;
	case PCI_VENDOR_ID_COMPUTONE:	s = "Computone Corporation"; break;
	case PCI_VENDOR_ID_TEKRAM:	s = "Tekram"; break;
	case PCI_VENDOR_ID_3DLABS:	s = "3Dlabs"; break;
	case PCI_VENDOR_ID_AVANCE:	s = "Avance"; break;
	case PCI_VENDOR_ID_NETVIN:	s = "NetVin"; break;
	case PCI_VENDOR_ID_S3:		s = "S3 Inc."; break;
	case PCI_VENDOR_ID_DCI:		s = "Decision Computer Int."; break;
	case PCI_VENDOR_ID_GENROCO:	s = "Genroco"; break;
	case PCI_VENDOR_ID_INTEL:	s = "Intel"; break;
	case PCI_VENDOR_ID_KTI:		s = "KTI"; break;
	case PCI_VENDOR_ID_ADAPTEC:	s = "Adaptec"; break;
	case PCI_VENDOR_ID_ADAPTEC2:	s = "Adaptec"; break;
	case PCI_VENDOR_ID_ATRONICS:	s = "Atronics"; break;
	case PCI_VENDOR_ID_TIGERJET:	s = "TigerJet"; break;
	case PCI_VENDOR_ID_ARK:		s = "ARK Logic"; break;
	case PCI_VENDOR_ID_SYSKONNECT:	s = "SysKonnect"; break;
	default:
		/* unknown vendor: format the raw ID into the caller's buffer */
		sprintf(buf, "[PCI_VENDOR %x]", vendor);
		s = buf;
		break;
	}
	return s;
}

/*
 * Map a (vendor, device) pair to the device name via the dev_info[]
 * table.  Unknown devices are formatted into the caller-supplied buf.
 */
static const char *
pci_strdev(unsigned int vendor, unsigned int device, char *buf)
{
	struct pci_dev_info *info;

	if ((info = pci_lookup_dev(vendor, device)))
		return info->name;
	else {
		sprintf(buf, "[PCI_DEVICE %x]", device);
		return buf;
	}
}

/*
 * If the disk's name is started with these strings, we will skip it and do not
 * display its statistics.
 */
static char *skipped_disk_name[] = {
	"ram",
	"loop",
	NULL
};

/* Return TRUE when name begins with one of the skipped_disk_name prefixes. */
static int
is_skipped_disk(char *name)
{
	char **p = skipped_disk_name;

	while (*p) {
		if (strncmp(name, *p, strlen(*p)) == 0)
			return TRUE;
		p++;
	}
	return FALSE;
}

/* Per-disk in-flight request counters, split by direction. */
struct diskio {
	int read;
	int write;
};

/*
 * Kernel-version-specific cursor for walking the gendisk list in a
 * crash dump; callbacks are filled in according to the target kernel.
 */
struct iter {
	/* If the kernel uses klist, the address should be klist.k_list */
	long head_address;
	long current_address;
	long type_address;	/* the address of symbol "disk_type" */
	/*
	 * If it is true, it means request_list.count[2] contains async/sync
	 * requests.
	 */
	int sync_count;
	int diskname_len;
	unsigned long (*next_disk)(struct iter *);
	/*
	 * The argument is the address of request_queue, and the function
	 * returns the total requests in the driver(not ended)
	 */
	unsigned int (*get_in_flight)(unsigned long);
	/*
	 * this function reads request_list.count[2], and the first argument
	 * is the address of request_queue.
*/
	void (*get_diskio)(unsigned long , unsigned long, struct diskio *);
	/*
	 * check if device.type == &disk_type
	 *
	 * old kernel(version <= 2.6.24) does not have the symbol "disk_type",
	 * and this callback should be null.
	 */
	int (*match)(struct iter *, unsigned long);
	/*
	 * If the kernel uses list, the argument is the address of list_head,
	 * otherwise, the argument is the address of klist_node.
	 */
	unsigned long (*get_gendisk)(unsigned long);
};

/*
 * The get_gendisk_*() variants convert a list/klist entry address back
 * to the owning gendisk address, using the member offsets of the
 * target kernel.
 */

/* kernel version <= 2.6.24 */
static unsigned long
get_gendisk_1(unsigned long entry)
{
	return entry - OFFSET(kobject_entry) - OFFSET(gendisk_kobj);
}

/* 2.6.24 < kernel version <= 2.6.27 */
static unsigned long
get_gendisk_2(unsigned long entry)
{
	return entry - OFFSET(device_node) - OFFSET(gendisk_dev);
}

/* kernel version > 2.6.27 && struct gendisk contains dev/__dev */
static unsigned long
get_gendisk_3(unsigned long entry)
{
	return entry - OFFSET(device_knode_class) - OFFSET(gendisk_dev);
}

/* kernel version > 2.6.27 && struct gendisk does not contain dev/__dev */
static unsigned long
get_gendisk_4(unsigned long entry)
{
	return entry - OFFSET(device_knode_class) - OFFSET(hd_struct_dev) -
		OFFSET(gendisk_part0);
}

/* kernel version >= 5.1 */
static unsigned long
get_gendisk_5(unsigned long entry)
{
	unsigned long device_address;
	unsigned long device_private_address;
	unsigned long gendisk;

	/* the klist node hangs off device_private, not device */
	device_private_address = entry - OFFSET(device_private_knode_class);
	readmem(device_private_address + OFFSET(device_private_device),
		KVADDR, &device_address, sizeof(device_address),
		"device_private.device", FAULT_ON_ERROR);

	if (VALID_MEMBER(hd_struct_dev))
		return device_address - OFFSET(hd_struct_dev) -
			OFFSET(gendisk_part0);

	/* kernel version >= 5.11 */
	readmem(device_address - OFFSET(block_device_bd_device) +
		OFFSET(block_device_bd_disk), KVADDR, &gendisk,
		sizeof(ulong), "block_device.bd_disk", FAULT_ON_ERROR);
	return gendisk;
}

/* 2.6.24 < kernel version <= 2.6.27 */
static int
match_list(struct iter *i, unsigned long entry)
{
	unsigned long device_address;
	unsigned long device_type;

	/* TRUE only when the embedded device's type is &disk_type */
	device_address = entry - OFFSET(device_node);
	readmem(device_address + OFFSET(device_type), KVADDR,
		&device_type, sizeof(device_type), "device.type",
		FAULT_ON_ERROR);
	if (device_type != i->type_address)
		return FALSE;

	return TRUE;
}

/* kernel version > 2.6.27 */
static int
match_klist(struct iter *i, unsigned long entry)
{
	unsigned long device_address;
	unsigned long device_type;
	unsigned long device_private_address;

	if (VALID_MEMBER(device_knode_class))
		device_address = entry - OFFSET(device_knode_class);
	else {
		/* kernel version >= 5.1 */
		device_private_address = entry -
			OFFSET(device_private_knode_class);
		readmem(device_private_address +
			OFFSET(device_private_device), KVADDR,
			&device_address, sizeof(device_address),
			"device_private.device", FAULT_ON_ERROR);
	}

	readmem(device_address + OFFSET(device_type), KVADDR,
		&device_type, sizeof(device_type), "device.type",
		FAULT_ON_ERROR);
	if (device_type != i->type_address)
		return FALSE;

	return TRUE;
}

/* old kernel(version <= 2.6.27): list */
static unsigned long
next_disk_list(struct iter *i)
{
	unsigned long list_head_address, next_address;

	if (i->current_address) {
		list_head_address = i->current_address;
	} else {
		list_head_address = i->head_address;
	}

again:
	/* read list_head.next */
	readmem(list_head_address + OFFSET(list_head_next), KVADDR,
		&next_address, sizeof(next_address), "list_head.next",
		FAULT_ON_ERROR);

	/* back at the list head: iteration is done */
	if (next_address == i->head_address)
		return 0;

	/* skip entries that are not gendisk devices */
	if (i->match && !i->match(i, next_address)) {
		list_head_address = next_address;
		goto again;
	}

	i->current_address = next_address;
	return i->get_gendisk(next_address);
}

/* new kernel(version > 2.6.27): klist */
static unsigned long
next_disk_klist(struct iter* i)
{
	unsigned long klist_node_address, list_head_address, next_address;
	unsigned long n_klist;

	if (i->current_address) {
		list_head_address = i->current_address;
	} else {
		list_head_address = i->head_address;
	}

again:
	/* read list_head.next */
readmem(list_head_address + OFFSET(list_head_next), KVADDR,
		&next_address, sizeof(next_address), "list_head.next",
		FAULT_ON_ERROR);

	/* skip dead klist_node */
	while (next_address != i->head_address) {
		klist_node_address = next_address - OFFSET(klist_node_n_node);
		readmem(klist_node_address + OFFSET(klist_node_n_klist),
			KVADDR, &n_klist, sizeof(n_klist),
			"klist_node.n_klist", FAULT_ON_ERROR);
		/* low bit of n_klist clear: node is alive */
		if (!(n_klist & 1))
			break;

		/* the klist_node is dead, skip to next klist_node */
		readmem(next_address + OFFSET(list_head_next), KVADDR,
			&next_address, sizeof(next_address),
			"list_head.next", FAULT_ON_ERROR);
	}

	/* back at the list head: iteration is done */
	if (next_address == i->head_address)
		return 0;

	/* skip entries that are not gendisk devices */
	if (i->match && !i->match(i, klist_node_address)) {
		list_head_address = next_address;
		goto again;
	}

	i->current_address = next_address;
	return i->get_gendisk(klist_node_address);
}

/* Return 1 when the request_queue uses the blk-mq (multiqueue) path. */
static int
use_mq_interface(unsigned long q)
{
	unsigned long mq_ops;

	if (!VALID_MEMBER(request_queue_mq_ops))
		return 0;

	readmem(q + OFFSET(request_queue_mq_ops), KVADDR, &mq_ops,
		sizeof(ulong), "request_queue.mq_ops", FAULT_ON_ERROR);

	if (mq_ops == 0)
		return 0;
	else
		return 1;
}

/*
 * Derive in-flight counts for one blk_mq_ctx as
 * dispatched[dir] - completed[dir], index 0 = read, 1 = write.
 */
static void
get_one_mctx_diskio(unsigned long mctx, struct diskio *io)
{
	unsigned long dispatch[2];
	unsigned long comp[2];

	readmem(mctx + OFFSET(blk_mq_ctx_rq_dispatched), KVADDR,
		dispatch, sizeof(ulong) * 2, "blk_mq_ctx.rq_dispatched",
		FAULT_ON_ERROR);
	readmem(mctx + OFFSET(blk_mq_ctx_rq_completed), KVADDR,
		comp, sizeof(ulong) * 2, "blk_mq_ctx.rq_completed",
		FAULT_ON_ERROR);

	io->read = (dispatch[0] - comp[0]);
	io->write = (dispatch[1] - comp[1]);
}

/* callback invoked for every busy tag; returns FALSE to stop iteration */
typedef bool (busy_tag_iter_fn)(ulong rq, void *data);

/* context passed to mq_check_inflight(): queue filter plus counters */
struct mq_inflight {
	ulong q;
	struct diskio *dio;
};

/* context for bt_iter(): which tag map is walked and how to offset bits */
struct bt_iter_data {
	ulong tags;
	uint reserved;
	uint nr_reserved_tags;
	busy_tag_iter_fn *fn;
	void *data;
};

/*
 * See the include/linux/blk_types.h and include/linux/blk-mq.h
 */
#define MQ_RQ_IN_FLIGHT 1
#define REQ_OP_BITS 8
#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
#define BLK_MQ_F_TAG_HCTX_SHARED (1 << 3)

/* tag set shared across hw queues? (BLK_MQ_F_TAG_HCTX_SHARED) */
static bool
blk_mq_is_shared_tags(unsigned int flags)
{
	return flags & BLK_MQ_F_TAG_HCTX_SHARED;
}

/* odd REQ_OP values are writes (see enum req_opf in blk_types.h) */
static uint
op_is_write(uint op)
{
	return (op & REQ_OP_MASK) & 1;
}

/*
 * busy_tag_iter_fn: count one request into mq_inflight counters when it
 * belongs to the filtered queue and is in flight.  Returns FALSE (stop)
 * only on readmem failure.
 */
static bool
mq_check_inflight(ulong rq, void *data)
{
	uint cmd_flags = 0, state = 0;
	ulong addr = 0, queue = 0;
	struct mq_inflight *mi = data;

	/* stale/invalid slot: skip it but keep iterating */
	if (!IS_KVADDR(rq))
		return TRUE;

	addr = rq + OFFSET(request_q);
	if (!readmem(addr, KVADDR, &queue, sizeof(ulong), "request.q",
	    RETURN_ON_ERROR))
		return FALSE;

	addr = rq + OFFSET(request_cmd_flags);
	if (!readmem(addr, KVADDR, &cmd_flags, sizeof(uint),
	    "request.cmd_flags", RETURN_ON_ERROR))
		return FALSE;

	addr = rq + OFFSET(request_state);
	if (!readmem(addr, KVADDR, &state, sizeof(uint),
	    "request.state", RETURN_ON_ERROR))
		return FALSE;

	if (queue == mi->q && state == MQ_RQ_IN_FLIGHT) {
		if (op_is_write(cmd_flags))
			mi->dio->write++;
		else
			mi->dio->read++;
	}

	return TRUE;
}

/*
 * Per-set-bit callback: translate a bit number into blk_mq_tags.rqs[]
 * slot, read the request pointer and hand it to the busy-tag callback.
 */
static bool
bt_iter(uint bitnr, void *data)
{
	ulong addr = 0, rqs_addr = 0, rq = 0;
	struct bt_iter_data *iter_data = data;
	ulong tag = iter_data->tags;

	/* normal tags are numbered after the reserved ones */
	if (!iter_data->reserved)
		bitnr += iter_data->nr_reserved_tags;

	/* rqs */
	addr = tag + OFFSET(blk_mq_tags_rqs);
	if (!readmem(addr, KVADDR, &rqs_addr, sizeof(void *),
	    "blk_mq_tags.rqs", RETURN_ON_ERROR))
		return FALSE;

	addr = rqs_addr + bitnr * sizeof(ulong); /* rqs[bitnr] */
	if (!readmem(addr, KVADDR, &rq, sizeof(ulong),
	    "blk_mq_tags.rqs[]", RETURN_ON_ERROR))
		return FALSE;

	return iter_data->fn(rq, iter_data->data);
}

/*
 * Walk one sbitmap_queue of a tag map and feed every busy tag through
 * mq_check_inflight(), accumulating counts into dio.
 */
static void
bt_for_each(ulong q, ulong tags, ulong sbq, uint reserved,
	    uint nr_resvd_tags, struct diskio *dio)
{
	struct sbitmap_context sc = {0};
	struct mq_inflight mi = {
		.q = q,
		.dio = dio,
	};
	struct bt_iter_data iter_data = {
		.tags = tags,
		.reserved = reserved,
		.nr_reserved_tags = nr_resvd_tags,
		.fn = mq_check_inflight,
		.data = &mi,
	};

	sbitmap_context_load(sbq + OFFSET(sbitmap_queue_sb), &sc);
	sbitmap_for_each_set(&sc, bt_iter, &iter_data);
}

/*
 * Walk the reserved and normal tag bitmaps of one blk_mq_tags instance.
 * bitmap_tags_is_ptr tells whether bitmap_tags/breserved_tags are
 * pointers (newer kernels) and must be dereferenced first.
 */
static bool
queue_for_each_hw_ctx(ulong q, ulong blk_mq_tags_ptr, bool bitmap_tags_is_ptr,
		      struct diskio
*dio)
{
	uint nr_reserved_tags = 0;
	ulong tags = 0, addr = 0;
	bool ret = FALSE;

	if (!readmem(blk_mq_tags_ptr, KVADDR, &tags, sizeof(ulong),
	    "blk_mq_hw_ctx.tags", RETURN_ON_ERROR))
		goto out;

	addr = tags + OFFSET(blk_mq_tags_nr_reserved_tags);
	if (!readmem(addr, KVADDR, &nr_reserved_tags, sizeof(uint),
	    "blk_mq_tags_nr_reserved_tags", RETURN_ON_ERROR))
		goto out;

	/* first the reserved tag bitmap, when the set has reserved tags */
	if (nr_reserved_tags) {
		addr = tags + OFFSET(blk_mq_tags_breserved_tags);
		if (bitmap_tags_is_ptr &&
		    !readmem(addr, KVADDR, &addr, sizeof(ulong),
		    "blk_mq_tags.bitmap_tags", RETURN_ON_ERROR))
			goto out;
		bt_for_each(q, tags, addr, 1, nr_reserved_tags, dio);
	}

	/* then the normal tag bitmap */
	addr = tags + OFFSET(blk_mq_tags_bitmap_tags);
	if (bitmap_tags_is_ptr &&
	    !readmem(addr, KVADDR, &addr, sizeof(ulong),
	    "blk_mq_tags.bitmap_tags", RETURN_ON_ERROR))
		goto out;
	bt_for_each(q, tags, addr, 0, nr_reserved_tags, dio);
	ret = TRUE;
out:
	return ret;
}

/*
 * Replica of kernel block/blk-mq-tag.c:blk_mq_queue_tag_busy_iter()
 */
static void
blk_mq_queue_tag_busy_iter(ulong q, ulong *hctx, uint cnt, struct diskio *dio)
{
	uint i, flags;
	int bitmap_tags_is_ptr = 0;
	ulong addr = 0;

	/* newer kernels changed bitmap_tags from embedded struct to pointer */
	if (MEMBER_TYPE("blk_mq_tags", "bitmap_tags") == TYPE_CODE_PTR)
		bitmap_tags_is_ptr = 1;

	readmem(q + OFFSET(request_queue_tag_set), KVADDR, &addr,
		sizeof(ulong), "request_queue.tag_set", RETURN_ON_ERROR);
	readmem(addr + OFFSET(blk_mq_tag_set_flags), KVADDR, &flags,
		sizeof(uint), "blk_mq_tag_set.flags", RETURN_ON_ERROR);

	/* shared tag sets keep one blk_mq_tags for all hw queues */
	if (blk_mq_is_shared_tags(flags) &&
	    VALID_MEMBER(blk_mq_tag_set_shared_tags)) {
		addr = addr + OFFSET(blk_mq_tag_set_shared_tags);
		queue_for_each_hw_ctx(q, addr, bitmap_tags_is_ptr, dio);
		return;
	}

	for (i = 0; i < cnt; i++) {
		/* Tags owned by the block driver */
		addr = hctx[i] + OFFSET(blk_mq_hw_ctx_tags);
		if (queue_for_each_hw_ctx(q, addr, bitmap_tags_is_ptr, dio) == FALSE)
			return;
	}
}

/*
 * Gather the hw-queue context addresses of a request_queue (from the
 * hctx_table xarray on newer kernels, or the queue_hw_ctx array on
 * older ones) and run the busy-tag iterator over them.
 */
static void
get_mq_diskio_from_hw_queues(ulong q, struct diskio *dio)
{
	uint cnt = 0;
	ulong addr = 0, hctx_addr = 0;
	ulong *hctx_array = NULL;
	struct list_pair *lp = NULL;

	if (VALID_MEMBER(request_queue_hctx_table)) {
		addr = q + OFFSET(request_queue_hctx_table);
		cnt = do_xarray(addr, XARRAY_COUNT, NULL);
		lp = (struct list_pair *)GETBUF(sizeof(struct list_pair) *
			(cnt + 1));
		if (!lp)
			error(FATAL, "fail to get memory for list_pair.\n");
		lp[0].index = cnt;
		cnt = do_xarray(addr, XARRAY_GATHER, lp);
	} else {
		addr = q + OFFSET(request_queue_nr_hw_queues);
		readmem(addr, KVADDR, &cnt, sizeof(uint),
			"request_queue.nr_hw_queues", FAULT_ON_ERROR);
		addr = q + OFFSET(request_queue_queue_hw_ctx);
		readmem(addr, KVADDR, &hctx_addr, sizeof(void *),
			"request_queue.queue_hw_ctx", FAULT_ON_ERROR);
	}

	hctx_array = (ulong *)GETBUF(sizeof(void *) * cnt);
	if (!hctx_array) {
		if (lp)
			FREEBUF(lp);
		error(FATAL, "fail to get memory for the hctx_array\n");
	}

	if (lp && hctx_array) {
		uint i;

		/* copy it from list_pair to hctx_array */
		for (i = 0; i < cnt; i++)
			hctx_array[i] = (ulong)lp[i].value;

		FREEBUF(lp);
	} else if (!readmem(hctx_addr, KVADDR, hctx_array,
	    sizeof(void *) * cnt, "request_queue.queue_hw_ctx[]",
	    RETURN_ON_ERROR)) {
		FREEBUF(hctx_array);
		return;
	}

	blk_mq_queue_tag_busy_iter(q, hctx_array, cnt, dio);

	FREEBUF(hctx_array);
}

/*
 * Fill mq_count[0]/[1] with in-flight read/write counts of a blk-mq
 * request_queue, using the sbitmap walk on newer kernels or the
 * per-cpu blk_mq_ctx counters on older ones.
 */
static void
get_mq_diskio(unsigned long q, unsigned long *mq_count)
{
	int cpu;
	unsigned long queue_ctx;
	unsigned long mctx_addr;
	struct diskio tmp = {0};

	/*
	 * Currently this function does not support old blk-mq implementation
	 * before 12f5b9314545 ("blk-mq: Remove generation seqeunce"), so
	 * filter them out.
	 */
	if (VALID_MEMBER(request_state)) {
		if (CRASHDEBUG(1))
			fprintf(fp, "mq: using sbitmap\n");
		get_mq_diskio_from_hw_queues(q, &tmp);
		mq_count[0] = tmp.read;
		mq_count[1] = tmp.write;
		return;
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "mq: using blk_mq_ctx.rq_{completed,dispatched} counters\n");

	readmem(q + OFFSET(request_queue_queue_ctx), KVADDR, &queue_ctx,
		sizeof(ulong), "request_queue.queue_ctx", FAULT_ON_ERROR);

	/* queue_ctx is a per-cpu pointer: visit every cpu's blk_mq_ctx */
	for (cpu = 0; cpu < kt->cpus; cpu++) {
		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
			mctx_addr = queue_ctx + kt->__per_cpu_offset[cpu];
			get_one_mctx_diskio(mctx_addr, &tmp);
			mq_count[0] += tmp.read;
			mq_count[1] += tmp.write;
		}
	}
}

/*
 * Sum the per-cpu disk_stats.in_flight[2] counters into count[0]
 * (read) and count[1] (write).  dkstats is a per-cpu base address.
 */
static void
get_one_diskio_from_dkstats(unsigned long dkstats, unsigned long *count)
{
	int cpu;
	unsigned long dkstats_addr;
	unsigned long in_flight[2];

	for (cpu = 0; cpu < kt->cpus; cpu++) {
		if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) {
			dkstats_addr = dkstats + kt->__per_cpu_offset[cpu];
			readmem(dkstats_addr + OFFSET(disk_stats_in_flight),
				KVADDR, in_flight, sizeof(long) * 2,
				"disk_stats.in_flight", FAULT_ON_ERROR);
			count[0] += in_flight[0];
			count[1] += in_flight[1];
		}
	}
}

/* read request_queue.rq.count[2] */
static void
get_diskio_1(unsigned long rq, unsigned long gendisk, struct diskio *io)
{
	int count[2];
	unsigned long io_counts[2] = { 0 };
	unsigned long dkstats;

	if (!use_mq_interface(rq)) {
		if (VALID_MEMBER(request_queue_rq)) {
			readmem(rq + OFFSET(request_queue_rq) +
				OFFSET(request_list_count), KVADDR, count,
				sizeof(int) * 2, "request_list.count",
				FAULT_ON_ERROR);

			io->read = count[0];
			io->write = count[1];
		} else {
			/* no request_list: fall back to disk_stats counters */
			if (VALID_MEMBER(hd_struct_dkstats))
				readmem(gendisk + OFFSET(gendisk_part0) +
					OFFSET(hd_struct_dkstats), KVADDR,
					&dkstats, sizeof(ulong),
					"gendisk.part0.dkstats",
					FAULT_ON_ERROR);
			else {
				/* kernel version >= 5.11 */
				ulong block_device;
				readmem(gendisk + OFFSET(gendisk_part0),
					KVADDR, &block_device, sizeof(ulong),
					"gendisk.part0", FAULT_ON_ERROR);
				readmem(block_device + OFFSET(block_device_bd_stats), KVADDR,
&dkstats, sizeof(ulong), "block_device.bd_stats", FAULT_ON_ERROR); } get_one_diskio_from_dkstats(dkstats, io_counts); io->read = io_counts[0]; io->write = io_counts[1]; } } else { get_mq_diskio(rq, io_counts); io->read = io_counts[0]; io->write = io_counts[1]; } } /* request_queue.in_flight contains total requests */ static unsigned int get_in_flight_1(unsigned long rq) { unsigned int in_flight; readmem(rq+ OFFSET(request_queue_in_flight), KVADDR, &in_flight, sizeof(uint), "request_queue.in_flight", FAULT_ON_ERROR); return in_flight; } /* request_queue.in_flight[2] contains read/write requests */ static unsigned int get_in_flight_2(unsigned long rq) { unsigned int in_flight[2]; readmem(rq+ OFFSET(request_queue_in_flight), KVADDR, in_flight, sizeof(uint) * 2, "request_queue.in_flight", FAULT_ON_ERROR); return in_flight[0] + in_flight[1]; } static void init_iter(struct iter *i) { ARRAY_LENGTH_INIT(i->diskname_len, gendisk.disk_name, "gendisk.disk_name", NULL, sizeof(char)); if (i->diskname_len < 0 || i->diskname_len > BUFSIZE) { option_not_supported('d'); return; } i->current_address = 0; /* check whether BLK_RW_SYNC exists */ i->sync_count = get_symbol_type("BLK_RW_SYNC", NULL, NULL) == TYPE_CODE_ENUM; if (SIZE(rq_in_flight) == sizeof(int)) { i->get_in_flight = get_in_flight_1; } else if (SIZE(rq_in_flight) == sizeof(int) * 2) { i->get_in_flight = get_in_flight_2; } i->get_diskio = get_diskio_1; if (symbol_exists("block_subsys") || symbol_exists("block_kset")) { /* kernel version <= 2.6.24 */ unsigned long block_subsys_addr; if (symbol_exists("block_subsys")) block_subsys_addr = symbol_value("block_subsys"); else block_subsys_addr = symbol_value("block_kset"); if (VALID_STRUCT(subsystem)) i->head_address = block_subsys_addr + OFFSET(subsystem_kset) + OFFSET(kset_list); else i->head_address = block_subsys_addr + OFFSET(kset_list); i->type_address = 0; i->next_disk = next_disk_list; i->match = NULL; i->get_gendisk = get_gendisk_1; } else if 
(symbol_exists("block_class")) { unsigned long block_class_addr = symbol_value("block_class"); i->type_address = symbol_value("disk_type"); if (VALID_MEMBER(class_devices) || (VALID_MEMBER(class_private_devices) && SIZE(class_private_devices) == SIZE(list_head))) { /* 2.6.24 < kernel version <= 2.6.27, list */ if (!VALID_STRUCT(class_private)) { /* 2.6.24 < kernel version <= 2.6.26 */ i->head_address = block_class_addr + OFFSET(class_devices); } else { /* kernel version is 2.6.27 */ unsigned long class_private_addr; readmem(block_class_addr + OFFSET(class_p), KVADDR, &class_private_addr, sizeof(class_private_addr), "class.p", FAULT_ON_ERROR); i->head_address = class_private_addr + OFFSET(class_private_devices); } i->next_disk = next_disk_list; i->match = match_list; i->get_gendisk = get_gendisk_2; } else { /* kernel version > 2.6.27, klist */ unsigned long class_private_addr; if (INVALID_MEMBER(class_p)) /* kernel version >= 6.4 */ class_private_addr = get_subsys_private("class_kset", "block"); else readmem(block_class_addr + OFFSET(class_p), KVADDR, &class_private_addr, sizeof(class_private_addr), "class.p", FAULT_ON_ERROR); if (!class_private_addr) error(FATAL, "cannot determine subsys_private for block.\n"); if (VALID_STRUCT(class_private)) { /* 2.6.27 < kernel version <= 2.6.37-rc2 */ i->head_address = class_private_addr + OFFSET(class_private_devices); } else { /* kernel version > 2.6.37-rc2 */ i->head_address = class_private_addr + OFFSET(subsys_private_klist_devices); } i->head_address += OFFSET(klist_k_list); i->next_disk = next_disk_klist; i->match = match_klist; if (VALID_MEMBER(gendisk_dev)) i->get_gendisk = get_gendisk_3; else if (VALID_MEMBER(device_knode_class)) i->get_gendisk = get_gendisk_4; else i->get_gendisk = get_gendisk_5; } } else { option_not_supported('d'); return; } } static void display_one_diskio(struct iter *i, unsigned long gendisk, ulong flags) { char disk_name[BUFSIZE + 1]; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; 
char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; int major; unsigned long queue_addr; unsigned int in_flight; struct diskio io; memset(disk_name, 0, BUFSIZE + 1); readmem(gendisk + OFFSET(gendisk_disk_name), KVADDR, disk_name, i->diskname_len, "gen_disk.disk_name", FAULT_ON_ERROR); if (is_skipped_disk(disk_name)) return; readmem(gendisk + OFFSET(gendisk_queue), KVADDR, &queue_addr, sizeof(ulong), "gen_disk.queue", FAULT_ON_ERROR); readmem(gendisk + OFFSET(gendisk_major), KVADDR, &major, sizeof(int), "gen_disk.major", FAULT_ON_ERROR); i->get_diskio(queue_addr, gendisk, &io); if ((flags & DIOF_NONZERO) && (io.read + io.write == 0)) return; fprintf(fp, "%s%s%s %s%s%s%s %s%5d%s%s%s%s%s", mkstring(buf0, 5, RJUST|INT_DEC, (char *)(unsigned long)major), space(MINSPACE), mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, (char *)gendisk), space(MINSPACE), mkstring(buf2, 10, LJUST, disk_name), space(MINSPACE), mkstring(buf3, VADDR_PRLEN <= 11 ? 11 : VADDR_PRLEN, LJUST|LONG_HEX, (char *)queue_addr), space(MINSPACE), io.read + io.write, space(MINSPACE), mkstring(buf4, 5, RJUST|INT_DEC, (char *)(unsigned long)io.read), space(MINSPACE), mkstring(buf5, 5, RJUST|INT_DEC, (char *)(unsigned long)io.write), space(MINSPACE)); if (VALID_MEMBER(request_queue_in_flight)) { if (!use_mq_interface(queue_addr)) { in_flight = i->get_in_flight(queue_addr); fprintf(fp, "%5u\n", in_flight); } else fprintf(fp, "%s\n", "N/A(MQ)"); } else fprintf(fp, "\n"); } static void display_all_diskio(ulong flags) { struct iter i; unsigned long gendisk; char buf0[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; init_iter(&i); fprintf(fp, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", "MAJOR", space(MINSPACE), mkstring(buf0, VADDR_PRLEN + 2, LJUST, "GENDISK"), space(MINSPACE), "NAME ", space(MINSPACE), mkstring(buf1, VADDR_PRLEN <= 11 ? 
13 : VADDR_PRLEN + 2, LJUST, "REQUEST_QUEUE"), space(MINSPACE), mkstring(buf2, 5, RJUST, "TOTAL"), space(MINSPACE), i.sync_count ? mkstring(buf3, 5, RJUST, "ASYNC") : mkstring(buf3, 5, RJUST, "READ"), space(MINSPACE), i.sync_count ? mkstring(buf4, 5, RJUST, "SYNC") : mkstring(buf4, 5, RJUST, "WRITE"), space(MINSPACE), VALID_MEMBER(request_queue_in_flight) ? mkstring(buf5, 5, RJUST, "DRV") : ""); while ((gendisk = i.next_disk(&i)) != 0) display_one_diskio(&i, gendisk, flags); } static void diskio_init(void) { if (dt->flags & DISKIO_INIT) return; MEMBER_OFFSET_INIT(class_devices, "class", "class_devices"); if (INVALID_MEMBER(class_devices)) MEMBER_OFFSET_INIT(class_devices, "class", "devices"); MEMBER_OFFSET_INIT(class_p, "class", "p"); if (INVALID_MEMBER(class_p)) { MEMBER_OFFSET_INIT(kset_list, "kset", "list"); MEMBER_OFFSET_INIT(kset_kobj, "kset", "kobj"); MEMBER_OFFSET_INIT(kobject_name, "kobject", "name"); MEMBER_OFFSET_INIT(kobject_entry, "kobject", "entry"); MEMBER_OFFSET_INIT(subsys_private_subsys, "subsys_private", "subsys"); } MEMBER_OFFSET_INIT(class_private_devices, "class_private", "class_devices"); MEMBER_OFFSET_INIT(device_knode_class, "device", "knode_class"); MEMBER_OFFSET_INIT(device_node, "device", "node"); MEMBER_OFFSET_INIT(device_type, "device", "type"); MEMBER_OFFSET_INIT(device_private_device, "device_private", "device"); MEMBER_OFFSET_INIT(device_private_knode_class, "device_private", "knode_class"); MEMBER_OFFSET_INIT(gendisk_dev, "gendisk", "dev"); if (INVALID_MEMBER(gendisk_dev)) MEMBER_OFFSET_INIT(gendisk_dev, "gendisk", "__dev"); MEMBER_OFFSET_INIT(gendisk_kobj, "gendisk", "kobj"); MEMBER_OFFSET_INIT(gendisk_part0, "gendisk", "part0"); MEMBER_OFFSET_INIT(gendisk_queue, "gendisk", "queue"); MEMBER_OFFSET_INIT(hd_struct_dev, "hd_struct", "__dev"); MEMBER_OFFSET_INIT(hd_struct_dkstats, "hd_struct", "dkstats"); MEMBER_OFFSET_INIT(block_device_bd_device, "block_device", "bd_device"); MEMBER_OFFSET_INIT(block_device_bd_stats, "block_device", 
"bd_stats"); MEMBER_OFFSET_INIT(klist_k_list, "klist", "k_list"); MEMBER_OFFSET_INIT(klist_node_n_klist, "klist_node", "n_klist"); MEMBER_OFFSET_INIT(klist_node_n_node, "klist_node", "n_node"); MEMBER_OFFSET_INIT(kobject_entry, "kobject", "entry"); MEMBER_OFFSET_INIT(kset_list, "kset", "list"); MEMBER_OFFSET_INIT(request_list_count, "request_list", "count"); MEMBER_OFFSET_INIT(request_cmd_flags, "request", "cmd_flags"); MEMBER_OFFSET_INIT(request_q, "request", "q"); MEMBER_OFFSET_INIT(request_state, "request", "state"); MEMBER_OFFSET_INIT(request_queue_in_flight, "request_queue", "in_flight"); if (MEMBER_EXISTS("request_queue", "rq")) MEMBER_OFFSET_INIT(request_queue_rq, "request_queue", "rq"); else MEMBER_OFFSET_INIT(request_queue_rq, "request_queue", "root_rl"); if (MEMBER_EXISTS("request_queue", "mq_ops")) { MEMBER_OFFSET_INIT(request_queue_mq_ops, "request_queue", "mq_ops"); ANON_MEMBER_OFFSET_INIT(request_queue_queue_ctx, "request_queue", "queue_ctx"); MEMBER_OFFSET_INIT(request_queue_queue_hw_ctx, "request_queue", "queue_hw_ctx"); MEMBER_OFFSET_INIT(request_queue_nr_hw_queues, "request_queue", "nr_hw_queues"); MEMBER_OFFSET_INIT(request_queue_hctx_table, "request_queue", "hctx_table"); MEMBER_OFFSET_INIT(blk_mq_ctx_rq_dispatched, "blk_mq_ctx", "rq_dispatched"); MEMBER_OFFSET_INIT(blk_mq_ctx_rq_completed, "blk_mq_ctx", "rq_completed"); MEMBER_OFFSET_INIT(blk_mq_hw_ctx_tags, "blk_mq_hw_ctx", "tags"); MEMBER_OFFSET_INIT(blk_mq_tags_bitmap_tags, "blk_mq_tags", "bitmap_tags"); MEMBER_OFFSET_INIT(blk_mq_tags_breserved_tags, "blk_mq_tags", "breserved_tags"); MEMBER_OFFSET_INIT(blk_mq_tags_nr_reserved_tags, "blk_mq_tags", "nr_reserved_tags"); MEMBER_OFFSET_INIT(blk_mq_tags_rqs, "blk_mq_tags", "rqs"); STRUCT_SIZE_INIT(blk_mq_tags, "blk_mq_tags"); STRUCT_SIZE_INIT(sbitmap, "sbitmap"); STRUCT_SIZE_INIT(sbitmap_word, "sbitmap_word"); MEMBER_OFFSET_INIT(sbitmap_word_word, "sbitmap_word", "word"); MEMBER_OFFSET_INIT(sbitmap_word_cleared, "sbitmap_word", "cleared"); 
MEMBER_OFFSET_INIT(sbitmap_depth, "sbitmap", "depth"); MEMBER_OFFSET_INIT(sbitmap_shift, "sbitmap", "shift"); MEMBER_OFFSET_INIT(sbitmap_map_nr, "sbitmap", "map_nr"); MEMBER_OFFSET_INIT(sbitmap_map, "sbitmap", "map"); MEMBER_OFFSET_INIT(sbitmap_queue_sb, "sbitmap_queue", "sb"); } MEMBER_OFFSET_INIT(subsys_private_klist_devices, "subsys_private", "klist_devices"); MEMBER_OFFSET_INIT(subsystem_kset, "subsystem", "kset"); STRUCT_SIZE_INIT(subsystem, "subsystem"); STRUCT_SIZE_INIT(class_private, "class_private"); MEMBER_SIZE_INIT(rq_in_flight, "request_queue", "in_flight"); MEMBER_SIZE_INIT(class_private_devices, "class_private", "class_devices"); MEMBER_OFFSET_INIT(disk_stats_in_flight, "disk_stats", "in_flight"); MEMBER_OFFSET_INIT(request_queue_tag_set, "request_queue", "tag_set"); MEMBER_OFFSET_INIT(blk_mq_tag_set_flags, "blk_mq_tag_set", "flags"); MEMBER_OFFSET_INIT(blk_mq_tag_set_shared_tags, "blk_mq_tag_set", "shared_tags"); dt->flags |= DISKIO_INIT; } static void diskio_option(ulong flags) { diskio_init(); display_all_diskio(flags); } void devdump_extract(void *_note, ulonglong offset, char *dump_file, FILE *ofp) { struct vmcoredd_header *vh = (struct vmcoredd_header *)_note; ulong dump_size, count; FILE *tmpfp; if (vh->n_type != NT_VMCOREDD) error(FATAL, "unsupported note type: 0x%x", vh->n_type); dump_size = vh->n_descsz - VMCOREDD_MAX_NAME_BYTES; if (dump_file) { tmpfp = fopen(dump_file, "w"); if (!tmpfp) { error(FATAL, "cannot open output file: %s\n", dump_file); return; } set_tmpfile2(tmpfp); } fprintf(ofp, "DEVICE: %s\n", vh->dump_name); if (dump_file) count = dump_size; else count = dump_size/sizeof(uint64_t) + (dump_size % sizeof(uint64_t) ? 
1 : 0); display_memory_from_file_offset(offset + sizeof(struct vmcoredd_header), count, dump_file); } void devdump_info(void *_note, ulonglong offset, FILE *ofp) { struct vmcoredd_header *vh = (struct vmcoredd_header *)_note; char buf[BUFSIZE]; ulong dump_size; if (vh->n_type != NT_VMCOREDD) return; dump_size = vh->n_descsz - VMCOREDD_MAX_NAME_BYTES; offset += sizeof(struct vmcoredd_header); fprintf(ofp, "0x%s ", mkstring(buf, LONG_LONG_PRLEN, LJUST | LONGLONG_HEX, MKSTR(&offset))); fprintf(ofp, "%s ", mkstring(buf, LONG_PRLEN, LJUST | LONG_DEC, MKSTR(dump_size))); fprintf(ofp, "%s\n", vh->dump_name); } crash-utility-crash-9cd43f5/.rh_rpm_package0000664000372000037200000000000615107550337020333 0ustar juerghjuergh9.0.1 crash-utility-crash-9cd43f5/gdb-16.2.patch0000664000372000037200000020771715107550337017553 0ustar juerghjuergh # When this file is updated in an existing source tree, it gets re-applied # during the next build using "patch -N --fuzz=0", which ignores patches # that have already been applied. However, if a gdb file has been modified # multiple times, the subsequent patching may fail to recognize that a # given patch has been previously applied, and will attempt to re-apply it. # To prevent any unintended consequences, this file also acts as a # shell script that can restore any gdb file to its original state prior # to all subsequent patch applications. 
tar xvzmf gdb-16.2.tar.gz \ gdb-16.2/gdb/symfile.c \ gdb-16.2/gdb/symtab.c exit 0 --- gdb-16.2/Makefile.in.orig +++ gdb-16.2/Makefile.in @@ -369,6 +369,9 @@ CC_FOR_BUILD = @CC_FOR_BUILD@ CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@ CPP_FOR_BUILD = @CPP_FOR_BUILD@ CPPFLAGS_FOR_BUILD = @CPPFLAGS_FOR_BUILD@ +ifeq (${CRASH_TARGET}, PPC64) +CFLAGS_FOR_BUILD += -m64 -fPIC +endif CXXFLAGS_FOR_BUILD = @CXXFLAGS_FOR_BUILD@ CXX_FOR_BUILD = @CXX_FOR_BUILD@ DLLTOOL_FOR_BUILD = @DLLTOOL_FOR_BUILD@ @@ -441,6 +444,9 @@ GNATBIND = @GNATBIND@ GNATMAKE = @GNATMAKE@ CFLAGS = @CFLAGS@ +ifeq (${CRASH_TARGET}, PPC64) +CFLAGS += -m64 -fPIC +endif LDFLAGS = @LDFLAGS@ LIBCFLAGS = $(CFLAGS) CXXFLAGS = @CXXFLAGS@ --- gdb-16.2/gdb/Makefile.in.orig +++ gdb-16.2/gdb/Makefile.in @@ -603,7 +603,7 @@ CONFIG_DEP_SUBDIR = $(addsuffix /$(DEPDIR),$(CONFIG_SRC_SUBDIR)) # It is also possible that you will need to add -I/usr/include/sys if # your system doesn't have fcntl.h in /usr/include (which is where it # should be according to Posix). -DEFS = @DEFS@ +DEFS = -DCRASH_MERGE -D${CRASH_TARGET} @DEFS@ GDB_INCLUDED_HEADER = -include $(srcdir)/defs.h GDB_CFLAGS = \ -I. \ @@ -1220,6 +1220,7 @@ COMMON_SFILES = \ symmisc.c \ symtab.c \ target.c \ + ../../crash_target.c \ target-connection.c \ target-dcache.c \ target-descriptions.c \ @@ -1906,7 +1907,7 @@ COMMON_OBS = $(DEPFILES) $(CONFIG_OBS) $(YYOBJ) \ $(SUBDIR_TARGET_OBS) \ $(SUBDIR_GCC_COMPILE_OBS) -SUBDIRS = doc @subdirs@ data-directory +SUBDIRS = build_no_subdirs CLEANDIRS = $(SUBDIRS) # List of subdirectories in the build tree that must exist. 
@@ -1947,8 +1948,8 @@ generated_files = \ # Flags needed to compile Python code PYTHON_CFLAGS = @PYTHON_CFLAGS@ -all: gdb$(EXEEXT) $(CONFIG_ALL) gdb-gdb.py gdb-gdb.gdb gcore gstack - @$(MAKE) $(FLAGS_TO_PASS) DO=all "DODIRS=$(SUBDIRS)" subdir_do +all: gdb$(EXEEXT) gdb-gdb.py gdb-gdb.gdb gcore gstack + @$(MAKE) -s $(FLAGS_TO_PASS) DO=all "DODIRS=$(SUBDIRS)" subdir_do # Rule for compiling .c files in the top-level gdb directory. # The order-only dependencies ensure that we create the build subdirectories. @@ -2230,9 +2231,10 @@ libgdb.a: $(LIBGDB_OBS) # Removing the old gdb first works better if it is running, at least on SunOS. gdb$(EXEEXT): gdb.o $(LIBGDB_OBS) $(CDEPS) $(TDEPLIBS) $(SILENCE) rm -f gdb$(EXEEXT) + @$(MAKE) -C ../.. GDB_FLAGS=-DGDB_16_2 library $(ECHO_CXXLD) $(CC_LD) $(INTERNAL_LDFLAGS) $(WIN32LDAPP) \ - -o gdb$(EXEEXT) gdb.o $(LIBGDB_OBS) \ - $(TDEPLIBS) $(TUI_LIBRARY) $(CLIBS) $(LOADLIBES) + -o $(shell /bin/cat mergeobj) $(LIBGDB_OBS) \ + $(TDEPLIBS) $(TUI_LIBRARY) $(CLIBS) $(LOADLIBES) $(shell /bin/cat mergelibs) ifneq ($(CODESIGN_CERT),) $(ECHO_SIGN) $(CODESIGN) -s $(CODESIGN_CERT) gdb$(EXEEXT) endif @@ -2699,9 +2701,9 @@ ifeq ($(DEPMODE),depmode=gcc3) # into place if the compile succeeds. We need this because gcc does # not atomically write the dependency output file. 
override COMPILE.post = -c -o $@ -MT $@ -MMD -MP \ - -MF $(@D)/$(DEPDIR)/$(basename $(@F)).Tpo -override POSTCOMPILE = @mv $(@D)/$(DEPDIR)/$(basename $(@F)).Tpo \ - $(@D)/$(DEPDIR)/$(basename $(@F)).Po + -MF $(subst ../..,.,$(@D))/$(DEPDIR)/$(basename $(@F)).Tpo +override POSTCOMPILE = @mv $(subst ../..,.,$(@D))/$(DEPDIR)/$(basename $(@F)).Tpo \ + $(subst ../..,.,$(@D))/$(DEPDIR)/$(basename $(@F)).Po else override COMPILE.pre = source='$<' object='$@' libtool=no \ DEPDIR=$(DEPDIR) $(DEPMODE) $(depcomp) \ --- gdb-16.2/gdb/c-typeprint.c.orig +++ gdb-16.2/gdb/c-typeprint.c @@ -1066,6 +1066,9 @@ c_type_print_base_struct_union (struct type *type, struct ui_file *stream, = podata->end_bitpos - type->field (i).type ()->length () * TARGET_CHAR_BIT; } + else if (strlen(type->field(i).name()) == 0) + /* crash: Print details for unnamed struct and union. */ + newshow = show; c_print_type_1 (type->field (i).type (), type->field (i).name (), --- gdb-16.2/gdb/cli/cli-cmds.c.orig +++ gdb-16.2/gdb/cli/cli-cmds.c @@ -427,6 +427,11 @@ complete_command (const char *arg, int from_tty) } } +#ifdef CRASH_MERGE +static int crash_from_tty = 0; +extern "C" void untrusted_file(FILE *, char *); +#endif + int is_complete_command (struct cmd_list_element *c) { @@ -659,8 +664,32 @@ find_and_open_script (const char *script_file, int search_path) close (fd); errno = save_errno; } - else - opened.emplace (gdb_file_up (result), std::move (full_path)); +#ifdef CRASH_MERGE + /* + * Only allow trusted versions of .gdbinit files to be + * sourced during session initialization. 
+ */ + if (crash_from_tty == -1) + { + struct stat statbuf; + FILE *stream = result; + int _fd = fileno (stream); + if (fstat (_fd, &statbuf) < 0) + { + perror_with_name (full_path.get()); + fclose (stream); + return opened; + } + if (statbuf.st_uid != getuid () || (statbuf.st_mode & S_IWOTH)) + { + untrusted_file(NULL, full_path.get()); + fclose (stream); + return opened; + } + } +#endif + opened.emplace (gdb_file_up (result), std::move (full_path)); + return opened; } @@ -724,7 +753,11 @@ source_script_with_search (const char *file, int from_tty, int search_path) If the source command was invoked interactively, throw an error. Otherwise (e.g. if it was invoked by a script), just emit a warning, rather than cause an error. */ +#ifdef CRASH_MERGE + if (from_tty > 0) +#else if (from_tty) +#endif perror_with_name (file); else { @@ -756,7 +789,14 @@ source_script_with_search (const char *file, int from_tty, int search_path) void source_script (const char *file, int from_tty) { +#ifdef CRASH_MERGE + crash_from_tty = from_tty; +#endif source_script_with_search (file, from_tty, 0); +#ifdef CRASH_MERGE + crash_from_tty = 0; +#endif + } static void --- gdb-16.2/gdb/completer.c.orig +++ gdb-16.2/gdb/completer.c @@ -3315,6 +3315,8 @@ gdb_display_match_list_1 (char **matches, int len, int max, /* How many items of MAX length can we fit in the screen window? */ cols = gdb_complete_get_screenwidth (displayer); + rl_reset_screen_size(); + rl_get_screen_size(NULL, &cols); max += 2; limit = cols / max; if (limit != 1 && (limit * max == cols)) --- gdb-16.2/gdb/defs.h.orig +++ gdb-16.2/gdb/defs.h @@ -407,4 +407,7 @@ DEF_ENUM_FLAGS_TYPE (enum user_selected_what_flag, user_selected_what); #include "utils.h" +#ifdef CRASH_MERGE +extern "C" int gdb_main_entry(int, char **); +#endif #endif /* GDB_DEFS_H */ --- gdb-16.2/gdb/dwarf2/read-gdb-index.c.orig +++ gdb-16.2/gdb/dwarf2/read-gdb-index.c @@ -369,7 +369,11 @@ read_gdb_index_from_buffer (const char *filename, indices. 
*/ if (version < 4) { +#ifdef CRASH_MERGE + static int warning_printed = 1; +#else static int warning_printed = 0; +#endif if (!warning_printed) { warning (_("Skipping obsolete .gdb_index section in %s."), @@ -388,7 +392,11 @@ read_gdb_index_from_buffer (const char *filename, "set use-deprecated-index-sections on". */ if (version < 6 && !deprecated_ok) { +#ifdef CRASH_MERGE + static int warning_printed = 1; +#else static int warning_printed = 0; +#endif if (!warning_printed) { warning (_("\ --- gdb-16.2/gdb/event-top.c.orig +++ gdb-16.2/gdb/event-top.c @@ -1558,6 +1558,10 @@ gdb_setup_readline (int editing) { struct ui *ui = current_ui; + if (!batch_silent) + gdb_stdout = new stdio_file (ui->outstream); + gdb_stderr = new stderr_file (ui->errstream); + /* If the input stream is connected to a terminal, turn on editing. However, that is only allowed on the main UI, as we can only have one instance of readline. Also, INSTREAM might be nullptr when --- gdb-16.2/gdb/frame.c.orig +++ gdb-16.2/gdb/frame.c @@ -966,6 +966,10 @@ frame_find_by_id (struct frame_id id) return NULL; } +#if defined(CRASH_MERGE) && defined(ARM64) +extern "C" void crash_decode_ptrauth_pc(ulong* pc); +#endif + static CORE_ADDR frame_unwind_pc (const frame_info_ptr &this_frame) { @@ -996,6 +1000,9 @@ frame_unwind_pc (const frame_info_ptr &this_frame) try { pc = gdbarch_unwind_pc (prev_gdbarch, this_frame); +#if defined(CRASH_MERGE) && defined(ARM64) + crash_decode_ptrauth_pc(&pc); +#endif pc_p = true; } catch (const gdb_exception_error &ex) --- gdb-16.2/gdb/main.c.orig +++ gdb-16.2/gdb/main.c @@ -442,6 +442,14 @@ start_event_loop () return; } +#ifdef CRASH_MERGE +extern "C" void update_gdb_hooks(void); +extern "C" void main_loop(void); +extern "C" unsigned long crash_get_kaslr_offset(void); +extern "C" int console(const char *, ...); +void crash_target_init (void); +#endif + /* Call command_loop. 
*/ /* Prevent inlining this function for the benefit of GDB's selftests @@ -1031,7 +1039,11 @@ captured_main_1 (struct captured_main_args *context) } } +#ifdef CRASH_MERGE + save_original_signals_state (1); +#else save_original_signals_state (quiet); +#endif /* Try to set up an alternate signal stack for SIGSEGV handlers. */ gdb::alternate_signal_stack signal_stack; @@ -1129,7 +1141,7 @@ captured_main_1 (struct captured_main_args *context) if (print_version) { print_gdb_version (gdb_stdout, false); - gdb_printf ("\n"); + gdb_printf ("\n\n"); exit (0); } @@ -1150,6 +1162,10 @@ captured_main_1 (struct captured_main_args *context) look at things by now. Initialize the default interpreter. */ set_top_level_interpreter (interpreter_p.c_str (), false); +#ifdef CRASH_MERGE + update_gdb_hooks(); +#endif + /* The interpreter should have installed the real uiout by now. */ gdb_assert (current_uiout != temp_uiout.get ()); temp_uiout = nullptr; @@ -1177,7 +1193,11 @@ captured_main_1 (struct captured_main_args *context) if (!system_gdbinit.empty () && !inhibit_gdbinit) { for (const std::string &file : system_gdbinit) +#ifdef CRASH_MERGE + ret = catch_command_errors (source_script, file.c_str (), -1); +#else ret = catch_command_errors (source_script, file.c_str (), 0); +#endif } /* Read and execute $HOME/.gdbinit file, if it exists. This is done @@ -1186,7 +1206,11 @@ captured_main_1 (struct captured_main_args *context) debugging or what directory you are in. */ if (!home_gdbinit.empty () && !inhibit_gdbinit && !inhibit_home_gdbinit) +#ifdef CRASH_MERGE + ret = catch_command_errors (source_script, home_gdbinit.c_str (), -1); +#else ret = catch_command_errors (source_script, home_gdbinit.c_str (), 0); +#endif /* Process '-ix' and '-iex' options early. 
*/ execute_cmdargs (&cmdarg_vec, CMDARG_INIT_FILE, CMDARG_INIT_COMMAND, &ret); @@ -1217,7 +1241,11 @@ captured_main_1 (struct captured_main_args *context) !batch_flag); if (ret != 0) ret = catch_command_errors (symbol_file_add_main_adapter, +#ifdef CRASH_MERGE + symarg, 0); +#else symarg, !batch_flag); +#endif } else { @@ -1291,7 +1319,11 @@ captured_main_1 (struct captured_main_args *context) { auto_load_local_gdbinit_loaded = 1; +#ifdef CRASH_MERGE + ret = catch_command_errors (source_script, local_gdbinit.c_str (), -1); +#else ret = catch_command_errors (source_script, local_gdbinit.c_str (), 0); +#endif } } } @@ -1332,6 +1364,16 @@ captured_main (void *data) captured_main_1 (context); +#ifdef CRASH_MERGE + /* Relocate the vmlinux. */ + objfile_rebase (current_program_space->symfile_object_file, crash_get_kaslr_offset()); + + crash_target_init(); + + /* Back to crash. */ + main_loop(); +#endif + /* NOTE: cagney/1999-11-07: There is probably no reason for not moving this loop and the code found in captured_command_loop() into the command_loop() proper. The main thing holding back that @@ -1350,6 +1392,9 @@ captured_main (void *data) { exception_print (gdb_stderr, ex); } +#ifdef CRASH_MERGE + console("\n"); +#endif } /* No exit -- exit is through quit_command. */ } @@ -1371,6 +1416,24 @@ gdb_main (struct captured_main_args *args) return 1; } +#ifdef CRASH_MERGE +/* + * NOTE: adapted from gdb.c, which is no longer built in; changed name of + * original main() to gdb_main_entry() for use as crash entry point + */ +extern bool is_main_thread (); +int +gdb_main_entry (int argc, char **argv) +{ + struct captured_main_args args; + gdb_assert (is_main_thread ()); + memset (&args, 0, sizeof args); + args.argc = argc; + args.argv = argv; + args.interpreter_p = INTERP_CONSOLE; + return gdb_main (&args); +} +#endif /* Don't use *_filtered for printing help. 
We don't want to prompt for continue no matter how small the screen or how much we're going --- gdb-16.2/gdb/objfiles.h.orig +++ gdb-16.2/gdb/objfiles.h @@ -891,6 +891,8 @@ struct objfile : intrusive_list_node mechanism as ELF should set this flag too. This flag is used in conjunction with the minimal_symbol::maybe_copied method. */ bool object_format_has_copy_relocs = false; + + bool all_symtabs_expanded = false; }; /* A deleter for objfile. */ @@ -951,11 +953,11 @@ extern bool objfile_has_symbols (objfile *objfile); /* Return true if any objfile of PSPACE has partial symbols. */ -extern bool have_partial_symbols (program_space *pspace); +extern "C" bool have_partial_symbols (program_space *pspace); /* Return true if any objfile of PSPACE has full symbols. */ -extern bool have_full_symbols (program_space *pspace); +extern "C" bool have_full_symbols (program_space *pspace); extern void objfile_set_sym_fns (struct objfile *objfile, const struct sym_fns *sf); --- gdb-16.2/gdb/printcmd.c.orig +++ gdb-16.2/gdb/printcmd.c @@ -552,6 +552,9 @@ set_next_address (struct gdbarch *gdbarch, CORE_ADDR addr) form. However note that DO_DEMANGLE can be overridden by the specific settings of the demangle and asm_demangle variables. Returns non-zero if anything was printed; zero otherwise. */ +#ifdef CRASH_MERGE +extern "C" int gdb_print_callback(unsigned long); +#endif int print_address_symbolic (struct gdbarch *gdbarch, CORE_ADDR addr, @@ -563,6 +566,12 @@ print_address_symbolic (struct gdbarch *gdbarch, CORE_ADDR addr, int offset = 0; int line = 0; +#ifdef CRASH_MERGE + if (!gdb_print_callback(addr)) { + return 0; + } +#endif + if (build_address_symbolic (gdbarch, addr, do_demangle, false, &name, &offset, &filename, &line, &unmapped)) return 0; @@ -595,6 +604,10 @@ print_address_symbolic (struct gdbarch *gdbarch, CORE_ADDR addr, /* See valprint.h. 
*/ +#ifdef CRASH_MERGE +extern "C" char *gdb_lookup_module_symbol(unsigned long, unsigned long *); +#endif + int build_address_symbolic (struct gdbarch *gdbarch, CORE_ADDR addr, /* IN */ @@ -701,7 +714,19 @@ build_address_symbolic (struct gdbarch *gdbarch, } } if (symbol == NULL && msymbol.minsym == NULL) +#ifdef CRASH_MERGE + { + char *name_ptr = gdb_lookup_module_symbol(addr, (unsigned long *)offset); + if (name_ptr) { + *name = name_ptr; + return 0; + } else { + return 1; + } + } +#else return 1; +#endif /* If the nearest symbol is too far away, don't print anything symbolic. */ @@ -1242,6 +1267,42 @@ print_command_parse_format (const char **expp, const char *cmdname, *expp = exp; } +static void +print_command_2 (const char *args, int voidprint) +{ + struct value *val; + value_print_options print_opts; + + get_user_print_options (&print_opts); + /* Override global settings with explicit options, if any. */ + auto group = make_value_print_options_def_group (&print_opts); + gdb::option::process_options + (&args, gdb::option::PROCESS_OPTIONS_REQUIRE_DELIMITER, group); + + print_command_parse_format (&args, "print", &print_opts); + + const char *exp = args; + + if (exp != nullptr && *exp) + { + expression_up expr = parse_expression (exp); + val = expr->evaluate (); + } else + val = access_value_history (0); + + gdb_printf("%d %d %ld %ld %ld %ld\n", + check_typedef(val->type ())->code(), + check_typedef(val->type ())->is_unsigned(), + check_typedef(val->type ())->length(), + val->offset(), val->bitpos(), val->bitsize()); +} + +static void +printm_command (const char *exp, int from_tty) +{ + print_command_2 (exp, 1); +} + /* See valprint.h. 
*/ void @@ -3357,6 +3418,12 @@ but no count or size letter (see \"x\" command)."), = add_com ("print", class_vars, print_command, print_help.c_str ()); set_cmd_completer_handle_brkchars (print_cmd, print_command_completer); add_com_alias ("p", print_cmd, class_vars, 1); + + c = add_com ("printm", class_vars, printm_command, _("\ +Similar to \"print\" command, but it used to print the type, size, offset,\n\ +bitpos and bitsize of the expression EXP.")); + set_cmd_completer (c, expression_completer); + add_com_alias ("inspect", print_cmd, class_vars, 1); add_setshow_uinteger_cmd ("max-symbolic-offset", no_class, --- gdb-16.2/gdb/psymtab.c.orig +++ gdb-16.2/gdb/psymtab.c @@ -80,6 +80,9 @@ psymbol_functions::partial_symbols (struct objfile *objfile) return m_partial_symtabs->range (); } +#ifdef CRASH_MERGE + extern "C" int gdb_line_number_callback(unsigned long, unsigned long, unsigned long); +#endif /* Find which partial symtab contains PC and SECTION starting at psymtab PST. We may find a different psymtab than PST. See FIND_PC_SECT_PSYMTAB. */ @@ -170,7 +173,12 @@ psymbol_functions::find_pc_sect_psymtab (struct objfile *objfile, best_pst = find_pc_sect_psymtab_closer (objfile, pc, section, pst, msymbol); +#ifdef CRASH_MERGE + if ((best_pst != NULL) && + gdb_line_number_callback(pc, pst->text_low (objfile), pst->text_high (objfile))) +#else if (best_pst != NULL) +#endif return best_pst; } --- gdb-16.2/gdb/stack.c.orig +++ gdb-16.2/gdb/stack.c @@ -1968,6 +1968,11 @@ static frame_command_helper select_frame_cmd; /* Print briefly all stack frames or just the innermost COUNT_EXP frames. 
*/ +#ifdef CRASH_MERGE +extern "C" int is_kvaddr(ulong); +extern "C" int gdb_CRASHDEBUG(ulong); +#endif + static void backtrace_command_1 (const frame_print_options &fp_opts, const backtrace_cmd_options &bt_opts, @@ -2062,6 +2067,17 @@ backtrace_command_1 (const frame_print_options &fp_opts, hand, perhaps the code does or could be fixed to make sure the frame->prev field gets set to NULL in that case). */ +#ifdef CRASH_MERGE + CORE_ADDR pc = 0; + get_frame_pc_if_available (fi, &pc); + if (!is_kvaddr(pc)) { + if (gdb_CRASHDEBUG(1)) { + gdb_printf(_("Backtrace stopped: due to non-kernel addr: 0x%lx\n"),pc); + } + fi = NULL; + break; + } +#endif print_frame_info (fp_opts, fi, 1, LOCATION, 1, 0); if ((flags & PRINT_LOCALS) != 0) print_frame_local_vars (fi, false, NULL, NULL, 1, gdb_stdout); @@ -2081,7 +2097,7 @@ backtrace_command_1 (const frame_print_options &fp_opts, enum unwind_stop_reason reason; reason = get_frame_unwind_stop_reason (trailing); - if (reason >= UNWIND_FIRST_ERROR) + if (reason >= UNWIND_FIRST_ERROR && gdb_CRASHDEBUG(1)) gdb_printf (_("Backtrace stopped: %s\n"), frame_stop_reason_string (trailing)); } --- gdb-16.2/gdb/symfile.c.orig +++ gdb-16.2/gdb/symfile.c @@ -633,7 +633,26 @@ default_symfile_offsets (struct objfile *objfile, for (cur_sec = abfd->sections; cur_sec != NULL; cur_sec = cur_sec->next) /* We do not expect this to happen; just skip this step if the relocatable file has a section with an assigned VMA. */ - if (bfd_section_vma (cur_sec) != 0) + if (bfd_section_vma (cur_sec) != 0 + /* + * Kernel modules may have some non-zero VMAs, i.e., like the + * __ksymtab and __ksymtab_gpl sections in this example: + * + * Section Headers: + * [Nr] Name Type Address Offset + * Size EntSize Flags Link Info Align + * ... 
+ * [ 8] __ksymtab PROGBITS 0000000000000060 0000ad90 + * 0000000000000010 0000000000000000 A 0 0 16 + * [ 9] .rela__ksymtab RELA 0000000000000000 0000ada0 + * 0000000000000030 0000000000000018 43 8 8 + * [10] __ksymtab_gpl PROGBITS 0000000000000070 0000add0 + * 00000000000001a0 0000000000000000 A 0 0 16 + * ... + * + * but they should be treated as if they are NULL. + */ + && strncmp (bfd_section_name (cur_sec), "__k", 3) != 0) break; if (cur_sec == NULL) @@ -1069,6 +1088,12 @@ symbol_file_add_with_addrs (const gdb_bfd_ref_ptr &abfd, const char *name, objfile *objfile = objfile::make (abfd, current_program_space, name, flags, parent); +#ifdef CRASH_MERGE + if (add_flags & SYMFILE_MAINLINE) { + extern struct objfile *gdb_kernel_objfile; + gdb_kernel_objfile = objfile; + } +#endif /* We either created a new mapped symbol table, mapped an existing symbol table file which has not had initial symbol reading @@ -1095,6 +1120,7 @@ symbol_file_add_with_addrs (const gdb_bfd_ref_ptr &abfd, const char *name, styled_string (file_name_style.style (), name)); objfile->expand_all_symtabs (); + objfile->all_symtabs_expanded = true; } /* Note that we only print a message if we have no symbols and have @@ -1352,6 +1378,10 @@ show_debug_file_directory (struct ui_file *file, int from_tty, #if ! 
defined (DEBUG_SUBDIRECTORY) #define DEBUG_SUBDIRECTORY ".debug" #endif +#ifdef CRASH_MERGE +extern "C" int check_specified_module_tree(const char *, const char *); +extern "C" char *check_specified_kernel_debug_file(); +#endif /* Find a separate debuginfo file for OBJFILE, using DIR as the directory where the original file resides (may not be the same as @@ -1390,6 +1420,15 @@ find_separate_debug_file (const char *dir, if (separate_debug_file_exists (debugfile, crc32, objfile, warnings)) return debugfile; +#ifdef CRASH_MERGE +{ + if (check_specified_module_tree(objfile_name (objfile), debugfile.c_str()) && + separate_debug_file_exists(debugfile, crc32, objfile, warnings)) { + return debugfile; + } +} +#endif + /* Then try in the global debugfile directories. Keep backward compatibility so that DEBUG_FILE_DIRECTORY being "" will @@ -1545,6 +1584,14 @@ find_separate_debug_file_by_debuglink } } +#ifdef CRASH_MERGE + if (debugfile.empty ()) { + char *name_copy; + name_copy = check_specified_kernel_debug_file(); + return name_copy ? std::string (name_copy) : std::string (); + } +#endif + return debugfile; } @@ -2318,8 +2365,10 @@ add_symbol_file_command (const char *args, int from_tty) else if (section_addrs.empty ()) gdb_printf ("\n"); +#ifndef CRASH_MERGE if (from_tty && (!query ("%s", ""))) error (_("Not confirmed.")); +#endif objf = symbol_file_add (filename.get (), add_flags, §ion_addrs, flags); @@ -2660,6 +2709,7 @@ reread_symbols (int from_tty) objfile_name (objfile))); objfile->expand_all_symtabs (); + objfile->all_symtabs_expanded = true; } if (!objfile_has_symbols (objfile)) @@ -3638,6 +3688,15 @@ bfd_byte * symfile_relocate_debug_section (struct objfile *objfile, asection *sectp, bfd_byte *buf) { +#ifdef CRASH_MERGE + /* Executable files have all the relocations already resolved. + * Handle files linked with --emit-relocs. 
+ * http://sources.redhat.com/ml/gdb/2006-08/msg00137.html + */ + bfd *abfd = objfile->obfd.get(); + if ((abfd->flags & EXEC_P) != 0) + return NULL; +#endif gdb_assert (objfile->sf->sym_relocate); return (*objfile->sf->sym_relocate) (objfile, sectp, buf); --- gdb-16.2/gdb/symtab.c.orig +++ gdb-16.2/gdb/symtab.c @@ -2012,6 +2012,10 @@ search_name_hash (enum language language, const char *search_name) variable and thus can probably assume it will never hit the C++ code). */ +#ifdef CRASH_MERGE +static void gdb_bait_and_switch(char *, struct symbol *); +#endif + struct block_symbol lookup_symbol_in_language (const char *name, const struct block *block, const domain_search_flags domain, @@ -2020,22 +2024,37 @@ lookup_symbol_in_language (const char *name, const struct block *block, { SYMBOL_LOOKUP_SCOPED_DEBUG_ENTER_EXIT; + struct block_symbol result; demangle_result_storage storage; const char *modified_name = demangle_for_lookup (name, lang, storage); - return lookup_symbol_aux (modified_name, + result = lookup_symbol_aux (modified_name, symbol_name_match_type::FULL, block, domain, lang, is_a_field_of_this); +#ifdef CRASH_MERGE + if (result.symbol && (domain & SEARCH_TYPE_DOMAIN)) + gdb_bait_and_switch((char *)modified_name, result.symbol); +#endif + return result; } /* See symtab.h. 
*/ +#ifdef CRASH_MERGE +static const struct block *gdb_get_crash_block(void); +#endif + struct block_symbol lookup_symbol (const char *name, const struct block *block, domain_search_flags domain, struct field_of_this_result *is_a_field_of_this) { +#ifdef CRASH_MERGE + if (!block) + block = gdb_get_crash_block(); +#endif + return lookup_symbol_in_language (name, block, domain, current_language->la_language, is_a_field_of_this); @@ -3004,7 +3023,7 @@ find_pc_sect_compunit_symtab (CORE_ADDR pc, struct obj_section *section) for (objfile *objf : current_program_space->objfiles ()) { struct compunit_symtab *result - = objf->find_pc_sect_compunit_symtab (msymbol, pc, section, 1); + = objf->find_pc_sect_compunit_symtab (msymbol, pc, section, 0); if (result != NULL) return result; } @@ -7320,3 +7339,914 @@ the use of prologue scanners."), "symtab"); gdb::observers::free_objfile.attach (symtab_free_objfile_observer, "symtab"); } + +#ifdef CRASH_MERGE +#include "gdb-stabs.h" +#include "gdbsupport/version.h" +#define GDB_COMMON +#include "../../defs.h" + +static void get_member_data(struct gnu_request *, struct type *, long, int); +static void walk_enum(struct type *, struct gnu_request *); +static void eval_enum(struct type *, struct gnu_request *); +static void gdb_get_line_number(struct gnu_request *); +static void gdb_get_datatype(struct gnu_request *); +static void gdb_get_symbol_type(struct gnu_request *); +static void gdb_command_exists(struct gnu_request *); +static void gdb_debug_command(struct gnu_request *); +static void gdb_function_numargs(struct gnu_request *); +static void gdb_add_symbol_file(struct gnu_request *); +static void gdb_delete_symbol_file(struct gnu_request *); +static void gdb_patch_symbol_values(struct gnu_request *); +static void get_user_print_option_address(struct gnu_request *); +extern int get_frame_offset(CORE_ADDR); +static void gdb_set_crash_block(struct gnu_request *); +extern "C" void gdb_command_funnel(struct gnu_request *); +void 
gdb_command_funnel_1(struct gnu_request *); +static long lookup_struct_contents(struct gnu_request *); +static void iterate_datatypes(struct gnu_request *); +extern void execute_command (const char *, int); + +struct objfile *gdb_kernel_objfile = { 0 }; + +static ulong gdb_merge_flags = 0; +#define KERNEL_SYMBOLS_PATCHED (0x1) + +#undef STREQ +#define STREQ(A, B) (A && B && (strcmp(A, B) == 0)) +#define TYPE_CODE(t) (t->code ()) +#define TYPE_TAG_NAME(t) (TYPE_MAIN_TYPE(t)->name) +#define TYPE_NFIELDS(t) (t->num_fields ()) +#define TYPE_NAME(t) (t->name ()) + +/* + * All commands from above come through here. + */ +void +gdb_command_funnel(struct gnu_request *req) +{ + try { + gdb_command_funnel_1(req); + } catch (const gdb_exception &ex) { + if (req->flags & GNU_RETURN_ON_ERROR) + req->flags |= GNU_COMMAND_FAILED; + else + throw ex; + } +} + +void +gdb_command_funnel_1(struct gnu_request *req) +{ + struct symbol *sym; + FILE *original_stdout_stream = nullptr; + FILE *original_stderr_stream = nullptr; + + if (req->command != GNU_VERSION && req->command != GNU_USER_PRINT_OPTION) { + original_stdout_stream = (dynamic_cast< stdio_file * >gdb_stdout)->get_stream(); + original_stderr_stream = (dynamic_cast< stdio_file * >gdb_stderr)->get_stream(); + (dynamic_castgdb_stdout)->set_stream(req->fp); + (dynamic_castgdb_stderr)->set_stream(req->fp); + } + + switch (req->command) + { + case GNU_VERSION: + req->buf = (char *)version; + break; + + case GNU_PASS_THROUGH: + execute_command(req->buf, + req->flags & GNU_FROM_TTY_OFF ? 
FALSE : TRUE); + break; + + case GNU_USER_PRINT_OPTION: + get_user_print_option_address(req); + break; + + case GNU_RESOLVE_TEXT_ADDR: + sym = find_pc_function(req->addr); + if (!sym || TYPE_CODE(sym->type()) != TYPE_CODE_FUNC) + req->flags |= GNU_COMMAND_FAILED; + break; + + case GNU_DISASSEMBLE: + if (req->addr2) + sprintf(req->buf, "disassemble 0x%lx 0x%lx", + req->addr, req->addr2); + else + sprintf(req->buf, "disassemble 0x%lx", req->addr); + execute_command(req->buf, TRUE); + break; + + case GNU_ADD_SYMBOL_FILE: + gdb_add_symbol_file(req); + break; + + case GNU_DELETE_SYMBOL_FILE: + gdb_delete_symbol_file(req); + break; + + case GNU_GET_LINE_NUMBER: + gdb_get_line_number(req); + break; + + case GNU_GET_DATATYPE: + gdb_get_datatype(req); + break; + + case GNU_GET_SYMBOL_TYPE: + gdb_get_symbol_type(req); + break; + + case GNU_COMMAND_EXISTS: + gdb_command_exists(req); + break; + + case GNU_ALPHA_FRAME_OFFSET: + req->value = 0; + break; + + case GNU_FUNCTION_NUMARGS: + gdb_function_numargs(req); + break; + + case GNU_DEBUG_COMMAND: + gdb_debug_command(req); + break; + + case GNU_PATCH_SYMBOL_VALUES: + gdb_patch_symbol_values(req); + break; + + case GNU_SET_CRASH_BLOCK: + gdb_set_crash_block(req); + break; + + case GNU_GET_FUNCTION_RANGE: + { + CORE_ADDR start, end; + if (!find_pc_partial_function(req->pc, NULL, &start, &end)) + req->flags |= GNU_COMMAND_FAILED; + else { + req->addr = (ulong)start; + req->addr2 = (ulong)end; + } + } + break; + + case GNU_LOOKUP_STRUCT_CONTENTS: + req->value = lookup_struct_contents(req); + break; + + case GNU_ITERATE_DATATYPES: + iterate_datatypes(req); + break; + + default: + req->flags |= GNU_COMMAND_FAILED; + break; + } + + /* Restore the streams gdb output was using */ + if (original_stdout_stream) + (dynamic_castgdb_stdout)->set_stream(original_stdout_stream); + if (original_stderr_stream) + (dynamic_castgdb_stderr)->set_stream(original_stderr_stream); +} + +/* + * Given a PC value, return the file and line number. 
+ */ +static void +gdb_get_line_number(struct gnu_request *req) +{ + struct symtab_and_line sal; + struct objfile *objfile; + CORE_ADDR pc; + +#define LASTCHAR(s) (s[strlen(s)-1]) + + /* + * Prime the addrmap pump. + */ + pc = req->addr; + + sal = find_pc_line(pc, 0); + + if (!sal.symtab) { + /* + * If a module address line number can't be found, it's typically + * due to its addrmap still containing offset values because its + * objfile doesn't have full symbols loaded. + */ + if (req->lm) { + objfile = req->lm->loaded_objfile; + if (!objfile->all_symtabs_expanded && objfile->sf) { + objfile->expand_all_symtabs(); + objfile->all_symtabs_expanded = true; + sal = find_pc_line(pc, 0); + } + } + if (!sal.symtab) { + req->buf[0] = '\0'; + return; + } + } + + if (sal.symtab->filename && (sal.symtab)->compunit ()->dirname ()) { + if (sal.symtab->filename[0] == '/') + sprintf(req->buf, "%s: %d", + sal.symtab->filename, sal.line); + else + sprintf(req->buf, "%s%s%s: %d", + (sal.symtab)->compunit ()->dirname (), + LASTCHAR((sal.symtab)->compunit ()->dirname ()) == '/' ? 
"" : "/", + sal.symtab->filename, sal.line); + } +} + + +/* + * Follow the type linkage for full member and value type resolution, with callback + */ +static void drillDownType(struct gnu_request *req, struct type *type) +{ + while (type) + { + /* check out for stub types and pull in the definition instead */ + if (type->is_stub() && TYPE_TAG_NAME(type)) { + struct symbol *sym; + sym = lookup_symbol(TYPE_TAG_NAME(type), 0, SEARCH_STRUCT_DOMAIN, 0).symbol; + if (sym) + type = sym->type(); + } + switch (TYPE_CODE(type)) { + drill_ops_t op; + long l1, l2; + int typecode; + + case TYPE_CODE_PTR: + req->tcb(EOP_POINTER, req, 0, 0, 0, 0); + break; + + case TYPE_CODE_TYPEDEF: + req->is_typedef = 1; + req->typecode = TYPE_CODE(type); + if (!req->tcb(EOP_TYPEDEF, req, TYPE_NAME(type), 0, 0, 0)) + return; + break; + + case TYPE_CODE_FUNC: + req->tcb(EOP_FUNCTION, req, 0, 0, 0, 0); + break; + + case TYPE_CODE_ARRAY: + l1 = type->length(); + l2 = check_typedef(type->target_type())->length(); + req->tcb(EOP_ARRAY, req, &l1, &l2, 0, 0); + break; + + case TYPE_CODE_VOID: + case TYPE_CODE_INT: + case TYPE_CODE_BOOL: + l1 = type->length(); + req->tcb(EOP_INT, req, &l1, 0, 0, 0); + break; + + case TYPE_CODE_UNION: + op = EOP_UNION; + goto label; + + case TYPE_CODE_ENUM: + op = EOP_ENUM; + goto label; + + case TYPE_CODE_STRUCT: + op = EOP_STRUCT; + goto label; + + default: + typecode = TYPE_CODE(type); + req->tcb(EOP_OOPS, req, &typecode, "Unknown typecode", 0, 0); + return; /* not reached */ + + label: + l1 = type->length(); + req->tcb(op, req, &l1, type, TYPE_TAG_NAME(type), 0); + } + type = type->target_type(); + } + req->tcb(EOP_DONE, req, 0, 0, 0, 0); +} + +/* + * General purpose routine for determining datatypes. 
+ */ + +static void +gdb_get_datatype(struct gnu_request *req) +{ + struct type *type; + struct type *typedef_type; + expression_up expr; + struct symbol *sym; + struct value *val; + + if (gdb_CRASHDEBUG(2)) + console("gdb_get_datatype [%s] (a)\n", req->name); + + req->typecode = TYPE_CODE_UNDEF; + + /* + * lookup_symbol() will pick up struct and union names. + */ + sym = lookup_symbol(req->name, 0, SEARCH_STRUCT_DOMAIN, 0).symbol; + if (sym) { + req->typecode = TYPE_CODE(sym->type()); + req->length = sym->type()->length(); + if (req->member) + get_member_data(req, sym->type(), 0, 1); + + if (TYPE_CODE(sym->type()) == TYPE_CODE_ENUM) + walk_enum(sym->type(), req); + + return; + } + + /* + * Otherwise parse the expression. + */ + if (gdb_CRASHDEBUG(2)) + console("gdb_get_datatype [%s] (b)\n", req->name); + + expr = parse_expression(req->name); + + + switch (expr->first_opcode()) + { + case OP_VAR_VALUE: + if (gdb_CRASHDEBUG(2)) + console("expr->first_opcode(): OP_VAR_VALUE\n"); + type = expr->evaluate_type()->type(); + if (req->tcb) { + long value = value_as_long(expr->evaluate()); + /* callback with symbol value */ + req->typecode = TYPE_CODE(type); + req->tcb(EOP_VALUE, req, &value, 0, 0, 0); + drillDownType(req, type); + } else { + if (req->flags & GNU_VAR_LENGTH_TYPECODE) { + req->typecode = TYPE_CODE(type); + req->length = type->length(); + } + if (TYPE_CODE(type) == TYPE_CODE_ENUM) { + req->typecode = TYPE_CODE(type); + req->value = value_as_long(expr->evaluate()); + req->tagname = (char *)TYPE_TAG_NAME(type); + if (!req->tagname) { + val = expr->evaluate_type(); + eval_enum(val->type(), req); + } + } + } + break; + + case OP_TYPE: + if (gdb_CRASHDEBUG(2)) + console("expr->first_opcode(): OP_TYPE\n"); + type = expr->evaluate_type()->type(); + + if (req->tcb) { + drillDownType(req, type); + } else { + req->typecode = TYPE_CODE(type); + req->length = type->length(); + if (TYPE_CODE(type) == TYPE_CODE_TYPEDEF) { + req->is_typedef = TYPE_CODE_TYPEDEF; + if 
((typedef_type = check_typedef(type))) { + req->typecode = TYPE_CODE(typedef_type); + req->length = typedef_type->length(); + type = typedef_type; + } + } + if (TYPE_CODE(type) == TYPE_CODE_ENUM) + walk_enum(type, req); + } + + if (req->member) + get_member_data(req, type, 0, 1); + + break; + + default: + if (gdb_CRASHDEBUG(2)) + console("expr->first_opcode(): %d (?)\n", + expr->first_opcode()); + break; + + } +} + +/* + * More robust enum list dump that gdb's, showing the value of each + * identifier, each on its own line. + */ +static void +walk_enum(struct type *type, struct gnu_request *req) +{ + int i; + int len, print = (req->flags & GNU_PRINT_ENUMERATORS); + long long lastval; + + if (print) { + if (req->is_typedef) + gdb_printf(gdb_stdout, "typedef "); + if (TYPE_TAG_NAME(type)) + gdb_printf(gdb_stdout, "enum %s {\n", TYPE_TAG_NAME (type)); + else + gdb_printf(gdb_stdout, "enum {\n"); + } + + len = TYPE_NFIELDS (type); + for (i = 0; i < len; i++) { + if (print) + gdb_printf(gdb_stdout, " %s", type->field(i).name()); + lastval = type->field(i).loc_enumval(); + if (print) { + gdb_printf(gdb_stdout, " = %s", plongest(lastval)); + gdb_printf(gdb_stdout, "\n"); + } else if (req->tcb) + req->tcb(EOP_ENUMVAL, req, type->field(i).name(), &lastval, 0, 0); + } + if (print) { + if (TYPE_TAG_NAME(type)) + gdb_printf(gdb_stdout, "};\n"); + else + gdb_printf(gdb_stdout, "} %s;\n", req->name); + } +} + +/* + * Given an enum type with no tagname, determine its value. 
+ */ +static void +eval_enum(struct type *type, struct gnu_request *req) +{ + int i; + int len; + long long lastval; + + len = TYPE_NFIELDS (type); + lastval = 0; + + for (i = 0; i < len; i++) { + if (lastval != type->field(i).loc_enumval()) + lastval = type->field(i).loc_enumval(); + + if (STREQ(type->field(i).name(), req->name)) { + req->tagname = "(unknown)"; + req->value = lastval; + return; + } + lastval++; + } +} + +/* + * Walk through a struct type's list of fields looking for the desired + * member field, and when found, return its relevant data. + */ +static void +get_member_data(struct gnu_request *req, struct type *type, long offset, int is_first) +{ + short i; + struct field *nextfield; + short nfields; + struct type *typedef_type, *target_type; + + req->member_offset = -1; + + nfields = TYPE_MAIN_TYPE(type)->m_nfields; + nextfield = TYPE_MAIN_TYPE(type)->flds_bnds.fields; + + if (nfields == 0 && is_first /* The first call */) { + struct type *newtype; + newtype = lookup_transparent_type(req->name); + if (newtype) { + console("get_member_data(%s.%s): switching type from %lx to %lx\n", + req->name, req->member, type, newtype); + nfields = TYPE_MAIN_TYPE(newtype)->m_nfields; + nextfield = TYPE_MAIN_TYPE(newtype)->flds_bnds.fields; + } + } + + for (i = 0; i < nfields; i++) { + if (*nextfield->m_name == 0) { /* Anonymous struct/union */ + get_member_data(req, nextfield->type(), + offset + nextfield->m_loc.bitpos, 0); + if (req->member_offset != -1) + return; + } else { + /* callback may be just looking for a specific member name */ + if (req->tcb) { + if (req->tcb(EOP_MEMBER_NAME, req, nextfield->m_name, 0, 0, 0)) { + long bitpos = nextfield->loc_bitpos(); + long bitsize = nextfield->bitsize(); + long len = nextfield->type()->length(); + long byteOffset; + offset += nextfield->m_loc.bitpos; + byteOffset = offset/8; + console("EOP_MEMBER_SIZES\n"); + req->tcb(EOP_MEMBER_SIZES, req, &byteOffset, &len, &bitpos, &bitsize); + /* callback with full type info */ + 
drillDownType(req, nextfield->type()); + } + } else if (STREQ(req->member, nextfield->m_name)) { + req->member_offset = offset + nextfield->m_loc.bitpos; + req->member_length = nextfield->type()->length(); + req->member_typecode = TYPE_CODE(nextfield->type()); + req->member_main_type_name = (char *)TYPE_NAME(nextfield->type()); + req->member_main_type_tag_name = (char *)TYPE_TAG_NAME(nextfield->type()); + target_type = nextfield->type()->target_type(); + if (target_type) { + req->member_target_type_name = (char *)TYPE_NAME(target_type); + req->member_target_type_tag_name = (char *)TYPE_TAG_NAME(target_type); + } + if ((req->member_typecode == TYPE_CODE_TYPEDEF) && + (typedef_type = check_typedef(nextfield->type()))) { + req->member_length = typedef_type->length(); + } + return; + } + } + nextfield++; + } +} + +/* + * Check whether a command exists. If it doesn't, the command will be + * returned indirectly via the error_hook. + */ +static void +gdb_command_exists(struct gnu_request *req) +{ + extern struct cmd_list_element *cmdlist; + + req->value = FALSE; + lookup_cmd((const char **)&req->name, cmdlist, "", NULL, 0, 1); + req->value = TRUE; +} + +static void +gdb_function_numargs(struct gnu_request *req) +{ + struct symbol *sym; + + sym = find_pc_function(req->pc); + + if (!sym || TYPE_CODE(sym->type()) != TYPE_CODE_FUNC) { + req->flags |= GNU_COMMAND_FAILED; + return; + } + + req->value = (ulong)TYPE_NFIELDS(sym->type()); +} + +struct load_module *gdb_current_load_module = NULL; + +static void +gdb_add_symbol_file(struct gnu_request *req) +{ + struct load_module *lm; + int i; + int allsect = 0; + char *secname; + char buf[96]; + + gdb_current_load_module = lm = (struct load_module *)req->addr; + + req->name = lm->mod_namelist; + gdb_delete_symbol_file(req); + lm->loaded_objfile = NULL; + + if ((lm->mod_flags & MOD_NOPATCH) == 0) { + for (i = 0 ; i < lm->mod_sections; i++) { + if (STREQ(lm->mod_section_data[i].name, ".text") && + (lm->mod_section_data[i].flags & 
SEC_FOUND)) + allsect = 1; + } + + if (!allsect) { + sprintf(req->buf, "add-symbol-file %s 0x%lx %s", lm->mod_namelist, + lm->mod_text_start ? lm->mod_text_start : lm->mod_base, + lm->mod_flags & MOD_DO_READNOW ? "-readnow" : ""); + if (lm->mod_data_start) { + sprintf(buf, " -s .data 0x%lx", lm->mod_data_start); + strcat(req->buf, buf); + } + if (lm->mod_bss_start) { + sprintf(buf, " -s .bss 0x%lx", lm->mod_bss_start); + strcat(req->buf, buf); + } + if (lm->mod_rodata_start) { + sprintf(buf, " -s .rodata 0x%lx", lm->mod_rodata_start); + strcat(req->buf, buf); + } + } else { + sprintf(req->buf, "add-symbol-file %s 0x%lx %s", lm->mod_namelist, + lm->mod_text_start, lm->mod_flags & MOD_DO_READNOW ? + "-readnow" : ""); + for (i = 0; i < lm->mod_sections; i++) { + secname = lm->mod_section_data[i].name; + if ((lm->mod_section_data[i].flags & SEC_FOUND) && + !STREQ(secname, ".text")) { + if (lm->mod_section_data[i].addr) + sprintf(buf, " -s %s 0x%lx", secname, lm->mod_section_data[i].addr); + else + sprintf(buf, " -s %s 0x%lx", secname, + lm->mod_section_data[i].offset + lm->mod_base); + strcat(req->buf, buf); + } + } + } + } + + if (gdb_CRASHDEBUG(1)) + gdb_printf(gdb_stdout, "%s\n", req->buf); + + execute_command(req->buf, FALSE); + + for (objfile *objfile : current_program_space->objfiles ()) { + if (same_file((char *)objfile_name(objfile), lm->mod_namelist)) { + if (objfile->separate_debug_objfile) + lm->loaded_objfile = objfile->separate_debug_objfile; + else + lm->loaded_objfile = objfile; + break; + } + } + + if (!lm->loaded_objfile) + req->flags |= GNU_COMMAND_FAILED; +} + +static void +gdb_delete_symbol_file(struct gnu_request *req) +{ + for (objfile *objfile : current_program_space->objfiles ()) { + if (STREQ(objfile_name(objfile), req->name) || + same_file((char *)objfile_name(objfile), req->name)) { + objfile->unlink (); + break; + } + } + + if (gdb_CRASHDEBUG(2)) { + gdb_printf(gdb_stdout, "current object files:\n"); + for (objfile *objfile : 
current_program_space->objfiles ()) + gdb_printf(gdb_stdout, " %s\n", objfile_name(objfile)); + } +} + +/* + * Walk through all minimal_symbols, patching their values with the + * correct addresses. + */ +static void +gdb_patch_symbol_values(struct gnu_request *req) +{ + req->name = PATCH_KERNEL_SYMBOLS_START; + patch_kernel_symbol(req); + + for (objfile *objfile : current_program_space->objfiles ()) + for (minimal_symbol *msymbol : objfile->msymbols ()) + { + req->name = (char *)msymbol->m_name; + req->addr = (ulong)(&msymbol->m_value.ivalue); + if (!patch_kernel_symbol(req)) { + req->flags |= GNU_COMMAND_FAILED; + break; + } + } + + req->name = PATCH_KERNEL_SYMBOLS_STOP; + patch_kernel_symbol(req); + + clear_symtab_users(0); + gdb_merge_flags |= KERNEL_SYMBOLS_PATCHED; +} + +static void +gdb_get_symbol_type(struct gnu_request *req) +{ + expression_up expr; + struct value *val; + struct type *type; + struct type *target_type; + + req->typecode = TYPE_CODE_UNDEF; + + expr = parse_expression (req->name); + val = expr->evaluate_type(); + + type = val->type(); + + req->type_name = (char *)TYPE_MAIN_TYPE(type)->name; + req->typecode = TYPE_MAIN_TYPE(type)->code; + req->length = type->m_length; + req->type_tag_name = (char *)TYPE_TAG_NAME(type); + target_type = TYPE_MAIN_TYPE(type)->m_target_type; + + if (target_type) { + req->target_typename = (char *)TYPE_MAIN_TYPE(target_type)->name; + req->target_typecode = TYPE_MAIN_TYPE(target_type)->code; + req->target_length = target_type->m_length; + } + + if (req->member) + get_member_data(req, type, 0, 1); +} + +static void +gdb_debug_command(struct gnu_request *req) +{ + +} + +/* + * Only necessary on "patched" kernel symbol sessions, and called only by + * lookup_symbol(), pull a symbol value bait-and-switch operation by altering + * either a data symbol's address value or a text symbol's block start address. 
+ */ +static void +gdb_bait_and_switch(char *name, struct symbol *sym) +{ + struct bound_minimal_symbol msym; + struct block *block; + + if ((gdb_merge_flags & KERNEL_SYMBOLS_PATCHED) && + (msym = lookup_minimal_symbol(current_program_space, name, gdb_kernel_objfile, NULL)).minsym) { + if (sym->aclass() == LOC_BLOCK) { + block = (struct block *)(sym->value_block()); + block->set_start(msym.value_address()); + } else + sym->set_value_address(msym.value_address()); + } +} + +#include "valprint.h" + +void +get_user_print_option_address(struct gnu_request *req) +{ + extern struct value_print_options user_print_options; + + req->addr = 0; + + if (strcmp(req->name, "output_format") == 0) + req->addr = (ulong)&user_print_options.output_format; + if (strcmp(req->name, "print_max") == 0) + req->addr = (ulong)&user_print_options.print_max; + if (strcmp(req->name, "prettyprint_structs") == 0) + req->addr = (ulong)&user_print_options.prettyformat_structs; + if (strcmp(req->name, "prettyprint_arrays") == 0) + req->addr = (ulong)&user_print_options.prettyformat_arrays; + if (strcmp(req->name, "repeat_count_threshold") == 0) + req->addr = (ulong)&user_print_options.repeat_count_threshold; + if (strcmp(req->name, "stop_print_at_null") == 0) + req->addr = (ulong)&user_print_options.stop_print_at_null; + if (strcmp(req->name, "output_radix") == 0) + req->addr = (ulong)&output_radix; +} + +CORE_ADDR crash_text_scope; + +static void +gdb_set_crash_block(struct gnu_request *req) +{ + if (!req->addr) { /* debug */ + crash_text_scope = 0; + return; + } + + if ((req->addr2 = (ulong)block_for_pc(req->addr))) + crash_text_scope = req->addr; + else { + crash_text_scope = 0; + req->flags |= GNU_COMMAND_FAILED; + } +} + +static const struct block * +gdb_get_crash_block(void) +{ + if (crash_text_scope) + return block_for_pc(crash_text_scope); + else + return NULL; +} + +static long +lookup_struct_contents(struct gnu_request *req) +{ + int i; + long r; + struct field *f; + struct main_type *m; + 
const char *n; + struct main_type *top_m = (struct main_type *)req->addr; + char *type_name = req->type_name; + + if (!top_m || !type_name) + return 0; + + for (i = 0; i < top_m->m_nfields; i++) + { + f = top_m->flds_bnds.fields + i; + if (!f->type()) + continue; + m = f->type()->main_type; + + // If the field is an array, check the target type - + // it might be structure, or might not be. + // - struct request_sock *syn_table[0]; + // here m->target_type->main_type->code is expected + // to be TYPE_CODE_PTR + // - struct list_head vec[TVN_SIZE]; + // here m->target_type->main_type->code should be + // TYPE_CODE_STRUCT + if (m->code == TYPE_CODE_ARRAY && m->m_target_type) + m = m->m_target_type->main_type; + + /* Here is a recursion. + * If we have struct variable (not pointer), + * scan this inner structure + */ + if (m->code == TYPE_CODE_STRUCT) { + req->addr = (ulong)m; + r = lookup_struct_contents(req); + req->addr = (ulong)top_m; + if (r) + return 1; + } + + if (m->code == TYPE_CODE_PTR && m->m_target_type) + m = m->m_target_type->main_type; + if (m->name) + n = m->name; + else + continue; + + if (strstr(n, type_name)) + return 1; + } + + return 0; +} + +static void +iterate_datatypes (struct gnu_request *req) +{ + for (objfile *objfile : current_program_space->objfiles ()) + { + if (objfile->sf) { + objfile->expand_all_symtabs(); + objfile->all_symtabs_expanded = true; + } + + for (compunit_symtab *cust : objfile->compunits ()) + { + const struct blockvector *bv = cust->blockvector(); + + for (int i = GLOBAL_BLOCK; i <= STATIC_BLOCK; ++i) + { + const struct block *b = bv->block(i); + + for (struct symbol *sym : block_iterator_range (b)) + { + QUIT; + + if (sym->aclass() != LOC_TYPEDEF) + continue; + + if (req->highest && + !(req->lowest <= sym->type()->m_length && sym->type()->m_length <= req->highest)) + continue; + + req->addr = (ulong)(sym->type()->main_type); + req->name = (char *)(sym->m_name); + req->length = sym->type()->m_length; + + if (req->member) 
{ + req->value = lookup_struct_contents(req); + if (!req->value) + continue; + } + req->callback(req, req->callback_data); + } + } + } + } +} +#endif --- gdb-16.2/gdb/ui-file.c.orig +++ gdb-16.2/gdb/ui-file.c @@ -251,6 +251,12 @@ stdio_file::~stdio_file () fclose (m_file); } +FILE* +stdio_file::get_stream(void) +{ + return m_file; +} + void stdio_file::set_stream (FILE *file) { --- gdb-16.2/gdb/ui-file.h.orig +++ gdb-16.2/gdb/ui-file.h @@ -273,10 +273,11 @@ class stdio_file : public ui_file int fd () const override { return m_fd; } -private: + FILE *get_stream(void); /* Sets the internal stream to FILE, and saves the FILE's file descriptor in M_FD. */ void set_stream (FILE *file); +private: /* The file. */ FILE *m_file; --- gdb-16.2/gdb/xml-syscall.c.orig +++ gdb-16.2/gdb/xml-syscall.c @@ -36,7 +36,11 @@ static void syscall_warn_user (void) { +#ifdef CRASH_MERGE + static int have_warned = 1; +#else static int have_warned = 0; +#endif if (!have_warned) { have_warned = 1; --- gdb-16.2/libiberty/Makefile.in.orig +++ gdb-16.2/libiberty/Makefile.in @@ -181,6 +181,7 @@ REQUIRED_OFILES = \ ./getruntime.$(objext) ./hashtab.$(objext) ./hex.$(objext) \ ./lbasename.$(objext) ./lrealpath.$(objext) \ ./make-relative-prefix.$(objext) ./make-temp-file.$(objext) \ + ./mkstemps.$(objext) \ ./objalloc.$(objext) \ ./obstack.$(objext) \ ./partition.$(objext) ./pexecute.$(objext) ./physmem.$(objext) \ @@ -214,7 +215,7 @@ CONFIGURED_OFILES = ./asprintf.$(objext) ./atexit.$(objext) \ ./index.$(objext) ./insque.$(objext) \ ./memchr.$(objext) ./memcmp.$(objext) ./memcpy.$(objext) \ ./memmem.$(objext) ./memmove.$(objext) \ - ./mempcpy.$(objext) ./memset.$(objext) ./mkstemps.$(objext) \ + ./mempcpy.$(objext) ./memset.$(objext) \ ./pex-djgpp.$(objext) ./pex-msdos.$(objext) \ ./pex-unix.$(objext) ./pex-win32.$(objext) \ ./putenv.$(objext) \ --- gdb-16.2/opcodes/i386-dis.c.orig +++ gdb-16.2/opcodes/i386-dis.c @@ -9589,6 +9589,10 @@ print_insn (bfd_vma pc, disassemble_info *info, int 
intel_syntax) dp = &dis386_twobyte[*ins.codep]; ins.need_modrm = twobyte_has_modrm[*ins.codep]; + if (dp->name && ((strcmp(dp->name, "ud2a") == 0) || (strcmp(dp->name, "ud2") == 0))) { + extern int kernel_BUG_encoding_bytes(void); + ins.codep += kernel_BUG_encoding_bytes(); + } } else { --- gdb-16.2/readline/readline/misc.c.orig +++ gdb-16.2/readline/readline/misc.c @@ -411,7 +411,7 @@ _rl_history_set_point (void) #if defined (VI_MODE) if (rl_editing_mode == vi_mode && _rl_keymap != vi_insertion_keymap) - rl_point = 0; + rl_point = rl_end; #endif /* VI_MODE */ if (rl_editing_mode == emacs_mode) --- gdb-16.2/readline/readline/readline.h.orig +++ gdb-16.2/readline/readline/readline.h @@ -407,7 +407,7 @@ extern int rl_mark_active_p (void); #if defined (USE_VARARGS) && defined (PREFER_STDARG) extern int rl_message (const char *, ...) __attribute__((__format__ (printf, 1, 2))); #else -extern int rl_message (); +extern int rl_message (void); #endif extern int rl_show_char (int); --- gdb-16.2/readline/readline/rltypedefs.h.orig +++ gdb-16.2/readline/readline/rltypedefs.h @@ -32,10 +32,10 @@ extern "C" { # define _FUNCTION_DEF #if defined(__GNUC__) || defined(__clang__) -typedef int Function () __attribute__((deprecated)); -typedef void VFunction () __attribute__((deprecated)); -typedef char *CPFunction () __attribute__((deprecated)); -typedef char **CPPFunction () __attribute__((deprecated)); +typedef int Function (void) __attribute__ ((deprecated)); +typedef void VFunction (void) __attribute__ ((deprecated)); +typedef char *CPFunction (void) __attribute__ ((deprecated)); +typedef char **CPPFunction (void) __attribute__ ((deprecated)); #else typedef int Function (); typedef void VFunction (); --- gdb-16.2/readline/readline/util.c.orig +++ gdb-16.2/readline/readline/util.c @@ -489,10 +489,13 @@ _rl_trace (va_alist) if (_rl_tracefp == 0) _rl_tropen (); + if (!_rl_tracefp) + goto out; vfprintf (_rl_tracefp, format, args); fprintf (_rl_tracefp, "\n"); fflush (_rl_tracefp); 
+out: va_end (args); } @@ -512,16 +515,17 @@ _rl_tropen (void) #endif snprintf (fnbuf, sizeof (fnbuf), "%s/rltrace.%ld", x, (long)getpid()); unlink(fnbuf); - _rl_tracefp = fopen (fnbuf, "w+"); + _rl_tracefp = fopen (fnbuf, "w+xe"); return _rl_tracefp != 0; } int _rl_trclose (void) { - int r; + int r = 0; - r = fclose (_rl_tracefp); + if (_rl_tracefp) + r = fclose (_rl_tracefp); _rl_tracefp = 0; return r; } --- gdb-16.2/configure.orig +++ gdb-16.2/configure @@ -3620,6 +3620,9 @@ case "${target}" in ;; esac +#disable gdbserver build in crash-utility +enable_gdbserver=no + # Only allow gdbserver on some systems. if test -d ${srcdir}/gdbserver; then if test x$enable_gdbserver = x; then --- gdb-16.2/gdb/symfile.c.orig +++ gdb-16.2/gdb/symfile.c @@ -341,8 +341,15 @@ place_section (bfd *abfd, asection *sect, section_offsets &offsets, return; /* If the user specified an offset, honor it. */ - if (offsets[gdb_bfd_section_index (abfd, sect)] != 0) + if (offsets[gdb_bfd_section_index (abfd, sect)] != 0) { + /* + * addr_info_make_relative() subtracts out the section VMA. But if the user + * specified an offset, they have already taken this into account. Add it + * back in + */ + offsets[gdb_bfd_section_index (abfd, sect)] += bfd_section_vma(sect); return; + } /* Otherwise, let's try to find a place for the section. */ start_addr = (lowest + align - 1) & -align; @@ -630,33 +637,6 @@ default_symfile_offsets (struct objfile *objfile, bfd *abfd = objfile->obfd.get (); asection *cur_sec; - for (cur_sec = abfd->sections; cur_sec != NULL; cur_sec = cur_sec->next) - /* We do not expect this to happen; just skip this step if the - relocatable file has a section with an assigned VMA. */ - if (bfd_section_vma (cur_sec) != 0 - /* - * Kernel modules may have some non-zero VMAs, i.e., like the - * __ksymtab and __ksymtab_gpl sections in this example: - * - * Section Headers: - * [Nr] Name Type Address Offset - * Size EntSize Flags Link Info Align - * ... 
- * [ 8] __ksymtab PROGBITS 0000000000000060 0000ad90 - * 0000000000000010 0000000000000000 A 0 0 16 - * [ 9] .rela__ksymtab RELA 0000000000000000 0000ada0 - * 0000000000000030 0000000000000018 43 8 8 - * [10] __ksymtab_gpl PROGBITS 0000000000000070 0000add0 - * 00000000000001a0 0000000000000000 A 0 0 16 - * ... - * - * but they should be treated as if they are NULL. - */ - && strncmp (bfd_section_name (cur_sec), "__k", 3) != 0) - break; - - if (cur_sec == NULL) - { section_offsets &offsets = objfile->section_offsets; /* Pick non-overlapping offsets for sections the user did not @@ -704,7 +684,6 @@ default_symfile_offsets (struct objfile *objfile, offsets[cur_sec->index]); offsets[cur_sec->index] = 0; } - } } /* Remember the bfd indexes for the .text, .data, .bss and --- gdb-16.2/gdb/symtab.c.orig +++ gdb-16.2/gdb/symtab.c @@ -7690,7 +7690,11 @@ console("expr->first_opcode(): OP_VAR_VALUE\n"); type = expr->evaluate_type()->type(); if (req->tcb) { - long value = value_as_long(expr->evaluate()); + expr::var_value_operation *vvop + = (gdb::checked_static_cast + (expr->op.get ())); + sym = vvop->get_symbol (); + long value = sym->value_longest (); /* callback with symbol value */ req->typecode = TYPE_CODE(type); req->tcb(EOP_VALUE, req, &value, 0, 0, 0); @@ -7701,8 +7705,12 @@ req->length = type->length(); } if (TYPE_CODE(type) == TYPE_CODE_ENUM) { + expr::var_value_operation *vvop + = (gdb::checked_static_cast + (expr->op.get ())); + sym = vvop->get_symbol (); req->typecode = TYPE_CODE(type); - req->value = value_as_long(expr->evaluate()); + req->value = sym->value_longest (); req->tagname = (char *)TYPE_TAG_NAME(type); if (!req->tagname) { val = expr->evaluate_type(); --- gdb-16.2/gdb/solib-svr4.c.orig +++ gdb-16.2/gdb/solib-svr4.c @@ -741,13 +741,13 @@ return 0; return extract_typed_address (pbuf, ptr_type); } - +#ifndef CRASH_MERGE /* Find DT_DEBUG. 
*/ if (gdb_bfd_scan_elf_dyntag (DT_DEBUG, current_program_space->exec_bfd (), &dyn_ptr, NULL) || scan_dyntag_auxv (DT_DEBUG, &dyn_ptr, NULL)) return dyn_ptr; - +#endif /* This may be a static executable. Look for the symbol conventionally named _r_debug, as a last resort. */ bound_minimal_symbol msymbol --- gdb-16.2/gdb/symtab.c.orig +++ gdb-16.2/gdb/symtab.c @@ -7726,6 +7726,7 @@ type = expr->evaluate_type()->type(); if (req->tcb) { + req->typecode = TYPE_CODE(type); drillDownType(req, type); } else { req->typecode = TYPE_CODE(type); crash-utility-crash-9cd43f5/makedumpfile.c0000664000372000037200000002206615107550337020211 0ustar juerghjuergh/* * makedumpfile.c * * This code is for reading a dumpfile ganarated by makedumpfile command. * * Copyright (C) 2011 NEC Soft, Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Ken'ichi Ohmichi */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include "makedumpfile.h" #include static void flattened_format_get_osrelease(char *); int flattened_format = 0; struct flat_data { int64_t off_flattened; int64_t off_rearranged; /* offset which will be rearranged. 
*/ int64_t buf_size; }; struct all_flat_data { unsigned long long num_array; struct flat_data *array; size_t file_size; }; struct all_flat_data afd; struct makedumpfile_header fh_save; static int is_bigendian(void) { int i = 0x12345678; if (*(char *)&i == 0x12) return TRUE; else return FALSE; } static unsigned long long store_flat_data_array(char *file, struct flat_data **fda) { int result = FALSE, fd; int64_t offset_fdh; int64_t offset_report = 0; unsigned long long num_allocated = 0; unsigned long long num_stored = 0; unsigned long long sort_idx; unsigned long long size_allocated; struct flat_data *ptr = NULL, *cur, *new; struct makedumpfile_data_header fdh; struct stat64 stat; ulonglong pct, last_pct; char buf[BUFSIZE]; ssize_t bytes_read; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "unable to open dump file %s\n", file); return -1; } if (lseek(fd, MAX_SIZE_MDF_HEADER, SEEK_SET) < 0) { error(INFO, "%s: seek error (flat format)\n", file); close(fd); return -1; } if (stat64(file, &stat) < 0) { error(INFO, "cannot stat64 %s\n", file); return -1; } please_wait("sorting flat format data"); pct = last_pct = 0; while (1) { if (num_allocated <= num_stored) { num_allocated += 100; size_allocated = sizeof(struct flat_data) * num_allocated; new = realloc(ptr, size_allocated); if (new == NULL) { error(INFO, "unable to realloc flat_data structures\n"); break; } ptr = new; } offset_fdh = lseek(fd, 0x0, SEEK_CUR); if ((bytes_read = read(fd, &fdh, sizeof(fdh))) != sizeof(fdh)) { if (bytes_read >= 0) error(INFO, "read error: %s (flat format): truncated/incomplete\n", file); else error(INFO, "read error: %s (flat format)\n", file); break; } if (!is_bigendian()){ fdh.offset = bswap_64(fdh.offset); fdh.buf_size = bswap_64(fdh.buf_size); } if (fdh.offset == END_FLAG_FLAT_HEADER) { result = TRUE; break; } cur = ptr + num_stored; sort_idx = num_stored; while (sort_idx) { new = ptr + --sort_idx; if (new->off_rearranged >= fdh.offset) { cur->off_flattened = new->off_flattened; 
cur->off_rearranged = new->off_rearranged; cur->buf_size = new->buf_size; cur = new; } else { if (CRASHDEBUG(1) && sort_idx + 1 != num_stored) { fprintf(fp, "makedumpfile: Moved from %lld to %lld\n", num_stored, sort_idx + 1); } break; } } cur->off_flattened = offset_fdh + sizeof(fdh); cur->off_rearranged = fdh.offset; cur->buf_size = fdh.buf_size; num_stored++; pct = (offset_fdh * 100ULL) / stat.st_size; if (pct > last_pct) { sprintf(buf, "sorting flat format data: %lld%%", (ulonglong)pct); please_wait(buf); if (CRASHDEBUG(1)) fprintf(fp, "\n"); last_pct = pct; } if (CRASHDEBUG(1) && (fdh.offset >> 30) > (offset_report >> 30)) { fprintf(fp, "makedumpfile: At %lld GiB\n", (ulonglong)(fdh.offset >> 30)); offset_report = fdh.offset; } /* seek for next makedumpfile_data_header. */ if (lseek(fd, fdh.buf_size, SEEK_CUR) < 0) { error(INFO, "%s: seek error (flat format)\n", file); break; } } please_wait_done(); close(fd); if (result == FALSE) { free(ptr); return -1; } *fda = ptr; return num_stored; } static int read_all_makedumpfile_data_header(char *file) { unsigned long long num; struct flat_data *fda = NULL; long long retval; retval = num = store_flat_data_array(file, &fda); if (retval < 0) return FALSE; afd.num_array = num; afd.array = fda; return TRUE; } void check_flattened_format(char *file) { int fd, get_osrelease; struct stat stat; struct makedumpfile_header fh; if (pc->flags2 & GET_OSRELEASE) { get_osrelease = TRUE; pc->flags2 &= ~GET_OSRELEASE; } else get_osrelease = FALSE; if (flattened_format) goto out; if (file_exists(file, &stat) && S_ISCHR(stat.st_mode)) goto out; fd = open(file, O_RDONLY); if (fd < 0) { error(INFO, "unable to open dump file %s\n", file); goto out; } if (read(fd, &fh, sizeof(fh)) < 0) { error(INFO, "unable to read dump file %s\n", file); close(fd); goto out; } close(fd); if (!is_bigendian()){ fh.type = bswap_64(fh.type); fh.version = bswap_64(fh.version); } if ((strncmp(fh.signature, MAKEDUMPFILE_SIGNATURE, sizeof(MAKEDUMPFILE_SIGNATURE)) 
!= 0) || (fh.type != TYPE_FLAT_HEADER)) goto out; if (get_osrelease) { flattened_format_get_osrelease(file); return; } if (!read_all_makedumpfile_data_header(file)) return; if (CRASHDEBUG(1)) fprintf(fp, "%s: FLAT\n\n", file); fh_save = fh; flattened_format = TRUE; return; out: if (get_osrelease) pc->flags2 |= GET_OSRELEASE; } static int read_raw_dump_file(int fd, off_t offset, void *buf, size_t size) { if (lseek(fd, offset, SEEK_SET) < 0) { if (CRASHDEBUG(1)) error(INFO, "read_raw_dump_file: lseek error (flat format)\n"); return FALSE; } if (read(fd, buf, size) < size) { if (CRASHDEBUG(1)) error(INFO, "read_raw_dump_file: read error (flat format)\n"); return FALSE; } return TRUE; } int read_flattened_format(int fd, off_t offset, void *buf, size_t size) { unsigned long long index, index_start, index_end; int64_t range_start, range_end; size_t read_size, remain_size; off_t offset_read; struct flat_data *ptr; index_start = 0; index_end = afd.num_array; while (1) { index = (index_start + index_end) / 2; ptr = afd.array + index; range_start = ptr->off_rearranged; range_end = ptr->off_rearranged + ptr->buf_size; if ((range_start <= offset) && (offset < range_end)) { /* Found a corresponding array. */ offset_read = (offset - range_start) + ptr->off_flattened; if (offset + size <= range_end) { if (!read_raw_dump_file(fd, offset_read, buf, size)) return FALSE; break; } /* Searh other array corresponding to remaining data. */ read_size = range_end - offset; remain_size = size - read_size; if (!read_raw_dump_file(fd, offset_read, buf, read_size)) return FALSE; if (!read_flattened_format(fd, offset + read_size, (char *)buf + read_size, remain_size)) return FALSE; break; } else if ((index == index_start) && (index_start + 1 == index_end)) { /* * Try to read not-written area. That is a common case, * because the area might be skipped by lseek(). * This area should be the data filled with zero. 
*/ ptr = afd.array + index_end; if (offset + size <= ptr->off_rearranged) { memset(buf, 0x0, size); } else { read_size = ptr->off_rearranged - offset; remain_size = size - read_size; memset(buf, 0x0, read_size); if (!read_flattened_format(fd, offset + read_size, (char *)buf + read_size, remain_size)) return FALSE; } break; } else if (offset < ptr->off_rearranged) index_end = index; else index_start = index; } return TRUE; } int is_flattened_format(char *file) { check_flattened_format(file); return flattened_format; } void dump_flat_header(FILE *ofp) { int i; fprintf(ofp, "makedumpfile header:\n"); fprintf(ofp, " signature: \""); for (i = 0; i < SIG_LEN_MDF; i++) { if (!fh_save.signature[i]) break; fprintf(ofp, "%c", fh_save.signature[i]); } fprintf(ofp, "\"\n"); fprintf(ofp, " type: %llx\n", (ulonglong)fh_save.type); fprintf(ofp, " version: %llx\n", (ulonglong)fh_save.version); fprintf(ofp, " all_flat_data:\n"); fprintf(ofp, " num_array: %lld\n", (ulonglong)afd.num_array); fprintf(ofp, " array: %lx\n", (ulong)afd.array); fprintf(ofp, " file_size: %ld\n\n", (ulong)afd.file_size); } static void flattened_format_get_osrelease(char *file) { int c; FILE *pipe; char buf[BUFSIZE], *p1, *p2; c = strlen("OSRELEASE="); sprintf(buf, "/usr/bin/strings -n %d %s", c, file); if ((pipe = popen(buf, "r")) == NULL) return; for (c = 0; (c < 100) && fgets(buf, BUFSIZE-1, pipe); c++) { if ((p1 = strstr(buf, "OSRELEASE="))) { p2 = strstr(p1, "="); fprintf(fp, "%s", p2+1); flattened_format = TRUE; pc->flags2 |= GET_OSRELEASE; } } pclose(pipe); } crash-utility-crash-9cd43f5/test.c0000664000372000037200000000475515107550337016532 0ustar juerghjuergh/* test.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2011 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2011 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include static struct option test_long_options[] = { {"no", no_argument, 0, 0}, {"req", required_argument, 0, 0}, {0, 0, 0, 0} }; /* * Test your stuff here first if you'd like. If anything's being done * below in this routine, consider it leftover trash... */ void cmd_test(void) { int c; int option_index; while ((c = getopt_long(argcnt, args, "", test_long_options, &option_index)) != EOF) { switch(c) { case 0: if (STREQ(test_long_options[option_index].name, "no")) fprintf(fp, "no argument\n"); if (STREQ(test_long_options[option_index].name, "req")) fprintf(fp, "required argument: %s\n", optarg); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { ; optind++; } } /* * Scratch routine for testing a feature on a per-task basis by entering * the "foreach test" command. Like cmd_test(), anything that's being done * below in this routine can be considered trash. */ void foreach_test(ulong task, ulong flags) { } /* * Template for building a new command. 
*/ void cmd_template(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { ; optind++; } } crash-utility-crash-9cd43f5/unwind_x86_32_64.c0000664000372000037200000010163415107550337020373 0ustar juerghjuergh/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #if defined(X86_64) /* * Support for genarating DWARF CFI based backtraces. * Borrowed heavily from the kernel's implementation of unwinding using the * DWARF CFI written by Jan Beulich */ #ifdef X86_64 #include "unwind_x86_64.h" #endif #ifdef X86 #include "unwind_x86.h" #endif #include "defs.h" #define MAX_STACK_DEPTH 8 static struct local_unwind_table { struct { unsigned long pc; unsigned long range; } core, init; void *address; unsigned long size; } *local_unwind_tables, default_unwind_table; static int gather_in_memory_unwind_tables(void); static int populate_local_tables(ulong, char *); static int unwind_tables_cnt = 0; static struct local_unwind_table *find_table(unsigned long); static void dump_local_unwind_tables(void); static const struct { unsigned offs:BITS_PER_LONG / 2; unsigned width:BITS_PER_LONG / 2; } reg_info[] = { UNW_REGISTER_INFO }; #undef PTREGS_INFO #undef EXTRA_INFO #ifndef REG_INVALID #define REG_INVALID(r) (reg_info[r].width == 0) #endif #define DW_CFA_nop 0x00 #define DW_CFA_set_loc 0x01 #define DW_CFA_advance_loc1 0x02 #define DW_CFA_advance_loc2 0x03 #define DW_CFA_advance_loc4 0x04 #define DW_CFA_offset_extended 0x05 #define 
DW_CFA_restore_extended 0x06 #define DW_CFA_undefined 0x07 #define DW_CFA_same_value 0x08 #define DW_CFA_register 0x09 #define DW_CFA_remember_state 0x0a #define DW_CFA_restore_state 0x0b #define DW_CFA_def_cfa 0x0c #define DW_CFA_def_cfa_register 0x0d #define DW_CFA_def_cfa_offset 0x0e #define DW_CFA_def_cfa_expression 0x0f #define DW_CFA_expression 0x10 #define DW_CFA_offset_extended_sf 0x11 #define DW_CFA_def_cfa_sf 0x12 #define DW_CFA_def_cfa_offset_sf 0x13 #define DW_CFA_val_offset 0x14 #define DW_CFA_val_offset_sf 0x15 #define DW_CFA_val_expression 0x16 #define DW_CFA_lo_user 0x1c #define DW_CFA_GNU_window_save 0x2d #define DW_CFA_GNU_args_size 0x2e #define DW_CFA_GNU_negative_offset_extended 0x2f #define DW_CFA_hi_user 0x3f #define DW_EH_PE_FORM 0x07 #define DW_EH_PE_native 0x00 #define DW_EH_PE_leb128 0x01 #define DW_EH_PE_data2 0x02 #define DW_EH_PE_data4 0x03 #define DW_EH_PE_data8 0x04 #define DW_EH_PE_signed 0x08 #define DW_EH_PE_ADJUST 0x70 #define DW_EH_PE_abs 0x00 #define DW_EH_PE_pcrel 0x10 #define DW_EH_PE_textrel 0x20 #define DW_EH_PE_datarel 0x30 #define DW_EH_PE_funcrel 0x40 #define DW_EH_PE_aligned 0x50 #define DW_EH_PE_indirect 0x80 #define DW_EH_PE_omit 0xff #define min(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x < _y ? _x : _y; }) #define max(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x > _y ? 
_x : _y; }) #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) typedef unsigned long uleb128_t; typedef signed long sleb128_t; struct unwind_item { enum item_location { Nowhere, Memory, Register, Value } where; uleb128_t value; }; struct unwind_state { uleb128_t loc, org; const u8 *cieStart, *cieEnd; uleb128_t codeAlign; sleb128_t dataAlign; struct cfa { uleb128_t reg, offs; } cfa; struct unwind_item regs[ARRAY_SIZE(reg_info)]; unsigned stackDepth:8; unsigned version:8; const u8 *label; const u8 *stack[MAX_STACK_DEPTH]; }; static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; uleb128_t value; unsigned shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { cur = end + 1; break; } value |= (uleb128_t)(*cur & 0x7f) << shift; if (!(*cur++ & 0x80)) break; } *pcur = cur; return value; } static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; sleb128_t value; unsigned shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { cur = end + 1; break; } value |= (sleb128_t)(*cur & 0x7f) << shift; if (!(*cur & 0x80)) { value |= -(*cur++ & 0x40) << shift; break; } } *pcur = cur; return value; } static unsigned long read_pointer(const u8 **pLoc, const void *end, signed ptrType) { unsigned long value = 0; union { const u8 *p8; const u16 *p16u; const s16 *p16s; const u32 *p32u; const s32 *p32s; const unsigned long *pul; } ptr; if (ptrType < 0 || ptrType == DW_EH_PE_omit) return 0; ptr.p8 = *pLoc; switch(ptrType & DW_EH_PE_FORM) { case DW_EH_PE_data2: if (end < (const void *)(ptr.p16u + 1)) return 0; if(ptrType & DW_EH_PE_signed) value = get_unaligned(ptr.p16s++); else value = get_unaligned(ptr.p16u++); break; case DW_EH_PE_data4: #ifdef CONFIG_64BIT if (end < 
(const void *)(ptr.p32u + 1)) return 0; if(ptrType & DW_EH_PE_signed) value = get_unaligned(ptr.p32s++); else value = get_unaligned(ptr.p32u++); break; case DW_EH_PE_data8: BUILD_BUG_ON(sizeof(u64) != sizeof(value)); #else BUILD_BUG_ON(sizeof(u32) != sizeof(value)); #endif case DW_EH_PE_native: if (end < (const void *)(ptr.pul + 1)) return 0; value = get_unaligned(ptr.pul++); break; case DW_EH_PE_leb128: BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end) : get_uleb128(&ptr.p8, end); if ((const void *)ptr.p8 > end) return 0; break; default: return 0; } switch(ptrType & DW_EH_PE_ADJUST) { case DW_EH_PE_abs: break; case DW_EH_PE_pcrel: value += (unsigned long)*pLoc; break; default: return 0; } /* TBD if ((ptrType & DW_EH_PE_indirect) && __get_user(value, (unsigned long *)value)) return 0; */ *pLoc = ptr.p8; return value; } static signed fde_pointer_type(const u32 *cie) { const u8 *ptr = (const u8 *)(cie + 2); unsigned version = *ptr; if (version != 1) return -1; /* unsupported */ if (*++ptr) { const char *aug; const u8 *end = (const u8 *)(cie + 1) + *cie; uleb128_t len; /* check if augmentation size is first (and thus present) */ if (*ptr != 'z') return -1; /* check if augmentation string is nul-terminated */ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) return -1; ++ptr; /* skip terminator */ get_uleb128(&ptr, end); /* skip code alignment */ get_sleb128(&ptr, end); /* skip data alignment */ /* skip return address column */ version <= 1 ? 
(void)++ptr : (void)get_uleb128(&ptr, end); len = get_uleb128(&ptr, end); /* augmentation length */ if (ptr + len < ptr || ptr + len > end) return -1; end = ptr + len; while (*++aug) { if (ptr >= end) return -1; switch(*aug) { case 'L': ++ptr; break; case 'P': { signed ptrType = *ptr++; if (!read_pointer(&ptr, end, ptrType) || ptr > end) return -1; } break; case 'R': return *ptr; default: return -1; } } } return DW_EH_PE_native|DW_EH_PE_abs; } static int advance_loc(unsigned long delta, struct unwind_state *state) { state->loc += delta * state->codeAlign; return delta > 0; } static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value, struct unwind_state *state) { if (reg < ARRAY_SIZE(state->regs)) { state->regs[reg].where = where; state->regs[reg].value = value; } } static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, signed ptrType, struct unwind_state *state) { union { const u8 *p8; const u16 *p16; const u32 *p32; } ptr; int result = 1; if (start != state->cieStart) { state->loc = state->org; result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); if (targetLoc == 0 && state->label == NULL) return result; } for (ptr.p8 = start; result && ptr.p8 < end; ) { switch(*ptr.p8 >> 6) { uleb128_t value; case 0: switch(*ptr.p8++) { case DW_CFA_nop: break; case DW_CFA_set_loc: if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0) result = 0; break; case DW_CFA_advance_loc1: result = ptr.p8 < end && advance_loc(*ptr.p8++, state); break; case DW_CFA_advance_loc2: result = ptr.p8 <= end + 2 && advance_loc(*ptr.p16++, state); break; case DW_CFA_advance_loc4: result = ptr.p8 <= end + 4 && advance_loc(*ptr.p32++, state); break; case DW_CFA_offset_extended: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_val_offset: value = get_uleb128(&ptr.p8, end); set_rule(value, Value, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_offset_extended_sf: 
value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, get_sleb128(&ptr.p8, end), state); break; case DW_CFA_val_offset_sf: value = get_uleb128(&ptr.p8, end); set_rule(value, Value, get_sleb128(&ptr.p8, end), state); break; case DW_CFA_restore_extended: case DW_CFA_undefined: case DW_CFA_same_value: set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); break; case DW_CFA_register: value = get_uleb128(&ptr.p8, end); set_rule(value, Register, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_remember_state: if (ptr.p8 == state->label) { state->label = NULL; return 1; } if (state->stackDepth >= MAX_STACK_DEPTH) return 0; state->stack[state->stackDepth++] = ptr.p8; break; case DW_CFA_restore_state: if (state->stackDepth) { const uleb128_t loc = state->loc; const u8 *label = state->label; state->label = state->stack[state->stackDepth - 1]; memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); memset(state->regs, 0, sizeof(state->regs)); state->stackDepth = 0; result = processCFI(start, end, 0, ptrType, state); state->loc = loc; state->label = label; } else return 0; break; case DW_CFA_def_cfa: state->cfa.reg = get_uleb128(&ptr.p8, end); /*nobreak*/ case DW_CFA_def_cfa_offset: state->cfa.offs = get_uleb128(&ptr.p8, end); break; case DW_CFA_def_cfa_sf: state->cfa.reg = get_uleb128(&ptr.p8, end); /*nobreak*/ case DW_CFA_def_cfa_offset_sf: state->cfa.offs = get_sleb128(&ptr.p8, end) * state->dataAlign; break; case DW_CFA_def_cfa_register: state->cfa.reg = get_uleb128(&ptr.p8, end); break; /*todo case DW_CFA_def_cfa_expression: */ /*todo case DW_CFA_expression: */ /*todo case DW_CFA_val_expression: */ case DW_CFA_GNU_args_size: get_uleb128(&ptr.p8, end); break; case DW_CFA_GNU_negative_offset_extended: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, (uleb128_t)0 - get_uleb128(&ptr.p8, end), state); break; case DW_CFA_GNU_window_save: default: result = 0; break; } break; case 1: result = advance_loc(*ptr.p8++ & 0x3f, state); break; case 2: value = *ptr.p8++ & 
0x3f; set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); break; case 3: set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); break; } if (ptr.p8 > end) result = 0; if (result && targetLoc != 0 && targetLoc < state->loc) return 1; } return result && ptr.p8 == end && (targetLoc == 0 || (/*todo While in theory this should apply, gcc in practice omits everything past the function prolog, and hence the location never reaches the end of the function. targetLoc < state->loc &&*/ state->label == NULL)); } /* Unwind to previous to frame. Returns 0 if successful, negative * number in case of an error. */ int unwind(struct unwind_frame_info *frame, int is_ehframe) { #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) const u32 *fde = NULL, *cie = NULL; const u8 *ptr = NULL, *end = NULL; unsigned long startLoc = 0, endLoc = 0, cfa; unsigned i; signed ptrType = -1; uleb128_t retAddrReg = 0; // struct unwind_table *table; void *unwind_table; struct local_unwind_table *table; struct unwind_state state; u64 reg_ptr = 0; if (UNW_PC(frame) == 0) return -EINVAL; if ((table = find_table(UNW_PC(frame)))) { // unsigned long tableSize = unwind_table_size; unsigned long tableSize = table->size; unwind_table = table->address; for (fde = unwind_table; tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { if (!*fde || (*fde & (sizeof(*fde) - 1))) break; if (is_ehframe && !fde[1]) continue; /* this is a CIE */ else if (fde[1] == 0xffffffff) continue; /* this is a CIE */ if ((fde[1] & (sizeof(*fde) - 1)) || fde[1] > (unsigned long)(fde + 1) - (unsigned long)unwind_table) continue; /* this is not a valid FDE */ if (is_ehframe) cie = fde + 1 - fde[1] / sizeof(*fde); else cie = unwind_table + fde[1]; if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) || (*cie & (sizeof(*cie) - 1)) || (cie[1] != 0xffffffff && cie[1]) || (ptrType = fde_pointer_type(cie)) < 0) { cie = NULL; /* this is not a (valid) CIE */ 
continue; } ptr = (const u8 *)(fde + 2); startLoc = read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType); endLoc = startLoc + read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType & DW_EH_PE_indirect ? ptrType : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) break; cie = NULL; } } if (cie != NULL) { memset(&state, 0, sizeof(state)); state.cieEnd = ptr; /* keep here temporarily */ ptr = (const u8 *)(cie + 2); end = (const u8 *)(cie + 1) + *cie; if ((state.version = *ptr) != 1) cie = NULL; /* unsupported version */ else if (*++ptr) { /* check if augmentation size is first (and thus present) */ if (*ptr == 'z') { /* check for ignorable (or already handled) * nul-terminated augmentation string */ while (++ptr < end && *ptr) if (strchr("LPR", *ptr) == NULL) break; } if (ptr >= end || *ptr) cie = NULL; } ++ptr; } if (cie != NULL) { /* get code aligment factor */ state.codeAlign = get_uleb128(&ptr, end); /* get data aligment factor */ state.dataAlign = get_sleb128(&ptr, end); if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) cie = NULL; else { retAddrReg = state.version <= 1 ? 
*ptr++ : get_uleb128(&ptr, end); /* skip augmentation */ if (((const char *)(cie + 2))[1] == 'z') ptr += get_uleb128(&ptr, end); if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info) || REG_INVALID(retAddrReg) || reg_info[retAddrReg].width != sizeof(unsigned long)) cie = NULL; } } if (cie != NULL) { state.cieStart = ptr; ptr = state.cieEnd; state.cieEnd = end; end = (const u8 *)(fde + 1) + *fde; /* skip augmentation */ if (((const char *)(cie + 2))[1] == 'z') { uleb128_t augSize = get_uleb128(&ptr, end); if ((ptr += augSize) > end) fde = NULL; } } if (cie == NULL || fde == NULL) return -ENXIO; state.org = startLoc; memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); /* process instructions */ if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) || state.loc > endLoc || state.regs[retAddrReg].where == Nowhere || state.cfa.reg >= ARRAY_SIZE(reg_info) || reg_info[state.cfa.reg].width != sizeof(unsigned long) || state.cfa.offs % sizeof(unsigned long)) { return -EIO; } /* update frame */ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; startLoc = min((unsigned long)UNW_SP(frame), cfa); endLoc = max((unsigned long)UNW_SP(frame), cfa); if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { startLoc = min(STACK_LIMIT(cfa), cfa); endLoc = max(STACK_LIMIT(cfa), cfa); } #ifndef CONFIG_64BIT # define CASES CASE(8); CASE(16); CASE(32) #else # define CASES CASE(8); CASE(16); CASE(32); CASE(64) #endif for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { if (REG_INVALID(i)) { if (state.regs[i].where == Nowhere) continue; return -EIO; } switch(state.regs[i].where) { default: break; case Register: if (state.regs[i].value >= ARRAY_SIZE(reg_info) || REG_INVALID(state.regs[i].value) || reg_info[i].width > reg_info[state.regs[i].value].width){ return -EIO; } switch(reg_info[state.regs[i].value].width) { #define CASE(n) \ case sizeof(u##n): \ state.regs[i].value = FRAME_REG(state.regs[i].value, \ const u##n); \ break CASES; #undef CASE default: return -EIO; } break; } } for (i = 0; 
i < ARRAY_SIZE(state.regs); ++i) { if (REG_INVALID(i)) continue; switch(state.regs[i].where) { case Nowhere: if (reg_info[i].width != sizeof(UNW_SP(frame)) || &FRAME_REG(i, __typeof__(UNW_SP(frame))) != &UNW_SP(frame)) continue; UNW_SP(frame) = cfa; break; case Register: switch(reg_info[i].width) { #define CASE(n) case sizeof(u##n): \ FRAME_REG(i, u##n) = state.regs[i].value; \ break CASES; #undef CASE default: return -EIO; } break; case Value: if (reg_info[i].width != sizeof(unsigned long)){ return -EIO;} FRAME_REG(i, unsigned long) = cfa + state.regs[i].value * state.dataAlign; break; case Memory: { unsigned long addr = cfa + state.regs[i].value * state.dataAlign; if ((state.regs[i].value * state.dataAlign) % sizeof(unsigned long) || addr < startLoc || addr + sizeof(unsigned long) < addr || addr + sizeof(unsigned long) > endLoc){ return -EIO;} switch(reg_info[i].width) { #define CASE(n) case sizeof(u##n): \ readmem(addr, KVADDR, ®_ptr,sizeof(u##n), "register", RETURN_ON_ERROR|QUIET); \ FRAME_REG(i, u##n) = (u##n)reg_ptr;\ break CASES; #undef CASE default: return -EIO; } } break; } } return 0; #undef CASES #undef FRAME_REG } /* * Initialize the unwind table(s) in the best-case order: * * 1. Use the in-memory kernel and module unwind tables. * 2. Use the in-memory kernel-only .eh_frame data. (possible?) * 3. Use the kernel-only .eh_frame data from the vmlinux file. 
*/ void init_unwind_table(void) { ulong unwind_table_size; void *unwind_table; kt->flags &= ~DWARF_UNWIND; if (gather_in_memory_unwind_tables()) { if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY (%d tables)\n", unwind_tables_cnt); kt->flags |= DWARF_UNWIND_MEMORY; if (unwind_tables_cnt > 1) kt->flags |= DWARF_UNWIND_MODULES; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; return; } if (symbol_exists("__start_unwind") && symbol_exists("__end_unwind")) { unwind_table_size = symbol_value("__end_unwind") - symbol_value("__start_unwind"); if (!(unwind_table = malloc(unwind_table_size))) { error(WARNING, "cannot malloc unwind table space\n"); goto try_eh_frame; } if (!readmem(symbol_value("__start_unwind"), KVADDR, unwind_table, unwind_table_size, "unwind table", RETURN_ON_ERROR)) { error(WARNING, "cannot read unwind table data\n"); free(unwind_table); goto try_eh_frame; } kt->flags |= DWARF_UNWIND_MEMORY; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; default_unwind_table.size = unwind_table_size; default_unwind_table.address = unwind_table; if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY\n"); return; } try_eh_frame: if (st->dwarf_eh_frame_size || st->dwarf_debug_frame_size) { int fd; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); unwind_table_size = is_ehframe ? st->dwarf_eh_frame_size : st->dwarf_debug_frame_size; if (!(unwind_table = malloc(unwind_table_size))) { error(WARNING, "cannot malloc unwind table space\n"); return; } if ((fd = open(pc->namelist, O_RDONLY)) < 0) { error(WARNING, "cannot open %s for %s data\n", pc->namelist, is_ehframe ? 
".eh_frame" : ".debug_frame"); free(unwind_table); return; } if (is_ehframe) lseek(fd, st->dwarf_eh_frame_file_offset, SEEK_SET); else lseek(fd, st->dwarf_debug_frame_file_offset, SEEK_SET); if (read(fd, unwind_table, unwind_table_size) != unwind_table_size) { if (CRASHDEBUG(1)) error(WARNING, "cannot read %s data from %s\n", is_ehframe ? ".eh_frame" : ".debug_frame", pc->namelist); free(unwind_table); close(fd); return; } close(fd); default_unwind_table.size = unwind_table_size; default_unwind_table.address = unwind_table; kt->flags |= DWARF_UNWIND_EH_FRAME; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_EH_FRAME\n"); return; } } /* * Find the appropriate kernel-only "root_table" unwind_table, * and pass it to populate_local_tables() to do the heavy lifting. */ static int gather_in_memory_unwind_tables(void) { int i, cnt, found; struct syment *sp, *root_tables[10]; char *root_table_buf; char buf[BUFSIZE]; ulong name; STRUCT_SIZE_INIT(unwind_table, "unwind_table"); MEMBER_OFFSET_INIT(unwind_table_core, "unwind_table", "core"); MEMBER_OFFSET_INIT(unwind_table_init, "unwind_table", "init"); MEMBER_OFFSET_INIT(unwind_table_address, "unwind_table", "address"); MEMBER_OFFSET_INIT(unwind_table_size, "unwind_table", "size"); MEMBER_OFFSET_INIT(unwind_table_link, "unwind_table", "link"); MEMBER_OFFSET_INIT(unwind_table_name, "unwind_table", "name"); if (INVALID_SIZE(unwind_table) || INVALID_MEMBER(unwind_table_core) || INVALID_MEMBER(unwind_table_init) || INVALID_MEMBER(unwind_table_address) || INVALID_MEMBER(unwind_table_size) || INVALID_MEMBER(unwind_table_link) || INVALID_MEMBER(unwind_table_name)) { if (CRASHDEBUG(1)) error(NOTE, "unwind_table structure has changed, or does not exist in this kernel\n"); return 0; } /* * Unfortunately there are two kernel root_table symbols. 
*/ if (!(cnt = get_syment_array("root_table", root_tables, 10))) return 0; root_table_buf = GETBUF(SIZE(unwind_table)); for (i = found = 0; i < cnt; i++) { sp = root_tables[i]; if (!readmem(sp->value, KVADDR, root_table_buf, SIZE(unwind_table), "root unwind_table", RETURN_ON_ERROR|QUIET)) goto gather_failed; name = ULONG(root_table_buf + OFFSET(unwind_table_name)); if (read_string(name, buf, strlen("kernel")+1) && STREQ("kernel", buf)) { found++; if (CRASHDEBUG(1)) fprintf(fp, "root_table name: %lx [%s]\n", name, buf); break; } } if (!found) goto gather_failed; cnt = populate_local_tables(sp->value, root_table_buf); FREEBUF(root_table_buf); return cnt; gather_failed: FREEBUF(root_table_buf); return 0; } /* * Transfer the relevant data from the kernel and module unwind_table * structures to the local_unwind_table structures. */ static int populate_local_tables(ulong root, char *buf) { struct list_data list_data, *ld; int i, cnt; ulong *table_list; ulong vaddr; struct local_unwind_table *tp; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->start = root; ld->member_offset = OFFSET(unwind_table_link); ld->flags = RETURN_ON_LIST_ERROR; if (CRASHDEBUG(1)) ld->flags |= VERBOSE; hq_open(); cnt = do_list(ld); if (cnt == -1) { error(WARNING, "UNWIND: failed to gather unwind_table list"); return 0; } table_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(table_list, cnt); hq_close(); if (!(local_unwind_tables = malloc(sizeof(struct local_unwind_table) * cnt))) { error(WARNING, "cannot malloc unwind_table space (%d tables)\n", cnt); FREEBUF(table_list); return 0; } for (i = 0; i < cnt; i++, tp++) { if (!readmem(table_list[i], KVADDR, buf, SIZE(unwind_table), "unwind_table", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read unwind_table\n"); goto failed; } tp = &local_unwind_tables[i]; /* * Copy the required table info for find_table(). 
*/ BCOPY(buf + OFFSET(unwind_table_core), (char *)&tp->core.pc, sizeof(ulong)*2); BCOPY(buf + OFFSET(unwind_table_init), (char *)&tp->init.pc, sizeof(ulong)*2); BCOPY(buf + OFFSET(unwind_table_size), (char *)&tp->size, sizeof(ulong)); /* * Then read the DWARF CFI data. */ vaddr = ULONG(buf + OFFSET(unwind_table_address)); if (!(tp->address = malloc(tp->size))) { error(WARNING, "cannot malloc unwind_table space\n"); goto failed; break; } if (!readmem(vaddr, KVADDR, tp->address, tp->size, "DWARF CFI data", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read unwind_table data\n"); goto failed; } } unwind_tables_cnt = cnt; if (CRASHDEBUG(7)) dump_local_unwind_tables(); failed: FREEBUF(table_list); return unwind_tables_cnt; } /* * Find the unwind_table containing a pc. */ static struct local_unwind_table * find_table(unsigned long pc) { int i; struct local_unwind_table *tp, *table; table = &default_unwind_table; for (i = 0; i < unwind_tables_cnt; i++, tp++) { tp = &local_unwind_tables[i]; if ((pc >= tp->core.pc && pc < tp->core.pc + tp->core.range) || (pc >= tp->init.pc && pc < tp->init.pc + tp->init.range)) { table = tp; break; } } return table; } static void dump_local_unwind_tables(void) { int i, others; struct local_unwind_table *tp; others = 0; fprintf(fp, "DWARF flags: ("); if (kt->flags & DWARF_UNWIND) fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & NO_DWARF_UNWIND) fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MEMORY) fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_EH_FRAME) fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MODULES) fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? 
"|" : ""); fprintf(fp, ")\n\n"); fprintf(fp, "default_unwind_table:\n"); fprintf(fp, " address: %lx\n", (ulong)default_unwind_table.address); fprintf(fp, " size: %ld\n\n", (ulong)default_unwind_table.size); fprintf(fp, "local_unwind_tables[%d]:\n", unwind_tables_cnt); for (i = 0; i < unwind_tables_cnt; i++, tp++) { tp = &local_unwind_tables[i]; fprintf(fp, "[%d]\n", i); fprintf(fp, " core: pc: %lx\n", tp->core.pc); fprintf(fp, " range: %ld\n", tp->core.range); fprintf(fp, " init: pc: %lx\n", tp->init.pc); fprintf(fp, " range: %ld\n", tp->init.range); fprintf(fp, " address: %lx\n", (ulong)tp->address); fprintf(fp, " size: %ld\n", tp->size); } } int dwarf_backtrace(struct bt_info *bt, int level, ulong stacktop) { unsigned long bp, offset; struct syment *sp; char *name; struct unwind_frame_info *frame; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); // frame->regs.rsp = bt->stkptr; // frame->regs.rip = bt->instptr; UNW_SP(frame) = bt->stkptr; UNW_PC(frame) = bt->instptr; /* read rbp from stack for non active tasks */ if (!(bt->flags & BT_DUMPFILE_SEARCH) && !bt->bptr) { // readmem(frame->regs.rsp, KVADDR, &bp, readmem(UNW_SP(frame), KVADDR, &bp, sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); frame->regs.rbp = bp; /* fixme for x86 */ } sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); goto bailout; } /* * If offset is zero, it means we have crossed over to the next * function. 
Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); if (CRASHDEBUG(2)) fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), UNW_PC(frame), frame->regs.rbp); while ((UNW_SP(frame) < stacktop) && !unwind(frame, is_ehframe) && UNW_PC(frame)) { /* To prevent rip pushed on IRQ stack being reported both * both on the IRQ and process stacks */ if ((bt->flags & BT_IRQSTACK) && (UNW_SP(frame) >= stacktop - 16)) break; level++; sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); break; } /* * If offset is zero, it means we have crossed over to the next * function. Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, "%s#%d [%016lx] %s at %016lx \n", level < 10 ? " " : "", level, UNW_SP(frame), name, UNW_PC(frame)); if (CRASHDEBUG(2)) fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), UNW_PC(frame), frame->regs.rbp); } bailout: FREEBUF(frame); return ++level; } int dwarf_print_stack_entry(struct bt_info *bt, int level) { unsigned long offset; struct syment *sp; char *name; struct unwind_frame_info *frame; frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); UNW_SP(frame) = bt->stkptr; UNW_PC(frame) = bt->instptr; sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); goto bailout; } /* * If offset is zero, it means we have crossed over to the next * function. 
Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); bailout: FREEBUF(frame); return level; } void dwarf_debug(struct bt_info *bt) { struct unwind_frame_info *frame; ulong bp; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); if (!bt->hp->eip) { dump_local_unwind_tables(); return; } if (!(kt->flags & DWARF_UNWIND_CAPABLE)) { error(INFO, "not DWARF capable\n"); return; } frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); /* * XXX: This only works for the first PC/SP pair seen in a normal * backtrace, so it's not particularly helpful. Ideally it should * be capable to take any PC/SP pair in a stack, but it appears to * related to the rbp value. */ UNW_PC(frame) = bt->hp->eip; UNW_SP(frame) = bt->hp->esp; readmem(UNW_SP(frame), KVADDR, &bp, sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); frame->regs.rbp = bp; /* fixme for x86 */ unwind(frame, is_ehframe); fprintf(fp, "frame size: %lx (%lx)\n", (ulong)UNW_SP(frame), (ulong)UNW_SP(frame) - bt->hp->esp); FREEBUF(frame); } #endif crash-utility-crash-9cd43f5/.github/0000775000372000037200000000000015107550337016734 5ustar juerghjuerghcrash-utility-crash-9cd43f5/.github/workflows/0000775000372000037200000000000015107550337020771 5ustar juerghjuerghcrash-utility-crash-9cd43f5/.github/workflows/ci-test.yml0000664000372000037200000000204715107550337023067 0ustar juerghjuerghname: Testing Farm CI on: schedule: - cron: '10 2 * * 1' jobs: cron_schedule: name: Testing Farm CI (${{ matrix.arch }} ${{ matrix.compose }}) runs-on: ubuntu-latest timeout-minutes: 90 strategy: fail-fast: false matrix: arch: [x86_64] compose: [Fedora-Rawhide] steps: - name: Checkout repository uses: actions/checkout@v4 - 
name: Checking TF_API_KEY run: | if [ -z "${{ secrets.TF_API_KEY }}" ]; then echo "TF_API_KEY is empty!" exit 1 else echo "TF_API_KEY is set!" fi - name: Schedule tests on Testing Farm id: testing-farm uses: sclorg/testing-farm-as-github-action@v4 with: api_key: ${{ secrets.TF_API_KEY }} git_url: https://github.com/crash-utility/crash.git tf_scope: private git_ref: master tmt_path: ci-tests tmt_plan_regex: local$ compose: ${{ matrix.compose }} arch: ${{ matrix.arch }} crash-utility-crash-9cd43f5/.github/workflows/pr-closer.yml0000664000372000037200000000072415107550337023425 0ustar juerghjuerghname: Close Pull Request on: pull_request_target: types: [opened, reopened] jobs: run: runs-on: ubuntu-latest steps: - uses: superbrothers/close-pull-request@v3 with: comment: "The github repo does not accept PRs, please subscribe to mail list via https://lists.crash-utility.osci.io/admin/lists/devel.lists.crash-utility.osci.io/ for contribution and discussion. Or post your patch to mail list: devel@lists.crash-utility.osci.io" crash-utility-crash-9cd43f5/.github/workflows/ci-build.yml0000664000372000037200000000535015107550337023207 0ustar juerghjuerghname: Build on: push: branches: [ "master" ] jobs: build: name: Build runs-on: ubuntu-24.04 strategy: matrix: arch: - x86_64 - aarch64 - s390x - powerpc64 - x86 - riscv64 - mips - alpha - sparc64 steps: - name: Checkout uses: actions/checkout@v4 - name: Set Environment env: ARCH: ${{ matrix.arch }} run: | case $ARCH in x86) GNU_ARCH="i686-linux-gnu" ;; x86_64) CROSS_COMPILER_PKG="gcc-x86-64-linux-gnu" CROSS_COMPILER_PKG+=" g++-x86-64-linux-gnu" GNU_ARCH="$ARCH-linux-gnu" ;; *) GNU_ARCH="$ARCH-linux-gnu" ;; esac if [ -n "$GNU_ARCH" ]; then if [ -z "$CROSS_COMPILER_PKG" ]; then CROSS_COMPILER_PKG="gcc-$GNU_ARCH" CROSS_COMPILER_PKG+=" g++-$GNU_ARCH" fi EXTRA_PKGS+=" $CROSS_COMPILER_PKG" CROSS_COMPILE="$GNU_ARCH" fi echo "EXTRA_PKGS=$EXTRA_PKGS" >> $GITHUB_ENV echo "CROSS_COMPILE=$CROSS_COMPILE" >> $GITHUB_ENV - name: Install deps 
for crash-utility building run: | sudo apt-get update sudo apt-get install make gcc g++ bison flex texinfo wget patch tar build-essential libc-dev autoconf automake libncurses-dev - name: Install corss compile tool pkgs if: env.EXTRA_PKGS != '' run: | sudo apt-get update sudo apt-get -q=2 install ${{ env.EXTRA_PKGS }} - name: Cross compile gmp lib run: | mkdir $GITHUB_WORKSPACE/libtools wget https://gcc.gnu.org/pub/gcc/infrastructure/gmp-6.2.1.tar.bz2 tar -jxvf gmp-6.2.1.tar.bz2 cd gmp-6.2.1 ./configure --host=${{ env.CROSS_COMPILE }} --prefix=$GITHUB_WORKSPACE/libtools make -j`nproc` sudo make install cd .. - name: Cross compile mpfr lib run: | wget https://gcc.gnu.org/pub/gcc/infrastructure/mpfr-4.1.0.tar.bz2 tar -jxvf mpfr-4.1.0.tar.bz2 cd mpfr-4.1.0 ./configure --host=${{ env.CROSS_COMPILE }} --prefix=$GITHUB_WORKSPACE/libtools --with-gmp=$GITHUB_WORKSPACE/libtools make -j`nproc` sudo make install cd .. - name: Cross compile crash-utility run: | sudo cp $GITHUB_WORKSPACE/libtools/include/* /usr/include/ sudo cp -r $GITHUB_WORKSPACE/libtools/lib/* /usr/lib/ make CROSS_COMPILE=${{ env.CROSS_COMPILE }}- -j`nproc` warn - name: Checking and Clean up run: | echo "Run command: file crash ..." file crash echo "Clean up for compiling ..." make CROSS_COMPILE=${{ env.CROSS_COMPILE }}- -j`nproc` clean crash-utility-crash-9cd43f5/xendump.h0000664000372000037200000001174715107550337017237 0ustar juerghjuergh/* * xendump.h * * Copyright (C) 2006, 2007, 2009, 2010, 2014 David Anderson * Copyright (C) 2006, 2007, 2009, 2010, 2014 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include #include #define XC_SAVE_SIGNATURE "LinuxGuestRecord" #define XC_CORE_MAGIC 0xF00FEBED #define XC_CORE_MAGIC_HVM 0xF00FEBEE /* * From xenctrl.h, but probably not on most host machines. */ typedef struct xc_core_header { unsigned int xch_magic; unsigned int xch_nr_vcpus; unsigned int xch_nr_pages; unsigned int xch_ctxt_offset; unsigned int xch_index_offset; unsigned int xch_pages_offset; } xc_core_header_t; /* * Based upon the original xensource xc_core_header struct above, * but with unsigned long offset values so that it can be used * with the original dumpfile format and new ELF-style format. */ struct xen_core_header { unsigned int xch_magic; unsigned int xch_nr_vcpus; unsigned int xch_nr_pages; off_t xch_ctxt_offset; off_t xch_index_offset; off_t xch_pages_offset; }; struct pfn_offset_cache { off_t file_offset; ulong pfn; ulong cnt; }; #define PFN_TO_OFFSET_CACHE_ENTRIES (5000) struct elf_index_pfn { ulong index; ulong pfn; }; #define INDEX_PFN_COUNT (128) struct last_batch { ulong index; ulong start; ulong end; ulong accesses; ulong duplicates; }; struct xendump_data { ulong flags; /* XENDUMP_LOCAL, plus anything else... 
*/ int xfd; int pc_next; uint page_size; FILE *ofp; char *page; ulong accesses; ulong cache_hits; ulong redundant; ulong last_pfn; struct pfn_offset_cache *poc; struct xc_core_data { int p2m_frames; ulong *p2m_frame_index_list; struct xen_core_header header; int elf_class; uint64_t format_version; off_t elf_strtab_offset; off_t shared_info_offset; off_t ia64_mapped_regs_offset; struct elf_index_pfn elf_index_pfn[INDEX_PFN_COUNT]; struct last_batch last_batch; Elf32_Ehdr *elf32; Elf64_Ehdr *elf64; } xc_core; struct xc_save_data { ulong nr_pfns; int vmconfig_size; char *vmconfig_buf; ulong *p2m_frame_list; uint pfns_not; off_t pfns_not_offset; off_t vcpu_ctxt_offset; off_t shared_info_page_offset; off_t *batch_offsets; ulong batch_count; ulong *region_pfn_type; ulong ia64_version; ulong *ia64_page_offsets; } xc_save; ulong panic_pc; ulong panic_sp; }; #define XC_SAVE (XENDUMP_LOCAL << 1) #define XC_CORE_ORIG (XENDUMP_LOCAL << 2) #define XC_CORE_P2M_CREATE (XENDUMP_LOCAL << 3) #define XC_CORE_PFN_CREATE (XENDUMP_LOCAL << 4) #define XC_CORE_NO_P2M (XENDUMP_LOCAL << 5) #define XC_SAVE_IA64 (XENDUMP_LOCAL << 6) #define XC_CORE_64BIT_HOST (XENDUMP_LOCAL << 7) #define XC_CORE_ELF (XENDUMP_LOCAL << 8) #define MACHINE_BYTE_ORDER() \ (machine_type("X86") || \ machine_type("X86_64") || \ machine_type("IA64") ? __LITTLE_ENDIAN : __BIG_ENDIAN) #define BYTE_SWAP_REQUIRED(endian) (endian != MACHINE_BYTE_ORDER()) static inline uint32_t swab32(uint32_t x) { return (((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) | ((x & 0x00ff0000U) >> 8) | ((x & 0xff000000U) >> 24)); } #define MFN_NOT_FOUND (-1) #define PFN_NOT_FOUND (-1) #define INVALID_MFN (~0UL) /* * ia64 "xm save" format is completely different than the others. */ typedef struct xen_domctl_arch_setup { uint64_t flags; /* XEN_DOMAINSETUP_* */ /* #ifdef __ia64__ */ uint64_t bp; /* mpaddr of boot param area */ uint64_t maxmem; /* Highest memory address for MDT. */ uint64_t xsi_va; /* Xen shared_info area virtual address. 
*/ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */ /* #endif */ } xen_domctl_arch_setup_t; /* * xc_core ELF note, which differs from the standard Elf[32|64]_Nhdr * structure by the additional name field. */ struct elfnote { uint32_t namesz; uint32_t descsz; uint32_t type; char name[4]; }; #define XEN_ELFNOTE_DUMPCORE_NONE 0x2000000 #define XEN_ELFNOTE_DUMPCORE_HEADER 0x2000001 #define XEN_ELFNOTE_DUMPCORE_XEN_VERSION 0x2000002 #define XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION 0x2000003 struct xen_dumpcore_elfnote_header_desc { uint64_t xch_magic; uint64_t xch_nr_vcpus; uint64_t xch_nr_pages; uint64_t xch_page_size; }; #define FORMAT_VERSION_0000000000000001 0x0000000000000001ULL struct xen_dumpcore_elfnote_format_version_desc { uint64_t version; }; struct xen_dumpcore_p2m { uint64_t pfn; uint64_t gmfn; }; extern struct xendump_data *xd; crash-utility-crash-9cd43f5/riscv64.c0000664000372000037200000015305215107550337017046 0ustar juerghjuergh/* riscv64.c - core analysis suite * * Copyright (C) 2022 Alibaba Group Holding Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #ifdef RISCV64 #include #include static ulong riscv64_get_page_size(void); static int riscv64_vtop_3level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose); static int riscv64_vtop_4level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose); static int riscv64_vtop_5level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose); static void riscv64_page_type_init(void); static int riscv64_is_kvaddr(ulong vaddr); static int riscv64_is_uvaddr(ulong vaddr, struct task_context *tc); static int riscv64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose); static int riscv64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose); static void riscv64_cmd_mach(void); static void riscv64_irq_stack_init(void); static void riscv64_overflow_stack_init(void); static void riscv64_stackframe_init(void); static void riscv64_back_trace_cmd(struct bt_info *bt); static int riscv64_eframe_search(struct bt_info *bt); static int riscv64_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp); static void riscv64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp); static int riscv64_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp); static void riscv64_display_full_frame(struct bt_info *bt, struct riscv64_unwind_frame *current, struct riscv64_unwind_frame *previous); static int riscv64_translate_pte(ulong, void *, ulonglong); static int riscv64_init_active_task_regs(void); static int riscv64_get_crash_notes(void); static int riscv64_get_elf_notes(void); static void riscv64_get_va_range(struct machine_specific *ms); static void riscv64_get_va_bits(struct machine_specific *ms); static void riscv64_get_struct_page_size(struct machine_specific *ms); static void riscv64_print_exception_frame(struct bt_info *, ulong , int ); static int riscv64_is_kernel_exception_frame(struct bt_info *, ulong ); static int riscv64_on_irq_stack(int , ulong); static int riscv64_on_process_stack(struct 
bt_info *, ulong ); static void riscv64_set_process_stack(struct bt_info *); static void riscv64_set_irq_stack(struct bt_info *); static int riscv64_on_overflow_stack(int, ulong); static void riscv64_set_overflow_stack(struct bt_info *); #define REG_FMT "%016lx" #define SZ_2G 0x80000000 #define USER_MODE (0) #define KERNEL_MODE (1) /* * Holds registers during the crash. */ static struct riscv64_register *panic_task_regs; /* from arch/riscv/include/asm/stacktrace.h */ struct stackframe { ulong fp; ulong ra; }; static struct machine_specific riscv64_machine_specific = { ._page_present = (1 << 0), ._page_read = (1 << 1), ._page_write = (1 << 2), ._page_exec = (1 << 3), ._page_user = (1 << 4), ._page_global = (1 << 5), ._page_accessed = (1 << 6), ._page_dirty = (1 << 7), ._page_soft = (1 << 8), .va_bits = 0, .struct_page_size = 0, }; static void pt_level_alloc(char **lvl, char *name) { size_t sz = PAGESIZE(); void *pointer = malloc(sz); if (!pointer) error(FATAL, name); *lvl = pointer; } static ulong riscv64_get_page_size(void) { return memory_page_size(); } static ulong riscv64_vmalloc_start(void) { return ((ulong)VMALLOC_START); } /* Get the size of struct page {} */ static void riscv64_get_struct_page_size(struct machine_specific *ms) { char *string; string = pc->read_vmcoreinfo("SIZE(page)"); if (string) { ms->struct_page_size = atol(string); free(string); } } /* * "mach" command output. 
*/ static void riscv64_display_machine_stats(void) { int i, pad; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, " KERNEL VIRTUAL BASE: %lx\n", machdep->machspec->page_offset); fprintf(fp, " KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); fprintf(fp, " KERNEL VMALLOC BASE: %lx\n", machdep->machspec->vmalloc_start_addr); fprintf(fp, " KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (machdep->machspec->irq_stack_size) { fprintf(fp, " IRQ STACK SIZE: %ld\n", machdep->machspec->irq_stack_size); fprintf(fp, " IRQ STACKS:\n"); for (i = 0; i < kt->cpus; i++) { pad = (i < 10) ? 3 : (i < 100) ? 2 : (i < 1000) ? 1 : 0; fprintf(fp, "%s CPU %d: %lx\n", space(pad), i, machdep->machspec->irq_stacks[i]); } } if (machdep->machspec->overflow_stack_size) { fprintf(fp, "OVERFLOW STACK SIZE: %ld\n", machdep->machspec->overflow_stack_size); fprintf(fp, " OVERFLOW STACKS:\n"); for (i = 0; i < kt->cpus; i++) { pad = (i < 10) ? 3 : (i < 100) ? 2 : (i < 1000) ? 1 : 0; fprintf(fp, "%s CPU %d: %lx\n", space(pad), i, machdep->machspec->overflow_stacks[i]); } } } static void riscv64_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cmo")) != EOF) { switch (c) { case 'c': case 'm': case 'o': option_not_supported(c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); riscv64_display_machine_stats(); } /* * Accept or reject a symbol from the kernel namelist. 
*/ static int riscv64_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (!(machdep->flags & KSYMS_START)) { if (STREQ(name, "_text") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; return (name && strlen(name) && !STRNEQ(name, "__func__.") && !STRNEQ(name, "__crc_")); } return TRUE; } void riscv64_dump_machdep_table(ulong arg) { const struct machine_specific *ms = machdep->machspec; int others = 0, i = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & IRQ_STACKS) fprintf(fp, "%sIRQ_STACKS", others++ ? "|" : ""); if (machdep->flags & OVERFLOW_STACKS) fprintf(fp, "%sOVERFLOW_STACKS", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " pgdir_shift: %ld\n", machdep->machspec->va_bits - 9); fprintf(fp, " ptrs_per_pgd: %u\n", PTRS_PER_PGD); fprintf(fp, " ptrs_per_pte: %d\n", PTRS_PER_PTE); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " back_trace: riscv64_back_trace_cmd()\n"); fprintf(fp, " eframe_search: riscv64_eframe_search()\n"); fprintf(fp, " processor_speed: riscv64_processor_speed()\n"); fprintf(fp, " uvtop: riscv64_uvtop()\n"); fprintf(fp, " kvtop: riscv64_kvtop()\n"); fprintf(fp, " get_stack_frame: riscv64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " 
translate_pte: riscv64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: riscv64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: riscv64_is_task_addr()\n"); fprintf(fp, " verify_symbol: riscv64_verify_symbol()\n"); fprintf(fp, " dis_filter: generic_dis_filter()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " cmd_mach: riscv64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: riscv64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: riscv64_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: riscv64_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: NULL\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_p4d_read: %lx\n", machdep->machspec->last_p4d_read); fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " p4d: %lx\n", (ulong)machdep->machspec->p4d); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); if (machdep->flags & IRQ_STACKS) { fprintf(fp, " irq_stack_size: %ld\n", ms->irq_stack_size); for (i = 0; i < kt->cpus; i++) fprintf(fp, " irq_stacks[%d]: %lx\n", i, ms->irq_stacks[i]); } else { fprintf(fp, " irq_stack_size: (unused)\n"); 
fprintf(fp, " irq_stacks: (unused)\n"); } if (machdep->flags & OVERFLOW_STACKS) { fprintf(fp, " overflow_stack_size: %ld\n", ms->overflow_stack_size); for (i = 0; i < kt->cpus; i++) fprintf(fp, " overflow_stacks[%d]: %lx\n", i, ms->overflow_stacks[i]); } else { fprintf(fp, " overflow_stack_size: (unused)\n"); fprintf(fp, " overflow_stacks: (unused)\n"); } } static ulong riscv64_processor_speed(void) { /* TODO: */ return 0; } static unsigned long riscv64_get_kernel_version(void) { char *string; if (THIS_KERNEL_VERSION) return THIS_KERNEL_VERSION; if ((string = pc->read_vmcoreinfo("OSRELEASE"))) { parse_kernel_version(string); free(string); } return THIS_KERNEL_VERSION; } static void riscv64_get_phys_ram_base(struct machine_specific *ms) { unsigned long kernel_version = riscv64_get_kernel_version(); /* * phys_ram_base is defined in Linux kernel since 5.14. */ if (kernel_version >= LINUX(5,14,0)) { char *string; if ((string = pc->read_vmcoreinfo("NUMBER(phys_ram_base)"))) { ms->phys_base = atol(string); free(string); } else error(FATAL, "cannot read phys_ram_base\n"); } else /* * For qemu rv64 env and hardware platform, default phys base * may different, eg, * hardware platform: 0x200000 * qemu rv64 env: 0x80200000 * * But we only can set one default value, in this case, qemu * rv64 env may can't work. */ ms->phys_base = 0x200000; } static void riscv64_get_va_bits(struct machine_specific *ms) { unsigned long kernel_version = riscv64_get_kernel_version(); /* * VA_BITS is defined in Linux kernel since 5.17. So we use the * default va bits 39 when Linux version < 5.17. 
*/ if (kernel_version >= LINUX(5,17,0)) { char *string; if ((string = pc->read_vmcoreinfo("NUMBER(VA_BITS)"))) { ms->va_bits = atol(string); free(string); } } else ms->va_bits = 39; } static void riscv64_get_va_range(struct machine_specific *ms) { unsigned long kernel_version = riscv64_get_kernel_version(); char *string; if ((string = pc->read_vmcoreinfo("NUMBER(PAGE_OFFSET)"))) { ms->page_offset = htol(string, QUIET, NULL); free(string); } else goto error; if ((string = pc->read_vmcoreinfo("NUMBER(VMALLOC_START)"))) { ms->vmalloc_start_addr = htol(string, QUIET, NULL); free(string); } else goto error; if ((string = pc->read_vmcoreinfo("NUMBER(VMALLOC_END)"))) { ms->vmalloc_end = htol(string, QUIET, NULL); free(string); } else goto error; if ((string = pc->read_vmcoreinfo("NUMBER(VMEMMAP_START)"))) { ms->vmemmap_vaddr = htol(string, QUIET, NULL); free(string); } else goto error; if ((string = pc->read_vmcoreinfo("NUMBER(VMEMMAP_END)"))) { ms->vmemmap_end = htol(string, QUIET, NULL); free(string); } else goto error; if ((string = pc->read_vmcoreinfo("NUMBER(KERNEL_LINK_ADDR)"))) { ms->kernel_link_addr = htol(string, QUIET, NULL); free(string); } else goto error; if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) ms->kernel_link_addr += (kt->relocate * -1); /* * From Linux 5.13, the kernel mapping is moved to the last 2GB * of the address space, modules use the 2GB memory range right * before the kernel. Before Linux 5.13, modules area is embedded * in vmalloc area. 
* */ if (kernel_version >= LINUX(5,13,0)) { if ((string = pc->read_vmcoreinfo("NUMBER(MODULES_VADDR)"))) { ms->modules_vaddr = htol(string, QUIET, NULL); free(string); } else goto error; if ((string = pc->read_vmcoreinfo("NUMBER(MODULES_END)"))) { ms->modules_end = htol(string, QUIET, NULL); free(string); } else goto error; } else { ms->modules_vaddr = ms->vmalloc_start_addr; ms->modules_end = ms->vmalloc_end; } if (CRASHDEBUG(1)) { fprintf(fp, "vmemmap : 0x%lx - 0x%lx\n", ms->vmemmap_vaddr, ms->vmemmap_end); fprintf(fp, "vmalloc : 0x%lx - 0x%lx\n", ms->vmalloc_start_addr, ms->vmalloc_end); fprintf(fp, "mudules : 0x%lx - 0x%lx\n", ms->modules_vaddr, ms->modules_end); fprintf(fp, "lowmem : 0x%lx -\n", ms->page_offset); fprintf(fp, "kernel link addr : 0x%lx\n", ms->kernel_link_addr); } return; error: error(FATAL, "cannot get vm layout\n"); } static void riscv64_get_va_kernel_pa_offset(struct machine_specific *ms) { unsigned long kernel_version = riscv64_get_kernel_version(); /* * Since Linux v6.4 phys_base is not the physical start of the kernel, * trying to use "va_kernel_pa_offset" to determine the offset between * kernel virtual and physical addresses. 
*/ if (kernel_version >= LINUX(6,4,0)) { char *string; if ((string = pc->read_vmcoreinfo("NUMBER(va_kernel_pa_offset)"))) { ms->va_kernel_pa_offset = htol(string, QUIET, NULL); free(string); } else error(FATAL, "cannot read va_kernel_pa_offset\n"); } else ms->va_kernel_pa_offset = ms->kernel_link_addr - ms->phys_base; } static int riscv64_is_kvaddr(ulong vaddr) { if (IS_VMALLOC_ADDR(vaddr)) return TRUE; return (vaddr >= machdep->kvbase); } static int riscv64_is_uvaddr(ulong vaddr, struct task_context *unused) { if (IS_VMALLOC_ADDR(vaddr)) return FALSE; return (vaddr < machdep->kvbase); } static int riscv64_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } static int riscv64_get_smp_cpus(void) { return (get_cpus_present() > 0) ? get_cpus_present() : kt->cpus; } /* * Include both vmalloc'd and module address space as VMALLOC space. */ int riscv64_IS_VMALLOC_ADDR(ulong vaddr) { return ((vaddr >= VMALLOC_START && vaddr <= VMALLOC_END) || (vaddr >= VMEMMAP_VADDR && vaddr <= VMEMMAP_END) || (vaddr >= MODULES_VADDR && vaddr <= MODULES_END)); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. 
 */
static int
riscv64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char buf[BUFSIZE];
	int page_present;
	int len1, len2, others;
	ulong paddr;

	/* physical address encoded in the PFN field of the PTE */
	paddr = PTOB(pte >> _PAGE_PFN_SHIFT);
	page_present = !!(pte & _PAGE_PRESENT);

	/* quiet mode: just hand back the physical address */
	if (physaddr) {
		*(ulong *)physaddr = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE"));

	if (!page_present)
		return page_present;

	sprintf(physbuf, "%lx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER | RJUST, NULL),
		mkstring(physbuf, len2, CENTER | RJUST, NULL));
	fprintf(fp, "(");
	others = 0;

	/* print "|"-separated names of all protection bits set in the PTE */
#define CHECK_PAGE_FLAG(flag) \
	if ((_PAGE_##flag) && (pte & _PAGE_##flag)) \
		fprintf(fp, "%s" #flag, others++ ? "|" : "")

	if (pte) {
		CHECK_PAGE_FLAG(PRESENT);
		CHECK_PAGE_FLAG(READ);
		CHECK_PAGE_FLAG(WRITE);
		CHECK_PAGE_FLAG(EXEC);
		CHECK_PAGE_FLAG(USER);
		CHECK_PAGE_FLAG(GLOBAL);
		CHECK_PAGE_FLAG(ACCESSED);
		CHECK_PAGE_FLAG(DIRTY);
		CHECK_PAGE_FLAG(SOFT);
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return page_present;
}

/*
 * Select the page-table walk flavor (3/4/5 levels) from va_bits and
 * page size.  FATAL if the page size is unknown or unsupported.
 */
static void
riscv64_page_type_init(void)
{
	ulong va_bits = machdep->machspec->va_bits;

	/*
	 * For RISCV64 arch, any level of PTE may be a leaf PTE,
	 * so in addition to 4KiB pages,
	 * Sv39 supports 2 MiB megapages, 1 GiB gigapages;
	 * Sv48 supports 2 MiB megapages, 1 GiB gigapages, 512 GiB terapages;
	 * Sv57 supports 2 MiB megapages, 1 GiB gigapages, 512 GiB terapages, and 256 TiB petapages.
	 *
	 * refs to riscv-privileged spec.
	 *
	 * We just support 4KiB, 2MiB, 1GiB now.
	 */
	switch (machdep->pagesize)
	{
	case 0x1000:		// 4 KiB
		machdep->flags |= (va_bits == 57 ? VM_L5_4K :
				  (va_bits == 48 ? VM_L4_4K : VM_L3_4K));
		break;
	case 0x200000:		// 2 MiB
		/* TODO: */
	case 0x40000000:	// 1 GiB
		/* TODO: */
	default:
		if (machdep->pagesize)
			error(FATAL, "invalid/unsupported page size: %d\n",
				machdep->pagesize);
		else
			error(FATAL, "cannot determine page size\n");
	}
}

/*
 * Three-level (Sv39, 4K pages) virtual-to-physical translation.
 * Returns TRUE and fills *paddr on success.
 */
static int
riscv64_vtop_3level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_ptr, pgd_val;
	ulong pmd_base, pmd_addr, pmd_val;
	ulong pte_base, pte_addr, pte_val, pte_pfn;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

	/* PGD: read the top-level entry through the cached pgd page */
	pgd_ptr = pgd + pgd_index_l3_4k(vaddr);
	FILL_PGD(pgd, KVADDR, PAGESIZE());
	pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	/* strip flag bits; PFN << PAGESHIFT gives next-level table base */
	pgd_val &= PTE_PFN_PROT_MASK;
	pmd_base = (pgd_val >> _PAGE_PFN_SHIFT) << PAGESHIFT();

	/* PMD */
	FILL_PMD(PAGEBASE(pmd_base), PHYSADDR, PAGESIZE());
	pmd_addr = pmd_base + sizeof(pmd_t) * pmd_index_l3_4k(vaddr);
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_addr));
	if (verbose)
		fprintf(fp, " PMD: %016lx => %016lx\n", pmd_addr, pmd_val);
	if (!pmd_val)
		goto no_page;

	pmd_val &= PTE_PFN_PROT_MASK;
	pte_base = (pmd_val >> _PAGE_PFN_SHIFT) << PAGESHIFT();

	/* PTE (NOTE(review): uses sizeof(pmd_t) for the PTE stride —
	 * presumably identical to sizeof(pte_t); confirm in riscv64.h) */
	FILL_PTBL(PAGEBASE(pte_base), PHYSADDR, PAGESIZE());
	pte_addr = pte_base + sizeof(pmd_t) * pte_index_l3_4k(vaddr);
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_addr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", pte_addr, pte_val);
	if (!pte_val)
		goto no_page;

	pte_val &= PTE_PFN_PROT_MASK;
	pte_pfn = pte_val >> _PAGE_PFN_SHIFT;

	if (!(pte_val & _PAGE_PRESENT)) {
		if (verbose) {
			fprintf(fp, "\n");
			riscv64_translate_pte((ulong)pte_val, 0, 0);
		}

		fprintf(fp, " PAGE: %016lx not present\n\n", PAGEBASE(*paddr));
		return FALSE;
	}

	*paddr = PTOB(pte_pfn) + PAGEOFFSET(vaddr);

	if (verbose) {
		fprintf(fp, " PAGE: %016lx\n\n", PAGEBASE(*paddr));
		riscv64_translate_pte(pte_val, 0, 0);
	}

	return TRUE;
no_page:
	fprintf(fp, "invalid\n");
	return FALSE;
}

/*
 * 'bt -f' command output
 *
 Display all stack data contained in a frame
 */
static void
riscv64_display_full_frame(struct bt_info *bt, struct riscv64_unwind_frame *current,
			   struct riscv64_unwind_frame *previous)
{
	int i, u_idx;
	ulong *up;
	ulong words, addr;
	char buf[BUFSIZE];

	/* frames must be ordered and both within the traced stack */
	if (previous->sp < current->sp)
		return;

	if (!(INSTACK(previous->sp, bt) && INSTACK(current->sp, bt)))
		return;

	words = (previous->sp - current->sp) / sizeof(ulong) + 1;
	addr = current->sp;
	u_idx = (current->sp - bt->stackbase) / sizeof(ulong);

	/* dump two stack words per output line, from the cached stackbuf */
	for (i = 0; i < words; i++, u_idx++) {
		if (!(i & 1))
			fprintf(fp, "%s %lx: ", i ? "\n" : "", addr);

		up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]);
		fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0));
		addr += sizeof(ulong);
	}

	fprintf(fp, "\n");
}

/*
 * Gather Overflow stack values.
 */
static void
riscv64_overflow_stack_init(void)
{
	int i;
	struct syment *sp;
	struct gnu_request request, *req;
	struct machine_specific *ms = machdep->machspec;

	req = &request;

	/* only when the per-cpu "overflow_stack" symbol exists */
	if (symbol_exists("overflow_stack") &&
	    (sp = per_cpu_symbol_search("overflow_stack")) &&
	    get_symbol_type("overflow_stack", NULL, req)) {
		if (CRASHDEBUG(1)) {
			fprintf(fp, "overflow_stack: \n");
			fprintf(fp, " type: %x, %s\n",
				(int)req->typecode,
				(req->typecode == TYPE_CODE_ARRAY) ?
					"TYPE_CODE_ARRAY" : "other");
			fprintf(fp, " target_typecode: %x, %s\n",
				(int)req->target_typecode,
				req->target_typecode == TYPE_CODE_INT ?
					"TYPE_CODE_INT" : "other");
			fprintf(fp, " target_length: %ld\n",
				req->target_length);
			fprintf(fp, " length: %ld\n", req->length);
		}

		if (!(ms->overflow_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
			error(FATAL, "cannot malloc overflow_stack addresses\n");

		ms->overflow_stack_size = RISCV64_OVERFLOW_STACK_SIZE;
		machdep->flags |= OVERFLOW_STACKS;

		/* per-cpu base address of each cpu's overflow stack */
		for (i = 0; i < kt->cpus; i++)
			ms->overflow_stacks[i] = kt->__per_cpu_offset[i] + sp->value;
	}
}

/*
 * Gather IRQ stack values.
 */
static void
riscv64_irq_stack_init(void)
{
	int i;
	struct syment *sp;
	struct gnu_request request, *req;
	struct machine_specific *ms = machdep->machspec;
	ulong p, sz;

	req = &request;

	/* only when the per-cpu "irq_stack_ptr" symbol exists */
	if (symbol_exists("irq_stack_ptr") &&
	    (sp = per_cpu_symbol_search("irq_stack_ptr")) &&
	    get_symbol_type("irq_stack_ptr", NULL, req)) {
		if (CRASHDEBUG(1)) {
			fprintf(fp, "irq_stack_ptr: \n");
			fprintf(fp, " type: %x, %s\n",
				(int)req->typecode,
				(req->typecode == TYPE_CODE_PTR) ?
					"TYPE_CODE_PTR" : "other");
			fprintf(fp, " target_typecode: %x, %s\n",
				(int)req->target_typecode,
				req->target_typecode == TYPE_CODE_INT ?
					"TYPE_CODE_INT" : "other");
			fprintf(fp, " target_length: %ld\n",
				req->target_length);
			fprintf(fp, " length: %ld\n", req->length);
		}

		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
			error(FATAL, "cannot malloc irq_stack addresses\n");

		/*
		 * find IRQ_STACK_SIZE (i.e. THREAD_SIZE) via thread_union.stack
		 * or set STACKSIZE() as default.
		 */
		if (MEMBER_EXISTS("thread_union", "stack")) {
			if ((sz = MEMBER_SIZE("thread_union", "stack")) > 0)
				ms->irq_stack_size = sz;
		} else
			ms->irq_stack_size = machdep->stacksize;

		machdep->flags |= IRQ_STACKS;

		/* irq_stack_ptr is a pointer; read each cpu's stack base */
		for (i = 0; i < kt->cpus; i++) {
			p = kt->__per_cpu_offset[i] + sp->value;
			if (CRASHDEBUG(1))
				fprintf(fp, " IRQ stack pointer[%d] is %lx\n", i, p);
			readmem(p, KVADDR, &(ms->irq_stacks[i]), sizeof(ulong),
				"IRQ stack pointer", RETURN_ON_ERROR);
		}
	}
}

/* TRUE if stkptr falls within the given cpu's IRQ stack. */
static int
riscv64_on_irq_stack(int cpu, ulong stkptr)
{
	struct machine_specific *ms = machdep->machspec;
	ulong *stacks = ms->irq_stacks;
	ulong stack_size = ms->irq_stack_size;

	if ((cpu >= kt->cpus) || (stacks == NULL) || !stack_size)
		return FALSE;

	if ((stkptr >= stacks[cpu]) &&
	    (stkptr < (stacks[cpu] + stack_size)))
		return TRUE;

	return FALSE;
}

/* TRUE if stkptr falls within the given cpu's overflow stack. */
static int
riscv64_on_overflow_stack(int cpu, ulong stkptr)
{
	struct machine_specific *ms = machdep->machspec;
	ulong *stacks = ms->overflow_stacks;
	ulong stack_size = ms->overflow_stack_size;

	if ((cpu >= kt->cpus) || (stacks == NULL) || !stack_size)
		return FALSE;

	if ((stkptr >= stacks[cpu]) &&
	    (stkptr < (stacks[cpu] + stack_size)))
		return TRUE;

	return FALSE;
}

/* TRUE if stkptr falls within the task's normal process stack. */
static int
riscv64_on_process_stack(struct bt_info *bt, ulong stkptr)
{
	ulong stackbase, stacktop;

	stackbase = GET_STACKBASE(bt->task);
	stacktop = GET_STACKTOP(bt->task);

	if ((stkptr >= stackbase) && (stkptr < stacktop))
		return TRUE;

	return FALSE;
}

/* Retarget bt to this cpu's IRQ stack and refill the stack buffer. */
static void
riscv64_set_irq_stack(struct bt_info *bt)
{
	struct machine_specific *ms = machdep->machspec;

	bt->stackbase = ms->irq_stacks[bt->tc->processor];
	bt->stacktop = bt->stackbase + ms->irq_stack_size;
	alter_stackbuf(bt);
}

/* Retarget bt to this cpu's overflow stack and refill the stack buffer. */
static void
riscv64_set_overflow_stack(struct bt_info *bt)
{
	struct machine_specific *ms = machdep->machspec;

	bt->stackbase = ms->overflow_stacks[bt->tc->processor];
	bt->stacktop = bt->stackbase + ms->overflow_stack_size;
	alter_stackbuf(bt);
}

/* Retarget bt back to the task's process stack and refill the buffer. */
static void
riscv64_set_process_stack(struct bt_info *bt)
{
	bt->stackbase = GET_STACKBASE(bt->task);
	bt->stacktop = GET_STACKTOP(bt->task);
	alter_stackbuf(bt);
}

/*
 * Record the offsets of the saved ra/sp/fp registers inside
 * task_struct.thread, used to read sleeping tasks' contexts.
 */
static void
riscv64_stackframe_init(void)
{
	long task_struct_thread = MEMBER_OFFSET("task_struct", "thread");

	/* from arch/riscv/include/asm/processor.h */
	long thread_reg_ra = MEMBER_OFFSET("thread_struct", "ra");
	long thread_reg_sp = MEMBER_OFFSET("thread_struct", "sp");
	long thread_reg_fp = MEMBER_OFFSET("thread_struct", "s");

	if ((task_struct_thread == INVALID_OFFSET) ||
	    (thread_reg_ra == INVALID_OFFSET) ||
	    (thread_reg_sp == INVALID_OFFSET) ||
	    (thread_reg_fp == INVALID_OFFSET) )
		error(FATAL,
			"cannot determine thread_struct offsets\n");

	ASSIGN_OFFSET(task_struct_thread_context_pc) =
		task_struct_thread + thread_reg_ra;
	ASSIGN_OFFSET(task_struct_thread_context_sp) =
		task_struct_thread + thread_reg_sp;
	ASSIGN_OFFSET(task_struct_thread_context_fp) =
		task_struct_thread + thread_reg_fp;
}

/*
 * Print one backtrace frame line, plus optional line numbers (bt -l)
 * and full frame data (bt -f).
 */
static void
riscv64_dump_backtrace_entry(struct bt_info *bt, struct syment *sym,
			     struct riscv64_unwind_frame *current,
			     struct riscv64_unwind_frame *previous, int level)
{
	const char *name = sym ? sym->name : "(invalid)";
	struct load_module *lm;
	char *name_plus_offset = NULL;
	struct syment *symp;
	ulong symbol_offset;
	char buf[BUFSIZE];

	if (bt->flags & BT_SYMBOL_OFFSET) {
		symp = value_search(current->pc, &symbol_offset);

		if (symp && symbol_offset)
			name_plus_offset =
				value_to_symstr(current->pc, buf, bt->radix);
	}

	fprintf(fp, "%s#%d [%016lx] %s at %016lx", level < 10 ? " " : "", level,
		current->sp, name_plus_offset ? name_plus_offset : name,
		current->pc);

	/* append the module name for module-text addresses */
	if (module_symbol(current->pc, NULL, &lm, NULL, 0))
		fprintf(fp, " [%s]", lm->mod_name);

	fprintf(fp, "\n");

	/*
	 * 'bt -l', get a line number associated with a current pc address.
	 */
	if (bt->flags & BT_LINE_NUMBERS) {
		get_line_number(current->pc, buf, FALSE);
		if (strlen(buf))
			fprintf(fp, " %s\n", buf);
	}

	/* bt -f */
	if (bt->flags & BT_FULL) {
		fprintf(fp, " "
			"[PC: %016lx RA: %016lx SP: %016lx SIZE: %ld]\n",
			current->pc, previous->pc, current->sp,
			previous->sp - current->sp);
		riscv64_display_full_frame(bt, current, previous);
	}
}

/*
 * Unroll a kernel stack.
 */
static void
riscv64_back_trace_cmd(struct bt_info *bt)
{
	struct riscv64_unwind_frame current, previous;
	struct stackframe curr_frame;
	struct riscv64_register *regs, *irq_regs, *overflow_regs;
	int level = 0;

	if (bt->flags & BT_REGS_NOT_FOUND)
		return;

	/* panic-time registers stashed by riscv64_get_dumpfile_stack_frame() */
	regs = (struct riscv64_register *) bt->machdep;

	/* start on the IRQ/overflow stack if the frame pointer lives there */
	if (riscv64_on_irq_stack(bt->tc->processor, bt->frameptr)) {
		riscv64_set_irq_stack(bt);
		bt->flags |= BT_IRQSTACK;
	}

	if (riscv64_on_overflow_stack(bt->tc->processor, bt->frameptr)) {
		riscv64_set_overflow_stack(bt);
		bt->flags |= BT_OVERFLOW_STACK;
	}

	current.pc = bt->instptr;
	current.sp = bt->stkptr;
	current.fp = bt->frameptr;

	if (!INSTACK(current.sp, bt))
		return;

	for (;;) {
		struct syment *symbol = NULL;
		struct stackframe *frameptr;
		ulong low, high;
		ulong offset;

		if (CRASHDEBUG(8))
			fprintf(fp, "level %d pc %#lx sp %lx fp 0x%lx\n",
				level, current.pc, current.sp, current.fp);

		/* Validate frame pointer: must lie above sp, within the
		 * current stack, and be 8-byte aligned */
		low = current.sp + sizeof(struct stackframe);
		high = bt->stacktop;
		if (current.fp < low || current.fp > high || current.fp & 0x7) {
			if (CRASHDEBUG(8))
				fprintf(fp, "fp 0x%lx sp 0x%lx low 0x%lx high 0x%lx\n",
					current.fp, current.sp, low, high);
			return;
		}

		symbol = value_search(current.pc, &offset);
		if (!symbol)
			return;

		/* saved {ra,fp} record sits just below the frame pointer */
		frameptr = (struct stackframe *)current.fp - 1;
		if (!readmem((ulong)frameptr, KVADDR, &curr_frame,
		    sizeof(curr_frame), "get stack frame", RETURN_ON_ERROR))
			return;

		/* correct PC and FP of the second frame when the first frame has no callee */
		if (regs && (regs->regs[RISCV64_REGS_EPC] == current.pc) &&
		    curr_frame.fp & 0x7){
			previous.pc = regs->regs[RISCV64_REGS_RA];
			previous.fp = curr_frame.ra;
		} else {
			previous.pc = curr_frame.ra;
			previous.fp = curr_frame.fp;
		}

		previous.sp = current.fp;

		riscv64_dump_backtrace_entry(bt, symbol, &current, &previous, level++);

		current.pc = previous.pc;
		current.fp = previous.fp;
		current.sp = previous.sp;

		/*
		 * When backtracing to do_irq(), find the original FP of do_irq()
		 * and then use the saved pt_regs in process stack to continue
		 */
		if ((bt->flags & BT_IRQSTACK) &&
		    !riscv64_on_irq_stack(bt->tc->processor, current.fp)){
			if (riscv64_on_process_stack(bt, current.fp)){
				frameptr = (struct stackframe *)current.fp - 1;

				if (!readmem((ulong)frameptr, KVADDR,
				    &curr_frame, sizeof(curr_frame),
				    "get do_irq stack frame", RETURN_ON_ERROR))
					return;

				riscv64_set_process_stack(bt);

				irq_regs = (struct riscv64_register *)
					&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(curr_frame.fp))];

				current.pc = irq_regs->regs[RISCV64_REGS_EPC];
				current.fp = irq_regs->regs[RISCV64_REGS_FP];
				current.sp = irq_regs->regs[RISCV64_REGS_SP];

				bt->flags &= ~BT_IRQSTACK;
				riscv64_print_exception_frame(bt, curr_frame.fp, KERNEL_MODE);
				fprintf(fp, "--- ---\n");
			}
		}

		/*
		 * When backtracing to handle_kernel_stack_overflow()
		 * use pt_regs saved in overflow stack to continue
		 */
		if ((bt->flags & BT_OVERFLOW_STACK) &&
		    !riscv64_on_overflow_stack(bt->tc->processor, current.fp)) {
			overflow_regs = (struct riscv64_register *)
				&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(current.sp))];

			riscv64_print_exception_frame(bt, current.sp, KERNEL_MODE);

			current.pc = overflow_regs->regs[RISCV64_REGS_EPC];
			current.fp = overflow_regs->regs[RISCV64_REGS_FP];
			current.sp = overflow_regs->regs[RISCV64_REGS_SP];

			riscv64_set_process_stack(bt);

			bt->flags &= ~BT_OVERFLOW_STACK;
			fprintf(fp, "--- ---\n");
		}

		if (CRASHDEBUG(8))
			fprintf(fp, "next %d pc %#lx sp %#lx fp %lx\n",
				level, current.pc, current.sp, current.fp);
	}
}

/*
 * Get a stack frame combination of pc and ra from the most relevant spot.
 */
static void
riscv64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	ulong ksp = 0, nip = 0;
	int ret = 0;

	/* active tasks in a dumpfile get panic-time registers; others
	 * get the context saved in task_struct.thread */
	if (DUMPFILE() && is_task_active(bt->task))
		ret = riscv64_get_dumpfile_stack_frame(bt, &nip, &ksp);
	else
		ret = riscv64_get_frame(bt, &nip, &ksp);

	if (!ret)
		error(WARNING, "cannot determine starting stack frame for task %lx\n",
			bt->task);

	if (pcp)
		*pcp = nip;
	if (spp)
		*spp = ksp;
}

/*
 * Get the starting point for the active cpu in a diskdump.
*/ static int riscv64_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp) { const struct machine_specific *ms = machdep->machspec; struct riscv64_register *regs; ulong epc, sp; if (!ms->crash_task_regs) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } /* * We got registers for panic task from crash_notes. Just return them. */ regs = &ms->crash_task_regs[bt->tc->processor]; epc = regs->regs[RISCV64_REGS_EPC]; sp = regs->regs[RISCV64_REGS_SP]; /* * Set stack frame ptr. */ bt->frameptr = regs->regs[RISCV64_REGS_FP]; if (nip) *nip = epc; if (ksp) *ksp = sp; bt->machdep = regs; return TRUE; } /* * Do the work for riscv64_get_stack_frame() for non-active tasks. * Get SP and PC values for idle tasks. */ static int riscv64_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (!bt->tc || !(tt->flags & THREAD_INFO)) return FALSE; if (!readmem(bt->task + OFFSET(task_struct_thread_context_pc), KVADDR, pcp, sizeof(*pcp), "thread_struct.ra", RETURN_ON_ERROR)) return FALSE; if (!readmem(bt->task + OFFSET(task_struct_thread_context_sp), KVADDR, spp, sizeof(*spp), "thread_struct.sp", RETURN_ON_ERROR)) return FALSE; if (!readmem(bt->task + OFFSET(task_struct_thread_context_fp), KVADDR, &bt->frameptr, sizeof(bt->frameptr), "thread_struct.fp", RETURN_ON_ERROR)) return FALSE; return TRUE; } static int riscv64_vtop_4level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd_ptr, pgd_val; ulong pud_val; ulong pmd_val; ulong pte_val, pte_pfn; ulong pt_phys; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); /* PGD */ pgd_ptr = pgd + pgd_index_l4_4k(vaddr); FILL_PGD(pgd, KVADDR, PAGESIZE()); pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); if (!pgd_val) goto no_page; pgd_val &= PTE_PFN_PROT_MASK; pt_phys = (pgd_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* PUD */ FILL_PUD(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); pud_val = ULONG(machdep->pud + 
PAGEOFFSET(sizeof(pud_t) * pud_index_l4_4k(vaddr))); if (verbose) fprintf(fp, " PUD: %016lx => %016lx\n", pt_phys, pud_val); if (!pud_val) goto no_page; pud_val &= PTE_PFN_PROT_MASK; pt_phys = (pud_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* PMD */ FILL_PMD(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); pmd_val = ULONG(machdep->pmd + PAGEOFFSET(sizeof(pmd_t) * pmd_index_l4_4k(vaddr))); if (verbose) fprintf(fp, " PMD: %016lx => %016lx\n", pt_phys, pmd_val); if (!pmd_val) goto no_page; pmd_val &= PTE_PFN_PROT_MASK; pt_phys = (pmd_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* PTE */ FILL_PTBL(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); pte_val = ULONG(machdep->ptbl + PAGEOFFSET(sizeof(pte_t) * pte_index_l4_4k(vaddr))); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", pt_phys, pte_val); if (!pte_val) goto no_page; pte_val &= PTE_PFN_PROT_MASK; pte_pfn = pte_val >> _PAGE_PFN_SHIFT; if (!(pte_val & _PAGE_PRESENT)) { if (verbose) { fprintf(fp, "\n"); riscv64_translate_pte((ulong)pte_val, 0, 0); } fprintf(fp, " PAGE: %016lx not present\n\n", PAGEBASE(*paddr)); return FALSE; } *paddr = PTOB(pte_pfn) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %016lx\n\n", PAGEBASE(*paddr)); riscv64_translate_pte(pte_val, 0, 0); } return TRUE; no_page: fprintf(fp, "invalid\n"); return FALSE; } static int riscv64_vtop_5level_4k(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd_ptr, pgd_val; ulong p4d_val; ulong pud_val; ulong pmd_val; ulong pte_val, pte_pfn; ulong pt_phys; if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); /* PGD */ pgd_ptr = pgd + pgd_index_l5_4k(vaddr); FILL_PGD(pgd, KVADDR, PAGESIZE()); pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val); if (!pgd_val) goto no_page; pgd_val &= PTE_PFN_PROT_MASK; pt_phys = (pgd_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* P4D */ FILL_P4D(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); p4d_val = ULONG(machdep->machspec->p4d + 
PAGEOFFSET(sizeof(p4d_t) * p4d_index_l5_4k(vaddr))); if (verbose) fprintf(fp, " P4D: %016lx => %016lx\n", pt_phys, p4d_val); if (!p4d_val) goto no_page; p4d_val &= PTE_PFN_PROT_MASK; pt_phys = (p4d_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* PUD */ FILL_PUD(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); pud_val = ULONG(machdep->pud + PAGEOFFSET(sizeof(pud_t) * pud_index_l5_4k(vaddr))); if (verbose) fprintf(fp, " PUD: %016lx => %016lx\n", pt_phys, pud_val); if (!pud_val) goto no_page; pud_val &= PTE_PFN_PROT_MASK; pt_phys = (pud_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* PMD */ FILL_PMD(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); pmd_val = ULONG(machdep->pmd + PAGEOFFSET(sizeof(pmd_t) * pmd_index_l4_4k(vaddr))); if (verbose) fprintf(fp, " PMD: %016lx => %016lx\n", pt_phys, pmd_val); if (!pmd_val) goto no_page; pmd_val &= PTE_PFN_PROT_MASK; pt_phys = (pmd_val >> _PAGE_PFN_SHIFT) << PAGESHIFT(); /* PTE */ FILL_PTBL(PAGEBASE(pt_phys), PHYSADDR, PAGESIZE()); pte_val = ULONG(machdep->ptbl + PAGEOFFSET(sizeof(pte_t) * pte_index_l4_4k(vaddr))); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", pt_phys, pte_val); if (!pte_val) goto no_page; pte_val &= PTE_PFN_PROT_MASK; pte_pfn = pte_val >> _PAGE_PFN_SHIFT; if (!(pte_val & _PAGE_PRESENT)) { if (verbose) { fprintf(fp, "\n"); riscv64_translate_pte((ulong)pte_val, 0, 0); } printf("!_PAGE_PRESENT\n"); return FALSE; } *paddr = PTOB(pte_pfn) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %016lx\n\n", PAGEBASE(*paddr)); riscv64_translate_pte(pte_val, 0, 0); } return TRUE; no_page: fprintf(fp, "invalid\n"); return FALSE; } static int riscv64_init_active_task_regs(void) { int retval; retval = riscv64_get_crash_notes(); if (retval == TRUE) return retval; return riscv64_get_elf_notes(); } /* * Retrieve task registers for the time of the crash. 
*/ static int riscv64_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf64_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i; /* * crash_notes contains per cpu memory for storing cpu states * in case of system crash. */ if (!symbol_exists("crash_notes")) return FALSE; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. */ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return FALSE; } if (symbol_exists("__per_cpu_offset")) { /* * Add __per_cpu_offset for each cpu to form the pointer to the notes */ for (i = 0; i < kt->cpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); goto fail; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf64_Nhdr *)buf; p = buf + sizeof(Elf64_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. 
*/ long notesz = SIZE(note_buf) - sizeof(Elf64_Nhdr); if (sizeof(Elf64_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } /* * Check the sanity of NT_PRSTATUS note only for each online cpu. */ if (note->n_type != NT_PRSTATUS) { error(WARNING, "invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n"); goto fail; } if (!STRNEQ(p, "CORE")) { error(WARNING, "invalid NT_PRSTATUS note (name != \"CORE\"\n"); goto fail; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. */ offset = sizeof(Elf64_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); return TRUE; fail: FREEBUF(buf); FREEBUF(notes_ptrs); free(panic_task_regs); return FALSE; } static int riscv64_get_elf_notes(void) { struct machine_specific *ms = machdep->machspec; int i; if (!DISKDUMP_DUMPFILE() && !KDUMP_DUMPFILE()) return FALSE; panic_task_regs = calloc(kt->cpus, sizeof(*panic_task_regs)); if (!panic_task_regs) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { Elf64_Nhdr *note = NULL; size_t len; if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (!note) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); BCOPY((char *)note + len + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } ms->crash_task_regs = panic_task_regs; return TRUE; } /* * 
 Translates a user virtual address to its physical address.
 */
static int
riscv64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
{
	ulong mm, active_mm;
	ulong *pgd;

	if (!tc)
		error(FATAL, "current context invalid\n");

	*paddr = 0;

	/* kernel threads have no mm; fall back to active_mm's pgd */
	if (is_kernel_thread(tc->task) && IS_KVADDR(uvaddr)) {
		readmem(tc->task + OFFSET(task_struct_active_mm),
			KVADDR, &active_mm, sizeof(void *),
			"task active_mm contents", FAULT_ON_ERROR);

		if (!active_mm)
			error(FATAL,
				"no active_mm for this kernel thread\n");

		readmem(active_mm + OFFSET(mm_struct_pgd),
			KVADDR, &pgd, sizeof(long),
			"mm_struct pgd", FAULT_ON_ERROR);
	} else {
		/* task_mm(..., TRUE) fills tt->mm_struct when it succeeds */
		if ((mm = task_mm(tc->task, TRUE)))
			pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd));
		else
			readmem(tc->mm_struct + OFFSET(mm_struct_pgd),
				KVADDR, &pgd, sizeof(long), "mm_struct pgd",
				FAULT_ON_ERROR);
	}

	/* dispatch on the page-table depth chosen by riscv64_page_type_init() */
	switch (machdep->flags & VM_FLAGS)
	{
	case VM_L3_4K:
		return riscv64_vtop_3level_4k(pgd, uvaddr, paddr, verbose);
	case VM_L4_4K:
		return riscv64_vtop_4level_4k(pgd, uvaddr, paddr, verbose);
	case VM_L5_4K:
		return riscv64_vtop_5level_4k(pgd, uvaddr, paddr, verbose);
	default:
		return FALSE;
	}
}

/*
 * Translates a kernel virtual address to its physical address:
 * linear-map addresses via VTOP(), vmalloc-range addresses via a
 * full page-table walk of the kernel pgd.
 */
static int
riscv64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong kernel_pgd;

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	if (!vt->vmalloc_start) {
		*paddr = VTOP(kvaddr);
		return TRUE;
	}

	/* linear-map address: direct offset translation unless a
	 * verbose walk was requested */
	if (!IS_VMALLOC_ADDR(kvaddr)) {
		*paddr = VTOP(kvaddr);
		if (!verbose)
			return TRUE;
	}

	kernel_pgd = vt->kernel_pgd[0];
	*paddr = 0;

	switch (machdep->flags & VM_FLAGS)
	{
	case VM_L3_4K:
		return riscv64_vtop_3level_4k((ulong *)kernel_pgd, kvaddr,
					      paddr, verbose);
	case VM_L4_4K:
		return riscv64_vtop_4level_4k((ulong *)kernel_pgd, kvaddr,
					      paddr, verbose);
	case VM_L5_4K:
		return riscv64_vtop_5level_4k((ulong *)kernel_pgd, kvaddr,
					      paddr, verbose);
	default:
		return FALSE;
	}
}

/*
 * Machine-dependent initialization, called in stages by the core
 * (SETUP_ENV, PRE_SYMTAB, PRE_GDB, POST_GDB, POST_VM).
 * NOTE: this function continues beyond the end of this view.
 */
void
riscv64_init(int when)
{
	switch (when) {
	case SETUP_ENV:
		machdep->process_elf_notes = process_elf64_notes;
		break;

	case PRE_SYMTAB:
		machdep->verify_symbol = riscv64_verify_symbol;
		machdep->machspec = &riscv64_machine_specific;
		if (pc->flags & KERNEL_DEBUG_QUERY)
			return;

		machdep->verify_paddr = generic_verify_paddr;
		machdep->ptrs_per_pgd = PTRS_PER_PGD;

		/*
		 * Even if CONFIG_RANDOMIZE_BASE is not configured,
		 * derive_kaslr_offset() should work and set
		 * kt->relocate to 0
		 */
		if (!kt->relocate && !(kt->flags2 & (RELOC_AUTO|KASLR)))
			kt->flags2 |= (RELOC_AUTO|KASLR);
		break;

	case PRE_GDB:
		machdep->pagesize = riscv64_get_page_size();
		machdep->pageshift = ffs(machdep->pagesize) - 1;
		machdep->pageoffset = machdep->pagesize - 1;
		machdep->pagemask = ~((ulonglong)machdep->pageoffset);
		machdep->stacksize = machdep->pagesize << THREAD_SIZE_ORDER;

		riscv64_get_phys_ram_base(machdep->machspec);
		riscv64_get_struct_page_size(machdep->machspec);
		riscv64_get_va_bits(machdep->machspec);
		riscv64_get_va_range(machdep->machspec);
		riscv64_get_va_kernel_pa_offset(machdep->machspec);

		/* allocate one cached page per page-table level */
		pt_level_alloc(&machdep->pgd, "cannot malloc pgd space.");
		pt_level_alloc(&machdep->machspec->p4d, "cannot malloc p4d space.");
		pt_level_alloc(&machdep->pud, "cannot malloc pud space.");
		pt_level_alloc(&machdep->pmd, "cannot malloc pmd space.");
		pt_level_alloc(&machdep->ptbl, "cannot malloc ptbl space.");

		machdep->last_pgd_read = 0;
		machdep->machspec->last_p4d_read = 0;
		machdep->last_pud_read = 0;
		machdep->last_pmd_read = 0;
		machdep->last_ptbl_read = 0;

		machdep->kvbase = machdep->machspec->page_offset;
		machdep->identity_map_base = machdep->kvbase;
		machdep->is_kvaddr = riscv64_is_kvaddr;
		machdep->is_uvaddr = riscv64_is_uvaddr;
		machdep->uvtop = riscv64_uvtop;
		machdep->kvtop = riscv64_kvtop;
		machdep->cmd_mach = riscv64_cmd_mach;
		machdep->get_stack_frame = riscv64_get_stack_frame;
		machdep->back_trace = riscv64_back_trace_cmd;
		machdep->eframe_search = riscv64_eframe_search;
		machdep->vmalloc_start = riscv64_vmalloc_start;
		machdep->processor_speed = riscv64_processor_speed;
		machdep->get_stackbase = generic_get_stackbase;
		machdep->get_stacktop = generic_get_stacktop;
		machdep->translate_pte = riscv64_translate_pte;
		machdep->memory_size = generic_memory_size;
		machdep->is_task_addr = riscv64_is_task_addr;
		machdep->get_smp_cpus = riscv64_get_smp_cpus;
		machdep->value_to_symbol = generic_machdep_value_to_symbol;
		machdep->dis_filter = generic_dis_filter;
		machdep->dump_irq = generic_dump_irq;
		machdep->show_interrupts = generic_show_interrupts;
		machdep->get_irq_affinity = generic_get_irq_affinity;
		machdep->init_kernel_pgd = NULL; /* pgd set by symbol_value("swapper_pg_dir") */
		break;

	case POST_GDB:
		machdep->section_size_bits = _SECTION_SIZE_BITS;
		machdep->max_physmem_bits = _MAX_PHYSMEM_BITS;

		riscv64_irq_stack_init();
		riscv64_overflow_stack_init();
		riscv64_stackframe_init();
		riscv64_page_type_init();

		if (!machdep->hz)
			machdep->hz = 250;

		if (symbol_exists("irq_desc"))
			ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc,
					  "irq_desc", NULL, 0);
		else if (kernel_symbol_exists("nr_irqs"))
			get_symbol_data("nr_irqs", sizeof(unsigned int),
					&machdep->nr_irqs);

		MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus",
				   "pr_reg");

		STRUCT_SIZE_INIT(note_buf, "note_buf_t");
		break;

	case POST_VM:
		/*
		 * crash_notes contains machine specific information about the
		 * crash. In particular, it contains CPU registers at the time
		 * of the crash. We need this information to extract correct
		 * backtraces from the panic task.
		 */
		if (!ACTIVE() && !riscv64_init_active_task_regs())
			error(WARNING,
				"cannot retrieve registers for active task%s\n\n",
				kt->cpus > 1 ?
"s" : ""); break; } } /* bool pt_regs : pass 1 to dump pt_regs , pass 0 to dump user_regs_struct */ static void riscv64_dump_pt_regs(struct riscv64_register *regs, FILE *ofp, bool pt_regs) { /* Print riscv64 32 regs */ fprintf(ofp, "epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n" " gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n" " t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n" " s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n" " a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n" " a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n" " s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n" " s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n" " s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n" " s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n" " t5 : " REG_FMT " t6 : " REG_FMT "\n", regs->regs[0], regs->regs[1], regs->regs[2], regs->regs[3], regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7], regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11], regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15], regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19], regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23], regs->regs[24], regs->regs[25], regs->regs[26], regs->regs[27], regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]); if (pt_regs) fprintf(ofp, " status: " REG_FMT " badaddr: " REG_FMT "\n" " cause: " REG_FMT " orig_a0: " REG_FMT "\n", regs->regs[32], regs->regs[33], regs->regs[34], regs->regs[35]); } /* * 'help -r' command output */ void riscv64_display_regs_from_elf_notes(int cpu, FILE *ofp) { const struct machine_specific *ms = machdep->machspec; struct riscv64_register *regs; if (!ms->crash_task_regs) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } regs = &ms->crash_task_regs[cpu]; if (!regs->regs[RISCV64_REGS_SP] && !regs->regs[RISCV64_REGS_EPC]) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } riscv64_dump_pt_regs(regs, 
ofp, 0); } static void riscv64_print_exception_frame(struct bt_info *bt, ulong ptr, int mode) { struct syment *sp; ulong PC, RA, SP, offset; struct riscv64_register *regs; regs = (struct riscv64_register *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(ptr))]; PC = regs->regs[RISCV64_REGS_EPC]; RA = regs->regs[RISCV64_REGS_RA]; SP = regs->regs[RISCV64_REGS_SP]; switch (mode) { case USER_MODE: fprintf(fp, " PC: %016lx RA: %016lx SP: %016lx\n" " ORIG_A0: %016lx SYSCALLNO: %016lx\n", PC, RA, SP, regs->regs[35], regs->regs[17]); break; case KERNEL_MODE: fprintf(fp, " PC: %016lx ", PC); if (is_kernel_text(PC) && (sp = value_search(PC, &offset))) { fprintf(fp, "[%s", sp->name); if (offset) fprintf(fp, (*gdb_output_radix == 16) ? "+0x%lx" : "+%ld", offset); fprintf(fp, "]\n"); } else fprintf(fp, "[unknown or invalid address]\n"); fprintf(fp, " RA: %016lx ", RA); if (is_kernel_text(RA) && (sp = value_search(RA, &offset))) { fprintf(fp, "[%s", sp->name); if (offset) fprintf(fp, (*gdb_output_radix == 16) ? 
"+0x%lx" : "+%ld", offset);
			fprintf(fp, "]\n");
		} else
			fprintf(fp, "[unknown or invalid address]\n");
		fprintf(fp, " SP: %016lx CAUSE: %016lx\n",
			SP, regs->regs[RISCV64_REGS_CAUSE]);
		break;
	}

	riscv64_dump_pt_regs(regs, fp, 1);
}

/*
 * Heuristically decide whether the stack bytes at stkptr look like a
 * saved kernel-mode pt_regs: SP/FP must point back into this stack,
 * RA/EPC must be kernel text, and the status/cause CSR values must be
 * consistent with an exception taken from S-mode.
 */
static int
riscv64_is_kernel_exception_frame(struct bt_info *bt, ulong stkptr)
{
	struct riscv64_register *regs;

	if (stkptr > STACKSIZE() && !INSTACK(stkptr, bt)) {
		if (CRASHDEBUG(1))
			error(WARNING,
			      "stkptr: %lx is outside the kernel stack range\n",
			      stkptr);
		return FALSE;
	}

	/* Interpret the stack bytes at stkptr as a register frame. */
	regs = (struct riscv64_register *)
		&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(stkptr))];

	if (INSTACK(regs->regs[RISCV64_REGS_SP], bt) &&
	    INSTACK(regs->regs[RISCV64_REGS_FP], bt) &&
	    is_kernel_text(regs->regs[RISCV64_REGS_RA]) &&
	    is_kernel_text(regs->regs[RISCV64_REGS_EPC]) &&
	    ((regs->regs[RISCV64_REGS_STATUS] >> 8) & 0x1) &&	// sstatus.SPP != 0
	    !((regs->regs[RISCV64_REGS_CAUSE] >> 63) & 0x1 ) &&	// scause.Interrupt != 1
	    !(regs->regs[RISCV64_REGS_CAUSE] == 0x00000008UL)) {	// scause != ecall from U-mode
		return TRUE;
	}

	return FALSE;
}

/*
 * Scan every byte offset of the task's kernel stack for plausible
 * kernel-mode exception frames and print each distinct one found.
 * Returns the number of frames printed.
 */
static int
riscv64_dump_kernel_eframes(struct bt_info *bt)
{
	ulong ptr;
	int count;
	/*
	 * use old_regs to avoid the identical contiguous kernel exception frames
	 * created by Linux handle_exception() path ending at riscv_crash_save_regs()
	 */
	struct riscv64_register *regs, *old_regs;

	count = 0;
	old_regs = NULL;

	for (ptr = bt->stackbase; ptr < bt->stacktop - SIZE(pt_regs); ptr++) {
		regs = (struct riscv64_register *)
			&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(ptr))];

		if (riscv64_is_kernel_exception_frame(bt, ptr)){
			/* only report a frame whose contents differ from the last one */
			if (!old_regs || (old_regs &&
			    memcmp(old_regs, regs, sizeof(struct riscv64_register))) != 0){
				old_regs = regs;
				fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME AT: %lx\n", ptr);
				riscv64_print_exception_frame(bt, ptr, KERNEL_MODE);
				count++;
			}
		}
	}

	return count;
}

/*
 * Backend for "bt -E"/"bt -e": search kernel (and, with BT_EFRAME_SEARCH2,
 * per-cpu IRQ) stacks for exception frames.
 */
static int
riscv64_eframe_search(struct bt_info *bt)
{
	ulong ptr;
	int count, c;
	struct machine_specific *ms = machdep->machspec;

	if (bt->flags & BT_EFRAME_SEARCH2) {
		if (!(machdep->flags &
IRQ_STACKS)) error(FATAL, "IRQ stacks do not exist in this kernel\n"); for (c = 0; c < kt->cpus; c++) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; fprintf(fp, "CPU %d IRQ STACK: ", c); bt->stackbase = ms->irq_stacks[c]; bt->stacktop = bt->stackbase + ms->irq_stack_size; alter_stackbuf(bt); count = riscv64_dump_kernel_eframes(bt); if (count) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } return 0; } count = riscv64_dump_kernel_eframes(bt); if (is_kernel_thread(bt->tc->task)) return count; ptr = bt->stacktop - SIZE(pt_regs); fprintf(fp, "%sUSER-MODE EXCEPTION FRAME AT: %lx\n", count++ ? "\n" : "", ptr); riscv64_print_exception_frame(bt, ptr, USER_MODE); return count; } #else /* !RISCV64 */ void riscv64_display_regs_from_elf_notes(int cpu, FILE *ofp) { return; } #endif /* !RISCV64 */ crash-utility-crash-9cd43f5/lkcd_vmdump_v1.h0000664000372000037200000001207715107550337020467 0ustar juerghjuergh/* lkcd_vmdump_v1.h - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * */ /* This header file includes all structure definitions for crash dumps. 
*/ #ifndef _VMDUMP_H #define _VMDUMP_H /* necessary header files */ #ifndef MCLX #include /* for utsname structure */ #endif #ifndef IA64 typedef unsigned int u32; #include /* for pt_regs */ #endif /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ #ifdef CONFIG_VMDUMP /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_VERSION_NUMBER 0x1 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump flags -- add as necessary */ #define DUMP_RAW 0x1 /* raw page (no compression) */ #define DUMP_COMPRESSED 0x2 /* page is compressed */ #define DUMP_END 0x4 /* end marker on a full dump */ /* dump types - type specific stuff added later for page typing */ #define DUMP_NONE 0 /* no dumping at all -- just bail */ #define DUMP_HEADER 1 /* kernel dump header only */ #define DUMP_KERN 2 /* dump header and kernel pages */ #define DUMP_USED 3 /* dump header, kernel/user pages */ #define DUMP_ALL 4 /* dump header, all memory pages */ /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) 
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the esp for i386 systems -- MOVE LATER */ uint32_t dh_esp; /* the eip for i386 systems -- MOVE LATER */ uint32_t dh_eip; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the utsname (uname) information */ struct new_utsname dh_utsname; /* the dump registers */ #if !defined(IA64) && !defined(S390) && !defined(S390X) && !defined(ARM64) && !defined(RISCV64) && !defined(LOONGARCH64) struct pt_regs dh_regs; #endif /* the address of the current task */ struct task_struct *dh_current_task; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; #endif /* CONFIG_VMDUMP */ #ifdef __KERNEL__ extern void dump_init(uint64_t, uint64_t); extern void dump_open(char *); extern void dump_execute(char *, struct pt_regs *); #endif #endif /* _VMDUMP_H */ crash-utility-crash-9cd43f5/arm64.c0000664000372000037200000051103315107550337016474 0ustar juerghjuergh/* * arm64.c - core analysis suite * * Copyright (C) 2012-2020 David Anderson * Copyright (C) 2012-2020 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef ARM64 #include "defs.h" #include #include #include #include #define NOT_IMPLEMENTED(X) error((X), "%s: function not implemented\n", __func__) static struct machine_specific arm64_machine_specific = { 0 }; static int arm64_verify_symbol(const char *, ulong, char); static void arm64_parse_cmdline_args(void); static int arm64_search_for_kimage_voffset(ulong); static int verify_kimage_voffset(void); static void arm64_calc_kimage_voffset(void); static void arm64_calc_phys_offset(void); static void arm64_calc_physvirt_offset(void); static void arm64_calc_virtual_memory_ranges(void); static void arm64_get_section_size_bits(void); static int arm64_kdump_phys_base(ulong *); static ulong arm64_processor_speed(void); static void arm64_init_kernel_pgd(void); static int arm64_kvtop(struct task_context *, ulong, physaddr_t *, int); static int arm64_uvtop(struct task_context *, ulong, physaddr_t *, int); static int arm64_vtop_2level_64k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_3level_64k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_2level_16k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_3level_16k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_4level_16k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_3level_4k(ulong, ulong, physaddr_t *, int); static int arm64_vtop_4level_4k(ulong, ulong, physaddr_t *, int); static ulong arm64_get_task_pgd(ulong); static void arm64_irq_stack_init(void); static void arm64_overflow_stack_init(void); 
static void arm64_stackframe_init(void); static int arm64_eframe_search(struct bt_info *); static int arm64_is_kernel_exception_frame(struct bt_info *, ulong); static int arm64_in_exception_text(ulong); static int arm64_in_exp_entry(ulong); static void arm64_back_trace_cmd(struct bt_info *); static void arm64_back_trace_cmd_v2(struct bt_info *); static void arm64_print_text_symbols(struct bt_info *, struct arm64_stackframe *, FILE *); static int arm64_print_stackframe_entry(struct bt_info *, int, struct arm64_stackframe *, FILE *); static int arm64_print_stackframe_entry_v2(struct bt_info *, int, struct arm64_stackframe *, FILE *); static void arm64_display_full_frame(struct bt_info *, ulong); static void arm64_display_full_frame_v2(struct bt_info *, struct arm64_stackframe *, struct arm64_stackframe *); static int arm64_unwind_frame(struct bt_info *, struct arm64_stackframe *); static int arm64_unwind_frame_v2(struct bt_info *, struct arm64_stackframe *, FILE *); static int arm64_get_dumpfile_stackframe(struct bt_info *, struct arm64_stackframe *); static int arm64_in_kdump_text(struct bt_info *, struct arm64_stackframe *); static int arm64_in_kdump_text_on_irq_stack(struct bt_info *); static int arm64_switch_stack(struct bt_info *, struct arm64_stackframe *, FILE *); static int arm64_switch_stack_from_overflow(struct bt_info *, struct arm64_stackframe *, FILE *); static int arm64_get_stackframe(struct bt_info *, struct arm64_stackframe *); static void arm64_get_stack_frame(struct bt_info *, ulong *, ulong *); static void arm64_gen_hidden_frame(struct bt_info *bt, ulong, struct arm64_stackframe *); static void arm64_print_exception_frame(struct bt_info *, ulong, int, FILE *); static void arm64_do_bt_reference_check(struct bt_info *, ulong, char *); static int arm64_translate_pte(ulong, void *, ulonglong); static ulong arm64_vmalloc_start(void); static int arm64_is_task_addr(ulong); static int arm64_dis_filter(ulong, char *, unsigned int); static void 
arm64_cmd_mach(void); static void arm64_display_machine_stats(void); static int arm64_get_smp_cpus(void); static void arm64_clear_machdep_cache(void); static int arm64_on_process_stack(struct bt_info *, ulong); static int arm64_in_alternate_stack(int, ulong); static int arm64_in_alternate_stackv(int cpu, ulong stkptr, ulong *stacks, ulong stack_size); static int arm64_on_irq_stack(int, ulong); static int arm64_on_overflow_stack(int, ulong); static void arm64_set_irq_stack(struct bt_info *); static void arm64_set_overflow_stack(struct bt_info *); static void arm64_set_process_stack(struct bt_info *); static int arm64_get_kvaddr_ranges(struct vaddr_range *); static void arm64_get_crash_notes(void); static void arm64_calc_VA_BITS(void); static int arm64_is_uvaddr(ulong, struct task_context *); static void arm64_calc_KERNELPACMASK(void); static void arm64_recalc_KERNELPACMASK(void); static int arm64_get_vmcoreinfo(unsigned long *vaddr, const char *label, int base); static ulong arm64_set_irq_stack_size(void); struct kernel_range { unsigned long modules_vaddr, modules_end; unsigned long vmalloc_start_addr, vmalloc_end; unsigned long vmemmap_vaddr, vmemmap_end; }; static struct kernel_range *arm64_get_va_range(struct machine_specific *ms); static void arm64_get_struct_page_size(struct machine_specific *ms); /* mte tag shift bit */ #define MTE_TAG_SHIFT 56 /* native kernel pointers tag */ #define KASAN_TAG_KERNEL 0xFF /* minimum value for random tags */ #define KASAN_TAG_MIN 0xF0 /* right shift the tag to MTE_TAG_SHIFT bit */ #define mte_tag_shifted(tag) ((ulong)(tag) << MTE_TAG_SHIFT) /* get the top byte value of the original kvaddr */ #define mte_tag_get(addr) (unsigned char)((ulong)(addr) >> MTE_TAG_SHIFT) /* reset the top byte to get an untaggged kvaddr */ #define mte_tag_reset(addr) (((ulong)addr & ~mte_tag_shifted(KASAN_TAG_KERNEL)) | \ mte_tag_shifted(KASAN_TAG_KERNEL)) struct user_regs_bitmap_struct { struct arm64_pt_regs ur; ulong bitmap[32]; }; #define 
MAX_EXCEPTION_STACKS 7
/* scratch register snapshots collected while walking exception stacks */
ulong extra_stacks_idx = 0;
struct user_regs_bitmap_struct *extra_stacks_regs[MAX_EXCEPTION_STACKS] = {0};

/*
 * Return true when addr carries a hardware (MTE) memory tag in its top
 * byte, i.e. the tag is in the random-tag range rather than the native
 * kernel tag 0xFF.
 */
static inline bool is_mte_kvaddr(ulong addr)
{
	/* check for ARM64_MTE enabled */
	if (!(machdep->flags & ARM64_MTE))
		return false;

	/* check the validity of HW Tag-Based kvaddr */
	if (mte_tag_get(addr) >= KASAN_TAG_MIN && mte_tag_get(addr) < KASAN_TAG_KERNEL)
		return true;

	return false;
}

/*
 * Kernel-virtual-address predicate: strip any MTE tag from the top byte
 * before comparing against the kernel virtual base.
 */
static int arm64_is_kvaddr(ulong addr)
{
	if (is_mte_kvaddr(addr))
		return (mte_tag_reset(addr) >= (ulong)(machdep->kvbase));

	return (addr >= (ulong)(machdep->kvbase));
}

/*
 * Record the kernel image text start/end in machspec. Since Linux 5.11
 * the image starts at _stext rather than _text.
 */
static void arm64_calc_kernel_start(void)
{
	struct machine_specific *ms = machdep->machspec;
	struct syment *sp;

	if (THIS_KERNEL_VERSION >= LINUX(5,11,0))
		sp = kernel_symbol_search("_stext");
	else
		sp = kernel_symbol_search("_text");
	ms->kimage_text = (sp ? sp->value : 0);

	sp = kernel_symbol_search("_end");
	ms->kimage_end = (sp ? sp->value : 0);
}

/*
 * Decide whether addr is a vmemmap-resident struct page pointer; when it
 * is, optionally return the physical address of the page it describes.
 */
static int
arm64_vmemmap_is_page_ptr(ulong addr, physaddr_t *phys)
{
	ulong size = SIZE(page);
	ulong pfn, nr;

	/* must lie in the vmemmap range and on a struct-page boundary */
	if (IS_SPARSEMEM() && (machdep->flags & VMEMMAP) &&
	    (addr >= VMEMMAP_VADDR && addr <= VMEMMAP_END) &&
	    !((addr - VMEMMAP_VADDR) % size)) {
		pfn = (addr - machdep->machspec->vmemmap) / size;
		nr = pfn_to_section_nr(pfn);
		if (valid_section_nr(nr)) {
			if (phys)
				*phys = PTOB(pfn);
			return TRUE;
		}
	}
	return FALSE;
}

/*
 * Determine the vmemmap base address (vmcoreinfo, symbol table, or
 * computed from phys_offset) and, if found, install the page-pointer
 * recognizer above.
 */
static void arm64_get_vmemmap_page_ptr(void)
{
	struct machine_specific *ms = machdep->machspec;

	/* If vmemmap exists, it means kernel enabled CONFIG_SPARSEMEM_VMEMMAP */
	if (arm64_get_vmcoreinfo(&ms->vmemmap, "SYMBOL(vmemmap)", NUM_HEX))
		goto out;

	/* The global symbol of vmemmap is removed since kernel commit 7bc1a0f9e1765 */
	if (kernel_symbol_exists("vmemmap"))
		ms->vmemmap = symbol_value("vmemmap");
	else
		ms->vmemmap = ms->vmemmap_vaddr -
			((ms->phys_offset >> machdep->pageshift) * ms->struct_page_size);

out:
	if (ms->vmemmap)
		machdep->is_page_ptr = arm64_vmemmap_is_page_ptr;
}

static int
arm64_get_current_task_reg(int regno, const char *name,
int size, void *value, int sid) { struct bt_info bt_info, bt_setup; struct task_context *tc; struct user_regs_bitmap_struct *ur_bitmap; ulong ip, sp; bool ret = FALSE; switch (regno) { case X0_REGNUM ... PC_REGNUM: break; default: return FALSE; } tc = CURRENT_CONTEXT(); if (!tc) return FALSE; if (sid && sid <= extra_stacks_idx) { ur_bitmap = extra_stacks_regs[extra_stacks_idx - 1]; goto get_sub; } BZERO(&bt_setup, sizeof(struct bt_info)); clone_bt_info(&bt_setup, &bt_info, tc); if (bt_info.stackbase == 0) return FALSE; fill_stackbuf(&bt_info); get_dumpfile_regs(&bt_info, &sp, &ip); if (bt_info.stackbuf) FREEBUF(bt_info.stackbuf); ur_bitmap = (struct user_regs_bitmap_struct *)bt_info.machdep; if (!ur_bitmap) return FALSE; if (!bt_info.need_free) { goto get_all; } get_sub: switch (regno) { case X0_REGNUM ... X30_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + regno - X0_REGNUM)) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; case SP_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, sp))) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; case PC_REGNUM: if (!NUM_IN_BITMAP(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, pc))) { if (!sid) FREEBUF(ur_bitmap); return FALSE; } break; } get_all: switch (regno) { case X0_REGNUM ... X30_REGNUM: if (size != sizeof(ur_bitmap->ur.regs[regno])) break; memcpy(value, &ur_bitmap->ur.regs[regno], size); ret = TRUE; break; case SP_REGNUM: if (size != sizeof(ur_bitmap->ur.sp)) break; memcpy(value, &ur_bitmap->ur.sp, size); ret = TRUE; break; case PC_REGNUM: if (size != sizeof(ur_bitmap->ur.pc)) break; memcpy(value, &ur_bitmap->ur.pc, size); ret = TRUE; break; } if (!sid && bt_info.need_free) { FREEBUF(ur_bitmap); bt_info.need_free = FALSE; } return ret; } /* * Do all necessary machine-specific setup here. This is called several times * during initialization. 
*/ void arm64_init(int when) { ulong value; struct machine_specific *ms; #if defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the ARM64 architecture\n"); #endif switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf64_notes; break; case PRE_SYMTAB: machdep->machspec = &arm64_machine_specific; machdep->verify_symbol = arm64_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->verify_paddr = generic_verify_paddr; if (machdep->cmdline_args[0]) arm64_parse_cmdline_args(); machdep->flags |= MACHDEP_BT_TEXT; ms = machdep->machspec; /* * The st->_stext_vmlinux is needed in arm64_init(PRE_GDB) when a * dumpfile does not have vmcoreinfo and we use -m vabits_actual * option, e.g. a raw RAM dumpfile. */ if (ms->VA_BITS_ACTUAL) st->_stext_vmlinux = UNINITIALIZED; if (!ms->kimage_voffset && STREQ(pc->live_memsrc, "/dev/crash")) ioctl(pc->mfd, DEV_CRASH_ARCH_DATA, &ms->kimage_voffset); if (!ms->kimage_voffset) arm64_get_vmcoreinfo(&ms->kimage_voffset, "NUMBER(kimage_voffset)", NUM_HEX); if (ms->kimage_voffset || (ACTIVE() && (symbol_value_from_proc_kallsyms("kimage_voffset") != BADVAL))) { machdep->flags |= NEW_VMEMMAP; /* * Even if CONFIG_RANDOMIZE_BASE is not configured, * derive_kaslr_offset() should work and set * kt->relocate to 0 */ if (!kt->relocate && !(kt->flags2 & (RELOC_AUTO|KASLR))) kt->flags2 |= (RELOC_AUTO|KASLR); } break; case PRE_GDB: if (kernel_symbol_exists("kimage_voffset")) machdep->flags |= NEW_VMEMMAP; if (kernel_symbol_exists("cpu_enable_mte")) machdep->flags |= ARM64_MTE; if (!machdep->pagesize && arm64_get_vmcoreinfo(&value, "PAGESIZE", NUM_DEC)) machdep->pagesize = (unsigned int)value; if (!machdep->pagesize) { /* * Kerneldoc Documentation/arm64/booting.txt describes * the kernel image header flags field. 
*/ value = machdep->machspec->kernel_flags; value = (value >> 1) & 3; switch(value) { case 0: break; case 1: machdep->pagesize = 4096; break; case 2: machdep->pagesize = 16384; break; case 3: machdep->pagesize = 65536; break; } } /* * This code section will only be executed if the kernel is * earlier than Linux 4.4 (if there is no vmcoreinfo) */ if (!machdep->pagesize && kernel_symbol_exists("swapper_pg_dir") && kernel_symbol_exists("idmap_pg_dir")) { value = symbol_value("swapper_pg_dir") - symbol_value("idmap_pg_dir"); /* * idmap_pg_dir is 2 pages prior to 4.1, * and 3 pages thereafter. Only 4K and 64K * page sizes are supported. */ switch (value) { case (4096 * 2): case (4096 * 3): machdep->pagesize = 4096; break; case (65536 * 2): case (65536 * 3): machdep->pagesize = 65536; break; } } else if (ACTIVE()) machdep->pagesize = memory_page_size(); /* host */ machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); ms = machdep->machspec; arm64_get_struct_page_size(ms); arm64_calc_VA_BITS(); arm64_calc_KERNELPACMASK(); /* vabits_actual introduced after mm flip, so it should be flipped layout */ if (ms->VA_BITS_ACTUAL) { ms->page_offset = ARM64_FLIP_PAGE_OFFSET; /* useless on arm64 */ machdep->identity_map_base = ARM64_FLIP_PAGE_OFFSET; machdep->kvbase = ARM64_FLIP_PAGE_OFFSET; ms->userspace_top = ARM64_USERSPACE_TOP_ACTUAL; } else { ms->page_offset = ARM64_PAGE_OFFSET; machdep->identity_map_base = ARM64_PAGE_OFFSET; machdep->kvbase = ARM64_VA_START; ms->userspace_top = ARM64_USERSPACE_TOP; } machdep->is_kvaddr = arm64_is_kvaddr; machdep->kvtop = arm64_kvtop; /* The defaults */ ms->vmalloc_end = ARM64_VMALLOC_END; ms->vmemmap_vaddr = ARM64_VMEMMAP_VADDR; ms->vmemmap_end = ARM64_VMEMMAP_END; if (machdep->flags & NEW_VMEMMAP) { struct syment *sp; struct kernel_range *r; /* It is finally decided in arm64_calc_kernel_start() */ sp = kernel_symbol_search("_text"); ms->kimage_text = 
(sp ? sp->value : 0); sp = kernel_symbol_search("_end"); ms->kimage_end = (sp ? sp->value : 0); if (ms->struct_page_size && (r = arm64_get_va_range(ms))) { /* We can get all the MODULES/VMALLOC/VMEMMAP ranges now.*/ ms->modules_vaddr = r->modules_vaddr; ms->modules_end = r->modules_end - 1; ms->vmalloc_start_addr = r->vmalloc_start_addr; ms->vmalloc_end = r->vmalloc_end - 1; ms->vmemmap_vaddr = r->vmemmap_vaddr; ms->vmemmap_end = r->vmemmap_end - 1; } else if (ms->VA_BITS_ACTUAL) { ms->modules_vaddr = (st->_stext_vmlinux & TEXT_OFFSET_MASK) - ARM64_MODULES_VSIZE; ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; ms->vmalloc_start_addr = ms->modules_end + 1; } else { ms->modules_vaddr = ARM64_VA_START; if (kernel_symbol_exists("kasan_init")) ms->modules_vaddr += ARM64_KASAN_SHADOW_SIZE; ms->modules_end = ms->modules_vaddr + ARM64_MODULES_VSIZE -1; ms->vmalloc_start_addr = ms->modules_end + 1; } arm64_calc_kimage_voffset(); } else { ms->modules_vaddr = ARM64_PAGE_OFFSET - MEGABYTES(64); ms->modules_end = ARM64_PAGE_OFFSET - 1; ms->vmalloc_start_addr = ARM64_VA_START; } switch (machdep->pagesize) { case 4096: machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_4K; if ((machdep->pgd = (char *)malloc(PTRS_PER_PGD_L3_4K * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if (machdep->machspec->VA_BITS > PGDIR_SHIFT_L4_4K) { machdep->flags |= VM_L4_4K; if ((machdep->pud = (char *)malloc(PTRS_PER_PUD_L4_4K * 8)) == NULL) error(FATAL, "cannot malloc pud space."); } else { machdep->flags |= VM_L3_4K; machdep->pud = NULL; /* not used */ } if ((machdep->pmd = (char *)malloc(PTRS_PER_PMD_L3_4K * 8)) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L3_4K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); break; case 16384: if (machdep->machspec->VA_BITS == 48) { machdep->flags |= VM_L4_16K; if (!machdep->ptrs_per_pgd) machdep->ptrs_per_pgd = PTRS_PER_PGD_L4_16K; if ((machdep->pgd = (char 
*)malloc(machdep->ptrs_per_pgd * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PTRS_PER_PUD_L4_16K * 8)) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PTRS_PER_PMD_L4_16K * 8)) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L4_16K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); } else if (machdep->machspec->VA_BITS == 47) { machdep->flags |= VM_L3_16K; if (!machdep->ptrs_per_pgd) machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_16K; if ((machdep->pgd = (char *)malloc(machdep->ptrs_per_pgd * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PTRS_PER_PMD_L3_16K * 8)) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L3_16K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->pud = NULL; /* not used */ } else if (machdep->machspec->VA_BITS == 36) { machdep->flags |= VM_L2_16K; if (!machdep->ptrs_per_pgd) machdep->ptrs_per_pgd = PTRS_PER_PGD_L2_16K; if ((machdep->pgd = (char *)malloc(machdep->ptrs_per_pgd * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L2_16K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->pmd = NULL; /* not used */ machdep->pud = NULL; /* not used */ } else { error(FATAL, "Do not support 52 bits, 4-level for 16K page now."); } break; case 65536: /* * idmap_ptrs_per_pgd has been removed since Linux-v6.0-rc1, see: * commit ebd9aea1f27e ("arm64: head: drop idmap_ptrs_per_pgd") */ if (kernel_symbol_exists("idmap_ptrs_per_pgd") && readmem(symbol_value("idmap_ptrs_per_pgd"), KVADDR, &value, sizeof(ulong), "idmap_ptrs_per_pgd", QUIET|RETURN_ON_ERROR)) machdep->ptrs_per_pgd = value; if (machdep->machspec->VA_BITS > PGDIR_SHIFT_L3_64K) { machdep->flags |= VM_L3_64K; if (!machdep->ptrs_per_pgd) { if (machdep->machspec->VA_BITS == 52) 
machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_64K_52; else if (machdep->machspec->VA_BITS == 48) machdep->ptrs_per_pgd = PTRS_PER_PGD_L3_64K_48; else error(FATAL, "wrong VA_BITS for 64K page."); } if ((machdep->pgd = (char *)malloc(machdep->ptrs_per_pgd * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PTRS_PER_PMD_L3_64K * 8)) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L3_64K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); } else { machdep->flags |= VM_L2_64K; if (!machdep->ptrs_per_pgd) machdep->ptrs_per_pgd = PTRS_PER_PGD_L2_64K; if ((machdep->pgd = (char *)malloc(machdep->ptrs_per_pgd * 8)) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->ptbl = (char *)malloc(PTRS_PER_PTE_L2_64K * 8)) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->pmd = NULL; /* not used */ } machdep->pud = NULL; /* not used */ break; default: if (machdep->pagesize) error(FATAL, "invalid/unsupported page size: %d\n", machdep->pagesize); else error(FATAL, "cannot determine page size\n"); } machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->clear_machdep_cache = arm64_clear_machdep_cache; machdep->stacksize = ARM64_STACK_SIZE; machdep->flags |= VMEMMAP; machdep->uvtop = arm64_uvtop; machdep->is_uvaddr = arm64_is_uvaddr; machdep->eframe_search = arm64_eframe_search; machdep->back_trace = arm64_back_trace_cmd; machdep->in_alternate_stack = arm64_in_alternate_stack; machdep->processor_speed = arm64_processor_speed; machdep->get_task_pgd = arm64_get_task_pgd; machdep->get_stack_frame = arm64_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = arm64_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = arm64_vmalloc_start; machdep->get_kvaddr_ranges = arm64_get_kvaddr_ranges; 
machdep->is_task_addr = arm64_is_task_addr; machdep->dis_filter = arm64_dis_filter; machdep->cmd_mach = arm64_cmd_mach; machdep->get_smp_cpus = arm64_get_smp_cpus; machdep->line_number_hooks = NULL; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->dumpfile_init = NULL; machdep->verify_line_number = NULL; machdep->init_kernel_pgd = arm64_init_kernel_pgd; machdep->get_current_task_reg = arm64_get_current_task_reg; /* use machdep parameters */ arm64_calc_phys_offset(); arm64_calc_physvirt_offset(); if (CRASHDEBUG(1)) { if (machdep->flags & NEW_VMEMMAP) fprintf(fp, "kimage_voffset: %lx\n", machdep->machspec->kimage_voffset); fprintf(fp, "phys_offset: %lx\n", machdep->machspec->phys_offset); fprintf(fp, "physvirt_offset: %lx\n", machdep->machspec->physvirt_offset); } break; case POST_GDB: /* Rely on kernel version to decide the kernel start address */ arm64_calc_kernel_start(); /* Can we get the size of struct page before POST_GDB */ ms = machdep->machspec; if (!ms->struct_page_size) arm64_calc_virtual_memory_ranges(); arm64_get_vmemmap_page_ptr(); arm64_get_section_size_bits(); if (!machdep->max_physmem_bits) { if (arm64_get_vmcoreinfo(&machdep->max_physmem_bits, "NUMBER(MAX_PHYSMEM_BITS)", NUM_DEC)) { /* nothing */ } else if (machdep->machspec->VA_BITS == 52) /* guess */ machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_52; else if (THIS_KERNEL_VERSION >= LINUX(3,17,0)) machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_3_17; else machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; } if (CRASHDEBUG(1)) { if (ms->VA_BITS_ACTUAL) { fprintf(fp, "CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); fprintf(fp, " VA_BITS_ACTUAL: %ld\n", ms->VA_BITS_ACTUAL); fprintf(fp, "(calculated) VA_BITS: %ld\n", ms->VA_BITS); fprintf(fp, " PAGE_OFFSET: %lx\n", ARM64_FLIP_PAGE_OFFSET); fprintf(fp, " VA_START: %lx\n", ms->VA_START); 
fprintf(fp, " modules: %lx - %lx\n", ms->modules_vaddr, ms->modules_end); fprintf(fp, " vmalloc: %lx - %lx\n", ms->vmalloc_start_addr, ms->vmalloc_end); fprintf(fp, "kernel image: %lx - %lx\n", ms->kimage_text, ms->kimage_end); fprintf(fp, " vmemmap: %lx - %lx\n\n", ms->vmemmap_vaddr, ms->vmemmap_end); } } if (THIS_KERNEL_VERSION >= LINUX(5,19,0)) { ms->__SWP_TYPE_BITS = 5; ms->__SWP_TYPE_SHIFT = 3; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 50; ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); ms->PTE_PROT_NONE = (1UL << 58); ms->PTE_FILE = 0; /* unused */ } else if (THIS_KERNEL_VERSION >= LINUX(4,0,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 2; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 50; ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); ms->PTE_PROT_NONE = (1UL << 58); ms->PTE_FILE = 0; /* unused */ } else if (THIS_KERNEL_VERSION >= LINUX(3,13,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 3; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 49; ms->__SWP_OFFSET_MASK = ((1UL << ms->__SWP_OFFSET_BITS) - 1); ms->PTE_PROT_NONE = (1UL << 58); ms->PTE_FILE = (1UL << 2); } else if (THIS_KERNEL_VERSION >= LINUX(3,11,0)) { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 4; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 0; /* unused */ ms->__SWP_OFFSET_MASK = 0; /* unused */ ms->PTE_PROT_NONE = (1UL << 2); ms->PTE_FILE = (1UL << 3); } else { ms->__SWP_TYPE_BITS = 6; ms->__SWP_TYPE_SHIFT = 3; ms->__SWP_TYPE_MASK = ((1UL << ms->__SWP_TYPE_BITS) - 1); ms->__SWP_OFFSET_SHIFT = (ms->__SWP_TYPE_BITS + 
ms->__SWP_TYPE_SHIFT); ms->__SWP_OFFSET_BITS = 0; /* unused */ ms->__SWP_OFFSET_MASK = 0; /* unused */ ms->PTE_PROT_NONE = (1UL << 1); ms->PTE_FILE = (1UL << 2); } if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); if (!machdep->hz) machdep->hz = 100; /* * Let's calculate the KERNELPACMASK value based on the * vabits, see: * arch/arm64/kernel/vmcore_info.c * arch/arm64/include/asm/pointer_auth.h */ if(!machdep->machspec->CONFIG_ARM64_KERNELPACMASK) arm64_recalc_KERNELPACMASK(); arm64_irq_stack_init(); arm64_overflow_stack_init(); arm64_stackframe_init(); break; case POST_INIT: /* * crash_notes contains machine specific information about the * crash. In particular, it contains CPU registers at the time * of the crash. We need this information to extract correct * backtraces from the panic task. */ if (!LIVE()) arm64_get_crash_notes(); gdb_change_thread_context(); break; case LOG_ONLY: machdep->machspec = &arm64_machine_specific; arm64_calc_VA_BITS(); arm64_calc_KERNELPACMASK(); arm64_calc_phys_offset(); machdep->machspec->page_offset = ARM64_PAGE_OFFSET; arm64_calc_physvirt_offset(); break; } } struct kernel_va_range_handler { unsigned long kernel_versions_start; /* include */ unsigned long kernel_versions_end; /* exclude */ struct kernel_range *(*get_range)(struct machine_specific *); }; static struct kernel_range tmp_range; #define _PAGE_END(va) (-(1UL << ((va) - 1))) #define SZ_64K 0x00010000 #define SZ_2M 0x00200000 /* * Get the max shift of the size of struct page. * Most of the time, it is 64 bytes, but not sure. */ static int arm64_get_struct_page_max_shift(struct machine_specific *ms) { return (int)ceil(log2(ms->struct_page_size)); } /* Return TRUE if we succeed, return FALSE on failure. 
 */
/*
 * Read a numeric VMCOREINFO entry named 'label' into *vaddr.
 * 'base' selects the expected radix: NUM_HEX or NUM_DEC.
 * Returns TRUE on success, FALSE if the entry is absent or 'base'
 * is not one of the two supported radixes.
 */
static int arm64_get_vmcoreinfo(unsigned long *vaddr, const char *label, int base)
{
	int err = 0;
	char *string = pc->read_vmcoreinfo(label);	/* caller must free; we do below */

	if (!string)
		return FALSE;
	switch (base) {
	case NUM_HEX:
		*vaddr = strtoul(string, NULL, 16);
		break;
	case NUM_DEC:
		*vaddr = strtoul(string, NULL, 10);
		break;
	default:
		err++;
		error(INFO, "Unknown type:%#x, (NUM_HEX|NUM_DEC)\n", base);
	}
	free(string);
	return err ? FALSE : TRUE;
}

/*
 * The change is caused by the kernel patch since v5.18-rc1:
 *   "arm64: crash_core: Export MODULES, VMALLOC, and VMEMMAP ranges"
 *
 * All three ranges are exported directly in VMCOREINFO, so just read
 * them.  Returns the shared tmp_range, or NULL if any entry is missing.
 */
static struct kernel_range *arm64_get_range_v5_18(struct machine_specific *ms)
{
	struct kernel_range *r = &tmp_range;

	/* Get the MODULES_VADDR ~ MODULES_END */
	if (!arm64_get_vmcoreinfo(&r->modules_vaddr, "NUMBER(MODULES_VADDR)", NUM_HEX))
		return NULL;
	if (!arm64_get_vmcoreinfo(&r->modules_end, "NUMBER(MODULES_END)", NUM_HEX))
		return NULL;

	/* Get the VMEMMAP_START ~ VMEMMAP_END */
	if (!arm64_get_vmcoreinfo(&r->vmemmap_vaddr, "NUMBER(VMEMMAP_START)", NUM_HEX))
		return NULL;
	if (!arm64_get_vmcoreinfo(&r->vmemmap_end, "NUMBER(VMEMMAP_END)", NUM_HEX))
		return NULL;

	/* Get the VMALLOC_START ~ VMALLOC_END */
	if (!arm64_get_vmcoreinfo(&r->vmalloc_start_addr, "NUMBER(VMALLOC_START)", NUM_HEX))
		return NULL;
	if (!arm64_get_vmcoreinfo(&r->vmalloc_end, "NUMBER(VMALLOC_END)", NUM_HEX))
		return NULL;
	return r;
}

/*
 * The change is caused by the kernel patch since v5.17-rc1:
 *   "b89ddf4cca43 arm64/bpf: Remove 128MB limit for BPF JIT programs"
 *
 * Ranges are recomputed from CONFIG_ARM64_VA_BITS using the v5.17
 * layout (no separate BPF JIT region below modules).
 * Returns NULL if CONFIG_ARM64_VA_BITS is not known yet.
 */
static struct kernel_range *arm64_get_range_v5_17(struct machine_specific *ms)
{
	struct kernel_range *r = &tmp_range;
	unsigned long v = ms->CONFIG_ARM64_VA_BITS;
	unsigned long vmem_shift, vmemmap_size;

	/* Not initialized yet */
	if (v == 0)
		return NULL;

	/* Linear map never uses more than 48 bits for these calculations */
	if (v > 48)
		v = 48;

	/* Get the MODULES_VADDR ~ MODULES_END */
	r->modules_vaddr = _PAGE_END(v);
	r->modules_end = r->modules_vaddr + MEGABYTES(128);

	/* Get the VMEMMAP_START ~ VMEMMAP_END */
	vmem_shift = machdep->pageshift - arm64_get_struct_page_max_shift(ms);
	vmemmap_size = (_PAGE_END(v) - PAGE_OFFSET) >> vmem_shift;
	r->vmemmap_vaddr = (-(1UL << (ms->CONFIG_ARM64_VA_BITS - vmem_shift)));
	r->vmemmap_end = r->vmemmap_vaddr + vmemmap_size;

	/* Get the VMALLOC_START ~ VMALLOC_END */
	r->vmalloc_start_addr = r->modules_end;
	r->vmalloc_end = r->vmemmap_vaddr - MEGABYTES(256);
	return r;
}

/*
 * The change is caused by the kernel patch since v5.11:
 *   "9ad7c6d5e75b arm64: mm: tidy up top of kernel VA space"
 *
 * Same as the v5.17 layout except that a 128MB BPF JIT region still
 * sits between _PAGE_END(v) and MODULES_VADDR.
 */
static struct kernel_range *arm64_get_range_v5_11(struct machine_specific *ms)
{
	struct kernel_range *r = &tmp_range;
	unsigned long v = ms->CONFIG_ARM64_VA_BITS;
	unsigned long vmem_shift, vmemmap_size, bpf_jit_size = MEGABYTES(128);

	/* Not initialized yet */
	if (v == 0)
		return NULL;

	if (v > 48)
		v = 48;

	/* Get the MODULES_VADDR ~ MODULES_END */
	r->modules_vaddr = _PAGE_END(v) + bpf_jit_size;
	r->modules_end = r->modules_vaddr + MEGABYTES(128);

	/* Get the VMEMMAP_START ~ VMEMMAP_END */
	vmem_shift = machdep->pageshift - arm64_get_struct_page_max_shift(ms);
	vmemmap_size = (_PAGE_END(v) - PAGE_OFFSET) >> vmem_shift;
	r->vmemmap_vaddr = (-(1UL << (ms->CONFIG_ARM64_VA_BITS - vmem_shift)));
	r->vmemmap_end = r->vmemmap_vaddr + vmemmap_size;

	/* Get the VMALLOC_START ~ VMALLOC_END */
	r->vmalloc_start_addr = r->modules_end;
	r->vmalloc_end = r->vmemmap_vaddr - MEGABYTES(256);
	return r;
}

/*
 * Return PUD_SIZE for the current page size / VA-bits combination,
 * or 0 when it cannot be determined (e.g. 16K pages are not handled
 * here).
 */
static unsigned long arm64_get_pud_size(void)
{
	unsigned long PUD_SIZE = 0;

	switch (machdep->pagesize) {
	case 4096:
		if (machdep->machspec->VA_BITS > PGDIR_SHIFT_L4_4K) {
			PUD_SIZE = PUD_SIZE_L4_4K;
		} else {
			PUD_SIZE = PGDIR_SIZE_L3_4K;
		}
		break;
	case 65536:
		PUD_SIZE = PGDIR_SIZE_L2_64K;
		/* falls through to default, which just breaks */
	default:
		break;
	}
	return PUD_SIZE;
}

/*
 * The change is caused by the kernel patches since v5.4, such as:
 *   "ce3aaed87344 arm64: mm: Modify calculation of VMEMMAP_SIZE"
 *   "14c127c957c1 arm64: mm: Flip kernel VA space"
 */
static struct kernel_range *arm64_get_range_v5_4(struct machine_specific *ms)
{
	struct kernel_range *r = &tmp_range;
	unsigned long v = ms->CONFIG_ARM64_VA_BITS;
	unsigned long kasan_shadow_shift, kasan_shadow_offset, PUD_SIZE;
	unsigned long vmem_shift, vmemmap_size, bpf_jit_size = MEGABYTES(128);
	char *string;
	int ret;

	/* Not initialized yet */
	if (v == 0)
		return NULL;

	if (v > 48)
		v = 48;

	/*
	 * Get the MODULES_VADDR ~ MODULES_END.
	 * With KASAN the module area starts above the KASAN shadow,
	 * so the shadow shift and offset must be read from the kernel
	 * config; without config data we cannot compute the range.
	 */
	if (kernel_symbol_exists("kasan_init")) {
		/* See the arch/arm64/Makefile */
		ret = get_kernel_config("CONFIG_KASAN_SW_TAGS", NULL);
		if (ret == IKCONFIG_N)
			return NULL;
		kasan_shadow_shift = (ret == IKCONFIG_Y) ? 4 : 3;

		/* See the arch/arm64/Kconfig*/
		ret = get_kernel_config("CONFIG_KASAN_SHADOW_OFFSET", &string);
		if (ret != IKCONFIG_STR)
			return NULL;
		kasan_shadow_offset = atol(string);

		r->modules_vaddr = (1UL << (64 - kasan_shadow_shift)) +
					kasan_shadow_offset + bpf_jit_size;
	} else {
		r->modules_vaddr = _PAGE_END(v) + bpf_jit_size;
	}
	r->modules_end = r->modules_vaddr + MEGABYTES(128);

	/* Get the VMEMMAP_START ~ VMEMMAP_END */
	vmem_shift = machdep->pageshift - arm64_get_struct_page_max_shift(ms);
	vmemmap_size = (_PAGE_END(v) - PAGE_OFFSET) >> vmem_shift;
	r->vmemmap_vaddr = (-vmemmap_size - SZ_2M);
	/*
	 * In the v5.7, the patch: "bbd6ec605c arm64/mm: Enable memory hot remove"
	 * adds the VMEMMAP_END.
	 *
	 * But before the VMEMMAP_END was added to kernel, we can also see
	 * the following in arch/arm64/mm/dump.c:
	 *     { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
	 */
	r->vmemmap_end = r->vmemmap_vaddr + vmemmap_size;

	/* Get the VMALLOC_START ~ VMALLOC_END */
	PUD_SIZE = arm64_get_pud_size();
	r->vmalloc_start_addr = r->modules_end;
	r->vmalloc_end = (-PUD_SIZE - vmemmap_size - SZ_64K);
	return r;
}

/*
 * The change is caused by the kernel patches since v5.0, such as:
 *   "91fc957c9b1d arm64/bpf: don't allocate BPF JIT programs in module memory"
 *
 * Pre-flip layout: the kernel VA space starts at va_start and the
 * linear map occupies the upper half starting at page_offset.
 */
static struct kernel_range *arm64_get_range_v5_0(struct machine_specific *ms)
{
	struct kernel_range *r = &tmp_range;
	unsigned long v = ms->CONFIG_ARM64_VA_BITS;
	unsigned long kasan_shadow_shift, PUD_SIZE;
	unsigned long vmemmap_size, bpf_jit_size = MEGABYTES(128);
	unsigned long va_start, page_offset;
	int ret;

	/* Not initialized yet */
	if (v == 0)
		return NULL;

	va_start = (0xffffffffffffffffUL - (1UL << v) + 1);
	page_offset = (0xffffffffffffffffUL - (1UL << (v - 1)) + 1);

	/* Get the MODULES_VADDR ~ MODULES_END */
	if (kernel_symbol_exists("kasan_init")) {
		/* See the arch/arm64/Makefile */
		ret = get_kernel_config("CONFIG_KASAN_SW_TAGS", NULL);
		if (ret == IKCONFIG_N)
			return NULL;
		kasan_shadow_shift = (ret == IKCONFIG_Y) ? 4 : 3;

		r->modules_vaddr = va_start + (1UL << (v - kasan_shadow_shift)) + bpf_jit_size;
	} else {
		r->modules_vaddr = va_start + bpf_jit_size;
	}
	r->modules_end = r->modules_vaddr + MEGABYTES(128);

	/* Get the VMEMMAP_START ~ VMEMMAP_END */
	vmemmap_size = (1UL << (v - machdep->pageshift - 1 +
				arm64_get_struct_page_max_shift(ms)));
	r->vmemmap_vaddr = page_offset - vmemmap_size;
	r->vmemmap_end = r->vmemmap_vaddr + vmemmap_size; /* See the arch/arm64/mm/dump.c */

	/* Get the VMALLOC_START ~ VMALLOC_END */
	PUD_SIZE = arm64_get_pud_size();
	r->vmalloc_start_addr = r->modules_end;
	r->vmalloc_end = page_offset - PUD_SIZE - vmemmap_size - SZ_64K;
	return r;
}

/*
 * Table of per-kernel-version range calculators; a version matches
 * when kernel_versions_start <= version < kernel_versions_end.
 */
static struct kernel_va_range_handler kernel_va_range_handlers[] = {
	{
		LINUX(5,18,0),
		LINUX(999,0,0), /* Just a boundary */
		get_range: arm64_get_range_v5_18,
	}, {
		LINUX(5,17,0), LINUX(5,18,0),
		get_range: arm64_get_range_v5_17,
	}, {
		LINUX(5,11,0), LINUX(5,17,0),
		get_range: arm64_get_range_v5_11,
	}, {
		LINUX(5,4,0), LINUX(5,11,0),
		get_range: arm64_get_range_v5_4,
	}, {
		LINUX(5,0,0), LINUX(5,4,0),
		get_range: arm64_get_range_v5_0,
	},
};

#define ARRAY_SIZE(a) (sizeof (a) / sizeof ((a)[0]))

/*
 * Return THIS_KERNEL_VERSION, parsing it from the VMCOREINFO
 * "OSRELEASE" entry first if it has not been determined yet.
 * May still return 0 if neither source is available.
 */
static unsigned long arm64_get_kernel_version(void)
{
	char *string;

	if (THIS_KERNEL_VERSION)
		return THIS_KERNEL_VERSION;

	if ((string = pc->read_vmcoreinfo("OSRELEASE"))) {
		parse_kernel_version(string);
		free(string);
	}
	return THIS_KERNEL_VERSION;
}

/* Return NULL if we fail.
 */
/*
 * Select the kernel-version-specific handler from
 * kernel_va_range_handlers[] and use it to compute the
 * MODULES/VMEMMAP/VMALLOC virtual address ranges.
 * On any failure, resets ms->struct_page_size to 0 (so that
 * arm64_calc_virtual_memory_ranges() can run instead) and
 * returns NULL.
 */
static struct kernel_range *arm64_get_va_range(struct machine_specific *ms)
{
	struct kernel_va_range_handler *h;
	unsigned long kernel_version = arm64_get_kernel_version();
	struct kernel_range *r = NULL;
	int i;

	if (!kernel_version)
		goto range_failed;

	for (i = 0; i < ARRAY_SIZE(kernel_va_range_handlers); i++) {
		h = kernel_va_range_handlers + i;

		/* Get the right hook for this kernel version */
		if (h->kernel_versions_start <= kernel_version &&
		    kernel_version < h->kernel_versions_end) {

			/* Get the correct virtual address ranges */
			r = h->get_range(ms);
			if (!r)
				goto range_failed;
			return r;
		}
	}

range_failed:
	/* Reset ms->struct_page_size to 0 for arm64_calc_virtual_memory_ranges() */
	ms->struct_page_size = 0;
	return NULL;
}

/* Get the size of struct page {} */
static void arm64_get_struct_page_size(struct machine_specific *ms)
{
	arm64_get_vmcoreinfo(&ms->struct_page_size, "SIZE(page)", NUM_DEC);
}

/*
 * Accept or reject a symbol from the kernel namelist.
 * As a side effect, accumulates the little-endian kernel image flags
 * from the _kernel_flags_le* absolute symbols, and flips on
 * KSYMS_START when "idmap_pg_dir" is seen.
 */
static int
arm64_verify_symbol(const char *name, ulong value, char type)
{
	if (!name || !strlen(name))
		return FALSE;

	if ((type == 'A') && STREQ(name, "_kernel_flags_le"))
		machdep->machspec->kernel_flags = le64toh(value);

	if ((type == 'A') && STREQ(name, "_kernel_flags_le_hi32"))
		machdep->machspec->kernel_flags |= ((ulong)le32toh(value) << 32);

	if ((type == 'A') && STREQ(name, "_kernel_flags_le_lo32"))
		machdep->machspec->kernel_flags |= le32toh(value);

	/* Absolute symbols must live in the upper VA half */
	if (((type == 'A') || (type == 'a')) && (highest_bit_long(value) != 63))
		return FALSE;

	if ((value == 0) && ((type == 'a') || (type == 'n') ||
	    (type == 'N') || (type == 'U')))
		return FALSE;

	/* Reject AArch64 mapping symbols ($d, $x, $c and dotted variants) */
	if (STREQ(name, "$d") || STRNEQ(name, "$d.") ||
	    STREQ(name, "$x") || STRNEQ(name, "$x.") ||
	    STREQ(name, "$c") || STRNEQ(name, "$c."))
		return FALSE;

	if ((type == 'A') && STRNEQ(name, "__crc_"))
		return FALSE;

	if ((type == 'N') && strstr(name, "$d"))
		return FALSE;

	if (!(machdep->flags & KSYMS_START) && STREQ(name, "idmap_pg_dir"))
		machdep->flags |= KSYMS_START;

	return TRUE;
}

void
arm64_dump_machdep_table(ulong arg) { const struct machine_specific *ms = machdep->machspec; int others, i; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PHYS_OFFSET) fprintf(fp, "%sPHYS_OFFSET", others++ ? "|" : ""); if (machdep->flags & VM_L2_64K) fprintf(fp, "%sVM_L2_64K", others++ ? "|" : ""); if (machdep->flags & VM_L3_64K) fprintf(fp, "%sVM_L3_64K", others++ ? "|" : ""); if (machdep->flags & VM_L2_16K) fprintf(fp, "%sVM_L2_16K", others++ ? "|" : ""); if (machdep->flags & VM_L3_16K) fprintf(fp, "%sVM_L3_16K", others++ ? "|" : ""); if (machdep->flags & VM_L4_16K) fprintf(fp, "%sVM_L4_16K", others++ ? "|" : ""); if (machdep->flags & VM_L3_4K) fprintf(fp, "%sVM_L3_4K", others++ ? "|" : ""); if (machdep->flags & VM_L4_4K) fprintf(fp, "%sVM_L4_4K", others++ ? "|" : ""); if (machdep->flags & VMEMMAP) fprintf(fp, "%sVMEMMAP", others++ ? "|" : ""); if (machdep->flags & KDUMP_ENABLED) fprintf(fp, "%sKDUMP_ENABLED", others++ ? "|" : ""); if (machdep->flags & IRQ_STACKS) fprintf(fp, "%sIRQ_STACKS", others++ ? "|" : ""); if (machdep->flags & UNW_4_14) fprintf(fp, "%sUNW_4_14", others++ ? "|" : ""); if (machdep->flags & MACHDEP_BT_TEXT) fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); if (machdep->flags & NEW_VMEMMAP) fprintf(fp, "%sNEW_VMEMMAP", others++ ? "|" : ""); if (machdep->flags & FLIPPED_VM) fprintf(fp, "%sFLIPPED_VM", others++ ? "|" : ""); if (machdep->flags & HAS_PHYSVIRT_OFFSET) fprintf(fp, "%sHAS_PHYSVIRT_OFFSET", others++ ? "|" : ""); if (machdep->flags & ARM64_MTE) fprintf(fp, "%sARM64_MTE", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %lx\n", (ulong)machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", (ulonglong)machdep->memsize, (ulonglong)machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: arm64_eframe_search()\n"); fprintf(fp, " back_trace: arm64_back_trace_cmd() (default: %s method)\n", kt->flags & USE_OPT_BT ? "optional" : "original"); fprintf(fp, " in_alternate_stack: arm64_in_alternate_stack()\n"); fprintf(fp, " processor_speed: arm64_processor_speed()\n"); fprintf(fp, " uvtop: arm64_uvtop()->%s()\n", machdep->flags & VM_L3_4K ? "arm64_vtop_3level_4k" : machdep->flags & VM_L4_4K ? "arm64_vtop_4level_4k" : machdep->flags & VM_L2_16K ? "arm64_vtop_2level_16k" : machdep->flags & VM_L3_16K ? "arm64_vtop_3level_16k" : machdep->flags & VM_L4_16K ? "arm64_vtop_4level_16k" : machdep->flags & VM_L3_64K ? "arm64_vtop_3level_64k" : "arm64_vtop_2level_64k"); fprintf(fp, " kvtop: arm64_kvtop()->%s()\n", machdep->flags & VM_L3_4K ? "arm64_vtop_3level_4k" : machdep->flags & VM_L4_4K ? "arm64_vtop_4level_4k" : machdep->flags & VM_L2_16K ? "arm64_vtop_2level_16k" : machdep->flags & VM_L3_16K ? "arm64_vtop_3level_16k" : machdep->flags & VM_L4_16K ? "arm64_vtop_4level_16k" : machdep->flags & VM_L3_64K ? 
"arm64_vtop_3level_64k" : "arm64_vtop_2level_64k"); fprintf(fp, " get_task_pgd: arm64_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: arm64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: arm64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: arm64_vmalloc_start()\n"); fprintf(fp, " get_kvaddr_ranges: arm64_get_kvaddr_ranges()\n"); fprintf(fp, " is_task_addr: arm64_is_task_addr()\n"); fprintf(fp, " verify_symbol: arm64_verify_symbol()\n"); fprintf(fp, " dis_filter: arm64_dis_filter()\n"); fprintf(fp, " cmd_mach: arm64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: arm64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: arm64_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: arm64_is_uvaddr()\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " init_kernel_pgd: arm64_init_kernel_pgd\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " dumpfile_init: (not used)\n"); fprintf(fp, " process_elf_notes: process_elf64_notes()\n"); fprintf(fp, " verify_line_number: (not used)\n"); fprintf(fp, " xendump_p2m_create: (n/a)\n"); fprintf(fp, "xen_kdump_p2m_create: (n/a)\n"); fprintf(fp, " xendump_panic_task: (n/a)\n"); fprintf(fp, " get_xendump_regs: (n/a)\n"); fprintf(fp, " line_number_hooks: (not used)\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pud_read: "); if ((!(machdep->flags & VM_L4_4K)) && (!(machdep->flags & VM_L4_16K))) fprintf(fp, "(not used)\n"); else fprintf(fp, "%lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: "); if ((machdep->flags & VM_L2_64K) || (machdep->flags & VM_L2_16K)) fprintf(fp, "(not used)\n"); else fprintf(fp, 
"%lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " clear_machdep_cache: arm64_clear_machdep_cache()\n"); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " machspec: %lx\n", (ulong)ms); fprintf(fp, " struct_page_size: %ld\n", ms->struct_page_size); fprintf(fp, " VA_BITS: %ld\n", ms->VA_BITS); fprintf(fp, " CONFIG_ARM64_VA_BITS: %ld\n", ms->CONFIG_ARM64_VA_BITS); fprintf(fp, " VA_START: "); if (ms->VA_START) fprintf(fp, "%lx\n", ms->VA_START); else fprintf(fp, "(unused)\n"); fprintf(fp, " VA_BITS_ACTUAL: "); if (ms->VA_BITS_ACTUAL) fprintf(fp, "%ld\n", ms->VA_BITS_ACTUAL); else fprintf(fp, "(unused)\n"); fprintf(fp, "CONFIG_ARM64_KERNELPACMASK: "); if (ms->CONFIG_ARM64_KERNELPACMASK) fprintf(fp, "%lx\n", ms->CONFIG_ARM64_KERNELPACMASK); else fprintf(fp, "(unused)\n"); fprintf(fp, " userspace_top: %016lx\n", ms->userspace_top); fprintf(fp, " page_offset: %016lx\n", ms->page_offset); fprintf(fp, " vmalloc_start_addr: %016lx\n", ms->vmalloc_start_addr); fprintf(fp, " vmalloc_end: %016lx\n", ms->vmalloc_end); fprintf(fp, " modules_vaddr: %016lx\n", ms->modules_vaddr); fprintf(fp, " modules_end: %016lx\n", ms->modules_end); fprintf(fp, " vmemmap_vaddr: %016lx\n", ms->vmemmap_vaddr); fprintf(fp, " vmemmap_end: %016lx\n", ms->vmemmap_end); if (machdep->flags & NEW_VMEMMAP) { fprintf(fp, " vmemmap: %016lx\n", ms->vmemmap); fprintf(fp, " 
kimage_text: %016lx\n", ms->kimage_text); fprintf(fp, " kimage_end: %016lx\n", ms->kimage_end); fprintf(fp, " kimage_voffset: %016lx\n", ms->kimage_voffset); } fprintf(fp, " phys_offset: %lx\n", ms->phys_offset); fprintf(fp, " physvirt_offset: %lx\n", ms->physvirt_offset); fprintf(fp, "__exception_text_start: %lx\n", ms->__exception_text_start); fprintf(fp, " __exception_text_end: %lx\n", ms->__exception_text_end); fprintf(fp, " __irqentry_text_start: %lx\n", ms->__irqentry_text_start); fprintf(fp, " __irqentry_text_end: %lx\n", ms->__irqentry_text_end); fprintf(fp, " exp_entry1_start: %lx\n", ms->exp_entry1_start); fprintf(fp, " exp_entry1_end: %lx\n", ms->exp_entry1_end); fprintf(fp, " exp_entry2_start: %lx\n", ms->exp_entry2_start); fprintf(fp, " exp_entry2_end: %lx\n", ms->exp_entry2_end); fprintf(fp, " panic_task_regs: %lx\n", (ulong)ms->panic_task_regs); fprintf(fp, " user_eframe_offset: %ld\n", ms->user_eframe_offset); fprintf(fp, " kern_eframe_offset: %ld\n", ms->kern_eframe_offset); fprintf(fp, " PTE_PROT_NONE: %lx\n", ms->PTE_PROT_NONE); fprintf(fp, " PTE_FILE: "); if (ms->PTE_FILE) fprintf(fp, "%lx\n", ms->PTE_FILE); else fprintf(fp, "(unused)\n"); fprintf(fp, " __SWP_TYPE_BITS: %ld\n", ms->__SWP_TYPE_BITS); fprintf(fp, " __SWP_TYPE_SHIFT: %ld\n", ms->__SWP_TYPE_SHIFT); fprintf(fp, " __SWP_TYPE_MASK: %lx\n", ms->__SWP_TYPE_MASK); fprintf(fp, " __SWP_OFFSET_BITS: "); if (ms->__SWP_OFFSET_BITS) fprintf(fp, "%ld\n", ms->__SWP_OFFSET_BITS); else fprintf(fp, "(unused)\n"); fprintf(fp, " __SWP_OFFSET_SHIFT: %ld\n", ms->__SWP_OFFSET_SHIFT); fprintf(fp, " __SWP_OFFSET_MASK: "); if (ms->__SWP_OFFSET_MASK) fprintf(fp, "%lx\n", ms->__SWP_OFFSET_MASK); else fprintf(fp, "(unused)\n"); fprintf(fp, " machine_kexec_start: %lx\n", ms->machine_kexec_start); fprintf(fp, " machine_kexec_end: %lx\n", ms->machine_kexec_end); fprintf(fp, " crash_kexec_start: %lx\n", ms->crash_kexec_start); fprintf(fp, " crash_kexec_end: %lx\n", ms->crash_kexec_end); fprintf(fp, " 
crash_save_cpu_start: %lx\n", ms->crash_save_cpu_start); fprintf(fp, " crash_save_cpu_end: %lx\n", ms->crash_save_cpu_end); fprintf(fp, " kernel_flags: %lx\n", ms->kernel_flags); fprintf(fp, " irq_stackbuf: %lx\n", (ulong)ms->irq_stackbuf); if (machdep->flags & IRQ_STACKS) { fprintf(fp, " irq_stack_size: %ld\n", ms->irq_stack_size); for (i = 0; i < kt->cpus; i++) fprintf(fp, " irq_stacks[%d]: %lx\n", i, ms->irq_stacks[i]); } else { fprintf(fp, " irq_stack_size: (unused)\n"); fprintf(fp, " irq_stacks: (unused)\n"); } } static int arm64_parse_machdep_arg_l(char *argstring, char *param, ulong *value) { int len; int megabytes = FALSE; char *p; len = strlen(param); if (!STRNEQ(argstring, param) || (argstring[len] != '=')) return FALSE; if ((LASTCHAR(argstring) == 'm') || (LASTCHAR(argstring) == 'M')) { LASTCHAR(argstring) = NULLCHAR; megabytes = TRUE; } p = argstring + len + 1; if (strlen(p)) { int flags = RETURN_ON_ERROR | QUIET; int err = 0; if (STRNEQ(argstring, "max_physmem_bits")) { *value = dtol(p, flags, &err); } else if (STRNEQ(argstring, "vabits_actual")) { *value = dtol(p, flags, &err); } else if (megabytes) { *value = dtol(p, flags, &err); if (!err) *value = MEGABYTES(*value); } else { *value = htol(p, flags, &err); } if (!err) return TRUE; } return FALSE; } /* * Parse machine dependent command line arguments. * * Force the phys_offset address via: * * --machdep phys_offset=
*/ static void arm64_parse_cmdline_args(void) { int index, i, c; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *p; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { error(WARNING, "ignoring --machdep option: %x\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { if (arm64_parse_machdep_arg_l(arglist[i], "phys_offset", &machdep->machspec->phys_offset)) { error(NOTE, "setting phys_offset to: 0x%lx\n\n", machdep->machspec->phys_offset); machdep->flags |= PHYS_OFFSET; continue; } else if (arm64_parse_machdep_arg_l(arglist[i], "kimage_voffset", &machdep->machspec->kimage_voffset)) { error(NOTE, "setting kimage_voffset to: 0x%lx\n\n", machdep->machspec->kimage_voffset); continue; } else if (arm64_parse_machdep_arg_l(arglist[i], "max_physmem_bits", &machdep->max_physmem_bits)) { error(NOTE, "setting max_physmem_bits to: %ld\n\n", machdep->max_physmem_bits); continue; } else if (arm64_parse_machdep_arg_l(arglist[i], "vabits_actual", &machdep->machspec->VA_BITS_ACTUAL)) { error(NOTE, "setting vabits_actual to: %ld\n\n", machdep->machspec->VA_BITS_ACTUAL); continue; } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } } } #define MIN_KIMG_ALIGN (0x00200000) /* kimage load address must be aligned 2M */ /* * Traverse the entire dumpfile to find/verify kimage_voffset. 
*/ static int arm64_search_for_kimage_voffset(ulong phys_base) { ulong kimage_load_addr; ulong phys_end; struct machine_specific *ms = machdep->machspec; if (!arm_kdump_phys_end(&phys_end)) return FALSE; for (kimage_load_addr = phys_base; kimage_load_addr <= phys_end; kimage_load_addr += MIN_KIMG_ALIGN) { ms->kimage_voffset = ms->vmalloc_start_addr - kimage_load_addr; if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) ms->kimage_voffset += (kt->relocate * - 1); if (verify_kimage_voffset()) { if (CRASHDEBUG(1)) error(INFO, "dumpfile searched for kimage_voffset: %lx\n\n", ms->kimage_voffset); break; } } if (kimage_load_addr > phys_end) return FALSE; return TRUE; } static int verify_kimage_voffset(void) { ulong kimage_voffset; if (!readmem(symbol_value("kimage_voffset"), KVADDR, &kimage_voffset, sizeof(kimage_voffset), "verify kimage_voffset", QUIET|RETURN_ON_ERROR)) return FALSE; return (machdep->machspec->kimage_voffset == kimage_voffset); } static void arm64_calc_kimage_voffset(void) { struct machine_specific *ms = machdep->machspec; ulong phys_addr = 0; int errflag; if (ms->kimage_voffset) /* vmcoreinfo, ioctl, or --machdep override */ return; if (ACTIVE()) { char buf[BUFSIZE]; char *p1; FILE *iomem; ulong kimage_voffset, vaddr; if (pc->flags & PROC_KCORE) { kimage_voffset = symbol_value_from_proc_kallsyms("kimage_voffset"); if ((kimage_voffset != BADVAL) && (READMEM(pc->mfd, &vaddr, sizeof(ulong), kimage_voffset, KCORE_USE_VADDR) > 0)) { ms->kimage_voffset = vaddr; return; } } if ((iomem = fopen("/proc/iomem", "r")) == NULL) return; errflag = 1; while (fgets(buf, BUFSIZE, iomem)) { if(strstr(buf, ": Kernel code")) { errflag = 0; break; } if (strstr(buf, ": System RAM")) { clean_line(buf); if (!(p1 = strstr(buf, "-"))) continue; *p1 = NULLCHAR; phys_addr = htol(buf, RETURN_ON_ERROR | QUIET, NULL); if (phys_addr == BADADDR) continue; } } fclose(iomem); if (errflag) return; } else if (KDUMP_DUMPFILE()) { errflag = 1; if (arm_kdump_phys_base(&phys_addr)) { /* Get 
start address of first memory block */ ms->kimage_voffset = ms->vmalloc_start_addr - phys_addr; if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) ms->kimage_voffset += (kt->relocate * -1); if (verify_kimage_voffset() || arm64_search_for_kimage_voffset(phys_addr)) errflag = 0; } if (errflag) { error(WARNING, "kimage_voffset cannot be determined from the dumpfile.\n"); error(CONT, "Try using the command line option: --machdep kimage_voffset=\n"); } return; } else { error(WARNING, "kimage_voffset cannot be determined from the dumpfile.\n"); error(CONT, "Using default value of 0. If this is not correct, then try\n"); error(CONT, "using the command line option: --machdep kimage_voffset=\n"); return; } ms->kimage_voffset = ms->vmalloc_start_addr - phys_addr; if ((kt->flags2 & KASLR) && (kt->flags & RELOC_SET)) ms->kimage_voffset += (kt->relocate * -1); } /* * The physvirt_offset only exits in kernel [5.4, 5.10) * * 1) In kernel v5.4, the patch: * "5383cc6efed137 arm64: mm: Introduce vabits_actual" * * introduced the physvirt_offset. * * 2) In kernel v5.10, the patch: * "7bc1a0f9e17658 arm64: mm: use single quantity * to represent the PA to VA translation" * removed the physvirt_offset. 
 */
/*
 * Determine ms->physvirt_offset for kernels that expose the
 * "physvirt_offset" symbol (only present in [v5.4, v5.10) per the
 * comment above).  Search order:
 *   1) live /proc/kcore session: read the value via its kallsyms
 *      virtual address, then correct ms->phys_offset from it;
 *   2) dumpfile: read the symbol directly, translating its virtual
 *      address with kimage_voffset;
 *   3) fallback: synthesize the value from phys_offset/page_offset so
 *      later arithmetic remains consistent.
 * Sets HAS_PHYSVIRT_OFFSET in machdep->flags when the symbol is read.
 */
static void
arm64_calc_physvirt_offset(void)
{
	struct machine_specific *ms = machdep->machspec;
	ulong physvirt_offset;
	struct syment *sp;
	ulong value;

	/* The symbol is only useful once kimage_voffset is known. */
	if ((sp = kernel_symbol_search("physvirt_offset")) &&
			machdep->machspec->kimage_voffset) {
		if (pc->flags & PROC_KCORE) {
			value = symbol_value_from_proc_kallsyms("physvirt_offset");
			if ((value != BADVAL) &&
			    (READMEM(pc->mfd, &physvirt_offset, sizeof(ulong),
				value, KCORE_USE_VADDR) > 0)) {
				machdep->flags |= HAS_PHYSVIRT_OFFSET;
				ms->physvirt_offset = physvirt_offset;

				/* Update the ms->phys_offset which is wrong */
				ms->phys_offset = ms->physvirt_offset + ms->page_offset;
				return;
			}
		}

		/* Dumpfile path: translate the symbol's vaddr to paddr
		 * with kimage_voffset and read the value directly. */
		if (READMEM(pc->mfd, &physvirt_offset, sizeof(physvirt_offset),
			sp->value,
			sp->value - machdep->machspec->kimage_voffset) > 0) {
			machdep->flags |= HAS_PHYSVIRT_OFFSET;
			ms->physvirt_offset = physvirt_offset;
			return;
		}
	}

	/* Useless if no symbol 'physvirt_offset', just keep semantics */
	ms->physvirt_offset = ms->phys_offset - ms->page_offset;
}

/*
 * Determine ms->phys_offset (the kernel's PHYS_OFFSET).  A valid
 * "--machdep phys_offset=" command-line override wins outright;
 * otherwise, on a live system, try vmcoreinfo / the "memstart_addr"
 * symbol / the first "System RAM" region in /proc/iomem; for dumpfiles
 * defer to the diskdump or kdump helpers.  Falls back to 0 with a
 * warning when nothing works.
 */
static void
arm64_calc_phys_offset(void)
{
	struct machine_specific *ms = machdep->machspec;
	ulong phys_offset;

	if (machdep->flags & PHYS_OFFSET) /* --machdep override */
		return;

	/*
	 * Next determine suitable value for phys_offset. User can override this
	 * by passing valid '--machdep phys_offset=' option.
	 */
	ms->phys_offset = 0;

	if (ACTIVE()) {
		char buf[BUFSIZE];
		char *p1;
		int errflag;
		FILE *iomem;
		physaddr_t paddr;
		ulong vaddr;
		struct syment *sp;

		/* Preferred: read the kernel's memstart_addr variable. */
		if ((machdep->flags & NEW_VMEMMAP) &&
		    ms->kimage_voffset &&
		    (sp = kernel_symbol_search("memstart_addr"))) {
			if (pc->flags & PROC_KCORE) {
				/* vmcoreinfo, if exported, is authoritative. */
				if (arm64_get_vmcoreinfo(&ms->phys_offset, "NUMBER(PHYS_OFFSET)", NUM_HEX))
					return;
				vaddr = symbol_value_from_proc_kallsyms("memstart_addr");
				if (vaddr == BADVAL)
					vaddr = sp->value;
				paddr = KCORE_USE_VADDR;
			} else {
				vaddr = sp->value;
				paddr = sp->value -
					machdep->machspec->kimage_voffset;
			}
			if (READMEM(pc->mfd, &phys_offset,
			    sizeof(phys_offset), vaddr, paddr) > 0) {
				ms->phys_offset = phys_offset;
				return;
			}
		}

		if ((iomem = fopen("/proc/iomem", "r")) == NULL)
			return;

		/*
		 * Memory regions are sorted in ascending order. We take the
		 * first region which should be correct for most uses.
		 */
		errflag = 1;
		while (fgets(buf, BUFSIZE, iomem)) {
			if (strstr(buf, ": System RAM")) {
				clean_line(buf);
				errflag = 0;
				break;
			}
		}
		fclose(iomem);

		if (errflag)
			return;

		/* Parse the start address out of "start-end : System RAM". */
		if (!(p1 = strstr(buf, "-")))
			return;

		*p1 = NULLCHAR;

		phys_offset = htol(buf, RETURN_ON_ERROR | QUIET, &errflag);
		if (errflag)
			return;

		ms->phys_offset = phys_offset;
	} else if (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_offset)) {
		ms->phys_offset = phys_offset;
	} else if (KDUMP_DUMPFILE() && arm64_kdump_phys_base(&phys_offset)) {
		ms->phys_offset = phys_offset;
	} else {
		error(WARNING,
			"phys_offset cannot be determined from the dumpfile.\n");
		error(CONT,
			"Using default value of 0. If this is not correct, then try\n");
		error(CONT,
			"using the command line option: --machdep phys_offset=\n");
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "using %lx as phys_offset\n", ms->phys_offset);
}

/*
 * Determine SECTION_SIZE_BITS either by reading VMCOREINFO or the kernel
 * config, otherwise use the 64-bit ARM default definition.
 */
static void
arm64_get_section_size_bits(void)
{
	int ret;
	char *string;

	/* Version-based default: changed in v5.12, and differs for 64K pages. */
	if (THIS_KERNEL_VERSION >= LINUX(5,12,0)) {
		if (machdep->pagesize == 65536)
			machdep->section_size_bits = _SECTION_SIZE_BITS_5_12_64K;
		else
			machdep->section_size_bits = _SECTION_SIZE_BITS_5_12;
	} else
		machdep->section_size_bits = _SECTION_SIZE_BITS;

	/* VMCOREINFO, when available, overrides the default; otherwise an
	 * in-kernel config (IKCONFIG) hotplug setting may override it. */
	if (arm64_get_vmcoreinfo(&machdep->section_size_bits, "NUMBER(SECTION_SIZE_BITS)", NUM_DEC)) {
		/* nothing */
	} else if (kt->ikconfig_flags & IKCONFIG_AVAIL) {
		if ((ret = get_kernel_config("CONFIG_MEMORY_HOTPLUG", NULL)) == IKCONFIG_Y) {
			if ((ret = get_kernel_config("CONFIG_HOTPLUG_SIZE_BITS", &string)) == IKCONFIG_STR)
				machdep->section_size_bits = atol(string);
		}
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "SECTION_SIZE_BITS: %ld\n", machdep->section_size_bits);
}

/*
 * Determine PHYS_OFFSET either by reading VMCOREINFO or the kernel
 * symbol, otherwise borrow the 32-bit ARM functionality.
 */
static int
arm64_kdump_phys_base(ulong *phys_offset)
{
	struct syment *sp;
	physaddr_t paddr;

	if (arm64_get_vmcoreinfo(phys_offset, "NUMBER(PHYS_OFFSET)", NUM_HEX))
		return TRUE;

	/* Translate memstart_addr's vaddr with kimage_voffset and read it. */
	if ((machdep->flags & NEW_VMEMMAP) &&
	    machdep->machspec->kimage_voffset &&
	    (sp = kernel_symbol_search("memstart_addr"))) {
		paddr = sp->value - machdep->machspec->kimage_voffset;
		if (READMEM(-1, phys_offset, sizeof(*phys_offset),
		    sp->value, paddr) > 0)
			return TRUE;
	}

	return arm_kdump_phys_base(phys_offset);
}

/*
 * Record the kernel pgd for every cpu slot: prefer init_mm.pgd, fall
 * back to the swapper_pg_dir symbol address.
 */
static void
arm64_init_kernel_pgd(void)
{
	int i;
	ulong value;

	if (!kernel_symbol_exists("init_mm") ||
	    !readmem(symbol_value("init_mm") + OFFSET(mm_struct_pgd), KVADDR,
	    &value, sizeof(void *), "init_mm.pgd", RETURN_ON_ERROR)) {
		if (kernel_symbol_exists("swapper_pg_dir"))
			value = symbol_value("swapper_pg_dir");
		else {
			error(WARNING, "cannot determine kernel pgd location\n");
			return;
		}
	}

	/* All cpus share the same kernel pgd. */
	for (i = 0; i < NR_CPUS; i++)
		vt->kernel_pgd[i] = value;
}

/*
 * Physical-to-virtual translation for the linear map.
 */
ulong
arm64_PTOV(ulong paddr)
{
	struct machine_specific *ms = machdep->machspec;

	/*
	 * Either older kernel before kernel has 'physvirt_offset' or newer
	 * kernel which removes 'physvirt_offset' has the same formula:
	 * #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
	 */
	if (!(machdep->flags & HAS_PHYSVIRT_OFFSET))
		return (paddr - ms->phys_offset) | PAGE_OFFSET;
	else
		return paddr - ms->physvirt_offset;
}

/*
 * Virtual-to-physical translation: handles MTE-tagged addresses, the
 * kernel image range (via kimage_voffset), the linear map, and the
 * legacy (pre-NEW_VMEMMAP) layout.
 */
ulong
arm64_VTOP(ulong addr)
{
	/* Strip any MTE tag bits before doing address arithmetic. */
	if (is_mte_kvaddr(addr))
		addr = mte_tag_reset(addr);

	if (machdep->flags & NEW_VMEMMAP) {
		/* Addresses inside the kernel image translate via kimage_voffset. */
		if (machdep->machspec->VA_START &&
		    (addr >= machdep->machspec->kimage_text) &&
		    (addr <= machdep->machspec->kimage_end)) {
			return addr - machdep->machspec->kimage_voffset;
		}

		if (addr >= machdep->machspec->page_offset) {
			if (machdep->flags & HAS_PHYSVIRT_OFFSET) {
				return addr + machdep->machspec->physvirt_offset;
			} else {
				/*
				 * Either older kernel before kernel has 'physvirt_offset' or newer
				 * kernel which removes 'physvirt_offset' has the same formula:
				 * #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
				 */
				return (addr & ~PAGE_OFFSET) + machdep->machspec->phys_offset;
			}
		} else if (machdep->machspec->kimage_voffset)
			return addr - machdep->machspec->kimage_voffset;
		else /* no randomness */
			return machdep->machspec->phys_offset +
				(addr - machdep->machspec->vmalloc_start_addr);
	} else {
		return machdep->machspec->phys_offset +
			(addr - machdep->machspec->page_offset);
	}
}

/*
 * Kernel virtual-to-physical translation entry point: linear-map
 * addresses use the arithmetic VTOP; vmalloc-range addresses (or any
 * address in verbose mode, to display the walk) go through the
 * configured page-table walker.
 */
static int
arm64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	ulong kernel_pgd;

	if (!IS_KVADDR(kvaddr))
		return FALSE;

	if (!vt->vmalloc_start) {
		*paddr = VTOP(kvaddr);
		return TRUE;
	}

	if (!IS_VMALLOC_ADDR(kvaddr)) {
		*paddr = VTOP(kvaddr);
		/* In verbose mode fall through to show the full walk. */
		if (!verbose)
			return TRUE;
	}

	kernel_pgd = vt->kernel_pgd[0];
	*paddr = 0;

	/* Dispatch on the page size / level configuration detected at init. */
	switch (machdep->flags & (VM_L2_64K|VM_L3_64K|VM_L3_4K|VM_L4_4K|VM_L2_16K|VM_L3_16K|VM_L4_16K))
	{
	case VM_L2_64K:
		return arm64_vtop_2level_64k(kernel_pgd, kvaddr, paddr, verbose);
	case VM_L3_64K:
		return arm64_vtop_3level_64k(kernel_pgd, kvaddr, paddr, verbose);
	case VM_L3_4K:
		return arm64_vtop_3level_4k(kernel_pgd, kvaddr, paddr, verbose);
	case VM_L4_4K:
		return arm64_vtop_4level_4k(kernel_pgd, kvaddr, paddr, verbose);
	case VM_L2_16K:
		return arm64_vtop_2level_16k(kernel_pgd, kvaddr, paddr, verbose);
	case VM_L3_16K:
		return arm64_vtop_3level_16k(kernel_pgd, kvaddr, paddr, verbose);
	case VM_L4_16K:
		return arm64_vtop_4level_16k(kernel_pgd, kvaddr, paddr, verbose);
	default:
		return FALSE;
	}
}

/*
 * User virtual-to-physical translation: reads the task's mm pgd and
 * walks it with the same configuration-dispatched walkers as kvtop.
 */
static int
arm64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose)
{
	ulong user_pgd;

	readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR,
		&user_pgd, sizeof(long), "user pgd", FAULT_ON_ERROR);

	*paddr = 0;

	switch (machdep->flags & (VM_L2_64K|VM_L3_64K|VM_L3_4K|VM_L4_4K|VM_L2_16K|VM_L3_16K|VM_L4_16K))
	{
	case VM_L2_64K:
		return arm64_vtop_2level_64k(user_pgd, uvaddr, paddr, verbose);
	case VM_L3_64K:
		return arm64_vtop_3level_64k(user_pgd, uvaddr, paddr, verbose);
	case VM_L3_4K:
		return arm64_vtop_3level_4k(user_pgd, uvaddr, paddr, verbose);
	case VM_L4_4K:
		return arm64_vtop_4level_4k(user_pgd, uvaddr, paddr, verbose);
	case VM_L2_16K:
		return arm64_vtop_2level_16k(user_pgd, uvaddr, paddr, verbose);
	case VM_L3_16K:
		return arm64_vtop_3level_16k(user_pgd, uvaddr, paddr, verbose);
	case VM_L4_16K:
		return arm64_vtop_4level_16k(user_pgd, uvaddr, paddr, verbose);
	default:
		return FALSE;
	}
}

/* Output-address bits of a table descriptor; the high nibble handling
 * supports 52-bit physical addressing (bits [15:12] hold PA[51:48]). */
#define PTE_ADDR_LOW ((((1UL) << (48 - machdep->pageshift)) - 1) << machdep->pageshift)
#define PTE_ADDR_HIGH ((0xfUL) << 12)
#define PTE_ADDR_HIGH_SHIFT 36
#define PTE_TO_PHYS(pteval) (machdep->max_physmem_bits == 52 ? \
	(((pteval & PTE_ADDR_LOW) | ((pteval & PTE_ADDR_HIGH) << PTE_ADDR_HIGH_SHIFT))) : (pteval & PTE_ADDR_LOW))

/* Descriptor type fields (low two bits of a table entry). */
#define PUD_TYPE_MASK 3
#define PUD_TYPE_SECT 1
#define PMD_TYPE_MASK 3
#define PMD_TYPE_SECT 1
#define PMD_TYPE_TABLE 2

/* Block ("section") mapping masks for each page size / level combo. */
#define SECTION_PAGE_MASK_2MB ((long)(~((MEGABYTES(2))-1)))
#define SECTION_PAGE_MASK_32MB ((long)(~((MEGABYTES(32))-1)))
#define SECTION_PAGE_MASK_512MB ((long)(~((MEGABYTES(512))-1)))
#define SECTION_PAGE_MASK_1GB ((long)(~((GIGABYTES(1))-1)))
#define SECTION_PAGE_MASK_64GB ((long)(~((GIGABYTES(64))-1)))

/*
 * 64K pages, 2-level: PGD -> PTE, with 512MB block mappings at PGD level.
 */
static int
arm64_vtop_2level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, machdep->ptrs_per_pgd * sizeof(ulong));
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L2_64K) & (machdep->ptrs_per_pgd - 1));
	pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	/*
	 * #define __PAGETABLE_PUD_FOLDED
	 * #define __PAGETABLE_PMD_FOLDED
	 */

	if ((pgd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pgd_val & SECTION_PAGE_MASK_512MB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (512MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pgd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_512MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L2_64K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L2_64K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * 64K pages, 3-level: PGD -> PMD -> PTE, with 512MB block mappings at
 * PMD level.
 */
static int
arm64_vtop_3level_64k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pmd_base, *pmd_ptr, pmd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, machdep->ptrs_per_pgd * sizeof(ulong));
	/*
	 * Use machdep->ptrs_per_pgd to mask vaddr instead of using macro, because
	 * 48-bits and 52-bits have different size of ptrs_per_pgd.
	 */
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L3_64K) & (machdep->ptrs_per_pgd - 1));
	pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_L3_64K(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	/*
	 * #define __PAGETABLE_PUD_FOLDED
	 */

	pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_64K * sizeof(ulong));
	pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_64K) & (PTRS_PER_PMD_L3_64K - 1));
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr));
	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val);
	if (!pmd_val)
		goto no_page;

	if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pmd_val & SECTION_PAGE_MASK_512MB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (512MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pmd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_512MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_64K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_64K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * 16K pages, 2-level: PGD -> PTE, with 32MB block mappings at PGD level.
 */
static int
arm64_vtop_2level_16k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, machdep->ptrs_per_pgd * sizeof(ulong));
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L2_16K) & (machdep->ptrs_per_pgd - 1));
	pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_L2_16K(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	/*
	 * #define __PAGETABLE_PUD_FOLDED
	 * #define __PAGETABLE_PMD_FOLDED
	 */

	if ((pgd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pgd_val & SECTION_PAGE_MASK_32MB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (32MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pgd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_32MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L2_16K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L2_16K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * 16K pages, 3-level: PGD -> PMD -> PTE, with 32MB block mappings at
 * PMD level.
 */
static int
arm64_vtop_3level_16k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pmd_base, *pmd_ptr, pmd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, machdep->ptrs_per_pgd * sizeof(ulong));
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L3_16K) & (machdep->ptrs_per_pgd - 1));
	pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_L3_16K(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	/*
	 * #define __PAGETABLE_PUD_FOLDED
	 */

	pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_16K * sizeof(ulong));
	pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_16K) & (PTRS_PER_PMD_L3_16K - 1));
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr));
	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val);
	if (!pmd_val)
		goto no_page;

	if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pmd_val & SECTION_PAGE_MASK_32MB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (32MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pmd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_32MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_16K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_16K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * 16K pages, 4-level: PGD -> PUD -> PMD -> PTE, with 32MB block
 * mappings at PMD level.
 */
static int
arm64_vtop_4level_16k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pud_base, *pud_ptr, pud_val;
	ulong *pmd_base, *pmd_ptr, pmd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L4_16K * sizeof(ulong));
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L4_16K) & (PTRS_PER_PGD_L4_16K - 1));
	pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_L4_16K(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	pud_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PUD(pud_base, KVADDR, PTRS_PER_PUD_L4_16K * sizeof(ulong));
	pud_ptr = pud_base + (((vaddr) >> PUD_SHIFT_L4_16K) & (PTRS_PER_PUD_L4_16K - 1));
	pud_val = ULONG(machdep->pud + PAGEOFFSET(pud_ptr));
	if (verbose)
		fprintf(fp, " PUD: %lx => %lx\n", (ulong)pud_ptr, pud_val);
	if (!pud_val)
		goto no_page;

	/*
	 * TODO:
	 * PUD Section mapping is only supported for LPA2 is enabled.
	 * LPA2 depends on CONFIG_ARM64_16K_PAGES and CONFIG_ARM64_PA_BITS_52.
	 */

	pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pud_val));
	FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L4_16K * sizeof(ulong));
	pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L4_16K) & (PTRS_PER_PMD_L4_16K - 1));
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr));
	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val);
	if (!pmd_val)
		goto no_page;

	if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		/* NOTE(review): masks after PTE_TO_PHYS here, unlike the other
		 * walkers which mask first — verify intent against upstream. */
		ulong sectionbase = PTE_TO_PHYS(pmd_val) & SECTION_PAGE_MASK_32MB;
		if (verbose) {
			fprintf(fp, " PAGE: %lx (32MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pmd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_32MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L4_16K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L4_16K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * 4K pages, 3-level: PGD -> PMD -> PTE, with 1GB block mappings at PGD
 * level and 2MB block mappings at PMD level.
 */
static int
arm64_vtop_3level_4k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pmd_base, *pmd_ptr, pmd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L3_4K * sizeof(ulong));
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L3_4K) & (PTRS_PER_PGD_L3_4K - 1));
	pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	if ((pgd_val & PUD_TYPE_MASK) == PUD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pgd_val & SECTION_PAGE_MASK_1GB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (1GB)\n\n", sectionbase);
			arm64_translate_pte(pgd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_1GB);
		return TRUE;
	}

	/*
	 * #define __PAGETABLE_PUD_FOLDED
	 */

	pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L3_4K * sizeof(ulong));
	pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L3_4K) & (PTRS_PER_PMD_L3_4K - 1));
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr));
	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val);
	if (!pmd_val)
		goto no_page;

	if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pmd_val & SECTION_PAGE_MASK_2MB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (2MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pmd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_2MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L3_4K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L3_4K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * 4K pages, 4-level: PGD -> PUD -> PMD -> PTE, with 1GB block mappings
 * at PUD level and 2MB block mappings at PMD level.
 */
static int
arm64_vtop_4level_4k(ulong pgd, ulong vaddr, physaddr_t *paddr, int verbose)
{
	ulong *pgd_base, *pgd_ptr, pgd_val;
	ulong *pud_base, *pud_ptr, pud_val;
	ulong *pmd_base, *pmd_ptr, pmd_val;
	ulong *pte_base, *pte_ptr, pte_val;

	if (verbose)
		fprintf(fp, "PAGE DIRECTORY: %lx\n", pgd);

	pgd_base = (ulong *)pgd;
	FILL_PGD(pgd_base, KVADDR, PTRS_PER_PGD_L4_4K * sizeof(ulong));
	pgd_ptr = pgd_base + (((vaddr) >> PGDIR_SHIFT_L4_4K) & (PTRS_PER_PGD_L4_4K - 1));
	pgd_val = ULONG(machdep->pgd + PGDIR_OFFSET_48VA(pgd_ptr));
	if (verbose)
		fprintf(fp, " PGD: %lx => %lx\n", (ulong)pgd_ptr, pgd_val);
	if (!pgd_val)
		goto no_page;

	pud_base = (ulong *)PTOV(PTE_TO_PHYS(pgd_val));
	FILL_PUD(pud_base, KVADDR, PTRS_PER_PUD_L4_4K * sizeof(ulong));
	pud_ptr = pud_base + (((vaddr) >> PUD_SHIFT_L4_4K) & (PTRS_PER_PUD_L4_4K - 1));
	pud_val = ULONG(machdep->pud + PAGEOFFSET(pud_ptr));
	if (verbose)
		fprintf(fp, " PUD: %lx => %lx\n", (ulong)pud_ptr, pud_val);
	if (!pud_val)
		goto no_page;

	if ((pud_val & PUD_TYPE_MASK) == PUD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pud_val & SECTION_PAGE_MASK_1GB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (1GB)\n\n", sectionbase);
			arm64_translate_pte(pud_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_1GB);
		return TRUE;
	}

	pmd_base = (ulong *)PTOV(PTE_TO_PHYS(pud_val));
	FILL_PMD(pmd_base, KVADDR, PTRS_PER_PMD_L4_4K * sizeof(ulong));
	pmd_ptr = pmd_base + (((vaddr) >> PMD_SHIFT_L4_4K) & (PTRS_PER_PMD_L4_4K - 1));
	pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr));
	if (verbose)
		fprintf(fp, " PMD: %lx => %lx\n", (ulong)pmd_ptr, pmd_val);
	if (!pmd_val)
		goto no_page;

	if ((pmd_val & PMD_TYPE_MASK) == PMD_TYPE_SECT) {
		ulong sectionbase = PTE_TO_PHYS(pmd_val & SECTION_PAGE_MASK_2MB);
		if (verbose) {
			fprintf(fp, " PAGE: %lx (2MB%s)\n\n", sectionbase,
				IS_ZEROPAGE(sectionbase) ? ", ZERO PAGE" : "");
			arm64_translate_pte(pmd_val, 0, 0);
		}
		*paddr = sectionbase + (vaddr & ~SECTION_PAGE_MASK_2MB);
		return TRUE;
	}

	pte_base = (ulong *)PTOV(PTE_TO_PHYS(pmd_val));
	FILL_PTBL(pte_base, KVADDR, PTRS_PER_PTE_L4_4K * sizeof(ulong));
	pte_ptr = pte_base + (((vaddr) >> machdep->pageshift) & (PTRS_PER_PTE_L4_4K - 1));
	pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr));
	if (verbose)
		fprintf(fp, " PTE: %lx => %lx\n", (ulong)pte_ptr, pte_val);
	if (!pte_val)
		goto no_page;

	if (pte_val & PTE_VALID) {
		*paddr = PTE_TO_PHYS(pte_val) + PAGEOFFSET(vaddr);
		if (verbose) {
			fprintf(fp, " PAGE: %lx %s\n\n", PAGEBASE(*paddr),
				IS_ZEROPAGE(PAGEBASE(*paddr)) ? "(ZERO PAGE)" : "");
			arm64_translate_pte(pte_val, 0, 0);
		}
	} else {
		/* Invalid PTE: for user addresses expose the raw (swap) entry. */
		if (IS_UVADDR(vaddr, NULL))
			*paddr = pte_val;
		if (verbose) {
			fprintf(fp, "\n");
			arm64_translate_pte(pte_val, 0, 0);
		}
		goto no_page;
	}

	return TRUE;
no_page:
	return FALSE;
}

/*
 * Return the given task's user pgd pointer, or NO_TASK on failure.
 */
static ulong
arm64_get_task_pgd(ulong task)
{
	struct task_context *tc;
	ulong pgd;

	if ((tc = task_to_context(task)) &&
	    readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR,
	    &pgd, sizeof(long), "user pgd", RETURN_ON_ERROR))
		return pgd;
	else
		return NO_TASK;
}

/* Processor speed is not determined on arm64. */
static ulong
arm64_processor_speed(void)
{
	return 0;
};

/*
 * Gather IRQ stack values.
 */
/*
 * Locate the per-cpu IRQ stacks and their size.  Two kernel variants:
 * a per-cpu "irq_stack" array (pre-v4.14 or CONFIG_VMAP_STACK off),
 * whose per-cpu address IS the stack; or a per-cpu "irq_stack_ptr"
 * pointer (v4.14+ with CONFIG_VMAP_STACK), which must be dereferenced.
 * On success sets ms->irq_stacks[], ms->irq_stack_size and the
 * IRQ_STACKS machdep flag.
 */
static void
arm64_irq_stack_init(void)
{
	int i;
	struct syment *sp;
	struct gnu_request request, *req;
	struct machine_specific *ms = machdep->machspec;
	ulong p, sz;
	req = &request;

	if (symbol_exists("irq_stack") &&
	    (sp = per_cpu_symbol_search("irq_stack")) &&
	    get_symbol_type("irq_stack", NULL, req)) {
		/* before v4.14 or CONFIG_VMAP_STACK disabled */
		if (CRASHDEBUG(1)) {
			fprintf(fp, "irq_stack: \n");
			fprintf(fp, " type: %s\n",
				(req->typecode == TYPE_CODE_ARRAY) ? "TYPE_CODE_ARRAY" : "other");
			fprintf(fp, " target_typecode: %s\n",
				req->target_typecode == TYPE_CODE_INT ? "TYPE_CODE_INT" : "other");
			fprintf(fp, " target_length: %ld\n", req->target_length);
			fprintf(fp, " length: %ld\n", req->length);
		}

		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
			error(FATAL, "cannot malloc irq_stack addresses\n");
		/* Array length from the debuginfo type is the stack size. */
		ms->irq_stack_size = req->length;
		machdep->flags |= IRQ_STACKS;

		for (i = 0; i < kt->cpus; i++)
			ms->irq_stacks[i] = kt->__per_cpu_offset[i] + sp->value;
	} else if (symbol_exists("irq_stack_ptr") &&
		(sp = per_cpu_symbol_search("irq_stack_ptr")) &&
		get_symbol_type("irq_stack_ptr", NULL, req)) {
		/* v4.14 and later with CONFIG_VMAP_STACK enabled */
		if (CRASHDEBUG(1)) {
			fprintf(fp, "irq_stack_ptr: \n");
			fprintf(fp, " type: %x, %s\n",
				(int)req->typecode,
				(req->typecode == TYPE_CODE_PTR) ? "TYPE_CODE_PTR" : "other");
			fprintf(fp, " target_typecode: %x, %s\n",
				(int)req->target_typecode,
				req->target_typecode == TYPE_CODE_INT ? "TYPE_CODE_INT" : "other");
			fprintf(fp, " target_length: %ld\n", req->target_length);
			fprintf(fp, " length: %ld\n", req->length);
		}

		if (!(ms->irq_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
			error(FATAL, "cannot malloc irq_stack addresses\n");

		/*
		 * Determining the IRQ_STACK_SIZE is tricky, but for now
		 * 4.14 kernel has:
		 *
		 *   #define IRQ_STACK_SIZE THREAD_SIZE
		 *
		 * and finding a solid usage of THREAD_SIZE is hard, but:
		 *
		 *   union thread_union {
		 *       ...
		 *       unsigned long stack[THREAD_SIZE/sizeof(long)];
		 *   };
		 */
		if (MEMBER_EXISTS("thread_union", "stack")) {
			if ((sz = MEMBER_SIZE("thread_union", "stack")) > 0)
				ms->irq_stack_size = sz;
		} else {
			/* No thread_union: derive the size, else use the default. */
			ulong res = arm64_set_irq_stack_size();
			ms->irq_stack_size = (res > 0) ? res : ARM64_IRQ_STACK_SIZE;
		}
		machdep->flags |= IRQ_STACKS;

		for (i = 0; i < kt->cpus; i++) {
			/* irq_stack_ptr is a pointer: read its per-cpu value. */
			p = kt->__per_cpu_offset[i] + sp->value;
			readmem(p, KVADDR, &(ms->irq_stacks[i]), sizeof(ulong),
				"IRQ stack pointer", RETURN_ON_ERROR);
		}
	}
}

/*
 * Gather Overflow stack values.
 *
 * Overflow stack supported since 4.14, in commit 872d8327c
 */
static void
arm64_overflow_stack_init(void)
{
	int i;
	struct syment *sp;
	struct gnu_request request, *req;
	struct machine_specific *ms = machdep->machspec;

	req = &request;
	if (symbol_exists("overflow_stack") &&
	    (sp = per_cpu_symbol_search("overflow_stack")) &&
	    get_symbol_type("overflow_stack", NULL, req)) {
		if (CRASHDEBUG(1)) {
			fprintf(fp, "overflow_stack: \n");
			fprintf(fp, " type: %x, %s\n",
				(int)req->typecode,
				(req->typecode == TYPE_CODE_ARRAY) ? "TYPE_CODE_ARRAY" : "other");
			fprintf(fp, " target_typecode: %x, %s\n",
				(int)req->target_typecode,
				req->target_typecode == TYPE_CODE_INT ? "TYPE_CODE_INT" : "other");
			fprintf(fp, " target_length: %ld\n", req->target_length);
			fprintf(fp, " length: %ld\n", req->length);
		}

		if (!(ms->overflow_stacks = (ulong *)malloc((size_t)(kt->cpus * sizeof(ulong)))))
			error(FATAL, "cannot malloc overflow_stack addresses\n");

		ms->overflow_stack_size = ARM64_OVERFLOW_STACK_SIZE;
		machdep->flags |= OVERFLOW_STACKS;

		for (i = 0; i < kt->cpus; i++)
			ms->overflow_stacks[i] = kt->__per_cpu_offset[i] + sp->value;
	}
}

/*
 * Gather and verify all of the backtrace requirements.
 */
static void
arm64_stackframe_init(void)
{
	long task_struct_thread;
	long thread_struct_cpu_context;
	long context_sp, context_pc, context_fp;
	long context_x19, context_x20, context_x21, context_x22;
	long context_x23, context_x24, context_x25, context_x26;
	long context_x27, context_x28;
	struct syment *sp1, *sp1n, *sp2, *sp2n, *sp3, *sp3n;

	STRUCT_SIZE_INIT(note_buf, "note_buf_t");
	STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus");
	MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus", "pr_pid");
	MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg");

	/* Exception frame offsets differ depending on whether pt_regs
	 * carries the embedded "stackframe" member. */
	if (MEMBER_EXISTS("pt_regs", "stackframe")) {
		machdep->machspec->user_eframe_offset = SIZE(pt_regs);
		machdep->machspec->kern_eframe_offset = SIZE(pt_regs) - 16;
	} else {
		machdep->machspec->user_eframe_offset = SIZE(pt_regs) + 16;
		machdep->machspec->kern_eframe_offset = SIZE(pt_regs);
	}

	/* Record text ranges used to recognize exception/IRQ entry points. */
	if ((sp1 = kernel_symbol_search("__exception_text_start")) &&
	    (sp2 = kernel_symbol_search("__exception_text_end"))) {
		machdep->machspec->__exception_text_start = sp1->value;
		machdep->machspec->__exception_text_end = sp2->value;
	}
	if ((sp1 = kernel_symbol_search("__irqentry_text_start")) &&
	    (sp2 = kernel_symbol_search("__irqentry_text_end"))) {
		machdep->machspec->__irqentry_text_start = sp1->value;
		machdep->machspec->__irqentry_text_end = sp2->value;
	}
	if ((sp1 = kernel_symbol_search("vectors")) &&
	    (sp1n = kernel_symbol_search("cpu_switch_to")) &&
	    (sp2 = kernel_symbol_search("ret_fast_syscall")) &&
	    (sp2n = kernel_symbol_search("sys_rt_sigreturn_wrapper"))) {
		machdep->machspec->exp_entry1_start = sp1->value;
		machdep->machspec->exp_entry1_end = sp1n->value;
		machdep->machspec->exp_entry2_start = sp2->value;
		machdep->machspec->exp_entry2_end = sp2n->value;
	}

	/* Kdump-relevant text ranges, bounded by the next symbol after each. */
	if ((sp1 = kernel_symbol_search("crash_kexec")) &&
	    (sp1n = next_symbol(NULL, sp1)) &&
	    (sp2 = kernel_symbol_search("crash_save_cpu")) &&
	    (sp2n = next_symbol(NULL, sp2)) &&
	    (sp3 = kernel_symbol_search("machine_kexec")) &&
	    (sp3n = next_symbol(NULL, sp3))) {
		machdep->machspec->crash_kexec_start = sp1->value;
		machdep->machspec->crash_kexec_end = sp1n->value;
		machdep->machspec->crash_save_cpu_start = sp2->value;
		machdep->machspec->crash_save_cpu_end = sp2n->value;
		machdep->machspec->machine_kexec_start = sp3->value;
		machdep->machspec->machine_kexec_end = sp3n->value;
		machdep->flags |= KDUMP_ENABLED;
	}

	task_struct_thread = MEMBER_OFFSET("task_struct", "thread");
	thread_struct_cpu_context = MEMBER_OFFSET("thread_struct", "cpu_context");

	if ((task_struct_thread == INVALID_OFFSET) ||
	    (thread_struct_cpu_context == INVALID_OFFSET)) {
		error(INFO,
			"cannot determine task_struct.thread.context offset\n");
		return;
	}

	/*
	 * Pay for the convenience of using a hardcopy of a kernel structure.
	 */
	if (offsetof(struct arm64_stackframe, sp) !=
	    MEMBER_OFFSET("stackframe", "sp")) {
		if (CRASHDEBUG(1))
			error(INFO, "builtin stackframe.sp offset differs from kernel version\n");
	}
	if (offsetof(struct arm64_stackframe, fp) !=
	    MEMBER_OFFSET("stackframe", "fp")) {
		if (CRASHDEBUG(1))
			error(INFO, "builtin stackframe.fp offset differs from kernel version\n");
	}
	if (offsetof(struct arm64_stackframe, pc) !=
	    MEMBER_OFFSET("stackframe", "pc")) {
		if (CRASHDEBUG(1))
			error(INFO, "builtin stackframe.pc offset differs from kernel version\n");
	}
	/* v4.14+ dropped stackframe.sp: switch to the newer unwinder. */
	if (!MEMBER_EXISTS("stackframe", "sp"))
		machdep->flags |= UNW_4_14;

	context_sp = MEMBER_OFFSET("cpu_context", "sp");
	context_fp = MEMBER_OFFSET("cpu_context", "fp");
	context_pc = MEMBER_OFFSET("cpu_context", "pc");
	context_x19 = MEMBER_OFFSET("cpu_context", "x19");
	context_x20 = MEMBER_OFFSET("cpu_context", "x20");
	context_x21 = MEMBER_OFFSET("cpu_context", "x21");
	context_x22 = MEMBER_OFFSET("cpu_context", "x22");
	context_x23 = MEMBER_OFFSET("cpu_context", "x23");
	context_x24 = MEMBER_OFFSET("cpu_context", "x24");
	context_x25 = MEMBER_OFFSET("cpu_context", "x25");
	context_x26 = MEMBER_OFFSET("cpu_context", "x26");
	context_x27 = MEMBER_OFFSET("cpu_context", "x27");
	context_x28 = MEMBER_OFFSET("cpu_context", "x28");
	if (context_sp == INVALID_OFFSET) {
		error(INFO, "cannot determine cpu_context.sp offset\n");
		return;
	}
	if (context_fp == INVALID_OFFSET) {
		error(INFO, "cannot determine cpu_context.fp offset\n");
		return;
	}
	if (context_pc == INVALID_OFFSET) {
		error(INFO, "cannot determine cpu_context.pc offset\n");
		return;
	}
	/* Absolute offsets of the saved context registers within task_struct. */
	ASSIGN_OFFSET(task_struct_thread_context_sp) =
		task_struct_thread + thread_struct_cpu_context + context_sp;
	ASSIGN_OFFSET(task_struct_thread_context_fp) =
		task_struct_thread + thread_struct_cpu_context + context_fp;
	ASSIGN_OFFSET(task_struct_thread_context_pc) =
		task_struct_thread + thread_struct_cpu_context + context_pc;
	ASSIGN_OFFSET(task_struct_thread_context_x19) =
		task_struct_thread + thread_struct_cpu_context + context_x19;
	ASSIGN_OFFSET(task_struct_thread_context_x20) =
		task_struct_thread + thread_struct_cpu_context + context_x20;
	ASSIGN_OFFSET(task_struct_thread_context_x21) =
		task_struct_thread + thread_struct_cpu_context + context_x21;
	ASSIGN_OFFSET(task_struct_thread_context_x22) =
		task_struct_thread + thread_struct_cpu_context + context_x22;
	ASSIGN_OFFSET(task_struct_thread_context_x23) =
		task_struct_thread + thread_struct_cpu_context + context_x23;
	ASSIGN_OFFSET(task_struct_thread_context_x24) =
		task_struct_thread + thread_struct_cpu_context + context_x24;
	ASSIGN_OFFSET(task_struct_thread_context_x25) =
		task_struct_thread + thread_struct_cpu_context + context_x25;
	ASSIGN_OFFSET(task_struct_thread_context_x26) =
		task_struct_thread + thread_struct_cpu_context + context_x26;
	ASSIGN_OFFSET(task_struct_thread_context_x27) =
		task_struct_thread + thread_struct_cpu_context + context_x27;
	ASSIGN_OFFSET(task_struct_thread_context_x28) =
		task_struct_thread + thread_struct_cpu_context + context_x28;
}

#define KERNEL_MODE (1)
#define USER_MODE (2)

#define USER_EFRAME_OFFSET (machdep->machspec->user_eframe_offset)
#define KERN_EFRAME_OFFSET (machdep->machspec->kern_eframe_offset)

/*
 * PSR bits
 */
#define PSR_MODE_EL0t 0x00000000
#define PSR_MODE_EL1t 0x00000004
#define PSR_MODE_EL1h 0x00000005 #define PSR_MODE_EL2t 0x00000008 #define PSR_MODE_EL2h 0x00000009 #define PSR_MODE_EL3t 0x0000000c #define PSR_MODE_EL3h 0x0000000d #define PSR_MODE_MASK 0x0000000f /* Architecturally defined mapping between AArch32 and AArch64 registers */ #define compat_usr(x) regs[(x)] #define compat_fp regs[11] #define compat_sp regs[13] #define compat_lr regs[14] #define user_mode(ptregs) \ (((ptregs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t) #define compat_user_mode(ptregs) \ (((ptregs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \ (PSR_MODE32_BIT | PSR_MODE_EL0t)) #define user_stack_pointer(ptregs) \ (!compat_user_mode(ptregs) ? (ptregs)->sp : (ptregs)->compat_sp) #define user_frame_pointer(ptregs) \ (!compat_user_mode(ptregs) ? (ptregs)->regs[29] : (ptregs)->compat_fp) static int arm64_is_kernel_exception_frame(struct bt_info *bt, ulong stkptr) { struct arm64_pt_regs *regs; struct machine_specific *ms = machdep->machspec; if (stkptr > STACKSIZE() && !INSTACK(stkptr, bt)) { if (CRASHDEBUG(1)) error(WARNING, "stkptr: %lx is outside the kernel stack range\n", stkptr); return FALSE; } regs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(stkptr))]; if (INSTACK(regs->sp, bt) && INSTACK(regs->regs[29], bt) && !(regs->pstate & (0xffffffff00000000ULL | PSR_MODE32_BIT)) && is_kernel_text(regs->pc) && is_kernel_text(regs->regs[30] | ms->CONFIG_ARM64_KERNELPACMASK)) { switch (regs->pstate & PSR_MODE_MASK) { case PSR_MODE_EL1t: case PSR_MODE_EL1h: case PSR_MODE_EL2t: case PSR_MODE_EL2h: return TRUE; } } return FALSE; } static int arm64_eframe_search(struct bt_info *bt) { int c; ulong ptr, count; struct machine_specific *ms; if (bt->flags & BT_EFRAME_SEARCH2) { if (!(machdep->flags & IRQ_STACKS)) error(FATAL, "IRQ stacks do not exist in this kernel\n"); ms = machdep->machspec; for (c = 0; c < kt->cpus; c++) { if ((bt->flags & BT_CPUMASK) && !(NUM_IN_BITMAP(bt->cpumask, c))) continue; fprintf(fp, "CPU %d IRQ STACK:", c); bt->stackbase 
= ms->irq_stacks[c]; bt->stacktop = bt->stackbase + ms->irq_stack_size; alter_stackbuf(bt); count = 0; for (ptr = bt->stackbase; ptr < bt->stacktop - SIZE(pt_regs); ptr++) { if (arm64_is_kernel_exception_frame(bt, ptr)) { fprintf(fp, "%s\nKERNEL-MODE EXCEPTION FRAME AT: %lx\n", count ? "" : "\n", ptr); arm64_print_exception_frame(bt, ptr, KERNEL_MODE, fp); count++; } } if (count) fprintf(fp, "\n"); else fprintf(fp, "(none found)\n\n"); } return 0; } count = 0; for (ptr = bt->stackbase; ptr < bt->stacktop - SIZE(pt_regs); ptr++) { if (arm64_is_kernel_exception_frame(bt, ptr)) { fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME AT: %lx\n", ptr); arm64_print_exception_frame(bt, ptr, KERNEL_MODE, fp); count++; } } if (is_kernel_thread(bt->tc->task)) return count; ptr = bt->stacktop - USER_EFRAME_OFFSET; fprintf(fp, "%sUSER-MODE EXCEPTION FRAME AT: %lx\n", count++ ? "\n" : "", ptr); arm64_print_exception_frame(bt, ptr, USER_MODE, fp); return count; } static char *arm64_exception_functions[] = { "do_undefinstr", "do_sysinstr", "do_debug_exception", "do_mem_abort", "do_el0_irq_bp_hardening", "do_sp_pc_abort", "handle_bad_stack", NULL }; static int arm64_in_exception_text(ulong ptr) { struct machine_specific *ms = machdep->machspec; char *name, **func; if (ms->__irqentry_text_start && ms->__irqentry_text_end && ((ptr >= ms->__irqentry_text_start) && (ptr < ms->__irqentry_text_end))) return TRUE; if (ms->__exception_text_start && ms->__exception_text_end) { if ((ptr >= ms->__exception_text_start) && (ptr < ms->__exception_text_end)) return TRUE; } name = closest_symbol(ptr); if (name != NULL) { /* Linux 5.5 and later */ for (func = &arm64_exception_functions[0]; *func; func++) { if (STREQ(name, *func)) return TRUE; } } return FALSE; } static int arm64_in_exp_entry(ulong addr) { struct machine_specific *ms; ms = machdep->machspec; if ((ms->exp_entry1_start <= addr) && (addr < ms->exp_entry1_end)) return TRUE; if ((ms->exp_entry2_start <= addr) && (addr < ms->exp_entry2_end)) return 
TRUE; return FALSE; } #define BACKTRACE_CONTINUE (1) #define BACKTRACE_COMPLETE_KERNEL (2) #define BACKTRACE_COMPLETE_USER (3) static int arm64_print_stackframe_entry(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp) { char *name, *name_plus_offset; ulong branch_pc, symbol_offset; struct syment *sp; struct load_module *lm; char buf[BUFSIZE]; /* * if pc comes from a saved lr, it actually points to an instruction * after branch. To avoid any confusion, decrement pc by 4. * See, for example, "bl schedule" before ret_to_user(). */ branch_pc = frame->pc - 4; name = closest_symbol(branch_pc); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(branch_pc, &symbol_offset); if (sp && symbol_offset) name_plus_offset = value_to_symstr(branch_pc, buf, bt->radix); } if (!INSTACK(frame->fp, bt) && IN_TASK_VMA(bt->task, frame->fp)) frame->fp = 0; if (bt->flags & BT_FULL) { if (level) arm64_display_full_frame(bt, frame->fp); bt->frameptr = frame->fp; } fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, frame->fp ? frame->fp : bt->stacktop - USER_EFRAME_OFFSET, name_plus_offset ? 
name_plus_offset : name, branch_pc); if (BT_REFERENCE_CHECK(bt)) { arm64_do_bt_reference_check(bt, frame->pc, closest_symbol(frame->pc)); arm64_do_bt_reference_check(bt, branch_pc, name); } if (module_symbol(branch_pc, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); fprintf(ofp, "\n"); if (bt->flags & BT_LINE_NUMBERS) { get_line_number(branch_pc, buf, FALSE); if (strlen(buf)) fprintf(ofp, " %s\n", buf); } if (STREQ(name, "start_kernel") || STREQ(name, "secondary_start_kernel") || STREQ(name, "kthread") || STREQ(name, "kthreadd")) return BACKTRACE_COMPLETE_KERNEL; return BACKTRACE_CONTINUE; } static int arm64_print_stackframe_entry_v2(struct bt_info *bt, int level, struct arm64_stackframe *frame, FILE *ofp) { char *name, *name_plus_offset; ulong pc, symbol_offset; struct syment *sp; struct load_module *lm; char buf[BUFSIZE]; /* * if pc comes from a saved lr, it actually points to an instruction * after branch. To avoid any confusion, decrement pc by 4. * See, for example, "bl schedule" before ret_to_user(). */ pc = frame->pc - 0x4; name = closest_symbol(pc); name_plus_offset = NULL; if (bt->flags & BT_SYMBOL_OFFSET) { sp = value_search(pc, &symbol_offset); if (sp && symbol_offset) name_plus_offset = value_to_symstr(pc, buf, bt->radix); } if (bt->flags & BT_USER_EFRAME) frame->fp = 0; fprintf(ofp, "%s#%d [%8lx] %s at %lx", level < 10 ? " " : "", level, frame->fp ? frame->fp : bt->stacktop - USER_EFRAME_OFFSET, name_plus_offset ? 
name_plus_offset : name, pc); if (BT_REFERENCE_CHECK(bt)) arm64_do_bt_reference_check(bt, pc, name); if (module_symbol(pc, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); fprintf(ofp, "\n"); if (bt->flags & BT_LINE_NUMBERS) { get_line_number(pc, buf, FALSE); if (strlen(buf)) fprintf(ofp, " %s\n", buf); } if (STREQ(name, "start_kernel") || STREQ(name, "secondary_start_kernel") || STREQ(name, "kthread") || STREQ(name, "kthreadd")) return BACKTRACE_COMPLETE_KERNEL; return BACKTRACE_CONTINUE; } static void arm64_display_full_frame(struct bt_info *bt, ulong sp) { int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; if (bt->frameptr == sp) return; if (INSTACK(bt->frameptr, bt)) { if (INSTACK(sp, bt)) { ; /* normal case */ } else { if (sp == 0) /* interrupt in user mode */ sp = bt->stacktop - USER_EFRAME_OFFSET; else /* interrupt in kernel mode */ sp = bt->stacktop; } } else { /* This is a transition case from irq to process stack. */ return; } words = (sp - bt->frameptr) / sizeof(ulong); addr = bt->frameptr; u_idx = (bt->frameptr - bt->stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } static void arm64_display_full_frame_v2(struct bt_info *bt, struct arm64_stackframe *cur, struct arm64_stackframe *next) { struct machine_specific *ms; ulong next_fp, stackbase; char *stackbuf; int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; stackbase = bt->stackbase; stackbuf = bt->stackbuf; ms = machdep->machspec; /* Calc next fp for dump */ if (next->fp == 0) /* last stackframe on kernel tack */ next_fp = bt->stacktop - 0x10; else if (!INSTACK(cur->sp, bt)) { /* We have just switched over stacks */ next_fp = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 0x10; /* * We are already buffering a process stack. 
* So use an old buffer for IRQ stack. */ stackbase = ms->irq_stacks[bt->tc->processor]; stackbuf = ms->irq_stackbuf; } else next_fp = next->fp; if (CRASHDEBUG(1)) fprintf(fp, " frame <%016lx:%016lx>\n", cur->fp, next_fp); /* Check here because we want to see a debug message above. */ if (!(bt->flags & BT_FULL)) return; if (next_fp <= cur->fp) return; /* Dump */ words = (next_fp - cur->fp) / sizeof(ulong); addr = cur->fp; u_idx = (cur->fp - stackbase)/sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&stackbuf[u_idx*sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); if (stackbuf == ms->irq_stackbuf) FREEBUF(stackbuf); } static int arm64_unwind_frame(struct bt_info *bt, struct arm64_stackframe *frame) { unsigned long high, low, fp; unsigned long stack_mask; unsigned long irq_stack_ptr, orig_sp; struct arm64_pt_regs *ptregs; struct machine_specific *ms = machdep->machspec; stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1; fp = frame->fp; low = frame->sp; high = (low + stack_mask) & ~(stack_mask); if (fp < low || fp > high || fp & 0xf || !INSTACK(fp, bt)) return FALSE; frame->sp = fp + 0x10; frame->fp = GET_STACK_ULONG(fp); frame->pc = GET_STACK_ULONG(fp + 8); if (is_kernel_text(frame->pc | ms->CONFIG_ARM64_KERNELPACMASK)) frame->pc |= ms->CONFIG_ARM64_KERNELPACMASK; if ((frame->fp == 0) && (frame->pc == 0)) return FALSE; if (!(machdep->flags & (IRQ_STACKS | OVERFLOW_STACKS))) return TRUE; if (machdep->flags & UNW_4_14) { if (((bt->flags & BT_IRQSTACK) && !arm64_on_irq_stack(bt->tc->processor, frame->fp)) || ((bt->flags & BT_OVERFLOW_STACK) && !arm64_on_overflow_stack(bt->tc->processor, frame->fp))) { if (arm64_on_process_stack(bt, frame->fp)) { arm64_set_process_stack(bt); frame->sp = frame->fp - KERN_EFRAME_OFFSET; /* * for switch_stack * fp still points to irq stack */ bt->bptr = fp; /* * for display_full_frame * sp 
points to process stack * * If we want to see pt_regs, * comment out the below. * bt->frameptr = frame->sp; */ } else { /* irq -> user */ return FALSE; } } return TRUE; } /* * The kernel's manner of determining the end of the IRQ stack: * * #define THREAD_SIZE 16384 * #define THREAD_START_SP (THREAD_SIZE - 16) * #define IRQ_STACK_START_SP THREAD_START_SP * #define IRQ_STACK_PTR(cpu) ((unsigned long)per_cpu(irq_stack, cpu) + IRQ_STACK_START_SP) * #define IRQ_STACK_TO_TASK_STACK(ptr) (*((unsigned long *)((ptr) - 0x08))) * * irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id()); * orig_sp = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr); (pt_regs pointer on process stack) */ irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 16; if (frame->sp == irq_stack_ptr) { orig_sp = GET_STACK_ULONG(irq_stack_ptr - 8); arm64_set_process_stack(bt); if (INSTACK(orig_sp, bt) && (INSTACK(frame->fp, bt) || (frame->fp == 0))) { ptregs = (struct arm64_pt_regs *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(orig_sp))]; frame->sp = orig_sp; frame->pc = ptregs->pc; bt->bptr = fp; if (CRASHDEBUG(1)) error(INFO, "arm64_unwind_frame: switch stacks: fp: %lx sp: %lx pc: %lx\n", frame->fp, frame->sp, frame->pc); } else { error(WARNING, "arm64_unwind_frame: on IRQ stack: oriq_sp: %lx%s fp: %lx%s\n", orig_sp, INSTACK(orig_sp, bt) ? "" : " (?)", frame->fp, INSTACK(frame->fp, bt) ? "" : " (?)"); return FALSE; } } return TRUE; } /* * The following figure shows how unwinding can be done. * Here we assume that the callstack order is: * #(X-1) ppc (previous PC) * #X cpc (current PC) * < #(X+ 1) epc (Exception entry) > * #(X+1/2) npc (Next PC) * #(X+2/3) Npc (One before Next) * #(X+3/4) NNpc (One before 'Npc') * and unwind frames from #X to #(X+1). * When we add a faked frame for exception entry (exception frame) * as #(X+1), the next frame for npc will be recognized as #(x+2). 
* * (1)Normal stackframe: * +------+ * | pfp | * | cpc | * psp + + * | | * | | * pfp +------+ <--- :prev stackframe = * | cfp | * | npc | * csp + + * | | * | | * cfp +------+ <--- :curr stackframe = * | nfp | cfp = *pfp * | Npc | csp = pfp + 0x10 * nsp + + * | | * | | * nfp +------+ <--- :next stackframe = * | | * * (2)Exception on the same (IRQ or process) stack: * +------+ * | pfp | * | cpc | * psp + + * | | * | | * pfp +------+ <--- :prev stackframe = * | cfp | * | npc | * csp + + * | | * | | * cfp +------+ <--- :curr stackframe = * | nfp | * | epc | * + + * | | * | | faked(*) * esp +------+ <--- :excp stackframe = <---, esp, epc * | | esp = nsp - sizeof(pt_regs) * | | * | Npc | (*) If we didn't add this frame, the next frame * | nfp | would be * | nsp | * | npc | and the frame below for npc would be lost. * nsp + + * | | * nfp +------+ <--- :task stackframe = * | Nfp | * | NNpc | * Nsp + + * | | * Nfp +------+ <--- :task stackframe = * | NNfp | * * (3)Interrupt: * +------+ * | cfp | * | ipc | * csp + + * | | * | | * cfp +------+ <--- :curr stackframe = * | ifp | * | epc | * isp + + * | | * | | (*) * ifp +------+ <--- :irq stackframe = * | nfp | ifp == IRQ_STACK_PTR * | esp | (*) Before the kernel enters an irq handler, frame * top +------+ pointer moves to the top of IRQ stack. * IRQ stack So we have to skip this frame in unwinding. 
* * faked * esp +------+ <--- :excp stackframe = <---, esp, epc> * | | esp = nsp - sizeof(pt_regs) * | | * | Npc | * | nfp | * | nsp | * | npc | * nsp + + * | | * nfp +------+ <--- :task stackframe = * | Nfp | * | NNpc | * Nsp + + * | | * Nfp +------+ <--- :task stackframe = * | NNfp | */ static struct arm64_stackframe ext_frame; static int arm64_unwind_frame_v2(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) { unsigned long high, low, fp; unsigned long stack_mask; unsigned long irq_stack_ptr; struct machine_specific *ms; stack_mask = (unsigned long)(ARM64_STACK_SIZE) - 1; fp = frame->fp; low = frame->sp; high = (low + stack_mask) & ~(stack_mask); if (fp < low || fp > high || fp & 0xf || !INSTACK(fp, bt)) return FALSE; if (CRASHDEBUG(1)) fprintf(ofp, " cur fp:%016lx sp:%016lx pc:%016lx\n", frame->fp, frame->sp, frame->pc); if (ext_frame.pc) { /* * The previous frame was a dummy for exception entry. * So complement a missing (task) stackframe now. */ frame->fp = ext_frame.fp; frame->sp = ext_frame.sp; frame->pc = ext_frame.pc; ext_frame.pc = 0; /* back to normal unwinding */ goto unwind_done; } frame->pc = GET_STACK_ULONG(fp + 8); if (!arm64_in_exp_entry(frame->pc)) { /* (1) Normal stack frame */ frame->sp = fp + 0x10; frame->fp = GET_STACK_ULONG(fp); } else { /* * We are in exception entry code, and so * - add a faked frame for exception entry, and * - prepare for a stackframe hidden by exception */ ext_frame.fp = GET_STACK_ULONG(fp); /* * Note: * In the following code, we determine a stack pointer for * exception entry based on ext_frame.fp because we have * no way to know a ext_frame.sp. * Fortunately, this will work fine for most functions * in the kernel. */ if (ext_frame.fp == 0) { /* * (2) * Either on process stack or on IRQ stack, * the next frame is the last one on process stack. 
*/ frame->sp = bt->stacktop - sizeof(struct arm64_pt_regs) - 0x10; frame->fp = frame->sp; } else if (!arm64_on_irq_stack(bt->tc->processor, frame->sp)) { /* * (2) * We are on process stack. Just add a faked frame */ if (!arm64_on_irq_stack(bt->tc->processor, ext_frame.fp)) frame->sp = ext_frame.fp - sizeof(struct arm64_pt_regs); else { /* * FIXME: very exceptional case * We are already back on process stack, but * a saved frame pointer indicates that we are * on IRQ stack. Unfortunately this can happen * when some functions are called after * an irq handler is done because irq_exit() * doesn't restore a frame pointer (x29). * Those functions include * - do_notify_resume() * - trace_hardirqs_off() * - schedule() * * We have no perfect way to determine a true * stack pointer value here. * 0x20 is a stackframe size of schedule(). * Really ugly */ frame->sp = frame->fp + 0x20; fprintf(ofp, " (Next exception frame might be wrong)\n"); } frame->fp = frame->sp; } else { /* We are on IRQ stack */ ms = machdep->machspec; irq_stack_ptr = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size - 0x20; if (ext_frame.fp != irq_stack_ptr) { /* (2) Just add a faked frame */ frame->sp = ext_frame.fp - sizeof(struct arm64_pt_regs); frame->fp = frame->sp; } else { /* * (3) * Switch from IRQ stack to process stack */ frame->sp = GET_STACK_ULONG(irq_stack_ptr + 8); frame->fp = frame->sp; /* * Keep a buffer for a while until * displaying the last frame on IRQ stack * at next arm64_print_stackframe_entry_v2() */ if (bt->flags & BT_FULL) ms->irq_stackbuf = bt->stackbuf; arm64_set_process_stack(bt); } } /* prepare for a stackframe hidden by exception */ arm64_gen_hidden_frame(bt, frame->sp, &ext_frame); } unwind_done: if (CRASHDEBUG(1)) fprintf(ofp, " nxt fp:%016lx sp:%016lx pc:%016lx\n", frame->fp, frame->sp, frame->pc); return TRUE; } /* * A layout of a stack frame in a function looks like: * * stack grows to lower addresses. 
* /|\ * | * | | * new sp +------+ <--- * |dyn | | * | vars | | * new fp +- - - + | * |old fp| | a function's stack frame * |old lr| | * |static| | * | vars| | * old sp +------+ <--- * |dyn | * | vars | * old fp +------+ * | | * * - On function entry, sp is decremented down to new fp. * * - and old fp and sp are saved into this stack frame. * "Static" local variables are allocated at the same time. * * - Later on, "dynamic" local variables may be allocated on a stack. * But those dynamic variables are rarely used in the kernel image, * and, as a matter of fact, sp is equal to fp in almost all functions. * (not 100% though.) * * - Currently, sp is determined in arm64_unwind_frame() by * sp = a callee's fp + 0x10 * where 0x10 stands for a saved area for fp and sp * * - As you can see, however, this calculated sp still points to the top of * callee's static local variables and doesn't match with a *real* sp. * * - So, generally, dumping a stack from this calculated sp to the next frame's * sp shows "callee's static local variables", old fp and sp. * * Diagram and explanation courtesy of Takahiro Akashi */ static void arm64_back_trace_cmd(struct bt_info *bt) { struct arm64_stackframe stackframe; int level; ulong exception_frame; FILE *ofp; extra_stacks_idx = 0; if (bt->flags & BT_OPT_BACK_TRACE) { if (machdep->flags & UNW_4_14) { option_not_supported('o'); return; } arm64_back_trace_cmd_v2(bt); return; } ofp = BT_REFERENCE_CHECK(bt) ? 
pc->nullfp : fp; /* * stackframes are created from 3 contiguous stack addresses: * * x: contains stackframe.fp -- points to next triplet * x+8: contains stackframe.pc -- text return address * x+16: is the stackframe.sp address */ if (bt->flags & BT_KDUMP_ADJUST) { if (arm64_on_irq_stack(bt->tc->processor, bt->bptr)) { arm64_set_irq_stack(bt); bt->flags |= BT_IRQSTACK; } stackframe.fp = GET_STACK_ULONG(bt->bptr - 8); stackframe.pc = GET_STACK_ULONG(bt->bptr); stackframe.sp = bt->bptr + 8; bt->frameptr = stackframe.sp; } else if (bt->hp && bt->hp->esp) { if (arm64_on_irq_stack(bt->tc->processor, bt->hp->esp)) { arm64_set_irq_stack(bt); bt->flags |= BT_IRQSTACK; } stackframe.fp = GET_STACK_ULONG(bt->hp->esp - 8); stackframe.pc = bt->hp->eip ? bt->hp->eip : GET_STACK_ULONG(bt->hp->esp); stackframe.sp = bt->hp->esp + 8; bt->flags &= ~BT_REGS_NOT_FOUND; } else { if (arm64_on_irq_stack(bt->tc->processor, bt->frameptr)) { arm64_set_irq_stack(bt); bt->flags |= BT_IRQSTACK; } else if (arm64_on_overflow_stack(bt->tc->processor, bt->frameptr)) { arm64_set_overflow_stack(bt); bt->flags |= BT_OVERFLOW_STACK; } stackframe.sp = bt->stkptr; stackframe.pc = bt->instptr; stackframe.fp = bt->frameptr; } if (is_task_active(bt->task)) { if (!extra_stacks_regs[extra_stacks_idx]) { extra_stacks_regs[extra_stacks_idx] = (struct user_regs_bitmap_struct *) malloc(sizeof(struct user_regs_bitmap_struct)); } memset(extra_stacks_regs[extra_stacks_idx], 0, sizeof(struct user_regs_bitmap_struct)); if (bt->task != tt->panic_task && stackframe.sp) { readmem(stackframe.sp - 8, KVADDR, &extra_stacks_regs[extra_stacks_idx]->ur.pc, sizeof(ulong), "extra_stacks_regs.pc", RETURN_ON_ERROR); readmem(stackframe.sp - 16, KVADDR, &extra_stacks_regs[extra_stacks_idx]->ur.sp, sizeof(ulong), "extra_stacks_regs.sp", RETURN_ON_ERROR); } else { extra_stacks_regs[extra_stacks_idx]->ur.pc = stackframe.pc; extra_stacks_regs[extra_stacks_idx]->ur.sp = stackframe.sp; } SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, 
REG_SEQ(arm64_pt_regs, pc)); SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, REG_SEQ(arm64_pt_regs, sp)); if (!bt->machdep || (extra_stacks_regs[extra_stacks_idx]->ur.sp != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.sp && extra_stacks_regs[extra_stacks_idx]->ur.pc != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.pc)) { gdb_add_substack (extra_stacks_idx++); } } if (bt->flags & BT_TEXT_SYMBOLS) { arm64_print_text_symbols(bt, &stackframe, ofp); if (BT_REFERENCE_FOUND(bt)) { print_task_header(fp, task_to_context(bt->task), 0); arm64_print_text_symbols(bt, &stackframe, fp); fprintf(fp, "\n"); } return; } if (bt->flags & BT_REGS_NOT_FOUND) return; if (!(bt->flags & BT_KDUMP_ADJUST)) { if (bt->flags & BT_USER_SPACE) goto complete_user; if (DUMPFILE() && is_task_active(bt->task)) { exception_frame = stackframe.fp - KERN_EFRAME_OFFSET; if (arm64_is_kernel_exception_frame(bt, exception_frame)) arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp); } } level = exception_frame = 0; while (1) { bt->instptr = stackframe.pc; switch (arm64_print_stackframe_entry(bt, level, &stackframe, ofp)) { case BACKTRACE_COMPLETE_KERNEL: return; case BACKTRACE_COMPLETE_USER: goto complete_user; case BACKTRACE_CONTINUE: break; } if (exception_frame) { arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp); exception_frame = 0; } if (!arm64_unwind_frame(bt, &stackframe)) break; if (arm64_in_exception_text(bt->instptr) && INSTACK(stackframe.fp, bt)) { if (bt->flags & BT_OVERFLOW_STACK) { exception_frame = stackframe.fp - KERN_EFRAME_OFFSET; } else if (!(bt->flags & BT_IRQSTACK) || ((stackframe.sp + SIZE(pt_regs)) < bt->stacktop)) { if (arm64_is_kernel_exception_frame(bt, stackframe.fp - KERN_EFRAME_OFFSET)) exception_frame = stackframe.fp - KERN_EFRAME_OFFSET; } } if ((bt->flags & BT_IRQSTACK) && !arm64_on_irq_stack(bt->tc->processor, stackframe.fp)) { bt->flags &= ~BT_IRQSTACK; if (arm64_switch_stack(bt, &stackframe, ofp) == USER_MODE) break; } 
if ((bt->flags & BT_OVERFLOW_STACK) && !arm64_on_overflow_stack(bt->tc->processor, stackframe.fp)) { bt->flags &= ~BT_OVERFLOW_STACK; if (arm64_switch_stack_from_overflow(bt, &stackframe, ofp) == USER_MODE) break; } level++; } if (is_kernel_thread(bt->tc->task)) return; complete_user: exception_frame = bt->stacktop - USER_EFRAME_OFFSET; arm64_print_exception_frame(bt, exception_frame, USER_MODE, ofp); if ((bt->flags & (BT_USER_SPACE|BT_KDUMP_ADJUST)) == BT_USER_SPACE) fprintf(ofp, " #0 [user space]\n"); } static void arm64_back_trace_cmd_v2(struct bt_info *bt) { struct arm64_stackframe stackframe, cur_frame; int level, mode; ulong exception_frame; FILE *ofp; ofp = BT_REFERENCE_CHECK(bt) ? pc->nullfp : fp; /* * stackframes are created from 3 contiguous stack addresses: * * x: contains stackframe.fp -- points to next triplet * x+8: contains stackframe.pc -- text return address * x+16: is the stackframe.sp address */ if (bt->flags & BT_KDUMP_ADJUST) { if (arm64_on_irq_stack(bt->tc->processor, bt->bptr)) { arm64_set_irq_stack(bt); bt->flags |= BT_IRQSTACK; } stackframe.fp = GET_STACK_ULONG(bt->bptr); stackframe.pc = GET_STACK_ULONG(bt->bptr + 8); stackframe.sp = bt->bptr + 16; bt->frameptr = stackframe.fp; } else { if (arm64_on_irq_stack(bt->tc->processor, bt->frameptr)) { arm64_set_irq_stack(bt); bt->flags |= BT_IRQSTACK; } stackframe.sp = bt->stkptr; stackframe.pc = bt->instptr; stackframe.fp = bt->frameptr; } if (is_task_active(bt->task)) { if (!extra_stacks_regs[extra_stacks_idx]) { extra_stacks_regs[extra_stacks_idx] = (struct user_regs_bitmap_struct *) malloc(sizeof(struct user_regs_bitmap_struct)); } memset(extra_stacks_regs[extra_stacks_idx], 0, sizeof(struct user_regs_bitmap_struct)); if (bt->task != tt->panic_task && stackframe.sp) { readmem(stackframe.sp - 8, KVADDR, &extra_stacks_regs[extra_stacks_idx]->ur.pc, sizeof(ulong), "extra_stacks_regs.pc", RETURN_ON_ERROR); readmem(stackframe.sp - 16, KVADDR, &extra_stacks_regs[extra_stacks_idx]->ur.sp, 
sizeof(ulong), "extra_stacks_regs.sp", RETURN_ON_ERROR); } else { extra_stacks_regs[extra_stacks_idx]->ur.pc = stackframe.pc; extra_stacks_regs[extra_stacks_idx]->ur.sp = stackframe.sp; } SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, REG_SEQ(arm64_pt_regs, pc)); SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, REG_SEQ(arm64_pt_regs, sp)); if (!bt->machdep || (extra_stacks_regs[extra_stacks_idx]->ur.sp != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.sp && extra_stacks_regs[extra_stacks_idx]->ur.pc != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.pc)) { gdb_add_substack (extra_stacks_idx++); } } if (bt->flags & BT_TEXT_SYMBOLS) { arm64_print_text_symbols(bt, &stackframe, ofp); if (BT_REFERENCE_FOUND(bt)) { print_task_header(fp, task_to_context(bt->task), 0); arm64_print_text_symbols(bt, &stackframe, fp); fprintf(fp, "\n"); } return; } if (bt->flags & BT_REGS_NOT_FOUND) return; if (!(bt->flags & BT_KDUMP_ADJUST)) { if (bt->flags & BT_USER_SPACE) { user_space: exception_frame = bt->stacktop - USER_EFRAME_OFFSET; arm64_print_exception_frame(bt, exception_frame, USER_MODE, ofp); // fprintf(ofp, " #0 [user space]\n"); return; } if (DUMPFILE() && is_task_active(bt->task)) { exception_frame = stackframe.fp - SIZE(pt_regs); if (arm64_is_kernel_exception_frame(bt, exception_frame)) arm64_print_exception_frame(bt, exception_frame, KERNEL_MODE, ofp); } } for (level = 0;; level++) { bt->instptr = stackframe.pc; /* * Show one-line stackframe info */ if (arm64_print_stackframe_entry_v2(bt, level, &stackframe, ofp) == BACKTRACE_COMPLETE_KERNEL) break; cur_frame = stackframe; if (!arm64_unwind_frame_v2(bt, &stackframe, ofp)) break; /* * Dump the contents of the current stackframe. * We need to know the next stackframe to determine * the dump range: * */ arm64_display_full_frame_v2(bt, &cur_frame, &stackframe); /* * If we are in a normal stackframe, just continue, * otherwise show an exception frame. 
* Since exception entry code doesn't have a real * stackframe, we fake a dummy frame here. */ if (!arm64_in_exp_entry(stackframe.pc)) continue; if (!INSTACK(cur_frame.sp, bt)) fprintf(ofp, "--- ---\n"); arm64_print_stackframe_entry_v2(bt, ++level, &stackframe, ofp); if (bt->flags & BT_USER_EFRAME) goto user_space; cur_frame = stackframe; arm64_unwind_frame_v2(bt, &stackframe, ofp); /* * and don't show the contenxts. Instead, * show an exception frame below */ if (!INSTACK(cur_frame.sp, bt)) { /* This check is a safeguard. See unwind_frame(). */ error(WARNING, "stack pointer for exception frame is wrong\n"); return; } mode = (stackframe.pc < machdep->machspec->userspace_top) ? USER_MODE : KERNEL_MODE; // fprintf(ofp, "--- ---\n", // mode == KERNEL_MODE ? "kernel" : "user"); arm64_print_exception_frame(bt, cur_frame.sp, mode, ofp); if (mode == USER_MODE) break; } } static void arm64_print_text_symbols(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) { int i; ulong *up; struct load_module *lm; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *name; ulong start; ulong val; struct machine_specific *ms = machdep->machspec; if (bt->flags & BT_TEXT_SYMBOLS_ALL) start = bt->stackbase; else { start = frame->sp - 8; fprintf(ofp, "%sSTART: %s at %lx\n", space(VADDR_PRLEN > 8 ? 14 : 6), bt->flags & BT_SYMBOL_OFFSET ? value_to_symstr(frame->pc, buf2, bt->radix) : closest_symbol(frame->pc), frame->pc); } for (i = (start - bt->stackbase)/sizeof(ulong); i < LONGS_PER_STACK; i++) { up = (ulong *)(&bt->stackbuf[i*sizeof(ulong)]); val = *up; if (is_kernel_text(val | ms->CONFIG_ARM64_KERNELPACMASK)) { val |= ms->CONFIG_ARM64_KERNELPACMASK; name = closest_symbol(val); fprintf(ofp, " %s[%s] %s at %lx", bt->flags & BT_ERROR_MASK ? " " : "", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(bt->stackbase + (i * sizeof(long)))), bt->flags & BT_SYMBOL_OFFSET ? 
value_to_symstr(val, buf2, bt->radix) : name, val); if (module_symbol(val, NULL, &lm, NULL, 0)) fprintf(ofp, " [%s]", lm->mod_name); fprintf(ofp, "\n"); if (BT_REFERENCE_CHECK(bt)) arm64_do_bt_reference_check(bt, val, name); } } } static int arm64_in_kdump_text(struct bt_info *bt, struct arm64_stackframe *frame) { ulong *ptr, *start, *base; struct machine_specific *ms; ulong crash_kexec_frame; if (!(machdep->flags & KDUMP_ENABLED)) return FALSE; base = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stackbase))]; if (bt->flags & BT_USER_SPACE) start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))]; else { if (INSTACK(frame->fp, bt)) start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(frame->fp))]; else start = (ulong *)&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(bt->stacktop))]; } crash_kexec_frame = 0; ms = machdep->machspec; for (ptr = start - 8; ptr >= base; ptr--) { if (bt->flags & BT_OPT_BACK_TRACE) { if ((*ptr > ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end) && INSTACK(*(ptr - 1), bt)) { bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + task_to_stackbase(bt->tc->task); if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_kexec)\n", bt->bptr, *ptr); return TRUE; } if ((*ptr > ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end) && INSTACK(*(ptr - 1), bt)) { bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + task_to_stackbase(bt->tc->task); if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_save_cpu)\n", bt->bptr, *ptr); return TRUE; } } else { if ((*ptr > ms->machine_kexec_start) && (*ptr < ms->machine_kexec_end)) { bt->bptr = ((ulong)ptr - (ulong)base) + task_to_stackbase(bt->tc->task); if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (machine_kexec)\n", bt->bptr, *ptr); return TRUE; } if ((*ptr > ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) { /* * Stash the first crash_kexec frame in case the machine_kexec * frame is not found. 
*/ if (!crash_kexec_frame) { crash_kexec_frame = ((ulong)ptr - (ulong)base) + task_to_stackbase(bt->tc->task); if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_kexec)\n", bt->bptr, *ptr); } continue; } if ((*ptr > ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end)) { bt->bptr = ((ulong)ptr - (ulong)base) + task_to_stackbase(bt->tc->task); if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_save_cpu)\n", bt->bptr, *ptr); return TRUE; } } } if (crash_kexec_frame) { bt->bptr = crash_kexec_frame; return TRUE; } return FALSE; } static int arm64_in_kdump_text_on_irq_stack(struct bt_info *bt) { int cpu; ulong stackbase; char *stackbuf; ulong *ptr, *start, *base; struct machine_specific *ms; if ((machdep->flags & (IRQ_STACKS|KDUMP_ENABLED)) != (IRQ_STACKS|KDUMP_ENABLED)) return FALSE; ms = machdep->machspec; cpu = bt->tc->processor; stackbase = ms->irq_stacks[cpu]; stackbuf = GETBUF(ms->irq_stack_size); if (!readmem(stackbase, KVADDR, stackbuf, ms->irq_stack_size, "IRQ stack contents", RETURN_ON_ERROR)) { error(INFO, "read of IRQ stack at %lx failed\n", stackbase); FREEBUF(stackbuf); return FALSE; } base = (ulong *)stackbuf; start = (ulong *)(stackbuf + ms->irq_stack_size); for (ptr = start - 8; ptr >= base; ptr--) { if (bt->flags & BT_OPT_BACK_TRACE) { if ((*ptr > ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end) && INSTACK(*(ptr - 1), bt)) { bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + stackbase; if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_kexec on IRQ stack)\n", bt->bptr, *ptr); FREEBUF(stackbuf); return TRUE; } if ((*ptr > ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end) && INSTACK(*(ptr - 1), bt)) { bt->bptr = ((ulong)(ptr - 1) - (ulong)base) + stackbase; if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_save_cpu on IRQ stack)\n", bt->bptr, *ptr); FREEBUF(stackbuf); return TRUE; } } else { if ((*ptr > ms->crash_kexec_start) && (*ptr < ms->crash_kexec_end)) { bt->bptr = ((ulong)ptr - (ulong)base) + stackbase; if (CRASHDEBUG(1)) fprintf(fp, 
"%lx: %lx (crash_kexec on IRQ stack)\n", bt->bptr, *ptr); FREEBUF(stackbuf); return TRUE; } if ((*ptr > ms->crash_save_cpu_start) && (*ptr < ms->crash_save_cpu_end)) { bt->bptr = ((ulong)ptr - (ulong)base) + stackbase; if (CRASHDEBUG(1)) fprintf(fp, "%lx: %lx (crash_save_cpu on IRQ stack)\n", bt->bptr, *ptr); FREEBUF(stackbuf); return TRUE; } } } FREEBUF(stackbuf); return FALSE; } static int arm64_switch_stack(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) { int i; ulong stacktop, words, addr; ulong *stackbuf; char buf[BUFSIZE]; struct machine_specific *ms = machdep->machspec; if (bt->flags & BT_FULL) { stacktop = ms->irq_stacks[bt->tc->processor] + ms->irq_stack_size; words = (stacktop - bt->bptr) / sizeof(ulong); stackbuf = (ulong *)GETBUF(words * sizeof(ulong)); readmem(bt->bptr, KVADDR, stackbuf, words * sizeof(long), "top of IRQ stack", FAULT_ON_ERROR); addr = bt->bptr; for (i = 0; i < words; i++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? "\n" : "", addr); fprintf(ofp, "%s ", format_stack_entry(bt, buf, stackbuf[i], 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); FREEBUF(stackbuf); } fprintf(ofp, "--- ---\n"); if (frame->fp == 0) return USER_MODE; if (!(machdep->flags & UNW_4_14)) arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp); return KERNEL_MODE; } static int arm64_switch_stack_from_overflow(struct bt_info *bt, struct arm64_stackframe *frame, FILE *ofp) { int i; ulong stacktop, words, addr; ulong *stackbuf; char buf[BUFSIZE]; struct machine_specific *ms = machdep->machspec; if (bt->flags & BT_FULL) { stacktop = ms->overflow_stacks[bt->tc->processor] + ms->overflow_stack_size; words = (stacktop - bt->bptr) / sizeof(ulong); stackbuf = (ulong *)GETBUF(words * sizeof(ulong)); readmem(bt->bptr, KVADDR, stackbuf, words * sizeof(long), "top of overflow stack", FAULT_ON_ERROR); addr = bt->bptr; for (i = 0; i < words; i++) { if (!(i & 1)) fprintf(ofp, "%s %lx: ", i ? 
"\n" : "", addr); fprintf(ofp, "%s ", format_stack_entry(bt, buf, stackbuf[i], 0)); addr += sizeof(ulong); } fprintf(ofp, "\n"); FREEBUF(stackbuf); } fprintf(ofp, "--- ---\n"); if (frame->fp == 0) return USER_MODE; if (!(machdep->flags & UNW_4_14)) arm64_print_exception_frame(bt, frame->sp, KERNEL_MODE, ofp); return KERNEL_MODE; } static int arm64_get_dumpfile_stackframe(struct bt_info *bt, struct arm64_stackframe *frame) { struct machine_specific *ms = machdep->machspec; struct arm64_pt_regs *ptregs; bool skip = false; if (bt->flags & BT_SKIP_IDLE) { skip = true; bt->flags &= ~BT_SKIP_IDLE; } if (!ms->panic_task_regs || (!ms->panic_task_regs[bt->tc->processor].sp && !ms->panic_task_regs[bt->tc->processor].pc)) { bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } ptregs = &ms->panic_task_regs[bt->tc->processor]; frame->pc = ptregs->pc; if (user_mode(ptregs)) { frame->sp = user_stack_pointer(ptregs); frame->fp = user_frame_pointer(ptregs); if (is_kernel_text(frame->pc) || !in_user_stack(bt->tc->task, frame->sp)) { error(WARNING, "corrupt NT_PRSTATUS? 
pstate: 0x%lx, but no user frame found\n", ptregs->pstate); if (is_kernel_text(frame->pc) && INSTACK(frame->sp, bt) && INSTACK(frame->fp, bt)) goto try_kernel; bt->flags |= BT_REGS_NOT_FOUND; return FALSE; } bt->flags |= BT_USER_SPACE; } else { try_kernel: frame->sp = ptregs->sp; frame->fp = ptregs->regs[29]; bt->machdep = ptregs; } if (arm64_in_kdump_text(bt, frame) || arm64_in_kdump_text_on_irq_stack(bt)) { bt->flags |= BT_KDUMP_ADJUST; if (skip && is_idle_thread(bt->task)) bt->flags |= BT_SKIP_IDLE; } return TRUE; } static int arm64_get_stackframe(struct bt_info *bt, struct arm64_stackframe *frame) { if (!fill_task_struct(bt->task)) return FALSE; frame->sp = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_sp)); frame->pc = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_pc)); frame->fp = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_fp)); frame->x19 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x19)); frame->x20 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x20)); frame->x21 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x21)); frame->x22 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x22)); frame->x23 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x23)); frame->x24 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x24)); frame->x25 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x25)); frame->x26 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x26)); frame->x27 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x27)); frame->x28 = ULONG(tt->task_struct + OFFSET(task_struct_thread_context_x28)); return TRUE; } static void arm64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { struct user_regs_bitmap_struct *ur_bitmap; struct arm64_stackframe stackframe = { 0 }; if (DUMPFILE() && is_task_active(bt->task)) { arm64_get_dumpfile_stackframe(bt, &stackframe); bt->need_free = FALSE; } else { if (bt->flags & 
BT_SKIP_IDLE) bt->flags &= ~BT_SKIP_IDLE; arm64_get_stackframe(bt, &stackframe); ur_bitmap = (struct user_regs_bitmap_struct *)GETBUF(sizeof(*ur_bitmap)); memset(ur_bitmap, 0, sizeof(*ur_bitmap)); ur_bitmap->ur.pc = stackframe.pc; ur_bitmap->ur.sp = stackframe.sp; ur_bitmap->ur.regs[29] = stackframe.fp; ur_bitmap->ur.regs[28] = stackframe.x28; ur_bitmap->ur.regs[27] = stackframe.x27; ur_bitmap->ur.regs[26] = stackframe.x26; ur_bitmap->ur.regs[25] = stackframe.x25; ur_bitmap->ur.regs[24] = stackframe.x24; ur_bitmap->ur.regs[23] = stackframe.x23; ur_bitmap->ur.regs[22] = stackframe.x22; ur_bitmap->ur.regs[21] = stackframe.x21; ur_bitmap->ur.regs[20] = stackframe.x20; ur_bitmap->ur.regs[19] = stackframe.x19; SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, pc)); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, sp)); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X29_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X28_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X27_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X26_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X25_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X24_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X23_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X22_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X21_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X20_REGNUM - X0_REGNUM); SET_BIT(ur_bitmap->bitmap, REG_SEQ(arm64_pt_regs, regs[0]) + X19_REGNUM - X0_REGNUM); bt->machdep = ur_bitmap; bt->need_free = TRUE; } bt->frameptr = stackframe.fp; if (pcp) *pcp = stackframe.pc; if (spp) *spp = stackframe.sp; } static void arm64_gen_hidden_frame(struct bt_info *bt, ulong sp, struct 
arm64_stackframe *frame) { struct arm64_pt_regs *ptregs; if (IN_TASK_VMA(bt->task, sp)) { bt->flags |= BT_USER_EFRAME; return; } ptregs = (struct arm64_pt_regs *) &bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(sp))]; frame->pc = ptregs->pc; frame->fp = ptregs->regs[29]; frame->sp = ptregs->sp; } static void arm64_print_exception_frame(struct bt_info *bt, ulong pt_regs, int mode, FILE *ofp) { int i, r, rows, top_reg, is_64_bit; struct arm64_pt_regs *regs; struct syment *sp; ulong LR, SP, offset; char buf[BUFSIZE]; struct machine_specific *ms = machdep->machspec; if (CRASHDEBUG(1)) fprintf(ofp, "pt_regs: %lx\n", pt_regs); regs = (struct arm64_pt_regs *) &bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(pt_regs))]; if ((mode == USER_MODE) && (regs->pstate & PSR_MODE32_BIT)) { LR = regs->regs[14]; SP = regs->regs[13]; top_reg = 12; is_64_bit = FALSE; rows = 4; } else { LR = regs->regs[30]; if (is_kernel_text (LR | ms->CONFIG_ARM64_KERNELPACMASK)) LR |= ms->CONFIG_ARM64_KERNELPACMASK; SP = regs->sp; top_reg = 29; is_64_bit = TRUE; rows = 3; } switch (mode) { case USER_MODE: if (is_64_bit) fprintf(ofp, " PC: %016lx LR: %016lx SP: %016lx\n ", (ulong)regs->pc, LR, SP); else fprintf(ofp, " PC: %08lx LR: %08lx SP: %08lx PSTATE: %08lx\n ", (ulong)regs->pc, LR, SP, (ulong)regs->pstate); break; case KERNEL_MODE: fprintf(ofp, " PC: %016lx ", (ulong)regs->pc); if (is_kernel_text(regs->pc) && (sp = value_search(regs->pc, &offset))) { fprintf(ofp, "[%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? "+0x%lx" : "+%ld", offset); fprintf(ofp, "]\n"); } else fprintf(ofp, "[unknown or invalid address]\n"); fprintf(ofp, " LR: %016lx ", LR); if (is_kernel_text(LR) && (sp = value_search(LR, &offset))) { fprintf(ofp, "[%s", sp->name); if (offset) fprintf(ofp, (*gdb_output_radix == 16) ? 
"+0x%lx" : "+%ld", offset); fprintf(ofp, "]\n"); } else fprintf(ofp, "[unknown or invalid address]\n"); fprintf(ofp, " SP: %016lx PSTATE: %08lx\n ", SP, (ulong)regs->pstate); break; } for (i = top_reg, r = 1; i >= 0; r++, i--) { fprintf(ofp, "%sX%d: ", i < 10 ? " " : "", i); fprintf(ofp, is_64_bit ? "%016lx" : "%08lx", (ulong)regs->regs[i]); if ((i == 0) && !is_64_bit) fprintf(ofp, "\n"); else if ((i == 0) || ((r % rows) == 0)) fprintf(ofp, "\n%s", (i == 0) && (mode == KERNEL_MODE) ? "" : " "); else fprintf(ofp, "%s", is_64_bit ? " " : " "); } if (is_64_bit) { if (mode == USER_MODE) { fprintf(ofp, "ORIG_X0: %016lx SYSCALLNO: %lx", (ulong)regs->orig_x0, (ulong)regs->syscallno); fprintf(ofp, " PSTATE: %08lx\n", (ulong)regs->pstate); } else if (!(bt->flags & BT_EFRAME_SEARCH)) { if (!extra_stacks_regs[extra_stacks_idx]) { extra_stacks_regs[extra_stacks_idx] = (struct user_regs_bitmap_struct *) malloc(sizeof(struct user_regs_bitmap_struct)); } memset(extra_stacks_regs[extra_stacks_idx], 0, sizeof(struct user_regs_bitmap_struct)); memcpy(&extra_stacks_regs[extra_stacks_idx]->ur, regs, sizeof(struct arm64_pt_regs)); for (int i = 0; i < sizeof(struct arm64_pt_regs)/sizeof(long); i++) SET_BIT(extra_stacks_regs[extra_stacks_idx]->bitmap, i); if (!bt->machdep || (extra_stacks_regs[extra_stacks_idx]->ur.sp != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.sp && extra_stacks_regs[extra_stacks_idx]->ur.pc != ((struct user_regs_bitmap_struct *)(bt->machdep))->ur.pc)) { gdb_add_substack (extra_stacks_idx++); } } } if (is_kernel_text(regs->pc) && (bt->flags & BT_LINE_NUMBERS)) { get_line_number(regs->pc, buf, FALSE); if (strlen(buf)) fprintf(ofp, " %s\n", buf); } if (BT_REFERENCE_CHECK(bt)) { arm64_do_bt_reference_check(bt, regs->pc, NULL); if ((sp = value_search(regs->pc, &offset))) arm64_do_bt_reference_check(bt, 0, sp->name); arm64_do_bt_reference_check(bt, LR, NULL); arm64_do_bt_reference_check(bt, SP, NULL); arm64_do_bt_reference_check(bt, regs->pstate, NULL); for (i = 
0; i <= top_reg; i++) arm64_do_bt_reference_check(bt, regs->regs[i], NULL); if (is_64_bit) { arm64_do_bt_reference_check(bt, regs->orig_x0, NULL); arm64_do_bt_reference_check(bt, regs->syscallno, NULL); } } } /* * Check a frame for a requested reference. */ static void arm64_do_bt_reference_check(struct bt_info *bt, ulong text, char *name) { ulong offset; struct syment *sp = NULL; if (!name) sp = value_search(text, &offset); else if (!text) sp = symbol_search(name); switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (name) { if (STREQ(name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } else { if (sp && !offset && STREQ(sp->name, bt->ref->str)) bt->ref->cmdflags |= BT_REF_FOUND; } break; case BT_REF_HEXVAL: if (text) { if (bt->ref->hexval == text) bt->ref->cmdflags |= BT_REF_FOUND; } else if (sp && (bt->ref->hexval == sp->value)) bt->ref->cmdflags |= BT_REF_FOUND; else if (!name && !text && (bt->ref->hexval == 0)) bt->ref->cmdflags |= BT_REF_FOUND; break; } } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. 
*/ static int arm64_translate_pte(ulong pte, void *physaddr, ulonglong unused) { int c, others, len1, len2, len3; ulong paddr; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; int page_present; paddr = PTE_TO_PHYS(pte); page_present = pte & (PTE_VALID | machdep->machspec->PTE_PROT_NONE); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf1, len1, CENTER|LJUST, "PTE")); if (!page_present) { swap_location(pte, buf1); if ((c = parse_line(buf1, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf1, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & PTE_VALID) fprintf(fp, "%sVALID", others++ ? "|" : ""); if (pte & machdep->machspec->PTE_FILE) fprintf(fp, "%sFILE", others++ ? "|" : ""); if (pte & machdep->machspec->PTE_PROT_NONE) fprintf(fp, "%sPROT_NONE", others++ ? "|" : ""); if (pte & PTE_USER) fprintf(fp, "%sUSER", others++ ? "|" : ""); if (pte & PTE_RDONLY) fprintf(fp, "%sRDONLY", others++ ? "|" : ""); if (pte & PTE_SHARED) fprintf(fp, "%sSHARED", others++ ? "|" : ""); if (pte & PTE_AF) fprintf(fp, "%sAF", others++ ? 
"|" : ""); if (pte & PTE_NG) fprintf(fp, "%sNG", others++ ? "|" : ""); if (pte & PTE_PXN) fprintf(fp, "%sPXN", others++ ? "|" : ""); if (pte & PTE_UXN) fprintf(fp, "%sUXN", others++ ? "|" : ""); if (pte & PTE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (pte & PTE_SPECIAL) fprintf(fp, "%sSPECIAL", others++ ? "|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return (page_present); } static ulong arm64_vmalloc_start(void) { return machdep->machspec->vmalloc_start_addr; } /* * Not so accurate since thread_info introduction. */ static int arm64_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); else return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } static ulong PLT_veneer_to_kvaddr(ulong value) { uint32_t insn; ulong addr = 0; int i; /* * PLT veneer always looks: * movn x16, #0x.... * movk x16, #0x...., lsl #16 * movk x16, #0x...., lsl #32 * br x16 */ for (i = 0; i < 4; i++) { if (!readmem(value + i * sizeof(insn), KVADDR, &insn, sizeof(insn), "PLT veneer", RETURN_ON_ERROR)) { error(WARNING, "cannot read PLT veneer instruction at %lx\n", value + i * sizeof(insn)); return value; } switch (i) { case 0: if ((insn & 0xffe0001f) != 0x92800010) goto not_plt; addr = ~((ulong)(insn & 0x1fffe0) >> 5); break; case 1: if ((insn & 0xffe0001f) != 0xf2a00010) goto not_plt; addr &= 0xffffffff0000ffff; addr |= (ulong)(insn & 0x1fffe0) << (16 - 5); break; case 2: if ((insn & 0xffe0001f) != 0xf2c00010) goto not_plt; addr &= 0xffff0000ffffffff; addr |= (ulong)(insn & 0x1fffe0) << (32 - 5); break; case 3: if (insn != 0xd61f0200) goto not_plt; break; default: return value; /* to avoid any warnings */ } } return addr; not_plt: return value; } /* * Filter dissassembly output if the output radix is not gdb's default 10 */ static int arm64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) 
return TRUE; console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x"))) p1--; if (!(STRNEQ(p1, " 0x") || STRNEQ(p1, "\t0x"))) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } if (IS_MODULE_VADDR(vaddr)) { ulong orig_value; p1 = &inbuf[strlen(inbuf)-1]; strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((STREQ(argv[argc-2], "b") || STREQ(argv[argc-2], "bl")) && extract_hex(argv[argc-1], &orig_value, NULLCHAR, TRUE)) { value = PLT_veneer_to_kvaddr(orig_value); sprintf(p1, " <%s%s>\n", value == orig_value ? "" : "plt:", value_to_symstr(value, buf2, output_radix)); } } console(" %s", inbuf); return TRUE; } /* * Machine dependent command. 
*/ static void arm64_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != -1) { switch (c) { case 'c': case 'm': option_not_supported(c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); arm64_display_machine_stats(); } static void arm64_display_machine_stats(void) { int i, pad; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); if ((mhz = machdep->processor_speed())) fprintf(fp, " PROCESSOR SPEED: %ld Mhz\n", mhz); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->machspec->page_offset); fprintf(fp, "KERNEL MODULES BASE: %lx\n", machdep->machspec->modules_vaddr); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", machdep->machspec->vmalloc_start_addr); fprintf(fp, "KERNEL VMEMMAP BASE: %lx\n", machdep->machspec->vmemmap_vaddr); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); if (machdep->machspec->irq_stack_size) { fprintf(fp, " IRQ STACK SIZE: %ld\n", machdep->machspec->irq_stack_size); fprintf(fp, " IRQ STACKS:\n"); for (i = 0; i < kt->cpus; i++) { pad = (i < 10) ? 3 : (i < 100) ? 2 : (i < 1000) ? 1 : 0; fprintf(fp, "%s CPU %d: %lx\n", space(pad), i, machdep->machspec->irq_stacks[i]); } } if (machdep->machspec->overflow_stack_size) { fprintf(fp, "OVERFLOW STACK SIZE: %ld\n", machdep->machspec->overflow_stack_size); fprintf(fp, " OVERFLOW STACKS:\n"); for (i = 0; i < kt->cpus; i++) { pad = (i < 10) ? 3 : (i < 100) ? 2 : (i < 1000) ? 1 : 0; fprintf(fp, "%s CPU %d: %lx\n", space(pad), i, machdep->machspec->overflow_stacks[i]); } } } static int arm64_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_present())) return cpus; else return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Retrieve task registers for the time of the crash. 
*/ static void arm64_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf64_Nhdr *note = NULL; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i, found; if (!symbol_exists("crash_notes")) { if (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE()) { if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = found = 0; i < kt->cpus; i++) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (!note) { error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i); continue; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. */ offset = sizeof(Elf64_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = (char *)note + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i], sizeof(struct arm64_pt_regs)); found++; } if (!found) { free(ms->panic_task_regs); ms->panic_task_regs = NULL; } } return; } crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. */ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read \"crash_notes\"\n"); FREEBUF(notes_ptrs); return; } if (symbol_exists("__per_cpu_offset")) { /* * Add __per_cpu_offset for each cpu to form the notes pointer. 
*/ for (i = 0; icpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(ms->panic_task_regs = calloc((size_t)kt->cpus, sizeof(struct arm64_pt_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = found = 0; i < kt->cpus; i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", i); continue; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf64_Nhdr *)buf; p = buf + sizeof(Elf64_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. */ long notesz = SIZE(note_buf) - sizeof(Elf64_Nhdr); if (sizeof(Elf64_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cpu %d: cannot find NT_PRSTATUS note\n", i); continue; } } /* * Check the sanity of NT_PRSTATUS note only for each online cpu. * If this cpu has invalid note, continue to find the crash notes * for other online cpus. */ if (note->n_type != NT_PRSTATUS) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", i); continue; } if (!STRNEQ(p, "CORE")) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", i); continue; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. 
*/ offset = sizeof(Elf64_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &ms->panic_task_regs[i], sizeof(struct arm64_pt_regs)); found++; } FREEBUF(buf); FREEBUF(notes_ptrs); if (!found) { free(ms->panic_task_regs); ms->panic_task_regs = NULL; } } static void arm64_clear_machdep_cache(void) { /* * TBD: probably not necessary... */ return; } static int arm64_on_process_stack(struct bt_info *bt, ulong stkptr) { ulong stackbase, stacktop; stackbase = GET_STACKBASE(bt->task); stacktop = GET_STACKTOP(bt->task); if ((stkptr >= stackbase) && (stkptr < stacktop)) return TRUE; return FALSE; } static int arm64_in_alternate_stackv(int cpu, ulong stkptr, ulong *stacks, ulong stack_size) { if ((cpu >= kt->cpus) || (stacks == NULL) || !stack_size) return FALSE; if ((stkptr >= stacks[cpu]) && (stkptr < (stacks[cpu] + stack_size))) return TRUE; return FALSE; } static int arm64_in_alternate_stack(int cpu, ulong stkptr) { return (arm64_on_irq_stack(cpu, stkptr) || arm64_on_overflow_stack(cpu, stkptr)); } static int arm64_on_irq_stack(int cpu, ulong stkptr) { struct machine_specific *ms = machdep->machspec; return arm64_in_alternate_stackv(cpu, stkptr, ms->irq_stacks, ms->irq_stack_size); } static int arm64_on_overflow_stack(int cpu, ulong stkptr) { struct machine_specific *ms = machdep->machspec; return arm64_in_alternate_stackv(cpu, stkptr, ms->overflow_stacks, ms->overflow_stack_size); } static void arm64_set_irq_stack(struct bt_info *bt) { struct machine_specific *ms = machdep->machspec; bt->stackbase = ms->irq_stacks[bt->tc->processor]; bt->stacktop = bt->stackbase + ms->irq_stack_size; alter_stackbuf(bt); } static void arm64_set_overflow_stack(struct bt_info *bt) { struct machine_specific *ms = machdep->machspec; bt->stackbase = ms->overflow_stacks[bt->tc->processor]; bt->stacktop = bt->stackbase + ms->overflow_stack_size; alter_stackbuf(bt); } static void arm64_set_process_stack(struct 
bt_info *bt) { bt->stackbase = GET_STACKBASE(bt->task); bt->stacktop = GET_STACKTOP(bt->task); alter_stackbuf(bt); } static int compare_kvaddr(const void *v1, const void *v2) { struct vaddr_range *r1, *r2; r1 = (struct vaddr_range *)v1; r2 = (struct vaddr_range *)v2; return (r1->start < r2->start ? -1 : r1->start == r2->start ? 0 : 1); } static int arm64_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->machspec->page_offset; vrp[cnt++].end = vt->high_memory; vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = machdep->machspec->vmalloc_start_addr; vrp[cnt++].end = last_vmalloc_address(); if (st->mods_installed) { vrp[cnt].type = KVADDR_MODULES; vrp[cnt].start = lowest_module_address(); vrp[cnt++].end = roundup(highest_module_address(), PAGESIZE()); } if (machdep->flags & VMEMMAP) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = machdep->machspec->vmemmap_vaddr; vrp[cnt++].end = vt->node_table[vt->numnodes-1].mem_map + (vt->node_table[vt->numnodes-1].size * SIZE(page)); } qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); return cnt; } /* * Include both vmalloc'd, module and vmemmap address space as VMALLOC space. */ int arm64_IS_VMALLOC_ADDR(ulong vaddr) { struct machine_specific *ms = machdep->machspec; if (is_mte_kvaddr(vaddr)) vaddr = mte_tag_reset(vaddr); if ((machdep->flags & NEW_VMEMMAP) && (vaddr >= machdep->machspec->kimage_text) && (vaddr <= machdep->machspec->kimage_end)) return FALSE; if (ms->VA_START && (vaddr >= ms->VA_START)) return TRUE; return ((vaddr >= ms->vmalloc_start_addr && vaddr <= ms->vmalloc_end) || ((machdep->flags & VMEMMAP) && ((vaddr >= ms->vmemmap_vaddr && vaddr <= ms->vmemmap_end) || (vaddr >= ms->vmalloc_end && vaddr <= ms->vmemmap_vaddr))) || (vaddr >= ms->modules_vaddr && vaddr <= ms->modules_end)); } /* Return TRUE if we succeed, return FALSE on failure. 
*/ static int arm64_set_va_bits_by_tcr(void) { ulong value; if (arm64_get_vmcoreinfo(&value, "NUMBER(TCR_EL1_T1SZ)", NUM_HEX) || arm64_get_vmcoreinfo(&value, "NUMBER(tcr_el1_t1sz)", NUM_HEX)) { /* See ARMv8 ARM for the description of * TCR_EL1.T1SZ and how it can be used * to calculate the vabits_actual * supported by underlying kernel. * * Basically: * vabits_actual = 64 - T1SZ; */ value = 64 - value; if (CRASHDEBUG(1)) fprintf(fp, "vmcoreinfo : vabits_actual: %ld\n", value); machdep->machspec->VA_BITS_ACTUAL = value; machdep->machspec->VA_BITS = value; machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); return TRUE; } return FALSE; } static void arm64_calc_VA_BITS(void) { int bitval; struct syment *sp; ulong vabits_actual, value; arm64_get_vmcoreinfo(&machdep->machspec->CONFIG_ARM64_VA_BITS, "NUMBER(VA_BITS)", NUM_DEC); if (kernel_symbol_exists("vabits_actual")) { if (pc->flags & PROC_KCORE) { vabits_actual = symbol_value_from_proc_kallsyms("vabits_actual"); if ((vabits_actual != BADVAL) && (READMEM(pc->mfd, &value, sizeof(ulong), vabits_actual, KCORE_USE_VADDR) > 0)) { if (CRASHDEBUG(1)) fprintf(fp, "/proc/kcore: vabits_actual: %ld\n", value); machdep->machspec->VA_BITS_ACTUAL = value; machdep->machspec->VA_BITS = value; machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); } else error(FATAL, "/proc/kcore: cannot read vabits_actual\n"); } else if (ACTIVE()) error(FATAL, "cannot determine VA_BITS_ACTUAL: please use /proc/kcore\n"); else { if (arm64_set_va_bits_by_tcr()) { /* nothing */ } else if (machdep->machspec->VA_BITS_ACTUAL) { machdep->machspec->VA_BITS = machdep->machspec->VA_BITS_ACTUAL; machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); } else if (machdep->machspec->CONFIG_ARM64_VA_BITS) { /* guess */ machdep->machspec->VA_BITS_ACTUAL = machdep->machspec->CONFIG_ARM64_VA_BITS; machdep->machspec->VA_BITS = machdep->machspec->CONFIG_ARM64_VA_BITS; machdep->machspec->VA_START = 
_VA_START(machdep->machspec->VA_BITS_ACTUAL); } else error(FATAL, "cannot determine VA_BITS_ACTUAL\n"); } if (machdep->machspec->CONFIG_ARM64_VA_BITS) machdep->machspec->VA_BITS = machdep->machspec->CONFIG_ARM64_VA_BITS; /* * The mm flip commit is introduced before 52-bits VA, which is before the * commit to export NUMBER(TCR_EL1_T1SZ) */ machdep->flags |= FLIPPED_VM; return; } else if (arm64_set_va_bits_by_tcr()) { return; } else if (machdep->machspec->VA_BITS_ACTUAL) { machdep->machspec->VA_BITS = machdep->machspec->VA_BITS_ACTUAL; machdep->machspec->VA_START = _VA_START(machdep->machspec->VA_BITS_ACTUAL); return; } if (!(sp = symbol_search("swapper_pg_dir")) && !(sp = symbol_search("idmap_pg_dir")) && !(sp = symbol_search("_text")) && !(sp = symbol_search("stext"))) { for (sp = st->symtable; sp < st->symend; sp++) { if (highest_bit_long(sp->value) == 63) break; } } if (sp) value = sp->value; else value = kt->vmcoreinfo.log_buf_SYMBOL; /* crash --log */ for (bitval = highest_bit_long(value); bitval; bitval--) { if ((value & (1UL << bitval)) == 0) { if (machdep->flags & NEW_VMEMMAP) machdep->machspec->VA_BITS = bitval + 1; else machdep->machspec->VA_BITS = bitval + 2; break; } } /* * Verify against dumpfiles that export VA_BITS in vmcoreinfo */ if (machdep->machspec->CONFIG_ARM64_VA_BITS && (machdep->machspec->VA_BITS != machdep->machspec->CONFIG_ARM64_VA_BITS)) { error(WARNING, "VA_BITS: calculated: %ld vmcoreinfo: %ld\n", machdep->machspec->VA_BITS, machdep->machspec->CONFIG_ARM64_VA_BITS); machdep->machspec->VA_BITS = machdep->machspec->CONFIG_ARM64_VA_BITS; } if (CRASHDEBUG(1)) fprintf(fp, "VA_BITS: %ld\n", machdep->machspec->VA_BITS); } /* * The size and end of the vmalloc range is dependent upon the kernel's * VMEMMAP_SIZE value, and the vmemmap range is dependent upon the end * of the vmalloc range as well as the VMEMMAP_SIZE: * * #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) * #define VMALLOC_START 
(UL(0xffffffffffffffff) << VA_BITS) * #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) * * Since VMEMMAP_SIZE is dependent upon the size of a struct page, * the two ranges cannot be determined until POST_GDB. * * Since 52-bit VA was introduced: * * #define STRUCT_PAGE_MAX_SHIFT 6 * #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) * #define VMEMMAP_START (-VMEMMAP_SIZE) * #define VMALLOC_START (MODULES_END) * #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) * #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) */ #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) #define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1) #define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask)) static void arm64_calc_virtual_memory_ranges(void) { struct machine_specific *ms = machdep->machspec; ulong value, vmemmap_start, vmemmap_end, vmemmap_size, vmalloc_end; char *string; int ret; ulong PUD_SIZE = UNINITIALIZED; if (!machdep->machspec->CONFIG_ARM64_VA_BITS) { if (arm64_get_vmcoreinfo(&value, "NUMBER(VA_BITS)", NUM_DEC)) { machdep->machspec->CONFIG_ARM64_VA_BITS = value; } else if (kt->ikconfig_flags & IKCONFIG_AVAIL) { if ((ret = get_kernel_config("CONFIG_ARM64_VA_BITS", &string)) == IKCONFIG_STR) machdep->machspec->CONFIG_ARM64_VA_BITS = atol(string); } } if (THIS_KERNEL_VERSION < LINUX(3,17,0)) /* use original hardwired values */ return; STRUCT_SIZE_INIT(page, "page"); ms->struct_page_size = SIZE(page); switch (machdep->flags & (VM_L2_64K|VM_L3_64K|VM_L3_4K|VM_L4_4K)) { case VM_L2_64K: case VM_L3_64K: PUD_SIZE = PGDIR_SIZE_L2_64K; break; case VM_L3_4K: PUD_SIZE = PGDIR_SIZE_L3_4K; case VM_L4_4K: PUD_SIZE = PUD_SIZE_L4_4K; break; } #define STRUCT_PAGE_MAX_SHIFT 6 if (ms->VA_BITS_ACTUAL) { ulong va_bits_min = 48; if (machdep->machspec->CONFIG_ARM64_VA_BITS < 48) va_bits_min = ms->CONFIG_ARM64_VA_BITS; vmemmap_size = (1UL) << (va_bits_min - machdep->pageshift - 1 + 
STRUCT_PAGE_MAX_SHIFT); vmalloc_end = (- PUD_SIZE - vmemmap_size - KILOBYTES(64)); vmemmap_start = (-vmemmap_size - MEGABYTES(2)); ms->vmalloc_end = vmalloc_end - 1; ms->vmemmap_vaddr = vmemmap_start; ms->vmemmap_end = vmemmap_start + vmemmap_size; return; } if (machdep->flags & NEW_VMEMMAP) vmemmap_size = 1UL << (ms->VA_BITS - machdep->pageshift - 1 + STRUCT_PAGE_MAX_SHIFT); else vmemmap_size = ALIGN((1UL << (ms->VA_BITS - machdep->pageshift)) * SIZE(page), PUD_SIZE); vmalloc_end = (ms->page_offset - PUD_SIZE - vmemmap_size - SZ_64K); if (machdep->flags & NEW_VMEMMAP) { vmemmap_start = ms->page_offset - vmemmap_size; vmemmap_end = ms->page_offset; } else { vmemmap_start = vmalloc_end + SZ_64K; vmemmap_end = vmemmap_start + vmemmap_size; } ms->vmalloc_end = vmalloc_end - 1; ms->vmemmap_vaddr = vmemmap_start; ms->vmemmap_end = vmemmap_end - 1; } static int arm64_is_uvaddr(ulong addr, struct task_context *tc) { return (addr < machdep->machspec->userspace_top); } ulong arm64_swp_type(ulong pte) { struct machine_specific *ms = machdep->machspec; pte >>= ms->__SWP_TYPE_SHIFT; pte &= ms->__SWP_TYPE_MASK; return pte; } ulong arm64_swp_offset(ulong pte) { struct machine_specific *ms = machdep->machspec; pte >>= ms->__SWP_OFFSET_SHIFT; if (ms->__SWP_OFFSET_MASK) pte &= ms->__SWP_OFFSET_MASK; return pte; } static void arm64_calc_KERNELPACMASK(void) { ulong value; if (arm64_get_vmcoreinfo(&value, "NUMBER(KERNELPACMASK)", NUM_HEX)) { machdep->machspec->CONFIG_ARM64_KERNELPACMASK = value; if (CRASHDEBUG(1)) fprintf(fp, "CONFIG_ARM64_KERNELPACMASK: %lx\n", value); } } #define GENMASK_UL(h, l) \ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) static void arm64_recalc_KERNELPACMASK(void){ /* * Check if PAC is enabled according to the existence of * kernel symbol 'ptrauth_keys_kernel'. 
*/ if (STRUCT_EXISTS("ptrauth_keys_kernel") && machdep->machspec->VA_BITS_ACTUAL){ machdep->machspec->CONFIG_ARM64_KERNELPACMASK = GENMASK_UL(63, machdep->machspec->VA_BITS_ACTUAL); if (CRASHDEBUG(1)) fprintf(fp, "CONFIG_ARM64_KERNELPACMASK: %lx\n", machdep->machspec->CONFIG_ARM64_KERNELPACMASK); } } static ulong arm64_set_irq_stack_size(void) { int min_thread_shift = 14; ulong thread_shift = 0; char buf1[BUFSIZE]; char *pos1, *pos2; int errflag = 0; if (kernel_symbol_exists("vmcoreinfo_data") && kernel_symbol_exists("vmcoreinfo_size")) { /* * Referring to arch/arm64/include/asm/memory.h */ if (kernel_symbol_exists("kasan_enable_current")) min_thread_shift += 1; if (MEMBER_EXISTS("task_struct", "stack_vm_area") && (min_thread_shift < machdep->pageshift)) thread_shift = machdep->pageshift; else thread_shift = min_thread_shift; } else { sprintf(buf1, "x/32i vectors"); open_tmpfile(); if (!gdb_pass_through(buf1, pc->tmpfile, GNU_RETURN_ON_ERROR)) goto out; rewind(pc->tmpfile); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if ((pos1 = strstr(buf1, "tbnz"))) { if ((pos2 = strchr(pos1, '#'))) { pos2 += 1; for (pos1 = pos2; *pos2 != '\0' && *pos2 != ','; pos2++); *pos2 = '\0'; thread_shift = stol(pos1, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) thread_shift = 0; break; } } } out: close_tmpfile(); } if (thread_shift) return ((1UL) << thread_shift); return 0; } #endif /* ARM64 */ crash-utility-crash-9cd43f5/va_server.c0000664000372000037200000002437415107550337017546 0ustar juerghjuergh/* va_server.c - kernel crash dump file translation library * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2011, 2013 David Anderson * Copyright (C) 2002-2006, 2011, 2013 Red Hat, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 10/99, Dave Winchell, Initial release for kernel crash dump support. * 11/12/99, Dave Winchell, Add support for in memory dumps. */ #include #include #include #include #include #include #include #include #include "va_server.h" #include #include #include struct map_hdr *vas_map_base = (struct map_hdr *)0; /* base of tree */ #ifdef NOT_DEF #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(page_size - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(page_size - 1))) #endif u_long vas_base_va; u_long vas_start_va; FILE *vas_file_p; char *zero_page; int vas_version; int read_map(char *crash_file); void load_data(struct crash_map_entry *m); int find_data(u_long va, u_long *buf, u_long *len, u_long *offset); u_long vas_find_end(void); int vas_free_memory(char *); int vas_memory_used(void); int vas_memory_dump(FILE *); int mclx_page_size(void); void set_vas_debug(ulong); extern int monitor_memory(long *, long *, long *, long *); int Page_Size; ulong vas_debug = 0; extern void *malloc(size_t); int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride) { Page_Size = getpagesize(); /* temporary setting until disk header is read */ if(read_map(crash_file)) { if(va_server_init_v1(crash_file, start, end, stride)) return -1; vas_version = 1; return 0; } vas_version = 2; zero_page = (char *)malloc(Page_Size); bzero((void *)zero_page, Page_Size); vas_base_va = vas_start_va = vas_map_base->map[0].start_va; if(start) 
		*start = vas_start_va;
	if(end) {
		*end = vas_find_end();
	}
	if(stride)
		*stride = Page_Size;
	return 0;
}

/*
 * Set the file position for subsequent vas_read()/vas_write() calls,
 * expressed as an offset from the start of the dump's virtual range.
 * Only SEEK_SET is supported for v2 dumps; returns 0 on success, -1 on error.
 */
int vas_lseek(u_long position, int whence)
{
	if(vas_version < 2)
		return vas_lseek_v1(position, whence);

	if(whence != SEEK_SET)
		return -1;

	vas_base_va = vas_start_va + position;
	return 0;
}

/*
 * Read count bytes at the current position into buf_in, crossing page
 * boundaries as needed.  Pages that were not saved in the dump are
 * satisfied from zero_page (see find_data()).  Always returns count.
 */
size_t vas_read(void *buf_in, size_t count)
{
	u_long len, offset, buf, va;
	u_long num, output, remaining;

	if(vas_version < 2)
		return vas_read_v1(buf_in, count);

	va = vas_base_va;
	remaining = count;
	output = (u_long)buf_in;

	while(remaining) {
		find_data(va, &buf, &len, &offset);
		/* copy at most up to the end of the containing page */
		num = (remaining > (len - offset)) ? (len - offset) : remaining;
		bcopy((const void *)(buf+offset), (void *)output, num);
		remaining -= num;
		va += num;
		output += num;
	}
	vas_base_va += count;
	return count;
}

/*
 * Patch a single u_long in the in-memory copy of the dump at the current
 * position.  Only sizeof(u_long) writes are accepted, and the store is
 * applied only if the containing page was actually saved in the dump.
 * NOTE(review): the -1 error value is returned through a size_t, so a
 * caller sees a huge positive count -- verify callers test for that.
 */
size_t vas_write(void *buf_in, size_t count)
{
	u_long len, offset, buf, va;

	if(vas_version < 2)
		return vas_write_v1(buf_in, count);

	if(count != sizeof(u_long)) {
		printf("count %d not %d\n", (int)count, (int)sizeof(u_long));
		return -1;
	}

	va = vas_base_va;
	/* find_data() returns 0 when the page is present in the dump */
	if(!find_data(va, &buf, &len, &offset))
		*(u_long *)(buf+offset) = *(u_long *)buf_in;

	vas_base_va += count;
	return count;
}

/*
 * Release the decompressed data cached for the map entry covering va.
 */
void vas_free_data(u_long va)
{
	struct crash_map_entry *m, *last_m;

	if(vas_version < 2) {
		vas_free_data_v1(va);
		return;
	}

	/* locate the last map entry whose start_va does not exceed va */
	m = last_m = vas_map_base->map;
	for(;m->start_va;) {
		if(m->start_va > va)
			break;
		last_m = m;
		m++;
	}
	if(last_m->exp_data) {
		free((void *)last_m->exp_data);
		last_m->exp_data = 0;
	}
}

/*
 * Return the highest virtual address recorded in the dump's map: walk to
 * the final map entry, load its data, then walk to its final sub-map slot.
 */
u_long vas_find_end(void)
{
	struct crash_map_entry *m;
	u_long *sub_m;

	m = vas_map_base->map;
	for(;m->start_va;m++)
		;
	m--;
	load_data(m);
	sub_m = (u_long *)m->exp_data;
	for(;*sub_m; sub_m++)
		;
	sub_m--;
	return *sub_m;
}

/*
 * Translate virtual address va into a pointer into the decompressed data.
 * On return *buf is the page buffer (zero_page if the page was not saved),
 * *len is the page size, and *offset is va's offset within the page.
 * Returns 0 if the page was saved in the dump, 1 if it was not.
 */
int find_data(u_long va, u_long *buf, u_long *len, u_long *offset)
{
	u_long off;
	struct crash_map_entry *m, *last_m;
	u_long *sub_m, va_saved;
	char *data;
	int saved;

	/* locate the last map entry whose start_va does not exceed va */
	m = last_m = vas_map_base->map;
	for(;m->start_va;) {
		if(m->start_va > va)
			break;
		last_m = m;
		m++;
	}
	load_data(last_m);
	sub_m = (u_long *)last_m->exp_data;
	data =
last_m->exp_data + CRASH_SUB_MAP_PAGES*Page_Size; saved = 0; for(;*sub_m; sub_m++, data += Page_Size) { va_saved = *sub_m; if((va >= va_saved) && (va < (va_saved + Page_Size))) { saved = 1; break; } else if(va < va_saved) break; } off = va - (u_long)trunc_page(va); if(offset) *offset = off; if(len) *len = Page_Size; if (vas_debug && !saved) fprintf(stderr, "find_data: page containing %lx not saved\n", (u_long)trunc_page(va)); if(buf) *buf = saved ? (u_long)data : (u_long)zero_page; return (saved ^ 1); } void load_data(struct crash_map_entry *m) { char *compr_buf; char *exp_buf; int ret, items; uLongf destLen; int retries; if(m->exp_data) goto out; ret = fseek(vas_file_p, (long)(m->start_blk * Page_Size), SEEK_SET); if(ret == -1) { printf("load_data: unable to fseek, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } retries = 0; load_data_retry1: compr_buf = (char *)malloc(m->num_blks * Page_Size); if(!compr_buf) { if (retries++ == 0) { vas_free_memory("malloc failure: out of memory"); goto load_data_retry1; } fprintf(stderr, "FATAL ERROR: malloc failure: out of memory\n"); clean_exit(1); } items = fread((void *)compr_buf, sizeof(char), m->num_blks * Page_Size, vas_file_p); if(items != m->num_blks * Page_Size) { printf("unable to read blocks from errno = %d\n", ferror(vas_file_p)); clean_exit(1); } load_data_retry2: m->exp_data = exp_buf = (char *)malloc((CRASH_SOURCE_PAGES+CRASH_SUB_MAP_PAGES) * Page_Size); if(!exp_buf) { if (retries++ == 0) { vas_free_memory("malloc failure: out of memory"); goto load_data_retry2; } fprintf(stderr, "FATAL ERROR: malloc failure: out of memory\n"); clean_exit(1); } destLen = (uLongf)((CRASH_SOURCE_PAGES+CRASH_SUB_MAP_PAGES) * Page_Size); ret = uncompress((Bytef *)exp_buf, &destLen, (const Bytef *)compr_buf, (uLong)items); if(ret) { if(ret == Z_MEM_ERROR) printf("load_data, bad ret Z_MEM_ERROR from uncompress\n"); else if(ret == Z_BUF_ERROR) printf("load_data, bad ret Z_BUF_ERROR from uncompress\n"); else if(ret == Z_DATA_ERROR) 
printf("load_data, bad ret Z_DATA_ERROR from uncompress\n"); else printf("load_data, bad ret %d from uncompress\n", ret); clean_exit(1); } free((void *)compr_buf); out: return; } int read_map(char *crash_file) { struct crash_map_hdr *disk_hdr; int ret, items; struct map_hdr *hdr; vas_file_p = fopen(crash_file, "r"); if(vas_file_p == (FILE *)0) { printf("read_maps: bad ret from fopen for %s: %s\n", crash_file, strerror(errno)); return -1; } hdr = (struct map_hdr *)malloc(sizeof(struct map_hdr)); if(!hdr) { printf("read_map: unable to malloc mem\n"); return -1; } bzero((void *)hdr, sizeof(struct map_hdr)); disk_hdr = (struct crash_map_hdr *)malloc(Page_Size); ret = fseek(vas_file_p, (long)0, SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(hdr); free(disk_hdr); return -1; } items = fread((void *)disk_hdr, 1, Page_Size, vas_file_p); if(items != Page_Size) { free(hdr); free(disk_hdr); return -1; } if(disk_hdr->magic[0] != CRASH_MAGIC) { free(hdr); free(disk_hdr); return -1; } ret = fseek(vas_file_p, (long)((disk_hdr->map_block) * disk_hdr->blk_size), SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(hdr); free(disk_hdr); return -1; } Page_Size = disk_hdr->blk_size; /* over-ride PAGE_SIZE */ hdr->blk_size = disk_hdr->blk_size; hdr->map = (struct crash_map_entry *)malloc(disk_hdr->map_blocks * disk_hdr->blk_size); items = fread((void *)hdr->map, hdr->blk_size, disk_hdr->map_blocks, vas_file_p); if(items != disk_hdr->map_blocks) { printf("unable to read map entries, err = %d\n", errno); free(hdr); free(disk_hdr); return -1; } vas_map_base = hdr; free(disk_hdr); return 0; } int vas_free_memory(char *s) { struct crash_map_entry *m; long swap_usage; int blks; if (vas_version < 2) return 0; if (s) { fprintf(stderr, "\nWARNING: %s ", s); if (monitor_memory(NULL, NULL, NULL, &swap_usage)) fprintf(stderr, "(swap space usage: %ld%%)", swap_usage); fprintf(stderr, "\nWARNING: 
memory/swap exhaustion may cause this session to be killed\n"); } for (blks = 0, m = vas_map_base->map; m->start_va; m++) { if (m->exp_data) { free((void *)m->exp_data); m->exp_data = 0; blks += m->num_blks; } } return blks; } int vas_memory_used(void) { struct crash_map_entry *m; int blks; if (vas_version < 2) return 0; for (blks = 0, m = vas_map_base->map; m->start_va; m++) { if (m->exp_data) blks += m->num_blks; } return blks; } char *memory_dump_hdr_32 = "START_VA EXP_DATA START_BLK NUM_BLKS\n"; char *memory_dump_fmt_32 = "%8lx %8lx %9d %8d\n"; char *memory_dump_hdr_64 = \ " START_VA EXP_DATA START_BLK NUM_BLKS\n"; char *memory_dump_fmt_64 = "%16lx %16lx %9d %8d\n"; int vas_memory_dump(FILE *fp) { struct crash_map_entry *m; char *hdr, *fmt; int blks; if (vas_version < 2) { fprintf(fp, "%s\n", vas_version ? "version 1: not supported" : "no dumpfile"); return 0; } hdr = sizeof(long) == 4 ? memory_dump_hdr_32 : memory_dump_hdr_64; fmt = sizeof(long) == 4 ? memory_dump_fmt_32 : memory_dump_fmt_64; fprintf(fp, "%s", hdr); for (blks = 0, m = vas_map_base->map; m->start_va; m++) { fprintf(fp, fmt, m->start_va, m->exp_data, m->start_blk, m->num_blks); if (m->exp_data) blks += m->num_blks; } fprintf(fp, "total blocks: %d\n", blks); return blks; } int mclx_page_size(void) { return (Page_Size); } void set_vas_debug(ulong value) { vas_debug = value; } crash-utility-crash-9cd43f5/loongarch64.c0000664000372000037200000011215115107550337017667 0ustar juerghjuergh/* loongarch64.c - core analysis suite * * Copyright (C) 2021 Loongson Technology Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef LOONGARCH64 #include #include "defs.h" /* from arch/loongarch/include/asm/ptrace.h */ struct loongarch64_pt_regs { /* Saved main processor registers. */ unsigned long regs[32]; /* Saved special registers. */ unsigned long csr_crmd; unsigned long csr_prmd; unsigned long csr_euen; unsigned long csr_ecfg; unsigned long csr_estat; unsigned long csr_epc; unsigned long csr_badvaddr; unsigned long orig_a0; }; struct loongarch64_unwind_frame { unsigned long sp; unsigned long pc; unsigned long ra; }; static int loongarch64_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose); static int loongarch64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose); static int loongarch64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose); static int loongarch64_translate_pte(ulong pte, void *physaddr, ulonglong pte64); static void loongarch64_cmd_mach(void); static void loongarch64_display_machine_stats(void); static void loongarch64_back_trace_cmd(struct bt_info *bt); static void loongarch64_analyze_function(ulong start, ulong offset, struct loongarch64_unwind_frame *current, struct loongarch64_unwind_frame *previous); static void loongarch64_dump_backtrace_entry(struct bt_info *bt, struct syment *sym, struct loongarch64_unwind_frame *current, struct loongarch64_unwind_frame *previous, int level); static void loongarch64_dump_exception_stack(struct bt_info *bt, char *pt_regs); static int loongarch64_is_exception_entry(struct syment *sym); static void loongarch64_display_full_frame(struct bt_info *bt, struct loongarch64_unwind_frame *current, struct loongarch64_unwind_frame *previous); static void loongarch64_stackframe_init(void); static void loongarch64_get_stack_frame(struct 
bt_info *bt, ulong *pcp, ulong *spp); static int loongarch64_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp); static int loongarch64_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp); static int loongarch64_init_active_task_regs(void); static int loongarch64_get_crash_notes(void); static int loongarch64_get_elf_notes(void); /* * 3 Levels paging PAGE_SIZE=16KB * PGD | PMD | PTE | OFFSET | * 11 | 11 | 11 | 14 | */ /* From arch/loongarch/include/asm/pgtable{,-64}.h */ typedef struct { ulong pgd; } pgd_t; typedef struct { ulong pmd; } pmd_t; typedef struct { ulong pte; } pte_t; #define TASK_SIZE64 (1UL << 40) #define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT() - 3)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE - 1)) #define PGDIR_SHIFT (PMD_SHIFT + (PAGESHIFT() - 3)) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PTRS_PER_PTE (1UL << (PAGESHIFT() - 3)) #define PTRS_PER_PMD PTRS_PER_PTE #define PTRS_PER_PGD PTRS_PER_PTE #define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE) : 1) #define pte_index(addr) (((addr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define LOONGARCH64_CPU_RIXI (1UL << 23) /* CPU has TLB Read/eXec Inhibit */ #define LOONGARCH64_EF_R0 0 #define LOONGARCH64_EF_RA 1 #define LOONGARCH64_EF_SP 3 #define LOONGARCH64_EF_FP 22 #define LOONGARCH64_EF_CSR_EPC 32 #define LOONGARCH64_EF_CSR_BADVADDR 33 #define LOONGARCH64_EF_CSR_CRMD 34 #define LOONGARCH64_EF_CSR_PRMD 35 #define LOONGARCH64_EF_CSR_EUEN 36 #define LOONGARCH64_EF_CSR_ECFG 37 #define LOONGARCH64_EF_CSR_ESTAT 38 static struct machine_specific loongarch64_machine_specific = { 0 }; /* * Holds registers during the crash. 
*/ static struct loongarch64_pt_regs *panic_task_regs; /* * Check and print the flags on the page */ static void check_page_flags(ulong pte) { #define CHECK_PAGE_FLAG(flag) \ if ((_PAGE_##flag) && (pte & _PAGE_##flag)) \ fprintf(fp, "%s" #flag, others++ ? "|" : "") int others = 0; fprintf(fp, "("); if (pte) { CHECK_PAGE_FLAG(VALID); CHECK_PAGE_FLAG(DIRTY); CHECK_PAGE_FLAG(PLV); /* Determine whether it is a huge page format */ if (pte & _PAGE_HGLOBAL) { CHECK_PAGE_FLAG(HUGE); CHECK_PAGE_FLAG(HGLOBAL); } else { CHECK_PAGE_FLAG(GLOBAL); } CHECK_PAGE_FLAG(PRESENT); CHECK_PAGE_FLAG(WRITE); CHECK_PAGE_FLAG(PROTNONE); CHECK_PAGE_FLAG(SPECIAL); CHECK_PAGE_FLAG(NO_READ); CHECK_PAGE_FLAG(NO_EXEC); CHECK_PAGE_FLAG(RPLV); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); } /* * Translate a PTE, returning TRUE if the page is present. * If a physaddr pointer is passed in, don't print anything. */ static int loongarch64_translate_pte(ulong pte, void *physaddr, ulonglong unused) { char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char *arglist[MAXARGS]; int page_present; int c, len1, len2, len3; ulong paddr; paddr = PTOB(pte >> _PFN_SHIFT); page_present = !!(pte & _PAGE_PRESENT); if (physaddr) { *(ulong *)physaddr = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf1, len1, CENTER | LJUST, "PTE")); if (!page_present) { swap_location(pte, buf1); if ((c = parse_line(buf1, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, 
NULL)); return page_present; } sprintf(physbuf, "%lx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf1, len2, CENTER | LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER | RJUST, NULL), mkstring(physbuf, len2, CENTER | RJUST, NULL)); check_page_flags(pte); return page_present; } /* * Identify and print the segment name to which the virtual address belongs */ static void get_segment_name(ulong vaddr, int verbose) { const char * segment; if (verbose) { if (vaddr < 0x4000000000000000lu) segment = "xuvrange"; else if (vaddr < 0x8000000000000000lu) segment = "xsprange"; else if (vaddr < 0xc000000000000000lu) segment = "xkprange"; else segment = "xkvrange"; fprintf(fp, "SEGMENT: %s\n", segment); } } /* * Virtual to physical memory translation. This function will be called * by both loongarch64_kvtop and loongarch64_uvtop. */ static int loongarch64_pgd_vtop(ulong *pgd, ulong vaddr, physaddr_t *paddr, int verbose) { ulong *pgd_ptr, pgd_val; ulong *pmd_ptr, pmd_val; ulong *pte_ptr, pte_val; get_segment_name(vaddr, verbose); if (IS_XKPRANGE(vaddr)) { *paddr = VTOP(vaddr); return TRUE; } if (verbose) fprintf(fp, "PAGE DIRECTORY: %016lx\n", (ulong)pgd); pgd_ptr = pgd + pgd_index(vaddr); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_val = ULONG(machdep->pgd + PAGEOFFSET(pgd_ptr)); if (verbose) fprintf(fp, " PGD: %16lx => %16lx\n", (ulong)pgd_ptr, pgd_val); if (!pgd_val) goto no_page; pmd_ptr = (ulong *)(VTOP(pgd_val) + sizeof(pmd_t) * pmd_index(vaddr)); FILL_PMD(PAGEBASE(pmd_ptr), PHYSADDR, PAGESIZE()); pmd_val = ULONG(machdep->pmd + PAGEOFFSET(pmd_ptr)); if (verbose) fprintf(fp, " PMD: %016lx => %016lx\n", (ulong)pmd_ptr, pmd_val); if (!pmd_val) goto no_page; pte_ptr = (ulong *)(VTOP(pmd_val) + sizeof(pte_t) * pte_index(vaddr)); FILL_PTBL(PAGEBASE(pte_ptr), PHYSADDR, PAGESIZE()); pte_val = ULONG(machdep->ptbl + PAGEOFFSET(pte_ptr)); if (verbose) fprintf(fp, " PTE: %016lx => %016lx\n", 
(ulong)pte_ptr, pte_val); if (!pte_val) goto no_page; if (!(pte_val & _PAGE_PRESENT)) { if (verbose) { fprintf(fp, "\n"); loongarch64_translate_pte((ulong)pte_val, 0, pte_val); } return FALSE; } *paddr = PTOB(pte_val >> _PFN_SHIFT) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %016lx\n\n", PAGEBASE(*paddr)); loongarch64_translate_pte(pte_val, 0, 0); } return TRUE; no_page: fprintf(fp, "invalid\n"); return FALSE; } /* Translates a user virtual address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. */ static int loongarch64_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm, active_mm; ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } return loongarch64_pgd_vtop(pgd, vaddr, paddr, verbose);; } /* Translates a user virtual address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. */ static int loongarch64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { if (!IS_KVADDR(kvaddr)) return FALSE; if (!verbose) { if (IS_XKPRANGE(kvaddr)) { *paddr = VTOP(kvaddr); return TRUE; } } return loongarch64_pgd_vtop((ulong *)vt->kernel_pgd[0], kvaddr, paddr, verbose); } /* * Machine dependent command. 
*/ static void loongarch64_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cmo")) != EOF) { switch (c) { case 'c': case 'm': case 'o': option_not_supported(c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); loongarch64_display_machine_stats(); } /* * "mach" command output. */ static void loongarch64_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } /* * Unroll a kernel stack. */ static void loongarch64_back_trace_cmd(struct bt_info *bt) { struct loongarch64_unwind_frame current, previous; struct loongarch64_pt_regs *regs; char pt_regs[SIZE(pt_regs)]; int level = 0; int invalid_ok = 1; if (bt->flags & BT_REGS_NOT_FOUND) return; previous.sp = previous.pc = previous.ra = 0; current.pc = bt->instptr; current.sp = bt->stkptr; current.ra = 0; if (!INSTACK(current.sp, bt)) return; if (bt->machdep) { regs = (struct loongarch64_pt_regs *)bt->machdep; previous.pc = current.ra = regs->regs[LOONGARCH64_EF_RA]; } while (current.sp <= bt->stacktop - 32 - SIZE(pt_regs)) { struct syment *symbol = NULL; ulong offset; if (CRASHDEBUG(8)) fprintf(fp, "level %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); if (!IS_KVADDR(current.pc) && !invalid_ok) return; symbol = value_search(current.pc, &offset); if (!symbol && !invalid_ok) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } invalid_ok = 0; /* * If we get an address which points to the start of a * function, then it could one of the following: * * - we are 
dealing with a noreturn function. The last call * from a noreturn function has an ra which points to the * start of the function after it. This is common in the * oops callchain because of die() which is annotated as * noreturn. * * - we have taken an exception at the start of this function. * In this case we already have the RA in current.ra. * * - we are in one of these routines which appear with zero * offset in manually-constructed stack frames: * * * ret_from_exception * * ret_from_irq * * ret_from_fork * * ret_from_kernel_thread */ if (symbol && !STRNEQ(symbol->name, "ret_from") && !offset && !current.ra && current.sp < bt->stacktop - 32 - SIZE(pt_regs)) { if (CRASHDEBUG(8)) fprintf(fp, "zero offset at %s, try previous symbol\n", symbol->name); symbol = value_search(current.pc - 4, &offset); if (!symbol) { error(FATAL, "PC is unknown symbol (%lx)", current.pc); return; } } if (symbol && loongarch64_is_exception_entry(symbol)) { GET_STACK_DATA(current.sp, pt_regs, sizeof(pt_regs)); regs = (struct loongarch64_pt_regs *) (pt_regs + OFFSET(pt_regs_regs)); previous.ra = regs->regs[LOONGARCH64_EF_RA]; previous.sp = regs->regs[LOONGARCH64_EF_SP]; current.ra = regs->csr_epc; if (CRASHDEBUG(8)) fprintf(fp, "exception pc %#lx ra %#lx sp %lx\n", previous.pc, previous.ra, previous.sp); /* The PC causing the exception may have been invalid */ invalid_ok = 1; } else if (symbol) { loongarch64_analyze_function(symbol->value, offset, ¤t, &previous); } else { /* * The current PC is invalid. Assume that the code * jumped through a invalid pointer and that the SP has * not been adjusted. 
*/ previous.sp = current.sp; } if (symbol) loongarch64_dump_backtrace_entry(bt, symbol, ¤t, &previous, level++); current.pc = current.ra; current.sp = previous.sp; current.ra = previous.ra; if (CRASHDEBUG(8)) fprintf(fp, "next %d pc %#lx ra %#lx sp %lx\n", level, current.pc, current.ra, current.sp); previous.sp = previous.pc = previous.ra = 0; } } static void loongarch64_analyze_function(ulong start, ulong offset, struct loongarch64_unwind_frame *current, struct loongarch64_unwind_frame *previous) { ulong i; ulong rapos = 0; ulong spadjust = 0; uint32_t *funcbuf, *ip; if (CRASHDEBUG(8)) fprintf(fp, "%s: start %#lx offset %#lx\n", __func__, start, offset); if (!offset) { previous->sp = current->sp; return; } ip = funcbuf = (uint32_t *)GETBUF(offset); if (!readmem(start, KVADDR, funcbuf, offset, "loongarch64_analyze_function", RETURN_ON_ERROR)) { FREEBUF(funcbuf); error(WARNING, "Cannot read function at %16lx\n", start); return; } for (i = 0; i < offset; i += 4) { ulong insn = *ip & 0xffffffff; ulong si12 = (insn >> 10) & 0xfff; /* bit[10:21] */ if (CRASHDEBUG(8)) fprintf(fp, "insn @ %#lx = %#lx\n", start + i, insn); if ((insn & 0xffc003ff) == 0x02800063 || /* addi.w sp,sp,si12 */ (insn & 0xffc003ff) == 0x02c00063) { /* addi.d sp,sp,si12 */ if (!(si12 & 0x800)) /* si12 < 0 */ break; spadjust += 0x1000 - si12; if (CRASHDEBUG(8)) fprintf(fp, "si12 =%lu ,spadjust = %lu\n", si12, spadjust); } else if ((insn & 0xffc003ff) == 0x29800061 || /* st.w ra,sp,si12 */ (insn & 0xffc003ff) == 0x29c00061) { /* st.d ra,sp,si12 */ rapos = current->sp + si12; if (CRASHDEBUG(8)) fprintf(fp, "rapos %lx\n", rapos); break; } ip++; } FREEBUF(funcbuf); previous->sp = current->sp + spadjust; if (rapos && !readmem(rapos, KVADDR, ¤t->ra, sizeof(current->ra), "RA from stack", RETURN_ON_ERROR)) { error(FATAL, "Cannot read RA from stack %lx", rapos); return; } } static void loongarch64_dump_backtrace_entry(struct bt_info *bt, struct syment *sym, struct loongarch64_unwind_frame *current, struct 
loongarch64_unwind_frame *previous, int level) { const char *name = sym ? sym->name : "(invalid)"; struct load_module *lm; char *name_plus_offset = NULL; struct syment *symp; ulong symbol_offset; char buf[BUFSIZE]; char pt_regs[SIZE(pt_regs)]; if (bt->flags & BT_SYMBOL_OFFSET) { symp = value_search(current->pc, &symbol_offset); if (symp && symbol_offset) name_plus_offset = value_to_symstr(current->pc, buf, bt->radix); } fprintf(fp, "%s#%d [%016lx] %s at %016lx", level < 10 ? " " : "", level, current->sp, name_plus_offset ? name_plus_offset : name, current->pc); if (module_symbol(current->pc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); /* * 'bt -l', get a line number associated with a current pc address. */ if (bt->flags & BT_LINE_NUMBERS) { get_line_number(current->pc, buf, FALSE); if (strlen(buf)) fprintf(fp, " %s\n", buf); } if (sym && loongarch64_is_exception_entry(sym)) { GET_STACK_DATA(current->sp, &pt_regs, SIZE(pt_regs)); loongarch64_dump_exception_stack(bt, pt_regs); } /* bt -f */ if (bt->flags & BT_FULL) { fprintf(fp, " " "[PC: %016lx RA: %016lx SP: %016lx SIZE: %ld]\n", current->pc, current->ra, current->sp, previous->sp - current->sp); loongarch64_display_full_frame(bt, current, previous); } } static void loongarch64_dump_exception_stack(struct bt_info *bt, char *pt_regs) { struct loongarch64_pt_regs *regs; int i; char buf[BUFSIZE]; regs = (struct loongarch64_pt_regs *) (pt_regs + OFFSET(pt_regs_regs)); for (i = 0; i < 32; i += 4) { fprintf(fp, " $%2d : %016lx %016lx %016lx %016lx\n", i, regs->regs[i], regs->regs[i+1], regs->regs[i+2], regs->regs[i+3]); } value_to_symstr(regs->csr_epc, buf, 16); fprintf(fp, " epc : %016lx %s\n", regs->csr_epc, buf); value_to_symstr(regs->regs[LOONGARCH64_EF_RA], buf, 16); fprintf(fp, " ra : %016lx %s\n", regs->regs[LOONGARCH64_EF_RA], buf); fprintf(fp, " CSR crmd : %016lx\n", regs->csr_crmd); fprintf(fp, " CSR prmd : %016lx\n", regs->csr_prmd); fprintf(fp, " CSR ecfg : %016lx\n", 
regs->csr_ecfg); fprintf(fp, " CSR estat: %016lx\n", regs->csr_estat); fprintf(fp, " CSR euen : %016lx\n", regs->csr_euen); fprintf(fp, " BadVA : %016lx\n", regs->csr_badvaddr); } static int loongarch64_is_exception_entry(struct syment *sym) { return STREQ(sym->name, "ret_from_exception") || STREQ(sym->name, "ret_from_irq") || STREQ(sym->name, "work_resched") || STREQ(sym->name, "handle_sys"); } /* * 'bt -f' commend output * Display all stack data contained in a frame */ static void loongarch64_display_full_frame(struct bt_info *bt, struct loongarch64_unwind_frame *current, struct loongarch64_unwind_frame *previous) { int i, u_idx; ulong *up; ulong words, addr; char buf[BUFSIZE]; if (previous->sp < current->sp) return; if (!(INSTACK(previous->sp, bt) && INSTACK(current->sp, bt))) return; words = (previous->sp - current->sp) / sizeof(ulong) + 1; addr = current->sp; u_idx = (current->sp - bt->stackbase) / sizeof(ulong); for (i = 0; i < words; i++, u_idx++) { if (!(i & 1)) fprintf(fp, "%s %lx: ", i ? "\n" : "", addr); up = (ulong *)(&bt->stackbuf[u_idx*sizeof(ulong)]); fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0)); addr += sizeof(ulong); } fprintf(fp, "\n"); } static void loongarch64_stackframe_init(void) { long task_struct_thread = MEMBER_OFFSET("task_struct", "thread"); long thread_reg03_sp = MEMBER_OFFSET("thread_struct", "reg03"); long thread_reg01_ra = MEMBER_OFFSET("thread_struct", "reg01"); if ((task_struct_thread == INVALID_OFFSET) || (thread_reg03_sp == INVALID_OFFSET) || (thread_reg01_ra == INVALID_OFFSET)) { error(FATAL, "cannot determine thread_struct offsets\n"); return; } ASSIGN_OFFSET(task_struct_thread_reg03) = task_struct_thread + thread_reg03_sp; ASSIGN_OFFSET(task_struct_thread_reg01) = task_struct_thread + thread_reg01_ra; MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); STRUCT_SIZE_INIT(note_buf, "note_buf_t"); } /* * Get a stack frame combination of pc and ra from the most relevant spot. 
 */
static void
loongarch64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	ulong ksp, nip;
	int ret = 0;

	nip = ksp = 0;
	/* Cleared here; loongarch64_get_dumpfile_stack_frame() may repoint it
	 * at the saved pt_regs for the crashing task. */
	bt->machdep = NULL;

	if (DUMPFILE() && is_task_active(bt->task)) {
		/* Active task in a dumpfile: registers come from crash_notes/ELF notes. */
		ret = loongarch64_get_dumpfile_stack_frame(bt, &nip, &ksp);
	} else {
		/* Blocked/idle task: read PC/SP out of the saved thread_struct. */
		ret = loongarch64_get_frame(bt, &nip, &ksp);
	}

	if (!ret)
		error(WARNING, "cannot determine starting stack frame for task %lx\n", bt->task);

	/* Either output pointer may be NULL when the caller only wants one value. */
	if (pcp)
		*pcp = nip;
	if (spp)
		*spp = ksp;
}

/*
 * Get the starting point for the active cpu in a diskdump.
 *
 * Returns TRUE and fills *nip/*ksp from the per-cpu crash registers;
 * returns FALSE (and sets BT_REGS_NOT_FOUND) when no registers were
 * collected, or when both epc and sp are zero for this cpu.
 */
static int
loongarch64_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp)
{
	const struct machine_specific *ms = machdep->machspec;
	struct loongarch64_pt_regs *regs;
	ulong epc, sp;

	if (!ms->crash_task_regs) {
		bt->flags |= BT_REGS_NOT_FOUND;
		return FALSE;
	}

	/*
	 * We got registers for panic task from crash_notes. Just return them.
	 */
	regs = &ms->crash_task_regs[bt->tc->processor];
	epc = regs->csr_epc;
	sp = regs->regs[LOONGARCH64_EF_SP];

	/* All-zero registers mean this cpu's note was never populated. */
	if (!epc && !sp) {
		bt->flags |= BT_REGS_NOT_FOUND;
		return FALSE;
	}

	if (nip)
		*nip = epc;
	if (ksp)
		*ksp = sp;

	/* Stash the full register set for later use by the backtrace code. */
	bt->machdep = regs;

	return TRUE;
}

/*
 * Do the work for loongarch64_get_stack_frame() for non-active tasks.
 * Get SP and PC values for idle tasks.
 */
static int
loongarch64_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	if (!bt->tc || !(tt->flags & THREAD_INFO))
		return FALSE;

	/* thread_struct.reg01 holds the saved RA (used as the resume PC). */
	if (!readmem(bt->task + OFFSET(task_struct_thread_reg01),
	    KVADDR, pcp, sizeof(*pcp), "thread_struct.regs01", RETURN_ON_ERROR)) {
		return FALSE;
	}

	/* thread_struct.reg03 holds the saved SP. */
	if (!readmem(bt->task + OFFSET(task_struct_thread_reg03),
	    KVADDR, spp, sizeof(*spp), "thread_struct.regs03", RETURN_ON_ERROR)) {
		return FALSE;
	}

	return TRUE;
}

/*
 * Collect crash-time registers for the active tasks, preferring the
 * kernel's crash_notes and falling back to the dumpfile's ELF notes.
 */
static int
loongarch64_init_active_task_regs(void)
{
	int retval;

	retval = loongarch64_get_crash_notes();
	if (retval == TRUE)
		return retval;

	return loongarch64_get_elf_notes();
}

/*
 * Retrieve task registers for the time of the crash.
*/ static int loongarch64_get_crash_notes(void) { struct machine_specific *ms = machdep->machspec; ulong crash_notes; Elf64_Nhdr *note; ulong offset; char *buf, *p; ulong *notes_ptrs; ulong i; /* * crash_notes contains per cpu memory for storing cpu states * in case of system crash. */ if (!symbol_exists("crash_notes")) return FALSE; crash_notes = symbol_value("crash_notes"); notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0])); /* * Read crash_notes for the first CPU. crash_notes are in standard ELF * note format. */ if (!readmem(crash_notes, KVADDR, ¬es_ptrs[kt->cpus-1], sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) { error(WARNING, "cannot read crash_notes\n"); FREEBUF(notes_ptrs); return FALSE; } if (symbol_exists("__per_cpu_offset")) { /* * Add __per_cpu_offset for each cpu to form the pointer to the notes */ for (i = 0; i < kt->cpus; i++) notes_ptrs[i] = notes_ptrs[kt->cpus-1] + kt->__per_cpu_offset[i]; } buf = GETBUF(SIZE(note_buf)); if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs)))) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf), "note_buf_t", RETURN_ON_ERROR)) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); goto fail; } /* * Do some sanity checks for this note before reading registers from it. */ note = (Elf64_Nhdr *)buf; p = buf + sizeof(Elf64_Nhdr); /* * dumpfiles created with qemu won't have crash_notes, but there will * be elf notes; dumpfiles created by kdump do not create notes for * offline cpus. */ if (note->n_namesz == 0 && (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) { if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (note) { /* * SIZE(note_buf) accounts for a "final note", which is a * trailing empty elf note header. 
*/ long notesz = SIZE(note_buf) - sizeof(Elf64_Nhdr); if (sizeof(Elf64_Nhdr) + roundup(note->n_namesz, 4) + note->n_descsz == notesz) BCOPY((char *)note, buf, notesz); } else { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } } /* * Check the sanity of NT_PRSTATUS note only for each online cpu. */ if (note->n_type != NT_PRSTATUS) { error(WARNING, "invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n"); goto fail; } if (!STRNEQ(p, "CORE")) { error(WARNING, "invalid NT_PRSTATUS note (name != \"CORE\"\n"); goto fail; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. */ offset = sizeof(Elf64_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); return TRUE; fail: FREEBUF(buf); FREEBUF(notes_ptrs); free(panic_task_regs); return FALSE; } static int loongarch64_get_elf_notes(void) { struct machine_specific *ms = machdep->machspec; int i; if (!DISKDUMP_DUMPFILE() && !KDUMP_DUMPFILE()) return FALSE; panic_task_regs = calloc(kt->cpus, sizeof(*panic_task_regs)); if (!panic_task_regs) error(FATAL, "cannot calloc panic_task_regs space\n"); for (i = 0; i < kt->cpus; i++) { Elf64_Nhdr *note = NULL; size_t len; if (DISKDUMP_DUMPFILE()) note = diskdump_get_prstatus_percpu(i); else if (KDUMP_DUMPFILE()) note = netdump_get_prstatus_percpu(i); if (!note) { error(WARNING, "cannot find NT_PRSTATUS note for cpu: %d\n", i); continue; } len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); BCOPY((char *)note + len + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); } ms->crash_task_regs = panic_task_regs; return TRUE; } /* * 
Accept or reject a symbol from the kernel namelist. */ static int loongarch64_verify_symbol(const char *name, ulong value, char type) { if (!strncmp(name, ".L", 2) || !strncmp(name, "L0", 2)) return FALSE; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); if (STREQ(name, "_text") || STREQ(name, "_stext")) machdep->flags |= KSYMS_START; return (name && strlen(name) && (machdep->flags & KSYMS_START) && !STRNEQ(name, "__func__.") && !STRNEQ(name, "__crc_")); } /* * Override smp_num_cpus if possible and necessary. */ static int loongarch64_get_smp_cpus(void) { return (get_cpus_online() > 0) ? get_cpus_online() : kt->cpus; } static ulong loongarch64_get_page_size(void) { return memory_page_size(); } /* * Determine where vmalloc'd memory starts. */ static ulong loongarch64_vmalloc_start(void) { return first_vmalloc_address(); } /* * Calculate and return the speed of the processor. */ static ulong loongarch64_processor_speed(void) { unsigned long cpu_hz = 0; if (machdep->mhz) return (machdep->mhz); if (symbol_exists("cpu_clock_freq")) { get_symbol_data("cpu_clock_freq", sizeof(int), &cpu_hz); if (cpu_hz) return(machdep->mhz = cpu_hz/1000000); } return 0; } /* * Checks whether given task is valid task address. */ static int loongarch64_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } /* * 'help -m/M' command output */ void loongarch64_dump_machdep_table(ulong arg) { int others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " pgdir_shift: %d\n", PGDIR_SHIFT); fprintf(fp, " ptrs_per_pgd: %lu\n", PTRS_PER_PGD); fprintf(fp, " ptrs_per_pte: %ld\n", PTRS_PER_PTE); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " back_trace: loongarch64_back_trace_cmd()\n"); fprintf(fp, " processor_speed: loongarch64_processor_speed()\n"); fprintf(fp, " uvtop: loongarch64_uvtop()\n"); fprintf(fp, " kvtop: loongarch64_kvtop()\n"); fprintf(fp, " get_stack_frame: loongarch64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: loongarch64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: loongarch64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: loongarch64_is_task_addr()\n"); fprintf(fp, " verify_symbol: loongarch64_verify_symbol()\n"); fprintf(fp, " dis_filter: generic_dis_filter()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " cmd_mach: loongarch64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: loongarch64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, " value_to_symbol: 
generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: NULL\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); } static void pt_level_alloc(char **lvl, char *name) { size_t sz = PAGESIZE(); void *pointer = malloc(sz); if (!pointer) error(FATAL, name); *lvl = pointer; } void loongarch64_init(int when) { switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf64_notes; break; case PRE_SYMTAB: machdep->verify_symbol = loongarch64_verify_symbol; machdep->machspec = &loongarch64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: machdep->pagesize = loongarch64_get_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); if (machdep->pagesize >= 16384) machdep->stacksize = machdep->pagesize; else machdep->stacksize = machdep->pagesize * 2; pt_level_alloc(&machdep->pgd, "cannot malloc pgd space."); pt_level_alloc(&machdep->pmd, "cannot malloc pmd space."); pt_level_alloc(&machdep->ptbl, "cannot malloc ptbl space."); machdep->kvbase = 0x8000000000000000lu; machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; 
machdep->uvtop = loongarch64_uvtop; machdep->kvtop = loongarch64_kvtop; machdep->cmd_mach = loongarch64_cmd_mach; machdep->back_trace = loongarch64_back_trace_cmd; machdep->get_stack_frame = loongarch64_get_stack_frame; machdep->vmalloc_start = loongarch64_vmalloc_start; machdep->processor_speed = loongarch64_processor_speed; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = loongarch64_translate_pte; machdep->memory_size = generic_memory_size; machdep->is_task_addr = loongarch64_is_task_addr; machdep->get_smp_cpus = loongarch64_get_smp_cpus; machdep->dis_filter = generic_dis_filter; machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); loongarch64_stackframe_init(); if (!machdep->hz) machdep->hz = 250; break; case POST_VM: /* * crash_notes contains machine specific information about the * crash. In particular, it contains CPU registers at the time * of the crash. We need this information to extract correct * backtraces from the panic task. */ if (!ACTIVE() && !loongarch64_init_active_task_regs()) error(WARNING,"cannot retrieve registers for active task%s\n\n", kt->cpus > 1 ? 
"s" : ""); break; } } void loongarch64_display_regs_from_elf_notes(int cpu, FILE *ofp) { const struct machine_specific *ms = machdep->machspec; struct loongarch64_pt_regs *regs; if (!ms->crash_task_regs) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } regs = &ms->crash_task_regs[cpu]; if (!regs->regs[LOONGARCH64_EF_SP] && !regs->csr_epc) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } fprintf(ofp, " R0: %016lx R1: %016lx R2: %016lx\n" " R3: %016lx R4: %016lx R5: %016lx\n" " R6: %016lx R7: %016lx R8: %016lx\n" " R9: %016lx R10: %016lx R11: %016lx\n" " R12: %016lx R13: %016lx R14: %016lx\n" " R15: %016lx R16: %016lx R17: %016lx\n" " R18: %016lx R19: %016lx R20: %016lx\n" " R21: %016lx R22: %016lx R23: %016lx\n" " R24: %016lx R25: %016lx R26: %016lx\n" " R27: %016lx R28: %016lx R29: %016lx\n" " R30: %016lx R31: %016lx\n" " CSR epc : %016lx CSR badv: %016lx\n" " CSR crmd: %08lx CSR prmd: %08lx\n" " CSR ecfg: %08lx CSR estat: %08lx\n" " CSR eneu: %08lx", regs->regs[LOONGARCH64_EF_R0], regs->regs[LOONGARCH64_EF_R0 + 1], regs->regs[LOONGARCH64_EF_R0 + 2], regs->regs[LOONGARCH64_EF_R0 + 3], regs->regs[LOONGARCH64_EF_R0 + 4], regs->regs[LOONGARCH64_EF_R0 + 5], regs->regs[LOONGARCH64_EF_R0 + 6], regs->regs[LOONGARCH64_EF_R0 + 7], regs->regs[LOONGARCH64_EF_R0 + 8], regs->regs[LOONGARCH64_EF_R0 + 9], regs->regs[LOONGARCH64_EF_R0 + 10], regs->regs[LOONGARCH64_EF_R0 + 11], regs->regs[LOONGARCH64_EF_R0 + 12], regs->regs[LOONGARCH64_EF_R0 + 13], regs->regs[LOONGARCH64_EF_R0 + 14], regs->regs[LOONGARCH64_EF_R0 + 15], regs->regs[LOONGARCH64_EF_R0 + 16], regs->regs[LOONGARCH64_EF_R0 + 17], regs->regs[LOONGARCH64_EF_R0 + 18], regs->regs[LOONGARCH64_EF_R0 + 19], regs->regs[LOONGARCH64_EF_R0 + 20], regs->regs[LOONGARCH64_EF_R0 + 21], regs->regs[LOONGARCH64_EF_R0 + 22], regs->regs[LOONGARCH64_EF_R0 + 23], regs->regs[LOONGARCH64_EF_R0 + 24], regs->regs[LOONGARCH64_EF_R0 + 25], regs->regs[LOONGARCH64_EF_R0 + 26], regs->regs[LOONGARCH64_EF_R0 + 
27], regs->regs[LOONGARCH64_EF_R0 + 28], regs->regs[LOONGARCH64_EF_R0 + 29], regs->regs[LOONGARCH64_EF_R0 + 30], regs->regs[LOONGARCH64_EF_R0 + 31], regs->csr_epc, regs->csr_badvaddr, regs->csr_crmd, regs->csr_prmd, regs->csr_ecfg, regs->csr_estat, regs->csr_euen); } #else /* !LOONGARCH64 */ #include "defs.h" void loongarch64_display_regs_from_elf_notes(int cpu, FILE *ofp) { return; } #endif /* !LOONGARCH64 */ crash-utility-crash-9cd43f5/xen_hyper_global_data.c0000664000372000037200000003305615107550337022061 0ustar juerghjuergh/* * xen_hyper_global_data.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "defs.h" #ifdef XEN_HYPERVISOR_ARCH #include "xen_hyper_defs.h" /* * Global data for Xen hypervisor. 
*/ struct xen_hyper_machdep_table xen_hyper_machdep_table = { 0 }; struct xen_hyper_machdep_table *xhmachdep = &xen_hyper_machdep_table; struct xen_hyper_table xen_hyper_table = { 0 }; struct xen_hyper_table *xht = &xen_hyper_table; struct xen_hyper_dumpinfo_table xen_hyper_dumpinfo_table = { 0 }; struct xen_hyper_dumpinfo_table *xhdit = &xen_hyper_dumpinfo_table; struct xen_hyper_domain_table xen_hyper_domain_table = { 0 }; struct xen_hyper_domain_table *xhdt = &xen_hyper_domain_table; struct xen_hyper_vcpu_table xen_hyper_vcpu_table = { 0 }; struct xen_hyper_vcpu_table *xhvct = &xen_hyper_vcpu_table; struct xen_hyper_pcpu_table xen_hyper_pcpu_table = { 0 }; struct xen_hyper_pcpu_table *xhpct = &xen_hyper_pcpu_table; struct xen_hyper_sched_table xen_hyper_sched_table = { 0 }; struct xen_hyper_sched_table *xhscht = &xen_hyper_sched_table; struct xen_hyper_symbol_table_data xen_hyper_symbol_table_data = { 0 }; struct xen_hyper_symbol_table_data *xhsymt = &xen_hyper_symbol_table_data; /* * The following commands are for Xen hypervisor. 
*/ struct command_table_entry xen_hyper_command_table[] = { {"*", cmd_pointer, help_pointer, 0}, {"alias", cmd_alias, help_alias, 0}, {"ascii", cmd_ascii, help_ascii, 0}, {"bt", cmd_bt, help_bt, 0}, {"dis", cmd_dis, help_dis, 0}, {"domain", xen_hyper_cmd_domain, xen_hyper_help_domain, REFRESH_TASK_TABLE}, {"doms", xen_hyper_cmd_doms, xen_hyper_help_doms, REFRESH_TASK_TABLE}, #if defined(X86) || defined(X86_64) {"dumpinfo",xen_hyper_cmd_dumpinfo, xen_hyper_help_dumpinfo,0}, #endif {"eval", cmd_eval, help_eval, 0}, {"exit", cmd_quit, help_exit, 0}, {"extend", cmd_extend, help_extend, 0}, {"gdb", cmd_gdb, help_gdb, 0}, {"help", xen_hyper_cmd_help, help_help, 0}, {"list", cmd_list, help__list, 0}, {"log", xen_hyper_cmd_log, xen_hyper_help_log, 0}, {"p", cmd_p, help_p, 0}, {"pcpus", xen_hyper_cmd_pcpus, xen_hyper_help_pcpus, 0}, {"pte", cmd_pte, help_pte, 0}, {"q", cmd_quit, help_quit, 0}, {"rd", cmd_rd, help_rd, 0}, {"repeat", cmd_repeat, help_repeat, 0}, {"sched", xen_hyper_cmd_sched, xen_hyper_help_sched, 0}, {"search", cmd_search, help_search, 0}, {"set", cmd_set, help_set, 0}, {"struct", cmd_struct, help_struct, 0}, {"sym", cmd_sym, help_sym, 0}, {"sys", xen_hyper_cmd_sys, xen_hyper_help_sys, 0}, {"test", cmd_test, NULL, HIDDEN_COMMAND}, {"union", cmd_union, help_union, 0}, {"vcpu", xen_hyper_cmd_vcpu, xen_hyper_help_vcpu, REFRESH_TASK_TABLE}, {"vcpus", xen_hyper_cmd_vcpus, xen_hyper_help_vcpus, REFRESH_TASK_TABLE}, {"whatis", cmd_whatis, help_whatis, 0}, {"wr", cmd_wr, help_wr, 0}, {(char *)NULL} }; /* * */ struct xen_hyper_offset_table xen_hyper_offset_table = { 0 }; struct xen_hyper_size_table xen_hyper_size_table = { 0 }; /* * help data */ char *xen_hyper_help_domain[] = { "domain", "display contents of domain struct", "[domain-id | domainp] ...", " This command displays contents of domain struct for selected, or all, domains", " domain-id a domain id.", " domainp a domain pointer.", NULL }; char *xen_hyper_help_doms[] = { "doms", "display domain status 
information", "[domain-id | domainp] ...", " This command displays domain status for selected, or all, domains" , " domain-id a domain id.", " domainp a domain pointer.", " ", " 1. the DOMAIN-ID.", " 2. the struct domain pointer.", " 3. the domain state", " (SF:fully shut down, SH:shutting down, DY:dying,", " CP:pause by controller software, PO:polling event channels,", " PA:pause by the hypervisor, RU:running).", " 4. the TYPE of domain", " (O:dom_io, X:dom_xen, I:idle domain, 0:domain 0, U:domain U).", " 5. displays max_pages member of domain.", " 6. displays tot_pages member of domain.", " 7. a number of vcpu that domain is assigned.", " 8. the shared_info pointer of domain.", " 9. frame containing list of mfns containing list of mfns" , " containing p2m.", " ", " The active domain on each CPU will be highlighted by an angle ", " bracket (\">\") preceding its information.", " The crashing domain on each CPU will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the domain status of all:\n", " %s> doms", " DID DOMAIN ST T MAXPAGE TOTPAGE VCPU SHARED_I P2M_MFN", " 32753 ffbf8080 RU O 0 0 0 0 ----", " 32754 ffbfa080 RU X 0 0 0 0 ----", " 32767 ffbfc080 RU I 0 0 2 0 ----", " >* 0 ff198080 RU 0 ffffffff 32900 2 ff194000 18d0", " 4 ffbee080 RU U 4000 4000 2 ff18d000 3eb92", " 5 ff186080 RU U 4000 4000 2 ff184000 298d3", " %s>", NULL }; char *xen_hyper_help_dumpinfo[] = { "dumpinfo", "display Xen dump information", "[-t | -r] [pcpu-id | enotep] ...", " This command displays Xen dump information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " enotep a ELF Note pointer.", " -t display time information.", " -r display register information.", NULL }; char *xen_hyper_help_log[] = { "log", "dump system message buffer", " ", " This command dumps the xen conring contents in chronological order." 
, " ", "EXAMPLES", " Dump the Xen message buffer:\n", " %s> log", " __ __ _____ ___ _ _ _", " \\ \\/ /___ _ __ |___ / / _ \\ _ _ _ __ ___| |_ __ _| |__ | | ___", " \\ // _ \\ '_ \\ |_ \\| | | |__| | | | '_ \\/ __| __/ _` | '_ \\| |/ _ \\", " / \\ __/ | | | ___) | |_| |__| |_| | | | \\__ \\ || (_| | |_) | | __/", " /_/\\_\\___|_| |_| |____(_)___/ \\__,_|_| |_|___/\\__\\__,_|_.__/|_|\\___|", " ", " http://www.cl.cam.ac.uk/netos/xen", " University of Cambridge Computer Laboratory", " ", " Xen version 3.0-unstable (damm@) (gcc version 3.4.6 (Gentoo 3.4.6-r1, ssp-3.4.5-1.0,", " pie-8.7.9)) Wed Dec 6 17:34:32 JST 2006", " Latest ChangeSet: unavailable", " ", " (XEN) Console output is synchronous.", " (XEN) Command line: 12733-i386-pae/xen.gz console=com1 sync_console conswitch=bb com1", " =115200,8n1,0x3f8 dom0_mem=480000 crashkernel=64M@32M", " (XEN) Physical RAM map:", " (XEN) 0000000000000000 - 0000000000098000 (usable)", " (XEN) 0000000000098000 - 00000000000a0000 (reserved)", " (XEN) 00000000000f0000 - 0000000000100000 (reserved)", " (XEN) 0000000000100000 - 000000003f7f0000 (usable)", " (XEN) 000000003f7f0000 - 000000003f7f3000 (ACPI NVS)", " (XEN) 000000003f7f3000 - 000000003f800000 (ACPI data)", " (XEN) 00000000e0000000 - 00000000f0000000 (reserved)", " (XEN) 00000000fec00000 - 0000000100000000 (reserved)", " (XEN) Kdump: 64MB (65536kB) at 0x2000000", " (XEN) System RAM: 1015MB (1039904kB)", " (XEN) ACPI: RSDP (v000 XPC ) @ 0x000f9250", " ...", NULL }; char *xen_hyper_help_pcpus[] = { "pcpus", "display physical cpu information", "[-r][-t] [pcpu-id | pcpup] ...", " This command displays physical cpu information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " pcpup a physical cpu pointer.", " cur-vcpu a current virtual cpu pointer.", " -r display register information.", " -t display init_tss information.", " ", " The crashing physical cpu will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the physical cpu 
status of all:\n", " %s> pcpus", " PCID PCPU CUR-VCPU", " 0 ff1a3fb4 ffbf9080", " * 1 ff1dbfb4 ffbf8080", " %s>", " ", " Show the physical cpu status of all with register information:\n", " %s> pcpus -r", " PCID PCPU CUR-VCPU", " * 0 ff1b7fb4 ffbef080", " Register information:", " struct cpu_user_regs {", " ebx = 0x0,", " ecx = 0xdcf4bed8,", " edx = 0xc0326887,", " esi = 0x63,", " edi = 0x0,", " ebp = 0xdcf4bee0,", " eax = 0x25,", " error_code = 0x6,", " entry_vector = 0xe,", " eip = 0xc01014a7,", " cs = 0x61,", " saved_upcall_mask = 0x0,", " _pad0 = 0x0,", " eflags = 0x202,", " esp = 0xdcf4bed0,", " ss = 0x69,", " _pad1 = 0x0,", " es = 0x7b,", " _pad2 = 0x0,", " ds = 0x7b,", " _pad3 = 0x0,", " fs = 0x0,", " _pad4 = 0x0,", " gs = 0x0,", " _pad5 = 0x0", " }", " ", " Show the physical cpu status of all with init_tss information:\n", " %s> pcpus -t", " PCID PCPU CUR-VCPU", " * 0 ff1b7fb4 ffbef080", " init_tss information:", " struct tss_struct {", " back_link = 0x0,", " __blh = 0x0,", " esp0 = 0xff1b7fe8,", " ss0 = 0xe010,", " __ss0h = 0x0,", " esp1 = 0xdcf4bff8,", " ss1 = 0x69,", " __ss1h = 0x0,", " esp2 = 0x0,", " ss2 = 0x0,", " __ss2h = 0x0,", " __cr3 = 0x0,", " eip = 0x0,", " eflags = 0x0,", " eax = 0x0,", " ecx = 0x0,", " edx = 0x0,", " ebx = 0x0,", " esp = 0x0,", " ebp = 0x0,", " esi = 0x0,", " edi = 0x0,", " es = 0x0,", " __esh = 0x0,", " cs = 0x0,", " __csh = 0x0,", " ss = 0x0,", " __ssh = 0x0,", " ds = 0x0,", " __dsh = 0x0,", " fs = 0x0,", " __fsh = 0x0,", " gs = 0x0,", " __gsh = 0x0,", " ldt = 0x0,", " __ldth = 0x0,", " trace = 0x0,", " bitmap = 0x8000,", " __cacheline_filler = \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"", " }", NULL }; char *xen_hyper_help_sched[] = { "pcpus", "display scheduler information", "[-v] [pcpu-id] ...", " This command displays scheduler information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " -v display verbosely scheduler information.", 
" ", NULL }; char *xen_hyper_help_sys[] = { "sys", "system data", "[-c [name|number]] config", " This command displays system-specific data. If no arguments are entered,\n" " the same system data shown during %s invocation is shown.\n", "\nEXAMPLES", " Display essential system information:\n", " %s> sys", " DEBUG KERNEL: xen-syms", " DUMPFILE: vmcore", " CPUS: 2", " DOMAINS: 2", " MACHINE: Pentium III (Coppermine) (866 Mhz)", " MEMORY: 2 GB", " %s>", NULL }; char *xen_hyper_help_vcpu[] = { "vcpu", "display contents of vcpu struct", "[vcpup] ...", " This command displays contents of vcpu struct for selected, or all, vcpus", " vcpu-id a virtual cpu id.", " vcpup a virtual cpu pointer.", NULL }; char *xen_hyper_help_vcpus[] = { "vcpus", "display vcpu status information", "[-i domain-id vcpu-id | vcpup] ...", " This command displays vcpu status for selected, or all, vcpus" , " domain-id a domain id.", " vcpu-id a VCPU-ID.", " vcpup a hexadecimal struct vcpu pointer.", " -i specify vcpu id as an argument.", " ", " 1. the VCPU-ID.", " 2. the physical CPU-ID.", " 3. the struct vcpu pointer.", " 4. the vcpu state (RU, BL, OF).", " 5. the TYPE of domain that vcpu is assigned(I, 0, G).", " 6. the DOMAIN-ID of domain that vcpu is assigned.", " 7. 
the struct domain pointer of domain that vcpu is assigned.", " ", " The active vcpu on each CPU will be highlighted by an angle ", " bracket (\">\") preceding its information.", " The crashing vcpu on each CPU will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the vcpu status of all:\n", " %s> vcpus", " VCID PCID VCPU ST T DOMID DOMAIN", " 0 0 ffbfe080 RU I 32767 ffbfc080", " 1 1 ff1df080 RU I 32767 ffbfc080", " >* 0 0 ff195180 RU 0 0 ff198080", " > 1 1 ff190080 BL 0 0 ff198080", " 0 1 ff18a080 BL G 4 ffbee080", " 1 0 ff189080 BL G 4 ffbee080", " 0 1 ff1f3080 BL G 5 ff186080", " 1 0 ff1f2080 BL G 5 ff186080", " %s>", NULL }; struct task_context fake_tc = { 0 }; #endif crash-utility-crash-9cd43f5/ramdump.c0000664000372000037200000002175715107550337017221 0ustar juerghjuergh/* * ramdump.c - core analysis suite * * Copyright (c) 2014 Broadcom Corporation * Oza Pawandeep * Vikram Prakash * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * Author: Oza Pawandeep */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include struct ramdump_def { char *path; int rfd; ulonglong start_paddr; ulonglong end_paddr; }; static struct ramdump_def *ramdump; static int nodes; static char *user_elf = NULL; static char elf_default[] = "/var/tmp/ramdump_elf_XXXXXX"; static void alloc_elf_header(Elf64_Ehdr *ehdr, ushort e_machine) { memcpy(ehdr->e_ident, ELFMAG, SELFMAG); ehdr->e_ident[EI_CLASS] = ELFCLASS64; ehdr->e_ident[EI_DATA] = ELFDATA2LSB; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_LINUX; ehdr->e_ident[EI_ABIVERSION] = 0; memset(ehdr->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); ehdr->e_type = ET_CORE; ehdr->e_machine = e_machine; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf64_Ehdr); ehdr->e_shoff = 0; ehdr->e_flags = 0; ehdr->e_ehsize = sizeof(Elf64_Ehdr); ehdr->e_phentsize = sizeof(Elf64_Phdr); ehdr->e_phnum = 1 + nodes; ehdr->e_shentsize = 0; ehdr->e_shnum = 0; ehdr->e_shstrndx = 0; } static void alloc_program_headers(Elf64_Phdr *phdr) { unsigned int i; for (i = 0; i < nodes; i++) { phdr[i].p_type = PT_LOAD; phdr[i].p_filesz = ramdump[i].end_paddr + 1 - ramdump[i].start_paddr; phdr[i].p_memsz = phdr[i].p_filesz; phdr[i].p_vaddr = 0; phdr[i].p_paddr = ramdump[i].start_paddr; phdr[i].p_flags = PF_R | PF_W | PF_X; phdr[i].p_align = 0; } } static char *write_elf(Elf64_Phdr *load, Elf64_Ehdr *e_head, size_t data_offset) { #define CPY_BUF_SZ 4096 int fd1, fd2, i, err = 1; char *buf; char *out_elf; size_t offset; ssize_t rd, len; buf = (char *)malloc(CPY_BUF_SZ); offset = data_offset; if (user_elf) { fd2 = open(user_elf, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR); if (fd2 < 0) { error(INFO, "%s open error, %s\n", user_elf, strerror(errno)); goto end1; } out_elf = user_elf; } else { fd2 = mkstemp(elf_default); if (fd2 < 0) { error(INFO, "%s open error, %s\n", elf_default, strerror(errno)); goto end1; } out_elf = elf_default; pc->flags2 |= RAMDUMP; } if 
(user_elf) { sprintf(buf, "creating ELF dumpfile: %s", out_elf); please_wait(buf); } else if (CRASHDEBUG(1)) fprintf(fp, "creating temporary ELF header: %s\n\n", elf_default); while (offset > 0) { len = write(fd2, e_head + (data_offset - offset), offset); if (len < 0) { error(INFO, "ramdump write error, %s\n", strerror(errno)); goto end; } offset -= len; } if (user_elf) { for (i = 0; i < nodes; i++) { offset = load[i].p_offset; fd1 = open(ramdump[i].path, O_RDONLY, S_IRUSR); if (fd1 < 0) { error(INFO, "%s open error, %s\n", ramdump[i].path, strerror(errno)); goto end; } lseek(fd2, (off_t)offset, SEEK_SET); while ((rd = read(fd1, buf, CPY_BUF_SZ)) > 0) { if (write(fd2, buf, rd) != rd) { error(INFO, "%s write error, %s\n", ramdump[i].path, strerror(errno)); close(fd1); goto end; } } close(fd1); } please_wait_done(); } err = 0; end: close(fd2); end1: free(buf); return err ? NULL : out_elf; } static void alloc_notes(Elf64_Phdr *notes) { /* Nothing filled in as of now */ notes->p_type = PT_NOTE; notes->p_offset = 0; notes->p_vaddr = 0; notes->p_paddr = 0; notes->p_filesz = 0; notes->p_memsz = 0; notes->p_flags = 0; notes->p_align = 0; } char *ramdump_to_elf(void) { int i; char *ptr, *e_file = NULL; ushort e_machine = 0; size_t offset, data_offset; size_t l_offset; Elf64_Phdr *notes, *load; Elf64_Ehdr *e_head; if (machine_type("ARM")) e_machine = EM_ARM; else if (machine_type("ARM64")) e_machine = EM_AARCH64; else if (machine_type("MIPS") || machine_type("MIPS64")) e_machine = EM_MIPS; else if (machine_type("X86_64")) e_machine = EM_X86_64; else if (machine_type("RISCV64")) e_machine = EM_RISCV; else if (machine_type("LOONGARCH64")) e_machine = EM_LOONGARCH; else error(FATAL, "ramdump: unsupported machine type: %s\n", MACHINE_TYPE); e_head = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) + (nodes * sizeof(Elf64_Phdr)) + (CPY_BUF_SZ * 2)); ptr = (char *)e_head; offset = 0; alloc_elf_header(e_head, e_machine); ptr += sizeof(Elf64_Ehdr); offset += 
sizeof(Elf64_Ehdr); notes = (Elf64_Phdr *)ptr; alloc_notes(notes); offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); load = (Elf64_Phdr *)ptr; alloc_program_headers(load); offset += sizeof(Elf64_Phdr) * nodes; ptr += sizeof(Elf64_Phdr) * nodes; /* Empty note */ notes->p_offset = offset; l_offset = offset; data_offset = offset; for (i = 0; i < nodes; i++) { load[i].p_offset = l_offset; l_offset += load[i].p_filesz; } e_file = write_elf(load, e_head, data_offset); free(e_head); return e_file; } #define PREFIX(ptr, pat) \ (strncmp((ptr), (pat), sizeof(pat)-1) ? 0 : \ ((ptr) += sizeof(pat)-1, 1)) int is_ramdump(char *p) { char *x = NULL, *y = NULL, *pat; size_t len; char *pattern; struct stat64 st; int is_live; int err = 0; is_live = PREFIX(p, "live:"); if (nodes || !strchr(p, '@')) return 0; len = strlen(p); pattern = (char *)malloc(len + 1); strlcpy(pattern, p, len + 1); pat = pattern; while ((pat = strtok_r(pat, ",", &x))) { if ((pat = strtok_r(pat, "@", &y))) { nodes++; ramdump = realloc(ramdump, sizeof(struct ramdump_def) * nodes); if (!ramdump) error(FATAL, "realloc failure\n"); ramdump[nodes - 1].path = pat; pat = strtok_r(NULL, "@", &y); ramdump[nodes - 1].start_paddr = htoll(pat, RETURN_ON_ERROR, &err); if (err == TRUE) error(FATAL, "Invalid ramdump address\n"); if ((ramdump[nodes - 1].rfd = open(ramdump[nodes - 1].path, O_RDONLY)) < 0) error(FATAL, "ramdump %s open failed:%s\n", ramdump[nodes - 1].path, strerror(errno)); if (fstat64(ramdump[nodes - 1].rfd, &st) < 0) error(FATAL, "ramdump stat failed\n"); ramdump[nodes - 1].end_paddr = ramdump[nodes - 1].start_paddr + st.st_size - 1; } pat = NULL; } if (nodes && is_live) { pc->flags |= LIVE_SYSTEM; pc->dumpfile = ramdump[0].path; pc->live_memsrc = pc->dumpfile; } return nodes; } void ramdump_elf_output_file(char *opt) { user_elf = opt; } void ramdump_cleanup(void) { if (!user_elf) unlink(elf_default); } int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { off_t offset; int i, 
found; struct ramdump_def *r = &ramdump[0]; offset = 0; for (i = found = 0; i < nodes; i++) { r = &ramdump[i]; if ((paddr >= r->start_paddr) && (paddr <= r->end_paddr)) { offset = (off_t)paddr - (off_t)r->start_paddr; found++; break; } } if (!found) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: READ_ERROR: " "offset not found for paddr: %llx\n", (ulonglong)paddr); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n", addr, (ulonglong)paddr, cnt, (ulonglong)offset); if (lseek(r->rfd, offset, SEEK_SET) == -1) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: SEEK_ERROR: " "offset: %llx\n", (ulonglong)offset); return SEEK_ERROR; } if (read(r->rfd, bufptr, cnt) != cnt) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: READ_ERROR: " "offset: %llx\n", (ulonglong)offset); return READ_ERROR; } return cnt; } void show_ramdump_files(void) { int i; fprintf(fp, "%s [temporary ELF header]\n", elf_default); for (i = 0; i < nodes; i++) { fprintf(fp, "%s %s", i ? "\n" : "", ramdump[i].path); } } void dump_ramdump_data() { int i; if (!user_elf && !is_ramdump_image()) return; fprintf(fp, "\nramdump data:\n"); fprintf(fp, " user_elf: %s\n", user_elf ? user_elf : "(unused)"); fprintf(fp, " elf_default: %s\n", user_elf ? "(unused)" : elf_default); fprintf(fp, " nodes: %d\n", nodes); for (i = 0; i < nodes; i++) { fprintf(fp, " ramdump[%d]:\n", i); fprintf(fp, " path: %s\n", ramdump[i].path); fprintf(fp, " rfd: %d\n", ramdump[i].rfd); fprintf(fp, " start_paddr: %llx\n", (ulonglong)ramdump[i].start_paddr); fprintf(fp, " end_paddr: %llx\n", (ulonglong)ramdump[i].end_paddr); } fprintf(fp, "\n"); } int is_ramdump_image(void) { return (pc->flags2 & RAMDUMP ? 
TRUE : FALSE); } crash-utility-crash-9cd43f5/extensions/0000775000372000037200000000000015107550337017573 5ustar juerghjuerghcrash-utility-crash-9cd43f5/extensions/dminfo.c0000664000372000037200000012307515107550337021223 0ustar juerghjuergh/* dminfo.c - crash extension module for device-mapper analysis * * Copyright (C) 2005 NEC Corporation * Copyright (C) 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" /* From the crash source top-level directory */ void dminfo_init(void); void dminfo_fini(void); /* * Indices of size-offset array (Used by GET_xxx macros) * * DM__ */ enum { DM_hash_cell_name_list = 0, DM_hash_cell_name, DM_hash_cell_md, DM_mapped_device_disk, DM_mapped_device_map, DM_gendisk_major, DM_gendisk_first_minor, DM_gendisk_disk_name, DM_dm_table_num_targets, DM_dm_table_targets, DM_dm_table_devices, DM_dm_target_type, DM_dm_target_begin, DM_dm_target_len, DM_dm_target_private, DM_dm_dev_count, DM_dm_dev_bdev, DM_dm_dev_name, DM_dm_io_md, DM_dm_io_bio, DM_target_type_name, DM_target_io_io, DM_block_device_bd_disk, DM_bio_bi_private, DM_bio_list_head, DM_linear_c_dev, DM_linear_c_start, DM_multipath_hw_handler, DM_multipath_nr_priority_groups, DM_multipath_priority_groups, DM_multipath_nr_valid_paths, DM_multipath_current_pg, DM_multipath_queue_if_no_path, DM_multipath_queue_size, DM_hw_handler_type, DM_hw_handler_type_name, DM_priority_group_ps, DM_priority_group_pg_num, DM_priority_group_bypassed, DM_priority_group_nr_pgpaths, DM_priority_group_pgpaths, 
DM_path_selector_type, DM_path_selector_type_name, DM_pgpath_fail_count, DM_pgpath_path, DM_path_dev, DM_path_is_active, DM_mirror_set_rh, DM_mirror_set_reads, DM_mirror_set_writes, DM_mirror_set_in_sync, DM_mirror_set_nr_mirrors, DM_mirror_set_mirror, DM_region_hash_log, DM_region_hash_quiesced_regions, DM_region_hash_recovered_regions, DM_dirty_log_type, DM_dirty_log_type_name, DM_mirror_error_count, DM_mirror_dev, DM_mirror_offset, DM_crypt_config_dev, DM_crypt_config_iv_mode, DM_crypt_config_tfm, DM_crypt_config_key_size, DM_crypt_config_key, DM_crypto_tfm_crt_u, DM_crypto_tfm___crt_alg, DM_crypto_alg_cra_name, DM_cipher_tfm_cit_mode, DM_stripe_c_stripes, DM_stripe_c_chunk_mask, DM_stripe_c_stripe, DM_stripe_dev, DM_dm_snapshot_origin, DM_dm_snapshot_cow, DM_dm_snapshot_chunk_size, DM_dm_snapshot_valid, DM_dm_snapshot_type, NR_DMINFO_MEMBER_TABLE_ENTRY }; /* Size-offset array for structure's member */ static struct dminfo_member_entry { unsigned long offset; unsigned long size; } mbr_ary[NR_DMINFO_MEMBER_TABLE_ENTRY]; /* * Macros to retrieve data of given structure's member * * Macros except for the MSG assume 'struct s' is at 'addr' */ #define MSG(msg, s, m) msg ": " s "." m /* Initialize the size-offset array */ #define INIT_MBR_TABLE(s, m) \ do { \ if (!mbr_ary[DM_##s##_##m].size) { \ mbr_ary[DM_##s##_##m].offset = MEMBER_OFFSET("struct " #s, #m); \ mbr_ary[DM_##s##_##m].size = MEMBER_SIZE("struct " #s, #m); \ } \ } while (0) /* * Store the data of member m in ret. * Initialize the size-offset array for the member m if needed. */ #define GET_VALUE(addr, s, m, ret) \ do { \ INIT_MBR_TABLE(s, m); \ if (sizeof(ret) < mbr_ary[DM_##s##_##m].size) \ fprintf(fp, "%s\n", \ MSG("ERROR: GET_VALUE size_check", #s, #m)); \ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &ret, \ mbr_ary[DM_##s##_##m].size, MSG("GET_VALUE", #s, #m), \ FAULT_ON_ERROR);\ } while (0) /* * Store the address of member m in ret. 
* Initialize the size-offset array for the member m if needed. */ #define GET_ADDR(addr, s, m, ret) \ do { \ INIT_MBR_TABLE(s, m); \ ret = addr + mbr_ary[DM_##s##_##m].offset; \ } while (0) /* * Store the string data of member m in ret. * Initialize the size-offset array for the member m if needed. */ #define GET_STR(addr, s, m, ret, len) \ do { \ INIT_MBR_TABLE(s, m); \ if (!read_string(addr + mbr_ary[DM_##s##_##m].offset, ret, len - 1)) \ fprintf(fp, "%s\n", MSG("ERROR: GET_STR", #s, #m)); \ } while (0) /* * Store the string data pointed by member m in ret. * Initialize the size-offset array for the member m if needed. */ #define GET_PTR_STR(addr, s, m, ret, len) \ do { \ unsigned long tmp; \ INIT_MBR_TABLE(s, m); \ readmem(addr + mbr_ary[DM_##s##_##m].offset, KVADDR, &tmp, \ mbr_ary[DM_##s##_##m].size, MSG("GET_PTR_STR", #s, #m),\ FAULT_ON_ERROR);\ if (!read_string(tmp, ret, len - 1)) \ fprintf(fp, "%s\n", MSG("ERROR: GET_PTR_STR", #s, #m));\ } while (0) /* * Utility function/macro to walk the list */ static unsigned long get_next_from_list_head(unsigned long addr) { unsigned long ret; readmem(addr + OFFSET(list_head_next), KVADDR, &ret, sizeof(void *), MSG("get_next_from_list_head", "list_head", "next"), FAULT_ON_ERROR); return ret; } #define list_for_each(next, head, last) \ for (next = get_next_from_list_head(head), last = 0UL; \ next && next != head && next != last; \ last = next, next = get_next_from_list_head(next)) /* * device-mapper target analyzer * * device-mapper has various target driver: linear, mirror, multipath, etc. * Information specific to target is stored in its own way. * Target-specific analyzer is provided for each target driver for this reason. 
*/ static struct dminfo_target_analyzer { struct dminfo_target_analyzer *next; char *target_name; int (*ready) (void); /* returns true if analyzer is available */ void (*show_table) (unsigned long); /* display table info */ void (*show_status) (unsigned long); /* display status info */ void (*show_queue) (unsigned long); /* display queued I/O info */ } analyzers_head; static void dminfo_register_target_analyzer(struct dminfo_target_analyzer *ta) { ta->next = analyzers_head.next; analyzers_head.next = ta; } static struct dminfo_target_analyzer *find_target_analyzer(char *target_type) { struct dminfo_target_analyzer *ta; for (ta = analyzers_head.next; ta; ta = ta->next) if (!strcmp(ta->target_name, target_type)) return ta; return NULL; } /* * zero target */ static int zero_ready(void) { return 1; } static void zero_show_table(unsigned long target) { unsigned long long start, len; /* Get target information */ GET_VALUE(target, dm_target, begin, start); GET_VALUE(target, dm_target, len, len); fprintf(fp, " begin:%llu len:%llu", start, len); } static void zero_show_status(unsigned long target) { /* zero target has no status */ fprintf(fp, " No status info"); } static void zero_show_queue(unsigned long target) { /* zero target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer zero_analyzer = { .target_name = "zero", .ready = zero_ready, .show_table = zero_show_table, .show_status = zero_show_status, .show_queue = zero_show_queue }; /* * error target */ static int error_ready(void) { return 1; } static void error_show_table(unsigned long target) { unsigned long long start, len; /* Get target information */ GET_VALUE(target, dm_target, begin, start); GET_VALUE(target, dm_target, len, len); fprintf(fp, " begin:%llu len:%llu", start, len); } static void error_show_status(unsigned long target) { /* error target has no status */ fprintf(fp, " No status info"); } static void error_show_queue(unsigned long target) { /* error target has no 
queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer error_analyzer = { .target_name = "error", .ready = error_ready, .show_table = error_show_table, .show_status = error_show_status, .show_queue = error_show_queue }; /* * linear target */ static int linear_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct linear_c")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: linear_c"); return 0; } static void linear_show_table(unsigned long target) { unsigned long lc, dm_dev; unsigned long long start, len, offset; char devt[BUFSIZE]; /* Get target information */ GET_VALUE(target, dm_target, begin, start); GET_VALUE(target, dm_target, len, len); GET_VALUE(target, dm_target, private, lc); GET_VALUE(lc, linear_c, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, devt, BUFSIZE); GET_VALUE(lc, linear_c, start, offset); fprintf(fp, " begin:%llu len:%llu dev:%s offset:%llu", start, len, devt, offset); } static void linear_show_status(unsigned long target) { /* linear target has no status */ fprintf(fp, " No status info"); } static void linear_show_queue(unsigned long target) { /* linear target has no I/O queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer linear_analyzer = { .target_name = "linear", .ready = linear_ready, .show_table = linear_show_table, .show_status = linear_show_status, .show_queue = linear_show_queue }; /* * mirror target */ static int mirror_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct mirror_set")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: mirror_set"); return 0; } static void mirror_show_table(unsigned long target) { unsigned int i, nr_mir; unsigned long ms, rh, log, log_type, mir_size, mir_head, mir, dm_dev; unsigned long long offset; char buf[BUFSIZE]; /* Get the address of struct mirror_set */ GET_VALUE(target, dm_target, private, ms); /* Get the log-type name of the mirror_set 
*/ GET_ADDR(ms, mirror_set, rh, rh); GET_VALUE(rh, region_hash, log, log); GET_VALUE(log, dirty_log, type, log_type); GET_PTR_STR(log_type, dirty_log_type, name, buf, BUFSIZE); fprintf(fp, " log:%s", buf); /* * Display information for each mirror disks. * * mir_head = mirror_set.mirror. * This is the head of struct mirror array. */ fprintf(fp, " dev:"); mir_size = STRUCT_SIZE("struct mirror"); GET_ADDR(ms, mirror_set, mirror, mir_head); GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); for (i = 0; i < nr_mir; i++) { mir = mir_head + mir_size * i; /* Get next mirror */ /* Get the devt of the mirror disk */ GET_VALUE(mir, mirror, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); /* Get the offset of the mirror disk */ GET_VALUE(mir, mirror, offset, offset); fprintf(fp, "%s(%llu)%s", buf, offset, i == nr_mir - 1 ? "" : ","); } if (i != nr_mir) fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); } static void mirror_show_status(unsigned long target) { unsigned int i, nr_mir, synced, nr_error; unsigned long ms, mir_size, mir_head, mir, dm_dev; char buf[BUFSIZE]; /* Get the address of struct mirror_set */ GET_VALUE(target, dm_target, private, ms); /* Get the status info of the mirror_set */ GET_VALUE(ms, mirror_set, in_sync, synced); fprintf(fp, " in_sync:%d", synced); /* * Display information for each mirror disks. * * mir_head = mirror_set.mirror. * This is the head of struct mirror array. */ fprintf(fp, " dev:"); mir_size = STRUCT_SIZE("struct mirror"); GET_ADDR(ms, mirror_set, mirror, mir_head); GET_VALUE(ms, mirror_set, nr_mirrors, nr_mir); for (i = 0; i < nr_mir; i++) { mir = mir_head + mir_size * i; /* Get next mirror */ /* Get the devt of the mirror disk */ GET_VALUE(mir, mirror, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); /* Get the offset of the mirror disk */ GET_VALUE(mir, mirror, error_count, nr_error); fprintf(fp, "%s(%c,%d)%s", buf, nr_error ? 'D' : 'A', nr_error, i == nr_mir - 1 ? 
"" : ","); } if (i != nr_mir) fprintf(fp, " ERROR: dev are less than nr_mir:%d", nr_mir); } static void mirror_show_queue(unsigned long target) { unsigned long ms, rlist, wlist, rhead, whead; unsigned long rh, quis_head, rcov_head, quis_next, rcov_next; /* Get the address of struct mirror_set */ GET_VALUE(target, dm_target, private, ms); /* Get the address of queued I/O lists in struct mirror_set */ GET_ADDR(ms, mirror_set, reads, rlist); GET_ADDR(ms, mirror_set, writes, wlist); /* Get the head of queued I/O lists */ GET_VALUE(rlist, bio_list, head, rhead); GET_VALUE(wlist, bio_list, head, whead); fprintf(fp, " %s", rhead ? "reads" : "(reads)"); fprintf(fp, " %s", whead ? "writes" : "(writes)"); /* Get the address of the struct region_hash */ GET_ADDR(ms, mirror_set, rh, rh); /* Get the address of recover region lists in struct region_hash */ GET_ADDR(rh, region_hash, quiesced_regions, quis_head); GET_ADDR(rh, region_hash, recovered_regions, rcov_head); /* Get the head of recover region lists */ quis_next = get_next_from_list_head(quis_head); rcov_next = get_next_from_list_head(rcov_head); fprintf(fp, " %s", quis_next != quis_head ? "quiesced" : "(quiesced)"); fprintf(fp, " %s", rcov_next != rcov_head ? 
"recovered" : "(recovered)"); } static struct dminfo_target_analyzer mirror_analyzer = { .target_name = "mirror", .ready = mirror_ready, .show_table = mirror_show_table, .show_status = mirror_show_status, .show_queue = mirror_show_queue }; /* * multipath target */ static int multipath_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct multipath")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: multipath"); return 0; } static void multipath_show_table(unsigned long target) { int i, j; unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; unsigned long mp, hwh, hwh_type, ps, ps_type, path, dm_dev; unsigned long pg_head, pg_next, pg_last; unsigned long path_head, path_next, path_last; char name[BUFSIZE]; /* Get the address of struct multipath */ GET_VALUE(target, dm_target, private, mp); /* Get features information */ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); /* Get the hardware-handler information */ GET_ADDR(mp, multipath, hw_handler, hwh); GET_VALUE(hwh, hw_handler, type, hwh_type); if (hwh_type) GET_PTR_STR(hwh_type, hw_handler_type, name, name, BUFSIZE); else strcpy(name, "none"); /* Get the number of priority groups */ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", queue_if_no_path, name, nr_pgs); /* Display information for each priority group */ fprintf(fp, " %-2s %-13s %-8s %s", "PG", "PATH_SELECTOR", "NR_PATHS", "PATHS"); GET_ADDR(mp, multipath, priority_groups, pg_head); i = 0; list_for_each (pg_next, pg_head, pg_last) { /* pg_next == struct priority_group */ /* Get the index of the priority group */ GET_VALUE(pg_next, priority_group, pg_num, pg_id); /* Get the name of path selector */ GET_ADDR(pg_next, priority_group, ps, ps); GET_VALUE(ps, path_selector, type, ps_type); GET_PTR_STR(ps_type, path_selector_type, name, name, BUFSIZE); /* Get the number of paths in the priority group */ GET_VALUE(pg_next, 
priority_group, nr_pgpaths, nr_paths); fprintf(fp, "\n %-2d %-13s %-8d ", pg_id, name, nr_paths); /* Display information for each path */ GET_ADDR(pg_next, priority_group, pgpaths, path_head); j = 0; list_for_each (path_next, path_head, path_last) { /* path_next == struct pgpath */ /* Get the devt of the pgpath */ GET_ADDR(path_next, pgpath, path, path); GET_VALUE(path, path, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, name, BUFSIZE); fprintf(fp, " %s", name); j++; } if (j != nr_paths) fprintf(fp, " ERROR: paths are less than nr_paths:%d", nr_paths); i++; } if (i != nr_pgs) fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); } static void multipath_show_status(unsigned long target) { int i, j; unsigned int queue_if_no_path, nr_pgs, pg_id, nr_paths; unsigned int bypassed_pg, path_active, nr_fails; unsigned long mp, hwh, hwh_type, cur_pg, path, dm_dev; unsigned long pg_head, pg_next, pg_last; unsigned long path_head, path_next, path_last; char buf[BUFSIZE], path_status; /* Get the address of struct multipath */ GET_VALUE(target, dm_target, private, mp); /* Get features information */ GET_VALUE(mp, multipath, queue_if_no_path, queue_if_no_path); /* Get the hardware-handler information */ GET_ADDR(mp, multipath, hw_handler, hwh); GET_VALUE(hwh, hw_handler, type, hwh_type); if (hwh_type) GET_PTR_STR(hwh_type, hw_handler_type, name, buf, BUFSIZE); else strcpy(buf, "none"); /* Get the number of priority groups */ GET_VALUE(mp, multipath, nr_priority_groups, nr_pgs); fprintf(fp, " queue_if_no_path:%d hwh:%s nr_pgs:%d\n", queue_if_no_path, buf, nr_pgs); /* Display information for each priority group */ fprintf(fp, " %-2s %-9s %-8s %s", "PG", "PG_STATUS", "NR_PATHS", "PATHS"); GET_ADDR(mp, multipath, priority_groups, pg_head); i = 0; list_for_each (pg_next, pg_head, pg_last) { /* pg_next == struct priority_group */ /* Get the index of the priority group */ GET_VALUE(pg_next, priority_group, pg_num, pg_id); /* Get the status of the priority group */ GET_VALUE(pg_next, 
priority_group, bypassed, bypassed_pg); if (bypassed_pg) strcpy(buf, "disabled"); else { GET_VALUE(mp, multipath, current_pg, cur_pg); if (pg_next == cur_pg) strcpy(buf, "active"); else strcpy(buf, "enabled"); } /* Get the number of paths in the priority group */ GET_VALUE(pg_next, priority_group, nr_pgpaths, nr_paths); fprintf(fp, "\n %-2d %-9s %-8d ", pg_id, buf, nr_paths); /* Display information for each path */ GET_ADDR(pg_next, priority_group, pgpaths, path_head); j = 0; list_for_each (path_next, path_head, path_last) { /* path_next == struct pgpath */ /* Get the devt of the pgpath */ GET_ADDR(path_next, pgpath, path, path); GET_VALUE(path, path, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); /* Get the status of the path */ GET_VALUE(path, path, is_active, path_active); GET_VALUE(path_next, pgpath, fail_count, nr_fails); path_status = path_active ? 'A' : 'F'; fprintf(fp, " %s(%c,%u)", buf, path_status, nr_fails); j++; } if (j != nr_paths) fprintf(fp, " ERROR: paths are less than nr_paths:%d", nr_paths); i++; } if (i != nr_pgs) fprintf(fp, " ERROR: pgs are less than nr_pgs:%d", nr_pgs); } static void multipath_show_queue(unsigned long target) { unsigned int queue_size; unsigned long mp; /* Get the address of struct multipath */ GET_VALUE(target, dm_target, private, mp); /* Get the size of queued I/Os in this 'target' */ GET_VALUE(mp, multipath, queue_size, queue_size); fprintf(fp, " queue_size:%d", queue_size); } static struct dminfo_target_analyzer multipath_analyzer = { .target_name = "multipath", .ready = multipath_ready, .show_table = multipath_show_table, .show_status = multipath_show_status, .show_queue = multipath_show_queue }; /* * crypt target */ static int crypt_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct crypt_config")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: crypt_config"); return 0; } #define DMINFO_CRYPTO_TFM_MODE_ECB 0x00000001 #define 
DMINFO_CRYPTO_TFM_MODE_CBC 0x00000002 static void crypt_show_table(unsigned long target) { int i, cit_mode, key_size; unsigned long cc, tfm, crt_alg, cipher, iv_mode, dm_dev; char buf[BUFSIZE], *chainmode; /* Get the address of struct crypt_config */ GET_VALUE(target, dm_target, private, cc); /* Get the cipher name of the crypt_tfm */ GET_VALUE(cc, crypt_config, tfm, tfm); GET_VALUE(tfm, crypto_tfm, __crt_alg, crt_alg); GET_STR(crt_alg, crypto_alg, cra_name, buf, BUFSIZE); fprintf(fp, " type:%s", buf); /* Get the cit_mode of the crypt_tfm */ GET_ADDR(tfm, crypto_tfm, crt_u, cipher); GET_VALUE(cipher, cipher_tfm, cit_mode, cit_mode); if (MEMBER_EXISTS("struct crypt_config", "iv_mode")) { if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) chainmode = "cbc"; else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) chainmode = "ecb"; else chainmode = "unknown"; /* Get the iv_mode of the crypt_config */ GET_VALUE(cc, crypt_config, iv_mode, iv_mode); if (iv_mode) { GET_PTR_STR(cc, crypt_config, iv_mode, buf, BUFSIZE); fprintf(fp, "-%s-%s", chainmode, buf); } else fprintf(fp, "-%s", chainmode); } else { /* Compatibility mode for old dm-crypt cipher strings */ if (cit_mode == DMINFO_CRYPTO_TFM_MODE_CBC) chainmode = "plain"; else if (cit_mode == DMINFO_CRYPTO_TFM_MODE_ECB) chainmode = "ecb"; else chainmode = "unknown"; fprintf(fp, "-%s", chainmode); } /* Get the devt of the crypt_config */ GET_VALUE(cc, crypt_config, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); fprintf(fp, " dev:%s", buf); /* * Get the key of the crypt_config. 
*/ GET_VALUE(cc, crypt_config, key_size, key_size); GET_STR(cc, crypt_config, key, buf, MIN(key_size + 1, BUFSIZE)); fprintf(fp, " key:"); for (i = 0; i < key_size; i++) fprintf(fp, "%02x", (unsigned char)buf[i]); } static void crypt_show_status(unsigned long target) { /* crypt target has no status */ fprintf(fp, " No status info"); } static void crypt_show_queue(unsigned long target) { /* crypt target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer crypt_analyzer = { .target_name = "crypt", .ready = crypt_ready, .show_table = crypt_show_table, .show_status = crypt_show_status, .show_queue = crypt_show_queue }; /* * stripe target */ static int stripe_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct stripe_c")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: stripe_c"); return 0; } static void stripe_show_table(unsigned long target) { unsigned int i, n_stripe; unsigned long sc, stripe_size, s, head, dm_dev; unsigned long long mask; char buf[BUFSIZE]; /* Get the address of struct stripe_c */ GET_VALUE(target, dm_target, private, sc); /* Get the chunk_size of the stripe_c */ GET_VALUE(sc, stripe_c, chunk_mask, mask); fprintf(fp, " chunk_size:%llu", mask + 1); /* * Display the information of each stripe disks. * * head = stripe_c.stripe. * This is the head of struct stripe array. */ stripe_size = STRUCT_SIZE("struct stripe"); GET_ADDR(sc, stripe_c, stripe, head); GET_VALUE(sc, stripe_c, stripes, n_stripe); fprintf(fp, " dev:"); for (i = 0; i < n_stripe; i++) { s = head + stripe_size * i; /* Get next stripe */ /* Get the devt of the stripe disk */ GET_VALUE(s, stripe, dev, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); fprintf(fp, "%s%s", buf, i == n_stripe - 1 ? 
"" : ","); } if (i != n_stripe) fprintf(fp, " ERROR: dev are less than n_stripe:%d", n_stripe); } static void stripe_show_status(unsigned long target) { /* stripe target has no status */ fprintf(fp, " No status info"); } static void stripe_show_queue(unsigned long target) { /* stripe target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer stripe_analyzer = { .target_name = "striped", .ready = stripe_ready, .show_table = stripe_show_table, .show_status = stripe_show_status, .show_queue = stripe_show_queue }; /* * snapshot target */ static int snapshot_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct dm_snapshot")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: dm_snapshot"); return 0; } static void snapshot_show_table(unsigned long target) { unsigned long snap, orig_dev, cow_dev; unsigned long long chunk_size; char orig_name[BUFSIZE], cow_name[BUFSIZE], type; /* Get the address of struct dm_snapshot */ GET_VALUE(target, dm_target, private, snap); /* Get snapshot parameters of the dm_snapshot */ GET_VALUE(snap, dm_snapshot, origin, orig_dev); GET_STR(orig_dev, dm_dev, name, orig_name, BUFSIZE); GET_VALUE(snap, dm_snapshot, cow, cow_dev); GET_STR(cow_dev, dm_dev, name, cow_name, BUFSIZE); GET_VALUE(snap, dm_snapshot, type, type); GET_VALUE(snap, dm_snapshot, chunk_size, chunk_size); fprintf(fp, " orig:%s cow:%s type:%c chunk_size:%llu", orig_name, cow_name, type, chunk_size); } static void snapshot_show_status(unsigned long target) { int valid; unsigned long snap; /* Get the address of struct dm_snapshot */ GET_VALUE(target, dm_target, private, snap); /* Get snapshot parameters of the dm_snapshot */ GET_VALUE(snap, dm_snapshot, valid, valid); fprintf(fp, " vaild:%d", valid); } static void snapshot_show_queue(unsigned long target) { fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer snapshot_analyzer = { .target_name = "snapshot", .ready = 
snapshot_ready, .show_table = snapshot_show_table, .show_status = snapshot_show_status, .show_queue = snapshot_show_queue }; /* * snapshot-origin target */ static int origin_ready(void) { return 1; } static void origin_show_table(unsigned long target) { unsigned long dm_dev; char buf[BUFSIZE]; /* Get the name of the struct dm_dev */ GET_VALUE(target, dm_target, private, dm_dev); GET_STR(dm_dev, dm_dev, name, buf, BUFSIZE); fprintf(fp, " orig_dev:%s", buf); } static void origin_show_status(unsigned long target) { /* snapshot-origin target has no status */ fprintf(fp, " No status info"); } static void origin_show_queue(unsigned long target) { /* snapshot-origin target has no queue */ fprintf(fp, " No queue info"); } static struct dminfo_target_analyzer snapshot_origin_analyzer = { .target_name = "snapshot-origin", .ready = origin_ready, .show_table = origin_show_table, .show_status = origin_show_status, .show_queue = origin_show_queue }; /* * Core part of dminfo */ #define DMINFO_LIST 0 #define DMINFO_DEPS 1 #define DMINFO_TABLE 2 #define DMINFO_STATUS 3 #define DMINFO_QUEUE 4 static int dm_core_ready(void) { static int debuginfo = 0; if (debuginfo) return 1; if (STRUCT_EXISTS("struct hash_cell")) { debuginfo = 1; return 1; } else fprintf(fp, "No such struct info: hash_cell\n"); return 0; } /* Display dependency information of the 'table' */ static void dminfo_show_deps(unsigned long table) { int major, minor, count; unsigned long head, next, last, dev, bdev; char buf[BUFSIZE]; /* head = dm_table.devices */ GET_ADDR(table, dm_table, devices, head); fprintf(fp, " %-3s %-3s %-16s %-5s %s\n", "MAJ", "MIN", "GENDISK", "COUNT", "DEVNAME"); list_for_each (next, head, last) { /* Get dependency information. 
(next == struct *dm_dev) */ GET_VALUE(next, dm_dev, count, count); GET_VALUE(next, dm_dev, bdev, bdev); GET_VALUE(bdev, block_device, bd_disk, dev); GET_VALUE(dev, gendisk, major, major); GET_VALUE(dev, gendisk, first_minor, minor); GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); fprintf(fp, " %-3d %-3d %-16lx %-5d %s\n", major, minor, dev, count, buf); } } /* * Display target specific information in the 'table', if the target * analyzer is registered and available. */ static void dminfo_show_details(unsigned long table, unsigned int num_targets, int info_type) { unsigned int i; unsigned long head, target_size, target, target_type; struct dminfo_target_analyzer *ta; char buf[BUFSIZE]; /* * head = dm_table.targets. * This is the head of struct dm_target array. */ GET_VALUE(table, dm_table, targets, head); target_size = STRUCT_SIZE("struct dm_target"); fprintf(fp, " %-16s %-11s %s\n", "TARGET", "TARGET_TYPE", "PRIVATE_DATA"); for (i = 0; i < num_targets; i++, fprintf(fp, "\n")) { target = head + target_size * i; /* Get next target */ /* Get target information */ GET_VALUE(target, dm_target, type, target_type); GET_PTR_STR(target_type, target_type, name, buf, BUFSIZE); fprintf(fp, " %-16lx %-11s", target, buf); if (!(ta = find_target_analyzer(buf)) || !ta->ready || !ta->ready()) continue; switch (info_type) { case DMINFO_TABLE: if (ta->show_table) ta->show_table(target); break; case DMINFO_STATUS: if (ta->show_status) ta->show_status(target); break; case DMINFO_QUEUE: if (ta->show_queue) ta->show_queue(target); break; default: break; } } if (i != num_targets) fprintf(fp, " ERROR: targets are less than num_targets:%d", num_targets); } /* * Display lists (and detail information if specified) of existing * dm devices. 
*/ static void dminfo_show_list(int additional_info) { int i, major, minor, array_len; unsigned int num_targets; unsigned long _name_buckets, head, next, last, md, dev, table; char buf[BUFSIZE]; _name_buckets = symbol_value("_name_buckets"); array_len = get_array_length("_name_buckets", NULL, 0); if (additional_info == DMINFO_LIST) fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", "MAJ", "MIN", "MAP_DEV", "DM_TABLE", "TARGETS", "MAPNAME"); for (i = 0; i < array_len; i++) { /* head = _name_buckets[i] */ head = _name_buckets + (i * SIZE(list_head)); list_for_each (next, head, last) { /* next == hash_cell */ /* Get device and table information */ GET_PTR_STR(next, hash_cell, name, buf, BUFSIZE); GET_VALUE(next, hash_cell, md, md); GET_VALUE(md, mapped_device, disk, dev); GET_VALUE(dev, gendisk, major, major); GET_VALUE(dev, gendisk, first_minor, minor); GET_VALUE(md, mapped_device, map, table); GET_VALUE(table, dm_table, num_targets, num_targets); if (additional_info != DMINFO_LIST) fprintf(fp, "%-3s %-3s %-16s %-16s %-7s %s\n", "MAJ", "MIN", "MAP_DEV", "DM_TABLE", "TARGETS", "MAPNAME"); fprintf(fp, "%-3d %-3d %-16lx %-16lx %-7d %s\n", major, minor, md, table, num_targets, buf); switch(additional_info) { case DMINFO_DEPS: dminfo_show_deps(table); break; case DMINFO_TABLE: case DMINFO_STATUS: case DMINFO_QUEUE: dminfo_show_details(table, num_targets, additional_info); break; default: break; } if (additional_info != DMINFO_LIST) fprintf(fp, "\n"); } } } /* * Display the original bio information for the 'bio'. * If the 'bio' is for dm devices, the original bio information is pointed * by bio.bi_private as struct target_io. 
*/ static void dminfo_show_bio(unsigned long bio) { int major, minor; unsigned long target_io, dm_io, dm_bio, md, dev; char buf[BUFSIZE]; /* Get original bio and device information */ GET_VALUE(bio, bio, bi_private, target_io); GET_VALUE(target_io, target_io, io, dm_io); GET_VALUE(dm_io, dm_io, bio, dm_bio); GET_VALUE(dm_io, dm_io, md, md); GET_VALUE(md, mapped_device, disk, dev); GET_VALUE(dev, gendisk, major, major); GET_VALUE(dev, gendisk, first_minor, minor); GET_STR(dev, gendisk, disk_name, buf, BUFSIZE); fprintf(fp, "%-16s %-3s %-3s %-16s %s\n", "DM_BIO_ADDRESS", "MAJ", "MIN", "MAP_DEV", "DEVNAME"); fprintf(fp, "%-16lx %-3d %-3d %-16lx %s\n", dm_bio, major, minor, md, buf); } static void cmd_dminfo(void) { int c, additional_info = DMINFO_LIST; unsigned long bio; if (!dm_core_ready()) return; /* Parse command line option */ while ((c = getopt(argcnt, args, "b:dlqst")) != EOF) { switch(c) { case 'b': bio = stol(optarg, FAULT_ON_ERROR, NULL); dminfo_show_bio(bio); return; case 'd': additional_info = DMINFO_DEPS; break; case 'l': additional_info = DMINFO_LIST; break; case 'q': additional_info = DMINFO_QUEUE; break; case 's': additional_info = DMINFO_STATUS; break; case 't': additional_info = DMINFO_TABLE; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); dminfo_show_list(additional_info); } /* * dminfo help */ static char *help_dminfo[] = { "dminfo", /* command name */ "device mapper (dm) information", /* short description */ "[-b bio | -d | -l | -q | -s | -t]", /* argument synopsis */ " This command displays information about device-mapper mapped ", " devices (dm devices).", " If no argument is entered, displays lists of existing dm devices.", " It's same as -l option.", "", " -b bio displays the information of the dm device which the bio", " is submitted in. 
If the bio isn't for dm devices,", " results will be error.", " -d displays dependency information for existing dm devices.", " -l displays lists of existing dm devices.", " -q displays queued I/O information for each target of", " existing dm devices.", " -s displays status information for each target of existing", " dm devices.", " -t displays table information for each target of existing", " dm devices.", "", "EXAMPLE", " Display lists of dm devices. \"MAP_DEV\" is the address of the", " struct mapped_device. \"DM_TABLE\" is the address of the struct", " dm_table. \"TARGETS\" is the number of targets which are in", " the struct dm_table.", "", " %s> dminfo", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 8 c4866c80 c4866280 1 vg0-snap0", " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", " 253 0 c4840380 c4841880 1 mp0", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", " 253 4 d441e280 c919ed80 1 dummy1", " 253 3 f5dc4280 cba81d80 1 dummy0", " 253 2 f7c53180 c4866180 1 vg0-lv0", " 253 1 f746d280 f746cd80 1 mp0p1", "", " Display the dm device information which the bio is submitted in.", " The bio (ceacee80) is a clone of the bio (ceacee00) which is", " submitted in the dm-3 (dummy0). 
And the bio (ceacee00) is a clone", " of the bio (ceaced80) which is submitted in the dm-4 (dummy1), too.", " The bio (ceaced80) is the original bio.", "", " %s> dminfo -b ceacee80", " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", " ceacee00 253 3 f5dc4280 dm-3", " crash> dminfo -b ceacee00", " DM_BIO_ADDRESS MAJ MIN MAP_DEV DEVNAME", " ceaced80 253 4 d441e280 dm-4", " crash> dminfo -b ceaced80", " dminfo: invalid kernel virtual address: 64 type: \"GET_VALUE: dm_io.bio\"", "", " Display dependency information for each target.", " The vg0-snap0 depends on thd dm-6 (vg0-lv0-real) and the dm-7", " (vg0-snap0-cow)", "", " %s> dminfo -d", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 8 c4866c80 c4866280 1 vg0-snap0", " MAJ MIN GENDISK COUNT DEVNAME", " 253 7 c4866980 1 dm-7", " 253 6 f6a04280 1 dm-6", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", " MAJ MIN GENDISK COUNT DEVNAME", " 8 0 f7f24c80 1 sda", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", " MAJ MIN GENDISK COUNT DEVNAME", " 8 0 f7f24c80 1 sda", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 2 f7c53180 c4866180 1 vg0-lv0", " MAJ MIN GENDISK COUNT DEVNAME", " 253 6 f6a04280 1 dm-6", "", " Display queued I/O information for each target.", " The information is displayed under the \"PRIVATE_DATA\" column.", "", " %s> dminfo -q", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " TARGET TARGET_TYPE PRIVATE_DATA", " f8961080 mirror (reads) (writes) (quiesced) (recovered)", "", " --------------------------------------------------------------", " \"reads/writes\" are members of the struct mirror_set, and", " \"quiesced/recovered\" are members of the struct region_hash.", " If the list is empty, the member is bracketed by \"()\".", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 0 c4840380 c4841880 1 mp0", " 
TARGET TARGET_TYPE PRIVATE_DATA", " f8802080 multipath queue_size:0", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 1 f746d280 f746cd80 1 mp0p1", " TARGET TARGET_TYPE PRIVATE_DATA", " f8821080 linear No queue info", "", " Display status information for each target.", " The information is displayed under the \"PRIVATE_DATA\" column.", "", " %s> dminfo -s", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 0 c4840380 c4841880 1 mp0", " TARGET TARGET_TYPE PRIVATE_DATA", " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", " PG PG_STATUS NR_PATHS PATHS", " 1 active 2 8:16(A,0) 8:32(A,0)", "", " --------------------------------------------------------------", " Format of \"PATHS\": :(,)", " Status: A:active, F:faulty", " Fail_count: the value of the struct pgpath.fail_count", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " TARGET TARGET_TYPE PRIVATE_DATA", " f8961080 mirror in_sync:1 dev:8:16(A,0),8:32(A,0)", "", " --------------------------------------------------------------", " Format of \"dev\": :(,)", " Status: A:active, D:degraded", " Error_count: the value of the struct mirror.error_count", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 1 f746d280 f746cd80 1 mp0p1", " TARGET TARGET_TYPE PRIVATE_DATA", " f8821080 linear No status info", "", " Display table information for each target.", " The information is displayed under the \"PRIVATE_DATA\" column.", "", " %s> dminfo -t", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 8 c4866c80 c4866280 1 vg0-snap0", " TARGET TARGET_TYPE PRIVATE_DATA", " f89b4080 snapshot orig:253:6 cow:253:7 type:P chunk_size:16", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 6 f6a04a80 f6a04580 1 vg0-lv0-real", " TARGET TARGET_TYPE PRIVATE_DATA", " f890f080 linear begin:0 len:204800 dev:8:5 offset:384", "", " MAJ MIN MAP_DEV 
DM_TABLE TARGETS MAPNAME", " 253 0 c4840380 c4841880 1 mp0", " TARGET TARGET_TYPE PRIVATE_DATA", " f8802080 multipath queue_if_no_path:0 hwh:none nr_pgs:1", " PG PATH_SELECTOR NR_PATHS PATHS", " 1 round-robin 2 8:16 8:32", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 5 f7c50c80 c488e480 1 via_cbeheddbdd", " TARGET TARGET_TYPE PRIVATE_DATA", " f8961080 mirror log:core dev:8:16(0),8:32(0)", "", " --------------------------------------------------------------", " Format of \"dev\": :()", " Offset: the value of the struct mirror.offset", " --------------------------------------------------------------", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 7 c4866a80 c4866380 1 vg0-snap0-cow", " TARGET TARGET_TYPE PRIVATE_DATA", " f899d080 linear begin:0 len:8192 dev:8:5 offset:205184", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 2 f7c53180 c4866180 1 vg0-lv0", " TARGET TARGET_TYPE PRIVATE_DATA", " f8bbc080 snapshot-origin orig_dev:253:6", "", " MAJ MIN MAP_DEV DM_TABLE TARGETS MAPNAME", " 253 1 f746d280 f746cd80 1 mp0p1", " TARGET TARGET_TYPE PRIVATE_DATA", " f8821080 linear begin:0 len:2040192 dev:253:0 offset:63", NULL }; /* * Registering command extension */ static struct command_table_entry command_table[] = { {"dminfo", cmd_dminfo, help_dminfo, 0}, {NULL, NULL, NULL, 0}, }; void __attribute__((constructor)) dminfo_init(void) { register_extension(command_table); dminfo_register_target_analyzer(&zero_analyzer); dminfo_register_target_analyzer(&error_analyzer); dminfo_register_target_analyzer(&linear_analyzer); dminfo_register_target_analyzer(&mirror_analyzer); dminfo_register_target_analyzer(&multipath_analyzer); dminfo_register_target_analyzer(&crypt_analyzer); dminfo_register_target_analyzer(&stripe_analyzer); dminfo_register_target_analyzer(&snapshot_analyzer); dminfo_register_target_analyzer(&snapshot_origin_analyzer); } void __attribute__((destructor)) dminfo_fini(void) { } 
crash-utility-crash-9cd43f5/extensions/Makefile0000664000372000037200000000363515107550337021242 0ustar juerghjuergh# # Makefile for building crash shared object extensions # # Copyright (C) 2005, 2007, 2009, 2011, 2013 David Anderson # Copyright (C) 2005, 2007, 2009, 2011, 2013 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # To build the extension shared objects in this directory, run # "make extensions" from the top-level directory. # # To add a new extension object, simply copy your module's .c file # to this directory, and it will be built automatically using # the "standard" compile line. If that compile line does not # suffice, create a .mk file with the same prefix as the .c file, # and that makefile will be invoked. 
# CC ?= gcc CONTRIB_SO := $(patsubst %.c,%.so,$(wildcard *.c)) all: link_defs $(CONTRIB_SO) link_defs: @rm -f defs.h @ln ../defs.h $(CONTRIB_SO): %.so: %.c defs.h @if [ -f $*.mk ]; then \ $(MAKE) -f $*.mk; \ else \ grep -q '((constructor))' $*.c && { \ echo "$(CC) -Wall -g -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS)"; \ $(CC) -Wall -g -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS); \ } || { \ echo "$(CC) -Wall -g -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS)"; \ $(CC) -Wall -g -nostartfiles -shared -rdynamic -o $@ $*.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS); \ }; \ fi clean: rm -f $(CONTRIB_SO) @for MAKEFILE in `grep -sl "^clean:" *.mk`; \ do $(MAKE) -f $$MAKEFILE clean; \ done crash-utility-crash-9cd43f5/extensions/snap.c0000664000372000037200000005004515107550337020704 0ustar juerghjuergh/* snap.c - capture live memory into a kdump or netdump dumpfile * * Copyright (C) 2009, 2013, 2014, 2017 David Anderson * Copyright (C) 2009, 2013, 2014, 2017 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include #include #include void snap_init(void); void snap_fini(void); void cmd_snap(void); char *help_snap[]; static struct command_table_entry command_table[] = { { "snap", cmd_snap, help_snap, 0 }, { NULL } }; static char *generate_elf_header(int, int, char *); static int verify_paddr(physaddr_t); static void init_ram_segments(void); static int print_progress(const char *, ulong); #if defined(X86) || defined(X86_64) || defined(IA64) || defined(PPC64) || defined(ARM64) int supported = TRUE; #else int supported = FALSE; #endif void __attribute__((constructor)) snap_init(void) /* Register the command set. */ { register_extension(command_table); } void __attribute__((destructor)) snap_fini(void) { } /* * Just pass in an unused filename. */ void cmd_snap(void) { int c, fd, n; physaddr_t paddr; size_t offset; char *buf; char *filename; struct node_table *nt; int type; char *elf_header; Elf64_Phdr *load; int load_index; if (!supported) error(FATAL, "command not supported on the %s architecture\n", pc->machine_type); filename = NULL; buf = GETBUF(PAGESIZE()); type = KDUMP_ELF64; while ((c = getopt(argcnt, args, "n")) != EOF) { switch(c) { case 'n': if (machine_type("X86_64")) option_not_supported('n'); else type = NETDUMP_ELF64; break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { if (filename) cmd_usage(pc->curcmd, SYNOPSIS); if (file_exists(args[optind], NULL)) error(FATAL, "%s: file already exists\n", args[optind]); else if ((fd = open(args[optind], O_RDWR|O_CREAT, 0644)) < 0) error(FATAL, args[optind]); filename = args[optind]; optind++; } if (!filename) cmd_usage(pc->curcmd, SYNOPSIS); init_ram_segments(); if (!(elf_header = generate_elf_header(type, fd, filename))) error(FATAL, "cannot generate ELF header\n"); load = (Elf64_Phdr *)(elf_header + sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr)); load_index = machine_type("X86_64") || machine_type("IA64") ? 
1 : 0; for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; paddr = nt->start_paddr; offset = load[load_index + n].p_offset; for (c = 0; c < nt->size; c++, paddr += PAGESIZE()) { if (!verify_paddr(paddr)) continue; if (!readmem(paddr, PHYSADDR, &buf[0], PAGESIZE(), "memory page", QUIET|RETURN_ON_ERROR)) continue; lseek(fd, (off_t)(paddr + offset - nt->start_paddr), SEEK_SET); if (write(fd, &buf[0], PAGESIZE()) != PAGESIZE()) error(FATAL, "write to dumpfile failed\n"); if (!print_progress(filename, BTOP(paddr))) return; } } fprintf(stderr, "\r%s: [100%%] ", filename); fprintf(fp, "\n"); sprintf(buf, "/bin/ls -l %s\n", filename); system(buf); FREEBUF(elf_header); FREEBUF(buf); } char *help_snap[] = { "snap", /* command name */ "take a memory snapshot", /* short description */ "[-n] dumpfile", /* filename */ " This command takes a snapshot of physical memory and creates an ELF vmcore.", " The default vmcore is a kdump-style dumpfile. Supported on x86, x86_64,", " ia64 and ppc64 architectures only.", " ", " -n create a netdump-style vmcore (n/a on x86_64).", NULL }; /* * Architecture-specific and -generic ELF header data borrowed from the * netdump.h file in the netdump package, modified slightly to also create * a kdump-style vmcore. 
*/ /****************************************************************************** * Elf core dumping * ******************************************************************************/ /* * Host-platform independent data */ #define ELF_PRARGSZ (80) /* Number of chars for args */ struct elf_prpsinfo_64 { char pr_state; /* numeric process state */ char pr_sname; /* char for pr_state */ char pr_zomb; /* zombie */ char pr_nice; /* nice val */ __u64 pr_flag; /* flags */ __u32 pr_uid; __u32 pr_gid; __u32 pr_pid, pr_ppid, pr_pgrp, pr_sid; /* Lots missing */ char pr_fname[16]; /* filename of executable */ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ }; /* * i386 specific */ struct user_regs_struct_i386 { __u32 ebx, ecx, edx, esi, edi, ebp, eax; __u16 ds, __ds, es, __es; __u16 fs, __fs, gs, __gs; __u32 orig_eax, eip; __u16 cs, __cs; __u32 eflags, esp; __u16 ss, __ss; }; #define ELF_NGREG_I386 (sizeof (struct user_regs_struct_i386) / sizeof(__u32)) typedef __u32 elf_gregset_i386_t[ELF_NGREG_I386]; struct elf_prstatus_i386 { char pad[72]; elf_gregset_i386_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. */ }; /* * x86_64 specific */ struct user_regs_struct_x86_64 { __u64 r15,r14,r13,r12,rbp,rbx,r11,r10; __u64 r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; __u64 rip,cs,eflags; __u64 rsp,ss; __u64 fs_base, gs_base; __u64 ds,es,fs,gs; }; #define ELF_NGREG_X86_64 (sizeof (struct user_regs_struct_x86_64) / sizeof(__u64)) typedef __u64 elf_gregset_x86_64_t[ELF_NGREG_X86_64]; struct elf_prstatus_x86_64 { char pad[112]; elf_gregset_x86_64_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. 
*/ }; /* * ppc64 specific */ struct user_regs_struct_ppc64 { __u64 gpr[32]; __u64 nip; __u64 msr; __u64 orig_gpr3; __u64 ctr; __u64 link; __u64 xer; __u64 ccr; __u64 softe; __u64 trap; __u64 dar; __u64 dsisr; __u64 result; }; #define ELF_NGREG_PPC64 (sizeof (struct user_regs_struct_ppc64) / sizeof(__u64)) typedef __u64 elf_gregset_ppc64_t[ELF_NGREG_PPC64]; struct elf_prstatus_ppc64 { char pad[112]; elf_gregset_ppc64_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. */ }; /* * ia64 specific */ struct _ia64_fpreg { union { __u64 bits[2]; } u; } __attribute__ ((aligned (16))); struct user_regs_struct_ia64 { /* The following registers are saved by SAVE_MIN: */ __u64 b6; /* scratch */ __u64 b7; /* scratch */ __u64 ar_csd; /* used by cmp8xchg16 (scratch) */ __u64 ar_ssd; /* reserved for future use (scratch) */ __u64 r8; /* scratch (return value register 0) */ __u64 r9; /* scratch (return value register 1) */ __u64 r10; /* scratch (return value register 2) */ __u64 r11; /* scratch (return value register 3) */ __u64 cr_ipsr; /* interrupted task's psr */ __u64 cr_iip; /* interrupted task's instruction pointer */ __u64 cr_ifs; /* interrupted task's function state */ __u64 ar_unat; /* interrupted task's NaT register (preserved) */ __u64 ar_pfs; /* prev function state */ __u64 ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ __u64 ar_rnat; /* RSE NaT */ __u64 ar_bspstore; /* RSE bspstore */ __u64 pr; /* 64 predicate registers (1 bit each) */ __u64 b0; /* return pointer (bp) */ __u64 loadrs; /* size of dirty partition << 16 */ __u64 r1; /* the gp pointer */ __u64 r12; /* interrupted task's memory stack pointer */ __u64 r13; /* thread pointer */ __u64 ar_fpsr; /* floating point status (preserved) */ __u64 r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ __u64 r14; /* scratch */ __u64 r2; /* scratch */ __u64 r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ __u64 r16; /* scratch */ __u64 r17; /* scratch */ __u64 r18; /* scratch */ __u64 r19; /* scratch */ __u64 r20; /* scratch */ __u64 r21; /* scratch */ __u64 r22; /* scratch */ __u64 r23; /* scratch */ __u64 r24; /* scratch */ __u64 r25; /* scratch */ __u64 r26; /* scratch */ __u64 r27; /* scratch */ __u64 r28; /* scratch */ __u64 r29; /* scratch */ __u64 r30; /* scratch */ __u64 r31; /* scratch */ __u64 ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct _ia64_fpreg f6; /* scratch */ struct _ia64_fpreg f7; /* scratch */ struct _ia64_fpreg f8; /* scratch */ struct _ia64_fpreg f9; /* scratch */ struct _ia64_fpreg f10; /* scratch */ struct _ia64_fpreg f11; /* scratch */ }; #define ELF_NGREG_IA64 (sizeof (struct user_regs_struct_ia64) / sizeof(__u64)) typedef __u64 elf_gregset_ia64_t[ELF_NGREG_IA64]; struct elf_prstatus_ia64 { char pad[112]; elf_gregset_ia64_t pr_reg; /* GP registers */ __u32 pr_fpvalid; /* True if math co-processor being used. 
*/ }; /* * arm64 specific */ struct user_pt_regs_arm64 { __u64 regs[31]; __u64 sp; __u64 pc; __u64 pstate; }; #define ELF_NGREG_ARM64 (sizeof (struct user_pt_regs_arm64) / sizeof(elf_greg_t)) #ifndef elf_greg_t typedef unsigned long elf_greg_t; #endif typedef elf_greg_t elf_gregset_arm64_t[ELF_NGREG_ARM64]; struct elf_prstatus_arm64 { char pad[112]; elf_gregset_arm64_t pr_reg; int pr_fpvalid; }; union prstatus { struct elf_prstatus_i386 x86; struct elf_prstatus_x86_64 x86_64; struct elf_prstatus_ppc64 ppc64; struct elf_prstatus_ia64 ia64; struct elf_prstatus_arm64 arm64; }; static size_t dump_elf_note(char *buf, Elf64_Word type, char *name, char *desc, int d_len) { Elf64_Nhdr *note; size_t len; note = (Elf64_Nhdr *)buf; note->n_namesz = strlen(name); note->n_descsz = d_len; note->n_type = type; len = sizeof(Elf64_Nhdr); memcpy(buf + len, name, note->n_namesz); len = roundup(len + note->n_namesz, 4); memcpy(buf + len, desc, note->n_descsz); len = roundup(len + note->n_descsz, 4); return len; } char * generate_elf_header(int type, int fd, char *filename) { int i, n; char *buffer, *ptr; Elf64_Ehdr *elf; Elf64_Phdr *notes; Elf64_Phdr *load; size_t offset, len, l_offset; size_t data_offset; struct elf_prpsinfo_64 prpsinfo; union prstatus prstatus; int prstatus_len; ushort e_machine; int num_segments; struct node_table *nt; struct SNAP_info { ulonglong task_struct; ulonglong arch_data1; ulonglong arch_data2; } SNAP_info; num_segments = vt->numnodes; if (machine_type("X86_64")) { e_machine = EM_X86_64; prstatus_len = sizeof(prstatus.x86_64); num_segments += 1; /* mapped kernel section for phys_base */ } else if (machine_type("X86")) { e_machine = EM_386; prstatus_len = sizeof(prstatus.x86); } else if (machine_type("IA64")) { e_machine = EM_IA_64; prstatus_len = sizeof(prstatus.ia64); num_segments += 1; /* mapped kernel section for phys_start */ } else if (machine_type("PPC64")) { e_machine = EM_PPC64; prstatus_len = sizeof(prstatus.ppc64); } else if 
(machine_type("ARM64")) { e_machine = EM_AARCH64; prstatus_len = sizeof(prstatus.arm64); } else return NULL; /* should be enought for the notes + roundup + two blocks */ buffer = (char *)GETBUF(sizeof(Elf64_Ehdr) + num_segments * sizeof(Elf64_Phdr) + PAGESIZE() * 2); offset = 0; ptr = buffer; /* Elf header */ elf = (Elf64_Ehdr *)ptr; memcpy(elf->e_ident, ELFMAG, SELFMAG); elf->e_ident[EI_CLASS] = ELFCLASS64; #if __BYTE_ORDER == __BIG_ENDIAN elf->e_ident[EI_DATA] = ELFDATA2MSB; #else elf->e_ident[EI_DATA] = ELFDATA2LSB; #endif elf->e_ident[EI_VERSION] = EV_CURRENT; elf->e_ident[EI_OSABI] = ELFOSABI_SYSV; elf->e_ident[EI_ABIVERSION] = 0; memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); elf->e_type = ET_CORE; elf->e_machine = e_machine; elf->e_version = EV_CURRENT; elf->e_entry = 0; elf->e_phoff = sizeof(Elf64_Ehdr); elf->e_shoff = 0; elf->e_flags = 0; elf->e_ehsize = sizeof(Elf64_Ehdr); elf->e_phentsize = sizeof(Elf64_Phdr); elf->e_phnum = 1 + num_segments; elf->e_shentsize = 0; elf->e_shnum = 0; elf->e_shstrndx = 0; offset += sizeof(Elf64_Ehdr); ptr += sizeof(Elf64_Ehdr); /* PT_NOTE */ notes = (Elf64_Phdr *)ptr; notes->p_type = PT_NOTE; notes->p_offset = 0; /* TO BE FILLED IN */ notes->p_vaddr = 0; notes->p_paddr = 0; notes->p_filesz = 0; /* TO BE FILLED IN */ notes->p_memsz = 0; notes->p_flags = 0; notes->p_align = 0; offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); /* PT_LOAD */ load = (Elf64_Phdr *)ptr; for (i = n = 0; i < num_segments; i++) { load[i].p_type = PT_LOAD; load[i].p_offset = 0; /* TO BE FILLED IN */ switch (e_machine) { case EM_X86_64: nt = &vt->node_table[n]; if (i == 0) { #ifdef X86_64 load[i].p_vaddr = __START_KERNEL_map; load[i].p_paddr = machdep->machspec->phys_base; #endif load[i].p_filesz = 0; load[i].p_memsz = load[i].p_filesz; } else { load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; n++; } load[i].p_flags = PF_R | PF_W | PF_X; 
load[i].p_align = 0; break; case EM_386: nt = &vt->node_table[n++]; load[i].p_vaddr = 0; load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0; break; case EM_IA_64: nt = &vt->node_table[n]; if (i == 0) { #ifdef IA64 load[i].p_vaddr = machdep->machspec->kernel_start; load[i].p_paddr = machdep->machspec->phys_start; #endif load[i].p_filesz = 0; load[i].p_memsz = load[i].p_filesz; } else { load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; n++; } load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0; break; case EM_PPC64: nt = &vt->node_table[n++]; load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? PAGESIZE() : 0; break; case EM_AARCH64: nt = &vt->node_table[n++]; load[i].p_vaddr = PTOV(nt->start_paddr); load[i].p_paddr = nt->start_paddr; load[i].p_filesz = nt->size * PAGESIZE(); load[i].p_memsz = load[i].p_filesz; load[i].p_flags = PF_R | PF_W | PF_X; load[i].p_align = (type == NETDUMP_ELF64) ? 
PAGESIZE() : 0; break; } // l_offset += load[i].p_filesz; offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); } notes->p_offset = offset; /* NT_PRSTATUS note */ memset(&prstatus, 0, sizeof(prstatus)); len = dump_elf_note(ptr, NT_PRSTATUS, "CORE", (char *)&prstatus, prstatus_len); offset += len; ptr += len; notes->p_filesz += len; /* NT_PRPSINFO note */ memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo_64)); prpsinfo.pr_state = 0; prpsinfo.pr_sname = 'R'; prpsinfo.pr_zomb = 0; strcpy(prpsinfo.pr_fname, "vmlinux"); len = dump_elf_note(ptr, NT_PRPSINFO, "CORE", (char *)&prpsinfo, sizeof(prpsinfo)); offset += len; ptr += len; notes->p_filesz += len; /* NT_TASKSTRUCT note */ SNAP_info.task_struct = CURRENT_TASK(); #ifdef X86_64 SNAP_info.arch_data1 = kt->relocate; SNAP_info.arch_data2 = 0; #elif ARM64 SNAP_info.arch_data1 = machdep->machspec->kimage_voffset; SNAP_info.arch_data2 = (machdep->machspec->VA_BITS_ACTUAL << 32) | machdep->machspec->CONFIG_ARM64_VA_BITS; #else SNAP_info.arch_data1 = 0; SNAP_info.arch_data2 = 0; #endif len = dump_elf_note (ptr, NT_TASKSTRUCT, "SNAP", (char *)&SNAP_info, sizeof(struct SNAP_info)); offset += len; ptr += len; notes->p_filesz += len; if (type == NETDUMP_ELF64) offset = roundup (offset, PAGESIZE()); l_offset = offset; for (i = 0; i < num_segments; i++) { load[i].p_offset = l_offset; l_offset += load[i].p_filesz; } data_offset = offset; while (offset > 0) { len = write(fd, buffer + (data_offset - offset), offset); if (len < 0) { perror(filename); FREEBUF(buffer); return NULL; } offset -= len; } return buffer; } struct ram_segments { physaddr_t start; physaddr_t end; }; static struct ram_segments *ram_segments = NULL; static int nr_segments = 0; static void init_ram_segments(void) { int i, errflag; FILE *iomem; char buf[BUFSIZE], *p1, *p2; physaddr_t start, end; if ((iomem = fopen("/proc/iomem", "r")) == NULL) goto fail_iomem; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, "System RAM")) { console(buf); nr_segments++; } } if 
(!nr_segments) goto fail_iomem; ram_segments = (struct ram_segments *) GETBUF(sizeof(struct ram_segments) * nr_segments); rewind(iomem); i = 0; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, "System RAM")) { if (!(p1 = strstr(buf, ":"))) goto fail_iomem; *p1 = NULLCHAR; clean_line(buf); if (strstr(buf, " ")) goto fail_iomem; p1 = buf; if (!(p2 = strstr(buf, "-"))) goto fail_iomem; *p2 = NULLCHAR; p2++; errflag = 0; start = htoll(p1, RETURN_ON_ERROR|QUIET, &errflag); end = htoll(p2, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) goto fail_iomem; ram_segments[i].start = PHYSPAGEBASE(start); if (PAGEOFFSET(start)) ram_segments[i].start += PAGESIZE(); ram_segments[i].end = PHYSPAGEBASE(end); if (PAGEOFFSET(end) == (PAGESIZE()-1)) ram_segments[i].end += PAGESIZE(); console("ram_segments[%d]: %016llx %016llx [%s-%s]\n", i, (ulonglong)ram_segments[i].start, (ulonglong)ram_segments[i].end, p1, p2); i++; } } fclose(iomem); return; fail_iomem: fclose(iomem); nr_segments = 0; if (ram_segments) FREEBUF(ram_segments); return; } static int verify_paddr(physaddr_t paddr) { int i, ok; if (!machdep->verify_paddr(paddr)) return FALSE; if (!nr_segments) return TRUE; for (i = ok = 0; i < nr_segments; i++) { if ((paddr >= ram_segments[i].start) && (paddr < ram_segments[i].end)) { ok++; break; } } /* * Pre-2.6.13 x86_64 /proc/iomem was restricted to 4GB, * so just accept it. */ if ((paddr >= 0x100000000ULL) && machine_type("X86_64") && (THIS_KERNEL_VERSION < LINUX(2,6,13))) ok++; if (!ok) { if (CRASHDEBUG(1)) console("reject: %llx\n", (ulonglong)paddr); return FALSE; } return TRUE; } /* * Borrowed from makedumpfile, prints a percentage-done value * once per second. 
*/ static int print_progress(const char *filename, ulong current) { int n, progress; time_t tm; struct node_table *nt; static time_t last_time = 0; static ulong total_pages = 0; static ulong written_pages = 0; if (!total_pages) { for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; total_pages += nt->size; } } if (received_SIGINT()) { fprintf(stderr, "\n\n"); return FALSE; } if (++written_pages < total_pages) { tm = time(NULL); if (tm - last_time < 1) return TRUE; last_time = tm; progress = written_pages * 100 / total_pages; } else progress = 100; fprintf(stderr, "\r%s: [%2d%%] ", filename, progress); return TRUE; } crash-utility-crash-9cd43f5/extensions/snap.mk0000664000372000037200000000257715107550337021100 0ustar juerghjuergh# # Copyright (C) 2009, 2011, 2013 David Anderson # Copyright (C) 2009, 2011, 2013 Red Hat, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # ifeq ($(shell arch), i686) TARGET=X86 TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64 endif ifeq ($(shell arch), ppc64) TARGET=PPC64 TARGET_CFLAGS=-m64 endif ifeq ($(shell arch), ppc64le) TARGET=PPC64 TARGET_CFLAGS=-m64 endif ifeq ($(shell arch), ia64) TARGET=IA64 TARGET_CFLAGS= endif ifeq ($(shell arch), x86_64) TARGET=X86_64 TARGET_CFLAGS= endif ifeq ($(shell /bin/ls /usr/include/crash/defs.h 2>/dev/null), /usr/include/crash/defs.h) INCDIR=/usr/include/crash endif ifeq ($(shell /bin/ls ../defs.h 2> /dev/null), ../defs.h) INCDIR=.. endif ifeq ($(shell /bin/ls ./defs.h 2> /dev/null), ./defs.h) INCDIR=. 
endif all: snap.so snap.so: $(INCDIR)/defs.h snap.c gcc -Wall -g -I$(INCDIR) -shared -rdynamic -o snap.so snap.c -fPIC -D$(TARGET) $(TARGET_CFLAGS) $(GDB_FLAGS) crash-utility-crash-9cd43f5/extensions/eppic.mk0000664000372000037200000000424215107550337021226 0ustar juerghjuergh# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. TARGET_FLAGS = -D$(TARGET) ifeq ($(TARGET), PPC64) TARGET_FLAGS += -m64 endif ifeq ($(TARGET), ARM) TARGET_FLAGS += -m32 endif ifeq ($(TARGET), MIPS) TARGET_FLAGS += -m32 endif ifeq ($(TARGET), X86) TARGET_FLAGS += -m32 endif APPFILE=eppic/applications/crash/eppic.c GIT := $(shell which git 2> /dev/null) # crash 8 with gdb 10 uses new third party callback (tcb) API EPPIC_BRANCH=v5.0 all: @if [ -f /usr/bin/flex ] && [ -f /usr/bin/bison ]; \ then \ if [ -f ../$(GDB)/crash.target ]; \ then \ if [ ! -f $(APPFILE) ]; \ then \ if [ -f "$(GIT)" ]; \ then \ if [ -n "$(EPPIC_GIT_URL)" ]; \ then \ git clone $(EPPIC_GIT_OPTIONS) $(EPPIC_GIT_URL) eppic; \ else \ if ping -c 1 -W 5 github.com >/dev/null ; then \ git clone -b $(EPPIC_BRANCH) $(EPPIC_GIT_OPTIONS) https://github.com/lucchouina/eppic.git eppic; \ fi; \ fi; \ else \ if [ ! 
-f "$(GIT)" ]; then \ echo "eppic.so: git command is needed for pulling eppic extension code"; \ fi; \ fi; \ fi; \ if [ -f $(APPFILE) ]; \ then \ make -f eppic.mk eppic.so; \ else \ echo "eppic.so: failed to pull eppic code from git repo"; \ fi; \ else \ echo "eppic.so: build failed: requires the crash $(GDB) module"; \ fi ;\ else \ echo "eppic.so: build failed: requires /usr/bin/flex and /usr/bin/bison"; \ fi lib-eppic: cd eppic/libeppic && make eppic.so: ../defs.h $(APPFILE) lib-eppic gcc -g -O0 -Ieppic/libeppic -I.. -nostartfiles -shared -rdynamic -o eppic.so $(APPFILE) -fPIC $(TARGET_FLAGS) $(GDB_FLAGS) -Leppic/libeppic -leppic clean: if [ -d eppic/libeppic ]; \ then \ cd eppic/libeppic && make -i clean; \ fi rm -f eppic.so crash-utility-crash-9cd43f5/extensions/echo.c0000664000372000037200000000672515107550337020667 0ustar juerghjuergh/* echo.c - simple example of a crash extension * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2005, 2007, 2013 David Anderson * Copyright (C) 2002-2005, 2007, 2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" /* From the crash source top-level directory */ static void echo_init(void); /* constructor function */ static void echo_fini(void); /* destructor function (optional) */ static void cmd_echo(void); /* Declare the commands and their help data. 
*/ static char *help_echo[]; /* * Please making the functions and global variables static within your * extension if you don't want to make them visiable to subsequently * loaded extensions. Otherwise, non-static symbols within 2 extensions * that have the same name can cause confliction. */ static struct command_table_entry command_table[] = { { "echo", cmd_echo, help_echo, 0}, /* One or more commands, */ { NULL }, /* terminated by NULL, */ }; static void __attribute__((constructor)) echo_init(void) /* Register the command set. */ { register_extension(command_table); } /* * This function is called if the shared object is unloaded. * If desired, perform any cleanups here. */ static void __attribute__((destructor)) echo_fini(void) { } /* * Arguments are passed to the command functions in the global args[argcnt] * array. See getopt(3) for info on dash arguments. Check out defs.h and * other crash commands for usage of the myriad of utility routines available * to accomplish what your task. */ static void cmd_echo(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) fprintf(fp, "%s ", args[optind++]); fprintf(fp, "\n"); } /* * The optional help data is simply an array of strings in a defined format. * For example, the "help echo" command will use the help_echo[] string * array below to create a help page that looks like this: * * NAME * echo - echoes back its arguments * * SYNOPSIS * echo arg ... * * DESCRIPTION * This command simply echoes back its arguments. 
* * EXAMPLE * Echo back all command arguments: * * crash> echo hello, world * hello, world * */ static char *help_echo[] = { "echo", /* command name */ "echoes back its arguments", /* short description */ "arg ...", /* argument synopsis, or " " if none */ " This command simply echoes back its arguments.", "\nEXAMPLE", " Echo back all command arguments:\n", " crash> echo hello, world", " hello, world", NULL }; crash-utility-crash-9cd43f5/extensions/eppic.c0000664000372000037200000000021315107550337021033 0ustar juerghjuergh/* Place holder for proper working of the extension Makefile. Eppic crash application file is in eppic/applications/crash/eppic.c */ crash-utility-crash-9cd43f5/help.c0000664000372000037200000153213615107550337016503 0ustar juerghjuergh/* help.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2020 David Anderson * Copyright (C) 2002-2020 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" static void reshuffle_cmdlist(void); static int sort_command_name(const void *, const void *); static void display_commands(void); static void display_copying_info(void); static void display_warranty_info(void); static void display_output_info(void); static void display_input_info(void); static void display_README(void); static char *gnu_public_license[]; static char *gnu_public_license_v3[]; static char *version_info[]; static char *output_info[]; static char *input_info[]; static char *README[]; static void dump_registers(void); #define GPLv2 2 #define GPLv3 3 #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) static int GPL_version = GPLv2; #else static int GPL_version = GPLv3; #endif static char *program_usage_info[] = { "", "USAGE:", "", " crash [OPTION]... NAMELIST MEMORY-IMAGE[@ADDRESS] (dumpfile form)", " crash [OPTION]... [NAMELIST] (live system form)", "", "OPTIONS:", "", " NAMELIST", " This is a pathname to an uncompressed kernel image (a vmlinux", " file), or a Xen hypervisor image (a xen-syms file) which has", " been compiled with the \"-g\" option. If using the dumpfile form,", " a vmlinux file may be compressed in either gzip or bzip2 formats.", "", " MEMORY-IMAGE", " A kernel core dump file created by the netdump, diskdump, LKCD", " kdump, xendump or kvmdump facilities.", "", " If a MEMORY-IMAGE argument is not entered, the session will be", " invoked on the live system, which typically requires root privileges", " because of the device file used to access system RAM. By default, ", " /dev/crash will be used if it exists. If it does not exist, then ", " /dev/mem will be used; but if the kernel has been configured with ", " CONFIG_STRICT_DEVMEM, then /proc/kcore will be used. It is permissible", " to explicitly enter /dev/crash, /dev/mem or /proc/kcore.", "", " An @ADDRESS value must be appended to the MEMORY-IMAGE if the dumpfile", " is a raw RAM dumpfile that has no header information describing the file", " contents. 
Multiple MEMORY-IMAGE@ADDRESS ordered pairs may be entered,", " with each dumpfile containing a contiguous block of RAM, where the ADDRESS", " value is the physical start address of the block expressed in hexadecimal.", " The physical address value(s) will be used to create a temporary ELF header", " in /var/tmp, which will only exist during the crash session. If a raw RAM", " dumpfile represents a live memory source, such as that specified by the QEMU", " mem-path argument of a memory-backend-file object, then \"live:\" must be", " prepended to the MEMORY-IMAGE name.", "", " mapfile", " If the NAMELIST file is not the same kernel that is running", " (live system form), or the kernel that was running when the system", " crashed (dumpfile form), then the System.map file of the original ", " kernel should be entered on the command line.", "", " -h [option]", " --help [option]", " Without an option argument, display a crash usage help message.", " If the option argument is a crash command name, the help page", " for that command is displayed. If it is the string \"input\", a", " page describing the various crash command line input options is", " displayed. If it is the string \"output\", a page describing command", " line output options is displayed. If it is the string \"all\", then", " all of the possible help messages are displayed. After the help", " message is displayed, crash exits.", "", " -s ", " Silently proceed directly to the \"crash>\" prompt without displaying", " any version, GPL, or crash initialization data during startup, and by", " default, runtime command output is not passed to any scrolling command.", "", " -i file", " Execute the command(s) contained in \"file\" prior to displaying ", " the \"crash>\" prompt for interactive user input.", "", " -d num ", " Set the internal debug level. 
The higher the number, the more", " debugging data will be printed when crash initializes and runs.", "", " -S ", " Use /boot/System.map as the mapfile.", "", " -e vi | emacs", " Set the readline(3) command line editing mode to \"vi\" or \"emacs\". ", " The default editing mode is \"vi\".", "", " -f ", " Force the usage of a compressed vmlinux file if its original", " name does not start with \"vmlinux\".", "", " -k ", " Indicate that the NAMELIST file is an LKCD \"Kerntypes\" debuginfo file.", "", " -g [namelist]", " Determine if a vmlinux or xen-syms namelist file contains debugging data.", "", " -t ", " Display the system-crash timestamp and exit.", "", " -L ", " Attempt to lock all of its virtual address space into memory by", " calling mlockall(MCL_CURRENT|MCL_FUTURE) during initialization.", " If the system call fails, an error message will be displayed,", " but the session continues.", "", " -c tty-device", " Open the tty-device as the console used for debug messages.", "", " -p page-size", " If a processor's page size cannot be determined by the dumpfile, ", " and the processor default cannot be used, use page-size.", "", " -o filename", " Only used with the MEMORY-IMAGE@ADDRESS format for raw RAM dumpfiles,", " specifies a filename of a new ELF vmcore that will be created and used", " as the dumpfile. It will be saved to allow future use as a standalone", " vmcore, replacing the original raw RAM dumpfile.", "", " -m option=value", " --machdep option=value", " Pass an option and value pair to machine-dependent code. 
These", " architecture-specific option/pairs should only be required in", " very rare circumstances:", "", " X86_64:", " phys_base=", " irq_eframe_link=", " irq_stack_gap=", " max_physmem_bits=", " kernel_image_size=", " vm=orig (pre-2.6.11 virtual memory address ranges)", " vm=2.6.11 (2.6.11 and later virtual memory address ranges)", " vm=xen (Xen kernel virtual memory address ranges)", " vm=xen-rhel4 (RHEL4 Xen kernel virtual address ranges)", " vm=5level (5-level page tables)", " page_offset=", " PPC64:", " vm=orig", " vm=2.6.14 (4-level page tables)", " IA64:", " phys_start=", " init_stack_size=", " vm=4l (4-level page tables)", " ARM:", " phys_base=", " ARM64:", " phys_offset=", " kimage_voffset=", " max_physmem_bits=", " vabits_actual=", " X86:", " page_offset=", "", " -x ", " Automatically load extension modules from a particular directory.", " The directory is determined by the following order of precedence:", "", " (1) the directory specified in the CRASH_EXTENSIONS shell ", " environment variable", " (2) /usr/lib64/crash/extensions (64-bit architectures)", " (3) /usr/lib/crash/extensions (32-bit architectures)", " (4) the ./extensions subdirectory of the current directory", "", " --active", " Track only the active task on each cpu.", "", " --buildinfo", " Display the crash binary's build date, the user ID of the builder,", " the hostname of the machine where the build was done, the target", " architecture, the version number, and the compiler version.", "", " --memory_module modname", " Use the modname as an alternative kernel module to the crash.ko", " module that creates the /dev/crash device.", "", " --memory_device device", " Use device as an alternative device to the /dev/crash, /dev/mem", " or /proc/kcore devices.", "", " --log dumpfile", " Dump the contents of the kernel log buffer. 
A kernel namelist", " argument is not necessary, but the dumpfile must contain the", " VMCOREINFO data taken from the original /proc/vmcore ELF header.", " Note: this option is deprecated and will no longer work for", " kernel(>=v5.10).", "", " --no_kallsyms", " Do not use kallsyms-generated symbol information contained within", " kernel module object files.", "", " --no_modules", " Do not access or display any kernel module related information.", "", " --no_ikconfig", " Do not attempt to read configuration data that was built into", " kernels configured with CONFIG_IKCONFIG.", "", " --no_data_debug", " Do not verify the validity of all structure member offsets and", " structure sizes that it uses.", "", " --no_kmem_cache", " Do not initialize the kernel's slab cache infrastructure, and", " commands that use kmem_cache-related data will not work.", "", " --no_elf_notes", " Do not use the registers from the ELF NT_PRSTATUS notes saved", " in a compressed kdump header for backtraces.", "", " --kmem_cache_delay", " Delay the initialization of the kernel's slab cache infrastructure", " until it is required by a run-time command.", "", " --readnow", " Pass this flag to the embedded gdb module, which will override", " the two-stage strategy that it uses for reading symbol tables", " from the NAMELIST. 
If module symbol tables are loaded during", " runtime with the \"mod\" command, the same override will occur.", "", " --smp ", " Specify that the system being analyzed is an SMP kernel.", "", " -v", " --version", " Display the version of the crash utility, the version of the", " embedded gdb module, GPL information, and copyright notices.", "", " --cpus number", " Specify the number of cpus in the SMP system being analyzed.", "", " --osrelease dumpfile", " Display the OSRELEASE vmcoreinfo string from a kdump dumpfile", " header.", "", " --hyper", " Force the session to be that of a Xen hypervisor.", "", " --p2m_mfn pfn", " When a Xen Hypervisor or its dom0 kernel crashes, the dumpfile", " is typically analyzed with either the Xen hypervisor or the dom0", " kernel. It is also possible to analyze any of the guest domU", " kernels if the pfn_to_mfn_list_list pfn value of the guest kernel", " is passed on the command line along with its NAMELIST and the ", " dumpfile.", "", " --xen_phys_start physical-address", " Supply the base physical address of the Xen hypervisor's text", " and static data for older xendump dumpfiles that did not pass", " that information in the dumpfile header.", "", " --zero_excluded", " If the makedumpfile(8) facility has filtered a compressed kdump", " dumpfile to exclude various types of non-essential pages, or has", " marked a compressed or ELF kdump dumpfile as incomplete due to", " an ENOSPC or other error during its creation, any attempt to", " read missing pages will fail. With this flag, reads from any", " of those pages will return zero-filled memory.", "", " --no_panic", " Do not attempt to find the task that was running when the kernel", " crashed. 
Set the initial context to that of the \"swapper\" task", " on cpu 0.", "", " --more ", " Use /bin/more as the command output scroller, overriding the", " default of /usr/bin/less and any settings in either ./.crashrc", " or $HOME/.crashrc.", "", " --less ", " Use /usr/bin/less as the command output scroller, overriding any", " settings in either ./.crashrc or $HOME/.crashrc.", "", " --CRASHPAGER", " Use the output paging command defined in the CRASHPAGER shell", " environment variable, overriding any settings in either ./.crashrc ", " or $HOME/.crashrc.", "", " --no_scroll", " Do not pass run-time command output to any scrolling command.", "", " --no_strip", " Do not strip cloned kernel text symbol names.", "", " --no_crashrc", " Do not execute the commands in either $HOME/.crashrc or ./.crashrc.", "", " --mod directory", " When loading the debuginfo data of kernel modules with the \"mod -S\"", " command, search for their object files in directory instead of in ", " the standard location.", "", " --src directory", " Search for the kernel source code in directory instead of in the", " standard location that is compiled into the debuginfo data.", "", " --reloc size", " When analyzing live x86 kernels configured with a CONFIG_PHYSICAL_START ", " value that is larger than its CONFIG_PHYSICAL_ALIGN value, then it will", " be necessary to enter a relocation size equal to the difference between", " the two values.", "", " --hash count", " Set the number of internal hash queue heads used for list gathering", " and verification. The default count is 32768.", "", " --kaslr offset | auto", " If x86, x86_64, s390x or loongarch64 kernel was configured with", " CONFIG_RANDOMIZE_BASE, the offset value is equal to the difference", " between the symbol values compiled into the vmlinux file and their", " relocated KASLR value. 
If set to auto, the KASLR offset value will", " be automatically calculated.", "", " --minimal", " Bring up a session that is restricted to the log, dis, rd, sym,", " eval, set and exit commands. This option may provide a way to", " extract some minimal/quick information from a corrupted or truncated", " dumpfile, or in situations where one of the several kernel subsystem ", " initialization routines would abort the crash session.", "", " --kvmhost [32|64]", " When examining an x86 KVM guest dumpfile, this option specifies", " that the KVM host that created the dumpfile was an x86 (32-bit)", " or an x86_64 (64-bit) machine, overriding the automatically", " determined value.", "", " --kvmio ", " override the automatically-calculated KVM guest I/O hole size.", "", " --offline [show|hide]", " Show or hide command output that is associated with offline cpus,", " overriding any settings in either ./.crashrc or $HOME/.crashrc.", "", "FILES:", "", " .crashrc", " Initialization commands. The file can be located in the user's", " HOME directory and/or the current directory. Commands found in", " the .crashrc file in the HOME directory are executed before", " those in the current directory's .crashrc file.", "", "ENVIRONMENT VARIABLES:", "", " EDITOR ", " Command input is read using readline(3). If EDITOR is set to", " emacs or vi then suitable keybindings are used. If EDITOR is", " not set, then vi is used. This can be overridden by \"set vi\" or", " \"set emacs\" commands located in a .crashrc file, or by entering", " \"-e emacs\" on the crash command line.", "", " CRASHPAGER", " If CRASHPAGER is set, its value is used as the name of the program", " to which command output will be sent. 
If not, then command output", " output is sent to \"/usr/bin/less -E -X\" by default.", "", " CRASH_MODULE_PATH", " Specifies an alternative directory tree to search for kernel", " module object files.", "", " CRASH_EXTENSIONS", " Specifies a directory containing extension modules that will be", " loaded automatically if the -x command line option is used.", "", NULL }; void program_usage(int form) { if (form == SHORT_FORM) { fprintf(fp, "\nUsage:\n\n"); fprintf(fp, "%s\n%s\n", program_usage_info[3], program_usage_info[4]); fprintf(fp, "\nEnter \"%s -h\" for details.\n", pc->program_name); clean_exit(1); } else { FILE *scroll; char *scroll_command; char **p; if ((scroll_command = setup_scroll_command()) && (scroll = popen(scroll_command, "w"))) fp = scroll; else scroll = NULL; for (p = program_usage_info; *p; p++) { fprintf(fp, *p, pc->program_name); fprintf(fp, "\n"); } fflush(fp); if (scroll) pclose(scroll); clean_exit(0); } } /* * Get an updated count of commands for subsequent help menu display, * reshuffling the deck if this is the first time or if something's changed. */ void help_init(void) { struct command_table_entry *cp; struct extension_table *ext; for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->ncmds++; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->ncmds++; } } if (!pc->cmdlist) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **) malloc(sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot malloc command list space\n"); } else if (pc->ncmds > pc->cmdlistsz) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **)realloc(pc->cmdlist, sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot realloc command list space\n"); } reshuffle_cmdlist(); } /* * If the command list is modified during runtime, re-shuffle the list * for proper help menu display. 
*/ static void reshuffle_cmdlist(void) { int i, cnt; struct command_table_entry *cp; struct extension_table *ext; for (i = 0; i < pc->cmdlistsz; i++) pc->cmdlist[i] = NULL; for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->cmdlist[cnt++] = cp->name; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->cmdlist[cnt++] = cp->name; } } if (cnt > pc->cmdlistsz) error(FATAL, "help table malfunction!\n"); qsort((void *)pc->cmdlist, (size_t)cnt, sizeof(char *), sort_command_name); } /* * The help list is in alphabetical order, with exception of the "q" command, * which has historically always been the last command in the list. */ static int sort_command_name(const void *name1, const void *name2) { char **s1, **s2; s1 = (char **)name1; s2 = (char **)name2; if (STREQ(*s1, "q")) return 1; return strcmp(*s1, *s2); } /* * Get help for a command, to dump an internal table, or the GNU public * license copying/warranty information. 
*/ void cmd_help(void) { int c; int oflag; oflag = 0; while ((c = getopt(argcnt, args, "efNDdmM:ngcaBbHhkKsvVoptTzLOr")) != EOF) { switch(c) { case 'e': dump_extension_table(VERBOSE); return; case 'f': dump_filesys_table(VERBOSE); return; case 'n': case 'D': dumpfile_memory(DUMPFILE_MEM_DUMP); return; case 'd': dump_dev_table(); return; case 'M': dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL)); return; case 'm': dump_machdep_table(0); return; case 'g': dump_gdb_data(); return; case 'N': dump_net_table(); return; case 'a': dump_alias_data(); return; case 'b': dump_shared_bufs(); return; case 'B': dump_build_data(); return; case 'c': dump_numargs_cache(); return; case 'H': dump_hash_table(VERBOSE); return; case 'h': dump_hash_table(!VERBOSE); return; case 'k': dump_kernel_table(!VERBOSE); return; case 'K': dump_kernel_table(VERBOSE); return; case 's': dump_symbol_table(); return; case 'V': dump_vm_table(VERBOSE); return; case 'v': dump_vm_table(!VERBOSE); return; case 'O': dump_offset_table(NULL, TRUE); return; case 'o': oflag = TRUE; break; case 'T': dump_task_table(VERBOSE); return; case 't': dump_task_table(!VERBOSE); return; case 'p': dump_program_context(); return; case 'z': fprintf(fp, "help options:\n"); fprintf(fp, " -a - alias data\n"); fprintf(fp, " -b - shared buffer data\n"); fprintf(fp, " -B - build data\n"); fprintf(fp, " -c - numargs cache\n"); fprintf(fp, " -d - device table\n"); fprintf(fp, " -D - dumpfile contents/statistics\n"); fprintf(fp, " -e - extension table data\n"); fprintf(fp, " -f - filesys table\n"); fprintf(fp, " -g - gdb data\n"); fprintf(fp, " -h - hash_table data\n"); fprintf(fp, " -H - hash_table data (verbose)\n"); fprintf(fp, " -k - kernel_table\n"); fprintf(fp, " -K - kernel_table (verbose)\n"); fprintf(fp, " -L - LKCD page cache environment\n"); fprintf(fp, " -M machine specific\n"); fprintf(fp, " -m - machdep_table\n"); fprintf(fp, " -N - net_table\n"); fprintf(fp, " -n - dumpfile contents/statistics\n"); fprintf(fp, " -o 
- offset_table and size_table\n"); fprintf(fp, " -p - program_context\n"); fprintf(fp, " -r - dump registers from dumpfile header\n"); fprintf(fp, " -s - symbol table data\n"); fprintf(fp, " -t - task_table\n"); fprintf(fp, " -T - task_table plus context_array\n"); fprintf(fp, " -v - vm_table\n"); fprintf(fp, " -V - vm_table (verbose)\n"); fprintf(fp, " -z - help options\n"); return; case 'L': dumpfile_memory(DUMPFILE_ENVIRONMENT); return; case 'r': dump_registers(); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, COMPLETE_HELP); if (!args[optind]) { if (oflag) dump_offset_table(NULL, FALSE); else display_help_screen(""); return; } do { if (oflag) dump_offset_table(args[optind], FALSE); else cmd_usage(args[optind], COMPLETE_HELP|MUST_HELP); optind++; } while (args[optind]); } static void dump_registers(void) { if (pc->flags2 & QEMU_MEM_DUMP_ELF) { dump_registers_for_qemu_mem_dump(); return; } else if (DISKDUMP_DUMPFILE()) { dump_registers_for_compressed_kdump(); return; } else if (NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) { dump_registers_for_elf_dumpfiles(); return; } else if (VMSS_DUMPFILE()) { dump_registers_for_vmss_dump(); return; } error(FATAL, "-r option not supported on %s\n", ACTIVE() ? "a live system" : "this dumpfile type"); } /* * Format and display the help menu. 
*/ void display_help_screen(char *indent) { int i, j, rows; char **namep; help_init(); fprintf(fp, "\n%s", indent); rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS; for (i = 0; i < rows; i++) { namep = &pc->cmdlist[i]; for (j = 0; j < HELP_COLUMNS; j++) { fprintf(fp,"%-15s", *namep); namep += rows; if ((namep - pc->cmdlist) >= pc->ncmds) break; } fprintf(fp,"\n%s", indent); } fprintf(fp, "\n%s%s version: %-6s gdb version: %s\n", indent, pc->program_name, pc->program_version, pc->gdb_version); fprintf(fp, "%sFor help on any command above, enter \"help \".\n", indent); fprintf(fp, "%sFor help on input options, enter \"help input\".\n", indent); fprintf(fp, "%sFor help on output options, enter \"help output\".\n", indent); #ifdef NO_LONGER_TRUE fprintf(fp, "%sFor the most recent version: " "http://www.missioncriticallinux.com/download\n\n", indent); #else fprintf(fp, "\n"); #endif } /* * Used for generating HTML pages, dump the commands in the order * they would be seen on the help menu, i.e., from left-to-right, row-by-row. * Line ends are signaled with a "BREAK" string. */ static void display_commands(void) { int i, j, rows; char **namep; help_init(); rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS; for (i = 0; i < rows; i++) { namep = &pc->cmdlist[i]; for (j = 0; j < HELP_COLUMNS; j++) { fprintf(fp,"%s\n", *namep); namep += rows; if ((namep - pc->cmdlist) >= pc->ncmds) { fprintf(fp, "BREAK\n"); break; } } } } /* * Help data for a command must be formatted using the following template: "command-name", "command description line", "argument-usage line", "description...", "description...", "description...", NULL, * The first line is concatenated with the second line, and will follow the * help command's "NAME" header. * The first and third lines will also be concatenated, and will follow the * help command's "SYNOPSIS" header. If the command has no arguments, enter * a string consisting of a space, i.e., " ". 
* The fourth and subsequent lines will follow the help command's "DESCRIPTION" * header. * * The program name can be referenced by using the %%s format. The final * entry in each command's help data string list must be a NULL. */ char *help_foreach[] = { "foreach", "display command data for multiple tasks in the system", "[[pid | taskp | name | state | [kernel | user | gleader]] ...]\n" " command [flag] [argument]", " This command allows for an examination of various kernel data associated", " with any, or all, tasks in the system, without having to set the context", " to each targeted task.\n", " pid perform the command(s) on this PID.", " taskp perform the command(s) on task referenced by this hexadecimal", " task_struct pointer.", " name perform the command(s) on all tasks with this name. If the", " task name can be confused with a foreach command name, then", " precede the name string with a \"\\\". If the name string is", " enclosed within \"'\" characters, then the encompassed string", " must be a POSIX extended regular expression that will be used", " to match task names.", " user perform the command(s) on all user (non-kernel) threads.", " gleader perform the command(s) on all user (non-kernel) thread group leaders.", " kernel perform the command(s) on all kernel threads.", " active perform the command(s) on the active thread on each CPU.", " state perform the command(s) on all tasks in the specified state, which", " may be one of: RU, IN, UN, ST, ZO, TR, SW, DE, WA, PA, ID or NE.\n", " If none of the task-identifying arguments above are entered, the command", " will be performed on all tasks.\n", " command select one or more of the following commands to be run on the tasks", " selected, or on all tasks:\n", " bt run the \"bt\" command (optional flags: -r -t -l -e -R -f -F", " -o -s -x -d)", " vm run the \"vm\" command (optional flags: -p -v -m -R -d -x)", " task run the \"task\" command (optional flags: -R -d -x)", " files run the \"files\" command 
(optional flag: -c -R)", " net run the \"net\" command (optional flags: -s -S -R -d -x)", " set run the \"set\" command", " ps run the \"ps\" command (optional flags: -G -s -p -c -t -l -a", " -g -r -y)", " sig run the \"sig\" command (optional flag: -g)", " vtop run the \"vtop\" command (optional flags: -c -u -k)\n", " flag Pass this optional flag to the command selected.", " argument Pass this argument to the command selected.", " ", " A header containing the PID, task address, cpu and command name will be", " pre-pended before the command output for each selected task. Consult the", " help page of each of the command types above for details.", "\nEXAMPLES", " Display the stack traces for all tasks:\n", " %s> foreach bt", " PID: 4752 TASK: c7680000 CPU: 1 COMMAND: \"xterm\"", " #0 [c7681edc] schedule at c01135f6", " (void)", " #1 [c7681f34] schedule_timeout at c01131ff", " (24)", " #2 [c7681f64] do_select at c0132838", " (5, c7681fa4, c7681fa0)", " #3 [c7681fbc] sys_select at c0132dad", " (5, 8070300, 8070380, 0, 0)", " #4 [bffffb0c] system_call at c0109944", " EAX: 0000008e EBX: 00000005 ECX: 08070300 EDX: 08070380 ", " DS: 002b ESI: 00000000 ES: 002b EDI: 00000000 ", " SS: 002b ESP: bffffadc EBP: bffffb0c ", " CS: 0023 EIP: 402259ee ERR: 0000008e EFLAGS: 00000246 ", " ", " PID: 557 TASK: c5600000 CPU: 0 COMMAND: \"nfsd\"", " #0 [c5601f38] schedule at c01135f6", " (void)", " #1 [c5601f90] schedule_timeout at c01131ff", " (c5600000)", " #2 [c5601fb8] svc_recv at c805363a", " (c0096f40, c5602800, 7fffffff, 100, c65c9f1c)", " #3 [c5601fec] (nfsd module) at c806e303", " (c5602800, c5602800, c0096f40, 6c6e0002, 50)", " #4 [c65c9f24] kernel_thread at c010834f", " (0, 0, ext2_file_inode_operations)", " ", " PID: 824 TASK: c7c84000 CPU: 0 COMMAND: \"mingetty\"", " ...\n", " Display the task_struct structure for each \"bash\" command:\n", " %s> foreach bash task", " ...\n", " Display the open files for all tasks:\n", " %s> foreach files", " ...\n", " Display the state of 
tasks whose name contains a match to \"event.*\":\n", " %s> foreach 'event.*' task -R state", " PID: 99 TASK: ffff8804750d5500 CPU: 0 COMMAND: \"events/0\"", " state = 1,", " ", " PID: 100 TASK: ffff8804750d4ac0 CPU: 1 COMMAND: \"events/1\"", " state = 1,", " ", " PID: 101 TASK: ffff8804750d4080 CPU: 2 COMMAND: \"events/2\"", " state = 1,", " ...\n", " Display the stack traces for all blocked (TASK_UNINTERRUPTIBLE) tasks:\n", " %s> foreach UN bt", " PID: 428 TASK: ffff880036b6c560 CPU: 1 COMMAND: \"jbd2/dm-1-8\"", " #0 [ffff880035779a70] __schedule at ffffffff815df272", " #1 [ffff880035779b08] schedule at ffffffff815dfacf", " #2 [ffff880035779b18] io_schedule at ffffffff815dfb7f", " #3 [ffff880035779b38] sleep_on_page at ffffffff81119a4e", " #4 [ffff880035779b48] __wait_on_bit at ffffffff815e039f", " #5 [ffff880035779b98] wait_on_page_bit at ffffffff81119bb8", " #6 [ffff880035779be8] filemap_fdatawait_range at ffffffff81119ccc", " #7 [ffff880035779cd8] filemap_fdatawait at ffffffff81119d8b", " #8 [ffff880035779ce8] jbd2_journal_commit_transaction at ffffffff8123a99c", " #9 [ffff880035779e58] kjournald2 at ffffffff8123ee7b", " #10 [ffff880035779ee8] kthread at ffffffff8108fb9c", " #11 [ffff880035779f48] kernel_thread_helper at ffffffff815ebaf4", " ...\n", NULL }; char *help_ascii[] = { "ascii", "translate a hexadecimal string to ASCII", "value ...", " Translates 32-bit or 64-bit hexadecimal values to ASCII. If no argument", " is entered, an ASCII chart is displayed.", "\nEXAMPLES", " Translate the hexadecimal value of 0x62696c2f7273752f to ASCII:", "\n %s> ascii 62696c2f7273752f", " 62696c2f7273752f: /usr/lib", "\n Display an ASCII chart:", "\n %s> ascii", " ", " 0 1 2 3 4 5 6 7", " +-------------------------------", " 0 | NUL DLE SP 0 @ P ' p", " 1 | SOH DC1 ! 
1 A Q a q", " 2 | STX DC2 \" 2 B R b r", " 3 | ETX DC3 # 3 C S c s", " 4 | EOT DC4 $ 4 D T d t", " 5 | ENQ NAK \% 5 E U e u", " 6 | ACK SYN & 6 F V f v", " 7 | BEL ETB ` 7 G W g w", " 8 | BS CAN ( 8 H X h x", " 9 | HT EM ) 9 I Y i y", " A | LF SUB * : J Z j z", " B | VT ESC + ; K [ k {", " C | FF FS , < L \\ l |", " D | CR GS _ = M ] m }", " E | SO RS . > N ^ n ~", " F | SI US / ? O - o DEL", NULL }; char *help_sbitmapq[] = { "sbitmapq", "sbitmap_queue struct contents", "[-s struct[.member[,member]] -a address [-p] [-v]] -[x|d] address", " The command dumps the contents of the sbitmap_queue structure and", " the used bits in the bitmap. Also, it shows the dump of a structure", " array associated with the sbitmap_queue.", "", " The arguments are as follows:", "", " -s struct name of a C-code structure, that is stored in an array", " associated with sbitmap_queue structure. Use the", " \"struct.member\" format in order to display a particular", " member of the structure. -s option requires -a option", " -a address address of a structure array associated with sbitmap_queue", " structure. The set bits in sbitmap are used for the index", " in an associated array.", " -p associated with sbitmap_queue array contains the points of", " structure.", " -x override default output format with hexadecimal format.", " -d override default output format with decimal format.", " -v By default, the sbitmap command shows only a used sbitmap", " index and a structure address in the associated array.", " This flag says to print a formatted display of the", " contents of a structure in an associated array. 
-v option", " requires of -s.", "", "EXAMPLES", "", " All examples are shown on the base of Linux Target system with iSCSI", " transport.", "", " Display the common sbitmap information for target session:", "", " %s> struct -oh se_session 0xc0000000e118c760 | grep sbitmap_queue", " [c0000000e118c808] struct sbitmap_queue sess_tag_pool;", " %s>", " %s> sbitmapq c0000000e118c808", " depth = 136", " busy = 4", " cleared = 26", " bits_per_word = 32", " map_nr = 5", " alloc_hint = {74, 36, 123, 101}", " wake_batch = 8", " wake_index = 0", " ws_active = 0", " ws = {", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " { .wait_cnt = 8, .wait = inactive },", " }", " round_robin = 0", " min_shallow_depth = 4294967295", "", " 00000000: 0000 0000 0000 0000 0030 0000 0000 0000", " 00000010: 00", "", " Display the addresses of structure are associated with", " sbitmap_queue (for iscsi it is 'iscsi_cmd' structure):", "", " %s> struct se_session 0xc0000000e118c760 | grep sess_cmd_map", " sess_cmd_map = 0xc0000000671c0000,", " %s>", " %s> sbitmapq -s iscsi_cmd -a 0xc0000000671c0000 c0000000e118c808", " 76: 0xc0000000671d5600", " 77: 0xc0000000671d5a80", "", " Dump of formatted content of structures:", "", " %s> sbitmapq -s iscsi_cmd -a 0xc0000000671c0000 -v c0000000e118c808", " 76 (0xc0000000671d5600):", " struct iscsi_cmd {", " dataout_timer_flags = 0,", " dataout_timeout_retries = 0 '\\000',", " error_recovery_count = 0 '\\000',", " deferred_i_state = ISTATE_NO_STATE,", " i_state = ISTATE_SENT_STATUS,", " ...", " first_data_sg = 0xc0000000e306b080,", " first_data_sg_off = 0,", " kmapped_nents = 1,", " sense_reason = 0", " }", " 77 (0xc0000000671d5a80):", " struct iscsi_cmd {", " dataout_timer_flags = 0,", " dataout_timeout_retries = 0 '\\000',", 
" error_recovery_count = 0 '\\000',", " deferred_i_state = ISTATE_NO_STATE,", " i_state = ISTATE_NEW_CMD,", " ...", " first_data_sg = 0x0,", " first_data_sg_off = 0,", " kmapped_nents = 0,", " sense_reason = 0", " }", NULL }; char *help_quit[] = { "quit", "exit this session", " ", " Bail out of the current %s session.", "\nNOTE", " This command is equivalent to the \"exit\" command.", NULL }; char *help_exit[] = { "exit", "exit this session", " ", " Bail out of the current %s session.", "\nNOTE", " This command is equivalent to the \"q\" command.", NULL }; char *help_help[] = { "help", "get help", "[command | all] [-