/*
 * arm.c - core analysis suite
 *
 * Authors:
 *   Thomas Fänge
 *   Jan Karlsson
 *   Mika Westerberg
 *
 * Copyright (C) 2010-2011 Nokia Corporation
 * Copyright (C) 2010 Sony Ericsson. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifdef ARM

#include <elf.h>
#include "defs.h"

static void arm_parse_cmdline_args(void);
static void arm_get_crash_notes(void);
static int arm_verify_symbol(const char *, ulong, char);
static int arm_is_module_addr(ulong);
static int arm_is_kvaddr(ulong);
static int arm_is_uvaddr(ulong, struct task_context *);
static int arm_in_exception_text(ulong);
static int arm_in_ret_from_syscall(ulong, int *);
static void arm_back_trace(struct bt_info *);
static void arm_back_trace_cmd(struct bt_info *);
static ulong arm_processor_speed(void);
static int arm_translate_pte(ulong, void *, ulonglong);
static int arm_vtop(ulong, ulong *, physaddr_t *, int);
static int arm_kvtop(struct task_context *, ulong, physaddr_t *, int);
static int arm_uvtop(struct task_context *, ulong, physaddr_t *, int);
static int arm_get_frame(struct bt_info *, ulong *, ulong *);
static int arm_get_dumpfile_stack_frame(struct bt_info *, ulong *, ulong *);
static void arm_get_stack_frame(struct bt_info *, ulong *, ulong *);
static void arm_dump_exception_stack(ulong, ulong);
static void arm_display_full_frame(struct bt_info *, ulong);
static ulong arm_vmalloc_start(void);
static int arm_is_task_addr(ulong);
static int arm_dis_filter(ulong, char *, unsigned int);
static int arm_eframe_search(struct bt_info *);
static ulong arm_get_task_pgd(ulong);
static void arm_cmd_mach(void);
static void arm_display_machine_stats(void);
static int arm_get_smp_cpus(void);
static void arm_init_machspec(void);

static struct line_number_hook arm_line_number_hooks[];
static struct machine_specific arm_machine_specific;

/**
 * struct arm_cpu_context_save - idle task registers
 *
 * This structure holds idle task registers. Only FP, SP, and PC are needed for
 * unwinding the stack.
 */
struct arm_cpu_context_save {
	ulong	fp;
	ulong	sp;
	ulong	pc;
};

/*
 * Holds registers during the crash.
 */
static struct arm_pt_regs *panic_task_regs;

#define PGDIR_SIZE()	(4 * PAGESIZE())
#define PGDIR_OFFSET(X)	(((ulong)(X)) & (PGDIR_SIZE() - 1))

#define _SECTION_PAGE_MASK	(~((MEGABYTES(1))-1))

#define PMD_TYPE_MASK		3
#define PMD_TYPE_SECT		2
#define PMD_TYPE_TABLE		1
#define PMD_TYPE_SECT_LPAE	1

static inline ulong *
pmd_page_addr(ulong pmd)
{
	ulong ptr;

	if (machdep->flags & PGTABLE_V2) {
		ptr = PAGEBASE(pmd);
	} else {
		ptr = pmd & ~(PTRS_PER_PTE * sizeof(void *) - 1);
		ptr += PTRS_PER_PTE * sizeof(void *);
	}

	return (ulong *)ptr;
}

/*
 * "Linux" PTE definitions.
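 *
 * These software bits live in the Linux copy of each page table entry,
 * next to the hardware entry (see the layout diagram in arm_vtop()
 * below). As a worked example with an illustrative value, not one taken
 * from a real dump: a Linux PTE of 0x600010c3 has bits 0, 1, 6 and 7
 * set, so pte_present(), pte_young(), pte_dirty() and pte_write() are
 * all true for it.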
*/ #define L_PTE_PRESENT (1 << 0) #define L_PTE_YOUNG (1 << 1) #define L_PTE_FILE (1 << 2) #define L_PTE_DIRTY (1 << 6) #define L_PTE_WRITE (1 << 7) #define L_PTE_RDONLY L_PTE_WRITE #define L_PTE_USER (1 << 8) #define L_PTE_EXEC (1 << 9) #define L_PTE_XN L_PTE_EXEC #define L_PTE_SHARED (1 << 10) #define pte_val(pte) (pte) #define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) #define pte_write(pte) (pte_val(pte) & L_PTE_WRITE) #define pte_rdonly(pte) (pte_val(pte) & L_PTE_RDONLY) #define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) #define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC) #define pte_xn(pte) (pte_val(pte) & L_PTE_XN) /* * Following stuff is taken directly from the kernel sources. These are used in * dump_exception_stack() to format an exception stack entry. */ #define USR26_MODE 0x00000000 #define FIQ26_MODE 0x00000001 #define IRQ26_MODE 0x00000002 #define SVC26_MODE 0x00000003 #define USR_MODE 0x00000010 #define FIQ_MODE 0x00000011 #define IRQ_MODE 0x00000012 #define SVC_MODE 0x00000013 #define ABT_MODE 0x00000017 #define UND_MODE 0x0000001b #define SYSTEM_MODE 0x0000001f #define MODE32_BIT 0x00000010 #define MODE_MASK 0x0000001f #define PSR_T_BIT 0x00000020 #define PSR_F_BIT 0x00000040 #define PSR_I_BIT 0x00000080 #define PSR_A_BIT 0x00000100 #define PSR_E_BIT 0x00000200 #define PSR_J_BIT 0x01000000 #define PSR_Q_BIT 0x08000000 #define PSR_V_BIT 0x10000000 #define PSR_C_BIT 0x20000000 #define PSR_Z_BIT 0x40000000 #define PSR_N_BIT 0x80000000 #define isa_mode(regs) \ ((((regs)->ARM_cpsr & PSR_J_BIT) >> 23) | \ (((regs)->ARM_cpsr & PSR_T_BIT) >> 5)) #define processor_mode(regs) \ ((regs)->ARM_cpsr & MODE_MASK) #define interrupts_enabled(regs) \ (!((regs)->ARM_cpsr & PSR_I_BIT)) #define fast_interrupts_enabled(regs) \ (!((regs)->ARM_cpsr & PSR_F_BIT)) static const char *processor_modes[] = { "USER_26", "FIQ_26", "IRQ_26", "SVC_26", "UK4_26", "UK5_26", "UK6_26", "UK7_26" , "UK8_26", "UK9_26", "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", "USER_32", "FIQ_32", "IRQ_32", "SVC_32", "UK4_32", "UK5_32", "UK6_32", "ABT_32", "UK8_32", "UK9_32", "UK10_32", "UND_32", "UK12_32", "UK13_32", "UK14_32", "SYS_32", }; static const char *isa_modes[] = { "ARM" , "Thumb" , "Jazelle", "ThumbEE", }; #define NOT_IMPLEMENTED() \ error(FATAL, "%s: N/A\n", __func__) /* * Do all necessary machine-specific setup here. This is called several times * during initialization. 
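 *
 * The "when" argument selects the phase; in sketch form, per the switch
 * below, the callers invoke:
 *
 *   arm_init(PRE_SYMTAB);   then PRE_GDB, POST_GDB and POST_VM in turn
 *   arm_init(LOG_ONLY);     used when only the kernel log is extracted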
*/ void arm_init(int when) { ulong vaddr; char *string; struct syment *sp; #if defined(__i386__) || defined(__x86_64__) if (ACTIVE()) error(FATAL, "compiled for the ARM architecture\n"); #endif switch (when) { case PRE_SYMTAB: machdep->verify_symbol = arm_verify_symbol; machdep->machspec = &arm_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong)machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; if (machdep->cmdline_args[0]) arm_parse_cmdline_args(); break; case PRE_GDB: if ((machdep->pgd = (char *)malloc(PGDIR_SIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PMDSIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); /* * LPAE requires an additional page for the PGD, * so PG_DIR_SIZE = 0x5000 for LPAE */ if ((string = pc->read_vmcoreinfo("CONFIG_ARM_LPAE"))) { machdep->flags |= PAE; free(string); } else if ((sp = next_symbol("swapper_pg_dir", NULL)) && (sp->value - symbol_value("swapper_pg_dir")) == 0x5000) machdep->flags |= PAE; machdep->kvbase = symbol_value("_stext") & ~KVBASE_MASK; machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = arm_is_kvaddr; machdep->is_uvaddr = arm_is_uvaddr; machdep->eframe_search = arm_eframe_search; machdep->back_trace = arm_back_trace_cmd; machdep->processor_speed = arm_processor_speed; machdep->uvtop = arm_uvtop; machdep->kvtop = arm_kvtop; machdep->get_task_pgd = arm_get_task_pgd; machdep->get_stack_frame = arm_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = arm_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = arm_vmalloc_start; machdep->is_task_addr = arm_is_task_addr; machdep->dis_filter = arm_dis_filter; machdep->cmd_mach = arm_cmd_mach; machdep->get_smp_cpus = arm_get_smp_cpus; machdep->line_number_hooks = arm_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; machdep->dump_irq = generic_dump_irq; machdep->show_interrupts = generic_show_interrupts; machdep->get_irq_affinity = generic_get_irq_affinity; arm_init_machspec(); break; case POST_GDB: /* * Starting from 2.6.38 hardware and Linux page tables * were reordered. See also mainline kernel commit * d30e45eeabe (ARM: pgtable: switch order of Linux vs * hardware page tables). */ if (THIS_KERNEL_VERSION > LINUX(2,6,37) || STRUCT_EXISTS("pteval_t")) machdep->flags |= PGTABLE_V2; if (THIS_KERNEL_VERSION >= LINUX(3,3,0) || symbol_exists("idmap_pgd")) machdep->flags |= IDMAP_PGD; if (machdep->flags & PAE) { machdep->section_size_bits = _SECTION_SIZE_BITS_LPAE; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS_LPAE; } else { machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; } if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); /* * Registers for idle threads are saved in * thread_info.cpu_context. 
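		 *
		 * The offsets initialized below are what arm_get_frame()
		 * later uses to pull saved registers out of a buffered
		 * thread_info; in sketch form, the saved SP is read as:
		 *
		 *   sp = *(ulong *)(tt->thread_info +
		 *           OFFSET(thread_info_cpu_context) +
		 *           OFFSET(cpu_context_save_sp));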
*/ STRUCT_SIZE_INIT(cpu_context_save, "cpu_context_save"); MEMBER_OFFSET_INIT(cpu_context_save_r7, "cpu_context_save", "r7"); MEMBER_OFFSET_INIT(cpu_context_save_fp, "cpu_context_save", "fp"); MEMBER_OFFSET_INIT(cpu_context_save_sp, "cpu_context_save", "sp"); MEMBER_OFFSET_INIT(cpu_context_save_pc, "cpu_context_save", "pc"); MEMBER_OFFSET_INIT(thread_info_cpu_context, "thread_info", "cpu_context"); /* * We need to have information about note_buf_t which is used to * hold ELF note containing registers and status of the thread * that panic'd. */ STRUCT_SIZE_INIT(note_buf, "note_buf_t"); STRUCT_SIZE_INIT(elf_prstatus, "elf_prstatus"); MEMBER_OFFSET_INIT(elf_prstatus_pr_pid, "elf_prstatus", "pr_pid"); MEMBER_OFFSET_INIT(elf_prstatus_pr_reg, "elf_prstatus", "pr_reg"); if (!machdep->hz) machdep->hz = 100; break; case POST_VM: machdep->machspec->vmalloc_start_addr = vt->high_memory; /* * Modules are placed in first vmalloc'd area. This is 16MB * below PAGE_OFFSET. */ machdep->machspec->modules_end = machdep->kvbase - 1; vaddr = first_vmalloc_address(); if (vaddr > machdep->machspec->modules_end) machdep->machspec->modules_vaddr = DEFAULT_MODULES_VADDR; else machdep->machspec->modules_vaddr = vaddr; /* * crash_notes contains machine specific information about the * crash. In particular, it contains CPU registers at the time * of the crash. We need this information to extract correct * backtraces from the panic task. */ if (!ACTIVE()) arm_get_crash_notes(); if (init_unwind_tables()) { if (CRASHDEBUG(1)) fprintf(fp, "using unwind tables\n"); } else { if (CRASHDEBUG(1)) fprintf(fp, "using framepointers\n"); } break; case LOG_ONLY: machdep->machspec = &arm_machine_specific; machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL & 0xffff0000UL; arm_init_machspec(); break; } } void arm_dump_machdep_table(ulong arg) { const struct machine_specific *ms; int others, i; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & KSYMS_START) fprintf(fp, "%sKSYMS_START", others++ ? "|" : ""); if (machdep->flags & PHYS_BASE) fprintf(fp, "%sPHYS_BASE", others++ ? "|" : ""); if (machdep->flags & PGTABLE_V2) fprintf(fp, "%sPGTABLE_V2", others++ ? "|" : ""); if (machdep->flags & IDMAP_PGD) fprintf(fp, "%sIDMAP_PGD", others++ ? "|" : ""); if (machdep->flags & PAE) fprintf(fp, "%sPAE", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->kvbase); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %lx\n", (ulong)machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %lld (0x%llx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: arm_eframe_search()\n"); fprintf(fp, " back_trace: arm_back_trace_cmd()\n"); fprintf(fp, " processor_speed: arm_processor_speed()\n"); fprintf(fp, " uvtop: arm_uvtop()\n"); fprintf(fp, " kvtop: arm_kvtop()\n"); fprintf(fp, " get_task_pgd: arm_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: arm_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: arm_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: arm_vmalloc_start()\n"); fprintf(fp, " is_task_addr: arm_is_task_addr()\n"); fprintf(fp, " verify_symbol: arm_verify_symbol()\n"); fprintf(fp, " dis_filter: arm_dis_filter()\n"); fprintf(fp, " cmd_mach: arm_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: arm_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: arm_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: arm_is_uvaddr()\n"); fprintf(fp, " verify_paddr: generic_verify_paddr()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " xendump_p2m_create: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: NULL\n"); fprintf(fp, " line_number_hooks: arm_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, "clear_machdep_cache: NULL\n"); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? 
machdep->cmdline_args[i] : "(unused)");
	}

	ms = machdep->machspec;
	fprintf(fp, "            machspec: %lx\n", (ulong)ms);
	fprintf(fp, "           phys_base: %lx\n", ms->phys_base);
	fprintf(fp, "  vmalloc_start_addr: %lx\n", ms->vmalloc_start_addr);
	fprintf(fp, "       modules_vaddr: %lx\n", ms->modules_vaddr);
	fprintf(fp, "         modules_end: %lx\n", ms->modules_end);
	fprintf(fp, "   kernel_text_start: %lx\n", ms->kernel_text_start);
	fprintf(fp, "     kernel_text_end: %lx\n", ms->kernel_text_end);
	fprintf(fp, "exception_text_start: %lx\n", ms->exception_text_start);
	fprintf(fp, "  exception_text_end: %lx\n", ms->exception_text_end);
	fprintf(fp, "     crash_task_regs: %lx\n", (ulong)ms->crash_task_regs);
	fprintf(fp, " unwind_index_prel31: %d\n", ms->unwind_index_prel31);
}

/*
 * Parse machine dependent command line arguments.
 *
 * Force the phys_base address via:
 *
 *  --machdep phys_base=<addr>
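 *
 * The address may be given in hex, or as a decimal megabyte count with a
 * trailing 'm'/'M' (the parser below strips the suffix and applies
 * MEGABYTES()). For example, with hypothetical values:
 *
 *   crash --machdep phys_base=0x80000000 vmlinux vmcore
 *   crash --machdep phys_base=2048M vmlinux vmcore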
 */
static void
arm_parse_cmdline_args(void)
{
	int index, i, c, err;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];
	char *p;
	ulong value = 0;

	for (index = 0; index < MAX_MACHDEP_ARGS; index++) {
		if (!machdep->cmdline_args[index])
			break;

		if (!strstr(machdep->cmdline_args[index], "=")) {
			error(WARNING, "ignoring --machdep option: %s\n",
				machdep->cmdline_args[index]);
			continue;
		}

		strcpy(buf, machdep->cmdline_args[index]);

		for (p = buf; *p; p++) {
			if (*p == ',')
				*p = ' ';
		}

		c = parse_line(buf, arglist);

		for (i = 0; i < c; i++) {
			err = 0;

			if (STRNEQ(arglist[i], "phys_base=")) {
				int megabytes = FALSE;
				int flags = RETURN_ON_ERROR | QUIET;

				if ((LASTCHAR(arglist[i]) == 'm') ||
				    (LASTCHAR(arglist[i]) == 'M')) {
					LASTCHAR(arglist[i]) = NULLCHAR;
					megabytes = TRUE;
				}

				p = arglist[i] + strlen("phys_base=");
				if (strlen(p)) {
					if (megabytes)
						value = dtol(p, flags, &err);
					else
						value = htol(p, flags, &err);
				}

				if (!err) {
					if (megabytes)
						value = MEGABYTES(value);

					machdep->machspec->phys_base = value;

					error(NOTE,
						"setting phys_base to: 0x%lx\n",
						machdep->machspec->phys_base);

					machdep->flags |= PHYS_BASE;
					continue;
				}
			}

			error(WARNING, "ignoring --machdep option: %s\n",
				arglist[i]);
		}
	}
}

/*
 * Retrieve task registers for the time of the crash.
 */
static void
arm_get_crash_notes(void)
{
	struct machine_specific *ms = machdep->machspec;
	ulong crash_notes;
	Elf32_Nhdr *note;
	ulong offset;
	char *buf, *p;
	ulong *notes_ptrs;
	ulong i, found;

	if (!symbol_exists("crash_notes"))
		return;

	crash_notes = symbol_value("crash_notes");

	notes_ptrs = (ulong *)GETBUF(kt->cpus*sizeof(notes_ptrs[0]));

	/*
	 * Read crash_notes for the first CPU. crash_notes are in standard ELF
	 * note format.
	 */
	if (!readmem(crash_notes, KVADDR, &notes_ptrs[kt->cpus-1],
	    sizeof(notes_ptrs[kt->cpus-1]), "crash_notes", RETURN_ON_ERROR)) {
		error(WARNING, "cannot read crash_notes\n");
		FREEBUF(notes_ptrs);
		return;
	}

	if (symbol_exists("__per_cpu_offset")) {
		/*
		 * Add __per_cpu_offset for each cpu to form the pointer to
		 * the notes.
		 */
		for (i = 0; i < kt->cpus; i++)
			notes_ptrs[i] = notes_ptrs[kt->cpus-1] +
				kt->__per_cpu_offset[i];
	}

	buf = GETBUF(SIZE(note_buf));

	if (!(panic_task_regs = calloc((size_t)kt->cpus, sizeof(*panic_task_regs))))
		error(FATAL, "cannot calloc panic_task_regs space\n");

	for (i = found = 0; i < kt->cpus; i++) {
		if (!readmem(notes_ptrs[i], KVADDR, buf, SIZE(note_buf),
		    "note_buf_t", RETURN_ON_ERROR)) {
			error(WARNING, "cpu %d: cannot read NT_PRSTATUS note\n", i);
			continue;
		}

		/*
		 * Do some sanity checks for this note before reading
		 * registers from it.
		 */
		note = (Elf32_Nhdr *)buf;
		p = buf + sizeof(Elf32_Nhdr);

		/*
		 * dumpfiles created with qemu won't have crash_notes, but
		 * there will be elf notes; dumpfiles created by kdump do not
		 * create notes for offline cpus.
		 */
		if (note->n_namesz == 0 &&
		    (DISKDUMP_DUMPFILE() || KDUMP_DUMPFILE())) {
			if (DISKDUMP_DUMPFILE())
				note = diskdump_get_prstatus_percpu(i);
			else if (KDUMP_DUMPFILE())
				note = netdump_get_prstatus_percpu(i);
			if (note) {
				/*
				 * SIZE(note_buf) accounts for a "final note",
				 * which is a trailing empty elf note header.
				 */
				long notesz = SIZE(note_buf) - sizeof(Elf32_Nhdr);

				if (sizeof(Elf32_Nhdr) + roundup(note->n_namesz, 4) +
				    note->n_descsz == notesz)
					BCOPY((char *)note, buf, notesz);
			} else {
				error(WARNING,
					"cpu %d: cannot find NT_PRSTATUS note\n", i);
				continue;
			}
		}

		/*
		 * Check the sanity of NT_PRSTATUS note only for each online cpu.
		 * If this cpu has invalid note, continue to find the crash notes
		 * for other online cpus.
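		 *
		 * A valid note begins with a 12-byte Elf32_Nhdr (n_namesz = 5
		 * for "CORE" plus its terminating NUL, n_type = NT_PRSTATUS),
		 * followed by the padded name and then the elf_prstatus
		 * payload. The register data therefore starts at
		 * roundup(12 + 5, 4) = 20 bytes into the note, which is the
		 * offset computed below.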
*/ if (note->n_type != NT_PRSTATUS) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (n_type != NT_PRSTATUS)\n", i); continue; } if (!STRNEQ(p, "CORE")) { error(WARNING, "cpu %d: invalid NT_PRSTATUS note (name != \"CORE\")\n", i); continue; } /* * Find correct location of note data. This contains elf_prstatus * structure which has registers etc. for the crashed task. */ offset = sizeof(Elf32_Nhdr); offset = roundup(offset + note->n_namesz, 4); p = buf + offset; /* start of elf_prstatus */ BCOPY(p + OFFSET(elf_prstatus_pr_reg), &panic_task_regs[i], sizeof(panic_task_regs[i])); found++; } /* * And finally we have the registers for the crashed task. This is * used later on when dumping backtrace. */ ms->crash_task_regs = panic_task_regs; FREEBUF(buf); FREEBUF(notes_ptrs); if (!found) { free(panic_task_regs); ms->crash_task_regs = NULL; } } /* * Accept or reject a symbol from the kernel namelist. */ static int arm_verify_symbol(const char *name, ulong value, char type) { if (STREQ(name, "swapper_pg_dir")) machdep->flags |= KSYMS_START; if (!name || !strlen(name) || !(machdep->flags & KSYMS_START)) return FALSE; if (STREQ(name, "$a") || STREQ(name, "$n") || STREQ(name, "$d")) return FALSE; if (STREQ(name, "PRRR") || STREQ(name, "NMRR")) return FALSE; if ((type == 'A') && STRNEQ(name, "__crc_")) return FALSE; if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%08lx %s\n", value, name); return TRUE; } static int arm_is_module_addr(ulong vaddr) { ulong modules_start; ulong modules_end = machdep->machspec->modules_end; if (!MODULES_VADDR) { /* * In case we are still initializing, and vm_init() has not been * called, we use defaults here which is 16MB below kernel start * address. */ modules_start = DEFAULT_MODULES_VADDR; } else { modules_start = MODULES_VADDR; } return (vaddr >= modules_start && vaddr <= modules_end); } int arm_is_vmalloc_addr(ulong vaddr) { if (arm_is_module_addr(vaddr)) return TRUE; if (!VMALLOC_START) return FALSE; return (vaddr >= VMALLOC_START); } /* * Check whether given address falls inside kernel address space (including * modules). */ static int arm_is_kvaddr(ulong vaddr) { if (arm_is_module_addr(vaddr)) return TRUE; return (vaddr >= machdep->kvbase); } static int arm_is_uvaddr(ulong vaddr, struct task_context *unused) { if (arm_is_module_addr(vaddr)) return FALSE; return (vaddr < machdep->kvbase); } /* * Returns TRUE if given pc is in exception area. */ static int arm_in_exception_text(ulong pc) { ulong exception_start = machdep->machspec->exception_text_start; ulong exception_end = machdep->machspec->exception_text_end; if (exception_start && exception_end) return (pc >= exception_start && pc < exception_end); return FALSE; } /* * Returns TRUE if given pc points to a return from syscall * entrypoint. In case the function returns TRUE and if offset is given, * it is filled with the offset that should be added to the SP to get * address of the exception frame where the user registers are. */ static int arm_in_ret_from_syscall(ulong pc, int *offset) { /* * On fast syscall return path, the stack looks like: * * SP + 0 {r4, r5} * SP + 8 user pt_regs * * The asm syscall handler pushes fifth and sixth registers * onto the stack before calling the actual syscall handler. * * So in order to print out the user registers at the time * the syscall was made, we need to adjust SP for 8. 
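	 *
	 * For instance, with a hypothetical SP of 0xc789ff58 at
	 * ret_fast_syscall, the saved r4/r5 pair occupies the first eight
	 * bytes and the user pt_regs begin at SP + 8 = 0xc789ff60.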
*/ if (pc == symbol_value("ret_fast_syscall")) { if (offset) *offset = 8; return TRUE; } /* * In case we are on the slow syscall path, the SP already * points to the start of the user registers hence no * adjustments needs to be done. */ if (pc == symbol_value("ret_slow_syscall")) { if (offset) *offset = 0; return TRUE; } return FALSE; } /* * Unroll the kernel stack using a minimal amount of gdb services. */ static void arm_back_trace(struct bt_info *bt) { int n = 0; /* * In case bt->machdep contains pointer to a full register set, we take * FP from there. */ if (bt->machdep) { const struct arm_pt_regs *regs = bt->machdep; bt->frameptr = regs->ARM_fp; } /* * Stack frame layout: * optionally saved caller registers (r4 - r10) * saved fp * saved sp * saved lr * frame => saved pc * optionally saved arguments (r0 - r3) * saved sp => * * Functions start with the following code sequence: * mov ip, sp * stmfd sp!, {r0 - r3} (optional) * corrected pc => stmfd sp!, {..., fp, ip, lr, pc} */ while (bt->frameptr && INSTACK(bt->frameptr, bt)) { ulong from; ulong sp; /* * We correct the PC to point to the actual instruction (current * value is PC + 8). */ bt->instptr = GET_STACK_ULONG(bt->frameptr - 0); bt->instptr -= 8; /* * Now get LR, saved SP and FP from the frame as well. */ from = GET_STACK_ULONG(bt->frameptr - 4); sp = GET_STACK_ULONG(bt->frameptr - 8); bt->frameptr = GET_STACK_ULONG(bt->frameptr - 12); arm_dump_backtrace_entry(bt, n++, from, sp); bt->stkptr = sp; } } /* * Unroll a kernel stack. */ static void arm_back_trace_cmd(struct bt_info *bt) { if (bt->flags & BT_REGS_NOT_FOUND) return; if (kt->flags & DWARF_UNWIND) unwind_backtrace(bt); else arm_back_trace(bt); } /* * Calculate and return the speed of the processor. */ static ulong arm_processor_speed(void) { /* * For now, we don't support reading CPU speed. */ return 0; } /* * Translate a PTE, returning TRUE if the page is present. If a physaddr pointer * is passed in, don't print anything. */ static int arm_translate_pte(ulong pte, void *physaddr, ulonglong lpae_pte) { char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char buf[BUFSIZE]; int page_present; ulonglong paddr; int len1, len2, others; if (machdep->flags & PAE) { paddr = LPAE_PAGEBASE(lpae_pte); sprintf(ptebuf, "%llx", lpae_pte); pte = (ulong)lpae_pte; } else { paddr = PAGEBASE(pte); sprintf(ptebuf, "%lx", pte); } page_present = pte_present(pte); if (physaddr) { if (machdep->flags & PAE) *((ulonglong *)physaddr) = paddr; else *((ulong *)physaddr) = (ulong)paddr; return page_present; } len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER | LJUST, "PTE")); if (!page_present && pte) { /* swap page, not handled yet */ return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER | LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER | RJUST, NULL), mkstring(physbuf, len2, CENTER | RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte_present(pte)) fprintf(fp, "%sPRESENT", others++ ? "|" : ""); if (pte_dirty(pte)) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (pte_young(pte)) fprintf(fp, "%sYOUNG", others++ ? "|" : ""); if (machdep->flags & PGTABLE_V2) { if (!pte_rdonly(pte)) fprintf(fp, "%sWRITE", others++ ? "|" : ""); if (!pte_xn(pte)) fprintf(fp, "%sEXEC", others++ ? "|" : ""); } else { if (pte_write(pte)) fprintf(fp, "%sWRITE", others++ ? "|" : ""); if (pte_exec(pte)) fprintf(fp, "%sEXEC", others++ ? 
"|" : ""); } } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return 0; } /* * Virtual to physical memory translation. This function will be called by both * arm_kvtop() and arm_uvtop(). */ static int arm_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose) { char buf[BUFSIZE]; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; /* * Page tables in ARM Linux * * In hardware PGD is 16k (having 4096 pointers to PTE) and PTE is 1k * (containing 256 translations). * * Linux, however, wants to have PTEs as page sized entities. This means * that in ARM Linux we have following setup (see also * arch/arm/include/asm/pgtable.h) * * Before 2.6.38 * * PGD PTE * +---------+ * | | 0 ----> +------------+ * +- - - - -+ | h/w pt 0 | * | | 4 ----> +------------+ +1024 * +- - - - -+ | h/w pt 1 | * . . +------------+ +2048 * . . | Linux pt 0 | * . . +------------+ +3072 * | | 4095 | Linux pt 1 | * +---------+ +------------+ +4096 * * Starting from 2.6.38 * * PGD PTE * +---------+ * | | 0 ----> +------------+ * +- - - - -+ | Linux pt 0 | * | | 4 ----> +------------+ +1024 * +- - - - -+ | Linux pt 1 | * . . +------------+ +2048 * . . | h/w pt 0 | * . . +------------+ +3072 * | | 4095 | h/w pt 1 | * +---------+ +------------+ +4096 * * So in Linux implementation we have two hardware pointers to second * level page tables. Depending on the kernel version, the "Linux" page * tables either follow or precede the hardware tables. * * Linux PT entries contain bits that are not supported on hardware, for * example "young" and "dirty" flags. * * Our translation scheme only uses Linux PTEs here. */ if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); /* * pgd_offset(pgd, vaddr) */ page_dir = pgd + PGD_OFFSET(vaddr) * 2; /* The unity-mapped region is mapped using 1MB pages, * hence 1-level translation if bit 20 is set; if we * are 1MB apart physically, we move the page_dir in * case bit 20 is set. */ if (((vaddr) >> (20)) & 1) page_dir = page_dir + 1; FILL_PGD(PAGEBASE(pgd), KVADDR, PGDIR_SIZE()); pgd_pte = ULONG(machdep->pgd + PGDIR_OFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR((ulong)page_dir)), pgd_pte); if (!pgd_pte) return FALSE; /* * pmd_offset(pgd, vaddr) * * Here PMD is folded into a PGD. */ pmd_pte = pgd_pte; page_middle = page_dir; if (verbose) fprintf(fp, " PMD: %s => %lx\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR((ulong)page_middle)), pmd_pte); if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT) { ulong sectionbase = pmd_pte & _SECTION_PAGE_MASK; if (verbose) { fprintf(fp, " PAGE: %s (1MB)\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR(sectionbase))); } *paddr = sectionbase + (vaddr & ~_SECTION_PAGE_MASK); return TRUE; } /* * pte_offset_map(pmd, vaddr) */ page_table = pmd_page_addr(pmd_pte) + PTE_OFFSET(vaddr); FILL_PTBL(PAGEBASE(page_table), PHYSADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) { fprintf(fp, " PTE: %s => %lx\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR((ulong)page_table)), pte); } if (!pte_present(pte)) { if (pte && verbose) { fprintf(fp, "\n"); arm_translate_pte(pte, 0, 0); } return FALSE; } *paddr = PAGEBASE(pte) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR(PAGEBASE(pte)))); arm_translate_pte(pte, 0, 0); } return TRUE; } /* * Virtual to physical memory translation when "CONFIG_ARM_LPAE=y". 
* This function will be called by both arm_kvtop() and arm_uvtop(). */ static int arm_lpae_vtop(ulong vaddr, ulong *pgd, physaddr_t *paddr, int verbose) { char buf[BUFSIZE]; physaddr_t page_dir; physaddr_t page_middle; physaddr_t page_table; pgd_t pgd_pmd; pmd_t pmd_pte; pte_t pte; if (!vt->vmalloc_start) { *paddr = LPAE_VTOP(vaddr); return TRUE; } if (!IS_VMALLOC_ADDR(vaddr)) { *paddr = LPAE_VTOP(vaddr); if (!verbose) return TRUE; } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); /* * pgd_offset(pgd, vaddr) */ page_dir = LPAE_VTOP((ulong)pgd + LPAE_PGD_OFFSET(vaddr) * 8); FILL_PGD_LPAE(LPAE_VTOP(pgd), PHYSADDR, LPAE_PGDIR_SIZE()); pgd_pmd = ULONGLONG(machdep->pgd + LPAE_PGDIR_OFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %8llx => %llx\n", (ulonglong)page_dir, pgd_pmd); if (!pgd_pmd) return FALSE; /* * pmd_offset(pgd, vaddr) */ page_middle = LPAE_PAGEBASE(pgd_pmd) + LPAE_PMD_OFFSET(vaddr) * 8; FILL_PMD_LPAE(LPAE_PAGEBASE(pgd_pmd), PHYSADDR, LPAE_PMDIR_SIZE()); pmd_pte = ULONGLONG(machdep->pmd + LPAE_PMDIR_OFFSET(page_middle)); if (!pmd_pte) return FALSE; if ((pmd_pte & PMD_TYPE_MASK) == PMD_TYPE_SECT_LPAE) { ulonglong sectionbase = LPAE_PAGEBASE(pmd_pte) & LPAE_SECTION_PAGE_MASK; if (verbose) fprintf(fp, " PAGE: %8llx (2MB)\n\n", (ulonglong)sectionbase); *paddr = sectionbase + (vaddr & ~LPAE_SECTION_PAGE_MASK); return TRUE; } /* * pte_offset_map(pmd, vaddr) */ page_table = LPAE_PAGEBASE(pmd_pte) + PTE_OFFSET(vaddr) * 8; FILL_PTBL_LPAE(LPAE_PAGEBASE(pmd_pte), PHYSADDR, LPAE_PTEDIR_SIZE()); pte = ULONGLONG(machdep->ptbl + LPAE_PTEDIR_OFFSET(page_table)); if (verbose) { fprintf(fp, " PTE: %8llx => %llx\n\n", (ulonglong)page_table, pte); } if (!pte_present(pte)) { if (pte && verbose) { fprintf(fp, "\n"); arm_translate_pte(0, 0, pte); } return FALSE; } *paddr = LPAE_PAGEBASE(pte) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %s\n\n", mkstring(buf, VADDR_PRLEN, RJUST | LONG_HEX, MKSTR(PAGEBASE(pte)))); arm_translate_pte(0, 0, pte); } return TRUE; } /* * Translates a user virtual address to its physical address. cmd_vtop() sets * the verbose flag so that the pte translation gets displayed; all other * callers quietly accept the translation. */ static int arm_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); /* * Before idmap_pgd was introduced with upstream commit 2c8951ab0c * (ARM: idmap: use idmap_pgd when setting up mm for reboot), the * panic task pgd was overwritten by soft reboot code, so we can't do * any vtop translations. */ if (!(machdep->flags & IDMAP_PGD) && tc->task == tt->panic_task) error(FATAL, "panic task pgd is trashed by soft reboot code\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(uvaddr)) { ulong active_mm; readmem(tc->task + OFFSET(task_struct_active_mm), KVADDR, &active_mm, sizeof(void *), "task active_mm contents", FAULT_ON_ERROR); if (!active_mm) error(FATAL, "no active_mm for this kernel thread\n"); readmem(active_mm + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } else { ulong mm; mm = task_mm(tc->task, TRUE); if (mm) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } if (machdep->flags & PAE) return arm_lpae_vtop(uvaddr, pgd, paddr, verbose); return arm_vtop(uvaddr, pgd, paddr, verbose); } /* * Translates a kernel virtual address to its physical address. 
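 *
 * For a directly-mapped (non-vmalloc) kernel address this reduces to
 * the linear VTOP() translation; e.g. with kvbase 0xc0000000 and
 * phys_base 0x80000000 (hypothetical values), kvaddr 0xc0008000 maps to
 * physical address 0x80008000 with no page table walk.
 *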
 * cmd_vtop() sets the verbose flag so that the pte translation gets
 * displayed; all other callers quietly accept the translation.
 */
static int
arm_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	if (!IS_KVADDR(kvaddr))
		return FALSE;

	if (machdep->flags & PAE)
		return arm_lpae_vtop(kvaddr, (ulong *)vt->kernel_pgd[0],
			paddr, verbose);

	if (!vt->vmalloc_start) {
		*paddr = VTOP(kvaddr);
		return TRUE;
	}

	if (!IS_VMALLOC_ADDR(kvaddr)) {
		*paddr = VTOP(kvaddr);
		if (!verbose)
			return TRUE;
	}

	return arm_vtop(kvaddr, (ulong *)vt->kernel_pgd[0], paddr, verbose);
}

/*
 * Get SP and PC values for idle tasks.
 */
static int
arm_get_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	const char *cpu_context;

	if (!bt->tc || !(tt->flags & THREAD_INFO))
		return FALSE;

	/*
	 * Update thread_info in tt.
	 */
	if (!fill_thread_info(bt->tc->thread_info))
		return FALSE;

	cpu_context = tt->thread_info + OFFSET(thread_info_cpu_context);

#define GET_REG(ptr, cp, off) ((*ptr) = (*((ulong *)((cp) + OFFSET(off)))))
	GET_REG(spp, cpu_context, cpu_context_save_sp);
	GET_REG(pcp, cpu_context, cpu_context_save_pc);

	/*
	 * Unwinding code needs FP (R7 for Thumb code) value also so we pass
	 * it with bt.
	 */
	if (*pcp & 1)
		GET_REG(&bt->frameptr, cpu_context, cpu_context_save_r7);
	else
		GET_REG(&bt->frameptr, cpu_context, cpu_context_save_fp);

	return TRUE;
}

/*
 * Get the starting point for the active cpu in a diskdump.
 */
static int
arm_get_dumpfile_stack_frame(struct bt_info *bt, ulong *nip, ulong *ksp)
{
	const struct machine_specific *ms = machdep->machspec;

	if (!ms->crash_task_regs ||
	    (!ms->crash_task_regs[bt->tc->processor].ARM_pc &&
	     !ms->crash_task_regs[bt->tc->processor].ARM_sp)) {
		bt->flags |= BT_REGS_NOT_FOUND;
		return FALSE;
	}

	/*
	 * We got registers for panic task from crash_notes. Just return them.
	 */
	*nip = ms->crash_task_regs[bt->tc->processor].ARM_pc;
	*ksp = ms->crash_task_regs[bt->tc->processor].ARM_sp;

	/*
	 * Also store pointer to all registers in case unwinding code needs
	 * to access LR.
	 */
	bt->machdep = &(ms->crash_task_regs[bt->tc->processor]);

	return TRUE;
}

/*
 * Get a stack frame combination of PC and SP from the most relevant spot.
 */
static void
arm_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp)
{
	ulong ip, sp;
	int ret;

	ip = sp = 0;
	bt->machdep = NULL;

	if (DUMPFILE() && is_task_active(bt->task))
		ret = arm_get_dumpfile_stack_frame(bt, &ip, &sp);
	else
		ret = arm_get_frame(bt, &ip, &sp);

	if (!ret)
		error(WARNING,
			"cannot determine starting stack frame for task %lx\n",
			bt->task);

	if (pcp)
		*pcp = ip;
	if (spp)
		*spp = sp;
}

/*
 * Prints out exception stack starting from start.
 */
void
arm_dump_exception_stack(ulong start, ulong end)
{
	struct arm_pt_regs regs;
	ulong flags;
	char buf[64];

	if (!readmem(start, KVADDR, &regs, sizeof(regs),
	    "exception regs", RETURN_ON_ERROR)) {
		error(WARNING, "failed to read exception registers\n");
		return;
	}

	fprintf(fp, "    pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
		"    sp : %08lx  ip : %08lx  fp : %08lx\n",
		regs.ARM_pc, regs.ARM_lr, regs.ARM_cpsr,
		regs.ARM_sp, regs.ARM_ip, regs.ARM_fp);
	fprintf(fp, "    r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs.ARM_r10, regs.ARM_r9, regs.ARM_r8);
	fprintf(fp, "    r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs.ARM_r7, regs.ARM_r6, regs.ARM_r5, regs.ARM_r4);
	fprintf(fp, "    r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs.ARM_r3, regs.ARM_r2, regs.ARM_r1, regs.ARM_r0);

	flags = regs.ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ?
		'V' : 'v';
	buf[4] = '\0';

	fprintf(fp, "    Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s\n",
		buf, interrupts_enabled(&regs) ? "n" : "ff",
		fast_interrupts_enabled(&regs) ? "n" : "ff",
		processor_modes[processor_mode(&regs)],
		isa_modes[isa_mode(&regs)]);
}

static void
arm_display_full_frame(struct bt_info *bt, ulong sp)
{
	ulong words, addr;
	ulong *up;
	char buf[BUFSIZE];
	int i, u_idx;

	if (!INSTACK(sp, bt) || !INSTACK(bt->stkptr, bt))
		return;

	words = (sp - bt->stkptr) / sizeof(ulong);

	if (words == 0) {
		fprintf(fp, "    (no frame)\n");
		return;
	}

	addr = bt->stkptr;
	u_idx = (bt->stkptr - bt->stackbase) / sizeof(ulong);
	for (i = 0; i < words; i++, u_idx++) {
		if ((i % 4) == 0)
			fprintf(fp, "%s    %lx: ", i ? "\n" : "", addr);

		up = (ulong *)(&bt->stackbuf[u_idx * sizeof(ulong)]);
		fprintf(fp, "%s ", format_stack_entry(bt, buf, *up, 0));
		addr += sizeof(ulong);
	}
	fprintf(fp, "\n");
}

/*
 * Prints out a single stack frame. What is printed depends on flags passed in
 * with bt.
 *
 * What is expected when calling this function:
 *	bt->frameptr = current FP (or 0 if there is no such)
 *	bt->stkptr = current SP
 *	bt->instptr = current PC
 *
 * from = LR
 * sp = previous/saved SP
 */
void
arm_dump_backtrace_entry(struct bt_info *bt, int level, ulong from, ulong sp)
{
	struct load_module *lm;
	const char *name;
	int offset = 0;
	struct syment *symp;
	ulong symbol_offset;
	char *name_plus_offset;
	char buf[BUFSIZE];

	name = closest_symbol(bt->instptr);
	name_plus_offset = NULL;

	if (bt->flags & BT_SYMBOL_OFFSET) {
		symp = value_search(bt->instptr, &symbol_offset);
		if (symp && symbol_offset)
			name_plus_offset =
				value_to_symstr(bt->instptr, buf, bt->radix);
	}

	if (module_symbol(bt->instptr, NULL, &lm, NULL, 0)) {
		fprintf(fp, "%s#%d [<%08lx>] (%s [%s]) from [<%08lx>]\n",
			level < 10 ? " " : "", level, bt->instptr,
			name_plus_offset ? name_plus_offset : name,
			lm->mod_name, from);
	} else {
		fprintf(fp, "%s#%d [<%08lx>] (%s) from [<%08lx>]\n",
			level < 10 ? " " : "", level, bt->instptr,
			name_plus_offset ? name_plus_offset : name, from);
	}

	if (bt->flags & BT_LINE_NUMBERS) {
		char buf[BUFSIZE];

		get_line_number(bt->instptr, buf, FALSE);
		if (strlen(buf))
			fprintf(fp, "    %s\n", buf);
	}

	if (arm_in_exception_text(bt->instptr)) {
		arm_dump_exception_stack(sp, sp + sizeof(struct arm_pt_regs));
	} else if (arm_in_ret_from_syscall(from, &offset)) {
		ulong nsp = sp + offset;
		arm_dump_exception_stack(nsp, nsp + sizeof(struct arm_pt_regs));
	}

	if (bt->flags & BT_FULL) {
		if (kt->flags & DWARF_UNWIND) {
			fprintf(fp, "    "
				"[PC: %08lx  LR: %08lx  SP: %08lx  SIZE: %ld]\n",
				bt->instptr, from, bt->stkptr, sp - bt->stkptr);
		} else {
			fprintf(fp, "    "
				"[PC: %08lx  LR: %08lx  SP: %08lx  FP: %08lx  "
				"SIZE: %ld]\n",
				bt->instptr, from, bt->stkptr, bt->frameptr,
				sp - bt->stkptr);
		}
		arm_display_full_frame(bt, sp);
	}
}

/*
 * Determine where vmalloc'd memory starts.
 */
static ulong
arm_vmalloc_start(void)
{
	machdep->machspec->vmalloc_start_addr = vt->high_memory;
	return vt->high_memory;
}

/*
 * Checks whether given task is valid task address.
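 *
 * On kernels without the THREAD_INFO flag, the task_struct sits at the
 * base of its kernel stack, so a valid task address must be stack-size
 * aligned; e.g. with hypothetical 8KB stacks, ALIGNED_STACK_OFFSET(task)
 * is (task & 0x1fff) and must evaluate to 0.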
*/ static int arm_is_task_addr(ulong task) { if (tt->flags & THREAD_INFO) return IS_KVADDR(task); return (IS_KVADDR(task) && ALIGNED_STACK_OFFSET(task) == 0); } /* * Filter dissassembly output if the output radix is not gdb's default 10 */ static int arm_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; if (!inbuf) return TRUE; /* * For some reason gdb can go off into the weeds translating text addresses, * (on alpha -- not necessarily seen on arm) so this routine both fixes the * references as well as imposing the current output radix on the translations. */ console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && !STRNEQ(p1, " 0x")) p1--; if (!STRNEQ(p1, " 0x")) return FALSE; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return FALSE; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); return TRUE; } /* * Look for likely exception frames in a stack. */ static int arm_eframe_search(struct bt_info *bt) { return (NOT_IMPLEMENTED()); } /* * Get the relevant page directory pointer from a task structure. */ static ulong arm_get_task_pgd(ulong task) { return (NOT_IMPLEMENTED()); } /* * Machine dependent command. */ static void arm_cmd_mach(void) { int c; while ((c = getopt(argcnt, args, "cm")) != -1) { switch (c) { case 'c': case 'm': fprintf(fp, "ARM: '-%c' option is not supported\n", c); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); arm_display_machine_stats(); } static void arm_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", get_cpus_to_display()); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL MODULES BASE: %lx\n", MODULES_VADDR); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } static int arm_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_present())) return cpus; else return MAX(get_cpus_online(), get_highest_cpu_online()+1); } /* * Initialize ARM specific stuff. 
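 *
 * On a live system, phys_base is taken from the start of the first
 * "System RAM" region in /proc/iomem; for example, a (hypothetical)
 * line of the form
 *
 *   80000000-9fffffff : System RAM
 *
 * yields phys_base = 0x80000000 in the parsing code below.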
 */
static void
arm_init_machspec(void)
{
	struct machine_specific *ms = machdep->machspec;
	ulong phys_base;

	if (symbol_exists("__exception_text_start") &&
	    symbol_exists("__exception_text_end")) {
		ms->exception_text_start = symbol_value("__exception_text_start");
		ms->exception_text_end = symbol_value("__exception_text_end");
	}

	if (symbol_exists("_stext") && symbol_exists("_etext")) {
		ms->kernel_text_start = symbol_value("_stext");
		ms->kernel_text_end = symbol_value("_etext");
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "kernel text:    [%lx - %lx]\n",
			ms->kernel_text_start, ms->kernel_text_end);
		fprintf(fp, "exception text: [%lx - %lx]\n",
			ms->exception_text_start, ms->exception_text_end);
	}

	if (machdep->flags & PHYS_BASE)	/* --machdep override */
		return;

	/*
	 * Next determine suitable value for phys_base. User can override this
	 * by passing valid '--machdep phys_base=<addr>' option.
	 */
	ms->phys_base = 0;

	if (ACTIVE()) {
		char buf[BUFSIZE];
		char *p1;
		int errflag;
		FILE *fp;

		if ((fp = fopen("/proc/iomem", "r")) == NULL)
			return;

		/*
		 * Memory regions are sorted in ascending order. We take the
		 * first region which should be correct for most uses.
		 */
		errflag = 1;
		while (fgets(buf, BUFSIZE, fp)) {
			if (strstr(buf, ": System RAM")) {
				clean_line(buf);
				errflag = 0;
				break;
			}
		}
		fclose(fp);

		if (errflag)
			return;

		if (!(p1 = strstr(buf, "-")))
			return;

		*p1 = NULLCHAR;

		phys_base = htol(buf, RETURN_ON_ERROR | QUIET, &errflag);
		if (errflag)
			return;

		ms->phys_base = phys_base;
	} else if (DISKDUMP_DUMPFILE() && diskdump_phys_base(&phys_base)) {
		ms->phys_base = phys_base;
	} else if (KDUMP_DUMPFILE() && arm_kdump_phys_base(&phys_base)) {
		ms->phys_base = phys_base;
	} else {
		error(WARNING,
			"phys_base cannot be determined from the dumpfile.\n"
			"Using default value of 0. If this is not correct,\n"
			"consider using '--machdep phys_base=<addr>'\n");
	}

	if (CRASHDEBUG(1))
		fprintf(fp, "using %lx as phys_base\n", ms->phys_base);
}

static const char *hook_files[] = {
	"arch/arm/kernel/entry-armv.S",
	"arch/arm/kernel/entry-common.S",
};

#define ENTRY_ARMV_S	((char **)&hook_files[0])
#define ENTRY_COMMON_S	((char **)&hook_files[1])

static struct line_number_hook arm_line_number_hooks[] = {
	{ "__dabt_svc", ENTRY_ARMV_S },
	{ "__irq_svc", ENTRY_ARMV_S },
	{ "__und_svc", ENTRY_ARMV_S },
	{ "__pabt_svc", ENTRY_ARMV_S },
	{ "__switch_to", ENTRY_ARMV_S },
	{ "ret_fast_syscall", ENTRY_COMMON_S },
	{ "ret_slow_syscall", ENTRY_COMMON_S },
	{ "ret_from_fork", ENTRY_COMMON_S },
	{ NULL, NULL },
};

#endif /* ARM */

/*
 * vmcore.h
 *
 * Copyright (C) 2019 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef _VMCORE_H
#define _VMCORE_H

#include <linux/types.h>

#ifndef NT_VMCOREDD
#define NT_VMCOREDD 0x700
#endif

#define VMCOREDD_NOTE_NAME "LINUX"
#define VMCOREDD_MAX_NAME_BYTES 44

struct vmcoredd_header {
	__u32 n_namesz; /* Name size */
	__u32 n_descsz; /* Content size */
	__u32 n_type;   /* NT_VMCOREDD */
	__u8 name[8];   /* LINUX\0\0\0 */
	__u8 dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Device dump's name */
};

#endif /* _VMCORE_H */

/* configure.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2013 David Anderson
 * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * define, clear and undef dynamically update the top-level Makefile:
 *
 *  -b  define: TARGET, GDB, GDB_FILES, GDB_OFILES, GDB_PATCH_FILES,
 *              TARGET_CFLAGS, LDFLAGS, GDB_CONF_FLAGS and GPL_FILES
 *      create: build_data.c
 *
 *  -d  define: TARGET, GDB, GDB_FILES, GDB_OFILES, GDB_PATCH_FILES,
 *              TARGET_CFLAGS, LDFLAGS, GDB_CONF_FLAGS and PROGRAM (for daemon)
 *      create: build_data.c
 *
 *  -u  clear:  TARGET, GDB, GDB_FILES, GDB_OFILES, VERSION, GDB_PATCH_FILES,
 *              TARGET_CFLAGS, LDFLAGS, GDB_CONF_FLAGS and GPL_FILES
 *      undef:  WARNING_ERROR, WARNING_OPTIONS
 *
 *  -r  define: GDB_FILES, VERSION, GDB_PATCH_FILES GPL_FILES
 *
 *  -w  define: WARNING_OPTIONS
 *      undef:  WARNING_ERROR
 *
 *  -W  define: WARNING_ERROR, WARNING_OPTIONS
 *
 *  -n  undef:  WARNING_ERROR, WARNING_OPTIONS
 *
 *  -g  define: GDB
 *
 *  -p  Create or remove .rh_rpm_package file
 *
 *  -q  Don't print configuration
 *
 *  -s  Create crash.spec file
 *
 *  -x  Add extra libraries/flags to build
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

struct supported_gdb_version;
void build_configure(struct supported_gdb_version *);
void release_configure(char *, struct supported_gdb_version *);
void make_rh_rpm_package(char *, int);
void unconfigure(void);
void set_warnings(int);
void show_configuration(void);
void target_rebuild_instructions(struct supported_gdb_version *, char *);
void arch_mismatch(struct supported_gdb_version *);
void get_current_configuration(struct supported_gdb_version *);
void makefile_setup(FILE **, FILE **);
void makefile_create(FILE **, FILE **);
char *strip_linefeeds(char *);
char *upper_case(char *, char *);
char *lower_case(char *, char *);
char *shift_string_left(char *, int);
char *shift_string_right(char *, int);
char *strip_beginning_whitespace(char *);
char *strip_ending_whitespace(char *);
int file_exists(char *);
int count_chars(char *, char);
void make_build_data(char *);
void gdb_configure(struct supported_gdb_version *);
int parse_line(char *, char **);
struct supported_gdb_version *setup_gdb_defaults(void);
struct supported_gdb_version *store_gdb_defaults(struct supported_gdb_version *);
void make_spec_file(struct supported_gdb_version *);
void set_initial_target(struct supported_gdb_version *);
char *target_to_name(int);
int name_to_target(char *);
char *get_extra_flags(char *, char *);
void add_extra_lib(char *);
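
/*
 * Typical invocations, as wired up by the top-level Makefile (a sketch;
 * exact flags come from the option table above): "./configure -q -b"
 * regenerates the Makefile for a native build, while "./configure -t ARM64
 * -q -b" requests a cross-configuration, subject to the host/target checks
 * in get_current_configuration(). Extra build flags supplied through the
 * LDFLAGS.extra/CFLAGS.extra/GDBFLAGS.extra files are folded in via
 * get_extra_flags() and add_extra_lib().
 */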
#define TRUE 1 #define FALSE 0 #undef X86 #undef ALPHA #undef PPC #undef IA64 #undef S390 #undef S390X #undef PPC64 #undef X86_64 #undef ARM #undef ARM64 #undef SPARC64 #define UNKNOWN 0 #define X86 1 #define ALPHA 2 #define PPC 3 #define IA64 4 #define S390 5 #define S390X 6 #define PPC64 7 #define X86_64 8 #define ARM 9 #define ARM64 10 #define MIPS 11 #define SPARC64 12 #define TARGET_X86 "TARGET=X86" #define TARGET_ALPHA "TARGET=ALPHA" #define TARGET_PPC "TARGET=PPC" #define TARGET_IA64 "TARGET=IA64" #define TARGET_S390 "TARGET=S390" #define TARGET_S390X "TARGET=S390X" #define TARGET_PPC64 "TARGET=PPC64" #define TARGET_X86_64 "TARGET=X86_64" #define TARGET_ARM "TARGET=ARM" #define TARGET_ARM64 "TARGET=ARM64" #define TARGET_MIPS "TARGET=MIPS" #define TARGET_SPARC64 "TARGET=SPARC64" #define TARGET_CFLAGS_X86 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_ALPHA "TARGET_CFLAGS=" #define TARGET_CFLAGS_PPC "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_IA64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_S390 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_S390X "TARGET_CFLAGS=" #define TARGET_CFLAGS_PPC64 "TARGET_CFLAGS=-m64" #define TARGET_CFLAGS_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_ARM "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_ARM_ON_X86 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_ARM_ON_X86_64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_X86_ON_X86_64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_PPC_ON_PPC64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64 -fPIC" #define TARGET_CFLAGS_ARM64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_ARM64_ON_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_PPC64_ON_X86_64 "TARGET_CFLAGS=" #define TARGET_CFLAGS_MIPS "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_MIPS_ON_X86 "TARGET_CFLAGS=-D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_MIPS_ON_X86_64 "TARGET_CFLAGS=-m32 -D_FILE_OFFSET_BITS=64" #define TARGET_CFLAGS_SPARC64 "TARGET_CFLAGS=" #define GDB_TARGET_DEFAULT "GDB_CONF_FLAGS=" #define GDB_TARGET_ARM_ON_X86 "GDB_CONF_FLAGS=--target=arm-elf-linux" #define GDB_TARGET_ARM_ON_X86_64 "GDB_CONF_FLAGS=--target=arm-elf-linux CFLAGS=-m32" #define GDB_TARGET_X86_ON_X86_64 "GDB_CONF_FLAGS=--target=i686-pc-linux-gnu CFLAGS=-m32" #define GDB_TARGET_PPC_ON_PPC64 "GDB_CONF_FLAGS=--target=ppc-elf-linux CFLAGS=-m32" #define GDB_TARGET_ARM64_ON_X86_64 "GDB_CONF_FLAGS=--target=aarch64-elf-linux" /* TBD */ #define GDB_TARGET_PPC64_ON_X86_64 "GDB_CONF_FLAGS=--target=powerpc64le-unknown-linux-gnu" #define GDB_TARGET_MIPS_ON_X86 "GDB_CONF_FLAGS=--target=mipsel-elf-linux" #define GDB_TARGET_MIPS_ON_X86_64 "GDB_CONF_FLAGS=--target=mipsel-elf-linux CFLAGS=-m32" /* * The original plan was to allow the use of a particular version * of gdb for a given architecture. But for practical purposes, * it's a one-size-fits-all scheme, and they all use the default * unless overridden. 
*/ #define GDB_5_3 (0) #define GDB_6_0 (1) #define GDB_6_1 (2) #define GDB_7_0 (3) #define GDB_7_3_1 (4) #define GDB_7_6 (5) #define SUPPORTED_GDB_VERSIONS (GDB_7_6 + 1) int default_gdb = GDB_7_6; struct supported_gdb_version { char *GDB; char *GDB_VERSION_IN; char *GDB_FILES; char *GDB_OFILES; char *GDB_PATCH_FILES; char *GDB_FLAGS; char *GPL; } supported_gdb_versions[SUPPORTED_GDB_VERSIONS] = { { "GDB=gdb-5.3post-0.20021129.36rh", "Red Hat Linux (5.3post-0.20021129.36rh)", "GDB_FILES=${GDB_5.3post-0.20021129.36rh_FILES}", "GDB_OFILES=${GDB_5.3post-0.20021129.36rh_OFILES}", "GDB_PATCH_FILES=", "GDB_FLAGS=-DGDB_5_3", "GPLv2" }, { "GDB=gdb-6.0", "6.0", "GDB_FILES=${GDB_6.0_FILES}", "GDB_OFILES=${GDB_6.0_OFILES}", "GDB_PATCH_FILES=", "GDB_FLAGS=-DGDB_6_0", "GPLv2" }, { "GDB=gdb-6.1", "6.1", "GDB_FILES=${GDB_6.1_FILES}", "GDB_OFILES=${GDB_6.1_OFILES}", "GDB_PATCH_FILES=gdb-6.1.patch", "GDB_FLAGS=-DGDB_6_1", "GPLv2" }, { "GDB=gdb-7.0", "7.0", "GDB_FILES=${GDB_7.0_FILES}", "GDB_OFILES=${GDB_7.0_OFILES}", "GDB_PATCH_FILES=gdb-7.0.patch", "GDB_FLAGS=-DGDB_7_0", "GPLv3" }, { "GDB=gdb-7.3.1", "7.3.1", "GDB_FILES=${GDB_7.3.1_FILES}", "GDB_OFILES=${GDB_7.3.1_OFILES}", "GDB_PATCH_FILES=gdb-7.3.1.patch", "GDB_FLAGS=-DGDB_7_3_1", "GPLv3" }, { "GDB=gdb-7.6", "7.6", "GDB_FILES=${GDB_7.6_FILES}", "GDB_OFILES=${GDB_7.6_OFILES}", "GDB_PATCH_FILES=gdb-7.6.patch gdb-7.6-ppc64le-support.patch gdb-7.6-proc_service.h.patch", "GDB_FLAGS=-DGDB_7_6", "GPLv3" }, }; #define DAEMON 0x1 #define QUIET 0x2 #define MAXSTRLEN 256 #define MIN(a,b) (((a)<(b))?(a):(b)) struct target_data { int target; int host; int initial_gdb_target; int flags; char program[MAXSTRLEN]; char gdb_version[MAXSTRLEN]; char release[MAXSTRLEN]; struct stat statbuf; const char *target_as_param; } target_data = { 0 }; int main(int argc, char **argv) { int c; struct supported_gdb_version *sp; sp = setup_gdb_defaults(); while ((c = getopt(argc, argv, "gsqnWwubdr:p:P:t:x:")) > 0) { switch (c) { case 'q': target_data.flags |= QUIET; break; case 'u': unconfigure(); break; case 'd': target_data.flags |= DAEMON; case 'b': build_configure(sp); break; case 'r': release_configure(optarg, sp); break; case 'p': make_rh_rpm_package(optarg, 0); break; case 'P': make_rh_rpm_package(optarg, 1); break; case 'W': case 'w': case 'n': set_warnings(c); break; case 's': make_spec_file(sp); break; case 'g': gdb_configure(sp); break; case 't': target_data.target_as_param = optarg; break; case 'x': add_extra_lib(optarg); break; } } exit(0); } void target_rebuild_instructions(struct supported_gdb_version *sp, char *target) { fprintf(stderr, "\nIn order to build a crash binary for the %s architecture:\n", target); fprintf(stderr, " 1. remove the %s subdirectory\n", &sp->GDB[strlen("GDB=")]); fprintf(stderr, " 2. perform a \"make clean\"\n"); fprintf(stderr, " 3. 
retry the build\n\n"); } void arch_mismatch(struct supported_gdb_version *sp) { fprintf(stderr, "\nThe initial build in this source tree was for the %s architecture.\n", target_to_name(target_data.initial_gdb_target)); target_rebuild_instructions(sp, target_to_name(target_data.target)); exit(1); } void get_current_configuration(struct supported_gdb_version *sp) { FILE *fp; static char buf[512]; char *p; #ifdef __alpha__ target_data.target = ALPHA; #endif #ifdef __i386__ target_data.target = X86; #endif #ifdef __powerpc__ target_data.target = PPC; #endif #ifdef __ia64__ target_data.target = IA64; #endif #ifdef __s390__ target_data.target = S390; #endif #ifdef __s390x__ target_data.target = S390X; #endif #ifdef __powerpc64__ target_data.target = PPC64; #endif #ifdef __x86_64__ target_data.target = X86_64; #endif #ifdef __arm__ target_data.target = ARM; #endif #ifdef __aarch64__ target_data.target = ARM64; #endif #ifdef __mips__ target_data.target = MIPS; #endif #ifdef __sparc_v9__ target_data.target = SPARC64; #endif set_initial_target(sp); /* * Override target if specified on command line. */ target_data.host = target_data.target; if (target_data.target_as_param) { if ((target_data.target == X86 || target_data.target == X86_64) && (name_to_target((char *)target_data.target_as_param) == ARM)) { /* * Debugging of ARM core files supported on X86, and on * X86_64 when built as a 32-bit executable. */ target_data.target = ARM; } else if ((target_data.target == X86 || target_data.target == X86_64) && (name_to_target((char *)target_data.target_as_param) == MIPS)) { /* * Debugging of MIPS little-endian core files * supported on X86, and on X86_64 when built as a * 32-bit executable. */ target_data.target = MIPS; } else if ((target_data.target == X86_64) && (name_to_target((char *)target_data.target_as_param) == X86)) { /* * Build an X86 crash binary on an X86_64 host. */ target_data.target = X86; } else if ((target_data.target == X86_64) && (name_to_target((char *)target_data.target_as_param) == ARM64)) { /* * Build an ARM64 crash binary on an X86_64 host. */ target_data.target = ARM64; } else if ((target_data.target == X86_64) && (name_to_target((char *)target_data.target_as_param) == PPC64)) { /* * Build a PPC64 little-endian crash binary on an X86_64 host. */ target_data.target = PPC64; } else if ((target_data.target == PPC64) && (name_to_target((char *)target_data.target_as_param) == PPC)) { /* * Build an PPC crash binary on an PPC64 host. */ target_data.target = PPC; } else if (name_to_target((char *)target_data.target_as_param) == target_data.host) { if ((target_data.initial_gdb_target != UNKNOWN) && (target_data.host != target_data.initial_gdb_target)) arch_mismatch(sp); } else { fprintf(stderr, "\ntarget=%s is not supported on the %s host architecture\n\n", target_data.target_as_param, target_to_name(target_data.host)); exit(1); } } /* * Impose implied (sticky) target if an initial build has been * done in the source tree. 
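 *
 * For example, after an initial cross-build for ARM in this tree
 * (e.g. "make target=ARM" on an x86_64 host), a later plain rebuild
 * keeps TARGET=ARM; switching to a different architecture requires the
 * cleanup steps printed by target_rebuild_instructions().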
*/ if (target_data.initial_gdb_target && (target_data.target != target_data.initial_gdb_target)) { if ((target_data.initial_gdb_target == ARM) && (target_data.target != ARM)) { if ((target_data.target == X86) || (target_data.target == X86_64)) target_data.target = ARM; else arch_mismatch(sp); } if ((target_data.target == ARM) && (target_data.initial_gdb_target != ARM)) arch_mismatch(sp); if ((target_data.initial_gdb_target == MIPS) && (target_data.target != MIPS)) { if ((target_data.target == X86) || (target_data.target == X86_64)) target_data.target = MIPS; else arch_mismatch(sp); } if ((target_data.initial_gdb_target == X86) && (target_data.target != X86)) { if (target_data.target == X86_64) target_data.target = X86; else arch_mismatch(sp); } if ((target_data.target == X86) && (target_data.initial_gdb_target != X86)) arch_mismatch(sp); if ((target_data.initial_gdb_target == ARM64) && (target_data.target != ARM64)) { if (target_data.target == X86_64) target_data.target = ARM64; else arch_mismatch(sp); } if ((target_data.target == ARM64) && (target_data.initial_gdb_target != ARM64)) arch_mismatch(sp); if ((target_data.initial_gdb_target == PPC64) && (target_data.target != PPC64)) { if (target_data.target == X86_64) target_data.target = PPC64; else arch_mismatch(sp); } if ((target_data.target == PPC64) && (target_data.initial_gdb_target != PPC64)) arch_mismatch(sp); if ((target_data.initial_gdb_target == PPC) && (target_data.target != PPC)) { if (target_data.target == PPC64) target_data.target = PPC; else arch_mismatch(sp); } if ((target_data.target == PPC) && (target_data.initial_gdb_target != PPC)) arch_mismatch(sp); if ((target_data.target == SPARC64) && (target_data.initial_gdb_target != SPARC64)) arch_mismatch(sp); } if ((fp = fopen("Makefile", "r")) == NULL) { perror("Makefile"); goto get_release; } while (fgets(buf, 512, fp)) { if (strncmp(buf, "PROGRAM=", strlen("PROGRAM=")) == 0) { p = strstr(buf, "=") + 1; strip_linefeeds(p); upper_case(p, target_data.program); if (target_data.flags & DAEMON) strcat(target_data.program, "D"); continue; } } fclose(fp); get_release: target_data.release[0] = '\0'; if (file_exists(".rh_rpm_package")) { if ((fp = fopen(".rh_rpm_package", "r")) == NULL) { perror(".rh_rpm_package"); } else { if (fgets(buf, 512, fp)) { strip_linefeeds(buf); if (strlen(buf)) { buf[MAXSTRLEN-1] = '\0'; strcpy(target_data.release, buf); } else fprintf(stderr, "WARNING: .rh_rpm_package file is empty!\n"); } else fprintf(stderr, "WARNING: .rh_rpm_package file is empty!\n"); fclose(fp); if (strlen(target_data.release)) return; } } else fprintf(stderr, "WARNING: .rh_rpm_package file does not exist!\n"); if ((fp = fopen("defs.h", "r")) == NULL) { perror("defs.h"); return; } while (fgets(buf, 512, fp)) { if (strncmp(buf, "#define BASELEVEL_REVISION", strlen("#define BASELEVEL_REVISION")) == 0) { p = strstr(buf, "\"") + 1; strip_linefeeds(p); p[strlen(p)-1] = '\0'; strcpy(target_data.release, p); break; } } fclose(fp); } void show_configuration(void) { int i; if (target_data.flags & QUIET) return; switch (target_data.target) { case X86: printf("TARGET: X86\n"); break; case ALPHA: printf("TARGET: ALPHA\n"); break; case PPC: printf("TARGET: PPC\n"); break; case IA64: printf("TARGET: IA64\n"); break; case S390: printf("TARGET: S390\n"); break; case S390X: printf("TARGET: S390X\n"); break; case PPC64: printf("TARGET: PPC64\n"); break; case X86_64: printf("TARGET: X86_64\n"); break; case ARM: printf("TARGET: ARM\n"); break; case ARM64: printf("TARGET: ARM64\n"); break; case MIPS: 
printf("TARGET: MIPS\n"); break; case SPARC64: printf("TARGET: SPARC64\n"); break; } if (strlen(target_data.program)) { for (i = 0; i < (strlen("TARGET")-strlen(target_data.program)); i++) printf(" "); printf("%s: ", target_data.program); if (strlen(target_data.release)) printf("%s\n", target_data.release); else printf("???\n"); } if (strlen(target_data.gdb_version)) printf(" GDB: %s\n\n", &target_data.gdb_version[4]); } void build_configure(struct supported_gdb_version *sp) { FILE *fp1, *fp2; char buf[512]; char *target; char *target_CFLAGS; char *gdb_conf_flags; char *ldflags; char *cflags; get_current_configuration(sp); target = target_CFLAGS = NULL; gdb_conf_flags = GDB_TARGET_DEFAULT; switch (target_data.target) { case X86: target = TARGET_X86; if (target_data.host == X86_64) { target_CFLAGS = TARGET_CFLAGS_X86_ON_X86_64; gdb_conf_flags = GDB_TARGET_X86_ON_X86_64; } else target_CFLAGS = TARGET_CFLAGS_X86; break; case ALPHA: target = TARGET_ALPHA; target_CFLAGS = TARGET_CFLAGS_ALPHA; break; case PPC: target = TARGET_PPC; if (target_data.host == PPC64) { target_CFLAGS = TARGET_CFLAGS_PPC_ON_PPC64; gdb_conf_flags = GDB_TARGET_PPC_ON_PPC64; } else target_CFLAGS = TARGET_CFLAGS_PPC; break; case IA64: target = TARGET_IA64; target_CFLAGS = TARGET_CFLAGS_IA64; break; case S390: target = TARGET_S390; target_CFLAGS = TARGET_CFLAGS_S390; break; case S390X: target = TARGET_S390X; target_CFLAGS = TARGET_CFLAGS_S390X; break; case PPC64: target = TARGET_PPC64; if (target_data.host == X86_64) { target_CFLAGS = TARGET_CFLAGS_PPC64_ON_X86_64; gdb_conf_flags = GDB_TARGET_PPC64_ON_X86_64; } else target_CFLAGS = TARGET_CFLAGS_PPC64; break; case X86_64: target = TARGET_X86_64; target_CFLAGS = TARGET_CFLAGS_X86_64; break; case ARM: target = TARGET_ARM; if (target_data.host == X86) { target_CFLAGS = TARGET_CFLAGS_ARM_ON_X86; gdb_conf_flags = GDB_TARGET_ARM_ON_X86; } else if (target_data.host == X86_64) { target_CFLAGS = TARGET_CFLAGS_ARM_ON_X86_64; gdb_conf_flags = GDB_TARGET_ARM_ON_X86_64; } else target_CFLAGS = TARGET_CFLAGS_ARM; break; case ARM64: target = TARGET_ARM64; if (target_data.host == X86_64) { target_CFLAGS = TARGET_CFLAGS_ARM64_ON_X86_64; gdb_conf_flags = GDB_TARGET_ARM64_ON_X86_64; } else target_CFLAGS = TARGET_CFLAGS_ARM64; break; case MIPS: target = TARGET_MIPS; if (target_data.host == X86) { target_CFLAGS = TARGET_CFLAGS_MIPS_ON_X86; gdb_conf_flags = GDB_TARGET_MIPS_ON_X86; } else if (target_data.host == X86_64) { target_CFLAGS = TARGET_CFLAGS_MIPS_ON_X86_64; gdb_conf_flags = GDB_TARGET_MIPS_ON_X86_64; } else target_CFLAGS = TARGET_CFLAGS_MIPS; break; case SPARC64: target = TARGET_SPARC64; target_CFLAGS = TARGET_CFLAGS_SPARC64; break; } ldflags = get_extra_flags("LDFLAGS.extra", NULL); cflags = get_extra_flags("CFLAGS.extra", NULL); gdb_conf_flags = get_extra_flags("GDBFLAGS.extra", gdb_conf_flags); makefile_setup(&fp1, &fp2); while (fgets(buf, 512, fp1)) { if (strncmp(buf, "TARGET=", strlen("TARGET=")) == 0) fprintf(fp2, "%s\n", target); else if (strncmp(buf, "TARGET_CFLAGS=", strlen("TARGET_CFLAGS=")) == 0) fprintf(fp2, "%s%s%s\n", target_CFLAGS, cflags ? " " : "", cflags ? 
cflags : ""); else if (strncmp(buf, "GDB_CONF_FLAGS=", strlen("GDB_CONF_FLAGS=")) == 0) fprintf(fp2, "%s\n", gdb_conf_flags); else if (strncmp(buf, "GDB_FILES=",strlen("GDB_FILES=")) == 0) fprintf(fp2, "%s\n", sp->GDB_FILES); else if (strncmp(buf, "GDB_OFILES=",strlen("GDB_OFILES=")) == 0) fprintf(fp2, "%s\n", sp->GDB_OFILES); else if (strncmp(buf, "GDB_PATCH_FILES=",strlen("GDB_PATCH_FILES=")) == 0) fprintf(fp2, "%s\n", sp->GDB_PATCH_FILES); else if (strncmp(buf, "GDB_FLAGS=",strlen("GDB_FLAGS=")) == 0) fprintf(fp2, "%s\n", sp->GDB_FLAGS); else if (strncmp(buf, "GPL_FILES=", strlen("GPL_FILES=")) == 0) fprintf(fp2, "GPL_FILES=%s\n", strcmp(sp->GPL, "GPLv2") == 0 ? "COPYING" : "COPYING3"); else if (strncmp(buf, "GDB=", strlen("GDB=")) == 0) { fprintf(fp2, "%s\n", sp->GDB); sprintf(target_data.gdb_version, "%s", &sp->GDB[4]); } else if (strncmp(buf, "LDFLAGS=", strlen("LDFLAGS=")) == 0) { fprintf(fp2, "LDFLAGS=%s\n", ldflags ? ldflags : ""); } else fprintf(fp2, "%s", buf); } makefile_create(&fp1, &fp2); show_configuration(); make_build_data(&target[strlen("TARGET=")]); } void release_configure(char *gdb_version, struct supported_gdb_version *sp) { FILE *fp1, *fp2; int found; char buf[512]; char gdb_files[MAXSTRLEN]; get_current_configuration(sp); sprintf(buf, "%s/gdb", gdb_version); if (!file_exists(buf)) { fprintf(stderr, "make release: no such directory: %s\n", buf); exit(1); } sprintf(gdb_files, "GDB_%s_FILES", &gdb_version[strlen("gdb-")]); makefile_setup(&fp1, &fp2); found = 0; while (fgets(buf, 512, fp1)) { if (strncmp(buf, gdb_files, strlen(gdb_files)) == 0) found++; if (strncmp(buf, "GDB_FILES=", strlen("GDB_FILES=")) == 0) fprintf(fp2, "GDB_FILES=${%s}\n", gdb_files); else if (strncmp(buf, "VERSION=", strlen("VERSION=")) == 0) fprintf(fp2, "VERSION=%s\n", target_data.release); else if (strncmp(buf, "GDB_PATCH_FILES=", strlen("GDB_PATCH_FILES=")) == 0) fprintf(fp2, "%s\n", sp->GDB_PATCH_FILES); else if (strncmp(buf, "GPL_FILES=", strlen("GPL_FILES=")) == 0) fprintf(fp2, "GPL_FILES=%s\n", strcmp(sp->GPL, "GPLv2") == 0 ? "COPYING" : "COPYING3"); else fprintf(fp2, "%s", buf); } if (!found) { fprintf(stderr, "make release: cannot find %s\n", gdb_files); exit(1); } makefile_create(&fp1, &fp2); } /* * Create an .rh_rpm_package file if the passed-in variable is set. 
*/ void make_rh_rpm_package(char *package, int release) { char *p, *cur; FILE *fp; char buf[256]; if ((strcmp(package, "remove") == 0)) { if (file_exists(".rh_rpm_package")) { if (unlink(".rh_rpm_package")) { perror("unlink"); fprintf(stderr, "cannot remove .rh_rpm_package\n"); exit(1); } } return; } if (!(p = strstr(package, "="))) return; if (!strlen(++p)) return; if (release) { if (!(fp = popen("./crash -v", "r"))) { fprintf(stderr, "cannot execute \"crash -v\"\n"); exit(1); } cur = NULL; while (fgets(buf, 256, fp)) { if (strncmp(buf, "crash ", 6) == 0) { cur = &buf[6]; break; } } pclose(fp); if (!cur) { fprintf(stderr, "cannot get version from \"crash -v\"\n"); exit(1); } strip_linefeeds(cur); if (strcmp(cur, p) != 0) { fprintf(stderr, "./crash version: %s\n", cur); fprintf(stderr, "release version: %s\n", p); exit(1); } } if ((fp = fopen(".rh_rpm_package", "w")) == NULL) { perror("fopen"); fprintf(stderr, "cannot open .rh_rpm_package\n"); exit(1); } fprintf(fp, "%s\n", strip_linefeeds(p)); fclose(fp); } void gdb_configure(struct supported_gdb_version *sp) { FILE *fp1, *fp2; char buf[512]; get_current_configuration(sp); makefile_setup(&fp1, &fp2); while (fgets(buf, 512, fp1)) { if (strncmp(buf, "GDB=", strlen("GDB=")) == 0) fprintf(fp2, "%s\n", sp->GDB); else fprintf(fp2, "%s", buf); } makefile_create(&fp1, &fp2); } void unconfigure(void) { FILE *fp1, *fp2; char buf[512]; makefile_setup(&fp1, &fp2); while (fgets(buf, 512, fp1)) { if (strncmp(buf, "TARGET=", strlen("TARGET=")) == 0) fprintf(fp2, "TARGET=\n"); else if (strncmp(buf, "TARGET_CFLAGS=", strlen("TARGET_CFLAGS=")) == 0) fprintf(fp2, "TARGET_CFLAGS=\n"); else if (strncmp(buf, "GDB_CONF_FLAGS=", strlen("GDB_CONF_FLAGS=")) == 0) fprintf(fp2, "GDB_CONF_FLAGS=\n"); else if (strncmp(buf, "GDB_FILES=",strlen("GDB_FILES=")) == 0) fprintf(fp2, "GDB_FILES=\n"); else if (strncmp(buf, "GDB_OFILES=",strlen("GDB_OFILES=")) == 0) fprintf(fp2, "GDB_OFILES=\n"); else if (strncmp(buf, "GDB_PATCH_FILES=",strlen("GDB_PATCH_FILES=")) == 0) fprintf(fp2, "GDB_PATCH_FILES=\n"); else if (strncmp(buf, "GDB_FLAGS=",strlen("GDB_FLAGS=")) == 0) fprintf(fp2, "GDB_FLAGS=\n"); else if (strncmp(buf, "GDB=", strlen("GDB=")) == 0) fprintf(fp2, "GDB=\n"); else if (strncmp(buf, "VERSION=", strlen("VERSION=")) == 0) fprintf(fp2, "VERSION=\n"); else if (strncmp(buf, "GPL_FILES=", strlen("GPL_FILES=")) == 0) fprintf(fp2, "GPL_FILES=\n"); else if (strncmp(buf, "LDFLAGS=", strlen("LDFLAGS=")) == 0) fprintf(fp2, "LDFLAGS=\n"); else if (strncmp(buf, "WARNING_ERROR=", strlen("WARNING_ERROR=")) == 0) { shift_string_right(buf, 1); buf[0] = '#'; fprintf(fp2, "%s", buf); } else if (strncmp(buf, "WARNING_OPTIONS=", strlen("WARNING_OPTIONS=")) == 0) { shift_string_right(buf, 1); buf[0] = '#'; fprintf(fp2, "%s", buf); } else fprintf(fp2, "%s", buf); } makefile_create(&fp1, &fp2); } void set_warnings(int w) { FILE *fp1, *fp2; char buf[512]; makefile_setup(&fp1, &fp2); while (fgets(buf, 512, fp1)) { if (strncmp(buf, "#WARNING_ERROR=", strlen("#WARNING_ERROR=")) == 0) { switch (w) { case 'W': shift_string_left(buf, 1); break; case 'w': case 'n': break; } } if (strncmp(buf, "WARNING_ERROR=", strlen("WARNING_ERROR=")) == 0) { switch (w) { case 'n': case 'w': shift_string_right(buf, 1); buf[0] = '#'; break; case 'W': break; } } if (strncmp(buf, "#WARNING_OPTIONS=", strlen("#WARNING_OPTIONS=")) == 0) { switch (w) { case 'W': case 'w': shift_string_left(buf, 1); break; case 'n': break; } } if (strncmp(buf, "WARNING_OPTIONS=", strlen("WARNING_OPTIONS=")) == 0) { switch (w) { case 'w': 
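		/*
		 * Net effect of the three flags: -W uncomments both
		 * WARNING_ERROR and WARNING_OPTIONS (warnings become
		 * errors); -w uncomments WARNING_OPTIONS only (warnings
		 * enabled, not errors); -n comments both lines out.
		 */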
case 'W': break; case 'n': shift_string_right(buf, 1); buf[0] = '#'; break; } } fprintf(fp2, "%s", buf); } makefile_create(&fp1, &fp2); } void makefile_setup(FILE **fp1, FILE **fp2) { if (stat("Makefile", &target_data.statbuf) == -1) { perror("Makefile"); exit(1); } if ((*fp1 = fopen("Makefile", "r")) == NULL) { perror("fopen"); fprintf(stderr, "cannot open existing Makefile\n"); exit(1); } unlink("Makefile.new"); if ((*fp2 = fopen("Makefile.new", "w+")) == NULL) { perror("fopen"); fprintf(stderr, "cannot create new Makefile\n"); exit(1); } } void makefile_create(FILE **fp1, FILE **fp2) { fclose(*fp1); fclose(*fp2); if (system("mv Makefile.new Makefile") != 0) { fprintf(stderr, "Makefile: cannot create new Makefile\n"); fprintf(stderr, "please copy Makefile.new to Makefile\n"); exit(1); } if (chown("Makefile", target_data.statbuf.st_uid, target_data.statbuf.st_gid) == -1) { fprintf(stderr, "Makefile: cannot restore original owner/group\n"); } } #define LASTCHAR(s) (s[strlen(s)-1]) char * strip_linefeeds(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == '\n') *p = '\0'; return(line); } /* * Turn a string into upper-case. */ char * upper_case(char *s, char *buf) { char *p1, *p2; p1 = s; p2 = buf; while (*p1) { *p2 = toupper(*p1); p1++, p2++; } *p2 = '\0'; return(buf); } /* * Turn a string into lower-case. */ char * lower_case(char *s, char *buf) { char *p1, *p2; p1 = s; p2 = buf; while (*p1) { *p2 = tolower(*p1); p1++, p2++; } *p2 = '\0'; return(buf); } char * shift_string_left(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s, s+cnt, (origlen-cnt)); *(s+(origlen-cnt)) = '\0'; return(s); } char * shift_string_right(char *s, int cnt) { int i; int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s+cnt, s, origlen); *(s+(origlen+cnt)) = '\0'; for (i = 0; i < cnt; i++) s[i] = ' '; return(s); } char * strip_beginning_whitespace(char *line) { char buf[MAXSTRLEN]; char *p; if (line == NULL || strlen(line) == 0) return(line); strcpy(buf, line); p = &buf[0]; while (*p == ' ' || *p == '\t') p++; strcpy(line, p); return(line); } char * strip_ending_whitespace(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &line[strlen(line)-1]; while (*p == ' ' || *p == '\t') { *p = '\0'; if (p == line) break; p--; } return(line); } int file_exists(char *file) { struct stat sbuf; if (stat(file, &sbuf) == 0) return TRUE; return FALSE; } int count_chars(char *s, char c) { char *p; int count; if (!s) return 0; count = 0; for (p = s; *p; p++) { if (*p == c) count++; } return count; } void make_build_data(char *target) { char *p; char hostname[MAXSTRLEN]; char progname[MAXSTRLEN]; char inbuf1[MAXSTRLEN]; char inbuf2[MAXSTRLEN]; char inbuf3[MAXSTRLEN]; FILE *fp1, *fp2, *fp3, *fp4; unlink("build_data.c"); fp1 = popen("date", "r"); fp2 = popen("id", "r"); fp3 = popen("gcc --version", "r"); if ((fp4 = fopen("build_data.c", "w")) == NULL) { perror("build_data.c"); exit(1); } if (gethostname(hostname, MAXSTRLEN) != 0) hostname[0] = '\0'; p = fgets(inbuf1, 79, fp1); p = fgets(inbuf2, 79, fp2); p = strstr(inbuf2, " "); *p = '\0'; p = fgets(inbuf3, 79, fp3); lower_case(target_data.program, progname); fprintf(fp4, "char *build_command = \"%s\";\n", progname); if (getenv("SOURCE_DATE_EPOCH")) fprintf(fp4, "char *build_data = \"reproducible build\";\n"); else if (strlen(hostname)) fprintf(fp4, "char *build_data = \"%s by %s on %s\";\n", strip_linefeeds(inbuf1), inbuf2, hostname); else fprintf(fp4, 
"char *build_data = \"%s by %s\";\n", strip_linefeeds(inbuf1), inbuf2); bzero(inbuf1, MAXSTRLEN); sprintf(inbuf1, "%s", target_data.release); fprintf(fp4, "char *build_target = \"%s\";\n", target); fprintf(fp4, "char *build_version = \"%s\";\n", inbuf1); fprintf(fp4, "char *compiler_version = \"%s\";\n", strip_linefeeds(inbuf3)); pclose(fp1); pclose(fp2); pclose(fp3); fclose(fp4); } void make_spec_file(struct supported_gdb_version *sp) { char *Version, *Release; char buf[512]; get_current_configuration(sp); Release = strstr(target_data.release, "-"); if (!Release) { Version = target_data.release; Release = "0"; } else { fprintf(stderr, "crash.spec: obsolete src.rpm build manner -- no dashes allowed: %s\n", target_data.release); return; } printf("#\n"); printf("# crash core analysis suite\n"); printf("#\n"); printf("Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles\n"); printf("Name: %s\n", lower_case(target_data.program, buf)); printf("Version: %s\n", Version); printf("Release: %s\n", Release); printf("License: %s\n", sp->GPL); printf("Group: Development/Debuggers\n"); printf("Source: %%{name}-%%{version}.tar.gz\n"); printf("URL: http://people.redhat.com/anderson\n"); printf("Distribution: Linux 2.2 or greater\n"); printf("Vendor: Red Hat, Inc.\n"); printf("Packager: Dave Anderson \n"); printf("ExclusiveOS: Linux\n"); printf("ExclusiveArch: %%{ix86} alpha ia64 ppc ppc64 ppc64pseries ppc64iseries x86_64 s390 s390x arm aarch64 ppc64le mips mipsel sparc64\n"); printf("Buildroot: %%{_tmppath}/%%{name}-root\n"); printf("BuildRequires: ncurses-devel zlib-devel bison\n"); printf("Requires: binutils\n"); printf("# Patch0: crash-3.3-20.installfix.patch (patch example)\n"); printf("\n"); printf("%%description\n"); printf("The core analysis suite is a self-contained tool that can be used to\n"); printf("investigate either live systems, kernel core dumps created from the\n"); printf("netdump, diskdump and kdump facilities from Red Hat Linux, the mcore kernel patch\n"); printf("offered by Mission Critical Linux, or the LKCD kernel patch.\n"); printf("\n"); printf("%%package devel\n"); printf("Requires: %%{name} = %%{version}, zlib-devel\n"); printf("Summary: crash utility for live systems; netdump, diskdump, kdump, LKCD or mcore dumpfiles\n"); printf("Group: Development/Debuggers\n"); printf("\n"); printf("%%description devel\n"); printf("The core analysis suite is a self-contained tool that can be used to\n"); printf("investigate either live systems, kernel core dumps created from the\n"); printf("netdump, diskdump and kdump packages from Red Hat Linux, the mcore kernel patch\n"); printf("offered by Mission Critical Linux, or the LKCD kernel patch.\n"); printf("\n"); printf("%%package extensions\n"); printf("Summary: Additional commands for the crash dump analysis tool\n"); printf("Group: Development/Debuggers\n"); printf("\n"); printf("%%description extensions\n"); printf("The extensions package contains plugins that provide additional crash\n"); printf("commands. 
The extensions can be loaded in crash via the \"extend\" command.\n"); printf("\n"); printf("The following extensions are provided:\n"); printf("* eppic: Provides C-like language for writing dump analysis scripts\n"); printf("* dminfo: Device-mapper target analyzer\n"); printf("* snap: Takes a snapshot of live memory and creates a kdump dumpfile\n"); printf("* trace: Displays kernel tracing data and traced events that occurred prior to a panic.\n"); printf("\n"); printf("%%prep\n"); printf("%%setup -n %%{name}-%%{version}\n"); printf("# %%patch0 -p1 -b .install (patch example)\n"); printf("\n"); printf("%%build\n"); printf("make RPMPKG=\"%%{version}\"\n"); printf("# make RPMPKG=\"%%{version}-%%{release}\"\n"); printf("make extensions\n"); /* printf("make crashd\n"); */ printf("\n"); printf("%%install\n"); printf("rm -rf %%{buildroot}\n"); printf("mkdir -p %%{buildroot}/usr/bin\n"); printf("make DESTDIR=%%{buildroot} install\n"); printf("mkdir -p %%{buildroot}%%{_mandir}/man8\n"); printf("cp crash.8 %%{buildroot}%%{_mandir}/man8/crash.8\n"); printf("mkdir -p %%{buildroot}%%{_includedir}/crash\n"); printf("cp defs.h %%{buildroot}%%{_includedir}/crash\n"); printf("mkdir -p %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("if [ -f extensions/eppic.so ]\n"); printf("then\n"); printf("cp extensions/eppic.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("fi\n"); printf("cp extensions/dminfo.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("cp extensions/snap.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("cp extensions/trace.so %%{buildroot}%%{_libdir}/crash/extensions\n"); printf("\n"); printf("%%clean\n"); printf("rm -rf %%{buildroot}\n"); printf("\n"); printf("%%files\n"); printf("%%defattr(-,root,root)\n"); printf("/usr/bin/crash\n"); printf("%%{_mandir}/man8/crash.8*\n"); /* printf("/usr/bin/crashd\n"); */ printf("%%doc README\n"); printf("\n"); printf("%%files devel\n"); printf("%%defattr(-,root,root)\n"); printf("%%{_includedir}/*\n"); printf("\n"); printf("%%files extensions\n"); printf("%%defattr(-,root,root)\n"); printf("%%{_libdir}/crash/extensions/*\n"); } /* * Use the default gdb #defines unless there's a .gdb file. */ struct supported_gdb_version * setup_gdb_defaults(void) { FILE *fp; char inbuf[512]; char buf[512]; struct supported_gdb_version *sp; /* * Use the default, allowing for an override in .gdb */ if (!file_exists(".gdb")) return store_gdb_defaults(NULL); if ((fp = fopen(".gdb", "r")) == NULL) { perror(".gdb"); return store_gdb_defaults(NULL); } while (fgets(inbuf, 512, fp)) { strip_linefeeds(inbuf); strip_beginning_whitespace(inbuf); strcpy(buf, inbuf); /* * Simple override. 
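 *
 * The .gdb file holds a single bare version string; for example
 * (hypothetical session, with this program built as "configure"):
 *
 *	$ echo 7.0 > .gdb
 *	$ ./configure -b
 *	.gdb configuration: 7.0
 *
 * A line matching none of the cases below falls out of the loop,
 * the file is rejected, and the built-in default (gdb-7.6) is used.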
*/ if (strcmp(buf, "5.3") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_5_3]; fprintf(stderr, ".gdb configuration: %s\n\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "6.0") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_6_0]; fprintf(stderr, ".gdb configuration: %s\n\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "6.1") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_6_1]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "7.0") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_7_0]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "7.3.1") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_7_3_1]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } if (strcmp(buf, "7.6") == 0) { fclose(fp); sp = &supported_gdb_versions[GDB_7_6]; fprintf(stderr, ".gdb configuration: %s\n", sp->GDB_VERSION_IN); return store_gdb_defaults(sp); } } fclose(fp); fprintf(stderr, ".gdb: rejected -- using default gdb\n\n"); return store_gdb_defaults(NULL); } struct supported_gdb_version * store_gdb_defaults(struct supported_gdb_version *sp) { if (!sp) sp = &supported_gdb_versions[default_gdb]; else fprintf(stderr, "WARNING: \"make clean\" may be required before rebuilding\n\n"); return sp; } void set_initial_target(struct supported_gdb_version *sp) { FILE *fp; char crash_target[512]; char buf[512]; target_data.initial_gdb_target = UNKNOWN; sprintf(crash_target, "%s/crash.target", &sp->GDB[strlen("GDB=")]); if (!file_exists(crash_target)) { if (target_data.target_as_param && file_exists(&sp->GDB[strlen("GDB=")])) { fprintf(stderr, "\nThe \"%s\" file does not exist.\n", crash_target); target_rebuild_instructions(sp, (char *)target_data.target_as_param); exit(1); } return; } if ((fp = fopen(crash_target, "r")) == NULL) { perror(crash_target); return; } if (!fgets(buf, 512, fp)) { perror(crash_target); fclose(fp); return; } fclose(fp); if (strncmp(buf, "X86_64", strlen("X86_64")) == 0) target_data.initial_gdb_target = X86_64; else if (strncmp(buf, "X86", strlen("X86")) == 0) target_data.initial_gdb_target = X86; else if (strncmp(buf, "ALPHA", strlen("ALPHA")) == 0) target_data.initial_gdb_target = ALPHA; else if (strncmp(buf, "PPC64", strlen("PPC64")) == 0) target_data.initial_gdb_target = PPC64; else if (strncmp(buf, "PPC", strlen("PPC")) == 0) target_data.initial_gdb_target = PPC; else if (strncmp(buf, "IA64", strlen("IA64")) == 0) target_data.initial_gdb_target = IA64; else if (strncmp(buf, "S390X", strlen("S390X")) == 0) target_data.initial_gdb_target = S390X; else if (strncmp(buf, "S390", strlen("S390")) == 0) target_data.initial_gdb_target = S390; else if (strncmp(buf, "ARM64", strlen("ARM64")) == 0) target_data.initial_gdb_target = ARM64; else if (strncmp(buf, "ARM", strlen("ARM")) == 0) target_data.initial_gdb_target = ARM; else if (strncmp(buf, "MIPS", strlen("MIPS")) == 0) target_data.initial_gdb_target = MIPS; else if (strncmp(buf, "SPARC64", strlen("SPARC64")) == 0) target_data.initial_gdb_target = SPARC64; } char * target_to_name(int target) { switch (target) { case X86: return("X86"); case ALPHA: return("ALPHA"); case PPC: return("PPC"); case IA64: return("IA64"); case S390: return("S390"); case S390X: return("S390X"); case PPC64: return("PPC64"); case X86_64: return("X86_64"); case ARM: return("ARM"); case ARM64: return("ARM64"); case MIPS: return("MIPS"); 
case SPARC64: return("SPARC64"); } return "UNKNOWN"; } int name_to_target(char *name) { if (strncmp(name, "X86_64", strlen("X86_64")) == 0) return X86_64; else if (strncmp(name, "x86_64", strlen("x86_64")) == 0) return X86_64; else if (strncmp(name, "X86", strlen("X86")) == 0) return X86; else if (strncmp(name, "x86", strlen("x86")) == 0) return X86; else if (strncmp(name, "ALPHA", strlen("ALPHA")) == 0) return ALPHA; else if (strncmp(name, "alpha", strlen("alpha")) == 0) return ALPHA; else if (strncmp(name, "PPC64", strlen("PPC64")) == 0) return PPC64; else if (strncmp(name, "ppc64", strlen("ppc64")) == 0) return PPC64; else if (strncmp(name, "ppc64le", strlen("ppc64le")) == 0) return PPC64; else if (strncmp(name, "PPC64LE", strlen("PPC64LE")) == 0) return PPC64; else if (strncmp(name, "PPC", strlen("PPC")) == 0) return PPC; else if (strncmp(name, "ppc", strlen("ppc")) == 0) return PPC; else if (strncmp(name, "IA64", strlen("IA64")) == 0) return IA64; else if (strncmp(name, "ia64", strlen("ia64")) == 0) return IA64; else if (strncmp(name, "S390X", strlen("S390X")) == 0) return S390X; else if (strncmp(name, "s390x", strlen("s390x")) == 0) return S390X; else if (strncmp(name, "S390", strlen("S390")) == 0) return S390; else if (strncmp(name, "s390", strlen("s390")) == 0) return S390; else if (strncmp(name, "ARM64", strlen("ARM64")) == 0) return ARM64; else if (strncmp(name, "arm64", strlen("arm64")) == 0) return ARM64; else if (strncmp(name, "aarch64", strlen("aarch64")) == 0) return ARM64; else if (strncmp(name, "ARM", strlen("ARM")) == 0) return ARM; else if (strncmp(name, "arm", strlen("arm")) == 0) return ARM; else if (strncmp(name, "mips", strlen("mips")) == 0) return MIPS; else if (strncmp(name, "MIPS", strlen("MIPS")) == 0) return MIPS; else if (strncmp(name, "sparc64", strlen("sparc64")) == 0) return SPARC64; return UNKNOWN; } char * get_extra_flags(char *filename, char *initial) { FILE *fp; char inbuf[512]; char buf[512]; if (!file_exists(filename)) return (initial ? initial : NULL); if ((fp = fopen(filename, "r")) == NULL) { perror(filename); return (initial ? initial : NULL); } if (initial) strcpy(buf, initial); else buf[0] = '\0'; while (fgets(inbuf, 512, fp)) { strip_linefeeds(inbuf); strip_beginning_whitespace(inbuf); strip_ending_whitespace(inbuf); if (inbuf[0] == '#') continue; if (strlen(inbuf)) { if (strlen(buf)) strcat(buf, " "); strcat(buf, inbuf); } } fclose(fp); if (strlen(buf)) return strdup(buf); else return NULL; } /* * Add extra compression libraries. If not already there, create * a CFLAGS.extra file and an LDFLAGS.extra file. * For lzo: * - enter -DSNAPPY in the CFLAGS.extra file * - enter -lsnappy in the LDFLAGS.extra file * * For snappy: * - enter -DLZO in the CFLAGS.extra file * - enter -llzo2 in the LDFLAGS.extra file. 
*/ void add_extra_lib(char *option) { int lzo, add_DLZO, add_llzo2; int snappy, add_DSNAPPY, add_lsnappy; char *cflags, *ldflags; FILE *fp_cflags, *fp_ldflags; char *mode; char inbuf[512]; lzo = add_DLZO = add_llzo2 = 0; snappy = add_DSNAPPY = add_lsnappy = 0; ldflags = get_extra_flags("LDFLAGS.extra", NULL); cflags = get_extra_flags("CFLAGS.extra", NULL); if (strcmp(option, "lzo") == 0) { lzo++; if (!cflags || !strstr(cflags, "-DLZO")) add_DLZO++; if (!ldflags || !strstr(ldflags, "-llzo2")) add_llzo2++; } if (strcmp(option, "snappy") == 0) { snappy++; if (!cflags || !strstr(cflags, "-DSNAPPY")) add_DSNAPPY++; if (!ldflags || !strstr(ldflags, "-lsnappy")) add_lsnappy++; } if ((lzo || snappy) && file_exists("diskdump.o") && (unlink("diskdump.o") < 0)) { perror("diskdump.o"); return; } mode = file_exists("CFLAGS.extra") ? "r+" : "w+"; if ((fp_cflags = fopen("CFLAGS.extra", mode)) == NULL) { perror("CFLAGS.extra"); return; } mode = file_exists("LDFLAGS.extra") ? "r+" : "w+"; if ((fp_ldflags = fopen("LDFLAGS.extra", mode)) == NULL) { perror("LDFLAGS.extra"); fclose(fp_cflags); return; } if (add_DLZO || add_DSNAPPY) { while (fgets(inbuf, 512, fp_cflags)) ; if (add_DLZO) fputs("-DLZO\n", fp_cflags); if (add_DSNAPPY) fputs("-DSNAPPY\n", fp_cflags); } if (add_llzo2 || add_lsnappy) { while (fgets(inbuf, 512, fp_ldflags)) ; if (add_llzo2) fputs("-llzo2\n", fp_ldflags); if (add_lsnappy) fputs("-lsnappy\n", fp_ldflags); } fclose(fp_cflags); fclose(fp_ldflags); } crash-7.2.8/xen_hyper_global_data.c0000664000000000000000000003305613614623427016025 0ustar rootroot/* * xen_hyper_global_data.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "defs.h" #ifdef XEN_HYPERVISOR_ARCH #include "xen_hyper_defs.h" /* * Global data for Xen hypervisor. 
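 *
 * Each table below is a zero-initialized singleton paired with a
 * short alias pointer (xhmachdep, xht, xhdit, xhdt, xhvct, xhpct,
 * xhscht, xhsymt) that the rest of the Xen hypervisor support code
 * uses to reference it tersely.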
*/ struct xen_hyper_machdep_table xen_hyper_machdep_table = { 0 }; struct xen_hyper_machdep_table *xhmachdep = &xen_hyper_machdep_table; struct xen_hyper_table xen_hyper_table = { 0 }; struct xen_hyper_table *xht = &xen_hyper_table; struct xen_hyper_dumpinfo_table xen_hyper_dumpinfo_table = { 0 }; struct xen_hyper_dumpinfo_table *xhdit = &xen_hyper_dumpinfo_table; struct xen_hyper_domain_table xen_hyper_domain_table = { 0 }; struct xen_hyper_domain_table *xhdt = &xen_hyper_domain_table; struct xen_hyper_vcpu_table xen_hyper_vcpu_table = { 0 }; struct xen_hyper_vcpu_table *xhvct = &xen_hyper_vcpu_table; struct xen_hyper_pcpu_table xen_hyper_pcpu_table = { 0 }; struct xen_hyper_pcpu_table *xhpct = &xen_hyper_pcpu_table; struct xen_hyper_sched_table xen_hyper_sched_table = { 0 }; struct xen_hyper_sched_table *xhscht = &xen_hyper_sched_table; struct xen_hyper_symbol_table_data xen_hyper_symbol_table_data = { 0 }; struct xen_hyper_symbol_table_data *xhsymt = &xen_hyper_symbol_table_data; /* * The following commands are for Xen hypervisor. */ struct command_table_entry xen_hyper_command_table[] = { {"*", cmd_pointer, help_pointer, 0}, {"alias", cmd_alias, help_alias, 0}, {"ascii", cmd_ascii, help_ascii, 0}, {"bt", cmd_bt, help_bt, 0}, {"dis", cmd_dis, help_dis, 0}, {"domain", xen_hyper_cmd_domain, xen_hyper_help_domain, REFRESH_TASK_TABLE}, {"doms", xen_hyper_cmd_doms, xen_hyper_help_doms, REFRESH_TASK_TABLE}, #if defined(X86) || defined(X86_64) {"dumpinfo",xen_hyper_cmd_dumpinfo, xen_hyper_help_dumpinfo,0}, #endif {"eval", cmd_eval, help_eval, 0}, {"exit", cmd_quit, help_exit, 0}, {"extend", cmd_extend, help_extend, 0}, {"gdb", cmd_gdb, help_gdb, 0}, {"help", xen_hyper_cmd_help, help_help, 0}, {"list", cmd_list, help__list, 0}, {"log", xen_hyper_cmd_log, xen_hyper_help_log, 0}, {"p", cmd_p, help_p, 0}, {"pcpus", xen_hyper_cmd_pcpus, xen_hyper_help_pcpus, 0}, {"pte", cmd_pte, help_pte, 0}, {"q", cmd_quit, help_quit, 0}, {"rd", cmd_rd, help_rd, 0}, {"repeat", cmd_repeat, help_repeat, 0}, {"sched", xen_hyper_cmd_sched, xen_hyper_help_sched, 0}, {"search", cmd_search, help_search, 0}, {"set", cmd_set, help_set, 0}, {"struct", cmd_struct, help_struct, 0}, {"sym", cmd_sym, help_sym, 0}, {"sys", xen_hyper_cmd_sys, xen_hyper_help_sys, 0}, {"test", cmd_test, NULL, HIDDEN_COMMAND}, {"union", cmd_union, help_union, 0}, {"vcpu", xen_hyper_cmd_vcpu, xen_hyper_help_vcpu, REFRESH_TASK_TABLE}, {"vcpus", xen_hyper_cmd_vcpus, xen_hyper_help_vcpus, REFRESH_TASK_TABLE}, {"whatis", cmd_whatis, help_whatis, 0}, {"wr", cmd_wr, help_wr, 0}, {(char *)NULL} }; /* * */ struct xen_hyper_offset_table xen_hyper_offset_table = { 0 }; struct xen_hyper_size_table xen_hyper_size_table = { 0 }; /* * help data */ char *xen_hyper_help_domain[] = { "domain", "display contents of domain struct", "[domain-id | domainp] ...", " This command displays contents of domain struct for selected, or all, domains", " domain-id a domain id.", " domainp a domain pointer.", NULL }; char *xen_hyper_help_doms[] = { "doms", "display domain status information", "[domain-id | domainp] ...", " This command displays domain status for selected, or all, domains" , " domain-id a domain id.", " domainp a domain pointer.", " ", " 1. the DOMAIN-ID.", " 2. the struct domain pointer.", " 3. the domain state", " (SF:fully shut down, SH:shutting down, DY:dying,", " CP:pause by controller software, PO:polling event channels,", " PA:pause by the hypervisor, RU:running).", " 4. 
the TYPE of domain", " (O:dom_io, X:dom_xen, I:idle domain, 0:domain 0, U:domain U).", " 5. displays max_pages member of domain.", " 6. displays tot_pages member of domain.", " 7. a number of vcpu that domain is assigned.", " 8. the shared_info pointer of domain.", " 9. frame containing list of mfns containing list of mfns" , " containing p2m.", " ", " The active domain on each CPU will be highlighted by an angle ", " bracket (\">\") preceding its information.", " The crashing domain on each CPU will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the domain status of all:\n", " %s> doms", " DID DOMAIN ST T MAXPAGE TOTPAGE VCPU SHARED_I P2M_MFN", " 32753 ffbf8080 RU O 0 0 0 0 ----", " 32754 ffbfa080 RU X 0 0 0 0 ----", " 32767 ffbfc080 RU I 0 0 2 0 ----", " >* 0 ff198080 RU 0 ffffffff 32900 2 ff194000 18d0", " 4 ffbee080 RU U 4000 4000 2 ff18d000 3eb92", " 5 ff186080 RU U 4000 4000 2 ff184000 298d3", " %s>", NULL }; char *xen_hyper_help_dumpinfo[] = { "dumpinfo", "display Xen dump information", "[-t | -r] [pcpu-id | enotep] ...", " This command displays Xen dump information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " enotep a ELF Note pointer.", " -t display time information.", " -r display register information.", NULL }; char *xen_hyper_help_log[] = { "log", "dump system message buffer", " ", " This command dumps the xen conring contents in chronological order." , " ", "EXAMPLES", " Dump the Xen message buffer:\n", " %s> log", " __ __ _____ ___ _ _ _", " \\ \\/ /___ _ __ |___ / / _ \\ _ _ _ __ ___| |_ __ _| |__ | | ___", " \\ // _ \\ '_ \\ |_ \\| | | |__| | | | '_ \\/ __| __/ _` | '_ \\| |/ _ \\", " / \\ __/ | | | ___) | |_| |__| |_| | | | \\__ \\ || (_| | |_) | | __/", " /_/\\_\\___|_| |_| |____(_)___/ \\__,_|_| |_|___/\\__\\__,_|_.__/|_|\\___|", " ", " http://www.cl.cam.ac.uk/netos/xen", " University of Cambridge Computer Laboratory", " ", " Xen version 3.0-unstable (damm@) (gcc version 3.4.6 (Gentoo 3.4.6-r1, ssp-3.4.5-1.0,", " pie-8.7.9)) Wed Dec 6 17:34:32 JST 2006", " Latest ChangeSet: unavailable", " ", " (XEN) Console output is synchronous.", " (XEN) Command line: 12733-i386-pae/xen.gz console=com1 sync_console conswitch=bb com1", " =115200,8n1,0x3f8 dom0_mem=480000 crashkernel=64M@32M", " (XEN) Physical RAM map:", " (XEN) 0000000000000000 - 0000000000098000 (usable)", " (XEN) 0000000000098000 - 00000000000a0000 (reserved)", " (XEN) 00000000000f0000 - 0000000000100000 (reserved)", " (XEN) 0000000000100000 - 000000003f7f0000 (usable)", " (XEN) 000000003f7f0000 - 000000003f7f3000 (ACPI NVS)", " (XEN) 000000003f7f3000 - 000000003f800000 (ACPI data)", " (XEN) 00000000e0000000 - 00000000f0000000 (reserved)", " (XEN) 00000000fec00000 - 0000000100000000 (reserved)", " (XEN) Kdump: 64MB (65536kB) at 0x2000000", " (XEN) System RAM: 1015MB (1039904kB)", " (XEN) ACPI: RSDP (v000 XPC ) @ 0x000f9250", " ...", NULL }; char *xen_hyper_help_pcpus[] = { "pcpus", "display physical cpu information", "[-r][-t] [pcpu-id | pcpup] ...", " This command displays physical cpu information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " pcpup a physical cpu pointer.", " cur-vcpu a current virtual cpu pointer.", " -r display register information.", " -t display init_tss information.", " ", " The crashing physical cpu will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the physical cpu status of all:\n", " %s> pcpus", " PCID PCPU CUR-VCPU", " 0 ff1a3fb4 ffbf9080", " * 1 ff1dbfb4 ffbf8080", " 
%s>", " ", " Show the physical cpu status of all with register information:\n", " %s> pcpus -r", " PCID PCPU CUR-VCPU", " * 0 ff1b7fb4 ffbef080", " Register information:", " struct cpu_user_regs {", " ebx = 0x0,", " ecx = 0xdcf4bed8,", " edx = 0xc0326887,", " esi = 0x63,", " edi = 0x0,", " ebp = 0xdcf4bee0,", " eax = 0x25,", " error_code = 0x6,", " entry_vector = 0xe,", " eip = 0xc01014a7,", " cs = 0x61,", " saved_upcall_mask = 0x0,", " _pad0 = 0x0,", " eflags = 0x202,", " esp = 0xdcf4bed0,", " ss = 0x69,", " _pad1 = 0x0,", " es = 0x7b,", " _pad2 = 0x0,", " ds = 0x7b,", " _pad3 = 0x0,", " fs = 0x0,", " _pad4 = 0x0,", " gs = 0x0,", " _pad5 = 0x0", " }", " ", " Show the physical cpu status of all with init_tss information:\n", " %s> pcpus -t", " PCID PCPU CUR-VCPU", " * 0 ff1b7fb4 ffbef080", " init_tss information:", " struct tss_struct {", " back_link = 0x0,", " __blh = 0x0,", " esp0 = 0xff1b7fe8,", " ss0 = 0xe010,", " __ss0h = 0x0,", " esp1 = 0xdcf4bff8,", " ss1 = 0x69,", " __ss1h = 0x0,", " esp2 = 0x0,", " ss2 = 0x0,", " __ss2h = 0x0,", " __cr3 = 0x0,", " eip = 0x0,", " eflags = 0x0,", " eax = 0x0,", " ecx = 0x0,", " edx = 0x0,", " ebx = 0x0,", " esp = 0x0,", " ebp = 0x0,", " esi = 0x0,", " edi = 0x0,", " es = 0x0,", " __esh = 0x0,", " cs = 0x0,", " __csh = 0x0,", " ss = 0x0,", " __ssh = 0x0,", " ds = 0x0,", " __dsh = 0x0,", " fs = 0x0,", " __fsh = 0x0,", " gs = 0x0,", " __gsh = 0x0,", " ldt = 0x0,", " __ldth = 0x0,", " trace = 0x0,", " bitmap = 0x8000,", " __cacheline_filler = \"\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\\000\"", " }", NULL }; char *xen_hyper_help_sched[] = { "pcpus", "display scheduler information", "[-v] [pcpu-id] ...", " This command displays scheduler information for selected, or all, cpus" , " pcpu-id a physical cpu id.", " -v display verbosely scheduler information.", " ", NULL }; char *xen_hyper_help_sys[] = { "sys", "system data", "[-c [name|number]] config", " This command displays system-specific data. If no arguments are entered,\n" " the same system data shown during %s invocation is shown.\n", "\nEXAMPLES", " Display essential system information:\n", " %s> sys", " DEBUG KERNEL: xen-syms", " DUMPFILE: vmcore", " CPUS: 2", " DOMAINS: 2", " MACHINE: Pentium III (Coppermine) (866 Mhz)", " MEMORY: 2 GB", " %s>", NULL }; char *xen_hyper_help_vcpu[] = { "vcpu", "display contents of vcpu struct", "[vcpup] ...", " This command displays contents of vcpu struct for selected, or all, vcpus", " vcpu-id a virtual cpu id.", " vcpup a virtual cpu pointer.", NULL }; char *xen_hyper_help_vcpus[] = { "vcpus", "display vcpu status information", "[-i domain-id vcpu-id | vcpup] ...", " This command displays vcpu status for selected, or all, vcpus" , " domain-id a domain id.", " vcpu-id a VCPU-ID.", " vcpup a hexadecimal struct vcpu pointer.", " -i specify vcpu id as an argument.", " ", " 1. the VCPU-ID.", " 2. the physical CPU-ID.", " 3. the struct vcpu pointer.", " 4. the vcpu state (RU, BL, OF).", " 5. the TYPE of domain that vcpu is assigned(I, 0, G).", " 6. the DOMAIN-ID of domain that vcpu is assigned.", " 7. 
the struct domain pointer of domain that vcpu is assigned.", " ", " The active vcpu on each CPU will be highlighted by an angle ", " bracket (\">\") preceding its information.", " The crashing vcpu on each CPU will be highlighted by an aster ", " (\"*\") preceding its information.", "\nEXAMPLES", " Show the vcpu status of all:\n", " %s> vcpus", " VCID PCID VCPU ST T DOMID DOMAIN", " 0 0 ffbfe080 RU I 32767 ffbfc080", " 1 1 ff1df080 RU I 32767 ffbfc080", " >* 0 0 ff195180 RU 0 0 ff198080", " > 1 1 ff190080 BL 0 0 ff198080", " 0 1 ff18a080 BL G 4 ffbee080", " 1 0 ff189080 BL G 4 ffbee080", " 0 1 ff1f3080 BL G 5 ff186080", " 1 0 ff1f2080 BL G 5 ff186080", " %s>", NULL }; struct task_context fake_tc = { 0 }; #endif crash-7.2.8/ia64.c0000775000000000000000000037264213614623427012270 0ustar rootroot/* ia64.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2013 David Anderson * Copyright (C) 2002-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef IA64 #include "defs.h" #include "xen_hyper_defs.h" #include static int ia64_verify_symbol(const char *, ulong, char); static int ia64_eframe_search(struct bt_info *); static void ia64_back_trace_cmd(struct bt_info *); static void ia64_old_unwind(struct bt_info *); static void ia64_old_unwind_init(void); static void try_old_unwind(struct bt_info *); static void ia64_dump_irq(int); static ulong ia64_processor_speed(void); static int ia64_vtop_4l(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_vtop(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_uvtop(struct task_context *, ulong, physaddr_t *, int); static int ia64_kvtop(struct task_context *, ulong, physaddr_t *, int); static ulong ia64_get_task_pgd(ulong); static ulong ia64_get_pc(struct bt_info *); static ulong ia64_get_sp(struct bt_info *); static ulong ia64_get_thread_ksp(ulong); static void ia64_get_stack_frame(struct bt_info *, ulong *, ulong *); static int ia64_translate_pte(ulong, void *, ulonglong); static ulong ia64_vmalloc_start(void); static int ia64_is_task_addr(ulong); static int ia64_dis_filter(ulong, char *, unsigned int); static void ia64_dump_switch_stack(ulong, ulong); static void ia64_cmd_mach(void); static int ia64_get_smp_cpus(void); static void ia64_display_machine_stats(void); static void ia64_display_cpu_data(unsigned int); static void ia64_display_memmap(void); static void ia64_create_memmap(void); static ulong check_mem_limit(void); static int ia64_verify_paddr(uint64_t); static int ia64_available_memory(struct efi_memory_desc_t *); static void ia64_post_init(void); static ulong ia64_in_per_cpu_mca_stack(void); static struct line_number_hook ia64_line_number_hooks[]; static ulong ia64_get_stackbase(ulong); static ulong ia64_get_stacktop(ulong); static void parse_cmdline_args(void); static void ia64_calc_phys_start(void); static int ia64_get_kvaddr_ranges(struct vaddr_range *); struct unw_frame_info; static void dump_unw_frame_info(struct unw_frame_info *); static int old_unw_unwind(struct 
unw_frame_info *); static void unw_init_from_blocked_task(struct unw_frame_info *, ulong); static ulong ia64_rse_slot_num(ulong *); static ulong *ia64_rse_skip_regs(ulong *, long); static ulong *ia64_rse_rnat_addr(ulong *); static ulong rse_read_reg(struct unw_frame_info *, int, int *); static void rse_function_params(struct unw_frame_info *, char *); static int ia64_vtop_4l_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_vtop_xen_wpt(ulong, physaddr_t *paddr, ulong *pgd, int, int); static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *); static int ia64_xendump_p2m_create(struct xendump_data *); static void ia64_debug_dump_page(FILE *, char *, char *); static char *ia64_xendump_load_page(ulong, struct xendump_data *); static int ia64_xendump_page_index(ulong, struct xendump_data *); static ulong ia64_xendump_panic_task(struct xendump_data *); static void ia64_get_xendump_regs(struct xendump_data *, struct bt_info *, ulong *, ulong *); static void ia64_init_hyper(int); struct machine_specific ia64_machine_specific = { 0 }; void ia64_init(int when) { struct syment *sp, *spn; if (XEN_HYPER_MODE()) { ia64_init_hyper(when); return; } switch (when) { case SETUP_ENV: #if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); #endif #if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); #endif break; case PRE_SYMTAB: machdep->verify_symbol = ia64_verify_symbol; machdep->machspec = &ia64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); switch (machdep->pagesize) { case 4096: machdep->stacksize = (power(2, 3) * PAGESIZE()); break; case 8192: machdep->stacksize = (power(2, 2) * PAGESIZE()); break; case 16384: machdep->stacksize = (power(2, 1) * PAGESIZE()); break; case 65536: machdep->stacksize = (power(2, 0) * PAGESIZE()); break; default: machdep->stacksize = 32*1024; break; } if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = ia64_verify_paddr; machdep->get_kvaddr_ranges = ia64_get_kvaddr_ranges; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->machspec->phys_start = UNKNOWN_PHYS_START; if (machdep->cmdline_args[0]) parse_cmdline_args(); if (ACTIVE()) machdep->flags |= DEVMEMRD; break; case PRE_GDB: if (pc->flags & KERNEL_DEBUG_QUERY) return; /* * Until the kernel core dump and va_server library code * do the right thing with respect to the configured page size, * try to recognize a fatal inequity between the compiled-in * page size and the page size used by the kernel. 
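 *
 * Concretely: empty_zero_page spans exactly one kernel page, so if
 * the next symbol starts, say, 16384 bytes later while this binary
 * was built with 4096-byte pages, the two page sizes cannot match
 * and the session is aborted below with a FATAL error.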
*/ if ((sp = symbol_search("empty_zero_page")) && (spn = next_symbol(NULL, sp)) && ((spn->value - sp->value) != PAGESIZE())) error(FATAL, "compiled-in page size: %d (apparent) kernel page size: %ld\n", PAGESIZE(), spn->value - sp->value); machdep->kvbase = KERNEL_VMALLOC_BASE; machdep->identity_map_base = KERNEL_CACHED_BASE; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ia64_eframe_search; machdep->back_trace = ia64_back_trace_cmd; machdep->processor_speed = ia64_processor_speed; machdep->uvtop = ia64_uvtop; machdep->kvtop = ia64_kvtop; machdep->get_task_pgd = ia64_get_task_pgd; machdep->dump_irq = ia64_dump_irq; machdep->get_stack_frame = ia64_get_stack_frame; machdep->get_stackbase = ia64_get_stackbase; machdep->get_stacktop = ia64_get_stacktop; machdep->translate_pte = ia64_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = ia64_vmalloc_start; machdep->is_task_addr = ia64_is_task_addr; machdep->dis_filter = ia64_dis_filter; machdep->cmd_mach = ia64_cmd_mach; machdep->get_smp_cpus = ia64_get_smp_cpus; machdep->line_number_hooks = ia64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->show_interrupts = generic_show_interrupts; if ((sp = symbol_search("_stext"))) { machdep->machspec->kernel_region = VADDR_REGION(sp->value); machdep->machspec->kernel_start = sp->value; } else { machdep->machspec->kernel_region = KERNEL_CACHED_REGION; machdep->machspec->kernel_start = KERNEL_CACHED_BASE; } if (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION) { machdep->machspec->vmalloc_start = machdep->machspec->kernel_start + GIGABYTES((ulong)(4)); if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) ia64_calc_phys_start(); } else machdep->machspec->vmalloc_start = KERNEL_VMALLOC_BASE; machdep->xen_kdump_p2m_create = ia64_xen_kdump_p2m_create; machdep->xendump_p2m_create = ia64_xendump_p2m_create; machdep->xendump_panic_task = ia64_xendump_panic_task; machdep->get_xendump_regs = ia64_get_xendump_regs; break; case POST_GDB: STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); STRUCT_SIZE_INIT(switch_stack, "switch_stack"); MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, "switch_stack", "ar_bspstore"); MEMBER_OFFSET_INIT(switch_stack_ar_pfs, "switch_stack", "ar_pfs"); MEMBER_OFFSET_INIT(switch_stack_ar_rnat, "switch_stack", "ar_rnat"); MEMBER_OFFSET_INIT(switch_stack_pr, "switch_stack", "pr"); MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); MEMBER_OFFSET_INIT(cpuinfo_ia64_unimpl_va_mask, "cpuinfo_ia64", "unimpl_va_mask"); MEMBER_OFFSET_INIT(cpuinfo_ia64_unimpl_pa_mask, "cpuinfo_ia64", "unimpl_pa_mask"); if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); else if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else if (symbol_exists("_irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "_irq_desc", NULL, 0); if (!machdep->hz) machdep->hz = 1024; machdep->section_size_bits = _SECTION_SIZE_BITS; machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; ia64_create_memmap(); break; case POST_INIT: ia64_post_init(); break; case LOG_ONLY: machdep->machspec = &ia64_machine_specific; machdep->machspec->kernel_start = kt->vmcoreinfo._stext_SYMBOL; 
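		/*
		 * Log-only mode: derive the region and (below) the
		 * vmalloc start from the vmcoreinfo-supplied _stext
		 * value alone, since no kernel namelist has been read
		 * at this point.
		 */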
machdep->machspec->kernel_region = VADDR_REGION(kt->vmcoreinfo._stext_SYMBOL); if (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION) { machdep->machspec->vmalloc_start = machdep->machspec->kernel_start + GIGABYTES((ulong)(4)); ia64_calc_phys_start(); } break; } } /* * --machdep defaults to the physical start location. * * Otherwise, it's got to be a "item=value" string, separated * by commas if more than one is passed in. */ void parse_cmdline_args(void) { int index, i, c, errflag; char *p; char buf[BUFSIZE]; char *arglist[MAXARGS]; ulong value; struct machine_specific *ms; int vm_flag; ms = &ia64_machine_specific; vm_flag = 0; for (index = 0; index < MAX_MACHDEP_ARGS; index++) { if (!machdep->cmdline_args[index]) break; if (!strstr(machdep->cmdline_args[index], "=")) { errflag = 0; value = htol(machdep->cmdline_args[index], RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->phys_start = value; error(NOTE, "setting phys_start to: 0x%lx\n", ms->phys_start); } else error(WARNING, "ignoring --machdep option: %s\n\n", machdep->cmdline_args[index]); continue; } strcpy(buf, machdep->cmdline_args[index]); for (p = buf; *p; p++) { if (*p == ',') *p = ' '; } c = parse_line(buf, arglist); for (i = 0; i < c; i++) { errflag = 0; if (STRNEQ(arglist[i], "phys_start=")) { p = arglist[i] + strlen("phys_start="); if (strlen(p)) { value = htol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->phys_start = value; error(NOTE, "setting phys_start to: 0x%lx\n", ms->phys_start); continue; } } } else if (STRNEQ(arglist[i], "init_stack_size=")) { p = arglist[i] + strlen("init_stack_size="); if (strlen(p)) { value = stol(p, RETURN_ON_ERROR|QUIET, &errflag); if (!errflag) { ms->ia64_init_stack_size = (int)value; error(NOTE, "setting init_stack_size to: 0x%x (%d)\n", ms->ia64_init_stack_size, ms->ia64_init_stack_size); continue; } } } else if (STRNEQ(arglist[i], "vm=")) { vm_flag++; p = arglist[i] + strlen("vm="); if (strlen(p)) { if (STREQ(p, "4l")) { machdep->flags |= VM_4_LEVEL; continue; } } } error(WARNING, "ignoring --machdep option: %s\n", arglist[i]); } if (vm_flag) { switch (machdep->flags & (VM_4_LEVEL)) { case VM_4_LEVEL: error(NOTE, "using 4-level pagetable\n"); c++; break; default: error(WARNING, "invalid vm= option\n"); c++; machdep->flags &= ~(VM_4_LEVEL); break; } } if (c) fprintf(fp, "\n"); } } int ia64_in_init_stack(ulong addr) { ulong init_stack_addr; if (!symbol_exists("ia64_init_stack")) return FALSE; /* * ia64_init_stack could be aliased to region 5 */ init_stack_addr = ia64_VTOP(symbol_value("ia64_init_stack")); addr = ia64_VTOP(addr); if ((addr < init_stack_addr) || (addr >= (init_stack_addr+machdep->machspec->ia64_init_stack_size))) return FALSE; return TRUE; } static ulong ia64_in_per_cpu_mca_stack(void) { int plen, i; ulong flag; ulong vaddr, paddr, stackbase, stacktop; ulong *__per_cpu_mca; struct task_context *tc; tc = CURRENT_CONTEXT(); if (STRNEQ(CURRENT_COMM(), "INIT")) flag = INIT; else if (STRNEQ(CURRENT_COMM(), "MCA")) flag = MCA; else return 0; if (!symbol_exists("__per_cpu_mca") || !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || (plen < kt->cpus)) return 0; vaddr = SWITCH_STACK_ADDR(CURRENT_TASK()); if (VADDR_REGION(vaddr) != KERNEL_CACHED_REGION) return 0; paddr = ia64_VTOP(vaddr); __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * kt->cpus); if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, sizeof(ulong) * kt->cpus, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) return 0; if (CRASHDEBUG(1)) { for (i = 0; i < kt->cpus; i++) { fprintf(fp, 
"__per_cpu_mca[%d]: %lx\n", i, __per_cpu_mca[i]); } } stackbase = __per_cpu_mca[tc->processor]; stacktop = stackbase + (STACKSIZE() * 2); FREEBUF(__per_cpu_mca); if ((paddr >= stackbase) && (paddr < stacktop)) return flag; else return 0; } void ia64_dump_machdep_table(ulong arg) { int i, others, verbose; struct machine_specific *ms; verbose = FALSE; ms = &ia64_machine_specific; if (arg) { switch (arg) { default: case 1: verbose = TRUE; break; case 2: if (machdep->flags & NEW_UNWIND) { machdep->flags &= ~(NEW_UNWIND|NEW_UNW_V1|NEW_UNW_V2|NEW_UNW_V3); machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; ms->dump_unwind_stats = NULL; ms->unwind_debug = NULL; } else { machdep->flags &= ~OLD_UNWIND; machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("pt_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } ms->unwind_init(); return; case 3: if (machdep->flags & NEW_UNWIND) ms->unwind_debug(arg); return; } } others = 0; fprintf(fp, " flags: %lx (", machdep->flags); /* future flags tests here */ if (machdep->flags & NEW_UNWIND) fprintf(fp, "%sNEW_UNWIND", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V1) fprintf(fp, "%sNEW_UNW_V1", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V2) fprintf(fp, "%sNEW_UNW_V2", others++ ? "|" : ""); if (machdep->flags & NEW_UNW_V3) fprintf(fp, "%sNEW_UNW_V3", others++ ? "|" : ""); if (machdep->flags & OLD_UNWIND) fprintf(fp, "%sOLD_UNWIND", others++ ? "|" : ""); if (machdep->flags & UNW_OUT_OF_SYNC) fprintf(fp, "%sUNW_OUT_OF_SYNC", others++ ? "|" : ""); if (machdep->flags & UNW_READ) fprintf(fp, "%sUNW_READ", others++ ? "|" : ""); if (machdep->flags & UNW_PTREGS) fprintf(fp, "%sUNW_PTREGS", others++ ? "|" : ""); if (machdep->flags & UNW_R0) fprintf(fp, "%sUNW_R0", others++ ? "|" : ""); if (machdep->flags & MEM_LIMIT) fprintf(fp, "%sMEM_LIMIT", others++ ? "|" : ""); if (machdep->flags & DEVMEMRD) fprintf(fp, "%sDEVMEMRD", others++ ? "|" : ""); if (machdep->flags & INIT) fprintf(fp, "%sINIT", others++ ? "|" : ""); if (machdep->flags & MCA) fprintf(fp, "%sMCA", others++ ? "|" : ""); if (machdep->flags & VM_4_LEVEL) fprintf(fp, "%sVM_4_LEVEL", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %d\n", machdep->hz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: ia64_eframe_search()\n"); fprintf(fp, " back_trace: ia64_back_trace_cmd()\n"); fprintf(fp, "get_processor_speed: ia64_processor_speed()\n"); fprintf(fp, " uvtop: ia64_uvtop()\n"); fprintf(fp, " kvtop: ia64_kvtop()\n"); fprintf(fp, " get_task_pgd: ia64_get_task_pgd()\n"); fprintf(fp, " dump_irq: ia64_dump_irq()\n"); fprintf(fp, " get_stack_frame: ia64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: ia64_get_stackbase()\n"); fprintf(fp, " get_stacktop: ia64_get_stacktop()\n"); fprintf(fp, " translate_pte: ia64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: ia64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: ia64_is_task_addr()\n"); fprintf(fp, " verify_symbol: ia64_verify_symbol()\n"); fprintf(fp, " dis_filter: ia64_dis_filter()\n"); fprintf(fp, " cmd_mach: ia64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: ia64_get_smp_cpus()\n"); fprintf(fp, " get_kvaddr_ranges: ia64_get_kvaddr_ranges()\n"); fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n"); fprintf(fp, " verify_paddr: %s()\n", (machdep->verify_paddr == ia64_verify_paddr) ? "ia64_verify_paddr" : "generic_verify_paddr"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " init_kernel_pgd: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: ia64_xen_kdump_p2m_create()\n"); fprintf(fp, " xendump_p2m_create: ia64_xendump_p2m_create()\n"); fprintf(fp, " xendump_panic_task: ia64_xendump_panic_task()\n"); fprintf(fp, " get_xendump_regs: ia64_get_xendump_regs()\n"); fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n"); fprintf(fp, " line_number_hooks: ia64_line_number_hooks\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pud_read: %lx\n", machdep->last_pud_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pud: %lx\n", (ulong)machdep->pud); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? 
machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); fprintf(fp, " machspec: ia64_machine_specific\n"); fprintf(fp, " cpu_data_address: %lx\n", machdep->machspec->cpu_data_address); fprintf(fp, " unimpl_va_mask: %lx\n", machdep->machspec->unimpl_va_mask); fprintf(fp, " unimpl_pa_mask: %lx\n", machdep->machspec->unimpl_pa_mask); fprintf(fp, " unw: %lx\n", (ulong)machdep->machspec->unw); fprintf(fp, " unw_tables_offset: %ld\n", machdep->machspec->unw_tables_offset); fprintf(fp, " unw_kernel_table_offset: %ld %s\n", machdep->machspec->unw_kernel_table_offset, machdep->machspec->unw_kernel_table_offset ? "" : "(unused)"); fprintf(fp, " unw_pt_regs_offsets: %ld %s\n", machdep->machspec->unw_pt_regs_offsets, machdep->machspec->unw_pt_regs_offsets ? "" : "(unused)"); fprintf(fp, " script_index: %d\n", machdep->machspec->script_index); fprintf(fp, " script_cache: %lx%s", (ulong)machdep->machspec->script_cache, machdep->flags & OLD_UNWIND ? "\n" : " "); if (machdep->flags & NEW_UNWIND) ms->dump_unwind_stats(); if (!(machdep->flags & (NEW_UNWIND|OLD_UNWIND))) fprintf(fp, "\n"); fprintf(fp, " mem_limit: %lx\n", machdep->machspec->mem_limit); fprintf(fp, " kernel_region: %ld\n", machdep->machspec->kernel_region); fprintf(fp, " kernel_start: %lx\n", machdep->machspec->kernel_start); fprintf(fp, " phys_start: %lx (%lx)\n", machdep->machspec->phys_start, machdep->machspec->phys_start & KERNEL_TR_PAGE_MASK); fprintf(fp, " vmalloc_start: %lx\n", machdep->machspec->vmalloc_start); fprintf(fp, " ia64_memmap: %lx\n", (ulong)machdep->machspec->ia64_memmap); fprintf(fp, " efi_memmap_size: %ld\n", (ulong)machdep->machspec->efi_memmap_size); fprintf(fp, " efi_memdesc_size: %ld\n", (ulong)machdep->machspec->efi_memdesc_size); fprintf(fp, " unwind_init: "); if (ms->unwind_init == unwind_init_v1) fprintf(fp, "unwind_init_v1()\n"); else if (ms->unwind_init == unwind_init_v2) fprintf(fp, "unwind_init_v2()\n"); else if (ms->unwind_init == unwind_init_v3) fprintf(fp, "unwind_init_v3()\n"); else if (ms->unwind_init == ia64_old_unwind_init) fprintf(fp, "ia64_old_unwind_init()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind_init); fprintf(fp, " unwind: "); if (ms->unwind == unwind_v1) fprintf(fp, "unwind_v1()\n"); else if (ms->unwind == unwind_v2) fprintf(fp, "unwind_v2()\n"); else if (ms->unwind == unwind_v3) fprintf(fp, "unwind_v3()\n"); else if (ms->unwind == ia64_old_unwind) fprintf(fp, "ia64_old_unwind()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind); fprintf(fp, " dump_unwind_stats: "); if (ms->dump_unwind_stats == dump_unwind_stats_v1) fprintf(fp, "dump_unwind_stats_v1()\n"); else if (ms->dump_unwind_stats == dump_unwind_stats_v2) fprintf(fp, "dump_unwind_stats_v2()\n"); else if (ms->dump_unwind_stats == dump_unwind_stats_v3) fprintf(fp, "dump_unwind_stats_v3()\n"); else fprintf(fp, "%lx\n", (ulong)ms->dump_unwind_stats); fprintf(fp, " unwind_debug: "); if (ms->unwind_debug == unwind_debug_v1) fprintf(fp, "unwind_debug_v1()\n"); else if (ms->unwind_debug == unwind_debug_v2) fprintf(fp, "unwind_debug_v2()\n"); else if (ms->unwind_debug == unwind_debug_v3) fprintf(fp, "unwind_debug_v3()\n"); else fprintf(fp, "%lx\n", (ulong)ms->unwind_debug); fprintf(fp, " ia64_init_stack_size: %d\n", ms->ia64_init_stack_size); if (verbose) ia64_display_memmap(); } /* * Keep or reject a symbol from the namelist. 
*/ static int ia64_verify_symbol(const char *name, ulong value, char type) { ulong region; if (!name || !strlen(name)) return FALSE; if (XEN_HYPER_MODE() && STREQ(name, "__per_cpu_shift")) return TRUE; if (CRASHDEBUG(8)) fprintf(fp, "%016lx %s\n", value, name); // if (STREQ(name, "phys_start") && type == 'A') // if (machdep->machspec->phys_start == UNKNOWN_PHYS_START) // machdep->machspec->phys_start = value; region = VADDR_REGION(value); return (((region == KERNEL_CACHED_REGION) || (region == KERNEL_VMALLOC_REGION))); } /* * Look for likely exception frames in a stack. */ static int ia64_eframe_search(struct bt_info *bt) { return(error(FATAL, "ia64_eframe_search: not available for this architecture\n")); } /* * Unroll a kernel stack. */ #define BT_SWITCH_STACK BT_SYMBOLIC_ARGS static void ia64_back_trace_cmd(struct bt_info *bt) { struct machine_specific *ms = &ia64_machine_specific; if (bt->flags & BT_SWITCH_STACK) ia64_dump_switch_stack(bt->task, 0); if (machdep->flags & UNW_OUT_OF_SYNC) error(FATAL, "kernel and %s unwind data structures are out of sync\n", pc->program_name); ms->unwind(bt); if (bt->flags & BT_UNWIND_ERROR) try_old_unwind(bt); } /* * Dump the IRQ table. */ static void ia64_dump_irq(int irq) { if (symbol_exists("irq_desc") || symbol_exists("_irq_desc") || kernel_symbol_exists("irq_desc_ptrs")) { machdep->dump_irq = generic_dump_irq; return(generic_dump_irq(irq)); } error(FATAL, "ia64_dump_irq: neither irq_desc or _irq_desc exist\n"); } /* * Calculate and return the speed of the processor. */ static ulong ia64_processor_speed(void) { ulong mhz, proc_freq; int bootstrap_processor; if (machdep->mhz) return(machdep->mhz); mhz = 0; bootstrap_processor = 0; if (!machdep->machspec->cpu_data_address || !VALID_STRUCT(cpuinfo_ia64) || !VALID_MEMBER(cpuinfo_ia64_proc_freq)) return (machdep->mhz = mhz); if (symbol_exists("bootstrap_processor")) get_symbol_data("bootstrap_processor", sizeof(int), &bootstrap_processor); if (bootstrap_processor == -1) bootstrap_processor = 0; readmem(machdep->machspec->cpu_data_address + OFFSET(cpuinfo_ia64_proc_freq), KVADDR, &proc_freq, sizeof(ulong), "cpuinfo_ia64 proc_freq", FAULT_ON_ERROR); mhz = proc_freq/1000000; return (machdep->mhz = mhz); } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 4 level page table. 
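 *
 *  The walk below is the standard four-step descent; a condensed sketch
 *  of what follows (error handling and verbose output omitted):
 *
 *      pgd entry -> page_upper  (PUD)
 *      pud entry -> page_middle (PMD)
 *      pmd entry -> page_table  (PTE)
 *      pte       -> *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr)
 *
 *  Each intermediate entry is masked with _PFN_MASK to recover the
 *  physical page frame of the next-level table, and PTOV() maps that
 *  frame back into the cached kernel region so it can be read.  For
 *  user addresses the top-level index additionally folds in the region
 *  bits (the "offset |= region << (PAGESHIFT() - 6)" step), since each
 *  region owns its own slice of the pgd.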
*/ static int ia64_vtop_4l(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { ulong *page_dir; ulong *page_upper; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pud_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); if (verbose) fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); if (!(pud_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P | _PAGE_PROTNONE))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 3 level page table. 
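 *
 *  This is the same descent as ia64_vtop_4l() minus the PUD step: the
 *  top-level index is taken with PGDIR_SHIFT_3L and the pgd entry leads
 *  directly to the PMD page.  Everything else -- the _PFN_MASK/PTOV()
 *  recovery of each table and the final
 *
 *      *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr)
 *
 *  step -- is identical.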
*/ static int ia64_vtop(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT_3L) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P | _PAGE_PROTNONE))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * swapper_pg_dir, making it irrelevant in this processor's case. */ static int ia64_uvtop(struct task_context *tc, ulong uvaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (IS_KVADDR(uvaddr)) return ia64_kvtop(tc, uvaddr, paddr, verbose); if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l_xen_wpt(uvaddr, paddr, pgd, verbose, 1); else return ia64_vtop_xen_wpt(uvaddr, paddr, pgd, verbose, 1); } else { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l(uvaddr, paddr, pgd, verbose, 1); else return ia64_vtop(uvaddr, paddr, pgd, verbose, 1); } } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
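 *
 *  Region-mapped addresses never touch the page tables.  A sketch of the
 *  dispatch performed below, with illustrative (hypothetical) addresses,
 *  assuming the usual 0xc000000000000000 uncached base:
 *
 *      0xc000000000100000  uncached region  ->  kvaddr - KERNEL_UNCACHED_BASE
 *      0xe000000000100000  cached region    ->  ia64_VTOP(kvaddr)
 *      0xa000000200050000  vmalloc region   ->  full page-table walk
 *
 *  Only the vmalloc-region case falls through to ia64_vtop_4l()/ia64_vtop().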
*/ static int ia64_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong *pgd; if (!IS_KVADDR(kvaddr)) return FALSE; if (!vt->vmalloc_start) { *paddr = ia64_VTOP(kvaddr); return TRUE; } switch (VADDR_REGION(kvaddr)) { case KERNEL_UNCACHED_REGION: *paddr = kvaddr - KERNEL_UNCACHED_BASE; if (verbose) fprintf(fp, "[UNCACHED MEMORY]\n"); return TRUE; case KERNEL_CACHED_REGION: *paddr = ia64_VTOP(kvaddr); if (verbose) fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n"); return TRUE; case KERNEL_VMALLOC_REGION: if (ia64_IS_VMALLOC_ADDR(kvaddr)) break; if ((kvaddr < machdep->machspec->kernel_start) && (machdep->machspec->kernel_region == KERNEL_VMALLOC_REGION)) { *paddr = PADDR_NOT_AVAILABLE; return FALSE; } *paddr = ia64_VTOP(kvaddr); if (verbose) fprintf(fp, "[MAPPED IN TRANSLATION REGISTER]\n"); return TRUE; } if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); if (XEN() && (kt->xen_flags & WRITABLE_PAGE_TABLES)) { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l_xen_wpt(kvaddr, paddr, pgd, verbose, 0); else return ia64_vtop_xen_wpt(kvaddr, paddr, pgd, verbose, 0); } else { if (machdep->flags & VM_4_LEVEL) return ia64_vtop_4l(kvaddr, paddr, pgd, verbose, 0); else return ia64_vtop(kvaddr, paddr, pgd, verbose, 0); } } /* * Even though thread_info structs are used in 2.6, they * are not the stack base. (until further notice...) */ static ulong ia64_get_stackbase(ulong task) { return (task); } static ulong ia64_get_stacktop(ulong task) { return (ia64_get_stackbase(task) + STACKSIZE()); } /* * Get the relevant page directory pointer from a task structure. */ static ulong ia64_get_task_pgd(ulong task) { return (error(FATAL, "ia64_get_task_pgd: N/A\n")); } static void ia64_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { if (pcp) *pcp = ia64_get_pc(bt); if (spp) *spp = ia64_get_sp(bt); } /* * Return the kernel switch_stack b0 value. */ static ulong ia64_get_pc(struct bt_info *bt) { ulong b0; readmem(SWITCH_STACK_ADDR(bt->task) + OFFSET(switch_stack_b0), KVADDR, &b0, sizeof(void *), "switch_stack b0", FAULT_ON_ERROR); return b0; } /* * Return the kernel switch_stack ar_bspstore value. * If it's "bt -t" request, calculate the register backing store offset. */ static ulong ia64_get_sp(struct bt_info *bt) { ulong bspstore; readmem(SWITCH_STACK_ADDR(bt->task) + OFFSET(switch_stack_ar_bspstore), KVADDR, &bspstore, sizeof(void *), "switch_stack ar_bspstore", FAULT_ON_ERROR); if (bt->flags & (BT_TEXT_SYMBOLS|BT_TEXT_SYMBOLS_PRINT|BT_TEXT_SYMBOLS_NOPRINT)) { bspstore = bt->task + SIZE(task_struct); if (tt->flags & THREAD_INFO) bspstore += SIZE(thread_info); bspstore = roundup(bspstore, sizeof(ulong)); } return bspstore; } /* * Get the ksp out of the task's thread_struct */ static ulong ia64_get_thread_ksp(ulong task) { ulong ksp; if (XEN_HYPER_MODE()) { readmem(task + XEN_HYPER_OFFSET(vcpu_thread_ksp), KVADDR, &ksp, sizeof(void *), "vcpu thread ksp", FAULT_ON_ERROR); } else { readmem(task + OFFSET(task_struct_thread_ksp), KVADDR, &ksp, sizeof(void *), "thread_struct ksp", FAULT_ON_ERROR); } return ksp; } /* * Return the switch_stack structure address of a task. */ ulong ia64_get_switch_stack(ulong task) { ulong sw; if (LKCD_DUMPFILE() && (sw = get_lkcd_switch_stack(task))) return sw; /* * debug only: get panic switch_stack from the ELF header. 
 */
	if (CRASHDEBUG(3) && NETDUMP_DUMPFILE() &&
	    (sw = get_netdump_switch_stack(task)))
		return sw;

	if (DISKDUMP_DUMPFILE() &&
	    (sw = get_diskdump_switch_stack(task)))
		return sw;

	return (ia64_get_thread_ksp((ulong)(task)) + 16);
}

/*
 *  Translate a PTE, returning TRUE if the page is _PAGE_P.
 *  If a physaddr pointer is passed in, don't print anything.
 */
static int
ia64_translate_pte(ulong pte, void *physaddr, ulonglong unused)
{
	int c, len1, len2, len3, others, page_present;
	char buf[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char ptebuf[BUFSIZE];
	char physbuf[BUFSIZE];
	char *arglist[MAXARGS];
	char *ptr;
	ulong paddr;

	paddr = pte & _PFN_MASK;
	page_present = !!(pte & (_PAGE_P | _PAGE_PROTNONE));

	if (physaddr) {
		*((ulong *)physaddr) = paddr;
		return page_present;
	}

	sprintf(ptebuf, "%lx", pte);
	len1 = MAX(strlen(ptebuf), strlen("PTE"));
	fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE"));

	if (!page_present && pte) {
		swap_location(pte, buf);
		if ((c = parse_line(buf, arglist)) != 3)
			error(FATAL, "cannot determine swap location\n");

		len2 = MAX(strlen(arglist[0]), strlen("SWAP"));
		len3 = MAX(strlen(arglist[2]), strlen("OFFSET"));

		fprintf(fp, "%s %s\n",
			mkstring(buf2, len2, CENTER|LJUST, "SWAP"),
			mkstring(buf3, len3, CENTER|LJUST, "OFFSET"));

		strcpy(buf2, arglist[0]);
		strcpy(buf3, arglist[2]);
		fprintf(fp, "%s %s %s\n",
			mkstring(ptebuf, len1, CENTER|RJUST, NULL),
			mkstring(buf2, len2, CENTER|RJUST, NULL),
			mkstring(buf3, len3, CENTER|RJUST, NULL));

		return page_present;
	}

	sprintf(physbuf, "%lx", paddr);
	len2 = MAX(strlen(physbuf), strlen("PHYSICAL"));
	fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL"));

	fprintf(fp, "FLAGS\n");

	fprintf(fp, "%s %s ",
		mkstring(ptebuf, len1, CENTER|RJUST, NULL),
		mkstring(physbuf, len2, CENTER|RJUST, NULL));

	fprintf(fp, "(");
	others = 0;

	if (pte) {
		if (pte & _PAGE_P)
			fprintf(fp, "%sP", others++ ? "|" : "");
		switch (pte & _PAGE_MA_MASK) {
		case _PAGE_MA_WB:
			ptr = "MA_WB";
			break;
		case _PAGE_MA_UC:
			ptr = "MA_UC";
			break;
		case _PAGE_MA_UCE:
			ptr = "MA_UCE";
			break;
		case _PAGE_MA_WC:
			ptr = "MA_WC";
			break;
		case _PAGE_MA_NAT:
			ptr = "MA_NAT";
			break;
		case (0x1 << 2):
			ptr = "MA_UC";
			break;
		default:
			ptr = "MA_RSV";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		switch (pte & _PAGE_PL_MASK) {
		case _PAGE_PL_0:
			ptr = "PL_0";
			break;
		case _PAGE_PL_1:
			ptr = "PL_1";
			break;
		case _PAGE_PL_2:
			ptr = "PL_2";
			break;
		case _PAGE_PL_3:
			ptr = "PL_3";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		switch (pte & _PAGE_AR_MASK) {
		case _PAGE_AR_R:
			ptr = "AR_R";
			break;
		case _PAGE_AR_RX:
			ptr = "AR_RX";
			break;
		case _PAGE_AR_RW:
			ptr = "AR_RW";
			break;
		case _PAGE_AR_RWX:
			ptr = "AR_RWX";
			break;
		case _PAGE_AR_R_RW:
			ptr = "AR_R_RW";
			break;
		case _PAGE_AR_RX_RWX:
			ptr = "AR_RX_RWX";
			break;
		case _PAGE_AR_RWX_RW:
			ptr = "AR_RWX_RW";
			break;
		case _PAGE_AR_X_RX:
			ptr = "AR_X_RX";
			break;
		}
		fprintf(fp, "%s%s", others++ ? "|" : "", ptr);
		if (pte & _PAGE_A)
			fprintf(fp, "%sA", others++ ? "|" : "");
		if (pte & _PAGE_D)
			fprintf(fp, "%sD", others++ ? "|" : "");
		if (pte & _PAGE_ED)
			fprintf(fp, "%sED", others++ ? "|" : "");
		if (pte & _PAGE_PROTNONE)
			fprintf(fp, "%sPROTNONE", others++ ? "|" : "");
	} else {
		fprintf(fp, "no mapping");
	}

	fprintf(fp, ")\n");

	return page_present;
}

/*
 *  Determine where vmalloc'd memory starts.
 */
static ulong
ia64_vmalloc_start(void)
{
	return machdep->machspec->vmalloc_start;
}

/*
 *  Verify that an address is a task_struct address.
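 *
 *  Two ways to qualify: the address is stack-aligned kernel memory
 *  (ALIGNED_STACK_OFFSET(task) == 0, i.e. it sits on a task_struct/stack
 *  block boundary), or it matches one of the per-cpu idle tasks, which
 *  are not laid out that way.  Illustrative (hypothetical) values,
 *  assuming 16KB task/stack blocks:
 *
 *      0xe000000034d40000  ->  aligned, IS_KVADDR()         ->  TRUE
 *      0xe000000034d42bd0  ->  offset 0x2bd0 into the block ->  FALSE
 *                              (unless it equals tt->idle_threads[cpu])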
 */
static int
ia64_is_task_addr(ulong task)
{
	int i;

	if (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0))
		return TRUE;

	for (i = 0; i < kt->cpus; i++)
		if (task == tt->idle_threads[i])
			return TRUE;

	return FALSE;
}

/*
 *  Filter disassembly output if the output radix is not gdb's default 10
 */
static int
ia64_dis_filter(ulong vaddr, char *inbuf, unsigned int output_radix)
{
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *colon, *p1, *p2;
	int argc;
	int revise_bracket, stop_bit;
	char *argv[MAXARGS];
	ulong value;

	if (!inbuf)
		return TRUE;

	/*
	 *  For some reason gdb can go off into the weeds translating
	 *  text addresses (on alpha -- not necessarily seen on ia64),
	 *  so this routine both fixes the references and imposes the
	 *  current output radix on the translations.
	 */
	console("IN: %s", inbuf);

	colon = strstr(inbuf, ":");

	if (colon) {
		sprintf(buf1, "0x%lx <%s>", vaddr,
			value_to_symstr(vaddr, buf2, output_radix));
		sprintf(buf2, "%s%s", buf1, colon);
		strcpy(inbuf, buf2);
	}

	strcpy(buf1, inbuf);
	argc = parse_line(buf1, argv);

	revise_bracket = stop_bit = 0;
	if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    (LASTCHAR(argv[argc-1]) == '>')) {
		revise_bracket = TRUE;
		stop_bit = FALSE;
	} else if ((FIRSTCHAR(argv[argc-1]) == '<') &&
	    strstr(argv[argc-1], ">;;")) {
		revise_bracket = TRUE;
		stop_bit = TRUE;
	}

	if (revise_bracket) {
		p1 = rindex(inbuf, '<');
		while ((p1 > inbuf) && !STRNEQ(p1, "0x"))
			p1--;

		if (!STRNEQ(p1, "0x"))
			return FALSE;

		if (!extract_hex(p1, &value, NULLCHAR, TRUE))
			return FALSE;

		sprintf(buf1, "0x%lx <%s>%s\n", value,
			value_to_symstr(value, buf2, output_radix),
			stop_bit ? ";;" : "");

		sprintf(p1, "%s", buf1);
	} else if (STRNEQ(argv[argc-2], "br.call.") &&
	    STRNEQ(argv[argc-1], "b0=0x")) {
		/*
		 *  Update module function calls of these formats:
		 *
		 *  br.call.sptk.many b0=0xa0000000003d5e40;;
		 *  br.call.sptk.many b0=0xa00000000001dfc0
		 *
		 *  to show a bracketed function name if the destination
		 *  address is a known symbol with no offset.
		 */
		if ((p1 = strstr(argv[argc-1], ";;")) &&
		    (p2 = strstr(inbuf, ";;\n"))) {
			*p1 = NULLCHAR;
			p1 = &argv[argc-1][3];
			if (extract_hex(p1, &value, NULLCHAR, TRUE)) {
				sprintf(buf1, " <%s>;;\n",
					value_to_symstr(value, buf2,
					output_radix));
				if (IS_MODULE_VADDR(value) &&
				    !strstr(buf2, "+"))
					sprintf(p2, "%s", buf1);
			}
		} else {
			p1 = &argv[argc-1][3];
			p2 = &LASTCHAR(inbuf);
			if (extract_hex(p1, &value, '\n', TRUE)) {
				sprintf(buf1, " <%s>\n",
					value_to_symstr(value, buf2,
					output_radix));
				if (IS_MODULE_VADDR(value) &&
				    !strstr(buf2, "+"))
					sprintf(p2, "%s", buf1);
			}
		}
	}

	console(" %s", inbuf);

	return TRUE;
}

/*
 *  Format the pt_regs structure.
 */
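/*
 *  Note on the approach used below: the formatter does not read the
 *  registers from memory directly.  It runs dump_struct("pt_regs", ...)
 *  into a tmpfile and scrapes the hex values back out of the gdb output
 *  by member name; the enum that follows simply indexes the scraped
 *  values, with each P_f*_lo/P_f*_hi pair holding the two words gdb
 *  prints for a floating-point register.
 */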
*/ enum pt_reg_names { P_cr_ipsr, P_cr_iip, P_cr_ifs, P_ar_unat, P_ar_pfs, P_ar_rsc, P_ar_rnat, P_ar_bspstore, P_ar_ccv, P_ar_fpsr, P_pr, P_loadrs, P_b0, P_b6, P_b7, P_r1, P_r2, P_r3, P_r8, P_r9, P_r10, P_r11, P_r12, P_r13, P_r14, P_r15, P_r16, P_r17, P_r18, P_r19, P_r20, P_r21, P_r22, P_r23, P_r24, P_r25, P_r26, P_r27, P_r28, P_r29, P_r30, P_r31, P_f6_lo, P_f6_hi, P_f7_lo, P_f7_hi, P_f8_lo, P_f8_hi, P_f9_lo, P_f9_hi, P_f10_lo, P_f10_hi, P_f11_lo, P_f11_hi, NUM_PT_REGS}; void ia64_exception_frame(ulong addr, struct bt_info *bt) { char buf[BUFSIZE], *p, *p1; int fval; ulong value1, value2; ulong eframe[NUM_PT_REGS]; console("ia64_exception_frame: pt_regs: %lx\n", addr); if (bt->debug) CRASHDEBUG_RESTORE(); CRASHDEBUG_SUSPEND(0); BZERO(&eframe, sizeof(ulong) * NUM_PT_REGS); open_tmpfile(); if (XEN_HYPER_MODE()) dump_struct("cpu_user_regs", addr, RADIX(16)); else dump_struct("pt_regs", addr, RADIX(16)); rewind(pc->tmpfile); fval = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "f6 = ")) { fval = 6; continue; } if (strstr(buf, "f7 = ")) { fval = 7; continue; } if (strstr(buf, "f8 = ")) { fval = 8; continue; } if (strstr(buf, "f9 = ")) { fval = 9; continue; } if (strstr(buf, "f10 = ")) { fval = 10; continue; } if (strstr(buf, "f11 = ")) { fval = 11; continue; } if (!strstr(buf, "0x")) continue; if (fval) { p = strstr(buf, "0x"); if ((p1 = strstr(p, "}"))) *p1 = NULLCHAR; extract_hex(p, &value1, ',', TRUE); p = strstr(buf, ","); extract_hex(p, &value2, NULLCHAR, FALSE); switch (fval) { case 6: eframe[P_f6_lo] = value1; eframe[P_f6_hi] = value2; break; case 7: eframe[P_f7_lo] = value1; eframe[P_f7_hi] = value2; break; case 8: eframe[P_f8_lo] = value1; eframe[P_f8_hi] = value2; break; case 9: eframe[P_f9_lo] = value1; eframe[P_f9_hi] = value2; break; case 10: eframe[P_f10_lo] = value1; eframe[P_f10_hi] = value2; break; case 11: eframe[P_f11_lo] = value1; eframe[P_f11_hi] = value2; break; } fval = 0; continue; } strip_comma(clean_line(buf)); p = strstr(buf, " = "); extract_hex(p, &value1, NULLCHAR, FALSE); if (strstr(buf, "cr_ipsr = ")) { eframe[P_cr_ipsr] = value1; } if (strstr(buf, "cr_iip = ")) { eframe[P_cr_iip] = value1; } if (strstr(buf, "cr_ifs = ")) { eframe[P_cr_ifs] = value1; } if (strstr(buf, "ar_unat = ")) { eframe[P_ar_unat] = value1; } if (strstr(buf, "ar_pfs = ")) { eframe[P_ar_pfs] = value1; } if (strstr(buf, "ar_rsc = ")) { eframe[P_ar_rsc] = value1; } if (strstr(buf, "ar_rnat = ")) { eframe[P_ar_rnat] = value1; } if (strstr(buf, "ar_bspstore = ")) { eframe[P_ar_bspstore] = value1; } if (strstr(buf, "ar_ccv = ")) { eframe[P_ar_ccv] = value1; } if (strstr(buf, "ar_fpsr = ")) { eframe[P_ar_fpsr] = value1; } if (strstr(buf, "pr = ")) { eframe[P_pr] = value1; } if (strstr(buf, "loadrs = ")) { eframe[P_loadrs] = value1; } if (strstr(buf, "b0 = ")) { eframe[P_b0] = value1; } if (strstr(buf, "b6 = ")) { eframe[P_b6] = value1; } if (strstr(buf, "b7 = ")) { eframe[P_b7] = value1; } if (strstr(buf, "r1 = ")) { eframe[P_r1] = value1; } if (strstr(buf, "r2 = ")) { eframe[P_r2] = value1; } if (strstr(buf, "r3 = ")) { eframe[P_r3] = value1; } if (strstr(buf, "r8 = ")) { eframe[P_r8] = value1; } if (strstr(buf, "r9 = ")) { eframe[P_r9] = value1; } if (strstr(buf, "r10 = ")) { eframe[P_r10] = value1; } if (strstr(buf, "r11 = ")) { eframe[P_r11] = value1; } if (strstr(buf, "r12 = ")) { eframe[P_r12] = value1; } if (strstr(buf, "r13 = ")) { eframe[P_r13] = value1; } if (strstr(buf, "r14 = ")) { eframe[P_r14] = value1; } if (strstr(buf, "r15 = ")) { eframe[P_r15] = value1; } if 
(strstr(buf, "r16 = ")) { eframe[P_r16] = value1; } if (strstr(buf, "r17 = ")) { eframe[P_r17] = value1; } if (strstr(buf, "r18 = ")) { eframe[P_r18] = value1; } if (strstr(buf, "r19 = ")) { eframe[P_r19] = value1; } if (strstr(buf, "r20 = ")) { eframe[P_r20] = value1; } if (strstr(buf, "r21 = ")) { eframe[P_r21] = value1; } if (strstr(buf, "r22 = ")) { eframe[P_r22] = value1; } if (strstr(buf, "r23 = ")) { eframe[P_r23] = value1; } if (strstr(buf, "r24 = ")) { eframe[P_r24] = value1; } if (strstr(buf, "r25 = ")) { eframe[P_r25] = value1; } if (strstr(buf, "r26 = ")) { eframe[P_r26] = value1; } if (strstr(buf, "r27 = ")) { eframe[P_r27] = value1; } if (strstr(buf, "r28 = ")) { eframe[P_r28] = value1; } if (strstr(buf, "r29 = ")) { eframe[P_r29] = value1; } if (strstr(buf, "r30 = ")) { eframe[P_r30] = value1; } if (strstr(buf, "r31 = ")) { eframe[P_r31] = value1; } } close_tmpfile(); fprintf(fp, " EFRAME: %lx\n", addr); if (bt->flags & BT_INCOMPLETE_USER_EFRAME) { fprintf(fp, " [exception frame incomplete -- check salinfo for complete context]\n"); bt->flags &= ~BT_INCOMPLETE_USER_EFRAME; } fprintf(fp, " B0: %016lx CR_IIP: %016lx\n", eframe[P_b0], eframe[P_cr_iip]); /** if (is_kernel_text(eframe[P_cr_iip])) fprintf(fp, "<%s>", value_to_symstr(eframe[P_cr_iip], buf, 0)); fprintf(fp, "\n"); **/ fprintf(fp, " CR_IPSR: %016lx CR_IFS: %016lx\n", eframe[P_cr_ipsr], eframe[P_cr_ifs]); fprintf(fp, " AR_PFS: %016lx AR_RSC: %016lx\n", eframe[P_ar_pfs], eframe[P_ar_rsc]); fprintf(fp, " AR_UNAT: %016lx AR_RNAT: %016lx\n", eframe[P_ar_unat], eframe[P_ar_rnat]); fprintf(fp, " AR_CCV: %016lx AR_FPSR: %016lx\n", eframe[P_ar_ccv], eframe[P_ar_fpsr]); fprintf(fp, " LOADRS: %016lx AR_BSPSTORE: %016lx\n", eframe[P_loadrs], eframe[P_ar_bspstore]); fprintf(fp, " B6: %016lx B7: %016lx\n", eframe[P_b6], eframe[P_b7]); fprintf(fp, " PR: %016lx R1: %016lx\n", eframe[P_pr], eframe[P_r1]); fprintf(fp, " R2: %016lx R3: %016lx\n", eframe[P_r2], eframe[P_r3]); fprintf(fp, " R8: %016lx R9: %016lx\n", eframe[P_r8], eframe[P_r9]); fprintf(fp, " R10: %016lx R11: %016lx\n", eframe[P_r10], eframe[P_r11]); fprintf(fp, " R12: %016lx R13: %016lx\n", eframe[P_r12], eframe[P_r13]); fprintf(fp, " R14: %016lx R15: %016lx\n", eframe[P_r14], eframe[P_r15]); fprintf(fp, " R16: %016lx R17: %016lx\n", eframe[P_r16], eframe[P_r17]); fprintf(fp, " R18: %016lx R19: %016lx\n", eframe[P_r18], eframe[P_r19]); fprintf(fp, " R20: %016lx R21: %016lx\n", eframe[P_r20], eframe[P_r21]); fprintf(fp, " R22: %016lx R23: %016lx\n", eframe[P_r22], eframe[P_r23]); fprintf(fp, " R24: %016lx R25: %016lx\n", eframe[P_r24], eframe[P_r25]); fprintf(fp, " R26: %016lx R27: %016lx\n", eframe[P_r26], eframe[P_r27]); fprintf(fp, " R28: %016lx R29: %016lx\n", eframe[P_r28], eframe[P_r29]); fprintf(fp, " R30: %016lx R31: %016lx\n", eframe[P_r30], eframe[P_r31]); fprintf(fp, " F6: %05lx%016lx ", eframe[P_f6_hi], eframe[P_f6_lo]); fprintf(fp, " F7: %05lx%016lx\n", eframe[P_f7_hi], eframe[P_f7_lo]); fprintf(fp, " F8: %05lx%016lx ", eframe[P_f8_hi], eframe[P_f8_lo]); fprintf(fp, " F9: %05lx%016lx\n", eframe[P_f9_hi], eframe[P_f9_lo]); if (machdep->flags & NEW_UNW_V3) { fprintf(fp, " F10: %05lx%016lx ", eframe[P_f10_hi], eframe[P_f10_lo]); fprintf(fp, " F11: %05lx%016lx\n", eframe[P_f11_hi], eframe[P_f11_lo]); } CRASHDEBUG_RESTORE(); if (bt->debug) CRASHDEBUG_SUSPEND(bt->debug); } enum ss_reg_names { S_caller_unat, S_ar_fpsr, S_f2_lo, S_f2_hi, S_f3_lo, S_f3_hi, S_f4_lo, S_f4_hi, S_f5_lo, S_f5_hi, S_f10_lo, S_f10_hi, S_f11_lo, S_f11_hi, S_f12_lo, S_f12_hi, S_f13_lo, 
S_f13_hi, S_f14_lo, S_f14_hi, S_f15_lo, S_f15_hi, S_f16_lo, S_f16_hi, S_f17_lo, S_f17_hi, S_f18_lo, S_f18_hi, S_f19_lo, S_f19_hi, S_f20_lo, S_f20_hi, S_f21_lo, S_f21_hi, S_f22_lo, S_f22_hi, S_f23_lo, S_f23_hi, S_f24_lo, S_f24_hi, S_f25_lo, S_f25_hi, S_f26_lo, S_f26_hi, S_f27_lo, S_f27_hi, S_f28_lo, S_f28_hi, S_f29_lo, S_f29_hi, S_f30_lo, S_f30_hi, S_f31_lo, S_f31_hi, S_r4, S_r5, S_r6, S_r7, S_b0, S_b1, S_b2, S_b3, S_b4, S_b5, S_ar_pfs, S_ar_lc, S_ar_unat, S_ar_rnat, S_ar_bspstore, S_pr, NUM_SS_REGS }; /* * Format the switch_stack structure. */ static void ia64_dump_switch_stack(ulong task, ulong flag) { ulong addr; char buf[BUFSIZE], *p; int fval; ulong value1, value2; ulong ss[NUM_SS_REGS]; addr = SWITCH_STACK_ADDR(task); BZERO(&ss, sizeof(ulong) * NUM_SS_REGS); open_tmpfile(); dump_struct("switch_stack", addr, RADIX(16)); rewind(pc->tmpfile); fval = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "f2 = ")) { fval = 2; continue; } if (strstr(buf, "f3 = ")) { fval = 3; continue; } if (strstr(buf, "f4 = ")) { fval = 4; continue; } if (strstr(buf, "f5 = ")) { fval = 5; continue; } if (strstr(buf, "f10 = ")) { fval = 10; continue; } if (strstr(buf, "f11 = ")) { fval = 11; continue; } if (strstr(buf, "f12 = ")) { fval = 12; continue; } if (strstr(buf, "f13 = ")) { fval = 13; continue; } if (strstr(buf, "f14 = ")) { fval = 14; continue; } if (strstr(buf, "f15 = ")) { fval = 15; continue; } if (strstr(buf, "f16 = ")) { fval = 16; continue; } if (strstr(buf, "f17 = ")) { fval = 17; continue; } if (strstr(buf, "f18 = ")) { fval = 18; continue; } if (strstr(buf, "f19 = ")) { fval = 19; continue; } if (strstr(buf, "f20 = ")) { fval = 20; continue; } if (strstr(buf, "f21 = ")) { fval = 21; continue; } if (strstr(buf, "f22 = ")) { fval = 22; continue; } if (strstr(buf, "f23 = ")) { fval = 23; continue; } if (strstr(buf, "f24 = ")) { fval = 24; continue; } if (strstr(buf, "f25 = ")) { fval = 25; continue; } if (strstr(buf, "f26 = ")) { fval = 26; continue; } if (strstr(buf, "f27 = ")) { fval = 27; continue; } if (strstr(buf, "f28 = ")) { fval = 28; continue; } if (strstr(buf, "f29 = ")) { fval = 29; continue; } if (strstr(buf, "f30 = ")) { fval = 30; continue; } if (strstr(buf, "f31 = ")) { fval = 31; continue; } if (!strstr(buf, "0x")) continue; if (fval) { p = strstr(buf, "0x"); extract_hex(p, &value1, ',', TRUE); p = strstr(buf, ","); extract_hex(p, &value2, '}', FALSE); switch (fval) { case 2: ss[S_f2_lo] = value1; ss[S_f2_hi] = value2; break; case 3: ss[S_f3_lo] = value1; ss[S_f3_hi] = value2; break; case 4: ss[S_f4_lo] = value1; ss[S_f4_hi] = value2; break; case 5: ss[S_f5_lo] = value1; ss[S_f5_hi] = value2; break; case 10: ss[S_f10_lo] = value1; ss[S_f10_hi] = value2; break; case 11: ss[S_f11_lo] = value1; ss[S_f11_hi] = value2; break; case 12: ss[S_f12_lo] = value1; ss[S_f12_hi] = value2; break; case 13: ss[S_f13_lo] = value1; ss[S_f13_hi] = value2; break; case 14: ss[S_f14_lo] = value1; ss[S_f14_hi] = value2; break; case 15: ss[S_f15_lo] = value1; ss[S_f15_hi] = value2; break; case 16: ss[S_f16_lo] = value1; ss[S_f16_hi] = value2; break; case 17: ss[S_f17_lo] = value1; ss[S_f17_hi] = value2; break; case 18: ss[S_f18_lo] = value1; ss[S_f18_hi] = value2; break; case 19: ss[S_f19_lo] = value1; ss[S_f19_hi] = value2; break; case 20: ss[S_f20_lo] = value1; ss[S_f20_hi] = value2; break; case 21: ss[S_f21_lo] = value1; ss[S_f21_hi] = value2; break; case 22: ss[S_f22_lo] = value1; ss[S_f22_hi] = value2; break; case 23: ss[S_f23_lo] = value1; ss[S_f23_hi] = value2; break; case 24: 
ss[S_f24_lo] = value1; ss[S_f24_hi] = value2; break; case 25: ss[S_f25_lo] = value1; ss[S_f25_hi] = value2; break; case 26: ss[S_f26_lo] = value1; ss[S_f26_hi] = value2; break; case 27: ss[S_f27_lo] = value1; ss[S_f27_hi] = value2; break; case 28: ss[S_f28_lo] = value1; ss[S_f28_hi] = value2; break; case 29: ss[S_f29_lo] = value1; ss[S_f29_hi] = value2; break; case 30: ss[S_f30_lo] = value1; ss[S_f30_hi] = value2; break; case 31: ss[S_f31_lo] = value1; ss[S_f31_hi] = value2; break; } fval = 0; continue; } strip_comma(clean_line(buf)); p = strstr(buf, " = "); extract_hex(p, &value1, NULLCHAR, FALSE); if (strstr(buf, "caller_unat = ")) { ss[S_caller_unat] = value1; } if (strstr(buf, "ar_fpsr = ")) { ss[S_ar_fpsr] = value1; } if (strstr(buf, "r4 = ")) { ss[S_r4] = value1; } if (strstr(buf, "r5 = ")) { ss[S_r5] = value1; } if (strstr(buf, "r6 = ")) { ss[S_r6] = value1; } if (strstr(buf, "r7 = ")) { ss[S_r7] = value1; } if (strstr(buf, "b0 = ")) { ss[S_b0] = value1; } if (strstr(buf, "b1 = ")) { ss[S_b1] = value1; } if (strstr(buf, "b2 = ")) { ss[S_b2] = value1; } if (strstr(buf, "b3 = ")) { ss[S_b3] = value1; } if (strstr(buf, "b4 = ")) { ss[S_b4] = value1; } if (strstr(buf, "b5 = ")) { ss[S_b5] = value1; } if (strstr(buf, "ar_pfs = ")) { ss[S_ar_pfs] = value1; } if (strstr(buf, "ar_lc = ")) { ss[S_ar_lc] = value1; } if (strstr(buf, "ar_unat = ")) { ss[S_ar_unat] = value1; } if (strstr(buf, "ar_rnat = ")) { ss[S_ar_rnat] = value1; } if (strstr(buf, "ar_bspstore = ")) { ss[S_ar_bspstore] = value1; } if (strstr(buf, "pr = ")) { ss[S_pr] = value1; } } close_tmpfile(); fprintf(fp, "SWITCH_STACK: %lx\n", addr); fprintf(fp, " B0: %016lx B1: %016lx\n", ss[S_b0], ss[S_b1]); fprintf(fp, " B2: %016lx B3: %016lx\n", ss[S_b2], ss[S_b3]); fprintf(fp, " B4: %016lx B5: %016lx\n", ss[S_b4], ss[S_b5]); fprintf(fp, " AR_PFS: %016lx AR_LC: %016lx\n", ss[S_ar_pfs], ss[S_ar_lc]); fprintf(fp, " AR_UNAT: %016lx AR_RNAT: %016lx\n", ss[S_ar_unat], ss[S_ar_rnat]); fprintf(fp, " PR: %016lx AR_BSPSTORE: %016lx\n", ss[S_pr], ss[S_ar_bspstore]); fprintf(fp, " AR_FPSR: %016lx CALLER_UNAT: %016lx\n", ss[S_ar_fpsr], ss[S_caller_unat]); fprintf(fp, " R4: %016lx R5: %016lx\n", ss[S_r4], ss[S_r5]); fprintf(fp, " R6: %016lx R7: %016lx\n", ss[S_r6], ss[S_r7]); fprintf(fp, " F2: %05lx%016lx ", ss[S_f2_hi], ss[S_f2_lo]); fprintf(fp, " F3: %05lx%016lx\n", ss[S_f3_hi], ss[S_f3_lo]); fprintf(fp, " F4: %05lx%016lx ", ss[S_f4_hi], ss[S_f4_lo]); fprintf(fp, " F5: %05lx%016lx\n", ss[S_f5_hi], ss[S_f5_lo]); fprintf(fp, " F10: %05lx%016lx ", ss[S_f10_hi], ss[S_f10_lo]); fprintf(fp, " F11: %05lx%016lx\n", ss[S_f11_hi], ss[S_f11_lo]); fprintf(fp, " F12: %05lx%016lx ", ss[S_f12_hi], ss[S_f12_lo]); fprintf(fp, " F13: %05lx%016lx\n", ss[S_f13_hi], ss[S_f13_lo]); fprintf(fp, " F14: %05lx%016lx ", ss[S_f14_hi], ss[S_f14_lo]); fprintf(fp, " F15: %05lx%016lx\n", ss[S_f15_hi], ss[S_f15_lo]); fprintf(fp, " F16: %05lx%016lx ", ss[S_f16_hi], ss[S_f16_lo]); fprintf(fp, " F17: %05lx%016lx\n", ss[S_f17_hi], ss[S_f17_lo]); fprintf(fp, " F18: %05lx%016lx ", ss[S_f18_hi], ss[S_f18_lo]); fprintf(fp, " F19: %05lx%016lx\n", ss[S_f19_hi], ss[S_f19_lo]); fprintf(fp, " F20: %05lx%016lx ", ss[S_f20_hi], ss[S_f20_lo]); fprintf(fp, " F21: %05lx%016lx\n", ss[S_f21_hi], ss[S_f21_lo]); fprintf(fp, " F22: %05lx%016lx ", ss[S_f22_hi], ss[S_f22_lo]); fprintf(fp, " F23: %05lx%016lx\n", ss[S_f23_hi], ss[S_f23_lo]); fprintf(fp, " F24: %05lx%016lx ", ss[S_f24_hi], ss[S_f24_lo]); fprintf(fp, " F25: %05lx%016lx\n", ss[S_f25_hi], ss[S_f25_lo]); fprintf(fp, " F26: %05lx%016lx ", 
ss[S_f26_hi], ss[S_f26_lo]); fprintf(fp, " F27: %05lx%016lx\n", ss[S_f27_hi], ss[S_f27_lo]); fprintf(fp, " F28: %05lx%016lx ", ss[S_f28_hi], ss[S_f28_lo]); fprintf(fp, " F29: %05lx%016lx\n", ss[S_f29_hi], ss[S_f29_lo]); fprintf(fp, " F30: %05lx%016lx ", ss[S_f30_hi], ss[S_f30_lo]); fprintf(fp, " F31: %05lx%016lx\n", ss[S_f31_hi], ss[S_f31_lo]); } /* * Override smp_num_cpus if possible and necessary. */ int ia64_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_online())) return MAX(cpus, get_highest_cpu_online()+1); else return kt->cpus; } /* * Machine dependent command. */ void ia64_cmd_mach(void) { int c, cflag, mflag; unsigned int radix; cflag = mflag = radix = 0; while ((c = getopt(argcnt, args, "cmxd")) != EOF) { switch(c) { case 'c': cflag++; break; case 'm': mflag++; ia64_display_memmap(); break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (cflag) ia64_display_cpu_data(radix); if (!cflag && !mflag) ia64_display_machine_stats(); } /* * "mach" command output. */ static void ia64_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); if (!STREQ(kt->hypervisor, "(undetermined)") && !STREQ(kt->hypervisor, "bare hardware")) fprintf(fp, " HYPERVISOR: %s\n", kt->hypervisor); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); // fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, " KERNEL CACHED REGION: %lx\n", (ulong)KERNEL_CACHED_REGION << REGION_SHIFT); fprintf(fp, " KERNEL UNCACHED REGION: %lx\n", (ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT); fprintf(fp, " KERNEL VMALLOC REGION: %lx\n", (ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT); fprintf(fp, " USER DATA/STACK REGION: %lx\n", (ulong)USER_STACK_REGION << REGION_SHIFT); fprintf(fp, " USER DATA/STACK REGION: %lx\n", (ulong)USER_DATA_REGION << REGION_SHIFT); fprintf(fp, " USER TEXT REGION: %lx\n", (ulong)USER_TEXT_REGION << REGION_SHIFT); fprintf(fp, " USER SHARED MEMORY REGION: %lx\n", (ulong)USER_SHMEM_REGION << REGION_SHIFT); fprintf(fp, "USER IA32 EMULATION REGION: %016lx\n", (ulong)USER_IA32_EMUL_REGION << REGION_SHIFT); } static void ia64_display_cpu_data(unsigned int radix) { int cpu; ulong cpu_data; int array_location_known; struct syment *sp; if (!(cpu_data = machdep->machspec->cpu_data_address)) { error(FATAL, "cannot find cpuinfo_ia64 location\n"); return; } array_location_known = per_cpu_symbol_search("per_cpu__cpu_info") || symbol_exists("cpu_data") || symbol_exists("_cpu_data"); for (cpu = 0; cpu < kt->cpus; cpu++) { fprintf(fp, "%sCPU %d: %s\n", cpu ? "\n" : "", cpu, array_location_known ? "" : "(boot)"); dump_struct("cpuinfo_ia64", cpu_data, radix); if (!array_location_known) break; if ((sp = per_cpu_symbol_search("per_cpu__cpu_info"))) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) cpu_data = sp->value + kt->__per_cpu_offset[cpu+1]; else break; /* we've already done cpu 0 */ } else cpu_data += SIZE(cpuinfo_ia64); } } /* * Dump the EFI memory map. 
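 *
 *  Each efi_memory_desc_t is printed as one line; an illustrative
 *  (hypothetical) excerpt of the output format:
 *
 *      PHYSICAL ADDRESS RANGE TYPE / ATTRIBUTE / [ACCESS]
 *      0000000004000000 - 0000000007ffc000 CONVENTIONAL_MEMORY WB [available]
 *      00000000ff800000 - 0000000100000000 MEMORY_MAPPED_IO_PORT_SPACE UC
 *
 *  "[available]" is appended when ia64_available_memory() accepts the
 *  descriptor, and a trailing [R6]/[R7] tag reflects the region of the
 *  descriptor's virtual address.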
*/ static void ia64_display_memmap(void) { int i, others; struct efi_memory_desc_t *desc; struct machine_specific *ms; char *map; ms = &ia64_machine_specific; map = ms->ia64_memmap; if (!map) { check_mem_limit(); error(FATAL, "efi_mmap not accessible\n"); } fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE / ATTRIBUTE / [ACCESS]\n"); for (i = 0; i < ms->efi_memmap_size/ms->efi_memdesc_size; i++) { desc = (struct efi_memory_desc_t *)map; fprintf(fp, "%016lx - %016lx ", desc->phys_addr, desc->phys_addr + (desc->num_pages * (1 << EFI_PAGE_SHIFT))); switch (desc->type) { case EFI_RESERVED_TYPE: fprintf(fp, "%s", "RESERVED_TYPE"); break; case EFI_LOADER_CODE: fprintf(fp, "%s", "LOADER_CODE"); break; case EFI_LOADER_DATA: fprintf(fp, "%s", "LOADER_DATA"); break; case EFI_BOOT_SERVICES_CODE: fprintf(fp, "%s", "BOOT_SERVICES_CODE"); break; case EFI_BOOT_SERVICES_DATA: fprintf(fp, "%s", "BOOT_SERVICES_DATA"); break; case EFI_RUNTIME_SERVICES_CODE: fprintf(fp, "%s", "RUNTIME_SERVICES_CODE"); break; case EFI_RUNTIME_SERVICES_DATA: fprintf(fp, "%s", "RUNTIME_SERVICES_DATA"); break; case EFI_CONVENTIONAL_MEMORY: fprintf(fp, "%s", "CONVENTIONAL_MEMORY"); break; case EFI_UNUSABLE_MEMORY: fprintf(fp, "%s", "UNUSABLE_MEMORY"); break; case EFI_ACPI_RECLAIM_MEMORY: fprintf(fp, "%s", "ACPI_RECLAIM_MEMORY"); break; case EFI_ACPI_MEMORY_NVS: fprintf(fp, "%s", "ACPI_MEMORY_NVS"); break; case EFI_MEMORY_MAPPED_IO: fprintf(fp, "%s", "MEMORY_MAPPED_IO"); break; case EFI_MEMORY_MAPPED_IO_PORT_SPACE: fprintf(fp, "%s", "MEMORY_MAPPED_IO_PORT_SPACE"); break; case EFI_PAL_CODE: fprintf(fp, "%s", "PAL_CODE"); break; default: fprintf(fp, "%s", "(unknown type)"); break; } fprintf(fp, " "); others = 0; if (desc->attribute & EFI_MEMORY_UC) fprintf(fp, "%sUC", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WC) fprintf(fp, "%sWC", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WT) fprintf(fp, "%sWT", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WB) fprintf(fp, "%sWB", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WP) fprintf(fp, "%sWP", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_RP) fprintf(fp, "%sRP", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_XP) fprintf(fp, "%sXP", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_RUNTIME) fprintf(fp, "%sRUNTIME", others++ ? "|" : ""); fprintf(fp, " %s", ia64_available_memory(desc) ? 
"[available]" : ""); switch (VADDR_REGION(desc->virt_addr)) { case KERNEL_UNCACHED_REGION: fprintf(fp, "[R6]\n"); break; case KERNEL_CACHED_REGION: fprintf(fp, "[R7]\n"); break; default: fprintf(fp, "\n"); } if (!CRASHDEBUG(1)) goto next_desc; fprintf(fp, "physical: %016lx %dk pages: %ld virtual: %016lx\n", desc->phys_addr, (1 << EFI_PAGE_SHIFT)/1024, desc->num_pages, desc->virt_addr); fprintf(fp, "type: "); switch (desc->type) { case EFI_RESERVED_TYPE: fprintf(fp, "%-27s", "RESERVED_TYPE"); break; case EFI_LOADER_CODE: fprintf(fp, "%-27s", "LOADER_CODE"); break; case EFI_LOADER_DATA: fprintf(fp, "%-27s", "LOADER_DATA"); break; case EFI_BOOT_SERVICES_CODE: fprintf(fp, "%-27s", "BOOT_SERVICES_CODE"); break; case EFI_BOOT_SERVICES_DATA: fprintf(fp, "%-27s", "BOOT_SERVICES_DATA"); break; case EFI_RUNTIME_SERVICES_CODE: fprintf(fp, "%-27s", "RUNTIME_SERVICES_CODE"); break; case EFI_RUNTIME_SERVICES_DATA: fprintf(fp, "%-27s", "RUNTIME_SERVICES_DATA"); break; case EFI_CONVENTIONAL_MEMORY: fprintf(fp, "%-27s", "CONVENTIONAL_MEMORY"); break; case EFI_UNUSABLE_MEMORY: fprintf(fp, "%-27s", "UNUSABLE_MEMORY"); break; case EFI_ACPI_RECLAIM_MEMORY: fprintf(fp, "%-27s", "ACPI_RECLAIM_MEMORY"); break; case EFI_ACPI_MEMORY_NVS: fprintf(fp, "%-27s", "ACPI_MEMORY_NVS"); break; case EFI_MEMORY_MAPPED_IO: fprintf(fp, "%-27s", "MEMORY_MAPPED_IO"); break; case EFI_MEMORY_MAPPED_IO_PORT_SPACE: fprintf(fp, "%-27s", "MEMORY_MAPPED_IO_PORT_SPACE"); break; case EFI_PAL_CODE: fprintf(fp, "%-27s", "PAL_CODE"); break; default: fprintf(fp, "%-27s", "(unknown type)"); break; } fprintf(fp, " attribute: ("); others = 0; if (desc->attribute & EFI_MEMORY_UC) fprintf(fp, "%sUC", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WC) fprintf(fp, "%sWC", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WT) fprintf(fp, "%sWT", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WB) fprintf(fp, "%sWB", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_WP) fprintf(fp, "%sWP", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_RP) fprintf(fp, "%sRP", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_XP) fprintf(fp, "%sXP", others++ ? "|" : ""); if (desc->attribute & EFI_MEMORY_RUNTIME) fprintf(fp, "%sRUNTIME", others++ ? "|" : ""); fprintf(fp, ") %s\n", ia64_available_memory(desc) ? "[available]" : ""); next_desc: map += ms->efi_memdesc_size; } } static int ia64_available_memory(struct efi_memory_desc_t *desc) { if (desc->attribute & EFI_MEMORY_WB) { switch (desc->type) { case EFI_LOADER_CODE: case EFI_LOADER_DATA: case EFI_BOOT_SERVICES_CODE: case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: return TRUE; } } return FALSE; } /* * Make a copy of the memmap descriptor array. 
*/ static void ia64_create_memmap(void) { struct machine_specific *ms; uint64_t ia64_boot_param, efi_memmap; ulong num_physpages; char *memmap; ms = &ia64_machine_specific; ms->ia64_memmap = NULL; if (symbol_exists("num_physpages")) { get_symbol_data("num_physpages", sizeof(ulong), &num_physpages); machdep->memsize = num_physpages * PAGESIZE(); } if (!symbol_exists("ia64_boot_param")) return; if ((ms->mem_limit = check_mem_limit())) machdep->flags |= MEM_LIMIT; get_symbol_data("ia64_boot_param", sizeof(void *), &ia64_boot_param); if ((ms->mem_limit && (ia64_VTOP(ia64_boot_param) >= ms->mem_limit)) || !readmem(ia64_boot_param+ MEMBER_OFFSET("ia64_boot_param", "efi_memmap"), KVADDR, &efi_memmap, sizeof(uint64_t), "efi_memmap", QUIET|RETURN_ON_ERROR)) { if (!XEN() || CRASHDEBUG(1)) error(WARNING, "cannot read ia64_boot_param: " "memory verification will not be performed\n\n"); return; } readmem(ia64_boot_param+MEMBER_OFFSET("ia64_boot_param", "efi_memmap_size"), KVADDR, &ms->efi_memmap_size, sizeof(uint64_t), "efi_memmap_size", FAULT_ON_ERROR); readmem(ia64_boot_param+MEMBER_OFFSET("ia64_boot_param", "efi_memdesc_size"), KVADDR, &ms->efi_memdesc_size, sizeof(uint64_t), "efi_memdesc_size", FAULT_ON_ERROR); if (!(memmap = (char *) malloc(ms->efi_memmap_size))) { error(WARNING, "cannot malloc ia64_memmap\n"); return; } if ((ms->mem_limit && (efi_memmap >= ms->mem_limit)) || !readmem(PTOV(efi_memmap), KVADDR, memmap, ms->efi_memmap_size, "efi_mmap contents", QUIET|RETURN_ON_ERROR)) { if (!XEN() || (XEN() && CRASHDEBUG(1))) error(WARNING, "cannot read efi_mmap: " "EFI memory verification will not be performed\n\n"); free(memmap); return; } ms->ia64_memmap = memmap; } /* * Kernel pages may cross EFI memmap boundaries, so the system page is * broken into EFI pages, and then each of them is verified. */ static int ia64_verify_paddr(uint64_t paddr) { int i, j, cnt, found, desc_count, desc_size; struct efi_memory_desc_t *desc; struct machine_specific *ms; uint64_t phys_end; char *map; int efi_pages; ulong efi_pagesize; /* * When kernel text and data are mapped in region 5, * and we're using the crash memory device driver, * then the driver will gracefully fail the read attempt * if the address is bogus. */ if ((VADDR_REGION(paddr) == KERNEL_VMALLOC_REGION) && (pc->flags & MEMMOD)) return TRUE; ms = &ia64_machine_specific; if (ms->ia64_memmap == NULL) return TRUE; desc_count = ms->efi_memmap_size/ms->efi_memdesc_size; desc_size = ms->efi_memdesc_size; efi_pagesize = (1 << EFI_PAGE_SHIFT); efi_pages = PAGESIZE() / efi_pagesize; paddr = PAGEBASE(paddr); for (i = cnt = 0; i < efi_pages; i++, paddr += efi_pagesize) { map = ms->ia64_memmap; for (j = found = 0; j < desc_count; j++) { desc = (struct efi_memory_desc_t *)map; if (ia64_available_memory(desc)) { phys_end = desc->phys_addr + (desc->num_pages * efi_pagesize); if ((paddr >= desc->phys_addr) && ((paddr + efi_pagesize) <= phys_end)) { cnt++; found = TRUE; } } if (found) break; map += desc_size; } } return (cnt == efi_pages); } /* * Check whether a "mem=X" argument was entered on the boot command line. * Note that the default setting of the kernel mem_limit is ~0UL. 
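 *
 *  A worked example with hypothetical values: booting with "mem=2G"
 *  leaves the kernel's mem_limit holding the last usable byte, e.g.
 *  0x7fffffff, so the routine below returns 0x80000000 after the
 *  "mem_limit += 1" adjustment -- i.e. the first physical address that
 *  must not be read.  A return of 0 means no limit was imposed.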
 */
static ulong
check_mem_limit(void)
{
	ulong mem_limit;
	char *saved_command_line, *p1, *p2;
	int len;

	if (!symbol_exists("mem_limit"))
		return 0;

	get_symbol_data("mem_limit", sizeof(ulong), &mem_limit);

	if (mem_limit == ~0UL)
		return 0;

	mem_limit += 1;

	if (!symbol_exists("saved_command_line"))
		goto no_command_line;

	len = get_array_length("saved_command_line", 0, sizeof(char));
	if (!len)
		goto no_command_line;

	saved_command_line = GETBUF(len+1);
	if (!readmem(symbol_value("saved_command_line"), KVADDR,
	    saved_command_line, len, "saved_command_line", RETURN_ON_ERROR))
		goto no_command_line;

	if (!(p1 = strstr(saved_command_line, "mem=")))
		goto no_command_line;

	p2 = p1;
	while (*p2 && !whitespace(*p2))
		p2++;
	*p2 = NULLCHAR;

	error(pc->flags & RUNTIME ? INFO : WARNING,
		"boot command line argument: %s\n", p1);

	return mem_limit;

no_command_line:

	error(pc->flags & RUNTIME ? INFO : WARNING,
		"boot command line memory limit: %lx\n", mem_limit);

	return mem_limit;
}

#ifndef _ASM_IA64_UNWIND_H
#define _ASM_IA64_UNWIND_H

/*
 * Copyright (C) 1999-2000 Hewlett-Packard Co
 * Copyright (C) 1999-2000 David Mosberger-Tang
 *
 * A simple API for unwinding kernel stacks.  This is used for
 * debugging and error reporting purposes.  The kernel doesn't need
 * full-blown stack unwinding with all the bells and whistles, so there
 * is not much point in implementing the full IA-64 unwind API (though
 * it would of course be possible to implement the kernel API on top
 * of it).
 */

struct task_struct;	/* forward declaration */
struct switch_stack;	/* forward declaration */

enum unw_application_register {
	UNW_AR_BSP,
	UNW_AR_BSPSTORE,
	UNW_AR_PFS,
	UNW_AR_RNAT,
	UNW_AR_UNAT,
	UNW_AR_LC,
	UNW_AR_EC,
	UNW_AR_FPSR,
	UNW_AR_RSC,
	UNW_AR_CCV
};

/*
 * The following declarations are private to the unwind
 * implementation:
 */
struct unw_stack {
	unsigned long limit;
	unsigned long top;
};

#define UNW_FLAG_INTERRUPT_FRAME	(1UL << 0)

/*
 * No user of this module should ever access this structure directly
 * as it is subject to change.  It is declared here solely so we can
 * use automatic variables.
 */
struct unw_frame_info {
	struct unw_stack regstk;
	struct unw_stack memstk;
	unsigned int flags;
	short hint;
	short prev_script;
	unsigned long bsp;
	unsigned long sp;		/* stack pointer */
	unsigned long psp;		/* previous sp */
	unsigned long ip;		/* instruction pointer */
	unsigned long pr_val;		/* current predicates */
	unsigned long *cfm;

	struct task_struct *task;
	struct switch_stack *sw;

	/* preserved state: */
	unsigned long *pbsp;		/* previous bsp */
	unsigned long *bspstore;
	unsigned long *pfs;
	unsigned long *rnat;
	unsigned long *rp;
	unsigned long *pri_unat;
	unsigned long *unat;
	unsigned long *pr;
	unsigned long *lc;
	unsigned long *fpsr;
	struct unw_ireg {
		unsigned long *loc;
		struct unw_ireg_nat {
			int type : 3;		/* enum unw_nat_type */
			signed int off;		/* NaT word is at loc+nat.off */
		} nat;
	} r4, r5, r6, r7;
	unsigned long *b1, *b2, *b3, *b4, *b5;
	struct ia64_fpreg *f2, *f3, *f4, *f5, *fr[16];
};

#endif /* _ASM_IA64_UNWIND_H */

/*
 *  Perform any leftover pre-prompt machine-specific initialization tasks here.
 */
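/*
 *  A sketch of the unwind flavor selection performed below (the version
 *  names are crash's own; the structure tests are the discriminators):
 *
 *      symbol unw_init_frame_info exists?
 *          unw_frame_info has a "pt" member?
 *              pt_regs has "ar_csd"?  ->  NEW_UNW_V3
 *              otherwise              ->  NEW_UNW_V2
 *          otherwise                  ->  NEW_UNW_V1
 *      otherwise                      ->  OLD_UNWIND
 */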
*/ static void ia64_post_init(void) { struct machine_specific *ms; struct gnu_request req; struct syment *sp; ulong flag; ms = &ia64_machine_specific; if (symbol_exists("unw_init_frame_info")) { machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("pt_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } else { machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; } ms->unwind_init(); if (!VALID_STRUCT(cpuinfo_ia64)) error(WARNING, "cpuinfo_ia64 structure does not exist\n"); else { if (symbol_exists("_cpu_data")) ms->cpu_data_address = symbol_value("_cpu_data"); else if (symbol_exists("boot_cpu_data")) get_symbol_data("boot_cpu_data", sizeof(ulong), &ms->cpu_data_address); else if (symbol_exists("cpu_data")) ms->cpu_data_address = symbol_value("cpu_data"); else if ((sp = per_cpu_symbol_search("per_cpu__cpu_info")) || (sp = per_cpu_symbol_search("per_cpu__ia64_cpu_info"))) { if ((kt->flags & SMP) && (kt->flags & PER_CPU_OFF)) ms->cpu_data_address = sp->value + kt->__per_cpu_offset[0]; else ms->cpu_data_address = sp->value; } else { error(WARNING, "cannot find cpuinfo_ia64 location\n"); ms->cpu_data_address = 0; } if (ms->cpu_data_address) { if (VALID_MEMBER(cpuinfo_ia64_unimpl_va_mask)) readmem(ms->cpu_data_address + OFFSET(cpuinfo_ia64_unimpl_va_mask), KVADDR, &ms->unimpl_va_mask, sizeof(ulong), "unimpl_va_mask", FAULT_ON_ERROR); if (VALID_MEMBER(cpuinfo_ia64_unimpl_pa_mask)) readmem(ms->cpu_data_address + OFFSET(cpuinfo_ia64_unimpl_pa_mask), KVADDR, &ms->unimpl_pa_mask, sizeof(ulong), "unimpl_pa_mask", FAULT_ON_ERROR); } } if (symbol_exists("ia64_init_stack") && !ms->ia64_init_stack_size) { get_symbol_type("ia64_init_stack", NULL, &req); ms->ia64_init_stack_size = req.length; } if (DUMPFILE() && ia64_in_init_stack(SWITCH_STACK_ADDR(CURRENT_TASK()))) machdep->flags |= INIT; if (DUMPFILE() && (flag = ia64_in_per_cpu_mca_stack())) machdep->flags |= flag; } /* * Try using the old unwind scheme if the new one fails, * that is as long as the unw_frame_info structs are the * same size. */ static void try_old_unwind(struct bt_info *bt) { if ((machdep->flags & NEW_UNWIND) && (STRUCT_SIZE("unw_frame_info") == sizeof(struct unw_frame_info))) { error(INFO, "unwind: trying old unwind mechanism\n"); ia64_old_unwind(bt); } } /* * Unwind the stack using the basic method used when CONFIG_IA64_NEW_UNWIND * is not configured into the kernel. 
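 *
 *  The loop amounts to: seed an unw_frame_info from the task's
 *  switch_stack (b0 gives the starting ip, ar.bspstore the top of the
 *  register backing store), then repeatedly pull the previous b0 and
 *  ar.pfs back out of the backing store until the ip goes bad or
 *  "start_kernel" is reached.  Each iteration prints one frame line,
 *  e.g. (illustrative values):
 *
 *      #3 [BSP:e000000034d41200] schedule at e00000000462bd60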
* * NOTE: see kernel source: show_stack() and/or kdba_bt_stack() */ static void ia64_old_unwind_init(void) { long len; len = STRUCT_SIZE("unw_frame_info"); if (len < 0) { error(WARNING, "cannot determine size of unw_frame_info\n"); machdep->flags |= UNW_OUT_OF_SYNC; } else if (len != sizeof(struct unw_frame_info)) { error(WARNING, "unw_frame_info size differs: %ld (local: %d)\n", len, sizeof(struct unw_frame_info)); machdep->flags |= UNW_OUT_OF_SYNC; } } static int unw_debug; /* debug fprintf indent */ static void ia64_old_unwind(struct bt_info *bt) { struct unw_frame_info unw_frame_info, *info; struct syment *sm; int frame; char *name; if (bt->debug) CRASHDEBUG_SUSPEND(bt->debug); if (CRASHDEBUG(1)) unw_debug = 0; info = &unw_frame_info; unw_init_from_blocked_task(info, bt->task); frame = 0; do { if (info->ip == 0) break; if (!IS_KVADDR(info->ip)) break; if ((sm = value_search(info->ip, NULL))) name = sm->name; else name = "(unknown)"; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str)) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == info->ip) { bt->ref->cmdflags |= BT_REF_FOUND; goto unwind_return; } break; } } else { fprintf(fp, "%s#%d [BSP:%lx] %s at %lx\n", frame >= 10 ? "" : " ", frame, info->bsp, name, info->ip); if (bt->flags & BT_FULL) rse_function_params(info, name); if (bt->flags & BT_LINE_NUMBERS) ia64_dump_line_number(info->ip); } frame++; if (CRASHDEBUG(1)) unw_debug = 0; if (STREQ(name, "start_kernel")) break; } while (old_unw_unwind(info) >= 0); unwind_return: if (!BT_REFERENCE_CHECK(bt) && !is_kernel_thread(bt->task)) ia64_exception_frame(bt->stacktop - SIZE(pt_regs), bt); if (bt->debug) CRASHDEBUG_RESTORE(); } static unsigned long ia64_rse_slot_num (unsigned long *addr) { return (((unsigned long) addr) >> 3) & 0x3f; } /* * Given a bsp address and a number of register locations, calculate a new * bsp address, accounting for any intervening RNAT stores. */ static unsigned long * ia64_rse_skip_regs (unsigned long *addr, long num_regs) { long delta = ia64_rse_slot_num(addr) + num_regs; if (CRASHDEBUG(1)) { fprintf(fp, "%sia64_rse_skip_regs: ia64_rse_slot_num(%lx): %ld num_regs: %ld\n", space(unw_debug), (ulong)addr, ia64_rse_slot_num(addr), num_regs); } if (num_regs < 0) delta -= 0x3e; if (CRASHDEBUG(1)) { fprintf(fp, "%sia64_rse_skip_regs: delta: %ld return(%lx)", space(unw_debug), delta, (ulong)(addr + num_regs + delta/0x3f)); if (addr > (addr + num_regs + delta/0x3f)) fprintf(fp, "(-%ld)\n", addr - (addr + num_regs + delta/0x3f)); else fprintf(fp, "(+%ld)\n", (addr + num_regs + delta/0x3f) - addr); } return(addr + num_regs + delta/0x3f); } /* * Returns the address of the RNAT slot that covers the slot at * address SLOT_ADDR. */ static unsigned long * ia64_rse_rnat_addr (unsigned long *slot_addr) { return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3)); } /* * Initialize the key fields in the unw_frame_info structure. 
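 *
 *  The size-of-locals comes straight out of ar.pfs, and the initial bsp
 *  rewinds that many slots from the top of the backing store.  A worked
 *  example with hypothetical values:
 *
 *      ar_pfs = 0x0000000000000a92  ->  sol = (ar_pfs >> 7) & 0x7f = 0x15
 *      bsp    = ia64_rse_skip_regs(top, -0x15)
 *
 *  where ia64_rse_skip_regs() also steps over any intervening RNAT
 *  collection slots (one per 63 register slots, at addresses whose
 *  slot number is 0x3f).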
* * NOTE: see kernel source: unw_init_from_blocked_task() */ static void unw_init_from_blocked_task(struct unw_frame_info *info, ulong task) { ulong sw; ulong sol, limit, top; ulong ar_pfs, ar_bspstore, b0; sw = SWITCH_STACK_ADDR(task); BZERO(info, sizeof(struct unw_frame_info)); readmem(sw + OFFSET(switch_stack_b0), KVADDR, &b0, sizeof(ulong), "switch_stack b0", FAULT_ON_ERROR); readmem(sw + OFFSET(switch_stack_ar_pfs), KVADDR, &ar_pfs, sizeof(ulong), "switch_stack ar_pfs", FAULT_ON_ERROR); readmem(sw + OFFSET(switch_stack_ar_bspstore), KVADDR, &ar_bspstore, sizeof(ulong), "switch_stack ar_bspstore", FAULT_ON_ERROR); sol = (ar_pfs >> 7) & 0x7f; /* size of locals */ limit = task + IA64_RBS_OFFSET; top = ar_bspstore; if ((top - task) >= IA64_STK_OFFSET) top = limit; if (CRASHDEBUG(1)) { unw_debug++; fprintf(fp, "unw_init_from_blocked_task: stack top: %lx sol: %ld\n", top, sol); } info->regstk.limit = limit; info->regstk.top = top; info->sw = (struct switch_stack *)sw; info->bsp = (ulong)ia64_rse_skip_regs((ulong *)info->regstk.top, -sol); info->cfm = (ulong *)(sw + OFFSET(switch_stack_ar_pfs)); info->ip = b0; if (CRASHDEBUG(1)) dump_unw_frame_info(info); } /* * Update the unw_frame_info structure based upon its current state. * This routine works without enabling CONFIG_IA64_NEW_UNWIND because * gdb allocates two additional "local" register locations for each * function, found at the end of the stored locals: * * register "sol-1" (last local) = ar.pfs (gives us previous sol) * register "sol-2" (2nd to last local = b0 to previous address * * NOTE: see kernel source: unw_unwind() (#ifndef CONFIG_IA64_NEW_UNWIND) * On entry, info->regstk.top should point to the register backing * store for r32. */ static int old_unw_unwind (struct unw_frame_info *info) { unsigned long sol, cfm; int is_nat; if (!readmem((ulong)info->cfm, KVADDR, &cfm, sizeof(long), "info->cfm", QUIET|RETURN_ON_ERROR)) return -1; sol = (cfm >> 7) & 0x7f; /* size of locals */ if (CRASHDEBUG(1)) { fprintf(fp, "old_unw_unwind: cfm: %lx sol: %ld\n", cfm, sol); unw_debug++; } /* * In general, we would have to make use of unwind info to * unwind an IA-64 stack, but for now gcc uses a special * convention that makes this possible without full-fledged * unwind info. Specifically, we expect "rp" in the second * last, and "ar.pfs" in the last local register, so the * number of locals in a frame must be at least two. If it's * less than that, we reached the end of the C call stack. */ if (sol < 2) return -1; info->ip = rse_read_reg(info, sol - 2, &is_nat); if (CRASHDEBUG(1)) fprintf(fp, "old_unw_unwind: ip: %lx\n", info->ip); if (is_nat || (info->ip & (machdep->machspec->unimpl_va_mask | 0xf))) return -1; info->cfm = ia64_rse_skip_regs((ulong *)info->bsp, sol - 1); cfm = rse_read_reg(info, sol - 1, &is_nat); if (CRASHDEBUG(1)) fprintf(fp, "old_unw_unwind: info->cfm: %lx => %lx\n", (ulong)info->cfm, cfm); if (is_nat) return -1; sol = (cfm >> 7) & 0x7f; info->bsp = (ulong)ia64_rse_skip_regs((ulong *)info->bsp, -sol); if (CRASHDEBUG(1)) { fprintf(fp, "old_unw_unwind: next sol: %ld\n", sol); fprintf(fp, "old_unw_unwind: next bsp: %lx\n", info->bsp); } return 0; #ifdef KERNEL_SOURCE unsigned long sol, cfm = *info->cfm; int is_nat; sol = (cfm >> 7) & 0x7f; /* size of locals */ /* * In general, we would have to make use of unwind info to * unwind an IA-64 stack, but for now gcc uses a special * convention that makes this possible without full-fledged * unwind info. 
Specifically, we expect "rp" in the second * last, and "ar.pfs" in the last local register, so the * number of locals in a frame must be at least two. If it's * less than that, we reached the end of the C call stack. */ if (sol < 2) return -1; info->ip = rse_read_reg(info, sol - 2, &is_nat); if (is_nat || (info->ip & (my_cpu_data.unimpl_va_mask | 0xf))) /* reject let obviously bad addresses */ return -1; info->cfm = ia64_rse_skip_regs((unsigned long *) info->bsp, sol - 1); cfm = rse_read_reg(info, sol - 1, &is_nat); if (is_nat) return -1; sol = (cfm >> 7) & 0x7f; info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -sol); return 0; #endif /* KERNEL_SOURCE */ } /* * Retrieve a register value from the stack, returning its NAT attribute * as well. * * NOTE: see kernel source: read_reg() */ static ulong rse_read_reg (struct unw_frame_info *info, int regnum, int *is_nat) { ulong *addr, *rnat_addr, rnat; ulong regcontent; if (CRASHDEBUG(1)) { fprintf(fp, "%srse_read_reg: bsp: %lx\n", space(unw_debug), info->bsp); unw_debug++; } addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum); if (CRASHDEBUG(1)) { unw_debug--; fprintf(fp, "%srse_read_reg: addr: %lx\n", space(unw_debug), (ulong)addr); } if (((ulong)addr < info->regstk.limit) || ((ulong)addr >= info->regstk.top) || (((long)addr & 0x7) != 0)) { *is_nat = 1; if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: is_nat: %d -- return 0xdeadbeefdeadbeef\n", space(unw_debug), *is_nat); return 0xdeadbeefdeadbeef; } rnat_addr = ia64_rse_rnat_addr(addr); if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: rnat_addr: %lx\n", space(unw_debug), (ulong)rnat_addr); if ((unsigned long) rnat_addr >= info->regstk.top) readmem((ulong)(info->sw) + OFFSET(switch_stack_ar_rnat), KVADDR, &rnat, sizeof(long), "info->sw->ar_rnat", FAULT_ON_ERROR); else readmem((ulong)rnat_addr, KVADDR, &rnat, sizeof(long), "rnat_addr", FAULT_ON_ERROR); *is_nat = (rnat & (1UL << ia64_rse_slot_num(addr))) != 0; if (CRASHDEBUG(1)) fprintf(fp, "%srse_read_reg: rnat: %lx is_nat: %d\n", space(unw_debug), rnat, *is_nat); readmem((ulong)addr, KVADDR, ®content, sizeof(long), "rse_read_reg addr", FAULT_ON_ERROR); if (CRASHDEBUG(1)) { char buf[BUFSIZE]; fprintf(fp, "%srse_read_reg: addr: %lx => %lx ", space(unw_debug), (ulong)addr, regcontent); if (is_kernel_text(regcontent)) fprintf(fp, "(%s)", value_to_symstr(regcontent, buf, pc->output_radix)); fprintf(fp, "\n"); } return regcontent; } /* * Display the arguments to a function, presuming that they are found at * the beginning of the sol section. */ #define MAX_REGISTER_PARAMS (8) static void rse_function_params(struct unw_frame_info *info, char *name) { int i; int numargs, is_nat[MAX_REGISTER_PARAMS]; char buf1[BUFSIZE], buf2[BUFSIZE], *p1, *p2; ulong arglist[MAX_REGISTER_PARAMS]; numargs = MIN(get_function_numargs(info->ip), MAX_REGISTER_PARAMS); if (CRASHDEBUG(1)) fprintf(fp, "rse_function_params: %s: %d args\n", name, numargs); switch (numargs) { case 0: fprintf(fp, " (void)\n"); return; case -1: return; default: break; } for (i = 0; i < numargs; i++) arglist[i] = rse_read_reg(info, i, &is_nat[i]); sprintf(buf1, " ("); for (i = 0; i < numargs; i++) { p1 = &buf1[strlen(buf1)]; if (is_nat[i]) sprintf(buf2, "[NAT]"); else { if ((p2 = value_symbol(arglist[i]))) sprintf(buf2, "%s", p2); else sprintf(buf2, "%lx", arglist[i]); } sprintf(p1, "%s%s", i ? 
", " : "", buf2); if (strlen(buf1) >= 80) sprintf(p1, ",\n %s", buf2); } strcat(buf1, ")\n"); fprintf(fp, "%s", buf1); } static void dump_unw_frame_info(struct unw_frame_info *info) { unw_debug++; fprintf(fp, "%sregstk.limit: %lx\n", space(unw_debug), info->regstk.limit); fprintf(fp, "%s regstk.top: %lx\n", space(unw_debug), info->regstk.top); fprintf(fp, "%s sw: %lx\n", space(unw_debug), (ulong)info->sw); fprintf(fp, "%s bsp: %lx\n", space(unw_debug), info->bsp); fprintf(fp, "%s cfm: %lx\n", space(unw_debug), (ulong)info->cfm); fprintf(fp, "%s ip: %lx\n", space(unw_debug), info->ip); unw_debug--; } static const char *hook_files[] = { "arch/ia64/kernel/entry.S", "arch/ia64/kernel/head.S", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) static struct line_number_hook ia64_line_number_hooks[] = { {"ia64_execve", ENTRY_S}, {"sys_clone2", ENTRY_S}, {"sys_clone", ENTRY_S}, {"ia64_switch_to", ENTRY_S}, {"save_switch_stack", ENTRY_S}, {"load_switch_stack", ENTRY_S}, {"__ia64_syscall", ENTRY_S}, {"invoke_syscall_trace", ENTRY_S}, {"ia64_trace_syscall", ENTRY_S}, {"ia64_ret_from_clone", ENTRY_S}, {"ia64_ret_from_syscall", ENTRY_S}, {"ia64_leave_kernel", ENTRY_S}, {"handle_syscall_error", ENTRY_S}, {"invoke_schedule_tail", ENTRY_S}, {"invoke_schedule", ENTRY_S}, {"handle_signal_delivery", ENTRY_S}, {"sys_rt_sigsuspend", ENTRY_S}, {"sys_rt_sigreturn", ENTRY_S}, {"ia64_prepare_handle_unaligned", ENTRY_S}, {"unw_init_running", ENTRY_S}, {"_start", HEAD_S}, {"ia64_save_debug_regs", HEAD_S}, {"ia64_load_debug_regs", HEAD_S}, {"__ia64_save_fpu", HEAD_S}, {"__ia64_load_fpu", HEAD_S}, {"__ia64_init_fpu", HEAD_S}, {"ia64_switch_mode", HEAD_S}, {"ia64_set_b1", HEAD_S}, {"ia64_set_b2", HEAD_S}, {"ia64_set_b3", HEAD_S}, {"ia64_set_b4", HEAD_S}, {"ia64_set_b5", HEAD_S}, {"ia64_spinlock_contention", HEAD_S}, {NULL, NULL} /* list must be NULL-terminated */ }; void ia64_dump_line_number(ulong ip) { int retries; char buf[BUFSIZE], *p; retries = 0; try_closest: get_line_number(ip, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? "" : " (cannot determine file and line number)\n"); else { retries++; ip = closest_symbol_value(ip); goto try_closest; } } } /* * For now, just make it a region 7 address for all cases, ignoring the * fact that it might be in a 2.6 kernel's non-unity mapped region. XXX */ ulong ia64_PTOV(ulong paddr) { ulong vaddr; switch (machdep->machspec->kernel_region) { case KERNEL_VMALLOC_REGION: // error(FATAL, "ia64_PTOV: TBD for kernels loaded in region 5\n"); default: case KERNEL_CACHED_REGION: vaddr = paddr + (ulong)(KERNEL_CACHED_BASE); } return vaddr; } /* * Account for 2.6 kernel mapping in region 5. */ ulong ia64_VTOP(ulong vaddr) { struct machine_specific *ms; ulong paddr; ms = &ia64_machine_specific; switch (VADDR_REGION(vaddr)) { case KERNEL_CACHED_REGION: paddr = vaddr - (ulong)(KERNEL_CACHED_BASE); break; case KERNEL_UNCACHED_REGION: paddr = vaddr - (ulong)(KERNEL_UNCACHED_BASE); break; /* * Differentiate between a 2.6 kernel address in region 5 and * a real vmalloc() address. */ case KERNEL_VMALLOC_REGION: /* * Real vmalloc() addresses should never be the subject * of a VTOP() translation. 
*/ if (ia64_IS_VMALLOC_ADDR(vaddr) || (ms->kernel_region != KERNEL_VMALLOC_REGION)) return(error(FATAL, "ia64_VTOP(%lx): unexpected region 5 address\n", vaddr)); /* * If it's a region 5 kernel address, subtract the starting * kernel virtual address, and then add the base physical page. */ paddr = vaddr - ms->kernel_start + (ms->phys_start & KERNEL_TR_PAGE_MASK); break; default: return(error(FATAL, "ia64_VTOP(%lx): invalid kernel address\n", vaddr)); } return paddr; } /* * vmalloc() starting address is either the traditional 0xa000000000000000 or * bumped up in 2.6 to 0xa000000200000000. */ int ia64_IS_VMALLOC_ADDR(ulong vaddr) { return ((vaddr >= machdep->machspec->vmalloc_start) && (vaddr < (ulong)KERNEL_UNCACHED_BASE)); } static int compare_kvaddr(const void *v1, const void *v2) { struct vaddr_range *r1, *r2; r1 = (struct vaddr_range *)v1; r2 = (struct vaddr_range *)v2; return (r1->start < r2->start ? -1 : r1->start == r2->start ? 0 : 1); } static int ia64_get_kvaddr_ranges(struct vaddr_range *vrp) { int cnt; cnt = 0; vrp[cnt].type = KVADDR_UNITY_MAP; vrp[cnt].start = machdep->identity_map_base; vrp[cnt++].end = vt->high_memory; if (machdep->machspec->kernel_start != machdep->identity_map_base) { vrp[cnt].type = KVADDR_START_MAP; vrp[cnt].start = machdep->machspec->kernel_start; vrp[cnt++].end = kt->end; } vrp[cnt].type = KVADDR_VMALLOC; vrp[cnt].start = machdep->machspec->vmalloc_start; vrp[cnt++].end = (ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT; if (VADDR_REGION(vt->node_table[0].mem_map) == KERNEL_VMALLOC_REGION) { vrp[cnt].type = KVADDR_VMEMMAP; vrp[cnt].start = vt->node_table[0].mem_map; vrp[cnt].end = vt->node_table[vt->numnodes-1].mem_map + (vt->node_table[vt->numnodes-1].size * SIZE(page)); /* * Prevent overlap with KVADDR_VMALLOC range. */ if (vrp[cnt].start > vrp[cnt-1].start) vrp[cnt-1].end = vrp[cnt].start; cnt++; } qsort(vrp, cnt, sizeof(struct vaddr_range), compare_kvaddr); return cnt; } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 4 level page table. 
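 *
 * The intended walk (sketched from the #ifdef'd TBD code below, not a
 * working implementation) picks an entry at each of the four levels by
 * shifting the vaddr down and masking with that level's table size:
 *
 *	pgd index: (vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)
 *	pud index: (vaddr >> PUD_SHIFT)   & (PTRS_PER_PUD - 1)
 *	pmd index: (vaddr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1)
 *	pte index: (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)
 *
 * then masks each fetched entry with _PFN_MASK to find the next table's
 * physical page, finishing with PAGEOFFSET(vaddr).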
*/ static int ia64_vtop_4l_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { error(FATAL, "ia64_vtop_4l_xen_wpt: TBD\n"); return FALSE; #ifdef TBD ulong *page_dir; ulong *page_upper; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pud_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1); page_upper = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PUD(PAGEBASE(page_upper), KVADDR, PAGESIZE()); pud_pte = ULONG(machdep->pud + PAGEOFFSET(page_upper)); if (verbose) fprintf(fp, " PUD: %lx => %lx\n", (ulong)page_upper, pud_pte); if (!(pud_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pud_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; #endif } /* Generic abstraction to translate user or kernel virtual * addresses to physical using a 3 level page table. 
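 *
 * Same walk as the 4-level variant above with the PUD step folded out:
 * the pgd entry's _PFN_MASK bits point directly at the pmd table.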
*/ static int ia64_vtop_xen_wpt(ulong vaddr, physaddr_t *paddr, ulong *pgd, int verbose, int usr) { error(FATAL, "ia64_vtop_xen_wpt: TBD\n"); return FALSE; #ifdef TBD ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; ulong region, offset; if (usr) { region = VADDR_REGION(vaddr); offset = (vaddr >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); offset |= (region << (PAGESHIFT() - 6)); page_dir = pgd + offset; } else { if (!(pgd = (ulong *)vt->kernel_pgd[0])) error(FATAL, "cannot determine kernel pgd pointer\n"); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)); } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte)) return FALSE; offset = (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1); page_middle = (ulong *)(PTOV(pgd_pte & _PFN_MASK)) + offset; FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte)) return FALSE; offset = (vaddr >> PAGESHIFT()) & (PTRS_PER_PTE - 1); page_table = (ulong *)(PTOV(pmd_pte & _PFN_MASK)) + offset; FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_P))) { if (usr) *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); ia64_translate_pte(pte, 0, 0); } return FALSE; } *paddr = (pte & _PFN_MASK) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); ia64_translate_pte(pte, 0, 0); } return TRUE; #endif } #include "netdump.h" #include "xen_dom0.h" /* * Determine the relocatable physical address base. */ static void ia64_calc_phys_start(void) { FILE *iomem; int i, found, errflag; char buf[BUFSIZE]; char *p1; ulong kernel_code_start; struct vmcore_data *vd; ulong phys_start, text_start; Elf64_Phdr *phdr = NULL; /* * Default to 64MB. */ machdep->machspec->phys_start = DEFAULT_PHYS_START; text_start = symbol_exists("_text") ? symbol_value("_text") : BADADDR; if (ACTIVE()) { if ((iomem = fopen("/proc/iomem", "r")) == NULL) return; errflag = 1; while (fgets(buf, BUFSIZE, iomem)) { if (strstr(buf, ": Kernel code")) { clean_line(buf); errflag = 0; break; } } fclose(iomem); if (errflag) return; if (!(p1 = strstr(buf, "-"))) return; else *p1 = NULLCHAR; errflag = 0; kernel_code_start = htol(buf, RETURN_ON_ERROR|QUIET, &errflag); if (errflag) return; machdep->machspec->phys_start = kernel_code_start; if (CRASHDEBUG(1)) { if (text_start == BADADDR) fprintf(fp, "_text: (unknown) "); else fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "Kernel code: %lx -> ", kernel_code_start); fprintf(fp, "phys_start: %lx\n\n", machdep->machspec->phys_start); } return; } /* * Get relocation value from whatever dumpfile format is being used. 
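 *
 * For a kdump vmcore, for instance, the relocation comes out of the ELF
 * program headers: the PT_LOAD segment whose p_vaddr equals "_text"
 * supplies phys_start in its p_paddr, with the first region 5 segment
 * used as a fallback, as the code below shows.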
*/ if (DISKDUMP_DUMPFILE()) { if (diskdump_phys_base(&phys_start)) { machdep->machspec->phys_start = phys_start; if (CRASHDEBUG(1)) fprintf(fp, "compressed kdump: phys_start: %lx\n", phys_start); } return; } else if (LKCD_DUMPFILE()) { if (lkcd_get_kernel_start(&phys_start)) { machdep->machspec->phys_start = phys_start; if (CRASHDEBUG(1)) fprintf(fp, "LKCD dump: phys_start: %lx\n", phys_start); } } if ((vd = get_kdump_vmcore_data())) { /* * There should be at most one region 5 region, and it * should be equal to "_text". If not, take whatever * region 5 address comes first and hope for the best. */ for (i = found = 0; i < vd->num_pt_load_segments; i++) { phdr = vd->load64 + i; if (phdr->p_vaddr == text_start) { machdep->machspec->phys_start = phdr->p_paddr; found++; break; } } for (i = 0; !found && (i < vd->num_pt_load_segments); i++) { phdr = vd->load64 + i; if (VADDR_REGION(phdr->p_vaddr) == KERNEL_VMALLOC_REGION) { machdep->machspec->phys_start = phdr->p_paddr; found++; break; } } if (found && CRASHDEBUG(1)) { if (text_start == BADADDR) fprintf(fp, "_text: (unknown) "); else fprintf(fp, "_text: %lx ", text_start); fprintf(fp, "p_vaddr: %lx p_paddr: %lx\n", phdr->p_vaddr, phdr->p_paddr); } return; } } /* * From the xen vmcore, create an index of mfns for each page that makes * up the dom0 kernel's complete phys_to_machine_mapping[max_pfn] array. */ static int ia64_xen_kdump_p2m_create(struct xen_kdump_data *xkd) { /* * Temporarily read physical (machine) addresses from vmcore. */ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) { fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); fprintf(fp, "ia64_xen_kdump_p2m_create: p2m_mfn: %lx\n", xkd->p2m_mfn); } if ((xkd->p2m_mfn_frame_list = (ulong *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc p2m_frame_list"); if (!readmem(PTOB(xkd->p2m_mfn), PHYSADDR, xkd->p2m_mfn_frame_list, PAGESIZE(), "xen kdump p2m mfn page", RETURN_ON_ERROR)) error(FATAL, "cannot read xen kdump p2m mfn page\n"); xkd->p2m_frames = PAGESIZE()/sizeof(ulong); pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return TRUE; } physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo) { ulong pgd_idx, pte_idx; ulong pmd, pte; physaddr_t paddr; /* * Temporarily read physical (machine) addresses from vmcore. 
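 *
 * The pseudo-physical address is resolved in two steps that mirror a
 * pagetable walk over the p2m array -- the mfn frame list acts as the
 * "pgd", one page of mfns acts as the "pte" page. Illustrative flow:
 *
 *	pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1);
 *	pmd     = p2m_mfn_frame_list[pgd_idx] & _PFN_MASK;
 *	pte     = machine word read at pmd + pmd_index * sizeof(ulong);
 *	paddr   = (page[pte_idx] & _PFN_MASK) | PAGEOFFSET(pseudo);
 *
 * with the last pmd/mfn reads cached to avoid rereading the vmcore.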
*/ pc->curcmd_flags |= XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (temporary): force XEN_MACHINE_ADDR\n"); xkd->accesses += 2; pgd_idx = (pseudo >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1); pmd = xkd->p2m_mfn_frame_list[pgd_idx] & _PFN_MASK; if (!pmd) { paddr = P2M_FAILURE; goto out; } pmd += ((pseudo >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); if (pmd != xkd->last_pmd_read) { if (!readmem(pmd, PHYSADDR, &pte, sizeof(ulong), "ia64_xen_kdump_p2m pmd", RETURN_ON_ERROR)) { xkd->last_pmd_read = BADADDR; xkd->last_mfn_read = BADADDR; paddr = P2M_FAILURE; goto out; } xkd->last_pmd_read = pmd; } else { pte = xkd->last_mfn_read; xkd->cache_hits++; } pte = pte & _PFN_MASK; if (!pte) { paddr = P2M_FAILURE; goto out; } if (pte != xkd->last_mfn_read) { if (!readmem(pte, PHYSADDR, xkd->page, PAGESIZE(), "ia64_xen_kdump_p2m pte page", RETURN_ON_ERROR)) { xkd->last_pmd_read = BADADDR; xkd->last_mfn_read = BADADDR; paddr = P2M_FAILURE; goto out; } xkd->last_mfn_read = pte; } else xkd->cache_hits++; pte_idx = (pseudo >> PAGESHIFT()) & (PTRS_PER_PTE - 1); paddr = *(((ulong *)xkd->page) + pte_idx); if (!(paddr & _PAGE_P)) { paddr = P2M_FAILURE; goto out; } paddr = (paddr & _PFN_MASK) | PAGEOFFSET(pseudo); out: pc->curcmd_flags &= ~XEN_MACHINE_ADDR; if (CRASHDEBUG(1)) fprintf(fp, "readmem (restore): p2m translation\n"); return paddr; } #include "xendump.h" /* * Create an index of mfns for each page that makes up the * kernel's complete phys_to_machine_mapping[max_pfn] array. */ static int ia64_xendump_p2m_create(struct xendump_data *xd) { if (!symbol_exists("phys_to_machine_mapping")) { xd->flags |= XC_CORE_NO_P2M; return TRUE; } error(FATAL, "ia64_xendump_p2m_create: TBD\n"); /* dummy calls for clean "make [wW]arn" */ ia64_debug_dump_page(NULL, NULL, NULL); ia64_xendump_load_page(0, xd); ia64_xendump_page_index(0, xd); ia64_xendump_panic_task(xd); /* externally called */ ia64_get_xendump_regs(xd, NULL, NULL, NULL); /* externally called */ return FALSE; } static void ia64_debug_dump_page(FILE *ofp, char *page, char *name) { int i; ulong *up; fprintf(ofp, "%s\n", name); up = (ulong *)page; for (i = 0; i < 1024; i++) { fprintf(ofp, "%016lx: %016lx %016lx\n", (ulong)((i * 2) * sizeof(ulong)), *up, *(up+1)); up += 2; } } /* * Find the page associate with the kvaddr, and read its contents * into the passed-in buffer. */ static char * ia64_xendump_load_page(ulong kvaddr, struct xendump_data *xd) { error(FATAL, "ia64_xendump_load_page: TBD\n"); return NULL; } /* * Find the dumpfile page index associated with the kvaddr. 
*/ static int ia64_xendump_page_index(ulong kvaddr, struct xendump_data *xd) { error(FATAL, "ia64_xendump_page_index: TBD\n"); return 0; } static ulong ia64_xendump_panic_task(struct xendump_data *xd) { if (CRASHDEBUG(1)) error(INFO, "ia64_xendump_panic_task: TBD\n"); return NO_TASK; } static void ia64_get_xendump_regs(struct xendump_data *xd, struct bt_info *bt, ulong *rip, ulong *rsp) { machdep->get_stack_frame(bt, rip, rsp); if (is_task_active(bt->task) && !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS)) && STREQ(closest_symbol(*rip), "schedule")) error(INFO, "xendump: switch_stack possibly not saved -- try \"bt -t\"\n"); } /* for XEN Hypervisor analysis */ static int ia64_is_kvaddr_hyper(ulong addr) { return (addr >= HYPERVISOR_VIRT_START && addr < HYPERVISOR_VIRT_END); } static int ia64_kvtop_hyper(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { ulong virt_percpu_start, phys_percpu_start; ulong addr, dirp, entry; if (!IS_KVADDR(kvaddr)) return FALSE; if (PERCPU_VIRT_ADDR(kvaddr)) { virt_percpu_start = symbol_value("__phys_per_cpu_start"); phys_percpu_start = virt_percpu_start - DIRECTMAP_VIRT_START; *paddr = kvaddr - PERCPU_ADDR + phys_percpu_start; return TRUE; } else if (DIRECTMAP_VIRT_ADDR(kvaddr)) { *paddr = kvaddr - DIRECTMAP_VIRT_START; return TRUE; } else if (!FRAME_TABLE_VIRT_ADDR(kvaddr)) { return FALSE; } /* frametable virtual address */ addr = kvaddr - xhmachdep->frame_table; dirp = symbol_value("frametable_pg_dir"); dirp += ((addr >> PGDIR_SHIFT_3L) & (PTRS_PER_PGD - 1)) * sizeof(ulong); readmem(dirp, KVADDR, &entry, sizeof(ulong), "frametable_pg_dir", FAULT_ON_ERROR); dirp = entry & _PFN_MASK; if (!dirp) return FALSE; dirp += ((addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) * sizeof(ulong); readmem(dirp, PHYSADDR, &entry, sizeof(ulong), "frametable pmd", FAULT_ON_ERROR); dirp = entry & _PFN_MASK; if (!dirp) return FALSE; dirp += ((addr >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) * sizeof(ulong); readmem(dirp, PHYSADDR, &entry, sizeof(ulong), "frametable pte", FAULT_ON_ERROR); if (!(entry & _PAGE_P)) return FALSE; *paddr = (entry & _PFN_MASK) + (kvaddr & (PAGESIZE() - 1)); return TRUE; } static void ia64_post_init_hyper(void) { struct machine_specific *ms; ulong frame_table; ms = &ia64_machine_specific; if (symbol_exists("unw_init_frame_info")) { machdep->flags |= NEW_UNWIND; if (MEMBER_EXISTS("unw_frame_info", "pt")) { if (MEMBER_EXISTS("cpu_user_regs", "ar_csd")) { machdep->flags |= NEW_UNW_V3; ms->unwind_init = unwind_init_v3; ms->unwind = unwind_v3; ms->unwind_debug = unwind_debug_v3; ms->dump_unwind_stats = dump_unwind_stats_v3; } else { machdep->flags |= NEW_UNW_V2; ms->unwind_init = unwind_init_v2; ms->unwind = unwind_v2; ms->unwind_debug = unwind_debug_v2; ms->dump_unwind_stats = dump_unwind_stats_v2; } } else { machdep->flags |= NEW_UNW_V1; ms->unwind_init = unwind_init_v1; ms->unwind = unwind_v1; ms->unwind_debug = unwind_debug_v1; ms->dump_unwind_stats = dump_unwind_stats_v1; } } else { machdep->flags |= OLD_UNWIND; ms->unwind_init = ia64_old_unwind_init; ms->unwind = ia64_old_unwind; } ms->unwind_init(); if (symbol_exists("frame_table")) { frame_table = symbol_value("frame_table"); readmem(frame_table, KVADDR, &xhmachdep->frame_table, sizeof(ulong), "frame_table virtual address", FAULT_ON_ERROR); } else { error(FATAL, "cannot find frame_table virtual address."); } } int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt) { int plen, i; ulong paddr, stackbase, stacktop; ulong *__per_cpu_mca; struct xen_hyper_vcpu_context *vcc; vcc = 
xen_hyper_vcpu_to_vcpu_context(bt->task); if (!vcc) return 0; if (!symbol_exists("__per_cpu_mca") || !(plen = get_array_length("__per_cpu_mca", NULL, 0)) || (plen < xht->pcpus)) return 0; if (!machdep->kvtop(NULL, addr, &paddr, 0)) return 0; __per_cpu_mca = (ulong *)GETBUF(sizeof(ulong) * plen); if (!readmem(symbol_value("__per_cpu_mca"), KVADDR, __per_cpu_mca, sizeof(ulong) * plen, "__per_cpu_mca", RETURN_ON_ERROR|QUIET)) return 0; if (CRASHDEBUG(1)) { for (i = 0; i < plen; i++) { fprintf(fp, "__per_cpu_mca[%d]: %lx\n", i, __per_cpu_mca[i]); } } stackbase = __per_cpu_mca[vcc->processor]; stacktop = stackbase + (STACKSIZE() * 2); FREEBUF(__per_cpu_mca); if ((paddr >= stackbase) && (paddr < stacktop)) return 1; else return 0; } static void ia64_init_hyper(int when) { struct syment *sp; switch (when) { case SETUP_ENV: #if defined(PR_SET_FPEMU) && defined(PR_FPEMU_NOPRINT) prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT, 0, 0, 0); #endif #if defined(PR_SET_UNALIGN) && defined(PR_UNALIGN_NOPRINT) prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT, 0, 0, 0); #endif break; case PRE_SYMTAB: machdep->verify_symbol = ia64_verify_symbol; machdep->machspec = &ia64_machine_specific; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); switch (machdep->pagesize) { case 4096: machdep->stacksize = (power(2, 3) * PAGESIZE()); break; case 8192: machdep->stacksize = (power(2, 2) * PAGESIZE()); break; case 16384: machdep->stacksize = (power(2, 1) * PAGESIZE()); break; case 65536: machdep->stacksize = (power(2, 0) * PAGESIZE()); break; default: machdep->stacksize = 32*1024; break; } if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pud = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pud space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pud_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = ia64_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; machdep->machspec->phys_start = UNKNOWN_PHYS_START; /* ODA: if need make hyper version if (machdep->cmdline_args[0]) parse_cmdline_args(); */ break; case PRE_GDB: if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->kvbase = HYPERVISOR_VIRT_START; machdep->identity_map_base = HYPERVISOR_VIRT_START; machdep->is_kvaddr = ia64_is_kvaddr_hyper; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = ia64_eframe_search; machdep->back_trace = ia64_back_trace_cmd; machdep->processor_speed = xen_hyper_ia64_processor_speed; machdep->uvtop = ia64_uvtop; machdep->kvtop = ia64_kvtop_hyper; machdep->get_stack_frame = ia64_get_stack_frame; machdep->get_stackbase = ia64_get_stackbase; machdep->get_stacktop = ia64_get_stacktop; machdep->translate_pte = ia64_translate_pte; machdep->memory_size = xen_hyper_ia64_memory_size; machdep->dis_filter = ia64_dis_filter; machdep->cmd_mach = ia64_cmd_mach; machdep->get_smp_cpus = xen_hyper_ia64_get_smp_cpus; machdep->line_number_hooks = ia64_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; if ((sp = symbol_search("_stext"))) { machdep->machspec->kernel_region = VADDR_REGION(sp->value); 
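/* VADDR_REGION() is simply the top three address bits (vaddr >> 61): a _stext of 0xa000000100000000 is region 5, 0xe000000000000000 and above region 7. */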
machdep->machspec->kernel_start = sp->value; } else { // machdep->machspec->kernel_region = KERNEL_CACHED_REGION; // machdep->machspec->kernel_start = KERNEL_CACHED_BASE; } /* machdep table for Xen Hypervisor */ xhmachdep->pcpu_init = xen_hyper_ia64_pcpu_init; break; case POST_GDB: STRUCT_SIZE_INIT(switch_stack, "switch_stack"); MEMBER_OFFSET_INIT(thread_struct_fph, "thread_struct", "fph"); MEMBER_OFFSET_INIT(switch_stack_b0, "switch_stack", "b0"); MEMBER_OFFSET_INIT(switch_stack_ar_bspstore, "switch_stack", "ar_bspstore"); MEMBER_OFFSET_INIT(switch_stack_ar_pfs, "switch_stack", "ar_pfs"); MEMBER_OFFSET_INIT(switch_stack_ar_rnat, "switch_stack", "ar_rnat"); MEMBER_OFFSET_INIT(switch_stack_pr, "switch_stack", "pr"); XEN_HYPER_STRUCT_SIZE_INIT(cpuinfo_ia64, "cpuinfo_ia64"); XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_proc_freq, "cpuinfo_ia64", "proc_freq"); XEN_HYPER_MEMBER_OFFSET_INIT(cpuinfo_ia64_vendor, "cpuinfo_ia64", "vendor"); if (symbol_exists("per_cpu__cpu_info")) { xht->cpu_data_address = symbol_value("per_cpu__cpu_info"); } /* kakuma Can this be calculated? */ if (!machdep->hz) { machdep->hz = XEN_HYPER_HZ; } break; case POST_INIT: ia64_post_init_hyper(); break; } } #endif crash-7.2.8/va_server.c0000775000000000000000000002437413614623427013515 0ustar rootroot/* va_server.c - kernel crash dump file translation library * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2011, 2013 David Anderson * Copyright (C) 2002-2006, 2011, 2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * 10/99, Dave Winchell, Initial release for kernel crash dump support. * 11/12/99, Dave Winchell, Add support for in memory dumps. 
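 *
 * Format overview (as implemented by read_map()/load_data() below): the
 * dump begins with a crash_map_hdr whose magic identifies a v2 file,
 * followed at map_block by an array of crash_map_entry structures; each
 * entry names a starting virtual address and a run of zlib-compressed
 * blocks that expand to a per-page sub-map of saved vas plus the page
 * data itself. Pages absent from the sub-map read back as zero_page.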
*/ #include #include #include #include #include #include #include #include #include "va_server.h" #include #include #include struct map_hdr *vas_map_base = (struct map_hdr *)0; /* base of tree */ #ifdef NOT_DEF #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(page_size - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(page_size - 1))) #endif u_long vas_base_va; u_long vas_start_va; FILE *vas_file_p; char *zero_page; int vas_version; int read_map(char *crash_file); void load_data(struct crash_map_entry *m); int find_data(u_long va, u_long *buf, u_long *len, u_long *offset); u_long vas_find_end(void); int vas_free_memory(char *); int vas_memory_used(void); int vas_memory_dump(FILE *); int mclx_page_size(void); void set_vas_debug(ulong); extern int monitor_memory(long *, long *, long *, long *); int Page_Size; ulong vas_debug = 0; extern void *malloc(size_t); int va_server_init(char *crash_file, u_long *start, u_long *end, u_long *stride) { Page_Size = getpagesize(); /* temporary setting until disk header is read */ if(read_map(crash_file)) { if(va_server_init_v1(crash_file, start, end, stride)) return -1; vas_version = 1; return 0; } vas_version = 2; zero_page = (char *)malloc(Page_Size); bzero((void *)zero_page, Page_Size); vas_base_va = vas_start_va = vas_map_base->map[0].start_va; if(start) *start = vas_start_va; if(end) { *end = vas_find_end(); } if(stride) *stride = Page_Size; return 0; } int vas_lseek(u_long position, int whence) { if(vas_version < 2) return vas_lseek_v1(position, whence); if(whence != SEEK_SET) return -1; vas_base_va = vas_start_va + position; return 0; } size_t vas_read(void *buf_in, size_t count) { u_long len, offset, buf, va; u_long num, output, remaining; if(vas_version < 2) return vas_read_v1(buf_in, count); va = vas_base_va; remaining = count; output = (u_long)buf_in; while(remaining) { find_data(va, &buf, &len, &offset); num = (remaining > (len - offset)) ? 
(len - offset) : remaining; bcopy((const void *)(buf+offset), (void *)output, num); remaining -= num; va += num; output += num; } vas_base_va += count; return count; } size_t vas_write(void *buf_in, size_t count) { u_long len, offset, buf, va; if(vas_version < 2) return vas_write_v1(buf_in, count); if(count != sizeof(u_long)) { printf("count %d not %d\n", (int)count, (int)sizeof(u_long)); return -1; } va = vas_base_va; if(!find_data(va, &buf, &len, &offset)) *(u_long *)(buf+offset) = *(u_long *)buf_in; vas_base_va += count; return count; } void vas_free_data(u_long va) { struct crash_map_entry *m, *last_m; if(vas_version < 2) { vas_free_data_v1(va); return; } m = last_m = vas_map_base->map; for(;m->start_va;) { if(m->start_va > va) break; last_m = m; m++; } if(last_m->exp_data) { free((void *)last_m->exp_data); last_m->exp_data = 0; } } u_long vas_find_end(void) { struct crash_map_entry *m; u_long *sub_m; m = vas_map_base->map; for(;m->start_va;m++) ; m--; load_data(m); sub_m = (u_long *)m->exp_data; for(;*sub_m; sub_m++) ; sub_m--; return *sub_m; } int find_data(u_long va, u_long *buf, u_long *len, u_long *offset) { u_long off; struct crash_map_entry *m, *last_m; u_long *sub_m, va_saved; char *data; int saved; m = last_m = vas_map_base->map; for(;m->start_va;) { if(m->start_va > va) break; last_m = m; m++; } load_data(last_m); sub_m = (u_long *)last_m->exp_data; data = last_m->exp_data + CRASH_SUB_MAP_PAGES*Page_Size; saved = 0; for(;*sub_m; sub_m++, data += Page_Size) { va_saved = *sub_m; if((va >= va_saved) && (va < (va_saved + Page_Size))) { saved = 1; break; } else if(va < va_saved) break; } off = va - (u_long)trunc_page(va); if(offset) *offset = off; if(len) *len = Page_Size; if (vas_debug && !saved) fprintf(stderr, "find_data: page containing %lx not saved\n", (u_long)trunc_page(va)); if(buf) *buf = saved ? 
(u_long)data : (u_long)zero_page; return (saved ^ 1); } void load_data(struct crash_map_entry *m) { char *compr_buf; char *exp_buf; int ret, items; uLongf destLen; int retries; if(m->exp_data) goto out; ret = fseek(vas_file_p, (long)(m->start_blk * Page_Size), SEEK_SET); if(ret == -1) { printf("load_data: unable to fseek, errno = %d\n", ferror(vas_file_p)); clean_exit(1); } retries = 0; load_data_retry1: compr_buf = (char *)malloc(m->num_blks * Page_Size); if(!compr_buf) { if (retries++ == 0) { vas_free_memory("malloc failure: out of memory"); goto load_data_retry1; } fprintf(stderr, "FATAL ERROR: malloc failure: out of memory\n"); clean_exit(1); } items = fread((void *)compr_buf, sizeof(char), m->num_blks * Page_Size, vas_file_p); if(items != m->num_blks * Page_Size) { printf("unable to read blocks from errno = %d\n", ferror(vas_file_p)); clean_exit(1); } load_data_retry2: m->exp_data = exp_buf = (char *)malloc((CRASH_SOURCE_PAGES+CRASH_SUB_MAP_PAGES) * Page_Size); if(!exp_buf) { if (retries++ == 0) { vas_free_memory("malloc failure: out of memory"); goto load_data_retry2; } fprintf(stderr, "FATAL ERROR: malloc failure: out of memory\n"); clean_exit(1); } destLen = (uLongf)((CRASH_SOURCE_PAGES+CRASH_SUB_MAP_PAGES) * Page_Size); ret = uncompress((Bytef *)exp_buf, &destLen, (const Bytef *)compr_buf, (uLong)items); if(ret) { if(ret == Z_MEM_ERROR) printf("load_data, bad ret Z_MEM_ERROR from uncompress\n"); else if(ret == Z_BUF_ERROR) printf("load_data, bad ret Z_BUF_ERROR from uncompress\n"); else if(ret == Z_DATA_ERROR) printf("load_data, bad ret Z_DATA_ERROR from uncompress\n"); else printf("load_data, bad ret %d from uncompress\n", ret); clean_exit(1); } free((void *)compr_buf); out: return; } int read_map(char *crash_file) { struct crash_map_hdr *disk_hdr; int ret, items; struct map_hdr *hdr; vas_file_p = fopen(crash_file, "r"); if(vas_file_p == (FILE *)0) { printf("read_maps: bad ret from fopen for %s: %s\n", crash_file, strerror(errno)); return -1; } hdr = (struct map_hdr *)malloc(sizeof(struct map_hdr)); if(!hdr) { printf("read_map: unable to malloc mem\n"); return -1; } bzero((void *)hdr, sizeof(struct map_hdr)); disk_hdr = (struct crash_map_hdr *)malloc(Page_Size); ret = fseek(vas_file_p, (long)0, SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(hdr); free(disk_hdr); return -1; } items = fread((void *)disk_hdr, 1, Page_Size, vas_file_p); if(items != Page_Size) { free(hdr); free(disk_hdr); return -1; } if(disk_hdr->magic[0] != CRASH_MAGIC) { free(hdr); free(disk_hdr); return -1; } ret = fseek(vas_file_p, (long)((disk_hdr->map_block) * disk_hdr->blk_size), SEEK_SET); if(ret == -1) { printf("va_server: unable to fseek, err = %d\n", ferror(vas_file_p)); free(hdr); free(disk_hdr); return -1; } Page_Size = disk_hdr->blk_size; /* over-ride PAGE_SIZE */ hdr->blk_size = disk_hdr->blk_size; hdr->map = (struct crash_map_entry *)malloc(disk_hdr->map_blocks * disk_hdr->blk_size); items = fread((void *)hdr->map, hdr->blk_size, disk_hdr->map_blocks, vas_file_p); if(items != disk_hdr->map_blocks) { printf("unable to read map entries, err = %d\n", errno); free(hdr); free(disk_hdr); return -1; } vas_map_base = hdr; free(disk_hdr); return 0; } int vas_free_memory(char *s) { struct crash_map_entry *m; long swap_usage; int blks; if (vas_version < 2) return 0; if (s) { fprintf(stderr, "\nWARNING: %s ", s); if (monitor_memory(NULL, NULL, NULL, &swap_usage)) fprintf(stderr, "(swap space usage: %ld%%)", swap_usage); fprintf(stderr, "\nWARNING: memory/swap 
exhaustion may cause this session to be killed\n"); } for (blks = 0, m = vas_map_base->map; m->start_va; m++) { if (m->exp_data) { free((void *)m->exp_data); m->exp_data = 0; blks += m->num_blks; } } return blks; } int vas_memory_used(void) { struct crash_map_entry *m; int blks; if (vas_version < 2) return 0; for (blks = 0, m = vas_map_base->map; m->start_va; m++) { if (m->exp_data) blks += m->num_blks; } return blks; } char *memory_dump_hdr_32 = "START_VA EXP_DATA START_BLK NUM_BLKS\n"; char *memory_dump_fmt_32 = "%8lx %8lx %9d %8d\n"; char *memory_dump_hdr_64 = \ " START_VA EXP_DATA START_BLK NUM_BLKS\n"; char *memory_dump_fmt_64 = "%16lx %16lx %9d %8d\n"; int vas_memory_dump(FILE *fp) { struct crash_map_entry *m; char *hdr, *fmt; int blks; if (vas_version < 2) { fprintf(fp, "%s\n", vas_version ? "version 1: not supported" : "no dumpfile"); return 0; } hdr = sizeof(long) == 4 ? memory_dump_hdr_32 : memory_dump_hdr_64; fmt = sizeof(long) == 4 ? memory_dump_fmt_32 : memory_dump_fmt_64; fprintf(fp, "%s", hdr); for (blks = 0, m = vas_map_base->map; m->start_va; m++) { fprintf(fp, fmt, m->start_va, m->exp_data, m->start_blk, m->num_blks); if (m->exp_data) blks += m->num_blks; } fprintf(fp, "total blocks: %d\n", blks); return blks; } int mclx_page_size(void) { return (Page_Size); } void set_vas_debug(ulong value) { vas_debug = value; } crash-7.2.8/lkcd_v1.c0000775000000000000000000002112613614623427013034 0ustar rootroot/* lkcd_v1.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define LKCD_COMMON #include "defs.h" #define CONFIG_VMDUMP #include "lkcd_vmdump_v1.h" static dump_header_t dump_header_v1 = { 0 }; static dump_page_t dump_page = { 0 }; /* * Verify and initialize the LKCD environment, storing the common data * in the global lkcd_environment structure. */ int lkcd_dump_init_v1(FILE *fp, int fd) { int i; int eof; uint32_t pgcnt; dump_header_t *dh; dump_page_t *dp; lkcd->fd = fd; lkcd->fp = fp; lseek(lkcd->fd, 0, SEEK_SET); dh = &dump_header_v1; dp = &dump_page; if (read(lkcd->fd, dh, sizeof(dump_header_t)) != sizeof(dump_header_t)) return FALSE; lkcd->dump_header = dh; lkcd->dump_page = dp; if (lkcd->debug) dump_lkcd_environment(LKCD_DUMP_HEADER_ONLY); /* * Allocate and clear the benchmark offsets, one per megabyte. 
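 *
 * Worked example with assumed values: a dump of 131072 4KB pages
 * (512MB) gives LKCD_PAGES_PER_MEGABYTE() == 256, so benchmark_pages
 * becomes (131072 / 256) + 1 = 513 one-per-megabyte seek offsets.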
*/ lkcd->page_size = dh->dh_page_size; lkcd->page_shift = ffs(lkcd->page_size) - 1; lkcd->bits = sizeof(long) * 8; lkcd->total_pages = dh->dh_num_pages; lkcd->benchmark_pages = (dh->dh_num_pages/LKCD_PAGES_PER_MEGABYTE())+1; lkcd->page_header_size = sizeof(dump_page_t); lkcd->zone_shift = ffs(ZONE_SIZE) - 1; lkcd->zone_mask = ~(ZONE_SIZE - 1); lkcd->num_zones = 0; lkcd->max_zones = 0; lkcd->get_dp_flags = get_dp_flags_v1; lkcd->get_dp_address = get_dp_address_v1; lkcd->get_dp_size = get_dp_size_v1; lkcd->compression = LKCD_DUMP_COMPRESS_RLE; lseek(lkcd->fd, LKCD_OFFSET_TO_FIRST_PAGE, SEEK_SET); for (pgcnt = 0, eof = FALSE; !eof; pgcnt++) { switch (lkcd_load_dump_page_header(dp, pgcnt)) { case LKCD_DUMPFILE_OK: case LKCD_DUMPFILE_END: break; case LKCD_DUMPFILE_EOF: eof = TRUE; continue; } if (!(dp->dp_flags & (DUMP_COMPRESSED|DUMP_RAW|DUMP_END))) { lkcd_print("unknown page flag in dump: %lx\n", dp->dp_flags); } if (dp->dp_size > 4096) { lkcd_print("dp_size > 4096: %d\n", dp->dp_size); dump_lkcd_environment(LKCD_DUMP_PAGE_ONLY); } if (dp->dp_flags & DUMP_END) { lkcd_print("found DUMP_END\n"); break; } lseek(lkcd->fd, dp->dp_size, SEEK_CUR); if (!LKCD_DEBUG(1)) break; } /* * Allocate space for LKCD_CACHED_PAGES data pages plus one to * contain a copy of the compressed data of the current page. */ if ((lkcd->page_cache_buf = (char *)malloc (dh->dh_page_size * (LKCD_CACHED_PAGES))) == NULL) return FALSE; /* * Clear the page data areas. */ lkcd_free_memory(); for (i = 0; i < LKCD_CACHED_PAGES; i++) { lkcd->page_cache_hdr[i].pg_bufptr = &lkcd->page_cache_buf[i * dh->dh_page_size]; } if ((lkcd->compressed_page = (char *)malloc(dh->dh_page_size)) == NULL) return FALSE; if ((lkcd->page_hash = (struct page_hash_entry *)calloc (LKCD_PAGE_HASH, sizeof(struct page_hash_entry))) == NULL) return FALSE; lkcd->total_pages = eof || (pgcnt > dh->dh_num_pages) ? pgcnt : dh->dh_num_pages; lkcd->panic_task = (ulong)dh->dh_current_task; lkcd->panic_string = (char *)&dh->dh_panic_string[0]; if (!fp) lkcd->flags |= LKCD_REMOTE; lkcd->flags |= LKCD_VALID; return TRUE; } /* * Return the current page's dp_size. */ uint32_t get_dp_size_v1(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_size); } /* * Return the current page's dp_flags. */ uint32_t get_dp_flags_v1(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_flags); } /* * Return the current page's dp_address. */ uint64_t get_dp_address_v1(void) { dump_page_t *dp; dp = (dump_page_t *)lkcd->dump_page; return(dp->dp_address); } /* * console-only output for info regarding current page. */ void dump_dump_page_v1(char *s, void *dpp) { dump_page_t *dp; uint32_t flags; int others; console(s); dp = (dump_page_t *)dpp; others = 0; console("dp_address: %llx ", dp->dp_address); console("dp_size: %ld ", dp->dp_size); console("dp_flags: %lx (", flags = dp->dp_flags); if (flags & DUMP_COMPRESSED) console("DUMP_COMPRESSED", others++); if (flags & DUMP_RAW) console("%sDUMP_RAW", others++ ? "|" : ""); if (flags & DUMP_END) console("DUMP_END", others++ ? "|" : ""); console(")\n"); } /* * help -S output, or as specified by arg. 
*/ void dump_lkcd_environment_v1(ulong arg) { int others; dump_header_t *dh; dump_page_t *dp; dh = (dump_header_t *)lkcd->dump_header; dp = (dump_page_t *)lkcd->dump_page; if (arg == LKCD_DUMP_HEADER_ONLY) goto dump_header_only; if (arg == LKCD_DUMP_PAGE_ONLY) goto dump_page_only; dump_header_only: lkcd_print(" dump_header:\n"); lkcd_print(" dh_magic_number: %llx ", dh->dh_magic_number); if (dh->dh_magic_number == DUMP_MAGIC_NUMBER) lkcd_print("(DUMP_MAGIC_NUMBER)\n"); else lkcd_print("(?)\n"); lkcd_print(" dh_version: %d\n", dh->dh_version); lkcd_print(" dh_header_size: %d\n", dh->dh_header_size); lkcd_print(" dh_dump_level: %d\n", dh->dh_dump_level); lkcd_print(" dh_page_size: %d\n", dh->dh_page_size); lkcd_print(" dh_memory_size: %lld\n", dh->dh_memory_size); lkcd_print(" dh_memory_start: %llx\n", dh->dh_memory_start); lkcd_print(" dh_memory_end: %llx\n", dh->dh_memory_end); lkcd_print(" dh_esp: %lx\n", dh->dh_esp); lkcd_print(" dh_eip: %lx\n", dh->dh_eip); lkcd_print(" dh_num_pages: %d\n", dh->dh_num_pages); lkcd_print(" dh_panic_string: %s%s", dh->dh_panic_string, dh && dh->dh_panic_string && strstr(dh->dh_panic_string, "\n") ? "" : "\n"); lkcd_print(" dh_time: %s\n", strip_linefeeds(ctime(&(dh->dh_time.tv_sec)))); lkcd_print(" dh_utsname:\n"); lkcd_print(" sysname: %s\n", dh->dh_utsname.sysname); lkcd_print(" nodename: %s\n", dh->dh_utsname.nodename); lkcd_print(" release: %s\n", dh->dh_utsname.release); lkcd_print(" version: %s\n", dh->dh_utsname.version); lkcd_print(" machine: %s\n", dh->dh_utsname.machine); lkcd_print(" domainname: %s\n", dh->dh_utsname.domainname); lkcd_print(" dh_current_task: %lx\n", dh->dh_current_task); lkcd_print(" dh_regs:\n"); #ifdef PPC lkcd_print(" (PowerPC register display TBD)\n"); #endif #ifdef X86 lkcd_print(" ebx: %lx\n", dh->dh_regs.ebx); lkcd_print(" ecx: %lx\n", dh->dh_regs.ecx); lkcd_print(" edx: %lx\n", dh->dh_regs.edx); lkcd_print(" esi: %lx\n", dh->dh_regs.esi); lkcd_print(" edi: %lx\n", dh->dh_regs.edi); lkcd_print(" eax: %lx\n", dh->dh_regs.eax); lkcd_print(" xds: %x\n", dh->dh_regs.xds); lkcd_print(" xes: %x\n", dh->dh_regs.xes); lkcd_print(" orig_eax: %lx\n", dh->dh_regs.orig_eax); lkcd_print(" eip: %lx\n", dh->dh_regs.eip); lkcd_print(" xcs: %x\n", dh->dh_regs.xcs); lkcd_print(" eflags: %lx\n", dh->dh_regs.eflags); lkcd_print(" esp: %lx\n", dh->dh_regs.esp); lkcd_print(" xss: %x\n", dh->dh_regs.xss); #endif if (arg == LKCD_DUMP_HEADER_ONLY) return; dump_page_only: lkcd_print(" dump_page:\n"); lkcd_print(" dp_address: %llx\n", dp->dp_address); lkcd_print(" dp_size: %ld\n", dp->dp_size); lkcd_print(" dp_flags: %lx (", dp->dp_flags); others = 0; if (dp->dp_flags & DUMP_COMPRESSED) lkcd_print("DUMP_COMPRESSED", others++); if (dp->dp_flags & DUMP_RAW) lkcd_print("%sDUMP_RAW", others++ ? "|" : ""); if (dp->dp_flags & DUMP_END) lkcd_print("DUMP_END", others++ ? "|" : ""); lkcd_print(")\n"); } crash-7.2.8/xendump.c0000664000000000000000000022737213614623427013201 0ustar rootroot/* * xendump.c * * Copyright (C) 2006-2011, 2013-2014 David Anderson * Copyright (C) 2006-2011, 2013-2014 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. */ #include "defs.h" #include "xendump.h" static struct xendump_data xendump_data = { 0 }; struct xendump_data *xd = &xendump_data; static int xc_save_verify(char *); static int xc_core_verify(char *, char *); static int xc_save_read(void *, int, ulong, physaddr_t); static int xc_core_read(void *, int, ulong, physaddr_t); static int xc_core_mfns(ulong, FILE *); static void poc_store(ulong, off_t); static off_t poc_get(ulong, int *); static void xen_dump_vmconfig(FILE *); static void xc_core_create_pfn_tables(void); static ulong xc_core_pfn_to_page_index(ulong); static int xc_core_pfn_valid(ulong); static void xendump_print(char *fmt, ...); static int xc_core_elf_verify(char *, char *); static void xc_core_elf_dump(void); static char *xc_core_elf_mfn_to_page(ulong, char *); static int xc_core_elf_mfn_to_page_index(ulong); static ulong xc_core_elf_pfn_valid(ulong); static ulong xc_core_elf_pfn_to_page_index(ulong); static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *); static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *); static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int); static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int); static char *xc_core_strtab(uint32_t, char *); static void xc_core_dump_elfnote(off_t, size_t, int); static void xc_core_elf_pfn_init(void); #define ELFSTORE 1 #define ELFREAD 0 /* * Determine whether a file is a xendump creation, and if TRUE, * initialize the xendump_data structure. */ int is_xendump(char *file) { int verified; char buf[BUFSIZE]; if ((xd->xfd = open(file, O_RDWR)) < 0) { if ((xd->xfd = open(file, O_RDONLY)) < 0) { sprintf(buf, "%s: open", file); perror(buf); return FALSE; } } if (read(xd->xfd, buf, BUFSIZE) != BUFSIZE) return FALSE; if (machine_type("X86") || machine_type("X86_64")) xd->page_size = 4096; else if (machine_type("IA64") && !machdep->pagesize) xd->page_size = 16384; else xd->page_size = machdep->pagesize; verified = xc_save_verify(buf) || xc_core_verify(file, buf); if (!verified) close(xd->xfd); return (verified); } /* * Verify whether the dump was created by the xc_domain_dumpcore() * library function in libxc/xc_core.c. 
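 *
 * In outline (summarizing the code below): the first bytes of the file
 * are interpreted as a struct xc_core_header, the ELF flavor is tried
 * first, and a match on xch_magic (XC_CORE_MAGIC or XC_CORE_MAGIC_HVM)
 * plus a sane xch_nr_vcpus accepts the file and caches the header's
 * context/index/pages offsets for later reads.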
*/ static int xc_core_verify(char *file, char *buf) { struct xc_core_header *xcp; xcp = (struct xc_core_header *)buf; if (xc_core_elf_verify(file, buf)) return TRUE; if ((xcp->xch_magic != XC_CORE_MAGIC) && (xcp->xch_magic != XC_CORE_MAGIC_HVM)) return FALSE; if (!xcp->xch_nr_vcpus) { error(INFO, "faulty xc_core dump file header: xch_nr_vcpus is 0\n\n"); fprintf(stderr, " xch_magic: %x\n", xcp->xch_magic); fprintf(stderr, " xch_nr_vcpus: %d\n", xcp->xch_nr_vcpus); fprintf(stderr, " xch_nr_pages: %d\n", xcp->xch_nr_pages); fprintf(stderr, " xch_ctxt_offset: %d\n", xcp->xch_ctxt_offset); fprintf(stderr, " xch_index_offset: %d\n", xcp->xch_index_offset); fprintf(stderr, " xch_pages_offset: %d\n\n", xcp->xch_pages_offset); clean_exit(1); } xd->xc_core.header.xch_magic = xcp->xch_magic; xd->xc_core.header.xch_nr_vcpus = xcp->xch_nr_vcpus; xd->xc_core.header.xch_nr_pages = xcp->xch_nr_pages; xd->xc_core.header.xch_ctxt_offset = (off_t)xcp->xch_ctxt_offset; xd->xc_core.header.xch_index_offset = (off_t)xcp->xch_index_offset; xd->xc_core.header.xch_pages_offset = (off_t)xcp->xch_pages_offset; xd->flags |= (XENDUMP_LOCAL | XC_CORE_ORIG | XC_CORE_P2M_CREATE); if (xc_core_mfns(XC_CORE_64BIT_HOST, stderr)) xd->flags |= XC_CORE_64BIT_HOST; if (!xd->page_size) error(FATAL, "unknown page size: use -p command line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; } /* * Do the work for read_xendump() for the XC_CORE dumpfile format. */ static int xc_core_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) { ulong pfn, page_index; off_t offset; int redundant; if (xd->flags & (XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE)) xc_core_create_pfn_tables(); pfn = (ulong)BTOP(paddr); if ((offset = poc_get(pfn, &redundant))) { if (!redundant) { if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; xd->last_pfn = pfn; } BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } if ((page_index = xc_core_pfn_to_page_index(pfn)) == PFN_NOT_FOUND) return READ_ERROR; offset = xd->xc_core.header.xch_pages_offset + ((off_t)(page_index) * (off_t)xd->page_size); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; poc_store(pfn, offset); BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } /* * Verify whether the dumpfile was created by the "xm save" facility. * This gets started by the "save" function in XendCheckpoint.py, and * then by xc_save.c, with the work done in the xc_linux_save() library * function in libxc/xc_linux_save.c. */ #define MAX_BATCH_SIZE 1024 /* * Number of P2M entries in a page. */ #define ULPP (xd->page_size/sizeof(unsigned long)) /* * Number of P2M entries in the pfn_to_mfn_frame_list. */ #define P2M_FL_ENTRIES (((xd->xc_save.nr_pfns)+ULPP-1)/ULPP) /* * Size in bytes of the pfn_to_mfn_frame_list. 
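 *
 * Worked example (assumed 4KB pages, 8-byte longs): ULPP = 4096/8 = 512
 * p2m entries per page, so a guest with nr_pfns = 1,000,000 needs
 * P2M_FL_ENTRIES = (1000000 + 511)/512 = 1954 frame-list entries and a
 * P2M_FL_SIZE of 1954 * 8 = 15632 bytes.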
*/ #define P2M_FL_SIZE ((P2M_FL_ENTRIES)*sizeof(unsigned long)) #define XTAB (0xf<<28) /* invalid page */ #define LTAB_MASK XTAB static int xc_save_verify(char *buf) { int i, batch_count, done_batch, *intptr; ulong flags, *ulongptr; ulong batch_index, total_pages_read; ulong N; if (!STRNEQ(buf, XC_SAVE_SIGNATURE)) return FALSE; if (lseek(xd->xfd, strlen(XC_SAVE_SIGNATURE), SEEK_SET) == -1) return FALSE; flags = XC_SAVE; if (CRASHDEBUG(1)) { fprintf(stderr, "\"%s\"\n", buf); fprintf(stderr, "endian: %d %s\n", __BYTE_ORDER, __BYTE_ORDER == __BIG_ENDIAN ? "__BIG_ENDIAN" : (__BYTE_ORDER == __LITTLE_ENDIAN ? "__LITTLE_ENDIAN" : "???")); } /* * size of vmconfig data structure (big-endian) */ if (read(xd->xfd, buf, sizeof(int)) != sizeof(int)) return FALSE; intptr = (int *)buf; if (CRASHDEBUG(1) && BYTE_SWAP_REQUIRED(__BIG_ENDIAN)) { fprintf(stderr, "byte-swap required for this:\n"); for (i = 0; i < sizeof(int); i++) fprintf(stderr, "[%x]", buf[i] & 0xff); fprintf(stderr, ": %x -> ", *intptr); } xd->xc_save.vmconfig_size = swab32(*intptr); if (CRASHDEBUG(1)) fprintf(stderr, "%x\n", xd->xc_save.vmconfig_size); if (!(xd->xc_save.vmconfig_buf = (char *)malloc (xd->xc_save.vmconfig_size))) error(FATAL, "cannot malloc xc_save vmconfig space."); if (!xd->page_size) error(FATAL, "unknown page size: use -p command line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); if (!(xd->xc_save.region_pfn_type = (ulong *)calloc (MAX_BATCH_SIZE, sizeof(ulong)))) error(FATAL, "cannot malloc region_pfn_type\n"); if (read(xd->xfd, xd->xc_save.vmconfig_buf, xd->xc_save.vmconfig_size) != xd->xc_save.vmconfig_size) goto xc_save_bailout; /* * nr_pfns (native byte order) */ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) goto xc_save_bailout; ulongptr = (ulong *)buf; if (CRASHDEBUG(1)) { for (i = 0; i < sizeof(ulong); i++) fprintf(stderr, "[%x]", buf[i] & 0xff); fprintf(stderr, ": %lx (nr_pfns)\n", *ulongptr); } xd->xc_save.nr_pfns = *ulongptr; if (machine_type("IA64")) goto xc_save_ia64; /* * Get a local copy of the live_P2M_frame_list */ if (!(xd->xc_save.p2m_frame_list = (unsigned long *)malloc(P2M_FL_SIZE))) error(FATAL, "cannot allocate p2m_frame_list array"); if (!(xd->xc_save.batch_offsets = (off_t *)calloc((size_t)P2M_FL_ENTRIES, sizeof(off_t)))) error(FATAL, "cannot allocate batch_offsets array"); xd->xc_save.batch_count = P2M_FL_ENTRIES; if (read(xd->xfd, xd->xc_save.p2m_frame_list, P2M_FL_SIZE) != P2M_FL_SIZE) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "pre-batch file pointer: %lld\n", (ulonglong)lseek(xd->xfd, 0L, SEEK_CUR)); /* * ... * int batch_count * ulong region pfn_type[batch_count] * page 0 * page 1 * ... 
* page batch_count-1 * (repeat) */ total_pages_read = 0; batch_index = 0; done_batch = FALSE; while (!done_batch) { xd->xc_save.batch_offsets[batch_index] = (off_t) lseek(xd->xfd, 0L, SEEK_CUR); if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "batch[%ld]: %d ", batch_index, batch_count); batch_index++; if (batch_index >= P2M_FL_ENTRIES) { fprintf(stderr, "more than %ld batches encountered?\n", P2M_FL_ENTRIES); goto xc_save_bailout; } switch (batch_count) { case 0: if (CRASHDEBUG(1)) { fprintf(stderr, ": Batch work is done: %ld pages read (P2M_FL_ENTRIES: %ld)\n", total_pages_read, P2M_FL_ENTRIES); } done_batch = TRUE; continue; case -1: if (CRASHDEBUG(1)) fprintf(stderr, ": Entering page verify mode\n"); continue; default: if (batch_count > MAX_BATCH_SIZE) { if (CRASHDEBUG(1)) fprintf(stderr, ": Max batch size exceeded. Giving up.\n"); done_batch = TRUE; continue; } if (CRASHDEBUG(1)) fprintf(stderr, "\n"); break; } if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != batch_count * sizeof(ulong)) goto xc_save_bailout; for (i = 0; i < batch_count; i++) { unsigned long pagetype; unsigned long pfn; pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; if (pagetype == XTAB) /* a bogus/unmapped page: skip it */ continue; if (pfn > xd->xc_save.nr_pfns) { if (CRASHDEBUG(1)) fprintf(stderr, "batch_count: %d pfn %ld out of range", batch_count, pfn); } if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) goto xc_save_bailout; total_pages_read++; } } /* * Get the list of PFNs that are not in the pseudo-phys map */ if (read(xd->xfd, &xd->xc_save.pfns_not, sizeof(xd->xc_save.pfns_not)) != sizeof(xd->xc_save.pfns_not)) goto xc_save_bailout; if (CRASHDEBUG(1)) fprintf(stderr, "PFNs not in pseudo-phys map: %d\n", xd->xc_save.pfns_not); if ((total_pages_read + xd->xc_save.pfns_not) != xd->xc_save.nr_pfns) error(WARNING, "nr_pfns: %ld != (total pages: %ld + pages not saved: %d)\n", xd->xc_save.nr_pfns, total_pages_read, xd->xc_save.pfns_not); xd->xc_save.pfns_not_offset = lseek(xd->xfd, 0L, SEEK_CUR); if (lseek(xd->xfd, sizeof(ulong) * xd->xc_save.pfns_not, SEEK_CUR) == -1) goto xc_save_bailout; xd->xc_save.vcpu_ctxt_offset = lseek(xd->xfd, 0L, SEEK_CUR); lseek(xd->xfd, 0, SEEK_END); lseek(xd->xfd, -((off_t)(xd->page_size)), SEEK_CUR); xd->xc_save.shared_info_page_offset = lseek(xd->xfd, 0L, SEEK_CUR); xd->flags |= (XENDUMP_LOCAL | flags); kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; xc_save_ia64: /* * Completely different format for ia64: * * ... * pfn # * page data * pfn # * page data * ... 
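 *
 * That is, the stream is a sequence of (native-word pfn, page_size bytes
 * of page data) pairs; the loop below records the file offset following
 * each pfn in ia64_page_offsets[], indexed by that pfn, and skips over
 * the page data itself.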
*/ free(xd->poc); xd->poc = NULL; free(xd->xc_save.region_pfn_type); xd->xc_save.region_pfn_type = NULL; if (!(xd->xc_save.ia64_page_offsets = (ulong *)calloc(xd->xc_save.nr_pfns, sizeof(off_t)))) error(FATAL, "cannot allocate ia64_page_offsets array"); /* * version */ if (read(xd->xfd, buf, sizeof(ulong)) != sizeof(ulong)) goto xc_save_bailout; xd->xc_save.ia64_version = *((ulong *)buf); if (CRASHDEBUG(1)) fprintf(stderr, "ia64 version: %lx\n", xd->xc_save.ia64_version); /* * xen_domctl_arch_setup structure */ if (read(xd->xfd, buf, sizeof(xen_domctl_arch_setup_t)) != sizeof(xen_domctl_arch_setup_t)) goto xc_save_bailout; if (CRASHDEBUG(1)) { xen_domctl_arch_setup_t *setup = (xen_domctl_arch_setup_t *)buf; fprintf(stderr, "xen_domctl_arch_setup:\n"); fprintf(stderr, " flags: %lx\n", (ulong)setup->flags); fprintf(stderr, " bp: %lx\n", (ulong)setup->bp); fprintf(stderr, " maxmem: %lx\n", (ulong)setup->maxmem); fprintf(stderr, " xsi_va: %lx\n", (ulong)setup->xsi_va); fprintf(stderr, "hypercall_imm: %x\n", setup->hypercall_imm); } for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { if (read(xd->xfd, &N, sizeof(N)) != sizeof(N)) goto xc_save_bailout; if (N < xd->xc_save.nr_pfns) xd->xc_save.ia64_page_offsets[N] = lseek(xd->xfd, 0, SEEK_CUR); else error(WARNING, "[%d]: pfn of %lx (0x%lx) in ia64 canonical page list exceeds %ld\n", i, N, N, xd->xc_save.nr_pfns); if (CRASHDEBUG(1)) { if ((i < 10) || (N >= (xd->xc_save.nr_pfns-10))) fprintf(stderr, "[%d]: %ld\n%s", i, N, i == 9 ? "...\n" : ""); } if ((N+1) >= xd->xc_save.nr_pfns) break; if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) goto xc_save_bailout; } if (CRASHDEBUG(1)) { for (i = N = 0; i < xd->xc_save.nr_pfns; i++) { if (!xd->xc_save.ia64_page_offsets[i]) N++; } fprintf(stderr, "%ld out of %ld pfns not dumped\n", N, xd->xc_save.nr_pfns); } xd->flags |= (XENDUMP_LOCAL | flags | XC_SAVE_IA64); kt->xen_flags |= (CANONICAL_PAGE_TABLES|XEN_SUSPEND); if (CRASHDEBUG(1)) xendump_memory_dump(stderr); return TRUE; xc_save_bailout: error(INFO, "xc_save_verify: \"LinuxGuestRecord\" file handling/format error\n"); if (xd->xc_save.p2m_frame_list) { free(xd->xc_save.p2m_frame_list); xd->xc_save.p2m_frame_list = NULL; } if (xd->xc_save.batch_offsets) { free(xd->xc_save.batch_offsets); xd->xc_save.batch_offsets = NULL; } if (xd->xc_save.vmconfig_buf) { free(xd->xc_save.vmconfig_buf); xd->xc_save.vmconfig_buf = NULL; } if (xd->page) { free(xd->page); xd->page = NULL; } return FALSE; } /* * Do the work for read_xendump() for the XC_SAVE dumpfile format. 
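 *
 * For orientation: a physical address splits into a page frame number and
 * an intra-page offset -- pfn = BTOP(paddr), offset = PAGEOFFSET(paddr).
 * With 4096-byte pages, for example, paddr 0x12345678 yields pfn 0x12345
 * and offset 0x678. The reader below first consults the pfn-to-offset
 * cache, then falls back to scanning the saved batches sequentially.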
*/ static int xc_save_read(void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int b, i, redundant; ulong reqpfn; int batch_count; off_t file_offset; reqpfn = (ulong)BTOP(paddr); if (CRASHDEBUG(8)) fprintf(xd->ofp, "xc_save_read(bufptr: %lx cnt: %d addr: %lx paddr: %llx (%ld, 0x%lx)\n", (ulong)bufptr, cnt, addr, (ulonglong)paddr, reqpfn, reqpfn); if (xd->flags & XC_SAVE_IA64) { if (reqpfn >= xd->xc_save.nr_pfns) { if (CRASHDEBUG(1)) fprintf(xd->ofp, "xc_save_read: pfn %lx too large: nr_pfns: %lx\n", reqpfn, xd->xc_save.nr_pfns); return SEEK_ERROR; } file_offset = xd->xc_save.ia64_page_offsets[reqpfn]; if (!file_offset) { if (CRASHDEBUG(1)) fprintf(xd->ofp, "xc_save_read: pfn %lx not stored in xendump\n", reqpfn); return SEEK_ERROR; } if (reqpfn != xd->last_pfn) { if (lseek(xd->xfd, file_offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; } else { xd->redundant++; xd->cache_hits++; } xd->accesses++; xd->last_pfn = reqpfn; BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } if ((file_offset = poc_get(reqpfn, &redundant))) { if (!redundant) { if (lseek(xd->xfd, file_offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; xd->last_pfn = reqpfn; } else if (CRASHDEBUG(1)) console("READ %ld (0x%lx) skipped!\n", reqpfn, reqpfn); BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } /* * ... * int batch_count * ulong region pfn_type[batch_count] * page 0 * page 1 * ... * page batch_count-1 * (repeat) */ for (b = 0; b < xd->xc_save.batch_count; b++) { if (lseek(xd->xfd, xd->xc_save.batch_offsets[b], SEEK_SET) == -1) return SEEK_ERROR; if (CRASHDEBUG(8)) fprintf(xd->ofp, "check batch[%d]: offset: %llx\n", b, (ulonglong)xd->xc_save.batch_offsets[b]); if (read(xd->xfd, &batch_count, sizeof(int)) != sizeof(int)) return READ_ERROR; switch (batch_count) { case 0: if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) { fprintf(xd->ofp, "batch[%d]: has count of zero -- bailing out on pfn %ld\n", b, reqpfn); } return READ_ERROR; case -1: return READ_ERROR; default: if (CRASHDEBUG(8)) fprintf(xd->ofp, "batch[%d]: offset: %llx batch count: %d\n", b, (ulonglong)xd->xc_save.batch_offsets[b], batch_count); break; } if (read(xd->xfd, xd->xc_save.region_pfn_type, batch_count * sizeof(ulong)) != batch_count * sizeof(ulong)) return READ_ERROR; for (i = 0; i < batch_count; i++) { unsigned long pagetype; unsigned long pfn; pfn = xd->xc_save.region_pfn_type[i] & ~LTAB_MASK; pagetype = xd->xc_save.region_pfn_type[i] & LTAB_MASK; if (pagetype == XTAB) /* a bogus/unmapped page: skip it */ continue; if (pfn > xd->xc_save.nr_pfns) { if (CRASHDEBUG(1)) fprintf(stderr, "batch_count: %d pfn %ld out of range", batch_count, pfn); } if (pfn == reqpfn) { file_offset = lseek(xd->xfd, 0, SEEK_CUR); poc_store(pfn, file_offset); if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } if (lseek(xd->xfd, xd->page_size, SEEK_CUR) == -1) return SEEK_ERROR; } } return READ_ERROR; } /* * Stash a pfn's offset. If they're all in use, put it in the * least-used slot that's closest to the beginning of the array. 
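 *
 * For example, with every slot in use and hit counts of {5, 2, 9, 2, ...},
 * the first slot holding the lowest count (the first 2) is recycled, since
 * the scan below only remembers a slot when its count is strictly lower
 * than the current minimum.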
*/ static void poc_store(ulong pfn, off_t file_offset) { int i; struct pfn_offset_cache *poc, *plow; ulong curlow; curlow = ~(0UL); plow = NULL; poc = xd->poc; for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { if (poc->cnt == 0) { poc->cnt = 1; poc->pfn = pfn; poc->file_offset = file_offset; xd->last_pfn = pfn; return; } if (poc->cnt < curlow) { curlow = poc->cnt; plow = poc; } } plow->cnt = 1; plow->pfn = pfn; plow->file_offset = file_offset; xd->last_pfn = pfn; } /* * Check whether a pfn's offset has been cached. */ static off_t poc_get(ulong pfn, int *redundant) { int i; struct pfn_offset_cache *poc; xd->accesses++; if (pfn == xd->last_pfn) { xd->redundant++; *redundant = TRUE; return 1; } else *redundant = FALSE; poc = xd->poc; for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++, poc++) { if (poc->cnt && (poc->pfn == pfn)) { poc->cnt++; xd->cache_hits++; return poc->file_offset; } } return 0; } /* * Perform any post-dumpfile-determination initialization here. */ int xendump_init(char *unused, FILE *fptr) { if (!XENDUMP_VALID()) return FALSE; xd->ofp = fptr; return TRUE; } int read_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (pc->curcmd_flags & XEN_MACHINE_ADDR) return READ_ERROR; switch (xd->flags & (XC_SAVE|XC_CORE_ORIG|XC_CORE_ELF)) { case XC_SAVE: return xc_save_read(bufptr, cnt, addr, paddr); case XC_CORE_ORIG: case XC_CORE_ELF: return xc_core_read(bufptr, cnt, addr, paddr); default: return READ_ERROR; } } int read_xendump_hyper(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { ulong pfn, page_index; off_t offset; pfn = (ulong)BTOP(paddr); /* ODA: pfn == mfn !!! */ if ((page_index = xc_core_mfn_to_page_index(pfn)) == PFN_NOT_FOUND) return READ_ERROR; offset = xd->xc_core.header.xch_pages_offset + ((off_t)(page_index) * (off_t)xd->page_size); if (lseek(xd->xfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) return READ_ERROR; BCOPY(xd->page + PAGEOFFSET(paddr), bufptr, cnt); return cnt; } int write_xendump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return WRITE_ERROR; } uint xendump_page_size(void) { if (!XENDUMP_VALID()) return 0; return xd->page_size; } /* * xendump_free_memory() and xendump_memory_used() * are debug only, and typically unnecessary to implement. */ int xendump_free_memory(void) { return 0; } int xendump_memory_used(void) { return 0; } /* * This function is dump-type independent, used here * to dump the xendump_data structure contents. */ int xendump_memory_dump(FILE *fp) { int i, linefeed, used, others; ulong *ulongptr; Elf32_Off offset32; Elf64_Off offset64; FILE *fpsave; fprintf(fp, " flags: %lx (", xd->flags); others = 0; if (xd->flags & XENDUMP_LOCAL) fprintf(fp, "%sXENDUMP_LOCAL", others++ ? "|" : ""); if (xd->flags & XC_SAVE) fprintf(fp, "%sXC_SAVE", others++ ? "|" : ""); if (xd->flags & XC_CORE_ORIG) fprintf(fp, "%sXC_CORE_ORIG", others++ ? "|" : ""); if (xd->flags & XC_CORE_ELF) fprintf(fp, "%sXC_CORE_ELF", others++ ? "|" : ""); if (xd->flags & XC_CORE_P2M_CREATE) fprintf(fp, "%sXC_CORE_P2M_CREATE", others++ ? "|" : ""); if (xd->flags & XC_CORE_PFN_CREATE) fprintf(fp, "%sXC_CORE_PFN_CREATE", others++ ? "|" : ""); if (xd->flags & XC_CORE_NO_P2M) fprintf(fp, "%sXC_CORE_NO_P2M", others++ ? "|" : ""); if (xd->flags & XC_SAVE_IA64) fprintf(fp, "%sXC_SAVE_IA64", others++ ? "|" : ""); if (xd->flags & XC_CORE_64BIT_HOST) fprintf(fp, "%sXC_CORE_64BIT_HOST", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " xfd: %d\n", xd->xfd); fprintf(fp, " page_size: %d\n", xd->page_size); fprintf(fp, " ofp: %lx\n", (ulong)xd->ofp); fprintf(fp, " page: %lx\n", (ulong)xd->page); fprintf(fp, " panic_pc: %lx\n", xd->panic_pc); fprintf(fp, " panic_sp: %lx\n", xd->panic_sp); fprintf(fp, " accesses: %ld\n", (ulong)xd->accesses); fprintf(fp, " cache_hits: %ld ", (ulong)xd->cache_hits); if (xd->accesses) fprintf(fp, "(%ld%%)\n", xd->cache_hits * 100 / xd->accesses); else fprintf(fp, "\n"); fprintf(fp, " last_pfn: %ld\n", xd->last_pfn); fprintf(fp, " redundant: %ld ", (ulong)xd->redundant); if (xd->accesses) fprintf(fp, "(%ld%%)\n", xd->redundant * 100 / xd->accesses); else fprintf(fp, "\n"); for (i = used = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) if (xd->poc && xd->poc[i].cnt) used++; if (xd->poc) fprintf(fp, " poc[%d]: %lx %s", PFN_TO_OFFSET_CACHE_ENTRIES, (ulong)xd->poc, xd->poc ? "" : "(none)"); else fprintf(fp, " poc[0]: (unused)\n"); for (i = 0; i < PFN_TO_OFFSET_CACHE_ENTRIES; i++) { if (!xd->poc) break; if (!xd->poc[i].cnt) { if (!i) fprintf(fp, "(none used)\n"); break; } else if (!i) fprintf(fp, "(%d used)\n", used); if (CRASHDEBUG(2)) fprintf(fp, " [%d]: pfn: %ld (0x%lx) count: %ld file_offset: %llx\n", i, xd->poc[i].pfn, xd->poc[i].pfn, xd->poc[i].cnt, (ulonglong)xd->poc[i].file_offset); } if (!xd->poc) fprintf(fp, "\n"); fprintf(fp, "\n xc_save:\n"); fprintf(fp, " nr_pfns: %ld (0x%lx)\n", xd->xc_save.nr_pfns, xd->xc_save.nr_pfns); fprintf(fp, " vmconfig_size: %d (0x%x)\n", xd->xc_save.vmconfig_size, xd->xc_save.vmconfig_size); fprintf(fp, " vmconfig_buf: %lx\n", (ulong)xd->xc_save.vmconfig_buf); if (xd->flags & XC_SAVE) xen_dump_vmconfig(fp); fprintf(fp, " p2m_frame_list: %lx ", (ulong)xd->xc_save.p2m_frame_list); if ((xd->flags & XC_SAVE) && xd->xc_save.p2m_frame_list) { fprintf(fp, "\n"); ulongptr = xd->xc_save.p2m_frame_list; for (i = 0; i < P2M_FL_ENTRIES; i++, ulongptr++) fprintf(fp, "%ld ", *ulongptr); fprintf(fp, "\n"); } else fprintf(fp, "(none)\n"); fprintf(fp, " pfns_not: %d\n", xd->xc_save.pfns_not); fprintf(fp, " pfns_not_offset: %lld\n", (ulonglong)xd->xc_save.pfns_not_offset); fprintf(fp, " vcpu_ctxt_offset: %lld\n", (ulonglong)xd->xc_save.vcpu_ctxt_offset); fprintf(fp, " shared_info_page_offset: %lld\n", (ulonglong)xd->xc_save.shared_info_page_offset); fprintf(fp, " region_pfn_type: %lx\n", (ulong)xd->xc_save.region_pfn_type); fprintf(fp, " batch_count: %ld\n", (ulong)xd->xc_save.batch_count); fprintf(fp, " batch_offsets: %lx %s\n", (ulong)xd->xc_save.batch_offsets, xd->xc_save.batch_offsets ? 
"" : "(none)"); for (i = linefeed = 0; i < xd->xc_save.batch_count; i++) { fprintf(fp, "[%d]: %llx ", i, (ulonglong)xd->xc_save.batch_offsets[i]); if (((i+1)%4) == 0) { fprintf(fp, "\n"); linefeed = FALSE; } else linefeed = TRUE; } if (linefeed) fprintf(fp, "\n"); fprintf(fp, " ia64_version: %ld\n", (ulong)xd->xc_save.ia64_version); fprintf(fp, " ia64_page_offsets: %lx ", (ulong)xd->xc_save.ia64_page_offsets); if (xd->xc_save.ia64_page_offsets) fprintf(fp, "(%ld entries)\n\n", xd->xc_save.nr_pfns); else fprintf(fp, "(none)\n\n"); fprintf(fp, " xc_core:\n"); fprintf(fp, " header:\n"); fprintf(fp, " xch_magic: %x ", xd->xc_core.header.xch_magic); if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC) fprintf(fp, "(XC_CORE_MAGIC)\n"); else if (xd->xc_core.header.xch_magic == XC_CORE_MAGIC_HVM) fprintf(fp, "(XC_CORE_MAGIC_HVM)\n"); else fprintf(fp, "(unknown)\n"); fprintf(fp, " xch_nr_vcpus: %d\n", xd->xc_core.header.xch_nr_vcpus); fprintf(fp, " xch_nr_pages: %d (0x%x)\n", xd->xc_core.header.xch_nr_pages, xd->xc_core.header.xch_nr_pages); fprintf(fp, " xch_ctxt_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_ctxt_offset, (ulonglong)xd->xc_core.header.xch_ctxt_offset); fprintf(fp, " xch_index_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_index_offset, (ulonglong)xd->xc_core.header.xch_index_offset); fprintf(fp, " xch_pages_offset: %llu (0x%llx)\n", (ulonglong)xd->xc_core.header.xch_pages_offset, (ulonglong)xd->xc_core.header.xch_pages_offset); fprintf(fp, " elf_class: %s\n", xd->xc_core.elf_class == ELFCLASS64 ? "ELFCLASS64" : xd->xc_core.elf_class == ELFCLASS32 ? "ELFCLASS32" : "n/a"); fprintf(fp, " elf_strtab_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.elf_strtab_offset, (ulonglong)xd->xc_core.elf_strtab_offset); fprintf(fp, " format_version: %016llx\n", (ulonglong)xd->xc_core.format_version); fprintf(fp, " shared_info_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.shared_info_offset, (ulonglong)xd->xc_core.shared_info_offset); if (machine_type("IA64")) fprintf(fp, " ia64_mapped_regs_offset: %lld (0x%llx)\n", (ulonglong)xd->xc_core.ia64_mapped_regs_offset, (ulonglong)xd->xc_core.ia64_mapped_regs_offset); fprintf(fp, " elf_index_pfn[%d]: %s", INDEX_PFN_COUNT, xd->xc_core.elf_class ? "\n" : "(none used)\n"); if (xd->xc_core.elf_class) { for (i = 0; i < INDEX_PFN_COUNT; i++) { fprintf(fp, "%ld:%ld ", xd->xc_core.elf_index_pfn[i].index, xd->xc_core.elf_index_pfn[i].pfn); } fprintf(fp, "\n"); } fprintf(fp, " last_batch:\n"); fprintf(fp, " index: %ld (%ld - %ld)\n", xd->xc_core.last_batch.index, xd->xc_core.last_batch.start, xd->xc_core.last_batch.end); fprintf(fp, " accesses: %ld\n", xd->xc_core.last_batch.accesses); fprintf(fp, " duplicates: %ld ", xd->xc_core.last_batch.duplicates); if (xd->xc_core.last_batch.accesses) fprintf(fp, "(%ld%%)\n", xd->xc_core.last_batch.duplicates * 100 / xd->xc_core.last_batch.accesses); else fprintf(fp, "\n"); fprintf(fp, " elf32: %lx\n", (ulong)xd->xc_core.elf32); fprintf(fp, " elf64: %lx\n", (ulong)xd->xc_core.elf64); fprintf(fp, " p2m_frames: %d\n", xd->xc_core.p2m_frames); fprintf(fp, " p2m_frame_index_list: %s\n", (xd->flags & (XC_CORE_NO_P2M|XC_SAVE)) ? "(not used)" : ""); for (i = 0; i < xd->xc_core.p2m_frames; i++) { fprintf(fp, "%ld ", xd->xc_core.p2m_frame_index_list[i]); } fprintf(fp, xd->xc_core.p2m_frames ? 
"\n" : ""); if ((xd->flags & XC_CORE_ORIG) && CRASHDEBUG(8)) xc_core_mfns(XENDUMP_LOCAL, fp); switch (xd->xc_core.elf_class) { case ELFCLASS32: fpsave = xd->ofp; xd->ofp = fp; xc_core_elf_dump(); offset32 = xd->xc_core.elf32->e_shoff; for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { xc_core_dump_Elf32_Shdr(offset32, ELFREAD); offset32 += xd->xc_core.elf32->e_shentsize; } xendump_print("\n"); xd->ofp = fpsave; break; case ELFCLASS64: fpsave = xd->ofp; xd->ofp = fp; xc_core_elf_dump(); offset64 = xd->xc_core.elf64->e_shoff; for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { xc_core_dump_Elf64_Shdr(offset64, ELFREAD); offset64 += xd->xc_core.elf64->e_shentsize; } xendump_print("\n"); xd->ofp = fpsave; break; } return 0; } static void xen_dump_vmconfig(FILE *fp) { int i, opens, closes; char *p; opens = closes = 0; p = xd->xc_save.vmconfig_buf; for (i = 0; i < xd->xc_save.vmconfig_size; i++, p++) { if (ascii(*p)) fprintf(fp, "%c", *p); else fprintf(fp, "<%x>", *p); if (*p == '(') opens++; else if (*p == ')') closes++; } fprintf(fp, "\n"); if (opens != closes) error(WARNING, "invalid vmconfig contents?\n"); } /* * Looking at the active set, try to determine who panicked, * or who was the "suspend" kernel thread. */ ulong get_xendump_panic_task(void) { int i; ulong task; struct task_context *tc; switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) { case XC_CORE_ORIG: case XC_CORE_ELF: if (machdep->xendump_panic_task) return (machdep->xendump_panic_task((void *)xd)); break; case XC_SAVE: for (i = 0; i < NR_CPUS; i++) { if (!(task = tt->active_set[i])) continue; tc = task_to_context(task); if (is_kernel_thread(task) && STREQ(tc->comm, "suspend")) return tc->task; } break; } return NO_TASK; } /* * Figure out the back trace hooks. */ void get_xendump_regs(struct bt_info *bt, ulong *pc, ulong *sp) { int i; ulong *up; if ((tt->panic_task == bt->task) && (xd->panic_pc && xd->panic_sp)) { *pc = xd->panic_pc; *sp = xd->panic_sp; return; } switch (xd->flags & (XC_CORE_ORIG|XC_CORE_ELF|XC_SAVE)) { case XC_CORE_ORIG: case XC_CORE_ELF: if (machdep->get_xendump_regs) return (machdep->get_xendump_regs(xd, bt, pc, sp)); break; case XC_SAVE: if (tt->panic_task != bt->task) break; for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++) { if (is_kernel_text(*up) && (STREQ(closest_symbol(*up), "__do_suspend"))) { *pc = *up; *sp = tt->flags & THREAD_INFO ? bt->tc->thread_info + (i * sizeof(long)) : bt->task + (i * sizeof(long)); xd->panic_pc = *pc; xd->panic_sp = *sp; return; } } } machdep->get_stack_frame(bt, pc, sp); } /* * Farm out most of the work to the proper architecture to create * the p2m table. For ELF core dumps, create the index;pfn table. */ static void xc_core_create_pfn_tables(void) { if (xd->flags & XC_CORE_P2M_CREATE) { if (!machdep->xendump_p2m_create) error(FATAL, "xen xc_core dumpfiles not supported on this architecture"); if (!machdep->xendump_p2m_create((void *)xd)) error(FATAL, "cannot create xen pfn-to-mfn mapping\n"); } if (xd->flags & XC_CORE_PFN_CREATE) xc_core_elf_pfn_init(); xd->flags &= ~(XC_CORE_P2M_CREATE|XC_CORE_PFN_CREATE); if (CRASHDEBUG(1)) xendump_memory_dump(xd->ofp); } /* * Find the page index containing the mfn, and read the * machine page into the buffer. 
*/ char * xc_core_mfn_to_page(ulong mfn, char *pgbuf) { int i, b, idx, done; ulong tmp[MAX_BATCH_SIZE]; off_t offset; size_t size; uint nr_pages; if (xd->flags & XC_CORE_ELF) return xc_core_elf_mfn_to_page(mfn, pgbuf); if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return NULL; } nr_pages = xd->xc_core.header.xch_nr_pages; if (xd->flags & XC_CORE_64BIT_HOST) nr_pages *= 2; for (b = 0, idx = -1, done = FALSE; !done && (b < nr_pages); b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return NULL; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) { done = TRUE; break; } if (tmp[i] == mfn) { idx = i+b; if (CRASHDEBUG(4)) fprintf(xd->ofp, "page: found mfn 0x%lx (%ld) at index %d\n", mfn, mfn, idx); done = TRUE; } } } if (idx == -1) { error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", mfn, mfn); return NULL; } if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to xch_pages_offset\n"); return NULL; } offset = (off_t)(idx) * (off_t)xd->page_size; if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { error(INFO, "cannot lseek to mfn-specified page\n"); return NULL; } if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { error(INFO, "cannot read mfn-specified page\n"); return NULL; } return pgbuf; } /* * Find the page index containing the mfn, and read the * machine page into the buffer. */ static char * xc_core_elf_mfn_to_page(ulong mfn, char *pgbuf) { int i, b, idx, done; off_t offset; size_t size; uint nr_pages; ulong tmp; struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = 0, idx = -1, done = FALSE; !done && (b < nr_pages); b += MAX_BATCH_SIZE) { size = sizeof(struct xen_dumpcore_p2m) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &p2m_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return NULL; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) { done = TRUE; break; } tmp = (ulong)p2m_batch[i].gmfn; if (tmp == mfn) { idx = i+b; if (CRASHDEBUG(4)) fprintf(xd->ofp, "page: found mfn 0x%lx (%ld) at index %d\n", mfn, mfn, idx); done = TRUE; } } } if (idx == -1) { error(INFO, "cannot find mfn %ld (0x%lx) in page index\n", mfn, mfn); return NULL; } if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to xch_pages_offset\n"); offset = (off_t)(idx) * (off_t)xd->page_size; if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { error(INFO, "cannot lseek to mfn-specified page\n"); return NULL; } if (read(xd->xfd, pgbuf, xd->page_size) != xd->page_size) { error(INFO, "cannot read mfn-specified page\n"); return NULL; } return pgbuf; } /* * Find and return the page index containing the mfn. 
*/ int xc_core_mfn_to_page_index(ulong mfn) { int i, b; ulong tmp[MAX_BATCH_SIZE]; uint nr_pages; size_t size; if (xd->flags & XC_CORE_ELF) return xc_core_elf_mfn_to_page_index(mfn); if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return MFN_NOT_FOUND; } nr_pages = xd->xc_core.header.xch_nr_pages; if (xd->flags & XC_CORE_64BIT_HOST) nr_pages *= 2; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return MFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if (tmp[i] == mfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found mfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, mfn, mfn, i+b); return (i+b); } } } return MFN_NOT_FOUND; } /* * Find and return the page index containing the mfn. */ static int xc_core_elf_mfn_to_page_index(ulong mfn) { int i, b; off_t offset; size_t size; uint nr_pages; ulong tmp; struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(struct xen_dumpcore_p2m) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &p2m_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return MFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; tmp = (ulong)p2m_batch[i].gmfn; if (tmp == mfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found mfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, mfn, mfn, i+b); return (i+b); } } } return MFN_NOT_FOUND; } /* * XC_CORE mfn-related utility function. */ static int xc_core_mfns(ulong arg, FILE *ofp) { int i, b; uint nr_pages; ulong tmp[MAX_BATCH_SIZE]; ulonglong tmp64[MAX_BATCH_SIZE]; size_t size; if (lseek(xd->xfd, xd->xc_core.header.xch_index_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to page index\n"); return FALSE; } switch (arg) { case XC_CORE_64BIT_HOST: /* * Determine whether this is a 32-bit guest xendump that * was taken on a 64-bit xen host. */ if (machine_type("X86_64") || machine_type("IA64")) return FALSE; check_next_4: if (read(xd->xfd, tmp, sizeof(ulong) * 4) != (4 * sizeof(ulong))) { error(INFO, "cannot read index pages\n"); return FALSE; } if ((tmp[0] == 0xffffffff) || (tmp[1] == 0xffffffff) || (tmp[2] == 0xffffffff) || (tmp[3] == 0xffffffff) || (!tmp[0] && !tmp[1]) || (!tmp[2] && !tmp[3])) goto check_next_4; if (CRASHDEBUG(2)) fprintf(ofp, "mfns: %08lx %08lx %08lx %08lx\n", tmp[0], tmp[1], tmp[2], tmp[3]); if (tmp[0] && !tmp[1] && tmp[2] && !tmp[3]) return TRUE; else return FALSE; case XENDUMP_LOCAL: if (BITS64() || (xd->flags & XC_CORE_64BIT_HOST)) goto show_64bit_mfns; fprintf(ofp, "xch_index_offset mfn list:\n"); nr_pages = xd->xc_core.header.xch_nr_pages; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp, size) != size) { error(INFO, "cannot read index page %d\n", b); return FALSE; } if (b) fprintf(ofp, "\n"); for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if ((i%8) == 0) fprintf(ofp, "%s[%d]:", i ? 
"\n" : "", b+i); if (tmp[i] == 0xffffffff) fprintf(ofp, " INVALID"); else fprintf(ofp, " %lx", tmp[i]); } } fprintf(ofp, "\nxch_nr_pages: %d\n", xd->xc_core.header.xch_nr_pages); return TRUE; show_64bit_mfns: fprintf(ofp, "xch_index_offset mfn list: %s\n", BITS32() ? "(64-bit mfns)" : ""); nr_pages = xd->xc_core.header.xch_nr_pages; for (b = 0; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(ulonglong) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, tmp64, size) != size) { error(INFO, "cannot read index page %d\n", b); return FALSE; } if (b) fprintf(ofp, "\n"); for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; if ((i%8) == 0) fprintf(ofp, "%s[%d]:", i ? "\n" : "", b+i); if (tmp64[i] == 0xffffffffffffffffULL) fprintf(ofp, " INVALID"); else fprintf(ofp, " %llx", tmp64[i]); } } fprintf(ofp, "\nxch_nr_pages: %d\n", nr_pages); return TRUE; default: return FALSE; } } /* * Given a normal kernel pfn, determine the page index in the dumpfile. * * - First determine which of the pages making up the * phys_to_machine_mapping[] array would contain the pfn. * - From the phys_to_machine_mapping page, determine the mfn. * - Find the mfn in the dumpfile page index. */ #define PFNS_PER_PAGE (xd->page_size/sizeof(unsigned long)) static ulong xc_core_pfn_to_page_index(ulong pfn) { ulong idx, p2m_idx, mfn_idx; ulong *up, mfn; off_t offset; /* * This function does not apply when there's no p2m * mapping and/or if this is an ELF format dumpfile. */ switch (xd->flags & (XC_CORE_NO_P2M|XC_CORE_ELF)) { case (XC_CORE_NO_P2M|XC_CORE_ELF): return xc_core_elf_pfn_valid(pfn); case XC_CORE_NO_P2M: return(xc_core_pfn_valid(pfn) ? pfn : PFN_NOT_FOUND); case XC_CORE_ELF: return xc_core_elf_pfn_to_page_index(pfn); } idx = pfn/PFNS_PER_PAGE; if (idx >= xd->xc_core.p2m_frames) { error(INFO, "pfn: %lx is too large for dumpfile\n", pfn); return PFN_NOT_FOUND; } p2m_idx = xd->xc_core.p2m_frame_index_list[idx]; if (lseek(xd->xfd, xd->xc_core.header.xch_pages_offset, SEEK_SET) == -1) { error(INFO, "cannot lseek to xch_pages_offset\n"); return PFN_NOT_FOUND; } offset = (off_t)(p2m_idx) * (off_t)xd->page_size; if (lseek(xd->xfd, offset, SEEK_CUR) == -1) { error(INFO, "cannot lseek to pfn-specified page\n"); return PFN_NOT_FOUND; } if (read(xd->xfd, xd->page, xd->page_size) != xd->page_size) { error(INFO, "cannot read pfn-specified page\n"); return PFN_NOT_FOUND; } up = (ulong *)xd->page; up += (pfn%PFNS_PER_PAGE); mfn = *up; if ((mfn_idx = xc_core_mfn_to_page_index(mfn)) == MFN_NOT_FOUND) { if (!STREQ(pc->curcmd, "search")) error(INFO, "cannot find mfn in page index\n"); return PFN_NOT_FOUND; } return mfn_idx; } /* * Search the .xen_p2m array for the target pfn, starting at a * higher batch if appropriate. This presumes that the pfns * are laid out in ascending order. */ static ulong xc_core_elf_pfn_to_page_index(ulong pfn) { int i, b, start_index; off_t offset; size_t size; uint nr_pages; ulong tmp; struct xen_dumpcore_p2m p2m_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; /* * Initialize the start_index. 
*/ xd->xc_core.last_batch.accesses++; start_index = 0; if ((pfn >= xd->xc_core.last_batch.start) && (pfn <= xd->xc_core.last_batch.end)) { xd->xc_core.last_batch.duplicates++; start_index = xd->xc_core.last_batch.index; } else { for (i = 0; i <= INDEX_PFN_COUNT; i++) { if ((i == INDEX_PFN_COUNT) || (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { if (--i < 0) i = 0; start_index = xd->xc_core.elf_index_pfn[i].index; break; } } } offset += (start_index * sizeof(struct xen_dumpcore_p2m)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(struct xen_dumpcore_p2m) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &p2m_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return PFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; tmp = (ulong)p2m_batch[i].pfn; if (tmp == pfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found pfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, pfn, pfn, i+b); if ((b+MAX_BATCH_SIZE) < nr_pages) { xd->xc_core.last_batch.index = b; xd->xc_core.last_batch.start = p2m_batch[0].pfn; xd->xc_core.last_batch.end = p2m_batch[MAX_BATCH_SIZE-1].pfn; } return (i+b); } } } return PFN_NOT_FOUND; } /* * In xendumps containing INVALID_MFN markers in the page index, * return the validity of the pfn. */ static int xc_core_pfn_valid(ulong pfn) { ulong mfn; off_t offset; if (pfn >= (ulong)xd->xc_core.header.xch_nr_pages) return FALSE; offset = xd->xc_core.header.xch_index_offset; if (xd->flags & XC_CORE_64BIT_HOST) offset += (off_t)(pfn * sizeof(ulonglong)); else offset += (off_t)(pfn * sizeof(ulong)); /* * The lseek and read should never fail, so report * any errors unconditionally. */ if (lseek(xd->xfd, offset, SEEK_SET) == -1) { error(INFO, "xendump: cannot lseek to page index for pfn %lx\n", pfn); return FALSE; } if (read(xd->xfd, &mfn, sizeof(ulong)) != sizeof(ulong)) { error(INFO, "xendump: cannot read index page for pfn %lx\n", pfn); return FALSE; } /* * If it's an invalid mfn, let the caller decide whether * to display an error message (unless debugging). */ if (mfn == INVALID_MFN) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "xendump: pfn %lx contains INVALID_MFN\n", pfn); return FALSE; } return TRUE; } /* * Return the index into the .xen_pfn array containing the pfn. * If not found, return PFN_NOT_FOUND. */ static ulong xc_core_elf_pfn_valid(ulong pfn) { int i, b, start_index; off_t offset; size_t size; uint nr_pages; ulong tmp; uint64_t pfn_batch[MAX_BATCH_SIZE]; offset = xd->xc_core.header.xch_index_offset; nr_pages = xd->xc_core.header.xch_nr_pages; /* * Initialize the start_index. 
*/ xd->xc_core.last_batch.accesses++; start_index = 0; if ((pfn >= xd->xc_core.last_batch.start) && (pfn <= xd->xc_core.last_batch.end)) { xd->xc_core.last_batch.duplicates++; start_index = xd->xc_core.last_batch.index; } else { for (i = 0; i <= INDEX_PFN_COUNT; i++) { if ((i == INDEX_PFN_COUNT) || (pfn < xd->xc_core.elf_index_pfn[i].pfn)) { if (--i < 0) i = 0; start_index = xd->xc_core.elf_index_pfn[i].index; break; } } } offset += (start_index * sizeof(uint64_t)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index\n"); for (b = start_index; b < nr_pages; b += MAX_BATCH_SIZE) { size = sizeof(uint64_t) * MIN(MAX_BATCH_SIZE, nr_pages - b); if (read(xd->xfd, &pfn_batch[0], size) != size) { error(INFO, "cannot read index page %d\n", b); return PFN_NOT_FOUND; } for (i = 0; i < MAX_BATCH_SIZE; i++) { if ((b+i) >= nr_pages) break; tmp = (ulong)pfn_batch[i]; if (tmp == pfn) { if (CRASHDEBUG(4)) fprintf(xd->ofp, "index: batch: %d found pfn %ld (0x%lx) at index %d\n", b/MAX_BATCH_SIZE, pfn, pfn, i+b); if ((b+MAX_BATCH_SIZE) < nr_pages) { xd->xc_core.last_batch.index = b; xd->xc_core.last_batch.start = (ulong)pfn_batch[0]; xd->xc_core.last_batch.end = (ulong)pfn_batch[MAX_BATCH_SIZE-1]; } return (i+b); } } } return PFN_NOT_FOUND; } /* * Store the panic task's stack hooks from where it was found * in get_active_set_panic_task(). */ void xendump_panic_hook(char *stack) { int i, err, argc; char *arglist[MAXARGS]; char buf[BUFSIZE]; ulong value, *sp; if (machine_type("IA64")) /* needs switch_stack address */ return; strcpy(buf, stack); argc = parse_line(buf, arglist); if ((value = htol(strip_ending_char(arglist[0], ':'), RETURN_ON_ERROR, &err)) == BADADDR) return; for (sp = (ulong *)value, i = 1; i < argc; i++, sp++) { if (strstr(arglist[i], "xen_panic_event")) { if (!readmem((ulong)sp, KVADDR, &value, sizeof(ulong), "xen_panic_event address", RETURN_ON_ERROR)) return; xd->panic_sp = (ulong)sp; xd->panic_pc = value; } else if (strstr(arglist[i], "panic") && !xd->panic_sp) { if (!readmem((ulong)sp, KVADDR, &value, sizeof(ulong), "xen_panic_event address", RETURN_ON_ERROR)) return; xd->panic_sp = (ulong)sp; xd->panic_pc = value; } } } static void xendump_print(char *fmt, ...) { char buf[BUFSIZE]; va_list ap; if (!fmt || !strlen(fmt)) return; va_start(ap, fmt); (void)vsnprintf(buf, BUFSIZE, fmt, ap); va_end(ap); if (xd->ofp) fprintf(xd->ofp, "%s", buf); else if (!XENDUMP_VALID() && CRASHDEBUG(7)) fprintf(stderr, "%s", buf); } /* * Support for xc_core ELF dumpfile format. 
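 * (A byte-order sketch precedes the verifier below.)
 */

/*
 * Minimal sketch, not part of the original source: xc_core_elf_verify()
 * byte-swaps multi-byte ELF header fields whenever the file's data
 * encoding disagrees with the host byte order. The hypothetical helper
 * below restates that test in isolation.
 */
static inline int
example_needs_swap(unsigned char ei_data)
{
	/* swap iff file encoding and host byte order disagree */
	return (((ei_data == ELFDATA2LSB) && (__BYTE_ORDER == __BIG_ENDIAN)) ||
		((ei_data == ELFDATA2MSB) && (__BYTE_ORDER == __LITTLE_ENDIAN)));
}

/*
 * The verifier proper follows.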
*/ static int xc_core_elf_verify(char *file, char *buf) { int i; Elf32_Ehdr *elf32; Elf64_Ehdr *elf64; Elf32_Off offset32; Elf64_Off offset64; char *eheader; int swap; eheader = buf; if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) goto bailout; swap = (((eheader[EI_DATA] == ELFDATA2LSB) && (__BYTE_ORDER == __BIG_ENDIAN)) || ((eheader[EI_DATA] == ELFDATA2MSB) && (__BYTE_ORDER == __LITTLE_ENDIAN))); elf32 = (Elf32_Ehdr *)buf; elf64 = (Elf64_Ehdr *)buf; if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && (swap16(elf32->e_type, swap) == ET_CORE) && (swap32(elf32->e_version, swap) == EV_CURRENT) && (swap16(elf32->e_shnum, swap) > 0)) { switch (swap16(elf32->e_machine, swap)) { case EM_386: if (machine_type_mismatch(file, "X86", NULL, 0)) goto bailout; break; default: if (machine_type_mismatch(file, "(unknown)", NULL, 0)) goto bailout; break; } if (endian_mismatch(file, elf32->e_ident[EI_DATA], 0)) goto bailout; xd->xc_core.elf_class = ELFCLASS32; if ((xd->xc_core.elf32 = (Elf32_Ehdr *)malloc(sizeof(Elf32_Ehdr))) == NULL) { fprintf(stderr, "cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(buf, xd->xc_core.elf32, sizeof(Elf32_Ehdr)); } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && (swap16(elf64->e_type, swap) == ET_CORE) && (swap32(elf64->e_version, swap) == EV_CURRENT) && (swap16(elf64->e_shnum, swap) > 0)) { switch (swap16(elf64->e_machine, swap)) { case EM_IA_64: if (machine_type_mismatch(file, "IA64", NULL, 0)) goto bailout; break; case EM_X86_64: if (machine_type_mismatch(file, "X86_64", "X86", 0)) goto bailout; break; case EM_386: if (machine_type_mismatch(file, "X86", NULL, 0)) goto bailout; break; default: if (machine_type_mismatch(file, "(unknown)", NULL, 0)) goto bailout; } if (endian_mismatch(file, elf64->e_ident[EI_DATA], 0)) goto bailout; xd->xc_core.elf_class = ELFCLASS64; if ((xd->xc_core.elf64 = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr))) == NULL) { fprintf(stderr, "cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(buf, xd->xc_core.elf64, sizeof(Elf64_Ehdr)); } else { if (CRASHDEBUG(1)) error(INFO, "%s: not a xen ELF core file\n", file); goto bailout; } xc_core_elf_dump(); switch (xd->xc_core.elf_class) { case ELFCLASS32: offset32 = xd->xc_core.elf32->e_shoff; for (i = 0; i < xd->xc_core.elf32->e_shnum; i++) { xc_core_dump_Elf32_Shdr(offset32, ELFSTORE); offset32 += xd->xc_core.elf32->e_shentsize; } xendump_print("\n"); break; case ELFCLASS64: offset64 = xd->xc_core.elf64->e_shoff; for (i = 0; i < xd->xc_core.elf64->e_shnum; i++) { xc_core_dump_Elf64_Shdr(offset64, ELFSTORE); offset64 += xd->xc_core.elf64->e_shentsize; } xendump_print("\n"); break; } xd->flags |= (XENDUMP_LOCAL | XC_CORE_ELF); if (!xd->page_size) error(FATAL, "unknown page size: use -p command line option\n"); if (!(xd->page = (char *)malloc(xd->page_size))) error(FATAL, "cannot malloc page space."); if (!(xd->poc = (struct pfn_offset_cache *)calloc (PFN_TO_OFFSET_CACHE_ENTRIES, sizeof(struct pfn_offset_cache)))) error(FATAL, "cannot malloc pfn_offset_cache\n"); xd->last_pfn = ~(0UL); for (i = 0; i < INDEX_PFN_COUNT; i++) xd->xc_core.elf_index_pfn[i].pfn = ~0UL; if (CRASHDEBUG(1)) xendump_memory_dump(fp); return TRUE; bailout: return FALSE; } /* * Dump the relevant ELF header. */ static void xc_core_elf_dump(void) { switch (xd->xc_core.elf_class) { case ELFCLASS32: xc_core_dump_Elf32_Ehdr(xd->xc_core.elf32); break; case ELFCLASS64: xc_core_dump_Elf64_Ehdr(xd->xc_core.elf64); break; } } /* * Dump the 32-bit ELF header, and grab a pointer to the strtab section. 
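 *
 * The string-table section header lives at e_shoff + (e_shstrndx *
 * e_shentsize); for example, with e_shoff 0x34, e_shstrndx 5 and
 * e_shentsize 40, the Elf32_Shdr would be read from file offset 0xfc.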
*/ static void xc_core_dump_Elf32_Ehdr(Elf32_Ehdr *elf) { char buf[BUFSIZE]; Elf32_Off offset32; Elf32_Shdr shdr; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); xendump_print("\nElf32_Ehdr:\n"); xendump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: xendump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: xendump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: xendump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: xendump_print("(ELFCLASSNUM)\n"); break; default: xendump_print("(?)\n"); break; } xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case ELFDATANONE: xendump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: xendump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: xendump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: xendump_print("(ELFDATANUM)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) xendump_print("(EV_CURRENT)\n"); else xendump_print("(?)\n"); xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: xendump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: xendump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: xendump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: xendump_print("(ELFOSABI_STANDALONE)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); xendump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: xendump_print("(ET_NONE)\n"); break; case ET_REL: xendump_print("(ET_REL)\n"); break; case ET_EXEC: xendump_print("(ET_EXEC)\n"); break; case ET_DYN: xendump_print("(ET_DYN)\n"); break; case ET_CORE: xendump_print("(ET_CORE)\n"); break; case ET_NUM: xendump_print("(ET_NUM)\n"); break; case ET_LOOS: xendump_print("(ET_LOOS)\n"); break; case ET_HIOS: xendump_print("(ET_HIOS)\n"); break; case ET_LOPROC: xendump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: xendump_print("(ET_HIPROC)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_386: xendump_print("(EM_386)\n"); break; default: xendump_print("(unsupported)\n"); break; } xendump_print(" e_version: %ld ", (ulong)elf->e_version); xendump_print("%s\n", elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : ""); xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); xendump_print(" e_ehsize: %x\n", elf->e_ehsize); xendump_print(" e_phentsize: %x\n", elf->e_phentsize); xendump_print(" e_phnum: %x\n", elf->e_phnum); xendump_print(" e_shentsize: %x\n", elf->e_shentsize); xendump_print(" e_shnum: %x\n", elf->e_shnum); xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); /* Determine the strtab location. 
*/ offset32 = elf->e_shoff + (elf->e_shstrndx * elf->e_shentsize); if (lseek(xd->xfd, offset32, SEEK_SET) != offset32) error(FATAL, "xc_core_dump_Elf32_Ehdr: cannot seek to strtab Elf32_Shdr\n"); if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) error(FATAL, "xc_core_dump_Elf32_Ehdr: cannot read strtab Elf32_Shdr\n"); xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; } /* * Dump the 64-bit ELF header, and grab a pointer to the strtab section. */ static void xc_core_dump_Elf64_Ehdr(Elf64_Ehdr *elf) { char buf[BUFSIZE]; Elf64_Off offset64; Elf64_Shdr shdr; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); xendump_print("\nElf64_Ehdr:\n"); xendump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); xendump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: xendump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: xendump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: xendump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: xendump_print("(ELFCLASSNUM)\n"); break; default: xendump_print("(?)\n"); break; } xendump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case ELFDATANONE: xendump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: xendump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: xendump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: xendump_print("(ELFDATANUM)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) xendump_print("(EV_CURRENT)\n"); else xendump_print("(?)\n"); xendump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: xendump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: xendump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: xendump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: xendump_print("(ELFOSABI_STANDALONE)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); xendump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: xendump_print("(ET_NONE)\n"); break; case ET_REL: xendump_print("(ET_REL)\n"); break; case ET_EXEC: xendump_print("(ET_EXEC)\n"); break; case ET_DYN: xendump_print("(ET_DYN)\n"); break; case ET_CORE: xendump_print("(ET_CORE)\n"); break; case ET_NUM: xendump_print("(ET_NUM)\n"); break; case ET_LOOS: xendump_print("(ET_LOOS)\n"); break; case ET_HIOS: xendump_print("(ET_HIOS)\n"); break; case ET_LOPROC: xendump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: xendump_print("(ET_HIPROC)\n"); break; default: xendump_print("(?)\n"); } xendump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_386: xendump_print("(EM_386)\n"); break; case EM_IA_64: xendump_print("(EM_IA_64)\n"); break; case EM_PPC64: xendump_print("(EM_PPC64)\n"); break; case EM_X86_64: xendump_print("(EM_X86_64)\n"); break; default: xendump_print("(unsupported)\n"); break; } xendump_print(" e_version: %ld ", (ulong)elf->e_version); xendump_print("%s\n", elf->e_version == EV_CURRENT ? 
"(EV_CURRENT)" : ""); xendump_print(" e_entry: %lx\n", (ulong)elf->e_entry); xendump_print(" e_phoff: %lx\n", (ulong)elf->e_phoff); xendump_print(" e_shoff: %lx\n", (ulong)elf->e_shoff); xendump_print(" e_flags: %lx\n", (ulong)elf->e_flags); xendump_print(" e_ehsize: %x\n", elf->e_ehsize); xendump_print(" e_phentsize: %x\n", elf->e_phentsize); xendump_print(" e_phnum: %x\n", elf->e_phnum); xendump_print(" e_shentsize: %x\n", elf->e_shentsize); xendump_print(" e_shnum: %x\n", elf->e_shnum); xendump_print(" e_shstrndx: %x\n", elf->e_shstrndx); /* Determine the strtab location. */ offset64 = elf->e_shoff + (elf->e_shstrndx * elf->e_shentsize); if (lseek(xd->xfd, offset64, SEEK_SET) != offset64) error(FATAL, "xc_core_dump_Elf64_Ehdr: cannot seek to strtab Elf32_Shdr\n"); if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) error(FATAL, "xc_core_dump_Elf64_Ehdr: cannot read strtab Elf32_Shdr\n"); xd->xc_core.elf_strtab_offset = (ulonglong)shdr.sh_offset; } /* * Dump each 32-bit section header and the data that they reference. */ static void xc_core_dump_Elf32_Shdr(Elf32_Off offset, int store) { Elf32_Shdr shdr; char name[BUFSIZE]; int i; char c; if (lseek(xd->xfd, offset, SEEK_SET) != offset) error(FATAL, "xc_core_dump_Elf32_Shdr: cannot seek to Elf32_Shdr\n"); if (read(xd->xfd, &shdr, sizeof(Elf32_Shdr)) != sizeof(Elf32_Shdr)) error(FATAL, "xc_core_dump_Elf32_Shdr: cannot read Elf32_Shdr\n"); xendump_print("\nElf32_Shdr:\n"); xendump_print(" sh_name: %lx ", shdr.sh_name); xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name)); xendump_print(" sh_type: %lx ", shdr.sh_type); switch (shdr.sh_type) { case SHT_NULL: xendump_print("(SHT_NULL)\n"); break; case SHT_PROGBITS: xendump_print("(SHT_PROGBITS)\n"); break; case SHT_STRTAB: xendump_print("(SHT_STRTAB)\n"); break; case SHT_NOTE: xendump_print("(SHT_NOTE)\n"); break; default: xendump_print("\n"); break; } xendump_print(" sh_flags: %lx\n", shdr.sh_flags); xendump_print(" sh_addr: %lx\n", shdr.sh_addr); xendump_print(" sh_offset: %lx\n", shdr.sh_offset); xendump_print(" sh_size: %lx\n", shdr.sh_size); xendump_print(" sh_link: %lx\n", shdr.sh_link); xendump_print(" sh_info: %lx\n", shdr.sh_info); xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign); xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize); if (STREQ(name, ".shstrtab")) { if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != xd->xc_core.elf_strtab_offset) error(FATAL, "xc_core_dump_Elf32_Shdr: cannot seek to strtab data\n"); xendump_print(" "); for (i = 0; i < shdr.sh_size; i++) { if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) error(FATAL, "xc_core_dump_Elf32_Shdr: cannot read strtab data\n"); if (i && !c) xendump_print("\n "); else xendump_print("%c", c); } } if (STREQ(name, ".note.Xen")) xc_core_dump_elfnote((off_t)shdr.sh_offset, (size_t)shdr.sh_size, store); if (!store) return; if (STREQ(name, ".xen_prstatus")) xd->xc_core.header.xch_ctxt_offset = (off_t)shdr.sh_offset; if (STREQ(name, ".xen_shared_info")) xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset; if (STREQ(name, ".xen_pfn")) { xd->xc_core.header.xch_index_offset = (off_t)shdr.sh_offset; xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE); } if (STREQ(name, ".xen_p2m")) { xd->xc_core.header.xch_index_offset = (off_t)shdr.sh_offset; xd->flags |= XC_CORE_PFN_CREATE; } if (STREQ(name, ".xen_pages")) xd->xc_core.header.xch_pages_offset = (off_t)shdr.sh_offset; if (STREQ(name, ".xen_ia64_mapped_regs")) xd->xc_core.ia64_mapped_regs_offset = (off_t)shdr.sh_offset; } /* * Dump each 64-bit 
section header and the data that they reference. */ static void xc_core_dump_Elf64_Shdr(Elf64_Off offset, int store) { Elf64_Shdr shdr; char name[BUFSIZE]; int i; char c; if (lseek(xd->xfd, offset, SEEK_SET) != offset) error(FATAL, "xc_core_dump_Elf64_Shdr: cannot seek to Elf64_Shdr\n"); if (read(xd->xfd, &shdr, sizeof(Elf64_Shdr)) != sizeof(Elf64_Shdr)) error(FATAL, "xc_core_dump_Elf64_Shdr: cannot read Elf64_Shdr\n"); xendump_print("\nElf64_Shdr:\n"); xendump_print(" sh_name: %x ", shdr.sh_name); xendump_print("\"%s\"\n", xc_core_strtab(shdr.sh_name, name)); xendump_print(" sh_type: %x ", shdr.sh_type); switch (shdr.sh_type) { case SHT_NULL: xendump_print("(SHT_NULL)\n"); break; case SHT_PROGBITS: xendump_print("(SHT_PROGBITS)\n"); break; case SHT_STRTAB: xendump_print("(SHT_STRTAB)\n"); break; case SHT_NOTE: xendump_print("(SHT_NOTE)\n"); break; default: xendump_print("\n"); break; } xendump_print(" sh_flags: %lx\n", shdr.sh_flags); xendump_print(" sh_addr: %lx\n", shdr.sh_addr); xendump_print(" sh_offset: %lx\n", shdr.sh_offset); xendump_print(" sh_size: %lx\n", shdr.sh_size); xendump_print(" sh_link: %x\n", shdr.sh_link); xendump_print(" sh_info: %x\n", shdr.sh_info); xendump_print(" sh_addralign: %lx\n", shdr.sh_addralign); xendump_print(" sh_entsize: %lx\n", shdr.sh_entsize); if (STREQ(name, ".shstrtab")) { if (lseek(xd->xfd, xd->xc_core.elf_strtab_offset, SEEK_SET) != xd->xc_core.elf_strtab_offset) error(FATAL, "xc_core_dump_Elf64_Shdr: cannot seek to strtab data\n"); xendump_print(" "); for (i = 0; i < shdr.sh_size; i++) { if (read(xd->xfd, &c, sizeof(char)) != sizeof(char)) error(FATAL, "xc_core_dump_Elf64_Shdr: cannot read strtab data\n"); if (i && !c) xendump_print("\n "); else xendump_print("%c", c); } } if (STREQ(name, ".note.Xen")) xc_core_dump_elfnote((off_t)shdr.sh_offset, (size_t)shdr.sh_size, store); if (!store) return; if (STREQ(name, ".xen_prstatus")) xd->xc_core.header.xch_ctxt_offset = (off_t)shdr.sh_offset; if (STREQ(name, ".xen_shared_info")) xd->xc_core.shared_info_offset = (off_t)shdr.sh_offset; if (STREQ(name, ".xen_pfn")) { xd->xc_core.header.xch_index_offset = (off_t)shdr.sh_offset; xd->flags |= (XC_CORE_NO_P2M|XC_CORE_PFN_CREATE); } if (STREQ(name, ".xen_p2m")) { xd->xc_core.header.xch_index_offset = (off_t)shdr.sh_offset; xd->flags |= XC_CORE_PFN_CREATE; } if (STREQ(name, ".xen_pages")) xd->xc_core.header.xch_pages_offset = (off_t)shdr.sh_offset; if (STREQ(name, ".xen_ia64_mapped_regs")) xd->xc_core.ia64_mapped_regs_offset = (off_t)shdr.sh_offset; } /* * Return the string found at the specified index into * the dumpfile's strtab. */ static char * xc_core_strtab(uint32_t index, char *buf) { off_t offset; int i; offset = xd->xc_core.elf_strtab_offset + index; if (lseek(xd->xfd, offset, SEEK_SET) != offset) error(FATAL, "xc_core_strtab: cannot seek to Elf64_Shdr\n"); BZERO(buf, BUFSIZE); i = 0; while (read(xd->xfd, &buf[i], sizeof(char)) == sizeof(char)) { if (buf[i] == NULLCHAR) break; i++; } return buf; } /* * Dump the array of elfnote structures, storing relevant info * when requested during initialization. This function is * common to both 32-bit and 64-bit ELF files. 
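 *
 * Each note is a fixed elfnote header followed immediately by descsz
 * bytes of payload, so the walk below advances by sizeof(struct elfnote)
 * + descsz per entry, with no extra alignment padding assumed.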
*/ static void xc_core_dump_elfnote(off_t sh_offset, size_t sh_size, int store) { int i, lf, index; char *notes_buffer; struct elfnote *elfnote; ulonglong *data; struct xen_dumpcore_elfnote_header_desc *elfnote_header; struct xen_dumpcore_elfnote_format_version_desc *format_version; elfnote_header = NULL; format_version = NULL; if (!(notes_buffer = (char *)malloc(sh_size))) error(FATAL, "cannot malloc notes space."); if (lseek(xd->xfd, sh_offset, SEEK_SET) != sh_offset) error(FATAL, "xc_core_dump_elfnote: cannot seek to sh_offset\n"); if (read(xd->xfd, notes_buffer, sh_size) != sh_size) error(FATAL, "xc_core_dump_elfnote: cannot read elfnote data\n"); for (index = 0; index < sh_size; ) { elfnote = (struct elfnote *)&notes_buffer[index]; xendump_print(" namesz: %d\n", elfnote->namesz); xendump_print(" descsz: %d\n", elfnote->descsz); xendump_print(" type: %x ", elfnote->type); switch (elfnote->type) { case XEN_ELFNOTE_DUMPCORE_NONE: xendump_print("(XEN_ELFNOTE_DUMPCORE_NONE)\n"); break; case XEN_ELFNOTE_DUMPCORE_HEADER: xendump_print("(XEN_ELFNOTE_DUMPCORE_HEADER)\n"); elfnote_header = (struct xen_dumpcore_elfnote_header_desc *) (elfnote+1); break; case XEN_ELFNOTE_DUMPCORE_XEN_VERSION: xendump_print("(XEN_ELFNOTE_DUMPCORE_XEN_VERSION)\n"); break; case XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION: xendump_print("(XEN_ELFNOTE_DUMPCORE_FORMAT_VERSION)\n"); format_version = (struct xen_dumpcore_elfnote_format_version_desc *) (elfnote+1); break; default: xendump_print("(unknown)\n"); break; } xendump_print(" name: %s\n", elfnote->name); data = (ulonglong *)(elfnote+1); for (i = lf = 0; i < elfnote->descsz/sizeof(ulonglong); i++) { if (((i%2)==0)) { xendump_print("%s ", i ? "\n" : ""); lf++; } else lf = 0; xendump_print("%016llx ", *data++); } if (!elfnote->descsz) xendump_print(" (empty)"); xendump_print("\n"); index += sizeof(struct elfnote) + elfnote->descsz; } if (!store) { free(notes_buffer); return; } if (elfnote_header) { xd->xc_core.header.xch_magic = elfnote_header->xch_magic; xd->xc_core.header.xch_nr_vcpus = elfnote_header->xch_nr_vcpus; xd->xc_core.header.xch_nr_pages = elfnote_header->xch_nr_pages; xd->page_size = elfnote_header->xch_page_size; } if (format_version) { switch (format_version->version) { case FORMAT_VERSION_0000000000000001: break; default: error(WARNING, "unsupported xen dump-core format version: %016llx\n", format_version->version); } xd->xc_core.format_version = format_version->version; } free(notes_buffer); } /* * Initialize the batching list for the .xen_p2m or .xen_pfn * arrays. 
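 *
 * As a worked example (the constant's value is illustrative only): if
 * INDEX_PFN_COUNT were 16 and xch_nr_pages were 1048576, every 65536th
 * entry's pfn would be sampled, giving the pfn lookups sixteen evenly
 * spaced starting points.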
*/ static void xc_core_elf_pfn_init(void) { int i, c, chunk; off_t offset; struct xen_dumpcore_p2m p2m; uint64_t pfn; switch (xd->flags & (XC_CORE_ELF|XC_CORE_NO_P2M)) { case (XC_CORE_ELF|XC_CORE_NO_P2M): chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { offset = xd->xc_core.header.xch_index_offset + (off_t)(c * sizeof(uint64_t)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index %d\n", c); if (read(xd->xfd, &pfn, sizeof(uint64_t)) != sizeof(uint64_t)) error(FATAL, "cannot read page index %d\n", c); xd->xc_core.elf_index_pfn[i].index = c; xd->xc_core.elf_index_pfn[i].pfn = (ulong)pfn; } break; case XC_CORE_ELF: chunk = xd->xc_core.header.xch_nr_pages/INDEX_PFN_COUNT; for (i = c = 0; i < INDEX_PFN_COUNT; i++, c += chunk) { offset = xd->xc_core.header.xch_index_offset + (off_t)(c * sizeof(struct xen_dumpcore_p2m)); if (lseek(xd->xfd, offset, SEEK_SET) == -1) error(FATAL, "cannot lseek to page index %d\n", c); if (read(xd->xfd, &p2m, sizeof(struct xen_dumpcore_p2m)) != sizeof(struct xen_dumpcore_p2m)) error(FATAL, "cannot read page index %d\n", c); xd->xc_core.elf_index_pfn[i].index = c; xd->xc_core.elf_index_pfn[i].pfn = (ulong)p2m.pfn; } break; } } struct xendump_data * get_xendump_data(void) { return (XENDUMP_VALID() ? xd : NULL); } crash-7.2.8/lkcd_vmdump_v1.h0000775000000000000000000001202113614623427014423 0ustar rootroot/* lkcd_vmdump_v1.h - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * */ /* This header file includes all structure definitions for crash dumps. 
 */ #ifndef _VMDUMP_H #define _VMDUMP_H /* necessary header files */ #ifndef MCLX #include <linux/utsname.h> /* for utsname structure */ #endif #ifndef IA64 typedef unsigned int u32; #include <asm/ptrace.h> /* for pt_regs */ #endif /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ #ifdef CONFIG_VMDUMP /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_VERSION_NUMBER 0x1 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump flags -- add as necessary */ #define DUMP_RAW 0x1 /* raw page (no compression) */ #define DUMP_COMPRESSED 0x2 /* page is compressed */ #define DUMP_END 0x4 /* end marker on a full dump */ /* dump types - type specific stuff added later for page typing */ #define DUMP_NONE 0 /* no dumping at all -- just bail */ #define DUMP_HEADER 1 /* kernel dump header only */ #define DUMP_KERN 2 /* dump header and kernel pages */ #define DUMP_USED 3 /* dump header, kernel/user pages */ #define DUMP_ALL 4 /* dump header, all memory pages */ /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) */ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the esp for i386 systems -- MOVE LATER */ uint32_t dh_esp; /* the eip for i386 systems -- MOVE LATER */ uint32_t dh_eip; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the utsname (uname) information */ struct new_utsname dh_utsname; /* the dump registers */ #ifndef IA64 #ifndef S390 #ifndef S390X #ifndef ARM64 struct pt_regs dh_regs; #endif #endif #endif #endif /* the address of the current task */ struct task_struct *dh_current_task; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions.
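 *
 * A hedged walker sketch (error handling and decompression elided;
 * the flag and field names come from the definitions above):
 *
 *	dump_page_t dp;
 *
 *	while (read(fd, &dp, sizeof(dp)) == sizeof(dp)) {
 *		if (dp.dp_flags & DUMP_END)
 *			break;
 *		... dp.dp_size bytes of page data follow: compressed
 *		    when DUMP_COMPRESSED is set, raw when DUMP_RAW ...
 *		lseek(fd, dp.dp_size, SEEK_CUR);
 *	}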
 */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; #endif /* CONFIG_VMDUMP */ #ifdef __KERNEL__ extern void dump_init(uint64_t, uint64_t); extern void dump_open(char *); extern void dump_execute(char *, struct pt_regs *); #endif #endif /* _VMDUMP_H */ crash-7.2.8/lkcd_dump_v7.h0000664000000000000000000003353713614623427014102 0ustar rootroot/* lkcd_dump_v5.h - core analysis suite * * Copyright (C) 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2006 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2006 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * Kernel header file for Linux crash dumps. * * Created by: Matt Robinson (yakker@sgi.com) * Copyright 1999 Silicon Graphics, Inc. All rights reserved. * * vmdump.h to dump.h by: Matt D. Robinson (yakker@sourceforge.net) * Copyright 2001 Matt D. Robinson. All rights reserved. * * Most of this is the same old stuff from vmdump.h, except now we're * actually a stand-alone driver plugged into the block layer interface, * with the exception that we now allow for compression modes externally * loaded (e.g., someone can come up with their own). */ /* This header file includes all structure definitions for crash dumps. */ #ifndef _DUMP_H #define _DUMP_H //#include /* define TRUE and FALSE for use in our dump modules */ #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #ifndef MCLX /* * MCLX NOTE: the architecture-specific headers are being ignored until * deemed necessary; crash has never used them functionally, and only * references them in the dump_sgi_environment() helper routines.
 */ /* necessary header files */ #include <asm/dump.h> /* for architecture-specific header */ #endif #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ 64 * 1024 /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!) */ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.)
*/ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* the time of the system crash */ struct timeval dh_time; /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ void *dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* any additional flags */ uint32_t dh_dump_device; } dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions. */ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. */ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; #ifdef IA64 #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. 
*/ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ struct pt_regs *dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; void * dha_smp_current_task[NR_CPUS]; void * dha_stack[NR_CPUS]; void * dha_switch_stack[NR_CPUS]; } dump_header_asm_t; #define NR_CPUS 32 typedef struct _dump_header_asm_smp_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ struct pt_regs *dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; int dha_dumping_cpu; struct pt_regs dha_smp_regs[NR_CPUS]; void * dha_smp_current_task[NR_CPUS]; void * dha_stack[NR_CPUS]; void * dha_switch_stack[NR_CPUS]; } dump_header_asm_smp_t; #endif #ifdef __KERNEL__ /* * Structure: dump_compress_t * Function: This is what an individual compression mechanism can use * to plug in their own compression techniques. It's always * best to build these as individual modules so that people * can put in whatever they want. 
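 *
 * A hedged illustration (the function name and return convention
 * below are assumptions, not taken from this header): a compression
 * module fills in one of these and registers it on the global list,
 * e.g.
 *
 *	static int my_rle_compress(char *in, int inlen,
 *				   char *out, int outlen);
 *
 *	static dump_compress_t dump_rle = {
 *		.compress_type = DUMP_COMPRESS_RLE,
 *		.compress_func = my_rle_compress,
 *	};
 *
 * where the compress function returns the number of bytes written to
 * the output buffer, allowing the dump driver to fall back to a
 * DUMP_DH_RAW page when compression did not shrink the data.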
 */ typedef struct dump_compress_s { /* the list_head structure for list storage */ struct list_head list; /* the type of compression to use (DUMP_COMPRESS_XXX) */ int compress_type; /* the compression function to call */ int (*compress_func)(char *, int, char *, int); } dump_compress_t; extern int dump_init(void); extern void dump_execute(char *, struct pt_regs *); extern int page_is_ram(unsigned long); #endif /* __KERNEL__ */ #endif /* _DUMP_H */ crash-7.2.8/unwind_x86_32_64.c0000664000000000000000000010163413614623427014337 0ustar rootroot/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #if defined(X86_64) /* * Support for generating DWARF CFI based backtraces. * Borrowed heavily from the kernel's implementation of unwinding using the * DWARF CFI written by Jan Beulich */ #ifdef X86_64 #include "unwind_x86_64.h" #endif #ifdef X86 #include "unwind_x86.h" #endif #include "defs.h" #define MAX_STACK_DEPTH 8 static struct local_unwind_table { struct { unsigned long pc; unsigned long range; } core, init; void *address; unsigned long size; } *local_unwind_tables, default_unwind_table; static int gather_in_memory_unwind_tables(void); static int populate_local_tables(ulong, char *); static int unwind_tables_cnt = 0; static struct local_unwind_table *find_table(unsigned long); static void dump_local_unwind_tables(void); static const struct { unsigned offs:BITS_PER_LONG / 2; unsigned width:BITS_PER_LONG / 2; } reg_info[] = { UNW_REGISTER_INFO }; #undef PTREGS_INFO #undef EXTRA_INFO #ifndef REG_INVALID #define REG_INVALID(r) (reg_info[r].width == 0) #endif #define DW_CFA_nop 0x00 #define DW_CFA_set_loc 0x01 #define DW_CFA_advance_loc1 0x02 #define DW_CFA_advance_loc2 0x03 #define DW_CFA_advance_loc4 0x04 #define DW_CFA_offset_extended 0x05 #define DW_CFA_restore_extended 0x06 #define DW_CFA_undefined 0x07 #define DW_CFA_same_value 0x08 #define DW_CFA_register 0x09 #define DW_CFA_remember_state 0x0a #define DW_CFA_restore_state 0x0b #define DW_CFA_def_cfa 0x0c #define DW_CFA_def_cfa_register 0x0d #define DW_CFA_def_cfa_offset 0x0e #define DW_CFA_def_cfa_expression 0x0f #define DW_CFA_expression 0x10 #define DW_CFA_offset_extended_sf 0x11 #define DW_CFA_def_cfa_sf 0x12 #define DW_CFA_def_cfa_offset_sf 0x13 #define DW_CFA_val_offset 0x14 #define DW_CFA_val_offset_sf 0x15 #define DW_CFA_val_expression 0x16 #define DW_CFA_lo_user 0x1c #define DW_CFA_GNU_window_save 0x2d #define DW_CFA_GNU_args_size 0x2e #define DW_CFA_GNU_negative_offset_extended 0x2f #define DW_CFA_hi_user 0x3f #define DW_EH_PE_FORM 0x07 #define DW_EH_PE_native 0x00 #define DW_EH_PE_leb128 0x01 #define DW_EH_PE_data2 0x02 #define DW_EH_PE_data4 0x03 #define DW_EH_PE_data8 0x04 #define DW_EH_PE_signed 0x08 #define DW_EH_PE_ADJUST 0x70 #define DW_EH_PE_abs 0x00 #define DW_EH_PE_pcrel 0x10 #define DW_EH_PE_textrel 0x20 #define DW_EH_PE_datarel 0x30 #define DW_EH_PE_funcrel 0x40 #define DW_EH_PE_aligned 0x50 #define DW_EH_PE_indirect 0x80 #define DW_EH_PE_omit 0xff #define min(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x < _y ?
_x : _y; }) #define max(x,y) ({ \ typeof(x) _x = (x); \ typeof(y) _y = (y); \ (void) (&_x == &_y); \ _x > _y ? _x : _y; }) #define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1)) typedef unsigned long uleb128_t; typedef signed long sleb128_t; struct unwind_item { enum item_location { Nowhere, Memory, Register, Value } where; uleb128_t value; }; struct unwind_state { uleb128_t loc, org; const u8 *cieStart, *cieEnd; uleb128_t codeAlign; sleb128_t dataAlign; struct cfa { uleb128_t reg, offs; } cfa; struct unwind_item regs[ARRAY_SIZE(reg_info)]; unsigned stackDepth:8; unsigned version:8; const u8 *label; const u8 *stack[MAX_STACK_DEPTH]; }; static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 }; static uleb128_t get_uleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; uleb128_t value; unsigned shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { cur = end + 1; break; } value |= (uleb128_t)(*cur & 0x7f) << shift; if (!(*cur++ & 0x80)) break; } *pcur = cur; return value; } static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) { const u8 *cur = *pcur; sleb128_t value; unsigned shift; for (shift = 0, value = 0; cur < end; shift += 7) { if (shift + 7 > 8 * sizeof(value) && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) { cur = end + 1; break; } value |= (sleb128_t)(*cur & 0x7f) << shift; if (!(*cur & 0x80)) { value |= -(*cur++ & 0x40) << shift; break; } } *pcur = cur; return value; } static unsigned long read_pointer(const u8 **pLoc, const void *end, signed ptrType) { unsigned long value = 0; union { const u8 *p8; const u16 *p16u; const s16 *p16s; const u32 *p32u; const s32 *p32s; const unsigned long *pul; } ptr; if (ptrType < 0 || ptrType == DW_EH_PE_omit) return 0; ptr.p8 = *pLoc; switch(ptrType & DW_EH_PE_FORM) { case DW_EH_PE_data2: if (end < (const void *)(ptr.p16u + 1)) return 0; if(ptrType & DW_EH_PE_signed) value = get_unaligned(ptr.p16s++); else value = get_unaligned(ptr.p16u++); break; case DW_EH_PE_data4: #ifdef CONFIG_64BIT if (end < (const void *)(ptr.p32u + 1)) return 0; if(ptrType & DW_EH_PE_signed) value = get_unaligned(ptr.p32s++); else value = get_unaligned(ptr.p32u++); break; case DW_EH_PE_data8: BUILD_BUG_ON(sizeof(u64) != sizeof(value)); #else BUILD_BUG_ON(sizeof(u32) != sizeof(value)); #endif case DW_EH_PE_native: if (end < (const void *)(ptr.pul + 1)) return 0; value = get_unaligned(ptr.pul++); break; case DW_EH_PE_leb128: BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value)); value = ptrType & DW_EH_PE_signed ? 
get_sleb128(&ptr.p8, end) : get_uleb128(&ptr.p8, end); if ((const void *)ptr.p8 > end) return 0; break; default: return 0; } switch(ptrType & DW_EH_PE_ADJUST) { case DW_EH_PE_abs: break; case DW_EH_PE_pcrel: value += (unsigned long)*pLoc; break; default: return 0; } /* TBD if ((ptrType & DW_EH_PE_indirect) && __get_user(value, (unsigned long *)value)) return 0; */ *pLoc = ptr.p8; return value; } static signed fde_pointer_type(const u32 *cie) { const u8 *ptr = (const u8 *)(cie + 2); unsigned version = *ptr; if (version != 1) return -1; /* unsupported */ if (*++ptr) { const char *aug; const u8 *end = (const u8 *)(cie + 1) + *cie; uleb128_t len; /* check if augmentation size is first (and thus present) */ if (*ptr != 'z') return -1; /* check if augmentation string is nul-terminated */ if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL) return -1; ++ptr; /* skip terminator */ get_uleb128(&ptr, end); /* skip code alignment */ get_sleb128(&ptr, end); /* skip data alignment */ /* skip return address column */ version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end); len = get_uleb128(&ptr, end); /* augmentation length */ if (ptr + len < ptr || ptr + len > end) return -1; end = ptr + len; while (*++aug) { if (ptr >= end) return -1; switch(*aug) { case 'L': ++ptr; break; case 'P': { signed ptrType = *ptr++; if (!read_pointer(&ptr, end, ptrType) || ptr > end) return -1; } break; case 'R': return *ptr; default: return -1; } } } return DW_EH_PE_native|DW_EH_PE_abs; } static int advance_loc(unsigned long delta, struct unwind_state *state) { state->loc += delta * state->codeAlign; return delta > 0; } static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value, struct unwind_state *state) { if (reg < ARRAY_SIZE(state->regs)) { state->regs[reg].where = where; state->regs[reg].value = value; } } static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc, signed ptrType, struct unwind_state *state) { union { const u8 *p8; const u16 *p16; const u32 *p32; } ptr; int result = 1; if (start != state->cieStart) { state->loc = state->org; result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state); if (targetLoc == 0 && state->label == NULL) return result; } for (ptr.p8 = start; result && ptr.p8 < end; ) { switch(*ptr.p8 >> 6) { uleb128_t value; case 0: switch(*ptr.p8++) { case DW_CFA_nop: break; case DW_CFA_set_loc: if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0) result = 0; break; case DW_CFA_advance_loc1: result = ptr.p8 < end && advance_loc(*ptr.p8++, state); break; case DW_CFA_advance_loc2: result = ptr.p8 <= end + 2 && advance_loc(*ptr.p16++, state); break; case DW_CFA_advance_loc4: result = ptr.p8 <= end + 4 && advance_loc(*ptr.p32++, state); break; case DW_CFA_offset_extended: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_val_offset: value = get_uleb128(&ptr.p8, end); set_rule(value, Value, get_uleb128(&ptr.p8, end), state); break; case DW_CFA_offset_extended_sf: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, get_sleb128(&ptr.p8, end), state); break; case DW_CFA_val_offset_sf: value = get_uleb128(&ptr.p8, end); set_rule(value, Value, get_sleb128(&ptr.p8, end), state); break; case DW_CFA_restore_extended: case DW_CFA_undefined: case DW_CFA_same_value: set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state); break; case DW_CFA_register: value = get_uleb128(&ptr.p8, end); set_rule(value, Register, get_uleb128(&ptr.p8, end), state); break; case 
DW_CFA_remember_state: if (ptr.p8 == state->label) { state->label = NULL; return 1; } if (state->stackDepth >= MAX_STACK_DEPTH) return 0; state->stack[state->stackDepth++] = ptr.p8; break; case DW_CFA_restore_state: if (state->stackDepth) { const uleb128_t loc = state->loc; const u8 *label = state->label; state->label = state->stack[state->stackDepth - 1]; memcpy(&state->cfa, &badCFA, sizeof(state->cfa)); memset(state->regs, 0, sizeof(state->regs)); state->stackDepth = 0; result = processCFI(start, end, 0, ptrType, state); state->loc = loc; state->label = label; } else return 0; break; case DW_CFA_def_cfa: state->cfa.reg = get_uleb128(&ptr.p8, end); /*nobreak*/ case DW_CFA_def_cfa_offset: state->cfa.offs = get_uleb128(&ptr.p8, end); break; case DW_CFA_def_cfa_sf: state->cfa.reg = get_uleb128(&ptr.p8, end); /*nobreak*/ case DW_CFA_def_cfa_offset_sf: state->cfa.offs = get_sleb128(&ptr.p8, end) * state->dataAlign; break; case DW_CFA_def_cfa_register: state->cfa.reg = get_uleb128(&ptr.p8, end); break; /*todo case DW_CFA_def_cfa_expression: */ /*todo case DW_CFA_expression: */ /*todo case DW_CFA_val_expression: */ case DW_CFA_GNU_args_size: get_uleb128(&ptr.p8, end); break; case DW_CFA_GNU_negative_offset_extended: value = get_uleb128(&ptr.p8, end); set_rule(value, Memory, (uleb128_t)0 - get_uleb128(&ptr.p8, end), state); break; case DW_CFA_GNU_window_save: default: result = 0; break; } break; case 1: result = advance_loc(*ptr.p8++ & 0x3f, state); break; case 2: value = *ptr.p8++ & 0x3f; set_rule(value, Memory, get_uleb128(&ptr.p8, end), state); break; case 3: set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state); break; } if (ptr.p8 > end) result = 0; if (result && targetLoc != 0 && targetLoc < state->loc) return 1; } return result && ptr.p8 == end && (targetLoc == 0 || (/*todo While in theory this should apply, gcc in practice omits everything past the function prolog, and hence the location never reaches the end of the function. targetLoc < state->loc &&*/ state->label == NULL)); } /* Unwind to the previous frame. Returns 0 if successful, negative * number in case of an error.
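 *
 * Hedged usage sketch, mirroring dwarf_backtrace() further below:
 *
 *	UNW_PC(frame) = bt->instptr;
 *	UNW_SP(frame) = bt->stkptr;
 *	while (!unwind(frame, is_ehframe) && UNW_PC(frame))
 *		... report UNW_PC(frame)/UNW_SP(frame) ...
 *
 * Each successful call rewrites the register file in *frame so that
 * it describes the caller's frame.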
 */ int unwind(struct unwind_frame_info *frame, int is_ehframe) { #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs]) const u32 *fde = NULL, *cie = NULL; const u8 *ptr = NULL, *end = NULL; unsigned long startLoc = 0, endLoc = 0, cfa; unsigned i; signed ptrType = -1; uleb128_t retAddrReg = 0; // struct unwind_table *table; void *unwind_table; struct local_unwind_table *table; struct unwind_state state; u64 reg_ptr = 0; if (UNW_PC(frame) == 0) return -EINVAL; if ((table = find_table(UNW_PC(frame)))) { // unsigned long tableSize = unwind_table_size; unsigned long tableSize = table->size; unwind_table = table->address; for (fde = unwind_table; tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { if (!*fde || (*fde & (sizeof(*fde) - 1))) break; if (is_ehframe && !fde[1]) continue; /* this is a CIE */ else if (fde[1] == 0xffffffff) continue; /* this is a CIE */ if ((fde[1] & (sizeof(*fde) - 1)) || fde[1] > (unsigned long)(fde + 1) - (unsigned long)unwind_table) continue; /* this is not a valid FDE */ if (is_ehframe) cie = fde + 1 - fde[1] / sizeof(*fde); else cie = unwind_table + fde[1]; if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) || (*cie & (sizeof(*cie) - 1)) || (cie[1] != 0xffffffff && cie[1]) || (ptrType = fde_pointer_type(cie)) < 0) { cie = NULL; /* this is not a (valid) CIE */ continue; } ptr = (const u8 *)(fde + 2); startLoc = read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType); endLoc = startLoc + read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, ptrType & DW_EH_PE_indirect ? ptrType : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); if (UNW_PC(frame) >= startLoc && UNW_PC(frame) < endLoc) break; cie = NULL; } } if (cie != NULL) { memset(&state, 0, sizeof(state)); state.cieEnd = ptr; /* keep here temporarily */ ptr = (const u8 *)(cie + 2); end = (const u8 *)(cie + 1) + *cie; if ((state.version = *ptr) != 1) cie = NULL; /* unsupported version */ else if (*++ptr) { /* check if augmentation size is first (and thus present) */ if (*ptr == 'z') { /* check for ignorable (or already handled) * nul-terminated augmentation string */ while (++ptr < end && *ptr) if (strchr("LPR", *ptr) == NULL) break; } if (ptr >= end || *ptr) cie = NULL; } ++ptr; } if (cie != NULL) { /* get code alignment factor */ state.codeAlign = get_uleb128(&ptr, end); /* get data alignment factor */ state.dataAlign = get_sleb128(&ptr, end); if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end) cie = NULL; else { retAddrReg = state.version <= 1 ?
*ptr++ : get_uleb128(&ptr, end); /* skip augmentation */ if (((const char *)(cie + 2))[1] == 'z') ptr += get_uleb128(&ptr, end); if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info) || REG_INVALID(retAddrReg) || reg_info[retAddrReg].width != sizeof(unsigned long)) cie = NULL; } } if (cie != NULL) { state.cieStart = ptr; ptr = state.cieEnd; state.cieEnd = end; end = (const u8 *)(fde + 1) + *fde; /* skip augmentation */ if (((const char *)(cie + 2))[1] == 'z') { uleb128_t augSize = get_uleb128(&ptr, end); if ((ptr += augSize) > end) fde = NULL; } } if (cie == NULL || fde == NULL) return -ENXIO; state.org = startLoc; memcpy(&state.cfa, &badCFA, sizeof(state.cfa)); /* process instructions */ if (!processCFI(ptr, end, UNW_PC(frame), ptrType, &state) || state.loc > endLoc || state.regs[retAddrReg].where == Nowhere || state.cfa.reg >= ARRAY_SIZE(reg_info) || reg_info[state.cfa.reg].width != sizeof(unsigned long) || state.cfa.offs % sizeof(unsigned long)) { return -EIO; } /* update frame */ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs; startLoc = min((unsigned long)UNW_SP(frame), cfa); endLoc = max((unsigned long)UNW_SP(frame), cfa); if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) { startLoc = min(STACK_LIMIT(cfa), cfa); endLoc = max(STACK_LIMIT(cfa), cfa); } #ifndef CONFIG_64BIT # define CASES CASE(8); CASE(16); CASE(32) #else # define CASES CASE(8); CASE(16); CASE(32); CASE(64) #endif for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { if (REG_INVALID(i)) { if (state.regs[i].where == Nowhere) continue; return -EIO; } switch(state.regs[i].where) { default: break; case Register: if (state.regs[i].value >= ARRAY_SIZE(reg_info) || REG_INVALID(state.regs[i].value) || reg_info[i].width > reg_info[state.regs[i].value].width){ return -EIO; } switch(reg_info[state.regs[i].value].width) { #define CASE(n) \ case sizeof(u##n): \ state.regs[i].value = FRAME_REG(state.regs[i].value, \ const u##n); \ break CASES; #undef CASE default: return -EIO; } break; } } for (i = 0; i < ARRAY_SIZE(state.regs); ++i) { if (REG_INVALID(i)) continue; switch(state.regs[i].where) { case Nowhere: if (reg_info[i].width != sizeof(UNW_SP(frame)) || &FRAME_REG(i, __typeof__(UNW_SP(frame))) != &UNW_SP(frame)) continue; UNW_SP(frame) = cfa; break; case Register: switch(reg_info[i].width) { #define CASE(n) case sizeof(u##n): \ FRAME_REG(i, u##n) = state.regs[i].value; \ break CASES; #undef CASE default: return -EIO; } break; case Value: if (reg_info[i].width != sizeof(unsigned long)){ return -EIO;} FRAME_REG(i, unsigned long) = cfa + state.regs[i].value * state.dataAlign; break; case Memory: { unsigned long addr = cfa + state.regs[i].value * state.dataAlign; if ((state.regs[i].value * state.dataAlign) % sizeof(unsigned long) || addr < startLoc || addr + sizeof(unsigned long) < addr || addr + sizeof(unsigned long) > endLoc){ return -EIO;} switch(reg_info[i].width) { #define CASE(n) case sizeof(u##n): \ readmem(addr, KVADDR, ®_ptr,sizeof(u##n), "register", RETURN_ON_ERROR|QUIET); \ FRAME_REG(i, u##n) = (u##n)reg_ptr;\ break CASES; #undef CASE default: return -EIO; } } break; } } return 0; #undef CASES #undef FRAME_REG } /* * Initialize the unwind table(s) in the best-case order: * * 1. Use the in-memory kernel and module unwind tables. * 2. Use the in-memory kernel-only .eh_frame data. (possible?) * 3. Use the kernel-only .eh_frame data from the vmlinux file. 
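 *
 * The outcome is reflected in kt->flags (a summary of the code
 * below, not an independent specification): the in-memory paths set
 * DWARF_UNWIND_MEMORY (plus DWARF_UNWIND_MODULES when more than one
 * table is found), the vmlinux path sets DWARF_UNWIND_EH_FRAME, and
 * DWARF_UNWIND is set whenever a usable table was found and
 * NO_DWARF_UNWIND was not requested.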
*/ void init_unwind_table(void) { ulong unwind_table_size; void *unwind_table; kt->flags &= ~DWARF_UNWIND; if (gather_in_memory_unwind_tables()) { if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY (%d tables)\n", unwind_tables_cnt); kt->flags |= DWARF_UNWIND_MEMORY; if (unwind_tables_cnt > 1) kt->flags |= DWARF_UNWIND_MODULES; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; return; } if (symbol_exists("__start_unwind") && symbol_exists("__end_unwind")) { unwind_table_size = symbol_value("__end_unwind") - symbol_value("__start_unwind"); if (!(unwind_table = malloc(unwind_table_size))) { error(WARNING, "cannot malloc unwind table space\n"); goto try_eh_frame; } if (!readmem(symbol_value("__start_unwind"), KVADDR, unwind_table, unwind_table_size, "unwind table", RETURN_ON_ERROR)) { error(WARNING, "cannot read unwind table data\n"); free(unwind_table); goto try_eh_frame; } kt->flags |= DWARF_UNWIND_MEMORY; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; default_unwind_table.size = unwind_table_size; default_unwind_table.address = unwind_table; if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_MEMORY\n"); return; } try_eh_frame: if (st->dwarf_eh_frame_size || st->dwarf_debug_frame_size) { int fd; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); unwind_table_size = is_ehframe ? st->dwarf_eh_frame_size : st->dwarf_debug_frame_size; if (!(unwind_table = malloc(unwind_table_size))) { error(WARNING, "cannot malloc unwind table space\n"); return; } if ((fd = open(pc->namelist, O_RDONLY)) < 0) { error(WARNING, "cannot open %s for %s data\n", pc->namelist, is_ehframe ? ".eh_frame" : ".debug_frame"); free(unwind_table); return; } if (is_ehframe) lseek(fd, st->dwarf_eh_frame_file_offset, SEEK_SET); else lseek(fd, st->dwarf_debug_frame_file_offset, SEEK_SET); if (read(fd, unwind_table, unwind_table_size) != unwind_table_size) { if (CRASHDEBUG(1)) error(WARNING, "cannot read %s data from %s\n", is_ehframe ? ".eh_frame" : ".debug_frame", pc->namelist); free(unwind_table); close(fd); return; } close(fd); default_unwind_table.size = unwind_table_size; default_unwind_table.address = unwind_table; kt->flags |= DWARF_UNWIND_EH_FRAME; if (!(kt->flags & NO_DWARF_UNWIND)) kt->flags |= DWARF_UNWIND; if (CRASHDEBUG(1)) fprintf(fp, "init_unwind_table: DWARF_UNWIND_EH_FRAME\n"); return; } } /* * Find the appropriate kernel-only "root_table" unwind_table, * and pass it to populate_local_tables() to do the heavy lifting. */ static int gather_in_memory_unwind_tables(void) { int i, cnt, found; struct syment *sp, *root_tables[10]; char *root_table_buf; char buf[BUFSIZE]; ulong name; STRUCT_SIZE_INIT(unwind_table, "unwind_table"); MEMBER_OFFSET_INIT(unwind_table_core, "unwind_table", "core"); MEMBER_OFFSET_INIT(unwind_table_init, "unwind_table", "init"); MEMBER_OFFSET_INIT(unwind_table_address, "unwind_table", "address"); MEMBER_OFFSET_INIT(unwind_table_size, "unwind_table", "size"); MEMBER_OFFSET_INIT(unwind_table_link, "unwind_table", "link"); MEMBER_OFFSET_INIT(unwind_table_name, "unwind_table", "name"); if (INVALID_SIZE(unwind_table) || INVALID_MEMBER(unwind_table_core) || INVALID_MEMBER(unwind_table_init) || INVALID_MEMBER(unwind_table_address) || INVALID_MEMBER(unwind_table_size) || INVALID_MEMBER(unwind_table_link) || INVALID_MEMBER(unwind_table_name)) { if (CRASHDEBUG(1)) error(NOTE, "unwind_table structure has changed, or does not exist in this kernel\n"); return 0; } /* * Unfortunately there are two kernel root_table symbols. 
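 *
 * The loop below resolves that ambiguity empirically: each candidate
 * symbol's unwind_table is read, and the one whose name string is
 * "kernel" is taken as the root of the table list.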
*/ if (!(cnt = get_syment_array("root_table", root_tables, 10))) return 0; root_table_buf = GETBUF(SIZE(unwind_table)); for (i = found = 0; i < cnt; i++) { sp = root_tables[i]; if (!readmem(sp->value, KVADDR, root_table_buf, SIZE(unwind_table), "root unwind_table", RETURN_ON_ERROR|QUIET)) goto gather_failed; name = ULONG(root_table_buf + OFFSET(unwind_table_name)); if (read_string(name, buf, strlen("kernel")+1) && STREQ("kernel", buf)) { found++; if (CRASHDEBUG(1)) fprintf(fp, "root_table name: %lx [%s]\n", name, buf); break; } } if (!found) goto gather_failed; cnt = populate_local_tables(sp->value, root_table_buf); FREEBUF(root_table_buf); return cnt; gather_failed: FREEBUF(root_table_buf); return 0; } /* * Transfer the relevant data from the kernel and module unwind_table * structures to the local_unwind_table structures. */ static int populate_local_tables(ulong root, char *buf) { struct list_data list_data, *ld; int i, cnt; ulong *table_list; ulong vaddr; struct local_unwind_table *tp; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->start = root; ld->member_offset = OFFSET(unwind_table_link); ld->flags = RETURN_ON_LIST_ERROR; if (CRASHDEBUG(1)) ld->flags |= VERBOSE; hq_open(); cnt = do_list(ld); if (cnt == -1) { error(WARNING, "UNWIND: failed to gather unwind_table list"); return 0; } table_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(table_list, cnt); hq_close(); if (!(local_unwind_tables = malloc(sizeof(struct local_unwind_table) * cnt))) { error(WARNING, "cannot malloc unwind_table space (%d tables)\n", cnt); FREEBUF(table_list); return 0; } for (i = 0; i < cnt; i++, tp++) { if (!readmem(table_list[i], KVADDR, buf, SIZE(unwind_table), "unwind_table", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read unwind_table\n"); goto failed; } tp = &local_unwind_tables[i]; /* * Copy the required table info for find_table(). */ BCOPY(buf + OFFSET(unwind_table_core), (char *)&tp->core.pc, sizeof(ulong)*2); BCOPY(buf + OFFSET(unwind_table_init), (char *)&tp->init.pc, sizeof(ulong)*2); BCOPY(buf + OFFSET(unwind_table_size), (char *)&tp->size, sizeof(ulong)); /* * Then read the DWARF CFI data. */ vaddr = ULONG(buf + OFFSET(unwind_table_address)); if (!(tp->address = malloc(tp->size))) { error(WARNING, "cannot malloc unwind_table space\n"); goto failed; break; } if (!readmem(vaddr, KVADDR, tp->address, tp->size, "DWARF CFI data", RETURN_ON_ERROR|QUIET)) { error(WARNING, "cannot read unwind_table data\n"); goto failed; } } unwind_tables_cnt = cnt; if (CRASHDEBUG(7)) dump_local_unwind_tables(); failed: FREEBUF(table_list); return unwind_tables_cnt; } /* * Find the unwind_table containing a pc. */ static struct local_unwind_table * find_table(unsigned long pc) { int i; struct local_unwind_table *tp, *table; table = &default_unwind_table; for (i = 0; i < unwind_tables_cnt; i++, tp++) { tp = &local_unwind_tables[i]; if ((pc >= tp->core.pc && pc < tp->core.pc + tp->core.range) || (pc >= tp->init.pc && pc < tp->init.pc + tp->init.range)) { table = tp; break; } } return table; } static void dump_local_unwind_tables(void) { int i, others; struct local_unwind_table *tp; others = 0; fprintf(fp, "DWARF flags: ("); if (kt->flags & DWARF_UNWIND) fprintf(fp, "%sDWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & NO_DWARF_UNWIND) fprintf(fp, "%sNO_DWARF_UNWIND", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_MEMORY) fprintf(fp, "%sDWARF_UNWIND_MEMORY", others++ ? "|" : ""); if (kt->flags & DWARF_UNWIND_EH_FRAME) fprintf(fp, "%sDWARF_UNWIND_EH_FRAME", others++ ? 
"|" : ""); if (kt->flags & DWARF_UNWIND_MODULES) fprintf(fp, "%sDWARF_UNWIND_MODULES", others++ ? "|" : ""); fprintf(fp, ")\n\n"); fprintf(fp, "default_unwind_table:\n"); fprintf(fp, " address: %lx\n", (ulong)default_unwind_table.address); fprintf(fp, " size: %ld\n\n", (ulong)default_unwind_table.size); fprintf(fp, "local_unwind_tables[%d]:\n", unwind_tables_cnt); for (i = 0; i < unwind_tables_cnt; i++, tp++) { tp = &local_unwind_tables[i]; fprintf(fp, "[%d]\n", i); fprintf(fp, " core: pc: %lx\n", tp->core.pc); fprintf(fp, " range: %ld\n", tp->core.range); fprintf(fp, " init: pc: %lx\n", tp->init.pc); fprintf(fp, " range: %ld\n", tp->init.range); fprintf(fp, " address: %lx\n", (ulong)tp->address); fprintf(fp, " size: %ld\n", tp->size); } } int dwarf_backtrace(struct bt_info *bt, int level, ulong stacktop) { unsigned long bp, offset; struct syment *sp; char *name; struct unwind_frame_info *frame; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); // frame->regs.rsp = bt->stkptr; // frame->regs.rip = bt->instptr; UNW_SP(frame) = bt->stkptr; UNW_PC(frame) = bt->instptr; /* read rbp from stack for non active tasks */ if (!(bt->flags & BT_DUMPFILE_SEARCH) && !bt->bptr) { // readmem(frame->regs.rsp, KVADDR, &bp, readmem(UNW_SP(frame), KVADDR, &bp, sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); frame->regs.rbp = bp; /* fixme for x86 */ } sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); goto bailout; } /* * If offset is zero, it means we have crossed over to the next * function. Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); if (CRASHDEBUG(2)) fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), UNW_PC(frame), frame->regs.rbp); while ((UNW_SP(frame) < stacktop) && !unwind(frame, is_ehframe) && UNW_PC(frame)) { /* To prevent rip pushed on IRQ stack being reported both * both on the IRQ and process stacks */ if ((bt->flags & BT_IRQSTACK) && (UNW_SP(frame) >= stacktop - 16)) break; level++; sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); break; } /* * If offset is zero, it means we have crossed over to the next * function. Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, "%s#%d [%016lx] %s at %016lx \n", level < 10 ? 
" " : "", level, UNW_SP(frame), name, UNW_PC(frame)); if (CRASHDEBUG(2)) fprintf(fp, " < SP: %lx PC: %lx FP: %lx >\n", UNW_SP(frame), UNW_PC(frame), frame->regs.rbp); } bailout: FREEBUF(frame); return ++level; } int dwarf_print_stack_entry(struct bt_info *bt, int level) { unsigned long offset; struct syment *sp; char *name; struct unwind_frame_info *frame; frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); UNW_SP(frame) = bt->stkptr; UNW_PC(frame) = bt->instptr; sp = value_search(UNW_PC(frame), &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)); goto bailout; } /* * If offset is zero, it means we have crossed over to the next * function. Recalculate by adjusting the text address */ if (!offset) { sp = value_search(UNW_PC(frame) - 1, &offset); if (!sp) { if (CRASHDEBUG(1)) fprintf(fp, "unwind: cannot find symbol for PC: %lx\n", UNW_PC(frame)-1); goto bailout; } } name = sp->name; fprintf(fp, " #%d [%016lx] %s at %016lx \n", level, UNW_SP(frame), name, UNW_PC(frame)); bailout: FREEBUF(frame); return level; } void dwarf_debug(struct bt_info *bt) { struct unwind_frame_info *frame; ulong bp; int is_ehframe = (!st->dwarf_debug_frame_size && st->dwarf_eh_frame_size); if (!bt->hp->eip) { dump_local_unwind_tables(); return; } if (!(kt->flags & DWARF_UNWIND_CAPABLE)) { error(INFO, "not DWARF capable\n"); return; } frame = (struct unwind_frame_info *)GETBUF(sizeof(struct unwind_frame_info)); /* * XXX: This only works for the first PC/SP pair seen in a normal * backtrace, so it's not particularly helpful. Ideally it should * be capable to take any PC/SP pair in a stack, but it appears to * related to the rbp value. */ UNW_PC(frame) = bt->hp->eip; UNW_SP(frame) = bt->hp->esp; readmem(UNW_SP(frame), KVADDR, &bp, sizeof(unsigned long), "reading bp", FAULT_ON_ERROR); frame->regs.rbp = bp; /* fixme for x86 */ unwind(frame, is_ehframe); fprintf(fp, "frame size: %lx (%lx)\n", (ulong)UNW_SP(frame), (ulong)UNW_SP(frame) - bt->hp->esp); FREEBUF(frame); } #endif crash-7.2.8/unwind_x86_64.h0000664000000000000000000000624713614623427014044 0ustar rootroot/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
 */ #define CONFIG_64BIT 1 #define NULL ((void *)0) typedef unsigned long size_t; typedef unsigned char u8; typedef signed short s16; typedef unsigned short u16; typedef signed int s32; typedef unsigned int u32; typedef unsigned long long u64; struct pt_regs { unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long rbp; unsigned long rbx; /* arguments: non interrupts/non tracing syscalls only save up to here */ unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rax; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ unsigned long rip; unsigned long cs; unsigned long eflags; unsigned long rsp; unsigned long ss; /* top of stack page */ }; struct unwind_frame_info { struct pt_regs regs; }; extern int unwind(struct unwind_frame_info *, int); extern void init_unwind_table(void); extern void free_unwind_table(void); #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) #define get_unaligned(ptr) (*(ptr)) //#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) #define THREAD_ORDER 1 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) #define UNW_PC(frame) (frame)->regs.rip #define UNW_SP(frame) (frame)->regs.rsp #ifdef CONFIG_FRAME_POINTER #define UNW_FP(frame) (frame)->regs.rbp #define FRAME_RETADDR_OFFSET 8 #define FRAME_LINK_OFFSET 0 #define STACK_BOTTOM(tsk) (((tsk)->thread.rsp0 - 1) & ~(THREAD_SIZE - 1)) #define STACK_TOP(tsk) ((tsk)->thread.rsp0) #endif #define EXTRA_INFO(f) { BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) % FIELD_SIZEOF(struct unwind_frame_info, f)) + offsetof(struct unwind_frame_info, f)/ FIELD_SIZEOF(struct unwind_frame_info, f), FIELD_SIZEOF(struct unwind_frame_info, f) } #define PTREGS_INFO(f) EXTRA_INFO(regs.f) #define UNW_REGISTER_INFO \ PTREGS_INFO(rax),\ PTREGS_INFO(rdx),\ PTREGS_INFO(rcx),\ PTREGS_INFO(rbx), \ PTREGS_INFO(rsi), \ PTREGS_INFO(rdi), \ PTREGS_INFO(rbp), \ PTREGS_INFO(rsp), \ PTREGS_INFO(r8), \ PTREGS_INFO(r9), \ PTREGS_INFO(r10),\ PTREGS_INFO(r11), \ PTREGS_INFO(r12), \ PTREGS_INFO(r13), \ PTREGS_INFO(r14), \ PTREGS_INFO(r15), \ PTREGS_INFO(rip) crash-7.2.8/netdump.c0000664000000000000000000043373013614623427013172 0ustar rootroot/* netdump.c * * Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details.
 * * Author: David Anderson */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include "netdump.h" #include "sadump.h" #include "xen_dom0.h" static struct vmcore_data vmcore_data = { 0 }; static struct vmcore_data *nd = &vmcore_data; static struct proc_kcore_data proc_kcore_data = { 0 }; static struct proc_kcore_data *pkd = &proc_kcore_data; static void netdump_print(char *, ...); static size_t resize_elf_header(int, char *, char **, char **, ulong); static void dump_Elf32_Ehdr(Elf32_Ehdr *); static void dump_Elf32_Phdr(Elf32_Phdr *, int); static size_t dump_Elf32_Nhdr(Elf32_Off offset, int); static void dump_Elf64_Ehdr(Elf64_Ehdr *); static void dump_Elf64_Phdr(Elf64_Phdr *, int); static void dump_Elf64_Shdr(Elf64_Shdr *shdr); static size_t dump_Elf64_Nhdr(Elf64_Off offset, int); static void get_netdump_regs_32(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_ppc(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_ppc64(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_arm(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_arm64(struct bt_info *, ulong *, ulong *); static void get_netdump_regs_mips(struct bt_info *, ulong *, ulong *); static void check_dumpfile_size(char *); static int proc_kcore_init_32(FILE *, int); static int proc_kcore_init_64(FILE *, int); static char *get_regs_from_note(char *, ulong *, ulong *); static void kdump_get_osrelease(void); static char *vmcoreinfo_read_string(const char *); #define ELFSTORE 1 #define ELFREAD 0 #define MIN_PAGE_SIZE (4096) /* * Architectures with configurable page sizes * can use a page size that differs from the host machine's. */ #define READ_PAGESIZE_FROM_VMCOREINFO() \ (machine_type("IA64") || machine_type("PPC64") || machine_type("PPC") || machine_type("ARM64")) /* * kdump installs NT_PRSTATUS elf notes only for the cpus * that were online during dumping. Hence we call into * this function after reading the cpu map from the kernel, * to remap the NT_PRSTATUS notes only to the online cpus. */ void map_cpus_to_prstatus(void) { void **nt_ptr; int online, i, j, nrcpus; size_t size; if (pc->flags2 & QEMU_MEM_DUMP_ELF) /* notes exist for all cpus */ return; if (!(online = get_cpus_online()) || (online == kt->cpus)) return; if (CRASHDEBUG(1)) error(INFO, "cpus: %d online: %d NT_PRSTATUS notes: %d (remapping)\n", kt->cpus, online, nd->num_prstatus_notes); size = NR_CPUS * sizeof(void *); nt_ptr = (void **)GETBUF(size); BCOPY(nd->nt_prstatus_percpu, nt_ptr, size); BZERO(nd->nt_prstatus_percpu, size); /* * Re-populate the array with the notes mapping to online cpus */ nrcpus = (kt->kernel_NR_CPUS ? kt->kernel_NR_CPUS : NR_CPUS); for (i = 0, j = 0; i < nrcpus; i++) { if (in_cpu_map(ONLINE_MAP, i)) nd->nt_prstatus_percpu[i] = nt_ptr[j++]; } FREEBUF(nt_ptr); } /* * Determine whether a file is a netdump/diskdump/kdump creation, * and if TRUE, initialize the vmcore_data structure.
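 *
 * Return value, summarizing the code below: the ELF header size on
 * success (callers treat any non-zero value as TRUE), TRUE alone for
 * a KCORE_LOCAL query, or FALSE when the file is not an ELF core of
 * the queried flavor.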
*/ int is_netdump(char *file, ulong source_query) { int i, fd, swap; Elf32_Ehdr *elf32; Elf32_Phdr *load32; Elf64_Ehdr *elf64; Elf64_Phdr *load64; char *eheader, *sect0; char buf[BUFSIZE]; size_t size, len, tot; Elf32_Off offset32; Elf64_Off offset64; ulong format; if ((fd = open(file, O_RDWR)) < 0) { if ((fd = open(file, O_RDONLY)) < 0) { sprintf(buf, "%s: open", file); perror(buf); return FALSE; } } size = MIN_NETDUMP_ELF_HEADER_SIZE; if ((eheader = (char *)malloc(size)) == NULL) { fprintf(stderr, "cannot malloc minimum ELF header buffer\n"); clean_exit(1); } if (FLAT_FORMAT()) { if (!read_flattened_format(fd, 0, eheader, size)) goto bailout; } else { if (read(fd, eheader, size) != size) { sprintf(buf, "%s: ELF header read", file); perror(buf); goto bailout; } } load32 = NULL; load64 = NULL; format = 0; elf32 = (Elf32_Ehdr *)&eheader[0]; elf64 = (Elf64_Ehdr *)&eheader[0]; /* * Verify the ELF header, and determine the dumpfile format. * * For now, kdump vmcores differ from netdump/diskdump like so: * * 1. The first kdump PT_LOAD segment is packed just after * the ELF header, whereas netdump/diskdump page-align * the first PT_LOAD segment. * 2. Each kdump PT_LOAD segment has a p_align field of zero, * whereas netdump/diskdump have their p_align fields set * to the system page-size. * * If either kdump difference is seen, presume kdump -- this * is obviously subject to change. */ if (!STRNEQ(eheader, ELFMAG) || eheader[EI_VERSION] != EV_CURRENT) goto bailout; swap = (((eheader[EI_DATA] == ELFDATA2LSB) && (__BYTE_ORDER == __BIG_ENDIAN)) || ((eheader[EI_DATA] == ELFDATA2MSB) && (__BYTE_ORDER == __LITTLE_ENDIAN))); if ((elf32->e_ident[EI_CLASS] == ELFCLASS32) && (swap16(elf32->e_type, swap) == ET_CORE) && (swap32(elf32->e_version, swap) == EV_CURRENT) && (swap16(elf32->e_phnum, swap) >= 2)) { switch (swap16(elf32->e_machine, swap)) { case EM_386: if (machine_type_mismatch(file, "X86", NULL, source_query)) goto bailout; break; case EM_ARM: if (machine_type_mismatch(file, "ARM", NULL, source_query)) goto bailout; break; case EM_PPC: if (machine_type_mismatch(file, "PPC", NULL, source_query)) goto bailout; break; case EM_MIPS: if (machine_type_mismatch(file, "MIPS", NULL, source_query)) goto bailout; break; default: if (machine_type_mismatch(file, "(unknown)", NULL, source_query)) goto bailout; } if (endian_mismatch(file, elf32->e_ident[EI_DATA], source_query)) goto bailout; load32 = (Elf32_Phdr *) &eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; if ((load32->p_offset & (MIN_PAGE_SIZE-1)) || (load32->p_align == 0)) format = KDUMP_ELF32; else format = NETDUMP_ELF32; } else if ((elf64->e_ident[EI_CLASS] == ELFCLASS64) && (swap16(elf64->e_type, swap) == ET_CORE) && (swap32(elf64->e_version, swap) == EV_CURRENT) && (swap16(elf64->e_phnum, swap) >= 2)) { switch (swap16(elf64->e_machine, swap)) { case EM_IA_64: if (machine_type_mismatch(file, "IA64", NULL, source_query)) goto bailout; break; case EM_PPC64: if (machine_type_mismatch(file, "PPC64", NULL, source_query)) goto bailout; break; case EM_X86_64: if (machine_type_mismatch(file, "X86_64", NULL, source_query)) goto bailout; break; case EM_S390: if (machine_type_mismatch(file, "S390X", NULL, source_query)) goto bailout; break; case EM_386: if (machine_type_mismatch(file, "X86", NULL, source_query)) goto bailout; break; case EM_ARM: if (machine_type_mismatch(file, "ARM", NULL, source_query)) goto bailout; break; case EM_AARCH64: if (machine_type_mismatch(file, "ARM64", NULL, source_query)) goto bailout; break; case EM_MIPS: if 
(machine_type_mismatch(file, "MIPS", NULL, source_query)) goto bailout; break; default: if (machine_type_mismatch(file, "(unknown)", NULL, source_query)) goto bailout; } if (endian_mismatch(file, elf64->e_ident[EI_DATA], source_query)) goto bailout; load64 = (Elf64_Phdr *) &eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; if ((load64->p_offset & (MIN_PAGE_SIZE-1)) || (load64->p_align == 0)) format = KDUMP_ELF64; else format = NETDUMP_ELF64; } else { if (CRASHDEBUG(2)) error(INFO, "%s: not a %s ELF dumpfile\n", file, source_query == NETDUMP_LOCAL ? "netdump" : "kdump"); goto bailout; } if (source_query == KCORE_LOCAL) { close(fd); return TRUE; } switch (format) { case NETDUMP_ELF32: case NETDUMP_ELF64: if (source_query & (NETDUMP_LOCAL|NETDUMP_REMOTE)) break; else goto bailout; case KDUMP_ELF32: case KDUMP_ELF64: if (source_query & KDUMP_LOCAL) break; else goto bailout; } sect0 = NULL; if (!(size = resize_elf_header(fd, file, &eheader, §0, format))) goto bailout; nd->ndfd = fd; nd->elf_header = eheader; nd->flags = format | source_query; switch (format) { case NETDUMP_ELF32: case KDUMP_ELF32: nd->header_size = size; nd->elf32 = (Elf32_Ehdr *)&nd->elf_header[0]; nd->num_pt_load_segments = nd->elf32->e_phnum - 1; if ((nd->pt_load_segments = (struct pt_load_segment *) malloc(sizeof(struct pt_load_segment) * nd->num_pt_load_segments)) == NULL) { fprintf(stderr, "cannot malloc PT_LOAD segment buffers\n"); clean_exit(1); } nd->notes32 = (Elf32_Phdr *) &nd->elf_header[sizeof(Elf32_Ehdr)]; nd->load32 = (Elf32_Phdr *) &nd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; if (format == NETDUMP_ELF32) nd->page_size = (uint)nd->load32->p_align; dump_Elf32_Ehdr(nd->elf32); dump_Elf32_Phdr(nd->notes32, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) dump_Elf32_Phdr(nd->load32 + i, ELFSTORE+i); offset32 = nd->notes32->p_offset; for (tot = 0; tot < nd->notes32->p_filesz; tot += len) { if (!(len = dump_Elf32_Nhdr(offset32, ELFSTORE))) break; offset32 += len; } break; case NETDUMP_ELF64: case KDUMP_ELF64: nd->header_size = size; nd->elf64 = (Elf64_Ehdr *)&nd->elf_header[0]; /* * Extended Numbering support * See include/uapi/linux/elf.h and elf(5) for more information */ if (nd->elf64->e_phnum == PN_XNUM) { nd->sect0_64 = (Elf64_Shdr *)sect0; nd->num_pt_load_segments = nd->sect0_64->sh_info - 1; } else nd->num_pt_load_segments = nd->elf64->e_phnum - 1; if ((nd->pt_load_segments = (struct pt_load_segment *) malloc(sizeof(struct pt_load_segment) * nd->num_pt_load_segments)) == NULL) { fprintf(stderr, "cannot malloc PT_LOAD segment buffers\n"); clean_exit(1); } nd->notes64 = (Elf64_Phdr *) &nd->elf_header[sizeof(Elf64_Ehdr)]; nd->load64 = (Elf64_Phdr *) &nd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; if (format == NETDUMP_ELF64) nd->page_size = (uint)nd->load64->p_align; dump_Elf64_Ehdr(nd->elf64); dump_Elf64_Phdr(nd->notes64, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) dump_Elf64_Phdr(nd->load64 + i, ELFSTORE+i); offset64 = nd->notes64->p_offset; for (tot = 0; tot < nd->notes64->p_filesz; tot += len) { if (!(len = dump_Elf64_Nhdr(offset64, ELFSTORE))) break; offset64 += len; } break; } if (CRASHDEBUG(1)) netdump_memory_dump(fp); pc->read_vmcoreinfo = vmcoreinfo_read_string; if ((source_query == KDUMP_LOCAL) && (pc->flags2 & GET_OSRELEASE)) kdump_get_osrelease(); if ((source_query == KDUMP_LOCAL) && (pc->flags2 & GET_LOG)) { pc->dfd = nd->ndfd; pc->readmem = read_kdump; nd->flags |= KDUMP_LOCAL; pc->flags |= KDUMP; get_log_from_vmcoreinfo(file); } return nd->header_size; bailout: 
close(fd); free(eheader); return FALSE; } /* * Search through all PT_LOAD segments to determine the * file offset where the physical memory segment(s) start * in the vmcore, and consider everything prior to that as * header contents. */ static size_t resize_elf_header(int fd, char *file, char **eheader_ptr, char **sect0_ptr, ulong format) { int i; char buf[BUFSIZE]; char *eheader; Elf32_Ehdr *elf32; Elf32_Phdr *load32; Elf64_Ehdr *elf64; Elf64_Phdr *load64; Elf32_Off p_offset32; Elf64_Off p_offset64; size_t header_size; uint num_pt_load_segments; eheader = *eheader_ptr; header_size = num_pt_load_segments = 0; elf32 = (Elf32_Ehdr *)&eheader[0]; elf64 = (Elf64_Ehdr *)&eheader[0]; switch (format) { case NETDUMP_ELF32: case KDUMP_ELF32: num_pt_load_segments = elf32->e_phnum - 1; header_size = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) + (sizeof(Elf32_Phdr) * num_pt_load_segments); break; case NETDUMP_ELF64: case KDUMP_ELF64: /* * Extended Numbering support * See include/uapi/linux/elf.h and elf(5) for more information */ if (elf64->e_phnum == PN_XNUM) { Elf64_Shdr *shdr64; shdr64 = (Elf64_Shdr *)malloc(sizeof(*shdr64)); if (!shdr64) { fprintf(stderr, "cannot malloc a section header buffer\n"); return 0; } if (FLAT_FORMAT()) { if (!read_flattened_format(fd, elf64->e_shoff, shdr64, elf64->e_shentsize)) return 0; } else { if (lseek(fd, elf64->e_shoff, SEEK_SET) != elf64->e_shoff) { sprintf(buf, "%s: section header lseek", file); perror(buf); return 0; } if (read(fd, shdr64, elf64->e_shentsize) != elf64->e_shentsize) { sprintf(buf, "%s: section header read", file); perror(buf); return 0; } } num_pt_load_segments = shdr64->sh_info - 1; *sect0_ptr = (char *)shdr64; } else num_pt_load_segments = elf64->e_phnum - 1; header_size = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) + (sizeof(Elf64_Phdr) * num_pt_load_segments); break; } if ((eheader = (char *)realloc(eheader, header_size)) == NULL) { fprintf(stderr, "cannot realloc interim ELF header buffer\n"); clean_exit(1); } else *eheader_ptr = eheader; if (FLAT_FORMAT()) { if (!read_flattened_format(fd, 0, eheader, header_size)) return 0; } else { if (lseek(fd, 0, SEEK_SET) != 0) { sprintf(buf, "%s: lseek", file); perror(buf); return 0; } if (read(fd, eheader, header_size) != header_size) { sprintf(buf, "%s: ELF header read", file); perror(buf); return 0; } } switch (format) { case NETDUMP_ELF32: case KDUMP_ELF32: load32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; p_offset32 = load32->p_offset; for (i = 0; i < num_pt_load_segments; i++, load32 += 1) { if (load32->p_offset && (p_offset32 > load32->p_offset)) p_offset32 = load32->p_offset; } header_size = (size_t)p_offset32; break; case NETDUMP_ELF64: case KDUMP_ELF64: load64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; p_offset64 = load64->p_offset; for (i = 0; i < num_pt_load_segments; i++, load64 += 1) { if (load64->p_offset && (p_offset64 > load64->p_offset)) p_offset64 = load64->p_offset; } header_size = (size_t)p_offset64; break; } if ((eheader = (char *)realloc(eheader, header_size)) == NULL) { perror("realloc"); fprintf(stderr, "cannot realloc resized ELF header buffer\n"); clean_exit(1); } else *eheader_ptr = eheader; if (FLAT_FORMAT()) { if (!read_flattened_format(fd, 0, eheader, header_size)) return 0; } else { if (lseek(fd, 0, SEEK_SET) != 0) { sprintf(buf, "%s: lseek", file); perror(buf); return 0; } if (read(fd, eheader, header_size) != header_size) { sprintf(buf, "%s: ELF header read", file); perror(buf); return 0; } } return header_size; } /* * Return the 
e_version number of an ELF file * (or -1 if it's not a readable ELF file) */ int file_elf_version(char *file) { int fd, size; Elf32_Ehdr *elf32; Elf64_Ehdr *elf64; char header[MIN_NETDUMP_ELF_HEADER_SIZE]; char buf[BUFSIZE]; if ((fd = open(file, O_RDONLY)) < 0) { sprintf(buf, "%s: open", file); perror(buf); return -1; } size = MIN_NETDUMP_ELF_HEADER_SIZE; if (read(fd, header, size) != size) { sprintf(buf, "%s: read", file); perror(buf); close(fd); return -1; } close(fd); elf32 = (Elf32_Ehdr *)&header[0]; elf64 = (Elf64_Ehdr *)&header[0]; if (STRNEQ(elf32->e_ident, ELFMAG) && (elf32->e_ident[EI_CLASS] == ELFCLASS32) && (elf32->e_ident[EI_DATA] == ELFDATA2LSB) && (elf32->e_ident[EI_VERSION] == EV_CURRENT)) { return (elf32->e_version); } else if (STRNEQ(elf64->e_ident, ELFMAG) && (elf64->e_ident[EI_CLASS] == ELFCLASS64) && (elf64->e_ident[EI_VERSION] == EV_CURRENT)) { return (elf64->e_version); } return -1; } /* * Check whether any PT_LOAD segment goes beyond the file size. */ static void check_dumpfile_size(char *file) { int i; struct stat64 stat; struct pt_load_segment *pls; uint64_t segment_end; if (is_ramdump_image()) return; if (stat64(file, &stat) < 0) return; if (S_ISBLK(stat.st_mode)) { error(NOTE, "%s: No dump complete check for block devices\n", file); return; } for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; segment_end = pls->file_offset + (pls->phys_end - pls->phys_start); if (segment_end > stat.st_size) { error(WARNING, "%s: may be truncated or incomplete\n" " PT_LOAD p_offset: %lld\n" " p_filesz: %lld\n" " bytes required: %lld\n" " dumpfile size: %lld\n\n", file, pls->file_offset, pls->phys_end - pls->phys_start, segment_end, stat.st_size); return; } } } /* * Perform any post-dumpfile determination stuff here. */ int netdump_init(char *unused, FILE *fptr) { if (!VMCORE_VALID()) return FALSE; nd->ofp = fptr; check_dumpfile_size(pc->dumpfile); return TRUE; } /* * Read from a netdump-created dumpfile. */ int read_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { off_t offset; ssize_t read_ret; struct pt_load_segment *pls; int i; offset = 0; /* * The Elf32_Phdr has 32-bit fields for p_paddr, p_filesz and * p_memsz, so for now, multiple PT_LOAD segment support is * restricted to 64-bit machines for netdump/diskdump vmcores. * However, kexec/kdump has introduced the optional use of a * 64-bit ELF header for 32-bit processors.
*/ switch (DUMPFILE_FORMAT(nd->flags)) { case NETDUMP_ELF32: offset = (off_t)paddr + (off_t)nd->header_size; break; case NETDUMP_ELF64: case KDUMP_ELF32: case KDUMP_ELF64: if (nd->num_pt_load_segments == 1) { offset = (off_t)paddr + (off_t)nd->header_size - (off_t)nd->pt_load_segments[0].phys_start; break; } for (i = offset = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; if ((paddr >= pls->phys_start) && (paddr < pls->phys_end)) { offset = (off_t)(paddr - pls->phys_start) + pls->file_offset; break; } if (pls->zero_fill && (paddr >= pls->phys_end) && (paddr < pls->zero_fill)) { memset(bufptr, 0, cnt); if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: zero-fill: " "addr: %lx paddr: %llx cnt: %d\n", addr, (ulonglong)paddr, cnt); return cnt; } } if (!offset) { if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: READ_ERROR: " "offset not found for paddr: %llx\n", (ulonglong)paddr); return READ_ERROR; } break; } if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n", addr, (ulonglong)paddr, cnt, (ulonglong)offset); if (FLAT_FORMAT()) { if (!read_flattened_format(nd->ndfd, offset, bufptr, cnt)) { if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: READ_ERROR: " "read_flattened_format failed for offset:" " %llx\n", (ulonglong)offset); return READ_ERROR; } } else { if (lseek(nd->ndfd, offset, SEEK_SET) == -1) { if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: SEEK_ERROR: " "offset: %llx\n", (ulonglong)offset); return SEEK_ERROR; } read_ret = read(nd->ndfd, bufptr, cnt); if (read_ret != cnt) { /* * If the incomplete flag has been set in the header, * first check whether zero_excluded has been set. */ if (is_incomplete_dump() && (read_ret >= 0) && (*diskdump_flags & ZERO_EXCLUDED)) { if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: zero-fill: " "addr: %lx paddr: %llx cnt: %d\n", addr + read_ret, (ulonglong)paddr + read_ret, cnt - (int)read_ret); bufptr += read_ret; bzero(bufptr, cnt - read_ret); return cnt; } if (CRASHDEBUG(8)) fprintf(fp, "read_netdump: READ_ERROR: " "offset: %llx\n", (ulonglong)offset); return READ_ERROR; } } return cnt; } /* * Write to a netdump-created dumpfile. Note that cmd_wr() does not * allow writes to dumpfiles, so you can't get here from there. * But, if it would ever be helpful, here it is... */ int write_netdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { off_t offset; struct pt_load_segment *pls; int i; offset = 0; switch (DUMPFILE_FORMAT(nd->flags)) { case NETDUMP_ELF32: offset = (off_t)paddr + (off_t)nd->header_size; break; case NETDUMP_ELF64: case KDUMP_ELF32: case KDUMP_ELF64: if (nd->num_pt_load_segments == 1) { offset = (off_t)paddr + (off_t)nd->header_size; break; } for (i = offset = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; if ((paddr >= pls->phys_start) && (paddr < pls->phys_end)) { offset = (off_t)(paddr - pls->phys_start) + pls->file_offset; break; } } if (!offset) return READ_ERROR; break; } if (lseek(nd->ndfd, offset, SEEK_SET) == -1) return SEEK_ERROR; if (write(nd->ndfd, bufptr, cnt) != cnt) return READ_ERROR; return cnt; } /* * Set the file pointer for debug output. */ FILE * set_netdump_fp(FILE *fp) { if (!VMCORE_VALID()) return NULL; nd->ofp = fp; return fp; } /* * Generic print routine to handle integral and remote daemon output. */ static void netdump_print(char *fmt, ...) 
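/*
 * Editor's sketch -- illustrative only, not part of the original
 * source: the physical-address-to-file-offset translation used by
 * read_netdump() and write_netdump() above, in isolation
 * ("paddr_to_file_offset" is a hypothetical name):
 */
#ifdef NOTDEF
static off_t
paddr_to_file_offset(physaddr_t paddr)
{
	int i;
	struct pt_load_segment *pls;

	for (i = 0; i < nd->num_pt_load_segments; i++) {
		pls = &nd->pt_load_segments[i];
		if ((paddr >= pls->phys_start) && (paddr < pls->phys_end))
			return (off_t)(paddr - pls->phys_start) +
				pls->file_offset;
	}
	return -1;	/* not backed by any PT_LOAD segment */
}
#endif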
{ char buf[BUFSIZE]; va_list ap; if (!fmt || !strlen(fmt) || !VMCORE_VALID()) return; va_start(ap, fmt); (void)vsnprintf(buf, BUFSIZE, fmt, ap); va_end(ap); if (nd->ofp) fprintf(nd->ofp, "%s", buf); else console(buf); } uint netdump_page_size(void) { if (!VMCORE_VALID()) return 0; return nd->page_size; } int netdump_free_memory(void) { return (VMCORE_VALID() ? 0 : 0); } int netdump_memory_used(void) { return (VMCORE_VALID() ? 0 : 0); } /* * The netdump server will eventually use the NT_TASKSTRUCT section * to pass the task address. Until such time, look at the ebp of the * user_regs_struct, which is located at the end of the NT_PRSTATUS * elf_prstatus structure, minus one integer: * * struct elf_prstatus * { * ... * elf_gregset_t pr_reg; (maps to user_regs_struct) * int pr_fpvalid; * }; * * If it's a kernel stack address who's adjusted task_struct value is * equal to one of the active set tasks, we'll presume it's legit. * */ ulong get_netdump_panic_task(void) { #ifdef DAEMON return nd->task_struct; #else int i, crashing_cpu; size_t len; char *user_regs; ulong ebp, esp, task; if (!VMCORE_VALID() || !get_active_set()) goto panic_task_undetermined; if (nd->task_struct) { if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: NT_TASKSTRUCT: %lx\n", nd->task_struct); return nd->task_struct; } switch (DUMPFILE_FORMAT(nd->flags)) { case NETDUMP_ELF32: case NETDUMP_ELF64: crashing_cpu = -1; break; case KDUMP_ELF32: case KDUMP_ELF64: crashing_cpu = -1; if (kernel_symbol_exists("crashing_cpu")) { get_symbol_data("crashing_cpu", sizeof(int), &i); if ((i >= 0) && in_cpu_map(ONLINE_MAP, i)) { crashing_cpu = i; if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: active_set[crashing_cpu: %d]: %lx\n", crashing_cpu, tt->active_set[crashing_cpu]); } } if ((nd->num_prstatus_notes > 1) && (crashing_cpu == -1)) goto panic_task_undetermined; break; default: crashing_cpu = -1; break; } if (nd->elf32 && (nd->elf32->e_machine == EM_386)) { Elf32_Nhdr *note32 = NULL; if (nd->num_prstatus_notes > 1) { if (crashing_cpu != -1) note32 = (Elf32_Nhdr *) nd->nt_prstatus_percpu[crashing_cpu]; } else note32 = (Elf32_Nhdr *)nd->nt_prstatus; if (!note32) goto panic_task_undetermined; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = ((char *)note32 + len) - SIZE(user_regs_struct) - sizeof(int); ebp = ULONG(user_regs + OFFSET(user_regs_struct_ebp)); esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); check_ebp_esp: if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: NT_PRSTATUS esp: %lx ebp: %lx\n", esp, ebp); if (IS_KVADDR(esp)) { task = stkptr_to_task(esp); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: esp: %lx -> task: %lx\n", esp, task); for (i = 0; task && (i < NR_CPUS); i++) { if (task == tt->active_set[i]) return task; } } if (IS_KVADDR(ebp)) { task = stkptr_to_task(ebp); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: ebp: %lx -> task: %lx\n", ebp, task); for (i = 0; task && (i < NR_CPUS); i++) { if (task == tt->active_set[i]) return task; } } } else if (nd->elf64) { Elf64_Nhdr *note64 = NULL; if (nd->num_prstatus_notes > 1) { if (crashing_cpu != -1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[crashing_cpu]; } else note64 = (Elf64_Nhdr *)nd->nt_prstatus; if (!note64) goto panic_task_undetermined; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); user_regs = (char *)((char *)note64 + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); if (nd->elf64->e_machine == EM_386) { ebp = ULONG(user_regs + 
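/*
 * Editor's note -- illustrative only, not part of the original
 * source: the pointer arithmetic above and below follows from the
 * note layout
 *
 *   Elf32_Nhdr | name (padded to 4) | elf_prstatus descriptor
 *
 * where elf_prstatus ends with pr_reg (a user_regs_struct) followed
 * by the int pr_fpvalid; hence user_regs is found at the note's
 * rounded-up end minus SIZE(user_regs_struct) minus sizeof(int).
 */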
OFFSET(user_regs_struct_ebp)); esp = ULONG(user_regs + OFFSET(user_regs_struct_esp)); goto check_ebp_esp; } if (nd->elf64->e_machine == EM_PPC64) { /* * Get the GPR1 register value. */ esp = *(ulong *)((char *)user_regs + 8); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: NT_PRSTATUS esp: %lx\n", esp); if (IS_KVADDR(esp)) { task = stkptr_to_task(esp); if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: esp: %lx -> task: %lx\n", esp, task); for (i = 0; task && (i < NR_CPUS); i++) { if (task == tt->active_set[i]) return task; } } } if (nd->elf64->e_machine == EM_X86_64) { if ((crashing_cpu != -1) && (crashing_cpu <= kt->cpus)) return (tt->active_set[crashing_cpu]); } } panic_task_undetermined: if (CRASHDEBUG(1)) error(INFO, "get_netdump_panic_task: failed\n"); return NO_TASK; #endif } /* * Get the switch_stack address of the passed-in task. Currently only * the panicking task reports its switch-stack address. */ ulong get_netdump_switch_stack(ulong task) { #ifdef DAEMON if (nd->task_struct == task) return nd->switch_stack; return 0; #else if (!VMCORE_VALID() || !get_active_set()) return 0; if (nd->task_struct == task) return nd->switch_stack; return 0; #endif } int netdump_memory_dump(FILE *fp) { int i, others, wrap, flen; size_t len, tot; FILE *fpsave; Elf32_Off offset32; Elf32_Off offset64; struct pt_load_segment *pls; if (!VMCORE_VALID()) return FALSE; fpsave = nd->ofp; nd->ofp = fp; if (FLAT_FORMAT()) dump_flat_header(nd->ofp); netdump_print("vmcore_data: \n"); netdump_print(" flags: %lx (", nd->flags); others = 0; if (nd->flags & NETDUMP_LOCAL) netdump_print("%sNETDUMP_LOCAL", others++ ? "|" : ""); if (nd->flags & KDUMP_LOCAL) netdump_print("%sKDUMP_LOCAL", others++ ? "|" : ""); if (nd->flags & NETDUMP_REMOTE) netdump_print("%sNETDUMP_REMOTE", others++ ? "|" : ""); if (nd->flags & NETDUMP_ELF32) netdump_print("%sNETDUMP_ELF32", others++ ? "|" : ""); if (nd->flags & NETDUMP_ELF64) netdump_print("%sNETDUMP_ELF64", others++ ? "|" : ""); if (nd->flags & KDUMP_ELF32) netdump_print("%sKDUMP_ELF32", others++ ? "|" : ""); if (nd->flags & KDUMP_ELF64) netdump_print("%sKDUMP_ELF64", others++ ? "|" : ""); if (nd->flags & PARTIAL_DUMP) netdump_print("%sPARTIAL_DUMP", others++ ? "|" : ""); if (nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) netdump_print("%sQEMU_MEM_DUMP_KDUMP_BACKUP", others++ ? "|" : ""); netdump_print(") %s\n", FLAT_FORMAT() ? "[FLAT]" : ""); if ((pc->flags & RUNTIME) && symbol_exists("dump_level")) { int dump_level; if (readmem(symbol_value("dump_level"), KVADDR, &dump_level, sizeof(dump_level), "dump_level", QUIET|RETURN_ON_ERROR)) { netdump_print(" dump_level: %d (0x%x) %s", dump_level, dump_level, dump_level > 0 ? "(" : ""); #define DUMP_EXCLUDE_CACHE 0x00000001 /* Exclude LRU & SwapCache pages*/ #define DUMP_EXCLUDE_CLEAN 0x00000002 /* Exclude all-zero pages */ #define DUMP_EXCLUDE_FREE 0x00000004 /* Exclude free pages */ #define DUMP_EXCLUDE_ANON 0x00000008 /* Exclude Anon pages */ #define DUMP_SAVE_PRIVATE 0x00000010 /* Save private pages */ others = 0; if (dump_level & DUMP_EXCLUDE_CACHE) netdump_print("%sDUMP_EXCLUDE_CACHE", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_CLEAN) netdump_print("%sDUMP_EXCLUDE_CLEAN", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_FREE) netdump_print("%sDUMP_EXCLUDE_FREE", others++ ? "|" : ""); if (dump_level & DUMP_EXCLUDE_ANON) netdump_print("%sDUMP_EXCLUDE_ANON", others++ ? "|" : ""); if (dump_level & DUMP_SAVE_PRIVATE) netdump_print("%sDUMP_SAVE_PRIVATE", others++ ? "|" : ""); netdump_print("%s\n", dump_level > 0 ? 
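/*
 * Editor's note -- illustrative only, not part of the original
 * source: dump_level is a bitmask of the flags defined above, so a
 * value of 5, for example, decodes as
 * DUMP_EXCLUDE_CACHE|DUMP_EXCLUDE_FREE (0x1|0x4).
 */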
")" : ""); } else netdump_print(" dump_level: (unknown)\n"); } else if (!(pc->flags & RUNTIME) && symbol_exists("dump_level")) netdump_print(" dump_level: (undetermined)\n"); netdump_print(" ndfd: %d\n", nd->ndfd); netdump_print(" ofp: %lx\n", nd->ofp); netdump_print(" header_size: %d\n", nd->header_size); netdump_print(" num_pt_load_segments: %d\n", nd->num_pt_load_segments); for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; netdump_print(" pt_load_segment[%d]:\n", i); netdump_print(" file_offset: %lx\n", pls->file_offset); netdump_print(" phys_start: %llx\n", pls->phys_start); netdump_print(" phys_end: %llx\n", pls->phys_end); netdump_print(" zero_fill: %llx\n", pls->zero_fill); } netdump_print(" elf_header: %lx\n", nd->elf_header); netdump_print(" elf32: %lx\n", nd->elf32); netdump_print(" notes32: %lx\n", nd->notes32); netdump_print(" load32: %lx\n", nd->load32); netdump_print(" elf64: %lx\n", nd->elf64); netdump_print(" notes64: %lx\n", nd->notes64); netdump_print(" load64: %lx\n", nd->load64); netdump_print(" sect0_64: %lx\n", nd->sect0_64); netdump_print(" nt_prstatus: %lx\n", nd->nt_prstatus); netdump_print(" nt_prpsinfo: %lx\n", nd->nt_prpsinfo); netdump_print(" nt_taskstruct: %lx\n", nd->nt_taskstruct); netdump_print(" task_struct: %lx\n", nd->task_struct); netdump_print(" arch_data1: "); if (nd->arch_data1) { if (machine_type("X86_64")) netdump_print("%lx (relocate)\n", nd->arch_data1); else if (machine_type("ARM64")) netdump_print("%lx (kimage_voffset)\n", nd->arch_data1); } else netdump_print("(unused)\n"); netdump_print(" arch_data2: "); if (nd->arch_data2) { if (machine_type("ARM64")) netdump_print("%016lx\n" " CONFIG_ARM64_VA_BITS: %ld\n" " VA_BITS_ACTUAL: %lld\n", nd->arch_data2, nd->arch_data2 & 0xffffffff, ((ulonglong)nd->arch_data2 >> 32)); else netdump_print("%016lx (?)\n", nd->arch_data2); } else netdump_print("(unused)\n"); netdump_print(" switch_stack: %lx\n", nd->switch_stack); netdump_print(" page_size: %d\n", nd->page_size); dump_xen_kdump_data(fp); netdump_print(" num_prstatus_notes: %d\n", nd->num_prstatus_notes); netdump_print(" num_qemu_notes: %d\n", nd->num_qemu_notes); netdump_print(" vmcoreinfo: %lx\n", (ulong)nd->vmcoreinfo); netdump_print(" size_vmcoreinfo: %d\n", nd->size_vmcoreinfo); netdump_print(" nt_prstatus_percpu: "); wrap = sizeof(void *) == SIZEOF_32BIT ? 8 : 4; flen = sizeof(void *) == SIZEOF_32BIT ? 
8 : 16; if (nd->num_prstatus_notes == 1) netdump_print("%.*lx\n", flen, nd->nt_prstatus_percpu[0]); else { for (i = 0; i < nd->num_prstatus_notes; i++) { if ((i % wrap) == 0) netdump_print("\n "); netdump_print("%.*lx ", flen, nd->nt_prstatus_percpu[i]); } } netdump_print("\n"); netdump_print(" nt_qemu_percpu: "); if (nd->num_qemu_notes == 1) netdump_print("%.*lx\n", flen, nd->nt_qemu_percpu[0]); else { for (i = 0; i < nd->num_qemu_notes; i++) { if ((i % wrap) == 0) netdump_print("\n "); netdump_print("%.*lx ", flen, nd->nt_qemu_percpu[i]); } } netdump_print("\n"); netdump_print(" backup_src_start: %llx\n", nd->backup_src_start); netdump_print(" backup_src_size: %lx\n", nd->backup_src_size); netdump_print(" backup_offset: %llx\n", nd->backup_offset); netdump_print("\n"); switch (DUMPFILE_FORMAT(nd->flags)) { case NETDUMP_ELF32: case KDUMP_ELF32: dump_Elf32_Ehdr(nd->elf32); dump_Elf32_Phdr(nd->notes32, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) dump_Elf32_Phdr(nd->load32 + i, ELFREAD); offset32 = nd->notes32->p_offset; for (tot = 0; tot < nd->notes32->p_filesz; tot += len) { if (!(len = dump_Elf32_Nhdr(offset32, ELFREAD))) break; offset32 += len; } break; case NETDUMP_ELF64: case KDUMP_ELF64: dump_Elf64_Ehdr(nd->elf64); dump_Elf64_Phdr(nd->notes64, ELFREAD); for (i = 0; i < nd->num_pt_load_segments; i++) dump_Elf64_Phdr(nd->load64 + i, ELFREAD); if (nd->sect0_64) dump_Elf64_Shdr(nd->sect0_64); offset64 = nd->notes64->p_offset; for (tot = 0; tot < nd->notes64->p_filesz; tot += len) { if (!(len = dump_Elf64_Nhdr(offset64, ELFREAD))) break; offset64 += len; } break; } dump_ramdump_data(); nd->ofp = fpsave; return TRUE; } /* * Dump an ELF file header. */ static void dump_Elf32_Ehdr(Elf32_Ehdr *elf) { char buf[BUFSIZE]; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); netdump_print("Elf32_Ehdr:\n"); netdump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: netdump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: netdump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: netdump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: netdump_print("(ELFCLASSNUM)\n"); break; default: netdump_print("(?)\n"); break; } netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case ELFDATANONE: netdump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: netdump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: netdump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: netdump_print("(ELFDATANUM)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) netdump_print("(EV_CURRENT)\n"); else netdump_print("(?)\n"); netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: netdump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: netdump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: netdump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: netdump_print("(ELFOSABI_STANDALONE)\n"); break; case ELFOSABI_LINUX: netdump_print("(ELFOSABI_LINUX)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); netdump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: netdump_print("(ET_NONE)\n"); break; case ET_REL: netdump_print("(ET_REL)\n"); break; case ET_EXEC: 
netdump_print("(ET_EXEC)\n"); break; case ET_DYN: netdump_print("(ET_DYN)\n"); break; case ET_CORE: netdump_print("(ET_CORE)\n"); break; case ET_NUM: netdump_print("(ET_NUM)\n"); break; case ET_LOOS: netdump_print("(ET_LOOS)\n"); break; case ET_HIOS: netdump_print("(ET_HIOS)\n"); break; case ET_LOPROC: netdump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: netdump_print("(ET_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_ARM: netdump_print("(EM_ARM)\n"); break; case EM_386: netdump_print("(EM_386)\n"); break; case EM_MIPS: netdump_print("(EM_MIPS)\n"); break; default: netdump_print("(unsupported)\n"); break; } netdump_print(" e_version: %ld ", elf->e_version); netdump_print("%s\n", elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : ""); netdump_print(" e_entry: %lx\n", elf->e_entry); netdump_print(" e_phoff: %lx\n", elf->e_phoff); netdump_print(" e_shoff: %lx\n", elf->e_shoff); netdump_print(" e_flags: %lx\n", elf->e_flags); if ((elf->e_flags & DUMP_ELF_INCOMPLETE) && (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32)) pc->flags2 |= INCOMPLETE_DUMP; netdump_print(" e_ehsize: %x\n", elf->e_ehsize); netdump_print(" e_phentsize: %x\n", elf->e_phentsize); netdump_print(" e_phnum: %x\n", elf->e_phnum); netdump_print(" e_shentsize: %x\n", elf->e_shentsize); netdump_print(" e_shnum: %x\n", elf->e_shnum); netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx); } static void dump_Elf64_Ehdr(Elf64_Ehdr *elf) { char buf[BUFSIZE]; BZERO(buf, BUFSIZE); BCOPY(elf->e_ident, buf, SELFMAG); netdump_print("Elf64_Ehdr:\n"); netdump_print(" e_ident: \\%o%s\n", buf[0], &buf[1]); netdump_print(" e_ident[EI_CLASS]: %d ", elf->e_ident[EI_CLASS]); switch (elf->e_ident[EI_CLASS]) { case ELFCLASSNONE: netdump_print("(ELFCLASSNONE)"); break; case ELFCLASS32: netdump_print("(ELFCLASS32)\n"); break; case ELFCLASS64: netdump_print("(ELFCLASS64)\n"); break; case ELFCLASSNUM: netdump_print("(ELFCLASSNUM)\n"); break; default: netdump_print("(?)\n"); break; } netdump_print(" e_ident[EI_DATA]: %d ", elf->e_ident[EI_DATA]); switch (elf->e_ident[EI_DATA]) { case ELFDATANONE: netdump_print("(ELFDATANONE)\n"); break; case ELFDATA2LSB: netdump_print("(ELFDATA2LSB)\n"); break; case ELFDATA2MSB: netdump_print("(ELFDATA2MSB)\n"); break; case ELFDATANUM: netdump_print("(ELFDATANUM)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_VERSION]: %d ", elf->e_ident[EI_VERSION]); if (elf->e_ident[EI_VERSION] == EV_CURRENT) netdump_print("(EV_CURRENT)\n"); else netdump_print("(?)\n"); netdump_print(" e_ident[EI_OSABI]: %d ", elf->e_ident[EI_OSABI]); switch (elf->e_ident[EI_OSABI]) { case ELFOSABI_SYSV: netdump_print("(ELFOSABI_SYSV)\n"); break; case ELFOSABI_HPUX: netdump_print("(ELFOSABI_HPUX)\n"); break; case ELFOSABI_ARM: netdump_print("(ELFOSABI_ARM)\n"); break; case ELFOSABI_STANDALONE: netdump_print("(ELFOSABI_STANDALONE)\n"); break; case ELFOSABI_LINUX: netdump_print("(ELFOSABI_LINUX)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_ident[EI_ABIVERSION]: %d\n", elf->e_ident[EI_ABIVERSION]); netdump_print(" e_type: %d ", elf->e_type); switch (elf->e_type) { case ET_NONE: netdump_print("(ET_NONE)\n"); break; case ET_REL: netdump_print("(ET_REL)\n"); break; case ET_EXEC: netdump_print("(ET_EXEC)\n"); break; case ET_DYN: netdump_print("(ET_DYN)\n"); break; case ET_CORE: netdump_print("(ET_CORE)\n"); break; case ET_NUM: netdump_print("(ET_NUM)\n"); break; case ET_LOOS: netdump_print("(ET_LOOS)\n"); break; case ET_HIOS: 
netdump_print("(ET_HIOS)\n"); break; case ET_LOPROC: netdump_print("(ET_LOPROC)\n"); break; case ET_HIPROC: netdump_print("(ET_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" e_machine: %d ", elf->e_machine); switch (elf->e_machine) { case EM_386: netdump_print("(EM_386)\n"); break; case EM_IA_64: netdump_print("(EM_IA_64)\n"); break; case EM_PPC64: netdump_print("(EM_PPC64)\n"); break; case EM_X86_64: netdump_print("(EM_X86_64)\n"); break; case EM_S390: netdump_print("(EM_S390)\n"); break; case EM_ARM: netdump_print("(EM_ARM)\n"); break; case EM_AARCH64: netdump_print("(EM_AARCH64)\n"); break; default: netdump_print("(unsupported)\n"); break; } netdump_print(" e_version: %ld ", elf->e_version); netdump_print("%s\n", elf->e_version == EV_CURRENT ? "(EV_CURRENT)" : ""); netdump_print(" e_entry: %lx\n", elf->e_entry); netdump_print(" e_phoff: %lx\n", elf->e_phoff); netdump_print(" e_shoff: %lx\n", elf->e_shoff); netdump_print(" e_flags: %lx\n", elf->e_flags); if ((elf->e_flags & DUMP_ELF_INCOMPLETE) && (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64)) pc->flags2 |= INCOMPLETE_DUMP; netdump_print(" e_ehsize: %x\n", elf->e_ehsize); netdump_print(" e_phentsize: %x\n", elf->e_phentsize); netdump_print(" e_phnum: %x\n", elf->e_phnum); netdump_print(" e_shentsize: %x\n", elf->e_shentsize); netdump_print(" e_shnum: %x\n", elf->e_shnum); netdump_print(" e_shstrndx: %x\n", elf->e_shstrndx); } /* * Dump a program segment header */ static void dump_Elf32_Phdr(Elf32_Phdr *prog, int store_pt_load_data) { int others; struct pt_load_segment *pls; if ((char *)prog > (nd->elf_header + nd->header_size)) error(FATAL, "Elf32_Phdr pointer: %lx ELF header end: %lx\n\n", (char *)prog, nd->elf_header + nd->header_size); if (store_pt_load_data) pls = &nd->pt_load_segments[store_pt_load_data-1]; else pls = NULL; netdump_print("Elf32_Phdr:\n"); netdump_print(" p_type: %lx ", prog->p_type); switch (prog->p_type) { case PT_NULL: netdump_print("(PT_NULL)\n"); break; case PT_LOAD: netdump_print("(PT_LOAD)\n"); break; case PT_DYNAMIC: netdump_print("(PT_DYNAMIC)\n"); break; case PT_INTERP: netdump_print("(PT_INTERP)\n"); break; case PT_NOTE: netdump_print("(PT_NOTE)\n"); break; case PT_SHLIB: netdump_print("(PT_SHLIB)\n"); break; case PT_PHDR: netdump_print("(PT_PHDR)\n"); break; case PT_NUM: netdump_print("(PT_NUM)\n"); break; case PT_LOOS: netdump_print("(PT_LOOS)\n"); break; case PT_HIOS: netdump_print("(PT_HIOS)\n"); break; case PT_LOPROC: netdump_print("(PT_LOPROC)\n"); break; case PT_HIPROC: netdump_print("(PT_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" p_offset: %ld (%lx)\n", prog->p_offset, prog->p_offset); if (store_pt_load_data) pls->file_offset = prog->p_offset; netdump_print(" p_vaddr: %lx\n", prog->p_vaddr); netdump_print(" p_paddr: %lx\n", prog->p_paddr); if (store_pt_load_data) pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %lu (%lx)\n", prog->p_filesz, prog->p_filesz); if (store_pt_load_data) { pls->phys_end = pls->phys_start + prog->p_filesz; pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 0 : pls->phys_start + prog->p_memsz; } netdump_print(" p_memsz: %lu (%lx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; if (prog->p_flags & PF_X) netdump_print("PF_X", others++); if (prog->p_flags & PF_W) netdump_print("%sPF_W", others++ ? "|" : ""); if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? 
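/*
 * Editor's note -- illustrative only, not part of the original
 * source: the zero_fill value stored above marks the tail of a
 * segment that exists in memory but not in the file (p_memsz larger
 * than p_filesz):
 *
 *   phys_start                 phys_end             zero_fill
 *   |---- p_filesz in file ----|---- zeroed tail ----|
 *
 * read_netdump() satisfies reads that fall in [phys_end, zero_fill)
 * with memset(0) rather than file I/O.
 */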
"|" : ""); netdump_print(")\n"); netdump_print(" p_align: %ld\n", prog->p_align); } static void dump_Elf64_Phdr(Elf64_Phdr *prog, int store_pt_load_data) { int others; struct pt_load_segment *pls; if (store_pt_load_data) pls = &nd->pt_load_segments[store_pt_load_data-1]; else pls = NULL; if ((char *)prog > (nd->elf_header + nd->header_size)) error(FATAL, "Elf64_Phdr pointer: %lx ELF header end: %lx\n\n", (char *)prog, nd->elf_header + nd->header_size); netdump_print("Elf64_Phdr:\n"); netdump_print(" p_type: %lx ", prog->p_type); switch (prog->p_type) { case PT_NULL: netdump_print("(PT_NULL)\n"); break; case PT_LOAD: netdump_print("(PT_LOAD)\n"); break; case PT_DYNAMIC: netdump_print("(PT_DYNAMIC)\n"); break; case PT_INTERP: netdump_print("(PT_INTERP)\n"); break; case PT_NOTE: netdump_print("(PT_NOTE)\n"); break; case PT_SHLIB: netdump_print("(PT_SHLIB)\n"); break; case PT_PHDR: netdump_print("(PT_PHDR)\n"); break; case PT_NUM: netdump_print("(PT_NUM)\n"); break; case PT_LOOS: netdump_print("(PT_LOOS)\n"); break; case PT_HIOS: netdump_print("(PT_HIOS)\n"); break; case PT_LOPROC: netdump_print("(PT_LOPROC)\n"); break; case PT_HIPROC: netdump_print("(PT_HIPROC)\n"); break; default: netdump_print("(?)\n"); } netdump_print(" p_offset: %lld (%llx)\n", prog->p_offset, prog->p_offset); if (store_pt_load_data) pls->file_offset = prog->p_offset; netdump_print(" p_vaddr: %llx\n", prog->p_vaddr); netdump_print(" p_paddr: %llx\n", prog->p_paddr); if (store_pt_load_data) pls->phys_start = prog->p_paddr; netdump_print(" p_filesz: %llu (%llx)\n", prog->p_filesz, prog->p_filesz); if (store_pt_load_data) { pls->phys_end = pls->phys_start + prog->p_filesz; pls->zero_fill = (prog->p_filesz == prog->p_memsz) ? 0 : pls->phys_start + prog->p_memsz; } netdump_print(" p_memsz: %llu (%llx)\n", prog->p_memsz, prog->p_memsz); netdump_print(" p_flags: %lx (", prog->p_flags); others = 0; if (prog->p_flags & PF_X) netdump_print("PF_X", others++); if (prog->p_flags & PF_W) netdump_print("%sPF_W", others++ ? "|" : ""); if (prog->p_flags & PF_R) netdump_print("%sPF_R", others++ ? "|" : ""); netdump_print(")\n"); netdump_print(" p_align: %lld\n", prog->p_align); } static void dump_Elf64_Shdr(Elf64_Shdr *shdr) { netdump_print("Elf64_Shdr:\n"); netdump_print(" sh_name: %x\n", shdr->sh_name); netdump_print(" sh_type: %x ", shdr->sh_type); switch (shdr->sh_type) { case SHT_NULL: netdump_print("(SHT_NULL)\n"); break; default: netdump_print("\n"); break; } netdump_print(" sh_flags: %lx\n", shdr->sh_flags); netdump_print(" sh_addr: %lx\n", shdr->sh_addr); netdump_print(" sh_offset: %lx\n", shdr->sh_offset); netdump_print(" sh_size: %lx\n", shdr->sh_size); netdump_print(" sh_link: %x\n", shdr->sh_link); netdump_print(" sh_info: %x (%u)\n", shdr->sh_info, shdr->sh_info); netdump_print(" sh_addralign: %lx\n", shdr->sh_addralign); netdump_print(" sh_entsize: %lx\n", shdr->sh_entsize); } /* * VMCOREINFO * * This is a ELF note intented for makedumpfile that is exported by the * kernel that crashes and presented as ELF note to the /proc/vmcore * of the panic kernel. */ #define VMCOREINFO_NOTE_NAME "VMCOREINFO" #define VMCOREINFO_NOTE_NAME_BYTES (sizeof(VMCOREINFO_NOTE_NAME)) /* * Reads a string value from VMCOREINFO. * * Returns a string (that has to be freed by the caller) that contains the * value for key or NULL if the key has not been found. 
*/ static char * vmcoreinfo_read_string(const char *key) { int i, j, end; size_t value_length; size_t key_length = strlen(key); char *vmcoreinfo; uint size_vmcoreinfo; char *value = NULL; /* * Borrow this function for ELF vmcores created by the snap.so * extension module, where arch-specific data may be passed in * the NT_TASKSTRUCT note. */ if ((pc->flags2 & SNAP)) { if (STREQ(key, "NUMBER(kimage_voffset)") && nd->arch_data1) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%lx", nd->arch_data1); if (nd->arch_data2 == 0) pc->read_vmcoreinfo = no_vmcoreinfo; return value; } if (STREQ(key, "NUMBER(VA_BITS)") && nd->arch_data2) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%ld", nd->arch_data2 & 0xffffffff); return value; } if (STREQ(key, "NUMBER(tcr_el1_t1sz)") && nd->arch_data2) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%lld", ((ulonglong)nd->arch_data2 >> 32) & 0xffffffff); pc->read_vmcoreinfo = no_vmcoreinfo; return value; } if (STREQ(key, "relocate") && nd->arch_data1) { value = calloc(VADDR_PRLEN+1, sizeof(char)); sprintf(value, "%lx", nd->arch_data1); pc->read_vmcoreinfo = no_vmcoreinfo; return value; } return NULL; } if (nd->vmcoreinfo) { vmcoreinfo = (char *)nd->vmcoreinfo; size_vmcoreinfo = nd->size_vmcoreinfo; } else if (ACTIVE() && pkd->vmcoreinfo) { vmcoreinfo = (char *)pkd->vmcoreinfo; size_vmcoreinfo = pkd->size_vmcoreinfo; } else { vmcoreinfo = NULL; size_vmcoreinfo = 0; } if (!vmcoreinfo) return NULL; /* the '+ 1' is the equal sign */ for (i = 0; i < (int)(size_vmcoreinfo - key_length + 1); i++) { /* * We must also check if we're at the beginning of VMCOREINFO * or the separating newline is there, and of course if we * have a equal sign after the key. */ if ((strncmp(vmcoreinfo+i, key, key_length) == 0) && (i == 0 || vmcoreinfo[i-1] == '\n') && (vmcoreinfo[i+key_length] == '=')) { end = -1; /* Found -- search for the next newline. */ for (j = i + key_length + 1; j < size_vmcoreinfo; j++) { if (vmcoreinfo[j] == '\n') { end = j; break; } } /* * If we didn't find an end, we assume it's the end * of VMCOREINFO data. */ if (end == -1) { /* Point after the end. */ end = size_vmcoreinfo + 1; } value_length = end - (1+ i + key_length); value = calloc(value_length+1, sizeof(char)); if (value) strncpy(value, vmcoreinfo + i + key_length + 1, value_length); break; } } return value; } /* * Reads an integer value from VMCOREINFO. */ static long vmcoreinfo_read_integer(const char *key, long default_value) { char *string; long retval = default_value; string = vmcoreinfo_read_string(key); if (string) { retval = atol(string); free(string); } return retval; } void display_vmcoredd_note(void *ptr, FILE *ofp) { int sp; unsigned int dump_size; struct vmcoredd_header *vh; sp = VMCORE_VALID() ? 
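/*
 * Editor's sketch -- illustrative only, not part of the original
 * source: VMCOREINFO is newline-separated "key=value" text, e.g.
 *
 *   OSRELEASE=4.18.0
 *   PAGESIZE=4096
 *   SYMBOL(swapper_pg_dir)=ffffffff82a0a000
 *
 * (values made up), so the readers above are used like this:
 */
#ifdef NOTDEF
	char *release = vmcoreinfo_read_string("OSRELEASE");
	long psz = vmcoreinfo_read_integer("PAGESIZE", 4096);

	if (release)
		free(release);	/* caller owns the returned string */
#endif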
25 : 22; vh = (struct vmcoredd_header *)ptr; dump_size = vh->n_descsz - VMCOREDD_MAX_NAME_BYTES; fprintf(ofp, "%sname: \"%s\"\n", space(sp), vh->dump_name); fprintf(ofp, "%ssize: %u\n", space(sp), dump_size); } /* * Dump a note section header -- the actual data is defined by netdump */ static size_t dump_Elf32_Nhdr(Elf32_Off offset, int store) { int i, lf; Elf32_Nhdr *note; size_t len; char buf[BUFSIZE]; char *ptr; ulong *uptr; int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; uint64_t remaining, notesize; note = (Elf32_Nhdr *)((char *)nd->elf32 + offset); BZERO(buf, BUFSIZE); xen_core = vmcoreinfo = eraseinfo = qemuinfo = FALSE; ptr = (char *)note + sizeof(Elf32_Nhdr); if (ptr > (nd->elf_header + nd->header_size)) { error(WARNING, "Elf32_Nhdr pointer: %lx ELF header end: %lx\n", (char *)note, nd->elf_header + nd->header_size); return 0; } else remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { error(WARNING, "possibly corrupt Elf32_Nhdr: " "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", note->n_namesz, note->n_descsz, note->n_type, note->n_namesz || note->n_descsz || !remaining ? "\n" : ""); if (note->n_namesz || note->n_descsz || !remaining) return 0; } netdump_print("Elf32_Nhdr:\n"); netdump_print(" n_namesz: %ld ", note->n_namesz); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); netdump_print(" n_descsz: %ld\n", note->n_descsz); netdump_print(" n_type: %lx ", note->n_type); switch (note->n_type) { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); if (store) { if (!nd->nt_prstatus) nd->nt_prstatus = (void *)note; for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_prstatus_percpu[i]) { nd->nt_prstatus_percpu[i] = (void *)note; nd->num_prstatus_notes++; break; } } } if (machine_type("PPC") && (nd->num_prstatus_notes > 0)) pc->flags2 |= ELF_NOTES; break; case NT_PRPSINFO: netdump_print("(NT_PRPSINFO)\n"); if (store) nd->nt_prpsinfo = (void *)note; break; case NT_TASKSTRUCT: netdump_print("(NT_TASKSTRUCT)\n"); if (store) { nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); } break; case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); uptr = (ulong *)(ptr + note->n_namesz); if (*uptr && store) nd->flags |= PARTIAL_DUMP; break; #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. */ case NT_KDUMPINFO: netdump_print("(NT_KDUMPINFO)\n"); if (store) { uptr = (note->n_namesz == 5) ? 
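/*
 * Editor's note -- illustrative only, not part of the original
 * source: the "possibly corrupt" test near the top of
 * dump_Elf32_Nhdr() distrusts any note whose n_namesz is zero, or
 * whose n_namesz + n_descsz exceeds the bytes remaining in the
 * header buffer -- the walker never follows sizes it cannot satisfy.
 */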
(ulong *)(ptr + ((note->n_namesz + 3) & ~3)) : (ulong *)(ptr + note->n_namesz); nd->page_size = (uint)(1 << *uptr); uptr++; nd->task_struct = *uptr; } break; #endif case NT_VMCOREDD: netdump_print("(NT_VMCOREDD)\n"); if (store) { for (i = 0; i < NR_DEVICE_DUMPS; i++) { if (!nd->nt_vmcoredd_array[i]) { nd->nt_vmcoredd_array[i] = (void *)note; nd->num_vmcoredd_notes++; break; } } } break; default: xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); if (STRNEQ(buf, "VMCOREINFO_XEN")) vmcoreinfo_xen = TRUE; else vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); eraseinfo = STRNEQ(buf, "ERASEINFO"); qemuinfo = STRNEQ(buf, "QEMU"); if (xen_core) { netdump_print("(unknown Xen n_type)\n"); if (store) error(WARNING, "unknown Xen n_type: %lx\n\n", note->n_type); } else if (vmcoreinfo) { netdump_print("(unused)\n"); nd->vmcoreinfo = (char *)(ptr + note->n_namesz + 1); nd->size_vmcoreinfo = note->n_descsz; if (READ_PAGESIZE_FROM_VMCOREINFO() && store) nd->page_size = (uint) vmcoreinfo_read_integer("PAGESIZE", 0); pc->flags2 |= VMCOREINFO; } else if (eraseinfo) { netdump_print("(unused)\n"); if (note->n_descsz) pc->flags2 |= ERASEINFO_DATA; } else if (qemuinfo) { pc->flags2 |= QEMU_MEM_DUMP_ELF; netdump_print("(QEMUCPUState)\n"); } else if (vmcoreinfo_xen) netdump_print("(unused)\n"); else netdump_print("(?)\n"); break; case NT_XEN_KDUMP_CR3: netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); /* FALL THROUGH */ case XEN_ELFNOTE_CRASH_INFO: /* * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure */ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); xen_core = TRUE; if (store) process_xen_note(note->n_type, ptr + roundup(note->n_namesz, 4), note->n_descsz); break; case XEN_ELFNOTE_CRASH_REGS: /* * x86 and x86_64: cr0, cr2, cr3, cr4 */ xen_core = TRUE; netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); break; } uptr = (ulong *)(ptr + note->n_namesz); /* * kdumps are off-by-1, because their n_namesz is 5 for "CORE". */ if ((nd->flags & KDUMP_ELF32) && (note->n_namesz == 5)) uptr = (ulong *)(ptr + ((note->n_namesz + 3) & ~3)); if (xen_core) uptr = (ulong *)roundup((ulong)uptr, 4); if (store && qemuinfo) { for(i = 0; i < NR_CPUS; i++) { if (!nd->nt_qemu_percpu[i]) { nd->nt_qemu_percpu[i] = (void *)uptr; nd->num_qemu_notes++; break; } } } if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { netdump_print(" "); ptr += note->n_namesz + 1; for (i = 0; i < note->n_descsz; i++, ptr++) { netdump_print("%c", *ptr); if (*ptr == '\n') netdump_print(" "); } lf = 0; } else if (note->n_type == NT_VMCOREDD) { if (nd->ofp) display_vmcoredd_note(note, nd->ofp); } else { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); } } for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { if (((i%4)==0)) { netdump_print("%s ", i ? 
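/*
 * Editor's sketch -- illustrative only, not part of the original
 * source: ELF note records are 4-byte aligned, which is why every
 * note walker in this file advances by the same rounded-up length:
 */
#ifdef NOTDEF
	size_t note_len = sizeof(Elf32_Nhdr);
	note_len = roundup(note_len + note->n_namesz, 4);
	note_len = roundup(note_len + note->n_descsz, 4);
	/* e.g. the name "CORE" (n_namesz 5, with NUL) pads to 8 bytes */
#endif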
"\n" : ""); lf++; } else lf = 0; netdump_print("%08lx ", *uptr++); } } if (!lf || (note->n_type == NT_TASKSTRUCT) || (note->n_type == NT_DISKDUMP) || xen_core) netdump_print("\n"); len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); return len; } static size_t dump_Elf64_Nhdr(Elf64_Off offset, int store) { int i = 0, lf = 0; Elf64_Nhdr *note; size_t len; char buf[BUFSIZE]; char *ptr; ulonglong *uptr; int *iptr; int xen_core, vmcoreinfo, vmcoreinfo_xen, eraseinfo, qemuinfo; uint64_t remaining, notesize; note = (Elf64_Nhdr *)((char *)nd->elf64 + offset); BZERO(buf, BUFSIZE); ptr = (char *)note + sizeof(Elf64_Nhdr); xen_core = vmcoreinfo = vmcoreinfo_xen = eraseinfo = qemuinfo = FALSE; if (ptr > (nd->elf_header + nd->header_size)) { error(WARNING, "Elf64_Nhdr pointer: %lx ELF header end: %lx\n\n", (char *)note, nd->elf_header + nd->header_size); return 0; } else remaining = (uint64_t)((nd->elf_header + nd->header_size) - ptr); notesize = (uint64_t)note->n_namesz + (uint64_t)note->n_descsz; if ((note->n_namesz == 0) || !remaining || (notesize > remaining)) { error(WARNING, "possibly corrupt Elf64_Nhdr: " "n_namesz: %ld n_descsz: %ld n_type: %lx\n%s", note->n_namesz, note->n_descsz, note->n_type, note->n_namesz || note->n_descsz || !remaining ? "\n" : ""); if (note->n_namesz || note->n_descsz || !remaining) return 0; } netdump_print("Elf64_Nhdr:\n"); netdump_print(" n_namesz: %ld ", note->n_namesz); BCOPY(ptr, buf, note->n_namesz); netdump_print("(\"%s\")\n", buf); netdump_print(" n_descsz: %ld\n", note->n_descsz); netdump_print(" n_type: %lx ", note->n_type); switch (note->n_type) { case NT_PRSTATUS: netdump_print("(NT_PRSTATUS)\n"); if (store) { if (!nd->nt_prstatus) nd->nt_prstatus = (void *)note; for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_prstatus_percpu[i]) { nd->nt_prstatus_percpu[i] = (void *)note; nd->num_prstatus_notes++; break; } } } break; case NT_PRPSINFO: netdump_print("(NT_PRPSINFO)\n"); if (store) nd->nt_prpsinfo = (void *)note; break; case NT_FPREGSET: netdump_print("(NT_FPREGSET)\n"); break; case NT_S390_TIMER: netdump_print("(NT_S390_TIMER)\n"); break; case NT_S390_TODCMP: netdump_print("(NT_S390_TODCMP)\n"); break; case NT_S390_TODPREG: netdump_print("(NT_S390_TODPREG)\n"); break; case NT_S390_CTRS: netdump_print("(NT_S390_CTRS)\n"); break; case NT_S390_PREFIX: netdump_print("(NT_S390_PREFIX)\n"); break; case NT_S390_VXRS_LOW: netdump_print("(NT_S390_VXRS_LOW)\n"); break; case NT_S390_VXRS_HIGH: netdump_print("(NT_S390_VXRS_HIGH)\n"); break; case NT_TASKSTRUCT: netdump_print("(NT_TASKSTRUCT)\n"); if (STRNEQ(buf, "SNAP")) pc->flags2 |= (LIVE_DUMP|SNAP); if (store) { nd->nt_taskstruct = (void *)note; nd->task_struct = *((ulong *)(ptr + note->n_namesz)); if (pc->flags2 & SNAP) { if (note->n_descsz >= 16) nd->arch_data1 = *((ulong *) (ptr + note->n_namesz + sizeof(ulong))); if (note->n_descsz >= 24) nd->arch_data2 = *((ulong *) (ptr + note->n_namesz + sizeof(ulong) + sizeof(ulong))); } else if (machine_type("IA64")) nd->switch_stack = *((ulong *) (ptr + note->n_namesz + sizeof(ulong))); } break; case NT_DISKDUMP: netdump_print("(NT_DISKDUMP)\n"); iptr = (int *)(ptr + note->n_namesz); if (*iptr && store) nd->flags |= PARTIAL_DUMP; if (note->n_descsz < sizeof(ulonglong)) netdump_print(" %08x", *iptr); break; #ifdef NOTDEF /* * Note: Based upon the original, abandoned, proposal for * its contents -- keep around for potential future use. 
*/ case NT_KDUMPINFO: netdump_print("(NT_KDUMPINFO)\n"); if (store) { uint32_t *u32ptr; if (nd->elf64->e_machine == EM_386) { u32ptr = (note->n_namesz == 5) ? (uint *)(ptr + ((note->n_namesz + 3) & ~3)) : (uint *)(ptr + note->n_namesz); nd->page_size = 1 << *u32ptr; u32ptr++; nd->task_struct = *u32ptr; } else { uptr = (note->n_namesz == 5) ? (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)) : (ulonglong *)(ptr + note->n_namesz); nd->page_size = (uint)(1 << *uptr); uptr++; nd->task_struct = *uptr; } } break; #endif case NT_VMCOREDD: netdump_print("(NT_VMCOREDD)\n"); if (store) { for (i = 0; i < NR_DEVICE_DUMPS; i++) { if (!nd->nt_vmcoredd_array[i]) { nd->nt_vmcoredd_array[i] = (void *)note; nd->num_vmcoredd_notes++; break; } } } break; default: xen_core = STRNEQ(buf, "XEN CORE") || STRNEQ(buf, "Xen"); if (STRNEQ(buf, "VMCOREINFO_XEN")) vmcoreinfo_xen = TRUE; else vmcoreinfo = STRNEQ(buf, "VMCOREINFO"); eraseinfo = STRNEQ(buf, "ERASEINFO"); qemuinfo = STRNEQ(buf, "QEMU"); if (xen_core) { netdump_print("(unknown Xen n_type)\n"); if (store) error(WARNING, "unknown Xen n_type: %lx\n\n", note->n_type); } else if (vmcoreinfo) { netdump_print("(unused)\n"); nd->vmcoreinfo = (char *)nd->elf64 + offset + (sizeof(Elf64_Nhdr) + ((note->n_namesz + 3) & ~3)); nd->size_vmcoreinfo = note->n_descsz; if (READ_PAGESIZE_FROM_VMCOREINFO() && store) nd->page_size = (uint) vmcoreinfo_read_integer("PAGESIZE", 0); pc->flags2 |= VMCOREINFO; } else if (eraseinfo) { netdump_print("(unused)\n"); if (note->n_descsz) pc->flags2 |= ERASEINFO_DATA; } else if (qemuinfo) { pc->flags2 |= QEMU_MEM_DUMP_ELF; netdump_print("(QEMUCPUState)\n"); } else if (vmcoreinfo_xen) netdump_print("(unused)\n"); else netdump_print("(?)\n"); break; case NT_XEN_KDUMP_CR3: netdump_print("(NT_XEN_KDUMP_CR3) [obsolete]\n"); /* FALL THROUGH */ case XEN_ELFNOTE_CRASH_INFO: /* * x86 and x86_64: p2m mfn appended to crash_xen_info_t structure */ if (note->n_type == XEN_ELFNOTE_CRASH_INFO) netdump_print("(XEN_ELFNOTE_CRASH_INFO)\n"); xen_core = TRUE; if (store) process_xen_note(note->n_type, ptr + roundup(note->n_namesz, 4), note->n_descsz); break; case XEN_ELFNOTE_CRASH_REGS: /* * x86 and x86_64: cr0, cr2, cr3, cr4 */ xen_core = TRUE; netdump_print("(XEN_ELFNOTE_CRASH_REGS)\n"); break; } if (machine_type("S390X")) { if (store) machdep->dumpfile_init(nd->num_prstatus_notes, note); uptr = (ulonglong *) ((void *)note + roundup(sizeof(*note) + note->n_namesz, 4)); } else { uptr = (ulonglong *)(ptr + note->n_namesz); /* * kdumps are off-by-1, because their n_namesz is 5 for "CORE". */ if ((nd->flags & KDUMP_ELF64) && (note->n_namesz == 5)) uptr = (ulonglong *)(ptr + ((note->n_namesz + 3) & ~3)); if (xen_core) uptr = (ulonglong *)roundup((ulong)uptr, 4); } if (store && qemuinfo) { for (i = 0; i < NR_CPUS; i++) { if (!nd->nt_qemu_percpu[i]) { nd->nt_qemu_percpu[i] = (void *)uptr; nd->num_qemu_notes++; break; } } } if (note->n_type == NT_VMCOREDD) { if (nd->ofp) display_vmcoredd_note(note, nd->ofp); } else if (BITS32() && (xen_core || (note->n_type == NT_PRSTATUS) || qemuinfo)) { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_386, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_386, QEMU_NOTE, note, nd->ofp); } } iptr = (int *)uptr; for (i = lf = 0; i < note->n_descsz/sizeof(ulong); i++) { if (((i%4)==0)) { netdump_print("%s ", i ?
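/*
 * Editor's sketch -- illustrative only, not part of the original
 * source: the "off-by-1" kdump adjustment above exists because kdump
 * writes n_namesz = 5 for "CORE" plus its NUL, while the register
 * data actually begins on the next 4-byte boundary; the two
 * expressions below differ only for such unaligned names:
 */
#ifdef NOTDEF
	ulonglong *raw = (ulonglong *)(ptr + note->n_namesz);
	ulonglong *aligned = (ulonglong *)(ptr +
		((note->n_namesz + 3) & ~3));	/* 5 rounds up to 8 */
#endif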
"\n" : ""); lf++; } else lf = 0; netdump_print("%08lx ", *iptr++); } } else if (vmcoreinfo || eraseinfo || vmcoreinfo_xen) { netdump_print(" "); ptr += note->n_namesz + 1; for (i = 0; i < note->n_descsz; i++, ptr++) { netdump_print("%c", *ptr); if (*ptr == '\n') netdump_print(" "); } lf = 0; } else if (note->n_descsz == 4) { i = 0; lf = 1; iptr = (int *)uptr; netdump_print(" %08lx\n", *iptr); } else { if (nd->ofp && !XEN_CORE_DUMPFILE() && !(pc->flags2 & LIVE_DUMP)) { if (machine_type("X86_64")) { if (note->n_type == NT_PRSTATUS) display_ELF_note(EM_X86_64, PRSTATUS_NOTE, note, nd->ofp); else if (qemuinfo) display_ELF_note(EM_X86_64, QEMU_NOTE, note, nd->ofp); } if (machine_type("PPC64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_PPC64, PRSTATUS_NOTE, note, nd->ofp); if (machine_type("ARM64") && (note->n_type == NT_PRSTATUS)) display_ELF_note(EM_AARCH64, PRSTATUS_NOTE, note, nd->ofp); } for (i = lf = 0; i < note->n_descsz/sizeof(ulonglong); i++) { if (((i%2)==0)) { netdump_print("%s ", i ? "\n" : ""); lf++; } else lf = 0; netdump_print("%016llx ", *uptr++); } } if (!lf) netdump_print("\n"); else if (i && (i&1)) netdump_print("\n"); len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); return len; } void * netdump_get_prstatus_percpu(int cpu) { int online; if ((cpu < 0) || (cpu >= nd->num_prstatus_notes)) return NULL; /* * If no cpu mapping was done, then there must be * a one-to-one relationship between the number * of online cpus and the number of notes. */ if ((online = get_cpus_online()) && (online == kt->cpus) && (online != nd->num_prstatus_notes)) return NULL; return nd->nt_prstatus_percpu[cpu]; } /* * Send the request to the proper architecture hander. */ void get_netdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { int e_machine; if (nd->elf32) e_machine = nd->elf32->e_machine; else if (nd->elf64) e_machine = nd->elf64->e_machine; else e_machine = EM_NONE; switch (e_machine) { case EM_386: return get_netdump_regs_x86(bt, eip, esp); break; case EM_IA_64: /* For normal backtraces, this information will be obtained * frome the switch_stack structure, which is pointed to by * the thread.ksp field of the task_struct. But it's still * needed by the "bt -t" option. */ machdep->get_stack_frame(bt, eip, esp); break; case EM_PPC: return get_netdump_regs_ppc(bt, eip, esp); break; case EM_PPC64: return get_netdump_regs_ppc64(bt, eip, esp); break; case EM_X86_64: return get_netdump_regs_x86_64(bt, eip, esp); break; case EM_S390: machdep->get_stack_frame(bt, eip, esp); break; case EM_ARM: return get_netdump_regs_arm(bt, eip, esp); break; case EM_AARCH64: return get_netdump_regs_arm64(bt, eip, esp); break; case EM_MIPS: return get_netdump_regs_mips(bt, eip, esp); break; default: error(FATAL, "support for ELF machine type %d not available\n", e_machine); } } /* * get regs from elf note, and return the address of user_regs. 
*/ static char * get_regs_from_note(char *note, ulong *ip, ulong *sp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t len; char *user_regs; long offset_sp, offset_ip; if (machine_type("X86_64")) { note64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); offset_sp = OFFSET(user_regs_struct_rsp); offset_ip = OFFSET(user_regs_struct_rip); } else if (machine_type("X86")) { note32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); offset_sp = OFFSET(user_regs_struct_esp); offset_ip = OFFSET(user_regs_struct_eip); } else return NULL; user_regs = note + len - SIZE(user_regs_struct) - sizeof(long); *sp = ULONG(user_regs + offset_sp); *ip = ULONG(user_regs + offset_ip); return user_regs; } void display_regs_from_elf_notes(int cpu, FILE *ofp) { Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t len; char *user_regs; int c, skipped_count; /* * Kdump NT_PRSTATUS notes are only related to online cpus, * so offline cpus should be skipped. */ if (pc->flags2 & QEMU_MEM_DUMP_ELF) skipped_count = 0; else { for (c = skipped_count = 0; c < cpu; c++) { if (check_offline_cpu(c)) skipped_count++; } } if ((cpu - skipped_count) >= nd->num_prstatus_notes && !machine_type("MIPS")) { error(INFO, "registers not collected for cpu %d\n", cpu); return; } if (machine_type("X86_64")) { if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); user_regs = ((char *)note64) + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " RIP: %016llx RSP: %016llx RFLAGS: %08llx\n" " RAX: %016llx RBX: %016llx RCX: %016llx\n" " RDX: %016llx RSI: %016llx RDI: %016llx\n" " RBP: %016llx R8: %016llx R9: %016llx\n" " R10: %016llx R11: %016llx R12: %016llx\n" " R13: %016llx R14: %016llx R15: %016llx\n" " CS: %04x SS: %04x\n", ULONGLONG(user_regs + OFFSET(user_regs_struct_rip)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_eflags)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rax)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rcx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdx)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rsi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rdi)), ULONGLONG(user_regs + OFFSET(user_regs_struct_rbp)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r8)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r9)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r10)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r11)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r12)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r13)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r14)), ULONGLONG(user_regs + OFFSET(user_regs_struct_r15)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)) ); } else if (machine_type("X86")) { if (nd->num_prstatus_notes > 1) note32 = (Elf32_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note32 = (Elf32_Nhdr *)nd->nt_prstatus; len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); user_regs = ((char *)note32) + len - SIZE(user_regs_struct) - sizeof(long); fprintf(ofp, " EAX: %08x EBX: %08x ECX: %08x EDX: %08x\n" " ESP: %08x EIP: %08x ESI: %08x EDI: 
%08x\n" " CS: %04x DS: %04x ES: %04x FS: %04x\n" " GS: %04x SS: %04x\n" " EBP: %08x EFLAGS: %08x\n", UINT(user_regs + OFFSET(user_regs_struct_eax)), UINT(user_regs + OFFSET(user_regs_struct_ebx)), UINT(user_regs + OFFSET(user_regs_struct_ecx)), UINT(user_regs + OFFSET(user_regs_struct_edx)), UINT(user_regs + OFFSET(user_regs_struct_esp)), UINT(user_regs + OFFSET(user_regs_struct_eip)), UINT(user_regs + OFFSET(user_regs_struct_esi)), UINT(user_regs + OFFSET(user_regs_struct_edi)), USHORT(user_regs + OFFSET(user_regs_struct_cs)), USHORT(user_regs + OFFSET(user_regs_struct_ds)), USHORT(user_regs + OFFSET(user_regs_struct_es)), USHORT(user_regs + OFFSET(user_regs_struct_fs)), USHORT(user_regs + OFFSET(user_regs_struct_gs)), USHORT(user_regs + OFFSET(user_regs_struct_ss)), UINT(user_regs + OFFSET(user_regs_struct_ebp)), UINT(user_regs + OFFSET(user_regs_struct_eflags)) ); } else if (machine_type("PPC64")) { struct ppc64_elf_prstatus *prs; struct ppc64_pt_regs *pr; if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *)nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; prs = (struct ppc64_elf_prstatus *) ((char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz); prs = (struct ppc64_elf_prstatus *)roundup((ulong)prs, 4); pr = &prs->pr_reg; fprintf(ofp, " R0: %016lx R1: %016lx R2: %016lx\n" " R3: %016lx R4: %016lx R5: %016lx\n" " R6: %016lx R7: %016lx R8: %016lx\n" " R9: %016lx R10: %016lx R11: %016lx\n" " R12: %016lx R13: %016lx R14: %016lx\n" " R15: %016lx R16: %016lx R17: %016lx\n" " R18: %016lx R19: %016lx R20: %016lx\n" " R21: %016lx R22: %016lx R23: %016lx\n" " R24: %016lx R25: %016lx R26: %016lx\n" " R27: %016lx R28: %016lx R29: %016lx\n" " R30: %016lx R31: %016lx\n" " NIP: %016lx MSR: %016lx\n" " OGPR3: %016lx CTR: %016lx\n" " LINK: %016lx XER: %016lx\n" " CCR: %016lx MQ: %016lx\n" " TRAP: %016lx DAR: %016lx\n" " DSISR: %016lx RESULT: %016lx\n", pr->gpr[0], pr->gpr[1], pr->gpr[2], pr->gpr[3], pr->gpr[4], pr->gpr[5], pr->gpr[6], pr->gpr[7], pr->gpr[8], pr->gpr[9], pr->gpr[10], pr->gpr[11], pr->gpr[12], pr->gpr[13], pr->gpr[14], pr->gpr[15], pr->gpr[16], pr->gpr[17], pr->gpr[18], pr->gpr[19], pr->gpr[20], pr->gpr[21], pr->gpr[22], pr->gpr[23], pr->gpr[24], pr->gpr[25], pr->gpr[26], pr->gpr[27], pr->gpr[28], pr->gpr[29], pr->gpr[30], pr->gpr[31], pr->nip, pr->msr, pr->orig_gpr3, pr->ctr, pr->link, pr->xer, pr->ccr, pr->mq, pr->trap, pr->dar, pr->dsisr, pr->result); } else if (machine_type("ARM64")) { if (nd->num_prstatus_notes > 1) note64 = (Elf64_Nhdr *) nd->nt_prstatus_percpu[cpu]; else note64 = (Elf64_Nhdr *)nd->nt_prstatus; len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); user_regs = (char *)note64 + len - SIZE(elf_prstatus) + OFFSET(elf_prstatus_pr_reg); fprintf(ofp, " X0: %016lx X1: %016lx X2: %016lx\n" " X3: %016lx X4: %016lx X5: %016lx\n" " X6: %016lx X7: %016lx X8: %016lx\n" " X9: %016lx X10: %016lx X11: %016lx\n" " X12: %016lx X13: %016lx X14: %016lx\n" " X15: %016lx X16: %016lx X17: %016lx\n" " X18: %016lx X19: %016lx X20: %016lx\n" " X21: %016lx X22: %016lx X23: %016lx\n" " X24: %016lx X25: %016lx X26: %016lx\n" " X27: %016lx X28: %016lx X29: %016lx\n" " LR: %016lx SP: %016lx PC: %016lx\n" " PSTATE: %08lx FPVALID: %08x\n", ULONG(user_regs + sizeof(ulong) * 0), ULONG(user_regs + sizeof(ulong) * 1), ULONG(user_regs + sizeof(ulong) * 2), ULONG(user_regs + sizeof(ulong) * 3), ULONG(user_regs + sizeof(ulong) * 4), ULONG(user_regs + sizeof(ulong) * 5), ULONG(user_regs + sizeof(ulong) * 6), ULONG(user_regs
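/*
 * Editor's note -- illustrative only, not part of the original
 * source: the sizeof(ulong) * N indexing walks the arm64 pt_regs
 * image inside pr_reg: regs[0..30] occupy slots 0-30 (x30 printed as
 * LR), followed by sp (31), pc (32) and pstate (33), with pr_fpvalid
 * read as an int from slot 34.
 */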
+ sizeof(ulong) * 7), ULONG(user_regs + sizeof(ulong) * 8), ULONG(user_regs + sizeof(ulong) * 9), ULONG(user_regs + sizeof(ulong) * 10), ULONG(user_regs + sizeof(ulong) * 11), ULONG(user_regs + sizeof(ulong) * 12), ULONG(user_regs + sizeof(ulong) * 13), ULONG(user_regs + sizeof(ulong) * 14), ULONG(user_regs + sizeof(ulong) * 15), ULONG(user_regs + sizeof(ulong) * 16), ULONG(user_regs + sizeof(ulong) * 17), ULONG(user_regs + sizeof(ulong) * 18), ULONG(user_regs + sizeof(ulong) * 19), ULONG(user_regs + sizeof(ulong) * 20), ULONG(user_regs + sizeof(ulong) * 21), ULONG(user_regs + sizeof(ulong) * 22), ULONG(user_regs + sizeof(ulong) * 23), ULONG(user_regs + sizeof(ulong) * 24), ULONG(user_regs + sizeof(ulong) * 25), ULONG(user_regs + sizeof(ulong) * 26), ULONG(user_regs + sizeof(ulong) * 27), ULONG(user_regs + sizeof(ulong) * 28), ULONG(user_regs + sizeof(ulong) * 29), ULONG(user_regs + sizeof(ulong) * 30), ULONG(user_regs + sizeof(ulong) * 31), ULONG(user_regs + sizeof(ulong) * 32), ULONG(user_regs + sizeof(ulong) * 33), UINT(user_regs + sizeof(ulong) * 34)); } else if (machine_type("MIPS")) { mips_display_regs_from_elf_notes(cpu, ofp); } } void dump_registers_for_elf_dumpfiles(void) { int c; if (!(machine_type("X86") || machine_type("X86_64") || machine_type("ARM64") || machine_type("PPC64") || machine_type("MIPS"))) error(FATAL, "-r option not supported for this dumpfile\n"); if (NETDUMP_DUMPFILE()) { display_regs_from_elf_notes(0, fp); return; } for (c = 0; c < kt->cpus; c++) { if (check_offline_cpu(c)) { fprintf(fp, "%sCPU %d: [OFFLINE]\n", c ? "\n" : "", c); continue; } fprintf(fp, "%sCPU %d:\n", c ? "\n" : "", c); display_regs_from_elf_notes(c, fp); } } struct x86_64_user_regs_struct { unsigned long r15,r14,r13,r12,rbp,rbx,r11,r10; unsigned long r9,r8,rax,rcx,rdx,rsi,rdi,orig_rax; unsigned long rip,cs,eflags; unsigned long rsp,ss; unsigned long fs_base, gs_base; unsigned long ds,es,fs,gs; }; struct x86_64_prstatus { int si_signo; int si_code; int si_errno; short cursig; unsigned long sigpend; unsigned long sighold; int pid; int ppid; int pgrp; int sid; struct timeval utime; struct timeval stime; struct timeval cutime; struct timeval cstime; struct x86_64_user_regs_struct regs; int fpvalid; }; static void display_prstatus_x86_64(void *note_ptr, FILE *ofp) { struct x86_64_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct x86_64_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct x86_64_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%sORIG_RAX: %lx fpvalid: %d\n" "%s R15: %016lx R14: %016lx\n" "%s R13: %016lx R12: %016lx\n" "%s RBP: %016lx RBX: %016lx\n" "%s R11: %016lx R10: %016lx\n" "%s R9: %016lx R8: %016lx\n" "%s RAX: %016lx RCX: %016lx\n" "%s RDX: %016lx RSI: %016lx\n" "%s RDI: %016lx RIP: %016lx\n" "%s RFLAGS: %016lx RSP: %016lx\n" "%s FS_BASE: %016lx\n" "%s GS_BASE: %016lx\n" "%s CS: %04lx SS: %04lx DS: %04lx\n" "%s ES: %04lx FS: %04lx GS: %04lx\n", space(sp), pr->si_signo, pr->si_code, pr->si_errno, space(sp), pr->cursig, pr->sigpend, pr->sighold, space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, space(sp), pr->regs.orig_rax, pr->fpvalid, space(sp), pr->regs.r15, pr->regs.r14, space(sp), pr->regs.r13, pr->regs.r12, space(sp), pr->regs.rbp, pr->regs.rbx, space(sp), pr->regs.r11, pr->regs.r10, space(sp), pr->regs.r9, pr->regs.r8, space(sp), pr->regs.rax, pr->regs.rcx, space(sp), pr->regs.rdx, pr->regs.rsi, space(sp), pr->regs.rdi, pr->regs.rip, space(sp), pr->regs.eflags, pr->regs.rsp, space(sp), pr->regs.fs_base, space(sp), pr->regs.gs_base, space(sp), pr->regs.cs, pr->regs.ss, pr->regs.ds, space(sp), pr->regs.es, pr->regs.fs, pr->regs.gs); } struct x86_user_regs_struct { unsigned long ebx,ecx,edx,esi,edi,ebp,eax; unsigned long ds,es,fs,gs,orig_eax; unsigned long eip,cs,eflags; unsigned long esp,ss; }; struct x86_prstatus { int si_signo; int si_code; int si_errno; short cursig; unsigned long sigpend; unsigned long sighold; int pid; int ppid; int pgrp; int sid; struct timeval utime; struct timeval stime; struct timeval cutime; struct timeval cstime; struct x86_user_regs_struct regs; int fpvalid; }; static void display_prstatus_x86(void *note_ptr, FILE *ofp) { struct x86_prstatus *pr; Elf32_Nhdr *note; int sp; note = (Elf32_Nhdr *)note_ptr; pr = (struct x86_prstatus *)( (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); pr = (struct x86_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 
25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold : %lx\n" "%spid: %d ppid: %d pgrp: %d sid: %d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%sORIG_EAX: %lx fpvalid: %d\n" "%s EBX: %08lx ECX: %08lx\n" "%s EDX: %08lx ESI: %08lx\n" "%s EDI: %08lx EBP: %08lx\n" "%s EAX: %08lx EIP: %08lx\n" "%s EFLAGS: %08lx ESP: %08lx\n" "%s DS: %04lx ES: %04lx FS: %04lx\n" "%s GS: %04lx CS: %04lx SS: %04lx\n", space(sp), pr->si_signo, pr->si_code, pr->si_errno, space(sp), pr->cursig, pr->sigpend, pr->sighold, space(sp), pr->pid, pr->ppid, pr->pgrp, pr->sid, space(sp), (long long)pr->utime.tv_sec, (int)pr->utime.tv_usec, (long long)pr->stime.tv_sec, (int)pr->stime.tv_usec, space(sp), (long long)pr->cutime.tv_sec, (int)pr->cutime.tv_usec, (long long)pr->cstime.tv_sec, (int)pr->cstime.tv_usec, space(sp), pr->regs.orig_eax, pr->fpvalid, space(sp), pr->regs.ebx, pr->regs.ecx, space(sp), pr->regs.edx, pr->regs.esi, space(sp), pr->regs.edi, pr->regs.ebp, space(sp), pr->regs.eax, pr->regs.eip, space(sp), pr->regs.eflags, pr->regs.esp, space(sp), pr->regs.ds, pr->regs.es, pr->regs.fs, space(sp), pr->regs.gs, pr->regs.cs, pr->regs.ss); } static void display_qemu_x86_64(void *note_ptr, FILE *ofp) { int i, sp; Elf64_Nhdr *note; QEMUCPUState *ptr; QEMUCPUSegment *seg; char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", "GDT", "IDT"}; note = (Elf64_Nhdr *)note_ptr; ptr = (QEMUCPUState *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); seg = &(ptr->cs); sp = VMCORE_VALID()? 25 : 22; fprintf(ofp, "%sversion: %d size: %d\n" "%sRAX: %016llx RBX: %016llx\n" "%sRCX: %016llx RDX: %016llx\n" "%sRSI: %016llx RDI: %016llx\n" "%sRSP: %016llx RBP: %016llx\n" "%sRIP: %016llx RFLAGS: %016llx\n" "%s R8: %016llx R9: %016llx\n" "%sR10: %016llx R11: %016llx\n" "%sR12: %016llx R13: %016llx\n" "%sR14: %016llx R15: %016llx\n", space(sp), ptr->version, ptr->size, space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags, space(sp), (ulonglong)ptr->r8, (ulonglong)ptr->r9, space(sp), (ulonglong)ptr->r10, (ulonglong)ptr->r11, space(sp), (ulonglong)ptr->r12, (ulonglong)ptr->r13, space(sp), (ulonglong)ptr->r14, (ulonglong)ptr->r15); for (i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? "" : " "); fprintf(ofp, "%s: " "selector: %04x limit: %08x flags: %08x\n" "%spad: %08x base: %016llx\n", seg_names[i], seg->selector, seg->limit, seg->flags, space(sp+5), seg->pad, (ulonglong)seg->base); seg++; } fprintf(ofp, "%sCR0: %016llx CR1: %016llx\n" "%sCR2: %016llx CR3: %016llx\n" "%sCR4: %016llx\n", space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], space(sp), (ulonglong)ptr->cr[4]); } static void display_qemu_x86(void *note_ptr, FILE *ofp) { int i, sp; Elf32_Nhdr *note; QEMUCPUState *ptr; QEMUCPUSegment *seg; char *seg_names[] = {"CS", "DS", "ES", "FS", "GS", "SS", "LDT", "TR", "GDT", "IDT"}; note = (Elf32_Nhdr *)note_ptr; ptr = (QEMUCPUState *)( (char *)note + sizeof(Elf32_Nhdr) + note->n_namesz); ptr = (QEMUCPUState *)roundup((ulong)ptr, 4); seg = &(ptr->cs); sp = VMCORE_VALID()? 
25 : 22; fprintf(ofp, "%sversion: %d size: %d\n" "%sEAX: %016llx EBX: %016llx\n" "%sECX: %016llx EDX: %016llx\n" "%sESI: %016llx EDI: %016llx\n" "%sESP: %016llx EBP: %016llx\n" "%sEIP: %016llx EFLAGS: %016llx\n", space(sp), ptr->version, ptr->size, space(sp), (ulonglong)ptr->rax, (ulonglong)ptr->rbx, space(sp), (ulonglong)ptr->rcx, (ulonglong)ptr->rdx, space(sp), (ulonglong)ptr->rsi, (ulonglong)ptr->rdi, space(sp), (ulonglong)ptr->rsp, (ulonglong)ptr->rbp, space(sp), (ulonglong)ptr->rip, (ulonglong)ptr->rflags); for(i = 0; i < sizeof(seg_names)/sizeof(seg_names[0]); i++) { fprintf(ofp, "%s%s", space(sp), strlen(seg_names[i]) > 2 ? "" : " "); fprintf(ofp, "%s: " "selector: %04x limit: %08x flags: %08x\n" "%spad: %08x base: %016llx\n", seg_names[i], seg->selector, seg->limit, seg->flags, space(sp+5), seg->pad, (ulonglong)seg->base); seg++; } fprintf(ofp, "%sCR0: %016llx CR1: %016llx\n" "%sCR2: %016llx CR3: %016llx\n" "%sCR4: %016llx\n", space(sp), (ulonglong)ptr->cr[0], (ulonglong)ptr->cr[1], space(sp), (ulonglong)ptr->cr[2], (ulonglong)ptr->cr[3], space(sp), (ulonglong)ptr->cr[4]); } static void display_prstatus_ppc64(void *note_ptr, FILE *ofp) { struct ppc64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct ppc64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct ppc64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n" "%s R0: %016lx R1: %016lx R2: %016lx\n" "%s R3: %016lx R4: %016lx R5: %016lx\n" "%s R6: %016lx R7: %016lx R8: %016lx\n" "%s R9: %016lx R10: %016lx R11: %016lx\n" "%sR12: %016lx R13: %016lx R14: %016lx\n" "%sR15: %016lx R16: %016lx R17: %016lx\n" "%sR18: %016lx R19: %016lx R20: %016lx\n" "%sR21: %016lx R22: %016lx R23: %016lx\n" "%sR24: %016lx R25: %016lx R26: %016lx\n" "%sR27: %016lx R28: %016lx R29: %016lx\n" "%sR30: %016lx R31: %016lx\n" "%s NIP: %016lx MSR: %016lx\n" "%sOGPR3: %016lx CTR: %016lx\n" "%s LINK: %016lx XER: %016lx\n" "%s CCR: %016lx MQ: %016lx\n" "%s TRAP: %016lx DAR: %016lx\n" "%sDSISR: %016lx RESULT: %016lx\n", space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec, space(sp), pr->pr_reg.gpr[0], pr->pr_reg.gpr[1], pr->pr_reg.gpr[2], space(sp), pr->pr_reg.gpr[3], pr->pr_reg.gpr[4], pr->pr_reg.gpr[5], space(sp), pr->pr_reg.gpr[6], pr->pr_reg.gpr[7], pr->pr_reg.gpr[8], space(sp), pr->pr_reg.gpr[9], pr->pr_reg.gpr[10], pr->pr_reg.gpr[11], space(sp), pr->pr_reg.gpr[12], pr->pr_reg.gpr[13], pr->pr_reg.gpr[14], space(sp), pr->pr_reg.gpr[15], pr->pr_reg.gpr[16], pr->pr_reg.gpr[17], space(sp), pr->pr_reg.gpr[18], pr->pr_reg.gpr[19], pr->pr_reg.gpr[20], space(sp), pr->pr_reg.gpr[21], pr->pr_reg.gpr[22], pr->pr_reg.gpr[23], space(sp), pr->pr_reg.gpr[24], pr->pr_reg.gpr[25], pr->pr_reg.gpr[26], space(sp), pr->pr_reg.gpr[27], pr->pr_reg.gpr[28], pr->pr_reg.gpr[29], space(sp), pr->pr_reg.gpr[30], pr->pr_reg.gpr[31], space(sp), pr->pr_reg.nip, pr->pr_reg.msr, 
space(sp), pr->pr_reg.orig_gpr3, pr->pr_reg.ctr, space(sp), pr->pr_reg.link, pr->pr_reg.xer, space(sp), pr->pr_reg.ccr, pr->pr_reg.mq, space(sp), pr->pr_reg.trap, pr->pr_reg.dar, space(sp), pr->pr_reg.dsisr, pr->pr_reg.result); } struct arm64_elf_siginfo { int si_signo; int si_code; int si_errno; }; struct arm64_elf_prstatus { struct arm64_elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct timeval pr_utime; struct timeval pr_stime; struct timeval pr_cutime; struct timeval pr_cstime; /* arm64_elf_gregset_t pr_reg; -> typedef unsigned long [34] arm64_elf_gregset_t */ unsigned long pr_reg[34]; int pr_fpvalid; }; /* Note that the ARM64 elf_gregset_t includes the 31 numbered registers plus the sp, pc and pstate: typedef unsigned long [34] elf_gregset_t; struct pt_regs { union { struct user_pt_regs user_regs; struct { u64 regs[31]; u64 sp; u64 pc; u64 pstate; }; }; u64 orig_x0; u64 syscallno; } */ static void display_prstatus_arm64(void *note_ptr, FILE *ofp) { struct arm64_elf_prstatus *pr; Elf64_Nhdr *note; int sp; note = (Elf64_Nhdr *)note_ptr; pr = (struct arm64_elf_prstatus *)( (char *)note + sizeof(Elf64_Nhdr) + note->n_namesz); pr = (struct arm64_elf_prstatus *)roundup((ulong)pr, 4); sp = nd->num_prstatus_notes ? 25 : 22; fprintf(ofp, "%ssi.signo: %d si.code: %d si.errno: %d\n" "%scursig: %d sigpend: %lx sighold: %lx\n" "%spid: %d ppid: %d pgrp: %d sid:%d\n" "%sutime: %01lld.%06d stime: %01lld.%06d\n" "%scutime: %01lld.%06d cstime: %01lld.%06d\n", space(sp), pr->pr_info.si_signo, pr->pr_info.si_code, pr->pr_info.si_errno, space(sp), pr->pr_cursig, pr->pr_sigpend, pr->pr_sighold, space(sp), pr->pr_pid, pr->pr_ppid, pr->pr_pgrp, pr->pr_sid, space(sp), (long long)pr->pr_utime.tv_sec, (int)pr->pr_utime.tv_usec, (long long)pr->pr_stime.tv_sec, (int)pr->pr_stime.tv_usec, space(sp), (long long)pr->pr_cutime.tv_sec, (int)pr->pr_cutime.tv_usec, (long long)pr->pr_cstime.tv_sec, (int)pr->pr_cstime.tv_usec); fprintf(ofp, "%s X0: %016lx X1: %016lx X2: %016lx\n" "%s X3: %016lx X4: %016lx X5: %016lx\n" "%s X6: %016lx X7: %016lx X8: %016lx\n" "%s X9: %016lx X10: %016lx X11: %016lx\n" "%sX12: %016lx X13: %016lx X14: %016lx\n" "%sX15: %016lx X16: %016lx X17: %016lx\n" "%sX18: %016lx X19: %016lx X20: %016lx\n" "%sX21: %016lx X22: %016lx X23: %016lx\n" "%sX24: %016lx X25: %016lx X26: %016lx\n" "%sX27: %016lx X28: %016lx X29: %016lx\n" "%s LR: %016lx SP: %016lx PC: %016lx\n" "%sPSTATE: %08lx FPVALID: %08x\n", space(sp), pr->pr_reg[0], pr->pr_reg[1], pr->pr_reg[2], space(sp), pr->pr_reg[3], pr->pr_reg[4], pr->pr_reg[5], space(sp), pr->pr_reg[6], pr->pr_reg[7], pr->pr_reg[8], space(sp), pr->pr_reg[9], pr->pr_reg[10], pr->pr_reg[11], space(sp), pr->pr_reg[12], pr->pr_reg[13], pr->pr_reg[14], space(sp), pr->pr_reg[15], pr->pr_reg[16], pr->pr_reg[17], space(sp), pr->pr_reg[18], pr->pr_reg[19], pr->pr_reg[20], space(sp), pr->pr_reg[21], pr->pr_reg[22], pr->pr_reg[23], space(sp), pr->pr_reg[24], pr->pr_reg[25], pr->pr_reg[26], space(sp), pr->pr_reg[27], pr->pr_reg[28], pr->pr_reg[29], space(sp), pr->pr_reg[30], pr->pr_reg[31], pr->pr_reg[32], space(sp), pr->pr_reg[33], pr->pr_fpvalid); } void display_ELF_note(int machine, int type, void *note, FILE *ofp) { if (note == NULL) return; switch (machine) { case EM_386: switch (type) { case PRSTATUS_NOTE: display_prstatus_x86(note, ofp); break; case QEMU_NOTE: display_qemu_x86(note, ofp); break; } break; case EM_X86_64: switch (type) { case PRSTATUS_NOTE: 
display_prstatus_x86_64(note, ofp); break; case QEMU_NOTE: display_qemu_x86_64(note, ofp); break; } break; case EM_PPC64: switch (type) { case PRSTATUS_NOTE: display_prstatus_ppc64(note, ofp); break; } break; case EM_AARCH64: switch (type) { case PRSTATUS_NOTE: display_prstatus_arm64(note, ofp); break; } break; default: return; } } void get_netdump_regs_x86_64(struct bt_info *bt, ulong *ripp, ulong *rspp) { Elf64_Nhdr *note; size_t len; char *user_regs; ulong regs_size, rsp_offset, rip_offset; ulong rip, rsp; if (is_task_active(bt->task)) bt->flags |= BT_DUMPFILE_SEARCH; if (((NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) && VALID_STRUCT(user_regs_struct) && ((bt->task == tt->panic_task) || (pc->flags2 & QEMU_MEM_DUMP_ELF))) || (KDUMP_DUMPFILE() && (kt->flags & DWARF_UNWIND) && (bt->flags & BT_DUMPFILE_SEARCH))) { if (nd->num_prstatus_notes > 1) note = (Elf64_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); len = roundup(len + note->n_descsz, 4); regs_size = VALID_STRUCT(user_regs_struct) ? SIZE(user_regs_struct) : sizeof(struct x86_64_user_regs_struct); rsp_offset = VALID_MEMBER(user_regs_struct_rsp) ? OFFSET(user_regs_struct_rsp) : offsetof(struct x86_64_user_regs_struct, rsp); rip_offset = VALID_MEMBER(user_regs_struct_rip) ? OFFSET(user_regs_struct_rip) : offsetof(struct x86_64_user_regs_struct, rip); user_regs = ((char *)note + len) - regs_size - sizeof(long); rsp = ULONG(user_regs + rsp_offset); rip = ULONG(user_regs + rip_offset); if (INSTACK(rsp, bt) || in_alternate_stack(bt->tc->processor, rsp)) { if (CRASHDEBUG(1)) netdump_print("ELF prstatus rsp: %lx rip: %lx\n", rsp, rip); if (KDUMP_DUMPFILE()) { *rspp = rsp; *ripp = rip; if (*ripp && *rspp) bt->flags |= BT_KDUMP_ELF_REGS; } bt->machdep = (void *)user_regs; } } if (ELF_NOTES_VALID() && (bt->flags & BT_DUMPFILE_SEARCH) && DISKDUMP_DUMPFILE() && (note = (Elf64_Nhdr *) diskdump_get_prstatus_percpu(bt->tc->processor))) { if (!note) goto no_nt_prstatus_exists; user_regs = get_regs_from_note((char *)note, &rip, &rsp); if (INSTACK(rsp, bt) || in_alternate_stack(bt->tc->processor, rsp)) { if (CRASHDEBUG(1)) netdump_print("ELF prstatus rsp: %lx rip: %lx\n", rsp, rip); *rspp = rsp; *ripp = rip; if (*ripp && *rspp) bt->flags |= BT_KDUMP_ELF_REGS; bt->machdep = (void *)user_regs; } } no_nt_prstatus_exists: machdep->get_stack_frame(bt, ripp, rspp); } /* * Netdump doesn't save state of the active tasks in the TSS, so poke around * the raw stack for some reasonable hooks. */ void get_netdump_regs_x86(struct bt_info *bt, ulong *eip, ulong *esp) { int i, search, panic, panic_task, altered; char *sym; ulong *up; ulong ipintr_eip, ipintr_esp, ipintr_func; ulong halt_eip, halt_esp, panic_eip, panic_esp; int check_hardirq, check_softirq; ulong stackbase, stacktop; Elf32_Nhdr *note; char *user_regs ATTRIBUTE_UNUSED; ulong ip, sp; if (!is_task_active(bt->task)) { machdep->get_stack_frame(bt, eip, esp); return; } panic_task = tt->panic_task == bt->task ? TRUE : FALSE; ipintr_eip = ipintr_esp = ipintr_func = panic = altered = 0; halt_eip = halt_esp = panic_eip = panic_esp = 0; check_hardirq = check_softirq = tt->flags & IRQSTACKS ? 
TRUE : FALSE; search = ((bt->flags & BT_TEXT_SYMBOLS) && (tt->flags & TASK_INIT_DONE)) || (machdep->flags & OMIT_FRAME_PTR); stackbase = bt->stackbase; stacktop = bt->stacktop; retry: for (i = 0, up = (ulong *)bt->stackbuf; i < LONGS_PER_STACK; i++, up++){ sym = closest_symbol(*up); if (XEN_CORE_DUMPFILE()) { if (STREQ(sym, "xen_machine_kexec")) { *eip = *up; *esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); return; } if (STREQ(sym, "crash_kexec")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } } else if (STREQ(sym, "netconsole_netdump") || STREQ(sym, "netpoll_start_netdump") || STREQ(sym, "start_disk_dump") || (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) || STREQ(sym, "disk_dump")) { crash_kexec: *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "panic")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); panic_eip = *eip; panic_esp = *esp; panic = TRUE; continue; /* keep looking for die */ } if (STREQ(sym, "die")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "sysrq_handle_crash")) goto next_sysrq; } return; } if (STREQ(sym, "sysrq_handle_crash")) { next_sysrq: *eip = *up; *esp = bt->stackbase + ((char *)(up+4) - bt->stackbuf); pc->flags |= SYSRQ; for (i++, up++; i < LONGS_PER_STACK; i++, up++) { sym = closest_symbol(*up); if (STREQ(sym, "crash_kexec") && !KVMDUMP_DUMPFILE()) goto crash_kexec; if (STREQ(sym, "sysrq_handle_crash")) goto next_sysrq; } if (!panic) return; } /* * Obsolete -- replaced by sysrq_handle_crash */ if (STREQ(sym, "sysrq_handle_netdump")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); pc->flags |= SYSRQ; return; } if (STREQ(sym, "crash_nmi_callback")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "stop_this_cpu")) { *eip = *up; *esp = search ? bt->stackbase + ((char *)(up+1) - bt->stackbuf) : *(up-1); return; } if (STREQ(sym, "smp_call_function_interrupt")) { if (ipintr_eip && IS_VMALLOC_ADDR(ipintr_func) && IS_KERNEL_STATIC_TEXT(*(up - 2))) continue; ipintr_eip = *up; ipintr_esp = search ? 
bt->stackbase + ((char *)(up+1) - bt->stackbuf) : bt->stackbase + ((char *)(up-1) - bt->stackbuf); ipintr_func = *(up - 2); } if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && STREQ(sym, "safe_halt")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } if (XEN_CORE_DUMPFILE() && !panic_task && (bt->tc->pid == 0) && !halt_eip && STREQ(sym, "xen_idle")) { halt_eip = *up; halt_esp = bt->stackbase + ((char *)(up+1) - bt->stackbuf); } } if (panic) { *eip = panic_eip; *esp = panic_esp; return; } if (ipintr_eip) { *eip = ipintr_eip; *esp = ipintr_esp; return; } if (halt_eip && halt_esp) { *eip = halt_eip; *esp = halt_esp; return; } bt->flags &= ~(BT_HARDIRQ|BT_SOFTIRQ); if (check_hardirq && (tt->hardirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->hardirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_HARDIRQ; check_hardirq = FALSE; altered = TRUE; goto retry; } if (check_softirq && (tt->softirq_tasks[bt->tc->processor] == bt->tc->task)) { bt->stackbase = tt->softirq_ctx[bt->tc->processor]; bt->stacktop = bt->stackbase + STACKSIZE(); alter_stackbuf(bt); bt->flags |= BT_SOFTIRQ; check_softirq = FALSE; altered = TRUE; goto retry; } if (ELF_NOTES_VALID() && DISKDUMP_DUMPFILE() && (note = (Elf32_Nhdr *) diskdump_get_prstatus_percpu(bt->tc->processor))) { user_regs = get_regs_from_note((char *)note, &ip, &sp); if (is_kernel_text(ip) && (((sp >= GET_STACKBASE(bt->task)) && (sp < GET_STACKTOP(bt->task))) || in_alternate_stack(bt->tc->processor, sp))) { bt->flags |= BT_KERNEL_SPACE; *eip = ip; *esp = sp; return; } if (!is_kernel_text(ip) && in_user_stack(bt->tc->task, sp)) { bt->flags |= BT_USER_SPACE; *eip = ip; *esp = sp; return; } } if (CRASHDEBUG(1)) error(INFO, "get_netdump_regs_x86: cannot find anything useful (task: %lx)\n", bt->task); if (altered) { bt->stackbase = stackbase; bt->stacktop = stacktop; alter_stackbuf(bt); } if (XEN_CORE_DUMPFILE() && !panic_task && is_task_active(bt->task) && !(bt->flags & (BT_TEXT_SYMBOLS_ALL|BT_TEXT_SYMBOLS))) error(FATAL, "starting backtrace locations of the active (non-crashing) " "xen tasks\n cannot be determined: try -t or -T options\n"); if (KVMDUMP_DUMPFILE() || SADUMP_DUMPFILE()) bt->flags &= ~(ulonglong)BT_DUMPFILE_SEARCH; machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_32(struct bt_info *bt, ulong *eip, ulong *esp) { Elf32_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) || (is_task_active(bt->task) && nd->num_prstatus_notes)) { /* * Registers are saved during the dump process for the * panic task. Whereas in kdump, regs are captured for all * CPUs if they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { if (!nd->nt_prstatus_percpu[bt->tc->processor]) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? 
"panic" : "active", bt->task); note = (Elf32_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; } else note = (Elf32_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } no_nt_prstatus_exists: machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_ppc(struct bt_info *bt, ulong *eip, ulong *esp) { ppc_relocate_nt_prstatus_percpu(nd->nt_prstatus_percpu, &nd->num_prstatus_notes); get_netdump_regs_32(bt, eip, esp); } static void get_netdump_regs_ppc64(struct bt_info *bt, ulong *eip, ulong *esp) { Elf64_Nhdr *note; size_t len; if ((bt->task == tt->panic_task) || (is_task_active(bt->task) && nd->num_prstatus_notes > 1)) { /* * Registers are saved during the dump process for the * panic task. Whereas in kdump, regs are captured for all * CPUs if they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { if (!nd->nt_prstatus_percpu[bt->tc->processor]) error(FATAL, "cannot determine NT_PRSTATUS ELF note " "for %s task: %lx\n", (bt->task == tt->panic_task) ? "panic" : "active", bt->task); note = (Elf64_Nhdr *) nd->nt_prstatus_percpu[bt->tc->processor]; } else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); bt->machdep = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); } no_nt_prstatus_exists: machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_arm(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_arm64(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } static void get_netdump_regs_mips(struct bt_info *bt, ulong *eip, ulong *esp) { machdep->get_stack_frame(bt, eip, esp); } int is_partial_netdump(void) { return (nd->flags & PARTIAL_DUMP ? TRUE : FALSE); } /* * kexec/kdump generated vmcore files are similar enough in * nature to netdump/diskdump such that most vmcore access * functionality may be borrowed from the equivalent netdump * function. If not, re-work them here. 
*/ int is_kdump(char *file, ulong source_query) { return is_netdump(file, source_query); } int kdump_init(char *unused, FILE *fptr) { return netdump_init(unused, fptr); } ulong get_kdump_panic_task(void) { return get_netdump_panic_task(); } int read_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { physaddr_t paddr_in = paddr; if ((nd->flags & QEMU_MEM_DUMP_KDUMP_BACKUP) && (paddr >= nd->backup_src_start) && (paddr < nd->backup_src_start + nd->backup_src_size)) { paddr += nd->backup_offset - nd->backup_src_start; if (CRASHDEBUG(1)) error(INFO, "qemu_mem_dump: kdump backup region: %#llx => %#llx\n", paddr_in, paddr); } if (XEN_CORE_DUMPFILE() && !XEN_HYPER_MODE()) { if ((paddr = xen_kdump_p2m(paddr)) == P2M_FAILURE) { if (CRASHDEBUG(8)) fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): " "P2M_FAILURE\n", (ulonglong)paddr_in); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_kdump: xen_kdump_p2m(%llx): %llx\n", (ulonglong)paddr_in, (ulonglong)paddr); } return read_netdump(fd, bufptr, cnt, addr, paddr); } int write_kdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return write_netdump(fd, bufptr, cnt, addr, paddr); } void get_kdump_regs(struct bt_info *bt, ulong *eip, ulong *esp) { get_netdump_regs(bt, eip, esp); } uint kdump_page_size(void) { uint pagesz; if (!VMCORE_VALID()) return 0; if (!(pagesz = nd->page_size)) pagesz = (uint)getpagesize(); return pagesz; } int kdump_free_memory(void) { return netdump_free_memory(); } int kdump_memory_used(void) { return netdump_memory_used(); } int kdump_memory_dump(FILE *fp) { return netdump_memory_dump(fp); } struct vmcore_data * get_kdump_vmcore_data(void) { if (!VMCORE_VALID() || !KDUMP_DUMPFILE()) return NULL; return &vmcore_data; } /* * The following set of functions are not used by the crash * source code, but are available to extension modules for * gathering register sets from ELF NT_PRSTATUS note sections. 
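 *
 * A minimal usage sketch from an extension module (illustrative only;
 * CURRENT_CONTEXT() is crash's current-task macro, and the layout of
 * the returned register block is architecture-specific):
 *
 *	struct task_context *tc = CURRENT_CONTEXT();
 *	void *pt_regs = NULL;
 *
 *	if (exist_regs_in_elf_notes(tc))
 *		pt_regs = get_regs_from_elf_notes(tc);
 *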
* * Contributed by: Sharyathi Nagesh (sharyath@in.ibm.com) */ static void *get_ppc_regs_from_elf_notes(struct task_context *); static void *get_ppc64_regs_from_elf_notes(struct task_context *); static void *get_x86_regs_from_elf_notes(struct task_context *); static void *get_x86_64_regs_from_elf_notes(struct task_context *); static void *get_arm_regs_from_elf_notes(struct task_context *); int get_netdump_arch(void) { int e_machine; if (nd->elf32) e_machine = nd->elf32->e_machine; else if (nd->elf64) e_machine = nd->elf64->e_machine; else e_machine = EM_NONE; return e_machine; } int exist_regs_in_elf_notes(struct task_context *tc) { if ((tc->task == tt->panic_task) || (is_task_active(tc->task) && (nd->num_prstatus_notes > 1) && (tc->processor < nd->num_prstatus_notes))) return TRUE; else return FALSE; } void * get_regs_from_elf_notes(struct task_context *tc) { int e_machine = get_netdump_arch(); switch (e_machine) { case EM_386: case EM_PPC: case EM_PPC64: case EM_X86_64: case EM_ARM: break; case EM_AARCH64: error(FATAL, "get_regs_from_elf_notes: ARM64 support TBD\n"); default: error(FATAL, "support for ELF machine type %d not available\n", e_machine); } if (!exist_regs_in_elf_notes(tc)) error(FATAL, "cannot determine register set " "for active task: %lx comm: \"%s\"\n", tc->task, tc->comm); switch(e_machine) { case EM_386: return get_x86_regs_from_elf_notes(tc); case EM_PPC: return get_ppc_regs_from_elf_notes(tc); case EM_PPC64: return get_ppc64_regs_from_elf_notes(tc); case EM_X86_64: return get_x86_64_regs_from_elf_notes(tc); case EM_ARM: return get_arm_regs_from_elf_notes(tc); case EM_AARCH64: break; /* TBD */ } return NULL; } static void * get_x86_regs_from_elf_notes(struct task_context *tc) { Elf32_Nhdr *note_32; Elf64_Nhdr *note_64; void *note; size_t len; void *pt_regs; len = 0; pt_regs = NULL; if (nd->num_prstatus_notes > 1) note = (void *)nd->nt_prstatus_percpu[tc->processor]; else note = (void *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; if (nd->elf32) { note_32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note_32->n_namesz, 4); } else if (nd->elf64) { note_64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note_64->n_namesz, 4); } pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); /* NEED TO BE FIXED: Hack to get the proper alignment */ pt_regs +=4; no_nt_prstatus_exists: return pt_regs; } static void * get_x86_64_regs_from_elf_notes(struct task_context *tc) { Elf64_Nhdr *note; size_t len; void *pt_regs; pt_regs = NULL; if (nd->num_prstatus_notes > 1) note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } static void * get_ppc_regs_from_elf_notes(struct task_context *tc) { Elf32_Nhdr *note; size_t len; void *pt_regs; extern struct vmcore_data *nd; pt_regs = NULL; /* * Registers are always saved during the dump process for the * panic task. Kdump also captures registers for all CPUs if * they responded to an IPI. 
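	/*
	 * Illustrative note: the register set lives at a fixed offset
	 * within the note payload, so the lookup below amounts to:
	 *
	 *	pt_regs = (char *)note
	 *	    + roundup(sizeof(Elf32_Nhdr) + note->n_namesz, 4)
	 *	    + MEMBER_OFFSET("elf_prstatus", "pr_reg");
	 */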
*/ if (nd->num_prstatus_notes > 1) { note = (Elf32_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; } else note = (Elf32_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf32_Nhdr); len = roundup(len + note->n_namesz, 4); pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } static void * get_ppc64_regs_from_elf_notes(struct task_context *tc) { Elf64_Nhdr *note; size_t len; void *pt_regs; extern struct vmcore_data *nd; pt_regs = NULL; /* * Registers are always saved during the dump process for the * panic task. Kdump also captures registers for all CPUs if * they responded to an IPI. */ if (nd->num_prstatus_notes > 1) { note = (Elf64_Nhdr *)nd->nt_prstatus_percpu[tc->processor]; } else note = (Elf64_Nhdr *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; len = sizeof(Elf64_Nhdr); len = roundup(len + note->n_namesz, 4); pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } int kdump_phys_base(ulong *phys_base) { if (!kdump_kaslr_check()) return FALSE; *phys_base = nd->phys_base; return TRUE; } int kdump_set_phys_base(ulong phys_base) { if (!kdump_kaslr_check()) return FALSE; nd->phys_base = phys_base; return TRUE; } /* * In case of ARM we need to determine correct PHYS_OFFSET from the kdump file. * This is done by taking lowest physical address (LMA) from given load * segments. Normally this is the right one. * * Alternative would be to store phys_base in VMCOREINFO but current kernel * kdump doesn't do that yet. */ int arm_kdump_phys_base(ulong *phys_base) { struct pt_load_segment *pls; ulong paddr = ULONG_MAX; int i; for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; if (pls->phys_start < paddr) paddr = pls->phys_start; } if (paddr != ULONG_MAX) { *phys_base = paddr; return TRUE; } return FALSE; } /* * physical memory size, calculated by given load segments */ int arm_kdump_phys_end(ulong *phys_end) { struct pt_load_segment *pls; ulong paddr = 0; int i; for (i = 0; i < nd->num_pt_load_segments; i++) { pls = &nd->pt_load_segments[i]; if (pls->phys_end > paddr) paddr = pls->phys_end; } if (paddr != 0) { *phys_end = paddr; return TRUE; } return FALSE; } static void * get_arm_regs_from_elf_notes(struct task_context *tc) { Elf32_Nhdr *note_32; Elf64_Nhdr *note_64; void *note; size_t len; void *pt_regs; len = 0; pt_regs = NULL; if (nd->num_prstatus_notes > 1) note = (void *)nd->nt_prstatus_percpu[tc->processor]; else note = (void *)nd->nt_prstatus; if (!note) goto no_nt_prstatus_exists; if (nd->elf32) { note_32 = (Elf32_Nhdr *)note; len = sizeof(Elf32_Nhdr); len = roundup(len + note_32->n_namesz, 4); } else if (nd->elf64) { note_64 = (Elf64_Nhdr *)note; len = sizeof(Elf64_Nhdr); len = roundup(len + note_64->n_namesz, 4); } pt_regs = (void *)((char *)note + len + MEMBER_OFFSET("elf_prstatus", "pr_reg")); no_nt_prstatus_exists: return pt_regs; } /* * Read from /proc/kcore. */ int read_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int i; size_t readcnt; ulong kvaddr; Elf32_Phdr *lp32; Elf64_Phdr *lp64; off_t offset; if (paddr != KCORE_USE_VADDR) { if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } } /* * Unless specified otherwise, turn the physical address into * a unity-mapped kernel virtual address, which should work * for 64-bit architectures, and for lowmem access for 32-bit * architectures. 
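	/*
	 * That is, the common case below is simply kvaddr = PTOV(paddr):
	 * the physical address plus the machine-dependent kernel virtual
	 * base (typically PAGE_OFFSET).
	 */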
*/ if (paddr == KCORE_USE_VADDR) kvaddr = addr; else kvaddr = PTOV((ulong)paddr); offset = UNINITIALIZED; readcnt = cnt; switch (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) { case KCORE_ELF32: for (i = 0; i < pkd->segments; i++) { lp32 = pkd->load32 + i; if ((kvaddr >= lp32->p_vaddr) && (kvaddr < (lp32->p_vaddr + lp32->p_memsz))) { offset = (off_t)(kvaddr - lp32->p_vaddr) + (off_t)lp32->p_offset; break; } } /* * If it's not accessible via unity-mapping, check whether * it's a request for a vmalloc address that can be found * in the header. */ if (pc->curcmd_flags & MEMTYPE_KVADDR) pc->curcmd_flags &= ~MEMTYPE_KVADDR; else break; for (i = 0; i < pkd->segments; i++) { lp32 = pkd->load32 + i; if ((addr >= lp32->p_vaddr) && (addr < (lp32->p_vaddr + lp32->p_memsz))) { offset = (off_t)(addr - lp32->p_vaddr) + (off_t)lp32->p_offset; break; } } break; case KCORE_ELF64: /* * If KASLR, the PAGE_OFFSET may be unknown early on, so try * the (hopefully) mapped kernel address first. */ if (!(pc->flags & RUNTIME) && (pc->curcmd_flags & MEMTYPE_KVADDR) && (kvaddr != addr)) { pc->curcmd_flags &= ~MEMTYPE_KVADDR; for (i = 0; i < pkd->segments; i++) { lp64 = pkd->load64 + i; if ((addr >= lp64->p_vaddr) && (addr < (lp64->p_vaddr + lp64->p_memsz))) { offset = (off_t)(addr - lp64->p_vaddr) + (off_t)lp64->p_offset; break; } } if (offset != UNINITIALIZED) break; } for (i = 0; i < pkd->segments; i++) { lp64 = pkd->load64 + i; if ((kvaddr >= lp64->p_vaddr) && (kvaddr < (lp64->p_vaddr + lp64->p_memsz))) { offset = (off_t)(kvaddr - lp64->p_vaddr) + (off_t)lp64->p_offset; break; } } break; } if (offset == UNINITIALIZED) return SEEK_ERROR; if (lseek(fd, offset, SEEK_SET) != offset) perror("lseek"); if (read(fd, bufptr, readcnt) != readcnt) return READ_ERROR; return cnt; } /* * place holder -- cannot write to /proc/kcore */ int write_proc_kcore(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { error(FATAL, "cannot write to /proc/kcore\n"); return FALSE; } int is_proc_kcore(char *file, ulong source_query) { if (STREQ(file, "/proc/kcore") || same_file(file, "/proc/kcore")) { if (!is_netdump(file, source_query)) error(FATAL, "cannot translate the ELF header of /proc/kcore\n"); pkd->flags |= KCORE_LOCAL; return TRUE; } else return FALSE; } int proc_kcore_init(FILE *fp, int kcore_fd) { if (pkd->flags & (KCORE_ELF32|KCORE_ELF64)) return TRUE; if (BITS32()) return proc_kcore_init_32(fp, kcore_fd); else return proc_kcore_init_64(fp, kcore_fd); } static int proc_kcore_init_32(FILE *fp, int kcore_fd) { int fd; Elf32_Ehdr *elf32; Elf32_Phdr *load32; Elf32_Phdr *notes32; char eheader[MAX_KCORE_ELF_HEADER_SIZE]; char buf[BUFSIZE]; size_t load_size, notes_size; if (kcore_fd == UNUSED) { if ((fd = open("/proc/kcore", O_RDONLY)) < 0) { error(INFO, "/proc/kcore: %s\n", strerror(errno)); return FALSE; } } else fd = kcore_fd; if (read(fd, eheader, MAX_KCORE_ELF_HEADER_SIZE) != MAX_KCORE_ELF_HEADER_SIZE) { sprintf(buf, "/proc/kcore: read"); perror(buf); goto bailout; } if (lseek(fd, 0, SEEK_SET) != 0) { sprintf(buf, "/proc/kcore: lseek"); perror(buf); goto bailout; } if (fd != kcore_fd) close(fd); elf32 = (Elf32_Ehdr *)&eheader[0]; notes32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)]; load32 = (Elf32_Phdr *)&eheader[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; pkd->segments = elf32->e_phnum - 1; notes_size = load_size = 0; if (notes32->p_type == PT_NOTE) notes_size = notes32->p_offset + notes32->p_filesz; if (notes32->p_type == PT_LOAD) load_size = (ulong)(load32+(elf32->e_phnum)) - (ulong)elf32; pkd->header_size = MAX(notes_size, 
load_size); if (!pkd->header_size) pkd->header_size = MAX_KCORE_ELF_HEADER_SIZE; if ((pkd->elf_header = (char *)malloc(pkd->header_size)) == NULL) { error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(&eheader[0], &pkd->elf_header[0], pkd->header_size); pkd->notes32 = (Elf32_Phdr *)&pkd->elf_header[sizeof(Elf32_Ehdr)]; pkd->load32 = (Elf32_Phdr *) &pkd->elf_header[sizeof(Elf32_Ehdr)+sizeof(Elf32_Phdr)]; pkd->flags |= KCORE_ELF32; kcore_memory_dump(CRASHDEBUG(1) ? fp : pc->nullfp); return TRUE; bailout: if (fd != kcore_fd) close(fd); return FALSE; } static int proc_kcore_init_64(FILE *fp, int kcore_fd) { int fd; Elf64_Ehdr *elf64; Elf64_Phdr *load64; Elf64_Phdr *notes64; char eheader[MAX_KCORE_ELF_HEADER_SIZE]; char buf[BUFSIZE]; size_t load_size, notes_size; if (kcore_fd == UNUSED) { if ((fd = open("/proc/kcore", O_RDONLY)) < 0) { error(INFO, "/proc/kcore: %s\n", strerror(errno)); return FALSE; } } else fd = kcore_fd; if (read(fd, eheader, MAX_KCORE_ELF_HEADER_SIZE) != MAX_KCORE_ELF_HEADER_SIZE) { sprintf(buf, "/proc/kcore: read"); perror(buf); goto bailout; } if (lseek(fd, 0, SEEK_SET) != 0) { sprintf(buf, "/proc/kcore: lseek"); perror(buf); goto bailout; } if (fd != kcore_fd) close(fd); elf64 = (Elf64_Ehdr *)&eheader[0]; notes64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)]; load64 = (Elf64_Phdr *)&eheader[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; pkd->segments = elf64->e_phnum - 1; notes_size = load_size = 0; if (notes64->p_type == PT_NOTE) notes_size = notes64->p_offset + notes64->p_filesz; if (notes64->p_type == PT_LOAD) load_size = (ulong)(load64+(elf64->e_phnum)) - (ulong)elf64; pkd->header_size = MAX(notes_size, load_size); if (!pkd->header_size) pkd->header_size = MAX_KCORE_ELF_HEADER_SIZE; if ((pkd->elf_header = (char *)malloc(pkd->header_size)) == NULL) { error(INFO, "/proc/kcore: cannot malloc ELF header buffer\n"); clean_exit(1); } BCOPY(&eheader[0], &pkd->elf_header[0], pkd->header_size); pkd->notes64 = (Elf64_Phdr *)&pkd->elf_header[sizeof(Elf64_Ehdr)]; pkd->load64 = (Elf64_Phdr *) &pkd->elf_header[sizeof(Elf64_Ehdr)+sizeof(Elf64_Phdr)]; pkd->flags |= KCORE_ELF64; kcore_memory_dump(CRASHDEBUG(1) ? fp : pc->nullfp); return TRUE; bailout: if (fd != kcore_fd) close(fd); return FALSE; } int kcore_memory_dump(FILE *ofp) { int i, others; Elf32_Phdr *ph32; Elf64_Phdr *ph64; Elf32_Nhdr *note32; Elf64_Nhdr *note64; size_t tot, len; char *name, *ptr, buf[BUFSIZE]; fprintf(ofp, "proc_kcore_data:\n"); fprintf(ofp, " flags: %x (", pkd->flags); others = 0; if (pkd->flags & KCORE_LOCAL) fprintf(ofp, "%sKCORE_LOCAL", others++ ? "|" : ""); if (pkd->flags & KCORE_ELF32) fprintf(ofp, "%sKCORE_ELF32", others++ ? "|" : ""); if (pkd->flags & KCORE_ELF64) fprintf(ofp, "%sKCORE_ELF64", others++ ? 
"|" : ""); fprintf(ofp, ")\n"); fprintf(ofp, " segments: %d\n", pkd->segments); fprintf(ofp, " elf_header: %lx\n", (ulong)pkd->elf_header); fprintf(ofp, " header_size: %ld\n", (ulong)pkd->header_size); fprintf(ofp, " notes64: %lx\n", (ulong)pkd->notes64); fprintf(ofp, " load64: %lx\n", (ulong)pkd->load64); fprintf(ofp, " notes32: %lx\n", (ulong)pkd->notes32); fprintf(ofp, " load32: %lx\n", (ulong)pkd->load32); fprintf(ofp, " vmcoreinfo: %lx\n", (ulong)pkd->vmcoreinfo); fprintf(ofp, " size_vmcoreinfo: %d\n\n", pkd->size_vmcoreinfo); if (pkd->flags & KCORE_ELF32) { ph32 = pkd->notes32; fprintf(ofp, " Elf32_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph32->p_type); switch (ph32->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph32->p_flags); fprintf(ofp, " p_offset: %x\n", ph32->p_offset); fprintf(ofp, " p_vaddr: %x\n", ph32->p_vaddr); fprintf(ofp, " p_paddr: %x\n", ph32->p_paddr); fprintf(ofp, " p_filesz: %d\n", ph32->p_filesz); fprintf(ofp, " p_memsz: %d\n", ph32->p_memsz); fprintf(ofp, " p_align: %d\n", ph32->p_align); fprintf(ofp, "\n"); for (i = 0; i < pkd->segments; i++) { ph32 = pkd->load32 + i; fprintf(ofp, " Elf32_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph32->p_type); switch (ph32->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph32->p_flags); fprintf(ofp, " p_offset: %x\n", ph32->p_offset); fprintf(ofp, " p_vaddr: %x\n", ph32->p_vaddr); fprintf(ofp, " p_paddr: %x\n", ph32->p_paddr); fprintf(ofp, " p_filesz: %d\n", ph32->p_filesz); fprintf(ofp, " p_memsz: %d\n", ph32->p_memsz); fprintf(ofp, " p_align: %d\n", ph32->p_align); fprintf(ofp, "\n"); } note32 = (Elf32_Nhdr *)(pkd->elf_header + pkd->notes32->p_offset); for (tot = 0; tot < pkd->notes32->p_filesz; tot += len) { name = (char *)((ulong)note32 + sizeof(Elf32_Nhdr)); snprintf(buf, note32->n_namesz, "%s", name); fprintf(ofp, " Elf32_Nhdr:\n"); fprintf(ofp, " n_namesz: %d (\"%s\")\n", note32->n_namesz, buf); fprintf(ofp, " n_descsz: %d\n", note32->n_descsz); fprintf(ofp, " n_type: %d ", note32->n_type); switch (note32->n_type) { case NT_PRSTATUS: fprintf(ofp, "(NT_PRSTATUS)\n"); break; case NT_PRPSINFO: fprintf(ofp, "(NT_PRPSINFO)\n"); break; case NT_TASKSTRUCT: fprintf(ofp, "(NT_TASKSTRUCT)\n"); break; default: fprintf(ofp, "(unknown)\n"); if (STRNEQ(name, "VMCOREINFO")) { ptr = (char *)note32 + sizeof(Elf32_Nhdr) + note32->n_namesz + 1; pkd->vmcoreinfo = (void *)ptr; pkd->size_vmcoreinfo = note32->n_descsz; pc->read_vmcoreinfo = vmcoreinfo_read_string; fprintf(ofp, "\n "); for (i = 0; i < note32->n_descsz; i++, ptr++) { fprintf(ofp, "%c%s", *ptr, *ptr == '\n' ? 
" " : ""); } } break; } fprintf(ofp, "\n"); len = sizeof(Elf32_Nhdr); len = roundup(len + note32->n_namesz, 4); len = roundup(len + note32->n_descsz, 4); note32 = (Elf32_Nhdr *)((ulong)note32 + len); } } if (pkd->flags & KCORE_ELF64) { ph64 = pkd->notes64; fprintf(ofp, " Elf64_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph64->p_type); switch (ph64->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph64->p_flags); fprintf(ofp, " p_offset: %llx\n", (ulonglong)ph64->p_offset); fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)ph64->p_vaddr); fprintf(ofp, " p_paddr: %llx\n", (ulonglong)ph64->p_paddr); fprintf(ofp, " p_filesz: %lld\n", (ulonglong)ph64->p_filesz); fprintf(ofp, " p_memsz: %lld\n", (ulonglong)ph64->p_memsz); fprintf(ofp, " p_align: %lld\n", (ulonglong)ph64->p_align); fprintf(ofp, "\n"); for (i = 0; i < pkd->segments; i++) { ph64 = pkd->load64 + i; fprintf(ofp, " Elf64_Phdr:\n"); fprintf(ofp, " p_type: %x ", ph64->p_type); switch (ph64->p_type) { case PT_NOTE: fprintf(ofp, "(PT_NOTE)\n"); break; case PT_LOAD: fprintf(ofp, "(PT_LOAD)\n"); break; default: fprintf(ofp, "(unknown)\n"); break; } fprintf(ofp, " p_flags: %x\n", ph64->p_flags); fprintf(ofp, " p_offset: %llx\n", (ulonglong)ph64->p_offset); fprintf(ofp, " p_vaddr: %llx\n", (ulonglong)ph64->p_vaddr); fprintf(ofp, " p_paddr: %llx\n", (ulonglong)ph64->p_paddr); fprintf(ofp, " p_filesz: %lld\n", (ulonglong)ph64->p_filesz); fprintf(ofp, " p_memsz: %lld\n", (ulonglong)ph64->p_memsz); fprintf(ofp, " p_align: %lld\n", (ulonglong)ph64->p_align); fprintf(ofp, "\n"); } note64 = (Elf64_Nhdr *)(pkd->elf_header + pkd->notes64->p_offset); for (tot = 0; tot < pkd->notes64->p_filesz; tot += len) { name = (char *)((ulong)note64 + sizeof(Elf64_Nhdr)); snprintf(buf, note64->n_namesz, "%s", name); fprintf(ofp, " Elf64_Nhdr:\n"); fprintf(ofp, " n_namesz: %d (\"%s\")\n", note64->n_namesz, buf); fprintf(ofp, " n_descsz: %d\n", note64->n_descsz); fprintf(ofp, " n_type: %d ", note64->n_type); switch (note64->n_type) { case NT_PRSTATUS: fprintf(ofp, "(NT_PRSTATUS)\n"); break; case NT_PRPSINFO: fprintf(ofp, "(NT_PRPSINFO)\n"); break; case NT_TASKSTRUCT: fprintf(ofp, "(NT_TASKSTRUCT)\n"); break; default: fprintf(ofp, "(unknown)\n"); if (STRNEQ(name, "VMCOREINFO")) { ptr = (char *)note64 + sizeof(Elf64_Nhdr) + note64->n_namesz + 1; pkd->vmcoreinfo = (void *)ptr; pkd->size_vmcoreinfo = note64->n_descsz; pc->read_vmcoreinfo = vmcoreinfo_read_string; fprintf(ofp, "\n "); for (i = 0; i < note64->n_descsz; i++, ptr++) { fprintf(ofp, "%c%s", *ptr, *ptr == '\n' ? 
" " : ""); } } break; } fprintf(ofp, "\n"); len = sizeof(Elf64_Nhdr); len = roundup(len + note64->n_namesz, 4); len = roundup(len + note64->n_descsz, 4); note64 = (Elf64_Nhdr *)((ulong)note64 + len); } } return TRUE; } static void kdump_get_osrelease(void) { char *string; if ((string = vmcoreinfo_read_string("OSRELEASE"))) { fprintf(fp, "%s\n", string); free(string); } else pc->flags2 &= ~GET_OSRELEASE; } void dump_registers_for_qemu_mem_dump(void) { int i; QEMUCPUState *ptr; FILE *fpsave; fpsave = nd->ofp; nd->ofp = fp; for (i = 0; i < nd->num_qemu_notes; i++) { ptr = (QEMUCPUState *)nd->nt_qemu_percpu[i]; if (i) netdump_print("\n"); if (hide_offline_cpu(i)) { netdump_print("CPU %d: [OFFLINE]\n", i); continue; } else netdump_print("CPU %d:\n", i); if (CRASHDEBUG(1)) netdump_print(" version:%d size:%d\n", ptr->version, ptr->size); netdump_print(" RAX: %016llx RBX: %016llx RCX: %016llx\n", ptr->rax, ptr->rbx, ptr->rcx); netdump_print(" RDX: %016llx RSI: %016llx RDI:%016llx\n", ptr->rdx, ptr->rsi, ptr->rdi); netdump_print(" RSP: %016llx RBP: %016llx ", ptr->rsp, ptr->rbp); if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64) { netdump_print(" R8: %016llx\n", ptr->r8); netdump_print(" R9: %016llx R10: %016llx R11: %016llx\n", ptr->r9, ptr->r10, ptr->r11); netdump_print(" R12: %016llx R13: %016llx R14: %016llx\n", ptr->r12, ptr->r13, ptr->r14); netdump_print(" R15: %016llx", ptr->r15); } else netdump_print("\n"); netdump_print(" RIP: %016llx RFLAGS: %08llx\n", ptr->rip, ptr->rflags); netdump_print(" CS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->cs.selector, ptr->cs.limit, ptr->cs.flags, ptr->cs.pad, ptr->cs.base); netdump_print(" DS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ds.selector, ptr->ds.limit, ptr->ds.flags, ptr->ds.pad, ptr->ds.base); netdump_print(" ES: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->es.selector, ptr->es.limit, ptr->es.flags, ptr->es.pad, ptr->es.base); netdump_print(" FS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->fs.selector, ptr->fs.limit, ptr->fs.flags, ptr->fs.pad, ptr->fs.base); netdump_print(" GS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->gs.selector, ptr->gs.limit, ptr->gs.flags, ptr->gs.pad, ptr->gs.base); netdump_print(" SS: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ss.selector, ptr->ss.limit, ptr->ss.flags, ptr->ss.pad, ptr->ss.base); netdump_print(" LDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->ldt.selector, ptr->ldt.limit, ptr->ldt.flags, ptr->ldt.pad, ptr->ldt.base); netdump_print(" TR: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->tr.selector, ptr->tr.limit, ptr->tr.flags, ptr->tr.pad, ptr->tr.base); netdump_print(" GDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->gdt.selector, ptr->gdt.limit, ptr->gdt.flags, ptr->gdt.pad, ptr->gdt.base); netdump_print(" IDT: selector: %04lx limit: %08lx flags: %08lx\n\ pad: %08lx base: %016llx\n", ptr->idt.selector, ptr->idt.limit, ptr->idt.flags, ptr->idt.pad, ptr->idt.base); netdump_print(" CR0: %016llx CR1: %016llx CR2: %016llx\n", ptr->cr[0], ptr->cr[1], ptr->cr[2]); netdump_print(" CR3: %016llx CR4: %016llx\n", ptr->cr[3], ptr->cr[4]); } nd->ofp = fpsave; } /* * kdump saves the first 640kB physical memory for BIOS to use the * range on boot of 2nd kernel. 
Read request to the 640k should be * translated to the back up region. This function searches kexec * resources for the backup region. */ void kdump_backup_region_init(void) { char buf[BUFSIZE]; ulong i, total, kexec_crash_image_p, elfcorehdr_p; Elf32_Off e_phoff32; Elf64_Off e_phoff64; uint16_t e_phnum, e_phentsize; ulonglong backup_offset; ulonglong backup_src_start; ulong backup_src_size; int kimage_segment_len; size_t bufsize; struct vmcore_data *vd; struct sadump_data *sd; int is_32_bit; char typename[BUFSIZE]; e_phoff32 = e_phoff64 = 0; vd = NULL; sd = NULL; if (SADUMP_DUMPFILE()) { sd = get_sadump_data(); is_32_bit = FALSE; sprintf(typename, "sadump"); } else if (pc->flags2 & QEMU_MEM_DUMP_ELF) { vd = get_kdump_vmcore_data(); if (vd->flags & KDUMP_ELF32) is_32_bit = TRUE; else is_32_bit = FALSE; sprintf(typename, "qemu mem dump"); } else return; if (symbol_exists("kexec_crash_image")) { if (!readmem(symbol_value("kexec_crash_image"), KVADDR, &kexec_crash_image_p, sizeof(ulong), "kexec backup region: kexec_crash_image", QUIET|RETURN_ON_ERROR)) goto error; } else kexec_crash_image_p = 0; if (!kexec_crash_image_p) { if (CRASHDEBUG(1)) error(INFO, "%s: kexec_crash_image not loaded\n", typename); return; } kimage_segment_len = get_array_length("kimage.segment", NULL, STRUCT_SIZE("kexec_segment")); if (!readmem(kexec_crash_image_p + MEMBER_OFFSET("kimage", "segment"), KVADDR, buf, MEMBER_SIZE("kimage", "segment"), "kexec backup region: kexec_crash_image->segment", QUIET|RETURN_ON_ERROR)) goto error; elfcorehdr_p = 0; for (i = 0; i < kimage_segment_len; ++i) { char e_ident[EI_NIDENT]; ulong mem; mem = ULONG(buf + i * STRUCT_SIZE("kexec_segment") + MEMBER_OFFSET("kexec_segment", "mem")); if (!mem) continue; if (!readmem(mem, PHYSADDR, e_ident, SELFMAG, "elfcorehdr: e_ident", QUIET|RETURN_ON_ERROR)) goto error; if (strncmp(ELFMAG, e_ident, SELFMAG) == 0) { elfcorehdr_p = mem; break; } } if (!elfcorehdr_p) { if (CRASHDEBUG(1)) error(INFO, "%s: elfcorehdr not found in segments of kexec_crash_image\n", typename); goto error; } if (is_32_bit) { if (!readmem(elfcorehdr_p, PHYSADDR, buf, STRUCT_SIZE("elf32_hdr"), "elfcorehdr", QUIET|RETURN_ON_ERROR)) goto error; e_phnum = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phnum")); e_phentsize = USHORT(buf + MEMBER_OFFSET("elf32_hdr", "e_phentsize")); e_phoff32 = ULONG(buf + MEMBER_OFFSET("elf32_hdr", "e_phoff")); } else { if (!readmem(elfcorehdr_p, PHYSADDR, buf, STRUCT_SIZE("elf64_hdr"), "elfcorehdr", QUIET|RETURN_ON_ERROR)) goto error; e_phnum = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phnum")); e_phentsize = USHORT(buf + MEMBER_OFFSET("elf64_hdr", "e_phentsize")); e_phoff64 = ULONG(buf + MEMBER_OFFSET("elf64_hdr", "e_phoff")); } backup_src_start = backup_src_size = backup_offset = 0; for (i = 0; i < e_phnum; ++i) { uint32_t p_type; Elf32_Off p_offset32; Elf64_Off p_offset64; Elf32_Addr p_paddr32; Elf64_Addr p_paddr64; uint32_t p_memsz32; uint64_t p_memsz64; if (is_32_bit) { if (!readmem(elfcorehdr_p + e_phoff32 + i * e_phentsize, PHYSADDR, buf, e_phentsize, "elfcorehdr: program header", QUIET|RETURN_ON_ERROR)) goto error; p_type = UINT(buf+MEMBER_OFFSET("elf32_phdr","p_type")); p_offset32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_offset")); p_paddr32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_paddr")); p_memsz32 = ULONG(buf+MEMBER_OFFSET("elf32_phdr","p_memsz")); } else { if (!readmem(elfcorehdr_p + e_phoff64 + i * e_phentsize, PHYSADDR, buf, e_phentsize, "elfcorehdr: program header", QUIET|RETURN_ON_ERROR)) goto error; p_type = 
UINT(buf+MEMBER_OFFSET("elf64_phdr","p_type")); p_offset64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_offset")); p_paddr64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_paddr")); p_memsz64 = ULONG(buf+MEMBER_OFFSET("elf64_phdr","p_memsz")); } /* * kexec marks backup region PT_LOAD by assigning * backup region address in p_offset, and p_addr in * p_offsets for other PT_LOAD entries. */ if (is_32_bit) { if (p_type == PT_LOAD && p_paddr32 <= KEXEC_BACKUP_SRC_END && p_paddr32 != p_offset32) { backup_src_start = p_paddr32; backup_src_size = p_memsz32; backup_offset = p_offset32; if (CRASHDEBUG(1)) error(INFO, "%s: kexec backup region found: " "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n", typename, backup_src_start, backup_src_size, backup_offset); break; } } else { if (p_type == PT_LOAD && p_paddr64 <= KEXEC_BACKUP_SRC_END && p_paddr64 != p_offset64) { backup_src_start = p_paddr64; backup_src_size = p_memsz64; backup_offset = p_offset64; if (CRASHDEBUG(1)) error(INFO, "%s: kexec backup region found: " "START: %#016llx SIZE: %#016lx OFFSET: %#016llx\n", typename, backup_src_start, backup_src_size, backup_offset); break; } } } if (!backup_offset) { if (CRASHDEBUG(1)) error(WARNING, "%s: backup region not found in elfcorehdr\n", typename); return; } bufsize = BUFSIZE; for (total = 0; total < backup_src_size; total += bufsize) { char backup_buf[BUFSIZE]; int j; if (backup_src_size - total < BUFSIZE) bufsize = backup_src_size - total; if (!readmem(backup_offset + total, PHYSADDR, backup_buf, bufsize, "backup source", QUIET|RETURN_ON_ERROR)) goto error; /* * We're assuming the backup region is initialized * with 0 filled if kdump has not run. */ for (j = 0; j < bufsize; ++j) { if (backup_buf[j]) { if (SADUMP_DUMPFILE()) { sd->flags |= SADUMP_KDUMP_BACKUP; sd->backup_src_start = backup_src_start; sd->backup_src_size = backup_src_size; sd->backup_offset = backup_offset; } else if (pc->flags2 & QEMU_MEM_DUMP_ELF) { vd->flags |= QEMU_MEM_DUMP_KDUMP_BACKUP; vd->backup_src_start = backup_src_start; vd->backup_src_size = backup_src_size; vd->backup_offset = backup_offset; } if (CRASHDEBUG(1)) error(INFO, "%s: backup region is used: %llx\n", typename, backup_offset + total + j); return; } } } if (CRASHDEBUG(1)) error(INFO, "%s: kexec backup region not used\n", typename); return; error: error(WARNING, "failed to init kexec backup region\n"); } int kdump_kaslr_check(void) { if (!QEMU_MEM_DUMP_NO_VMCOREINFO()) return FALSE; /* If vmcore has QEMU note, need to calculate kaslr offset */ if (nd->num_qemu_notes) return TRUE; else return FALSE; } #ifdef X86_64 QEMUCPUState * kdump_get_qemucpustate(int cpu) { if (cpu >= nd->num_qemu_notes) { if (CRASHDEBUG(1)) error(INFO, "Invalid index for QEMU Note: %d (>= %d)\n", cpu, nd->num_qemu_notes); return NULL; } if (!nd->elf64 || (nd->elf64->e_machine != EM_X86_64)) { if (CRASHDEBUG(1)) error(INFO, "Only x86_64 64bit is supported.\n"); return NULL; } return (QEMUCPUState *)nd->nt_qemu_percpu[cpu]; } #endif static void * get_kdump_device_dump_offset(void) { void *elf_base = NULL; if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF64) elf_base = (void *)nd->elf64; else if (DUMPFILE_FORMAT(nd->flags) == KDUMP_ELF32) elf_base = (void *)nd->elf32; else error(FATAL, "no device dumps found in this dumpfile\n"); return elf_base; } /* * extract hardware specific device dumps from coredump. 
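 * The dump data for entry N lives at file offset
 * nt_vmcoredd_array[N] - elf_base; the code below computes that offset
 * and hands it to devdump_extract().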
*/ void kdump_device_dump_extract(int index, char *outfile, FILE *ofp) { ulonglong offset; void *elf_base; if (!nd->num_vmcoredd_notes) error(FATAL, "no device dumps found in this dumpfile\n"); else if (index >= nd->num_vmcoredd_notes) error(FATAL, "no device dump found at index: %d\n", index); elf_base = get_kdump_device_dump_offset(); offset = nd->nt_vmcoredd_array[index] - elf_base; devdump_extract(nd->nt_vmcoredd_array[index], offset, outfile, ofp); } /* * List all hardware-specific device dumps present in the coredump. */ void kdump_device_dump_info(FILE *ofp) { ulonglong offset; char buf[BUFSIZE]; void *elf_base; ulong i; if (!nd->num_vmcoredd_notes) error(FATAL, "no device dumps found in this dumpfile\n"); fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), LJUST, "INDEX")); fprintf(fp, " %s ", mkstring(buf, LONG_LONG_PRLEN, LJUST, "OFFSET")); fprintf(fp, " %s ", mkstring(buf, LONG_PRLEN, LJUST, "SIZE")); fprintf(fp, "NAME\n"); elf_base = get_kdump_device_dump_offset(); for (i = 0; i < nd->num_vmcoredd_notes; i++) { fprintf(fp, "%s ", mkstring(buf, strlen("INDEX"), CENTER | INT_DEC, MKSTR(i))); offset = nd->nt_vmcoredd_array[i] - elf_base; devdump_info(nd->nt_vmcoredd_array[i], offset, ofp); } } crash-7.2.8/xen_hyper_dump_tables.c0000664000000000000000000011305713614623427016073 0ustar rootroot/* * xen_hyper_dump_tables.c * * Portions Copyright (C) 2006-2007 Fujitsu Limited * Portions Copyright (C) 2006-2007 VA Linux Systems Japan K.K. * * Authors: Itsuro Oda * Fumihiko Kakuma * * This file is part of Xencrash. * * Xencrash is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Xencrash is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Xencrash; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "defs.h" #ifdef XEN_HYPERVISOR_ARCH #include "xen_hyper_defs.h" static void xen_hyper_dump_xen_hyper_table(int verbose); static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose); static void xen_hyper_dump_xen_hyper_domain_table(int verbose); static void xen_hyper_dump_xen_hyper_vcpu_table(int verbose); static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose); static void xen_hyper_dump_xen_hyper_sched_table(int verbose); static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct); static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct); static void xen_hyper_dump_mem(void *mem, ulong len, int dsz); /* * Get help for a command, to dump an internal table, or the GNU public * license copying/warranty information.
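 *
 * A minimal usage sketch (hypothetical session; the three-letter table
 * names are exactly the ones parsed in the "-X" case below):
 *
 *	crash> help -X dom	(domain table data)
 *	crash> help -X Dom	(domain table data, verbose)
 *	crash> help -X ofs	(offset table data)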
*/ void xen_hyper_cmd_help(void) { int c; int oflag; oflag = 0; while ((c = getopt(argcnt, args, "aBbcDgHhM:mnOopszX:")) != EOF) { switch(c) { case 'a': dump_alias_data(); return; case 'b': dump_shared_bufs(); return; case 'B': dump_build_data(); return; case 'c': dump_numargs_cache(); return; case 'n': case 'D': dumpfile_memory(DUMPFILE_MEM_DUMP); return; case 'g': dump_gdb_data(); return; case 'H': dump_hash_table(VERBOSE); return; case 'h': dump_hash_table(!VERBOSE); return; case 'M': dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL)); return; case 'm': dump_machdep_table(0); return; case 'O': dump_offset_table(NULL, TRUE); return; case 'o': oflag = TRUE; break; case 'p': dump_program_context(); return; case 's': dump_symbol_table(); return; case 'X': if (strlen(optarg) != 3) { argerrs++; break; } if (!strncmp("Xen", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_table(VERBOSE); else if (!strncmp("xen", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_table(!VERBOSE); else if (!strncmp("Dmp", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_dumpinfo_table(VERBOSE); else if (!strncmp("dmp", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_dumpinfo_table(!VERBOSE); else if (!strncmp("Dom", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_domain_table(VERBOSE); else if (!strncmp("dom", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_domain_table(!VERBOSE); else if (!strncmp("Vcp", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_vcpu_table(VERBOSE); else if (!strncmp("vcp", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_vcpu_table(!VERBOSE); else if (!strncmp("Pcp", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_pcpu_table(VERBOSE); else if (!strncmp("pcp", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_pcpu_table(!VERBOSE); else if (!strncmp("Sch", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_sched_table(VERBOSE); else if (!strncmp("sch", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_sched_table(!VERBOSE); else if (!strncmp("siz", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_size_table(NULL, TRUE); else if (!strncmp("ofs", optarg, strlen(optarg))) xen_hyper_dump_xen_hyper_offset_table(NULL, TRUE); else { argerrs++; break; } return; case 'z': fprintf(fp, "help options:\n"); fprintf(fp, " -a - alias data\n"); fprintf(fp, " -b - shared buffer data\n"); fprintf(fp, " -B - build data\n"); fprintf(fp, " -c - numargs cache\n"); fprintf(fp, " -M machine specific\n"); fprintf(fp, " -m - machdep_table\n"); fprintf(fp, " -s - symbol table data\n"); fprintf(fp, " -o - offset_table and size_table\n"); fprintf(fp, " -p - program_context\n"); fprintf(fp, " -h - hash_table data\n"); fprintf(fp, " -H - hash_table data (verbose)\n"); fprintf(fp, " -X Xen - xen table data (verbose)\n"); fprintf(fp, " -X xen - xen table data\n"); fprintf(fp, " -X Dmp - dumpinfo table data (verbose)\n"); fprintf(fp, " -X dmp - dumpinfo table data\n"); fprintf(fp, " -X Dom - domain table data (verbose)\n"); fprintf(fp, " -X dom - domain table data\n"); fprintf(fp, " -X Vcp - vcpu table data (verbose)\n"); fprintf(fp, " -X vcp - vcpu table data\n"); fprintf(fp, " -X Pcp - pcpu table data (verbose)\n"); fprintf(fp, " -X pcp - pcpu table data\n"); fprintf(fp, " -X Sch - schedule table data (verbose)\n"); fprintf(fp, " -X sch - schedule table data\n"); fprintf(fp, " -X siz - size table data\n"); fprintf(fp, " -X ofs - offset table data\n"); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, COMPLETE_HELP); if (!args[optind]) { if (oflag) dump_offset_table(NULL, FALSE); 
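/*
 * With no further arguments, a bare "help -o" dumps the entire
 * offset and size tables; any other argument-less invocation falls
 * through to the generic help screen.
 */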
else display_help_screen(""); return; } do { if (oflag) dump_offset_table(args[optind], FALSE); else cmd_usage(args[optind], COMPLETE_HELP); optind++; } while (args[optind]); } /* * "help -x xen" output */ static void xen_hyper_dump_xen_hyper_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; uint cpuid; int len, flag, i; len = 14; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "cpu_data_address: ", buf, flag, (buf, "%lu\n", xht->cpu_data_address)); XEN_HYPER_PRI(fp, len, "cpu_curr: ", buf, flag, (buf, "%u\n", xht->cpu_curr)); XEN_HYPER_PRI(fp, len, "max_cpus: ", buf, flag, (buf, "%u\n", xht->max_cpus)); XEN_HYPER_PRI(fp, len, "cores: ", buf, flag, (buf, "%d\n", xht->cores)); XEN_HYPER_PRI(fp, len, "pcpus: ", buf, flag, (buf, "%d\n", xht->pcpus)); XEN_HYPER_PRI(fp, len, "vcpus: ", buf, flag, (buf, "%d\n", xht->vcpus)); XEN_HYPER_PRI(fp, len, "domains: ", buf, flag, (buf, "%d\n", xht->domains)); XEN_HYPER_PRI(fp, len, "sys_pages: ", buf, flag, (buf, "%lu\n", xht->sys_pages)); XEN_HYPER_PRI(fp, len, "crashing_cpu: ", buf, flag, (buf, "%d\n", xht->crashing_cpu)); XEN_HYPER_PRI(fp, len, "crashing_vcc: ", buf, flag, (buf, "%p\n", xht->crashing_vcc)); XEN_HYPER_PRI(fp, len, "max_page: ", buf, flag, (buf, "%lu\n", xht->max_page)); XEN_HYPER_PRI(fp, len, "total_pages: ", buf, flag, (buf, "%lu\n", xht->total_pages)); XEN_HYPER_PRI(fp, len, "cpumask: ", buf, flag, (buf, "%p\n", xht->cpumask)); if (verbose && xht->cpumask) { xen_hyper_dump_mem(xht->cpumask, XEN_HYPER_SIZE(cpumask_t), sizeof(long)); } XEN_HYPER_PRI(fp, len, "cpu_idxs: ", buf, flag, (buf, "%p\n", xht->cpu_idxs)); if (verbose) { for_cpu_indexes(i, cpuid) fprintf(fp, "%03d : %d\n", i, cpuid); } } /* * "help -x dmp" output */ static void xen_hyper_dump_xen_hyper_dumpinfo_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; int len, flag; len = 25; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "note_ver: ", buf, flag, (buf, "%u\n", xhdit->note_ver)); XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", xhdit->context_array)); if (verbose && xhdit->context_array) { xen_hyper_dump_mem((long *)xhdit->context_array, sizeof(struct xen_hyper_dumpinfo_context) * XEN_HYPER_MAX_CPUS(), sizeof(long)); } XEN_HYPER_PRI(fp, len, "context_xen_core_array: ", buf, flag, (buf, "%p\n", xhdit->context_xen_core_array)); if (verbose && xhdit->context_xen_core_array) { xen_hyper_dump_mem((long *)xhdit->context_xen_core_array, sizeof(struct xen_hyper_dumpinfo_context_xen_core) * XEN_HYPER_MAX_CPUS(), sizeof(long)); } XEN_HYPER_PRI_CONST(fp, len, "context_xen_info: ", flag|XEN_HYPER_PRI_LF); XEN_HYPER_PRI(fp, len, "note: ", buf, flag, (buf, "%lx\n", xhdit->context_xen_info.note)); XEN_HYPER_PRI(fp, len, "pcpu_id: ", buf, flag, (buf, "%u\n", xhdit->context_xen_info.pcpu_id)); XEN_HYPER_PRI(fp, len, "crash_xen_info_ptr: ", buf, flag, (buf, "%p\n", xhdit->context_xen_info.crash_xen_info_ptr)); XEN_HYPER_PRI(fp, len, "crash_note_core_array: ", buf, flag, (buf, "%p\n", xhdit->crash_note_core_array)); if (verbose && xhdit->crash_note_core_array) { xen_hyper_dump_mem((long *)xhdit->crash_note_core_array, xhdit->core_size * XEN_HYPER_NR_PCPUS(), sizeof(long)); } XEN_HYPER_PRI(fp, len, "crash_note_xen_core_array: ", buf, flag, (buf, "%p\n", xhdit->crash_note_xen_core_array)); if (verbose && xhdit->crash_note_xen_core_array) { xen_hyper_dump_mem( xhdit->crash_note_xen_core_array, xhdit->xen_core_size * XEN_HYPER_NR_PCPUS(), sizeof(long)); } XEN_HYPER_PRI(fp, len, "crash_note_xen_info_ptr: ", buf, flag, (buf, "%p\n", xhdit->crash_note_xen_info_ptr)); if 
(verbose && xhdit->crash_note_xen_info_ptr) { xen_hyper_dump_mem( xhdit->crash_note_xen_info_ptr, xhdit->xen_info_size, sizeof(long)); } XEN_HYPER_PRI(fp, len, "xen_info_cpu: ", buf, flag, (buf, "%u\n", xhdit->xen_info_cpu)); XEN_HYPER_PRI(fp, len, "note_size: ", buf, flag, (buf, "%u\n", xhdit->note_size)); XEN_HYPER_PRI(fp, len, "core_offset: ", buf, flag, (buf, "%u\n", xhdit->core_offset)); XEN_HYPER_PRI(fp, len, "core_size: ", buf, flag, (buf, "%u\n", xhdit->core_size)); XEN_HYPER_PRI(fp, len, "xen_core_offset: ", buf, flag, (buf, "%u\n", xhdit->xen_core_offset)); XEN_HYPER_PRI(fp, len, "xen_core_size: ", buf, flag, (buf, "%u\n", xhdit->xen_core_size)); XEN_HYPER_PRI(fp, len, "xen_info_offset: ", buf, flag, (buf, "%u\n", xhdit->xen_info_offset)); XEN_HYPER_PRI(fp, len, "xen_info_size: ", buf, flag, (buf, "%u\n", xhdit->xen_info_size)); } /* * "help -x dom" output */ static void xen_hyper_dump_xen_hyper_domain_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; struct xen_hyper_domain_context *dcca; int len, flag, i; len = 22; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", xhdt->context_array)); if (verbose) { char buf1[XEN_HYPER_CMD_BUFSIZE]; int j; for (i = 0, dcca = xhdt->context_array; i < xhdt->context_array_cnt; i++, dcca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", i); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%lx\n", dcca->domain)); XEN_HYPER_PRI(fp, len, "domain_id: ", buf, flag, (buf, "%d\n", dcca->domain_id)); XEN_HYPER_PRI(fp, len, "tot_pages: ", buf, flag, (buf, "%x\n", dcca->tot_pages)); XEN_HYPER_PRI(fp, len, "max_pages: ", buf, flag, (buf, "%x\n", dcca->max_pages)); XEN_HYPER_PRI(fp, len, "xenheap_pages: ", buf, flag, (buf, "%x\n", dcca->xenheap_pages)); XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, (buf, "%lx\n", dcca->shared_info)); XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n", dcca->sched_priv)); XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, (buf, "%lx\n", dcca->next_in_list)); XEN_HYPER_PRI(fp, len, "domain_flags: ", buf, flag, (buf, "%lx\n", dcca->domain_flags)); XEN_HYPER_PRI(fp, len, "evtchn: ", buf, flag, (buf, "%lx\n", dcca->evtchn)); XEN_HYPER_PRI(fp, len, "vcpu_cnt: ", buf, flag, (buf, "%d\n", dcca->vcpu_cnt)); for (j = 0; j < XEN_HYPER_MAX_VIRT_CPUS; j++) { snprintf(buf1, XEN_HYPER_CMD_BUFSIZE, "vcpu[%d]: ", j); XEN_HYPER_PRI(fp, len, buf1, buf, flag, (buf, "%lx\n", dcca->vcpu[j])); } XEN_HYPER_PRI(fp, len, "vcpu_context_array: ", buf, flag, (buf, "%p\n", dcca->vcpu_context_array)); } } XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, (buf, "%d\n", xhdt->context_array_cnt)); XEN_HYPER_PRI(fp, len, "running_domains: ", buf, flag, (buf, "%lu\n", xhdt->running_domains)); XEN_HYPER_PRI(fp, len, "dom_io: ", buf, flag, (buf, "%p\n", xhdt->dom_io)); XEN_HYPER_PRI(fp, len, "dom_xen: ", buf, flag, (buf, "%p\n", xhdt->dom_xen)); XEN_HYPER_PRI(fp, len, "dom0: ", buf, flag, (buf, "%p\n", xhdt->dom0)); XEN_HYPER_PRI(fp, len, "idle_domain: ", buf, flag, (buf, "%p\n", xhdt->idle_domain)); XEN_HYPER_PRI(fp, len, "curr_domain: ", buf, flag, (buf, "%p\n", xhdt->curr_domain)); XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n", xhdt->last)); XEN_HYPER_PRI(fp, len, "domain_struct: ", buf, flag, (buf, "%p\n", xhdt->domain_struct)); XEN_HYPER_PRI(fp, len, "domain_struct_verify: ", buf, flag, (buf, "%p\n", xhdt->domain_struct_verify)); } /* * "help -x vcp" output */ static void 
xen_hyper_dump_xen_hyper_vcpu_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; int len, flag; len = 25; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "vcpu_context_arrays: ", buf, flag, (buf, "%p\n", xhvct->vcpu_context_arrays)); XEN_HYPER_PRI(fp, len, "vcpu_context_arrays_cnt: ", buf, flag, (buf, "%d\n", xhvct->vcpu_context_arrays_cnt)); if (verbose) { struct xen_hyper_vcpu_context_array *vcca; struct xen_hyper_vcpu_context *vca; int i, j; for (i = 0, vcca = xhvct->vcpu_context_arrays; i < xhvct->vcpu_context_arrays_cnt; i++, vcca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "vcpu_context_arrays[%d]: ", i); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); if (vcca->context_array) { XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", vcca->context_array)); } else { XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "NULL\n")); } XEN_HYPER_PRI(fp, len, "context_array_cnt: ", buf, flag, (buf, "%d\n", vcca->context_array_cnt)); XEN_HYPER_PRI(fp, len, "context_array_valid: ", buf, flag, (buf, "%d\n", vcca->context_array_valid)); for (j = 0, vca = vcca->context_array; j < vcca->context_array_cnt; j++, vca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array[%d]: ", j); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, (buf, "%lx\n", vca->vcpu)); XEN_HYPER_PRI(fp, len, "vcpu_id: ", buf, flag, (buf, "%d\n", vca->vcpu_id)); XEN_HYPER_PRI(fp, len, "processor: ", buf, flag, (buf, "%d\n", vca->processor)); XEN_HYPER_PRI(fp, len, "vcpu_info: ", buf, flag, (buf, "%lx\n", vca->vcpu_info)); XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%lx\n", vca->domain)); XEN_HYPER_PRI(fp, len, "next_in_list: ", buf, flag, (buf, "%lx\n", vca->next_in_list)); XEN_HYPER_PRI(fp, len, "sleep_tick: ", buf, flag, (buf, "%lx\n", vca->sleep_tick)); XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n", vca->sched_priv)); XEN_HYPER_PRI(fp, len, "state: ", buf, flag, (buf, "%d\n", vca->state)); XEN_HYPER_PRI(fp, len, "state_entry_time: ", buf, flag, (buf, "%llx\n", (unsigned long long)(vca->state_entry_time))); XEN_HYPER_PRI(fp, len, "runstate_guest: ", buf, flag, (buf, "%lx\n", vca->runstate_guest)); XEN_HYPER_PRI(fp, len, "vcpu_flags: ", buf, flag, (buf, "%lx\n", vca->vcpu_flags)); } } } XEN_HYPER_PRI(fp, len, "idle_vcpu: ", buf, flag, (buf, "%lx\n", xhvct->idle_vcpu)); XEN_HYPER_PRI(fp, len, "idle_vcpu_context_array: ", buf, flag, (buf, "%p\n", xhvct->idle_vcpu_context_array)); XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n", xhvct->last)); XEN_HYPER_PRI(fp, len, "vcpu_struct: ", buf, flag, (buf, "%p\n", xhvct->vcpu_struct)); XEN_HYPER_PRI(fp, len, "vcpu_struct_verify: ", buf, flag, (buf, "%p\n", xhvct->vcpu_struct_verify)); } /* * "help -x pcp" output */ static void xen_hyper_dump_xen_hyper_pcpu_table(int verbose) { char buf[XEN_HYPER_CMD_BUFSIZE]; struct xen_hyper_pcpu_context *pcca; int len, flag, i; #ifdef X86_64 uint64_t *ist_p; int j; #endif len = 21; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "context_array: ", buf, flag, (buf, "%p\n", xhpct->context_array)); if (verbose) { for (i = 0, pcca = xhpct->context_array; i < XEN_HYPER_MAX_CPUS(); i++, pcca++) { snprintf(buf, XEN_HYPER_CMD_BUFSIZE, "context_array %d: ", i); XEN_HYPER_PRI_CONST(fp, len, buf, flag|XEN_HYPER_PRI_LF); XEN_HYPER_PRI(fp, len, "pcpu: ", buf, flag, (buf, "%lx\n", pcca->pcpu)); XEN_HYPER_PRI(fp, len, "processor_id: ", buf, flag, (buf, "%u\n", pcca->processor_id)); XEN_HYPER_PRI(fp, len, "guest_cpu_user_regs: ", buf, flag, (buf,
"%lx\n", pcca->guest_cpu_user_regs)); XEN_HYPER_PRI(fp, len, "current_vcpu: ", buf, flag, (buf, "%lx\n", pcca->current_vcpu)); XEN_HYPER_PRI(fp, len, "init_tss: ", buf, flag, (buf, "%lx\n", pcca->init_tss)); #ifdef X86 XEN_HYPER_PRI(fp, len, "sp.esp0: ", buf, flag, (buf, "%x\n", pcca->sp.esp0)); #endif #ifdef X86_64 XEN_HYPER_PRI(fp, len, "sp.rsp0: ", buf, flag, (buf, "%lx\n", pcca->sp.rsp0)); for (j = 0, ist_p = pcca->ist; j < XEN_HYPER_TSS_IST_MAX; j++, ist_p++) { XEN_HYPER_PRI(fp, len, "ist: ", buf, flag, (buf, "%lx\n", *ist_p)); } #endif } } XEN_HYPER_PRI(fp, len, "last: ", buf, flag, (buf, "%p\n", xhpct->last)); XEN_HYPER_PRI(fp, len, "pcpu_struct: ", buf, flag, (buf, "%p\n", xhpct->pcpu_struct)); } /* * "help -x sch" output */ static void xen_hyper_dump_xen_hyper_sched_table(int verbose) { struct xen_hyper_sched_context *schc; char buf[XEN_HYPER_CMD_BUFSIZE]; int len, flag, i; len = 21; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "name: ", buf, flag, (buf, "%s\n", xhscht->name)); XEN_HYPER_PRI(fp, len, "opt_sched: ", buf, flag, (buf, "%s\n", xhscht->opt_sched)); XEN_HYPER_PRI(fp, len, "sched_id: ", buf, flag, (buf, "%d\n", xhscht->sched_id)); XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, (buf, "%lx\n", xhscht->scheduler)); XEN_HYPER_PRI(fp, len, "scheduler_struct: ", buf, flag, (buf, "%p\n", xhscht->scheduler_struct)); XEN_HYPER_PRI(fp, len, "sched_context_array: ", buf, flag, (buf, "%p\n", xhscht->sched_context_array)); if (verbose) { for (i = 0, schc = xhscht->sched_context_array; i < xht->pcpus; i++, schc++) { XEN_HYPER_PRI(fp, len, "sched_context_array[", buf, flag, (buf, "%d]\n", i)); XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, (buf, "%lx\n", schc->schedule_data)); XEN_HYPER_PRI(fp, len, "curr: ", buf, flag, (buf, "%lx\n", schc->curr)); XEN_HYPER_PRI(fp, len, "idle: ", buf, flag, (buf, "%lx\n", schc->idle)); XEN_HYPER_PRI(fp, len, "sched_priv: ", buf, flag, (buf, "%lx\n", schc->sched_priv)); XEN_HYPER_PRI(fp, len, "tick: ", buf, flag, (buf, "%lx\n", schc->tick)); } } } /* * "help -x siz" output */ static void xen_hyper_dump_xen_hyper_size_table(char *spec, ulong makestruct) { char buf[XEN_HYPER_CMD_BUFSIZE]; int len, flag; len = 23; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "ELF_Prstatus: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.ELF_Prstatus)); XEN_HYPER_PRI(fp, len, "ELF_Signifo: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.ELF_Signifo)); XEN_HYPER_PRI(fp, len, "ELF_Gregset: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.ELF_Gregset)); XEN_HYPER_PRI(fp, len, "ELF_Timeval: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.ELF_Timeval)); XEN_HYPER_PRI(fp, len, "arch_domain: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.arch_domain)); XEN_HYPER_PRI(fp, len, "arch_shared_info: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.arch_shared_info)); XEN_HYPER_PRI(fp, len, "cpu_info: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.cpu_info)); XEN_HYPER_PRI(fp, len, "cpu_time: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.cpu_time)); XEN_HYPER_PRI(fp, len, "cpu_user_regs: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.cpu_user_regs)); XEN_HYPER_PRI(fp, len, "cpumask_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.cpumask_t)); XEN_HYPER_PRI(fp, len, "cpuinfo_ia64: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.cpuinfo_ia64)); XEN_HYPER_PRI(fp, len, "cpuinfo_x86: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.cpuinfo_x86)); XEN_HYPER_PRI(fp, len, "crash_note_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_note_t)); 
XEN_HYPER_PRI(fp, len, "crash_note_core_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_note_core_t)); XEN_HYPER_PRI(fp, len, "crash_note_xen_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_t)); XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_core_t)); XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_note_xen_info_t)); XEN_HYPER_PRI(fp, len, "crash_xen_core_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_xen_core_t)); XEN_HYPER_PRI(fp, len, "crash_xen_info_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.crash_xen_info_t)); XEN_HYPER_PRI(fp, len, "domain: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.domain)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "mm_struct: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.mm_struct)); #endif XEN_HYPER_PRI(fp, len, "note_buf_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.note_buf_t)); XEN_HYPER_PRI(fp, len, "schedule_data: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.schedule_data)); XEN_HYPER_PRI(fp, len, "scheduler: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.scheduler)); XEN_HYPER_PRI(fp, len, "shared_info: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.shared_info)); XEN_HYPER_PRI(fp, len, "timer: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.timer)); XEN_HYPER_PRI(fp, len, "tss: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.tss)); XEN_HYPER_PRI(fp, len, "vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.vcpu)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.vcpu_runstate_info)); XEN_HYPER_PRI(fp, len, "xen_crash_xen_regs_t: ", buf, flag, (buf, "%ld\n", xen_hyper_size_table.xen_crash_xen_regs_t)); } /* * "help -x ofs" output */ static void xen_hyper_dump_xen_hyper_offset_table(char *spec, ulong makestruct) { char buf[XEN_HYPER_CMD_BUFSIZE]; int len, flag; len = 45; flag = XEN_HYPER_PRI_R; XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_info)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cursig: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cursig)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sigpend: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sigpend)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sighold: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sighold)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pid)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_ppid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_ppid)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_pgrp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_pgrp)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_sid: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_sid)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_stime: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_stime)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cutime: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cutime)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_cstime: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_cstime)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_reg: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_reg)); XEN_HYPER_PRI(fp, len, "ELF_Prstatus_pr_fpvalid: ", buf, flag, 
(buf, "%ld\n", xen_hyper_offset_table.ELF_Prstatus_pr_fpvalid)); XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_sec: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_sec)); XEN_HYPER_PRI(fp, len, "ELF_Timeval_tv_usec: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.ELF_Timeval_tv_usec)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "arch_domain_mm: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_domain_mm)); #endif XEN_HYPER_PRI(fp, len, "arch_shared_info_max_pfn: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_max_pfn)); XEN_HYPER_PRI(fp, len, "arch_shared_info_pfn_to_mfn_frame_list_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_pfn_to_mfn_frame_list_list)); XEN_HYPER_PRI(fp, len, "arch_shared_info_nmi_reason: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.arch_shared_info_nmi_reason)); XEN_HYPER_PRI(fp, len, "cpu_info_guest_cpu_user_regs: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_info_guest_cpu_user_regs)); XEN_HYPER_PRI(fp, len, "cpu_info_processor_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_info_processor_id)); XEN_HYPER_PRI(fp, len, "cpu_info_current_vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_info_current_vcpu)); XEN_HYPER_PRI(fp, len, "cpu_time_local_tsc_stamp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_local_tsc_stamp)); XEN_HYPER_PRI(fp, len, "cpu_time_stime_local_stamp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_local_stamp)); XEN_HYPER_PRI(fp, len, "cpu_time_stime_master_stamp: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_stime_master_stamp)); XEN_HYPER_PRI(fp, len, "cpu_time_tsc_scale: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_tsc_scale)); XEN_HYPER_PRI(fp, len, "cpu_time_calibration_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.cpu_time_calibration_timer)); XEN_HYPER_PRI(fp, len, "crash_note_t_core: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_core)); XEN_HYPER_PRI(fp, len, "crash_note_t_xen: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen)); XEN_HYPER_PRI(fp, len, "crash_note_t_xen_regs: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_regs)); XEN_HYPER_PRI(fp, len, "crash_note_t_xen_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_t_xen_info)); XEN_HYPER_PRI(fp, len, "crash_note_core_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_core_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_core_t_desc)); XEN_HYPER_PRI(fp, len, "crash_note_xen_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_xen_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_t_desc)); XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_xen_core_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_core_t_desc)); XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_note: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_note)); XEN_HYPER_PRI(fp, len, "crash_note_xen_info_t_desc: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.crash_note_xen_info_t_desc)); XEN_HYPER_PRI(fp, len, "domain_page_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_page_list)); XEN_HYPER_PRI(fp, len, 
"domain_xenpage_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_xenpage_list)); XEN_HYPER_PRI(fp, len, "domain_domain_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_domain_id)); XEN_HYPER_PRI(fp, len, "domain_tot_pages: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_tot_pages)); XEN_HYPER_PRI(fp, len, "domain_max_pages: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_max_pages)); XEN_HYPER_PRI(fp, len, "domain_xenheap_pages: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_xenheap_pages)); XEN_HYPER_PRI(fp, len, "domain_shared_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_shared_info)); XEN_HYPER_PRI(fp, len, "domain_sched_priv: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_sched_priv)); XEN_HYPER_PRI(fp, len, "domain_next_in_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_next_in_list)); XEN_HYPER_PRI(fp, len, "domain_domain_flags: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_domain_flags)); XEN_HYPER_PRI(fp, len, "domain_evtchn: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_evtchn)); XEN_HYPER_PRI(fp, len, "domain_is_hvm: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_hvm)); XEN_HYPER_PRI(fp, len, "domain_guest_type: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_guest_type)); XEN_HYPER_PRI(fp, len, "domain_is_privileged: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_privileged)); XEN_HYPER_PRI(fp, len, "domain_debugger_attached: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_debugger_attached)); if (XEN_HYPER_VALID_MEMBER(domain_is_polling)) { XEN_HYPER_PRI(fp, len, "domain_is_polling: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_polling)); } XEN_HYPER_PRI(fp, len, "domain_is_dying: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_dying)); /* Only one of next both exists but print both, ones value is -1. 
*/ XEN_HYPER_PRI(fp, len, "domain_is_paused_by_controller: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_paused_by_controller)); XEN_HYPER_PRI(fp, len, "domain_controller_pause_count: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_controller_pause_count)); XEN_HYPER_PRI(fp, len, "domain_is_shutting_down: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_shutting_down)); XEN_HYPER_PRI(fp, len, "domain_is_shut_down: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_is_shut_down)); XEN_HYPER_PRI(fp, len, "domain_vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_vcpu)); XEN_HYPER_PRI(fp, len, "domain_arch: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.domain_arch)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "mm_struct_pgd: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.mm_struct_pgd)); #endif XEN_HYPER_PRI(fp, len, "schedule_data_schedule_lock: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_schedule_lock)); XEN_HYPER_PRI(fp, len, "schedule_data_curr: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_curr)); XEN_HYPER_PRI(fp, len, "schedule_data_idle: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_idle)); XEN_HYPER_PRI(fp, len, "schedule_data_sched_priv: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_sched_priv)); XEN_HYPER_PRI(fp, len, "schedule_data_s_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_s_timer)); XEN_HYPER_PRI(fp, len, "schedule_data_tick: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.schedule_data_tick)); XEN_HYPER_PRI(fp, len, "scheduler_name: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_name)); XEN_HYPER_PRI(fp, len, "scheduler_opt_name: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_opt_name)); XEN_HYPER_PRI(fp, len, "scheduler_sched_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_sched_id)); XEN_HYPER_PRI(fp, len, "scheduler_init: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_init)); XEN_HYPER_PRI(fp, len, "scheduler_tick: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_tick)); XEN_HYPER_PRI(fp, len, "scheduler_init_vcpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_init_vcpu)); XEN_HYPER_PRI(fp, len, "scheduler_destroy_domain: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_destroy_domain)); XEN_HYPER_PRI(fp, len, "scheduler_sleep: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_sleep)); XEN_HYPER_PRI(fp, len, "scheduler_wake: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_wake)); XEN_HYPER_PRI(fp, len, "scheduler_set_affinity: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_set_affinity)); XEN_HYPER_PRI(fp, len, "scheduler_do_schedule: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_do_schedule)); XEN_HYPER_PRI(fp, len, "scheduler_adjust: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_adjust)); XEN_HYPER_PRI(fp, len, "scheduler_dump_settings: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_settings)); XEN_HYPER_PRI(fp, len, "scheduler_dump_cpu_state: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.scheduler_dump_cpu_state)); XEN_HYPER_PRI(fp, len, "shared_info_vcpu_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_vcpu_info)); XEN_HYPER_PRI(fp, len, "shared_info_evtchn_pending: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_pending)); XEN_HYPER_PRI(fp, len, 
"shared_info_evtchn_mask: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_evtchn_mask)); XEN_HYPER_PRI(fp, len, "shared_info_arch: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.shared_info_arch)); XEN_HYPER_PRI(fp, len, "timer_expires: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_expires)); XEN_HYPER_PRI(fp, len, "timer_cpu: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_cpu)); XEN_HYPER_PRI(fp, len, "timer_function: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_function)); XEN_HYPER_PRI(fp, len, "timer_data: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_data)); XEN_HYPER_PRI(fp, len, "timer_heap_offset: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_heap_offset)); XEN_HYPER_PRI(fp, len, "timer_killed: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.timer_killed)); XEN_HYPER_PRI(fp, len, "tss_struct_rsp0: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.tss_rsp0)); XEN_HYPER_PRI(fp, len, "tss_struct_esp0: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.tss_esp0)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_id: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_id)); XEN_HYPER_PRI(fp, len, "vcpu_processor: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_processor)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_info: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_info)); XEN_HYPER_PRI(fp, len, "vcpu_domain: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_domain)); XEN_HYPER_PRI(fp, len, "vcpu_next_in_list: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_next_in_list)); XEN_HYPER_PRI(fp, len, "vcpu_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_timer)); XEN_HYPER_PRI(fp, len, "vcpu_sleep_tick: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_sleep_tick)); XEN_HYPER_PRI(fp, len, "vcpu_poll_timer: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_poll_timer)); XEN_HYPER_PRI(fp, len, "vcpu_sched_priv: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_sched_priv)); XEN_HYPER_PRI(fp, len, "vcpu_runstate: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_guest: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_guest)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_flags: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_flags)); XEN_HYPER_PRI(fp, len, "vcpu_pause_count: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_pause_count)); XEN_HYPER_PRI(fp, len, "vcpu_virq_to_evtchn: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_virq_to_evtchn)); XEN_HYPER_PRI(fp, len, "vcpu_cpu_affinity: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_cpu_affinity)); XEN_HYPER_PRI(fp, len, "vcpu_nmi_addr: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_nmi_addr)); XEN_HYPER_PRI(fp, len, "vcpu_vcpu_dirty_cpumask: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_vcpu_dirty_cpumask)); XEN_HYPER_PRI(fp, len, "vcpu_arch: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_arch)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_state_entry_time: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_state_entry_time)); XEN_HYPER_PRI(fp, len, "vcpu_runstate_info_time: ", buf, flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_runstate_info_time)); #ifdef IA64 XEN_HYPER_PRI(fp, len, "vcpu_thread_ksp: ", buf, 
flag, (buf, "%ld\n", xen_hyper_offset_table.vcpu_thread_ksp)); #endif } /* * dump specified memory with specified size. */ #define DSP_BYTE_SIZE 16 static void xen_hyper_dump_mem(void *mem, ulong len, int dsz) { long i, max; void *mem_w = mem; if (!len || (dsz != SIZEOF_8BIT && dsz != SIZEOF_16BIT && dsz != SIZEOF_32BIT && dsz != SIZEOF_64BIT)) return; max = len / dsz + (len % dsz ? 1 : 0); for (i = 0; i < max; i++) { if (i != 0 && !(i % (DSP_BYTE_SIZE / dsz))) fprintf(fp, "\n"); if (i == 0 || !(i % (DSP_BYTE_SIZE / dsz))) fprintf(fp, "%p : ", mem_w); if (dsz == SIZEOF_8BIT) fprintf(fp, "%02x ", *(uint8_t *)mem_w); else if (dsz == SIZEOF_16BIT) fprintf(fp, "%04x ", *(uint16_t *)mem_w); else if (dsz == SIZEOF_32BIT) fprintf(fp, "%08x ", *(uint32_t *)mem_w); else if (dsz == SIZEOF_64BIT) fprintf(fp, "%016llx ", *(unsigned long long *)mem_w); mem_w = (char *)mem_w + dsz; } fprintf(fp, "\n"); } #endif crash-7.2.8/rse.h0000664000000000000000000000461513614623427012310 0ustar rootroot#ifndef _ASM_IA64_RSE_H #define _ASM_IA64_RSE_H /* * Copyright (C) 1998, 1999 Hewlett-Packard Co * Copyright (C) 1998, 1999 David Mosberger-Tang */ /* * rse.h * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * include/asm-ia64/rse.h (2.4.9-e.3) */ /* * Register stack engine related helper functions. This file may be * used in applications, so be careful about the name-space and give * some consideration to non-GNU C compilers (though __inline__ is * fine). */ static __inline__ unsigned long ia64_rse_slot_num (unsigned long *addr) { return (((unsigned long) addr) >> 3) & 0x3f; } /* * Return TRUE if ADDR is the address of an RNAT slot. */ static __inline__ unsigned long ia64_rse_is_rnat_slot (unsigned long *addr) { return ia64_rse_slot_num(addr) == 0x3f; } /* * Returns the address of the RNAT slot that covers the slot at * address SLOT_ADDR. */ static __inline__ unsigned long * ia64_rse_rnat_addr (unsigned long *slot_addr) { return (unsigned long *) ((unsigned long) slot_addr | (0x3f << 3)); } /* * Calcuate the number of registers in the dirty partition starting at * BSPSTORE with a size of DIRTY bytes. This isn't simply DIRTY * divided by eight because the 64th slot is used to store ar.rnat. */ static __inline__ unsigned long ia64_rse_num_regs (unsigned long *bspstore, unsigned long *bsp) { unsigned long slots = (bsp - bspstore); return slots - (ia64_rse_slot_num(bspstore) + slots)/0x40; } /* * The inverse of the above: given bspstore and the number of * registers, calculate ar.bsp. */ static __inline__ unsigned long * ia64_rse_skip_regs (unsigned long *addr, long num_regs) { long delta = ia64_rse_slot_num(addr) + num_regs; if (num_regs < 0) delta -= 0x3e; return addr + num_regs + delta/0x3f; } #endif /* _ASM_IA64_RSE_H */ crash-7.2.8/memory.c0000775000000000000000000206505713614623427013036 0ustar rootroot/* memory.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. 
* Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include #include #include struct meminfo { /* general purpose memory information structure */ ulong cache; /* used by the various memory searching/dumping */ ulong slab; /* routines. Only one of these is used per cmd */ ulong c_flags; /* so stuff whatever's helpful in here... */ ulong c_offset; ulong c_num; ulong s_mem; void *s_freep; ulong *s_index; ulong s_inuse; ulong cpucached_cache; ulong cpucached_slab; ulong inuse; ulong order; ulong slabsize; ulong num_slabs; ulong objects; ulonglong spec_addr; ulong flags; ulong size; ulong objsize; int memtype; int free; int slab_offset; char *reqname; char *curname; ulong *addrlist; int *kmem_bufctl; ulong *cpudata[NR_CPUS]; ulong *shared_array_cache; int current_cache_index; ulong found; ulong retval; struct struct_member_data *page_member_cache; ulong nr_members; char *ignore; int errors; int calls; int cpu; int cache_count; ulong get_shared; ulong get_totalram; ulong get_buffers; ulong get_slabs; char *slab_buf; char *cache_buf; ulong *cache_list; struct vmlist { ulong addr; ulong size; } *vmlist; ulong container; int *freelist; int freelist_index_size; ulong random; }; /* * Search modes */ #define SEARCH_ULONG (0) #define SEARCH_UINT (1) #define SEARCH_USHORT (2) #define SEARCH_CHARS (3) #define SEARCH_DEFAULT (SEARCH_ULONG) /* search mode information */ struct searchinfo { int mode; int vcnt; int val; int context; int memtype; int do_task_header; int tasks_found; struct task_context *task_context; ulong vaddr_start; ulong vaddr_end; ulonglong paddr_start; ulonglong paddr_end; union { /* default ulong search */ struct { ulong value[MAXARGS]; char *opt_string[MAXARGS]; ulong mask; } s_ulong; /* uint search */ struct { uint value[MAXARGS]; char *opt_string[MAXARGS]; uint mask; } s_uint; /* ushort search */ struct { ushort value[MAXARGS]; char *opt_string[MAXARGS]; ushort mask; } s_ushort; /* string (chars) search */ struct { char *value[MAXARGS]; int len[MAXARGS]; int started_flag; /* string search needs history */ } s_chars; } s_parms; char buf[BUFSIZE]; }; static char *memtype_string(int, int); static char *error_handle_string(ulong); static void collect_page_member_data(char *, struct meminfo *); struct integer_data { ulong value; ulong bitfield_value; struct struct_member_data *pmd; }; static int get_bitfield_data(struct integer_data *); static int show_page_member_data(char *, ulong, struct meminfo *, char *); static void dump_mem_map(struct meminfo *); static void dump_mem_map_SPARSEMEM(struct meminfo *); static void fill_mem_map_cache(ulong, ulong, char *); static void page_flags_init(void); static int page_flags_init_from_pageflag_names(void); static int page_flags_init_from_pageflags_enum(void); static int translate_page_flags(char *, ulong); static void dump_free_pages(struct meminfo *); static int dump_zone_page_usage(void); static void dump_multidimensional_free_pages(struct meminfo 
*); static void dump_free_pages_zones_v1(struct meminfo *); static void dump_free_pages_zones_v2(struct meminfo *); struct free_page_callback_data; static int dump_zone_free_area(ulong, int, ulong, struct free_page_callback_data *); static void dump_page_hash_table(struct meminfo *); static void kmem_search(struct meminfo *); static void kmem_cache_init(void); static void kmem_cache_init_slub(void); static ulong max_cpudata_limit(ulong, ulong *); static int kmem_cache_downsize(void); static int ignore_cache(struct meminfo *, char *); static char *is_kmem_cache_addr(ulong, char *); static char *is_kmem_cache_addr_common(ulong, char *); static void kmem_cache_list(struct meminfo *); static void dump_kmem_cache(struct meminfo *); static void dump_kmem_cache_percpu_v1(struct meminfo *); static void dump_kmem_cache_percpu_v2(struct meminfo *); static void dump_kmem_cache_slub(struct meminfo *); static void kmem_cache_list_common(struct meminfo *); static ulong get_cpu_slab_ptr(struct meminfo *, int, ulong *); static unsigned int oo_order(ulong); static unsigned int oo_objects(ulong); static char *vaddr_to_kmem_cache(ulong, char *, int); static char *is_slab_overload_page(ulong, ulong *, char *); static ulong vaddr_to_slab(ulong); static void do_slab_chain(int, struct meminfo *); static void do_slab_chain_percpu_v1(long, struct meminfo *); static void do_slab_chain_percpu_v2(long, struct meminfo *); static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *); static void do_slab_chain_slab_overload_page(long, struct meminfo *); static int slab_freelist_index_size(void); static int do_slab_slub(struct meminfo *, int); static void do_kmem_cache_slub(struct meminfo *); static void save_slab_data(struct meminfo *); static int slab_data_saved(struct meminfo *); static void dump_saved_slab_data(void); static void dump_slab(struct meminfo *); static void dump_slab_percpu_v1(struct meminfo *); static void dump_slab_percpu_v2(struct meminfo *); static void dump_slab_overload_page(struct meminfo *); static int verify_slab_v1(struct meminfo *, ulong, int); static int verify_slab_v2(struct meminfo *, ulong, int); static int verify_slab_overload_page(struct meminfo *, ulong, int); static void gather_slab_free_list(struct meminfo *); static void gather_slab_free_list_percpu(struct meminfo *); static void gather_slab_free_list_slab_overload_page(struct meminfo *); static void gather_cpudata_list_v1(struct meminfo *); static void gather_cpudata_list_v2(struct meminfo *); static void gather_cpudata_list_v2_nodes(struct meminfo *, int); static int check_cpudata_list(struct meminfo *, ulong); static int check_shared_list(struct meminfo *, ulong); static void gather_slab_cached_count(struct meminfo *); static void dump_slab_objects(struct meminfo *); static void dump_slab_objects_percpu(struct meminfo *); static void dump_vmlist(struct meminfo *); static void dump_vmap_area(struct meminfo *); static int dump_page_lists(struct meminfo *); static void dump_kmeminfo(void); static int page_to_phys(ulong, physaddr_t *); static void display_memory(ulonglong, long, ulong, int, void *); static char *show_opt_string(struct searchinfo *); static void display_with_pre_and_post(void *, ulonglong, struct searchinfo *); static ulong search_ulong(ulong *, ulong, int, struct searchinfo *); static ulong search_uint(ulong *, ulong, int, struct searchinfo *); static ulong search_ushort(ulong *, ulong, int, struct searchinfo *); static ulong search_chars(ulong *, ulong, int, struct searchinfo *); static ulonglong 
search_ulong_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_uint_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_ushort_p(ulong *, ulonglong, int, struct searchinfo *); static ulonglong search_chars_p(ulong *, ulonglong, int, struct searchinfo *); static void search_virtual(struct searchinfo *); static void search_physical(struct searchinfo *); static int next_upage(struct task_context *, ulong, ulong *); static int next_kpage(ulong, ulong *); static int next_physpage(ulonglong, ulonglong *); static int next_vmlist_vaddr(ulong, ulong *); static int next_module_vaddr(ulong, ulong *); static int next_identity_mapping(ulong, ulong *); static int vm_area_page_dump(ulong, ulong, ulong, ulong, ulong, struct reference *); static void rss_page_types_init(void); static int dump_swap_info(ulong, ulong *, ulong *); static int get_hugetlb_total_pages(ulong *, ulong *); static void swap_info_init(void); static char *get_swapdev(ulong, char *); static void fill_swap_info(ulong); static char *vma_file_offset(ulong, ulong, char *); static ssize_t read_dev_kmem(ulong, char *, long); static void dump_memory_nodes(int); static void dump_zone_stats(void); #define MEMORY_NODES_DUMP (0) #define MEMORY_NODES_INITIALIZE (1) static void node_table_init(void); static int compare_node_data(const void *, const void *); static void do_vm_flags(ulonglong); static ulonglong get_vm_flags(char *); static void PG_reserved_flag_init(void); static void PG_slab_flag_init(void); static ulong nr_blockdev_pages(void); void sparse_mem_init(void); void dump_mem_sections(int); void dump_memory_blocks(int); void list_mem_sections(void); ulong sparse_decode_mem_map(ulong, ulong); char *read_mem_section(ulong); ulong nr_to_section(ulong); int valid_section(ulong); int section_has_mem_map(ulong); ulong section_mem_map_addr(ulong, int); ulong valid_section_nr(ulong); ulong pfn_to_map(ulong); static int get_nodes_online(void); static int next_online_node(int); static ulong next_online_pgdat(int); static int vm_stat_init(void); static int vm_event_state_init(void); static int dump_vm_stat(char *, long *, ulong); static int dump_vm_event_state(void); static int dump_page_states(void); static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong); static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong); static int page_to_nid(ulong); static int get_kmem_cache_list(ulong **); static int get_kmem_cache_root_list(ulong **); static int get_kmem_cache_child_list(ulong **, ulong); static int get_kmem_cache_slub_data(long, struct meminfo *); static ulong compound_head(ulong); static long count_partial(ulong, struct meminfo *, ulong *); static short count_cpu_partial(struct meminfo *, int); static ulong get_freepointer(struct meminfo *, void *); static int count_free_objects(struct meminfo *, ulong); char *is_slab_page(struct meminfo *, char *); static void do_cpu_partial_slub(struct meminfo *, int); static void do_node_lists_slub(struct meminfo *, ulong, int); static int devmem_is_restricted(void); static int switch_to_proc_kcore(void); static int verify_pfn(ulong); static void dump_per_cpu_offsets(void); static void dump_page_flags(ulonglong); static ulong kmem_cache_nodelists(ulong); static void dump_hstates(void); static ulong freelist_ptr(struct meminfo *, ulong, ulong); /* * Memory display modes specific to this file. 
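 *
 * These are OR'ed together into the flags argument of display_memory();
 * an "rd -32 -s"-style request, for instance, maps to something like
 * HEXADECIMAL|DISPLAY_32|SYMBOLIC (an illustrative combination, not an
 * exhaustive map of the rd option parser), and mem_init() below selects
 * DISPLAY_32 or DISPLAY_64 as DISPLAY_DEFAULT from sizeof(long).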
*/ #define DISPLAY_8 (0x2) #define DISPLAY_16 (0x4) #define DISPLAY_32 (0x8) #define DISPLAY_64 (0x10) #define SHOW_OFFSET (0x20) #define SYMBOLIC (0x40) #define HEXADECIMAL (0x80) #define DECIMAL (0x100) #define UDECIMAL (0x200) #define ASCII_ENDLINE (0x400) #define NO_ASCII (0x800) #define SLAB_CACHE (0x1000) #define DISPLAY_ASCII (0x2000) #define NET_ENDIAN (0x4000) #define DISPLAY_RAW (0x8000) #define NO_ERROR (0x10000) #define SLAB_CACHE2 (0x20000) #define DISPLAY_TYPES (DISPLAY_RAW|DISPLAY_ASCII|DISPLAY_8|\ DISPLAY_16|DISPLAY_32|DISPLAY_64) #define ASCII_UNLIMITED ((ulong)(-1) >> 1) static ulong DISPLAY_DEFAULT; /* * Verify that the sizes of the primitive types are reasonable. */ void mem_init(void) { if (sizeof(char) != SIZEOF_8BIT) error(FATAL, "unsupported sizeof(char): %d\n", (int)sizeof(char)); if (sizeof(short) != SIZEOF_16BIT) error(FATAL, "unsupported sizeof(short): %d\n", (int)sizeof(short)); if ((sizeof(int) != SIZEOF_32BIT) && (sizeof(int) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(int): %d\n", (int)sizeof(int)); if ((sizeof(long) != SIZEOF_32BIT) && (sizeof(long) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(long): %d\n", (int)sizeof(long)); if (sizeof(void *) != sizeof(long)) error(FATAL, "pointer size: %d is not sizeof(long): %d\n", (int)sizeof(void *), (int)sizeof(long)); DISPLAY_DEFAULT = (sizeof(long) == 8) ? DISPLAY_64 : DISPLAY_32; } /* * Stash a few popular offsets and some basic kernel virtual memory * items used by routines in this file. */ void vm_init(void) { char buf[BUFSIZE]; int i, len, dimension, nr_node_ids; struct syment *sp_array[2]; ulong value1, value2; char *kmem_cache_node_struct, *nodelists_field; MEMBER_OFFSET_INIT(task_struct_mm, "task_struct", "mm"); MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); if (!VALID_MEMBER(mm_struct_rss)) MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss"); MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss"); MEMBER_OFFSET_INIT(mm_struct_file_rss, "mm_struct", "_file_rss"); if (!VALID_MEMBER(mm_struct_anon_rss)) { MEMBER_OFFSET_INIT(mm_struct_rss_stat, "mm_struct", "rss_stat"); MEMBER_OFFSET_INIT(mm_rss_stat_count, "mm_rss_stat", "count"); } MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm"); MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code"); MEMBER_OFFSET_INIT(mm_struct_mm_count, "mm_struct", "mm_count"); MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm"); MEMBER_OFFSET_INIT(vm_area_struct_vm_next, "vm_area_struct", "vm_next"); MEMBER_OFFSET_INIT(vm_area_struct_vm_end, "vm_area_struct", "vm_end"); MEMBER_OFFSET_INIT(vm_area_struct_vm_start, "vm_area_struct", "vm_start"); MEMBER_OFFSET_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags"); MEMBER_OFFSET_INIT(vm_area_struct_vm_file, "vm_area_struct", "vm_file"); MEMBER_OFFSET_INIT(vm_area_struct_vm_offset, "vm_area_struct", "vm_offset"); MEMBER_OFFSET_INIT(vm_area_struct_vm_pgoff, "vm_area_struct", "vm_pgoff"); MEMBER_SIZE_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags"); MEMBER_OFFSET_INIT(vm_struct_addr, "vm_struct", "addr"); MEMBER_OFFSET_INIT(vm_struct_size, "vm_struct", "size"); MEMBER_OFFSET_INIT(vm_struct_next, "vm_struct", "next"); MEMBER_OFFSET_INIT(vmap_area_va_start, "vmap_area", "va_start"); MEMBER_OFFSET_INIT(vmap_area_va_end, "vmap_area", "va_end"); MEMBER_OFFSET_INIT(vmap_area_list, "vmap_area", "list"); MEMBER_OFFSET_INIT(vmap_area_flags, "vmap_area", "flags");
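/*
 * A sketch of how these cached offsets are consumed throughout this
 * file -- readmem() plus OFFSET() is the idiom ("vma" below is just an
 * illustrative vm_area_struct address, not a variable in this scope):
 *
 *	ulong vm_start;
 *	readmem(vma + OFFSET(vm_area_struct_vm_start), KVADDR,
 *	    &vm_start, sizeof(ulong), "vm_area_struct vm_start",
 *	    FAULT_ON_ERROR);
 */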
MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "vm"); if (INVALID_MEMBER(vmap_area_vm)) MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "private"); STRUCT_SIZE_INIT(vmap_area, "vmap_area"); if (VALID_MEMBER(vmap_area_va_start) && VALID_MEMBER(vmap_area_va_end) && VALID_MEMBER(vmap_area_list) && VALID_MEMBER(vmap_area_vm) && (VALID_MEMBER(vmap_area_flags) || (OFFSET(vmap_area_vm) == MEMBER_OFFSET("vmap_area", "purge_list"))) && kernel_symbol_exists("vmap_area_list")) vt->flags |= USE_VMAP_AREA; if (kernel_symbol_exists("hstates")) { STRUCT_SIZE_INIT(hstate, "hstate"); MEMBER_OFFSET_INIT(hstate_order, "hstate", "order"); MEMBER_OFFSET_INIT(hstate_nr_huge_pages, "hstate", "nr_huge_pages"); MEMBER_OFFSET_INIT(hstate_free_huge_pages, "hstate", "free_huge_pages"); MEMBER_OFFSET_INIT(hstate_name, "hstate", "name"); } MEMBER_OFFSET_INIT(page_next, "page", "next"); if (VALID_MEMBER(page_next)) MEMBER_OFFSET_INIT(page_prev, "page", "prev"); if (INVALID_MEMBER(page_next)) ANON_MEMBER_OFFSET_INIT(page_next, "page", "next"); MEMBER_OFFSET_INIT(page_list, "page", "list"); if (VALID_MEMBER(page_list)) { ASSIGN_OFFSET(page_list_next) = OFFSET(page_list) + OFFSET(list_head_next); ASSIGN_OFFSET(page_list_prev) = OFFSET(page_list) + OFFSET(list_head_prev); } MEMBER_OFFSET_INIT(page_next_hash, "page", "next_hash"); MEMBER_OFFSET_INIT(page_inode, "page", "inode"); MEMBER_OFFSET_INIT(page_offset, "page", "offset"); MEMBER_OFFSET_INIT(page_count, "page", "count"); if (INVALID_MEMBER(page_count)) { MEMBER_OFFSET_INIT(page_count, "page", "_count"); if (INVALID_MEMBER(page_count)) ANON_MEMBER_OFFSET_INIT(page_count, "page", "_count"); if (INVALID_MEMBER(page_count)) MEMBER_OFFSET_INIT(page_count, "page", "_refcount"); if (INVALID_MEMBER(page_count)) ANON_MEMBER_OFFSET_INIT(page_count, "page", "_refcount"); } MEMBER_OFFSET_INIT(page_flags, "page", "flags"); MEMBER_SIZE_INIT(page_flags, "page", "flags"); MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); if (INVALID_MEMBER(page_mapping)) ANON_MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); if (INVALID_MEMBER(page_mapping) && (THIS_KERNEL_VERSION < LINUX(2,6,17)) && MEMBER_EXISTS("page", "_mapcount")) ASSIGN_OFFSET(page_mapping) = MEMBER_OFFSET("page", "_mapcount") + STRUCT_SIZE("atomic_t") + sizeof(ulong); MEMBER_OFFSET_INIT(page_index, "page", "index"); if (INVALID_MEMBER(page_index)) ANON_MEMBER_OFFSET_INIT(page_index, "page", "index"); MEMBER_OFFSET_INIT(page_buffers, "page", "buffers"); MEMBER_OFFSET_INIT(page_lru, "page", "lru"); if (INVALID_MEMBER(page_lru)) ANON_MEMBER_OFFSET_INIT(page_lru, "page", "lru"); MEMBER_OFFSET_INIT(page_pte, "page", "pte"); MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head"); if (INVALID_MEMBER(page_compound_head)) ANON_MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head"); MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(swap_info_struct_swap_file, "swap_info_struct", "swap_file"); MEMBER_OFFSET_INIT(swap_info_struct_swap_vfsmnt, "swap_info_struct", "swap_vfsmnt"); MEMBER_OFFSET_INIT(swap_info_struct_flags, "swap_info_struct", "flags"); MEMBER_OFFSET_INIT(swap_info_struct_swap_map, "swap_info_struct", "swap_map"); MEMBER_OFFSET_INIT(swap_info_struct_swap_device, "swap_info_struct", "swap_device"); MEMBER_OFFSET_INIT(swap_info_struct_prio, "swap_info_struct", "prio"); MEMBER_OFFSET_INIT(swap_info_struct_max, "swap_info_struct", "max"); MEMBER_OFFSET_INIT(swap_info_struct_pages, "swap_info_struct", "pages"); MEMBER_OFFSET_INIT(swap_info_struct_inuse_pages, "swap_info_struct", 
"inuse_pages"); MEMBER_OFFSET_INIT(swap_info_struct_old_block_size, "swap_info_struct", "old_block_size"); MEMBER_OFFSET_INIT(block_device_bd_inode, "block_device", "bd_inode"); MEMBER_OFFSET_INIT(block_device_bd_list, "block_device", "bd_list"); MEMBER_OFFSET_INIT(block_device_bd_disk, "block_device", "bd_disk"); MEMBER_OFFSET_INIT(inode_i_mapping, "inode", "i_mapping"); MEMBER_OFFSET_INIT(address_space_page_tree, "address_space", "page_tree"); if (INVALID_MEMBER(address_space_page_tree)) MEMBER_OFFSET_INIT(address_space_page_tree, "address_space", "i_pages"); MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "nrpages"); if (INVALID_MEMBER(address_space_nrpages)) MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "__nrpages"); MEMBER_OFFSET_INIT(gendisk_major, "gendisk", "major"); MEMBER_OFFSET_INIT(gendisk_fops, "gendisk", "fops"); MEMBER_OFFSET_INIT(gendisk_disk_name, "gendisk", "disk_name"); STRUCT_SIZE_INIT(block_device, "block_device"); STRUCT_SIZE_INIT(address_space, "address_space"); STRUCT_SIZE_INIT(gendisk, "gendisk"); STRUCT_SIZE_INIT(blk_major_name, "blk_major_name"); if (VALID_STRUCT(blk_major_name)) { MEMBER_OFFSET_INIT(blk_major_name_next, "blk_major_name", "next"); MEMBER_OFFSET_INIT(blk_major_name_name, "blk_major_name", "name"); MEMBER_OFFSET_INIT(blk_major_name_major, "blk_major_name", "major"); } STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s"); STRUCT_SIZE_INIT(slab_s, "slab_s"); STRUCT_SIZE_INIT(slab, "slab"); STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); STRUCT_SIZE_INIT(pgd_t, "pgd_t"); /* * slab: overload struct slab over struct page * https://lkml.org/lkml/2013/10/16/155 */ if (MEMBER_EXISTS("kmem_cache", "freelist_cache")) { vt->flags |= SLAB_OVERLOAD_PAGE; ANON_MEMBER_OFFSET_INIT(page_s_mem, "page", "s_mem"); ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); ANON_MEMBER_OFFSET_INIT(page_active, "page", "active"); } if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { vt->flags |= PERCPU_KMALLOC_V1; MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs, "kmem_cache_s", "slabs"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_full, "kmem_cache_s", "slabs_full"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_partial, "kmem_cache_s", "slabs_partial"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_free, "kmem_cache_s", "slabs_free"); MEMBER_OFFSET_INIT(kmem_cache_s_cpudata, "kmem_cache_s", "cpudata"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.cpudata", NULL, 0); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off"); MEMBER_OFFSET_INIT(slab_s_list, "slab_s", "list"); MEMBER_OFFSET_INIT(slab_s_s_mem, "slab_s", "s_mem"); MEMBER_OFFSET_INIT(slab_s_inuse, "slab_s", "inuse"); MEMBER_OFFSET_INIT(slab_s_free, "slab_s", "free"); MEMBER_OFFSET_INIT(cpucache_s_avail, "cpucache_s", "avail"); MEMBER_OFFSET_INIT(cpucache_s_limit, "cpucache_s", "limit"); STRUCT_SIZE_INIT(cpucache_s, "cpucache_s"); } else if (!VALID_STRUCT(kmem_slab_s) && !VALID_STRUCT(slab_s) && (VALID_STRUCT(slab) || (vt->flags & SLAB_OVERLOAD_PAGE))) { vt->flags |= PERCPU_KMALLOC_V2; if (VALID_STRUCT(kmem_cache_s)) { MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, 
"kmem_cache_s", "next"); MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off"); MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); } else { STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache"); MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next"); if (INVALID_MEMBER(kmem_cache_s_next)) { /* * slab/slub unification starting in Linux 3.6. */ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); } MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", "colour_off"); if (MEMBER_EXISTS("kmem_cache", "objsize")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "objsize"); else if (MEMBER_EXISTS("kmem_cache", "buffer_size")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "buffer_size"); else if (MEMBER_EXISTS("kmem_cache", "size")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "size"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_cpu_cache, "kmem_cache", "cpu_cache"); if (MEMBER_EXISTS("kmem_cache", "lists")) MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists"); else if (MEMBER_EXISTS("kmem_cache", "nodelists") || MEMBER_EXISTS("kmem_cache", "node")) { nodelists_field = MEMBER_EXISTS("kmem_cache", "node") ? "node" : "nodelists"; vt->flags |= PERCPU_KMALLOC_V2_NODES; MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", nodelists_field); if (MEMBER_TYPE("kmem_cache", nodelists_field) == TYPE_CODE_PTR) { /* * nodelists now a pointer to an outside array */ vt->flags |= NODELISTS_IS_PTR; if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; } else if (VALID_MEMBER(kmem_cache_cpu_cache)) { /* * commit bf0dea23a9c094ae869a88bb694fbe966671bf6d * mm/slab: use percpu allocator for cpu cache */ vt->flags |= SLAB_CPU_CACHE; MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; } else { /* * This should never happen with kmem_cache.node, * only with kmem_cache.nodelists */ ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, "kmem_cache.nodelists", NULL, 0); } } MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0); } if (VALID_STRUCT(slab)) { MEMBER_OFFSET_INIT(slab_list, "slab", "list"); MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); MEMBER_OFFSET_INIT(slab_free, "slab", "free"); /* * slab members were moved to an anonymous union in 2.6.39. 
*/ if (INVALID_MEMBER(slab_list)) ANON_MEMBER_OFFSET_INIT(slab_list, "slab", "list"); if (INVALID_MEMBER(slab_s_mem)) ANON_MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); if (INVALID_MEMBER(slab_inuse)) ANON_MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); if (INVALID_MEMBER(slab_free)) ANON_MEMBER_OFFSET_INIT(slab_free, "slab", "free"); } MEMBER_OFFSET_INIT(array_cache_avail, "array_cache", "avail"); MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit"); STRUCT_SIZE_INIT(array_cache, "array_cache"); /* * kmem_list3 renamed to kmem_cache_node in kernel 3.11-rc1 */ kmem_cache_node_struct = STRUCT_EXISTS("kmem_cache_node") ? "kmem_cache_node" : "kmem_list3"; MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, kmem_cache_node_struct, "slabs_partial"); MEMBER_OFFSET_INIT(kmem_list3_slabs_full, kmem_cache_node_struct, "slabs_full"); MEMBER_OFFSET_INIT(kmem_list3_slabs_free, kmem_cache_node_struct, "slabs_free"); MEMBER_OFFSET_INIT(kmem_list3_free_objects, kmem_cache_node_struct, "free_objects"); MEMBER_OFFSET_INIT(kmem_list3_shared, kmem_cache_node_struct, "shared"); /* * Common to slab/slub */ MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); if (INVALID_MEMBER(page_slab_page)) ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); if (INVALID_MEMBER(page_first_page)) ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); } else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") && STRUCT_EXISTS("kmem_cache_node")) { vt->flags |= KMALLOC_SLUB; STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "objsize"); if (INVALID_MEMBER(kmem_cache_objsize)) MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "object_size"); MEMBER_OFFSET_INIT(kmem_cache_offset, "kmem_cache", "offset"); MEMBER_OFFSET_INIT(kmem_cache_order, "kmem_cache", "order"); MEMBER_OFFSET_INIT(kmem_cache_local_node, "kmem_cache", "local_node"); MEMBER_OFFSET_INIT(kmem_cache_objects, "kmem_cache", "objects"); MEMBER_OFFSET_INIT(kmem_cache_inuse, "kmem_cache", "inuse"); MEMBER_OFFSET_INIT(kmem_cache_align, "kmem_cache", "align"); MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab"); MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_red_left_pad, "kmem_cache", "red_left_pad"); MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags"); MEMBER_OFFSET_INIT(kmem_cache_random, "kmem_cache", "random"); MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist"); MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page"); MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node"); MEMBER_OFFSET_INIT(kmem_cache_cpu_partial, "kmem_cache_cpu", "partial"); MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); if (INVALID_MEMBER(page_inuse)) ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); MEMBER_OFFSET_INIT(page_offset, "page", "offset"); if (INVALID_MEMBER(page_offset)) ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset"); MEMBER_OFFSET_INIT(page_slab, "page", "slab"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab"); if (INVALID_MEMBER(page_slab)) { MEMBER_OFFSET_INIT(page_slab, "page", 
"slab_cache"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); } MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); if (INVALID_MEMBER(page_slab_page)) ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); if (INVALID_MEMBER(page_first_page)) ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); if (INVALID_MEMBER(page_freelist)) ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); if (INVALID_MEMBER(kmem_cache_objects)) { MEMBER_OFFSET_INIT(kmem_cache_oo, "kmem_cache", "oo"); /* NOTE: returns offset of containing bitfield */ ANON_MEMBER_OFFSET_INIT(page_objects, "page", "objects"); } if (VALID_MEMBER(kmem_cache_node)) { ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.node", NULL, 0); vt->flags |= CONFIG_NUMA; } ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.cpu_slab", NULL, 0); STRUCT_SIZE_INIT(kmem_cache_node, "kmem_cache_node"); STRUCT_SIZE_INIT(kmem_cache_cpu, "kmem_cache_cpu"); MEMBER_OFFSET_INIT(kmem_cache_node_nr_partial, "kmem_cache_node", "nr_partial"); MEMBER_OFFSET_INIT(kmem_cache_node_nr_slabs, "kmem_cache_node", "nr_slabs"); MEMBER_OFFSET_INIT(kmem_cache_node_total_objects, "kmem_cache_node", "total_objects"); MEMBER_OFFSET_INIT(kmem_cache_node_partial, "kmem_cache_node", "partial"); MEMBER_OFFSET_INIT(kmem_cache_node_full, "kmem_cache_node", "full"); } else { MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, "kmem_cache_s", "c_nextp"); MEMBER_OFFSET_INIT(kmem_cache_s_c_name, "kmem_cache_s", "c_name"); MEMBER_OFFSET_INIT(kmem_cache_s_c_num, "kmem_cache_s", "c_num"); MEMBER_OFFSET_INIT(kmem_cache_s_c_org_size, "kmem_cache_s", "c_org_size"); MEMBER_OFFSET_INIT(kmem_cache_s_c_flags, "kmem_cache_s", "c_flags"); MEMBER_OFFSET_INIT(kmem_cache_s_c_offset, "kmem_cache_s", "c_offset"); MEMBER_OFFSET_INIT(kmem_cache_s_c_firstp, "kmem_cache_s", "c_firstp"); MEMBER_OFFSET_INIT(kmem_cache_s_c_gfporder, "kmem_cache_s", "c_gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_c_magic, "kmem_cache_s", "c_magic"); MEMBER_OFFSET_INIT(kmem_cache_s_c_align, "kmem_cache_s", "c_align"); MEMBER_OFFSET_INIT(kmem_slab_s_s_nextp, "kmem_slab_s", "s_nextp"); MEMBER_OFFSET_INIT(kmem_slab_s_s_freep, "kmem_slab_s", "s_freep"); MEMBER_OFFSET_INIT(kmem_slab_s_s_inuse, "kmem_slab_s", "s_inuse"); MEMBER_OFFSET_INIT(kmem_slab_s_s_mem, "kmem_slab_s", "s_mem"); MEMBER_OFFSET_INIT(kmem_slab_s_s_index, "kmem_slab_s", "s_index"); MEMBER_OFFSET_INIT(kmem_slab_s_s_offset, "kmem_slab_s", "s_offset"); MEMBER_OFFSET_INIT(kmem_slab_s_s_magic, "kmem_slab_s", "s_magic"); } if (kernel_symbol_exists("slab_root_caches")) { MEMBER_OFFSET_INIT(kmem_cache_memcg_params, "kmem_cache", "memcg_params"); MEMBER_OFFSET_INIT(memcg_cache_params___root_caches_node, "memcg_cache_params", "__root_caches_node"); MEMBER_OFFSET_INIT(memcg_cache_params_children, "memcg_cache_params", "children"); MEMBER_OFFSET_INIT(memcg_cache_params_children_node, "memcg_cache_params", "children_node"); if (VALID_MEMBER(kmem_cache_memcg_params) && VALID_MEMBER(memcg_cache_params___root_caches_node) && VALID_MEMBER(memcg_cache_params_children) && VALID_MEMBER(memcg_cache_params_children_node)) vt->flags |= SLAB_ROOT_CACHES; } if (!kt->kernel_NR_CPUS) { if (enumerator_value("WORK_CPU_UNBOUND", (long *)&value1)) kt->kernel_NR_CPUS = (int)value1; else if ((i = get_array_length("__per_cpu_offset", NULL, 0))) kt->kernel_NR_CPUS = i; else if (ARRAY_LENGTH(kmem_cache_s_cpudata)) kt->kernel_NR_CPUS = 
ARRAY_LENGTH(kmem_cache_s_cpudata); else if (ARRAY_LENGTH(kmem_cache_s_array)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_array); else if (ARRAY_LENGTH(kmem_cache_cpu_slab)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_cpu_slab); } if (CRASHDEBUG(1)) fprintf(fp, "kernel NR_CPUS: %d %s\n", kt->kernel_NR_CPUS, kt->kernel_NR_CPUS ? "" : "(unknown)"); if (kt->kernel_NR_CPUS > NR_CPUS) { error(WARNING, "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n", kt->kernel_NR_CPUS, NR_CPUS); error(FATAL, "recompile crash with larger NR_CPUS\n"); } if (machdep->init_kernel_pgd) machdep->init_kernel_pgd(); else if (symbol_exists("swapper_pg_dir")) { value1 = symbol_value("swapper_pg_dir"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value1; } else if (symbol_exists("cpu_pgd")) { len = get_array_length("cpu_pgd", &dimension, 0); if ((len == NR_CPUS) && (dimension == machdep->ptrs_per_pgd)) { value1 = symbol_value("cpu_pgd"); for (i = 0; i < NR_CPUS; i++) { value2 = i * (SIZE(pgd_t) * machdep->ptrs_per_pgd); vt->kernel_pgd[i] = value1 + value2; } error(WARNING, "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n", dimension, len); } else { error(WARNING, "unrecognized dimensions: cpu_pgd[%d][%d]\n", dimension, len); value1 = symbol_value("cpu_pgd"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value1; error(WARNING, "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n", dimension, len); } } else error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n"); get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory); if (kernel_symbol_exists("mem_section")) vt->flags |= SPARSEMEM; else if (kernel_symbol_exists("mem_map")) { get_symbol_data("mem_map", sizeof(char *), &vt->mem_map); vt->flags |= FLATMEM; } else vt->flags |= DISCONTIGMEM; sparse_mem_init(); vt->vmalloc_start = machdep->vmalloc_start(); if (IS_VMALLOC_ADDR(vt->mem_map)) vt->flags |= V_MEM_MAP; vt->total_pages = BTOP(VTOP(vt->high_memory)); if (symbol_exists("_totalram_pages")) { readmem(symbol_value("_totalram_pages") + OFFSET(atomic_t_counter), KVADDR, &vt->totalram_pages, sizeof(ulong), "_totalram_pages", FAULT_ON_ERROR); } else { switch (get_syment_array("totalram_pages", sp_array, 2)) { case 1: get_symbol_data("totalram_pages", sizeof(ulong), &vt->totalram_pages); break; case 2: if (!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalram_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalram_pages #2", RETURN_ON_ERROR))) break; vt->totalram_pages = MAX(value1, value2); break; } } if (symbol_exists("_totalhigh_pages")) { readmem(symbol_value("_totalhigh_pages") + OFFSET(atomic_t_counter), KVADDR, &vt->totalhigh_pages, sizeof(ulong), "_totalhigh_pages", FAULT_ON_ERROR); vt->total_pages += vt->totalhigh_pages; } else if (symbol_exists("totalhigh_pages")) { switch (get_syment_array("totalhigh_pages", sp_array, 2)) { case 1: get_symbol_data("totalhigh_pages", sizeof(ulong), &vt->totalhigh_pages); break; case 2: if (!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalhigh_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalhigh_pages #2", RETURN_ON_ERROR))) break; vt->totalhigh_pages = MAX(value1, value2); break; } vt->total_pages += vt->totalhigh_pages; } if (symbol_exists("num_physpages")) get_symbol_data("num_physpages", sizeof(ulong), &vt->num_physpages); if (kernel_symbol_exists("mem_map")) get_symbol_data("max_mapnr", sizeof(ulong), 
&vt->max_mapnr); if (kernel_symbol_exists("nr_swapfiles")) get_symbol_data("nr_swapfiles", sizeof(unsigned int), &vt->nr_swapfiles); STRUCT_SIZE_INIT(page, "page"); STRUCT_SIZE_INIT(free_area, "free_area"); STRUCT_SIZE_INIT(free_area_struct, "free_area_struct"); STRUCT_SIZE_INIT(zone, "zone"); STRUCT_SIZE_INIT(zone_struct, "zone_struct"); STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t"); STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct"); STRUCT_SIZE_INIT(mm_struct, "mm_struct"); STRUCT_SIZE_INIT(vm_area_struct, "vm_area_struct"); STRUCT_SIZE_INIT(pglist_data, "pglist_data"); if (VALID_STRUCT(pglist_data)) { vt->flags |= ZONES; if (symbol_exists("pgdat_list") && !IS_SPARSEMEM()) vt->flags |= NODES; /* * Determine the number of nodes the best way possible, * starting with a default of 1. */ vt->numnodes = 1; if (symbol_exists("numnodes")) get_symbol_data("numnodes", sizeof(int), &vt->numnodes); if (get_nodes_online()) vt->flags |= NODES_ONLINE; MEMBER_OFFSET_INIT(pglist_data_node_zones, "pglist_data", "node_zones"); MEMBER_OFFSET_INIT(pglist_data_node_mem_map, "pglist_data", "node_mem_map"); MEMBER_OFFSET_INIT(pglist_data_node_start_paddr, "pglist_data", "node_start_paddr"); MEMBER_OFFSET_INIT(pglist_data_node_start_mapnr, "pglist_data", "node_start_mapnr"); MEMBER_OFFSET_INIT(pglist_data_node_size, "pglist_data", "node_size"); MEMBER_OFFSET_INIT(pglist_data_node_id, "pglist_data", "node_id"); MEMBER_OFFSET_INIT(pglist_data_node_next, "pglist_data", "node_next"); MEMBER_OFFSET_INIT(pglist_data_bdata, "pglist_data", "bdata"); MEMBER_OFFSET_INIT(pglist_data_nr_zones, "pglist_data", "nr_zones"); MEMBER_OFFSET_INIT(pglist_data_node_start_pfn, "pglist_data", "node_start_pfn"); MEMBER_OFFSET_INIT(pglist_data_pgdat_next, "pglist_data", "pgdat_next"); MEMBER_OFFSET_INIT(pglist_data_node_present_pages, "pglist_data", "node_present_pages"); MEMBER_OFFSET_INIT(pglist_data_node_spanned_pages, "pglist_data", "node_spanned_pages"); ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones, "pglist_data.node_zones", NULL, SIZE_OPTION(zone_struct, zone)); vt->ZONE_HIGHMEM = vt->nr_zones - 1; if (VALID_STRUCT(zone_struct)) { MEMBER_OFFSET_INIT(zone_struct_free_pages, "zone_struct", "free_pages"); MEMBER_OFFSET_INIT(zone_struct_free_area, "zone_struct", "free_area"); MEMBER_OFFSET_INIT(zone_struct_zone_pgdat, "zone_struct", "zone_pgdat"); MEMBER_OFFSET_INIT(zone_struct_name, "zone_struct", "name"); MEMBER_OFFSET_INIT(zone_struct_size, "zone_struct", "size"); if (INVALID_MEMBER(zone_struct_size)) MEMBER_OFFSET_INIT(zone_struct_memsize, "zone_struct", "memsize"); MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn, "zone_struct", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr, "zone_struct", "zone_start_paddr"); MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, "zone_struct", "zone_start_mapnr"); MEMBER_OFFSET_INIT(zone_struct_zone_mem_map, "zone_struct", "zone_mem_map"); MEMBER_OFFSET_INIT(zone_struct_inactive_clean_pages, "zone_struct", "inactive_clean_pages"); MEMBER_OFFSET_INIT(zone_struct_inactive_clean_list, "zone_struct", "inactive_clean_list"); ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_struct_free_area, "zone_struct.free_area", NULL, SIZE(free_area_struct)); MEMBER_OFFSET_INIT(zone_struct_inactive_dirty_pages, "zone_struct", "inactive_dirty_pages"); MEMBER_OFFSET_INIT(zone_struct_active_pages, "zone_struct", "active_pages"); MEMBER_OFFSET_INIT(zone_struct_pages_min, "zone_struct", "pages_min"); MEMBER_OFFSET_INIT(zone_struct_pages_low, "zone_struct", "pages_low"); 
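/*
 * A minimal sketch (hypothetical, not executed here) of how the pglist_data
 * and zone_struct offsets cached in this block are consumed: node_zones[]
 * is an array embedded in pglist_data, so given a node's pglist_data
 * address "pgdat" (both "pgdat" and the index "i" are made-up locals), the
 * i-th zone can be located and one of its members read with
 *
 *	ulong zone, free_pages;
 *	zone = pgdat + OFFSET(pglist_data_node_zones) +
 *	    (i * SIZE(zone_struct));
 *	readmem(zone + OFFSET(zone_struct_free_pages), KVADDR, &free_pages,
 *	    sizeof(ulong), "zone_struct free_pages", FAULT_ON_ERROR);
 *
 * The dump_free_pages_zones_v1() handler installed just below walks
 * node_zones[] in essentially this fashion for each online node.
 */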
MEMBER_OFFSET_INIT(zone_struct_pages_high, "zone_struct", "pages_high"); vt->dump_free_pages = dump_free_pages_zones_v1; } else if (VALID_STRUCT(zone)) { MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat"); MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages"); if (INVALID_MEMBER(zone_free_pages) && VALID_MEMBER(zone_vm_stat)) { long nr_free_pages = 0; if (!enumerator_value("NR_FREE_PAGES", &nr_free_pages)) error(WARNING, "cannot determine NR_FREE_PAGES enumerator\n"); ASSIGN_OFFSET(zone_free_pages) = OFFSET(zone_vm_stat) + (nr_free_pages * sizeof(long)); } MEMBER_OFFSET_INIT(zone_free_area, "zone", "free_area"); MEMBER_OFFSET_INIT(zone_zone_pgdat, "zone", "zone_pgdat"); MEMBER_OFFSET_INIT(zone_name, "zone", "name"); MEMBER_OFFSET_INIT(zone_zone_mem_map, "zone", "zone_mem_map"); MEMBER_OFFSET_INIT(zone_zone_start_pfn, "zone", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_spanned_pages, "zone", "spanned_pages"); MEMBER_OFFSET_INIT(zone_present_pages, "zone", "present_pages"); MEMBER_OFFSET_INIT(zone_pages_min, "zone", "pages_min"); MEMBER_OFFSET_INIT(zone_pages_low, "zone", "pages_low"); MEMBER_OFFSET_INIT(zone_pages_high, "zone", "pages_high"); MEMBER_OFFSET_INIT(zone_watermark, "zone", "watermark"); if (INVALID_MEMBER(zone_watermark)) MEMBER_OFFSET_INIT(zone_watermark, "zone", "_watermark"); MEMBER_OFFSET_INIT(zone_nr_active, "zone", "nr_active"); MEMBER_OFFSET_INIT(zone_nr_inactive, "zone", "nr_inactive"); MEMBER_OFFSET_INIT(zone_all_unreclaimable, "zone", "all_unreclaimable"); MEMBER_OFFSET_INIT(zone_flags, "zone", "flags"); MEMBER_OFFSET_INIT(zone_pages_scanned, "zone", "pages_scanned"); ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area, "zone.free_area", NULL, SIZE(free_area)); vt->dump_free_pages = dump_free_pages_zones_v2; } } else vt->numnodes = 1; node_table_init(); sprintf(buf, "%llx", (ulonglong) MAX((uint64_t)vt->max_mapnr * PAGESIZE(), machdep->memory_size())); vt->paddr_prlen = strlen(buf); if (vt->flags & PERCPU_KMALLOC_V1) vt->dump_kmem_cache = dump_kmem_cache_percpu_v1; else if (vt->flags & PERCPU_KMALLOC_V2) vt->dump_kmem_cache = dump_kmem_cache_percpu_v2; else if (vt->flags & KMALLOC_SLUB) vt->dump_kmem_cache = dump_kmem_cache_slub; else vt->dump_kmem_cache = dump_kmem_cache; if (!(vt->flags & (NODES|ZONES))) { get_array_length("free_area", &dimension, 0); if (dimension) vt->dump_free_pages = dump_multidimensional_free_pages; else vt->dump_free_pages = dump_free_pages; } if (!(vt->vma_cache = (char *)malloc(SIZE(vm_area_struct)*VMA_CACHE))) error(FATAL, "cannot malloc vm_area_struct cache\n"); if (symbol_exists("page_hash_bits")) { unsigned int page_hash_bits; get_symbol_data("page_hash_bits", sizeof(unsigned int), &page_hash_bits); len = (1 << page_hash_bits); builtin_array_length("page_hash_table", len, NULL); get_symbol_data("page_hash_table", sizeof(void *), &vt->page_hash_table); vt->page_hash_table_len = len; STRUCT_SIZE_INIT(page_cache_bucket, "page_cache_bucket"); if (VALID_STRUCT(page_cache_bucket)) MEMBER_OFFSET_INIT(page_cache_bucket_chain, "page_cache_bucket", "chain"); } else if (symbol_exists("page_hash_table")) { vt->page_hash_table = symbol_value("page_hash_table"); vt->page_hash_table_len = 0; } else if (CRASHDEBUG(1)) error(NOTE, "page_hash_table does not exist in this kernel\n"); kmem_cache_init(); page_flags_init(); rss_page_types_init(); vt->flags |= VM_INIT; } /* * This command displays the contents of memory, with the output formatted * in several different manners. The starting address may be entered either * symbolically or by address. 
The default output size is the size of a long * data type, and the default output format is hexadecimal. When hexadecimal * output is used, the output will be accompanied by an ASCII translation. * These are the options: * * -p address argument is a physical address. * -u address argument is a user virtual address. * -d display output in signed decimal format (default is hexadecimal). * -D display output in unsigned decimal format (default is hexadecimal). * -s displays output symbolically when appropriate. * -8 display output in 8-bit values. * -16 display output in 16-bit values. * -32 display output in 32-bit values (default on 32-bit machines). * -64 display output in 64-bit values (default on 64-bit machines). * * The default number of items to display is 1, but a count argument, if any, * must follow the address. */ void cmd_rd(void) { int c, memtype, reverse; ulong flag; long bcnt, adjust, count; ulonglong addr, endaddr; ulong offset; struct syment *sp; FILE *tmpfp; char *outputfile; flag = HEXADECIMAL|DISPLAY_DEFAULT; endaddr = 0; offset = 0; memtype = KVADDR; tmpfp = NULL; outputfile = NULL; count = -1; adjust = bcnt = 0; reverse = FALSE; while ((c = getopt(argcnt, args, "Raxme:r:pfudDusSNo:81:3:6:")) != EOF) { switch(c) { case 'R': reverse = TRUE; break; case 'a': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_ASCII; break; case '8': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_8; break; case '1': if (!STREQ(optarg, "6")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_16; } break; case '3': if (!STREQ(optarg, "2")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_32; } break; case '6': if (!STREQ(optarg, "4")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_64; } break; case 'e': endaddr = htoll(optarg, FAULT_ON_ERROR, NULL); break; case 'r': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_RAW; outputfile = optarg; if ((tmpfp = fopen(outputfile, "w")) == NULL) error(FATAL, "cannot open output file: %s\n", outputfile); set_tmpfile2(tmpfp); break; case 's': case 'S': if (flag & DISPLAY_DEFAULT) { flag |= SYMBOLIC; if (c == 'S') { if (flag & SLAB_CACHE) flag |= SLAB_CACHE2; else flag |= SLAB_CACHE; } } else { error(INFO, "-%c option" " is only allowed with %d-bit display\n", c, DISPLAY_DEFAULT == DISPLAY_64 ? 
64 : 32); argerrs++; } break; case 'o': offset = stol(optarg, FAULT_ON_ERROR, NULL); flag |= SHOW_OFFSET; break; case 'p': memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR); memtype = PHYSADDR; break; case 'u': memtype &= ~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR); memtype = UVADDR; break; case 'd': flag &= ~(HEXADECIMAL|DECIMAL); flag |= DECIMAL; break; case 'D': flag &= ~(HEXADECIMAL|UDECIMAL); flag |= UDECIMAL; break; case 'm': if (!(kt->flags & ARCH_XEN)) error(FATAL, "-m option only applies to xen architecture\n"); memtype &= ~(UVADDR|KVADDR|FILEADDR); memtype = XENMACHADDR; break; case 'f': if (!pc->dumpfile) error(FATAL, "-f option requires a dumpfile\n"); memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR); memtype = FILEADDR; break; case 'x': flag |= NO_ASCII; break; case 'N': flag |= NET_ENDIAN; break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); if (*args[optind] == '(') addr = evall(args[optind], FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) addr = htoll(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) addr = (ulonglong)sp->value; else { fprintf(fp, "symbol not found: %s\n", args[optind]); fprintf(fp, "possible alternatives:\n"); if (!symbol_query(args[optind], " ", NULL)) fprintf(fp, " (none found)\n"); return; } if (flag & SHOW_OFFSET) addr += offset; if (args[++optind]) count = stol(args[optind], FAULT_ON_ERROR, NULL); if (count == -1) { if (endaddr) { if (endaddr <= addr) error(FATAL, "invalid ending address: %llx\n", endaddr); bcnt = endaddr - addr; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: count = bcnt/8; break; case DISPLAY_32: count = bcnt/4; break; case DISPLAY_16: count = bcnt/2; break; case DISPLAY_8: case DISPLAY_ASCII: case DISPLAY_RAW: count = bcnt; break; } if (bcnt == 0) count = 1; } else { if ((flag & DISPLAY_TYPES) == DISPLAY_RAW) error(FATAL, "-r option requires either a count" " argument or the -e option\n"); count = (flag & DISPLAY_ASCII) ? ASCII_UNLIMITED : 1; } } else if (endaddr) error(WARNING, "ending address ignored when count is specified\n"); if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) && !(flag & NO_ASCII) && !(flag & DISPLAY_ASCII)) flag |= ASCII_ENDLINE; if (memtype == KVADDR) { if (!COMMON_VADDR_SPACE() && !IS_KVADDR(addr)) memtype = UVADDR; } if (reverse) { if (!count) count = 1; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: bcnt = (count * 8); adjust = bcnt - 8; break; case DISPLAY_32: bcnt = (count * 4); adjust = bcnt - 4; break; case DISPLAY_16: bcnt = (count * 2); adjust = bcnt - 2; break; case DISPLAY_8: case DISPLAY_ASCII: case DISPLAY_RAW: bcnt = count; adjust = bcnt - 1; break; } addr = (count > 1) ? addr - adjust : addr; } display_memory(addr, count, flag, memtype, outputfile); } /* * display_memory() does the work for cmd_rd(), but can be (and is) called by * other routines that want to dump raw data. Based upon the flag, the * output format is tailored to fit in an 80-character line. Hexadecimal * output is accompanied by an end-of-line ASCII translation. 
*/ #define MAX_HEXCHARS_PER_LINE (32) /* line locations where ASCII output starts */ #define ASCII_START_8 (51 + VADDR_PRLEN) #define ASCII_START_16 (43 + VADDR_PRLEN) #define ASCII_START_32 (39 + VADDR_PRLEN) #define ASCII_START_64 (37 + VADDR_PRLEN) #define ENTRIES_8 (16) /* number of entries per line per size */ #define ENTRIES_16 (8) #define ENTRIES_32 (4) #define ENTRIES_64 (2) struct memloc { /* common holder of read memory */ uint8_t u8; uint16_t u16; uint32_t u32; uint64_t u64; uint64_t limit64; }; static void display_memory(ulonglong addr, long count, ulong flag, int memtype, void *opt) { int i, a, j; size_t typesz, sz; long written; void *location; char readtype[20]; char *addrtype; struct memloc mem; int displayed, per_line; int hx, lost; char hexchars[MAX_HEXCHARS_PER_LINE+1]; char ch; int linelen; char buf[BUFSIZE*2]; char slab[BUFSIZE]; int ascii_start; ulong error_handle; char *hex_64_fmt = BITS32() ? "%.*llx " : "%.*lx "; char *dec_64_fmt = BITS32() ? "%12lld " : "%15ld "; char *dec_u64_fmt = BITS32() ? "%12llu " : "%20lu "; if (count <= 0) error(FATAL, "invalid count request: %ld\n", count); switch (memtype) { case KVADDR: addrtype = "KVADDR"; break; case UVADDR: addrtype = "UVADDR"; break; case PHYSADDR: addrtype = "PHYSADDR"; break; case XENMACHADDR: addrtype = "XENMACHADDR"; break; case FILEADDR: addrtype = "FILEADDR"; break; default: addrtype = NULL; break; } if (CRASHDEBUG(4)) fprintf(fp, "<addr: %llx count: %ld flag: %lx (%s)>\n", addr, count, flag, addrtype); if (flag & DISPLAY_RAW) { for (written = 0; written < count; written += sz) { sz = BUFSIZE > (count - written) ? (size_t)(count - written) : (size_t)BUFSIZE; readmem(addr + written, memtype, buf, (long)sz, "raw dump to file", FAULT_ON_ERROR); if (fwrite(buf, 1, sz, pc->tmpfile2) != sz) error(FATAL, "cannot write to: %s\n", (char *)opt); } close_tmpfile2(); fprintf(fp, "%ld bytes copied from 0x%llx to %s\n", count, addr, (char *)opt); return; } BZERO(&mem, sizeof(struct memloc)); hx = lost = linelen = typesz = per_line = ascii_start = 0; location = NULL; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: ascii_start = ASCII_START_64; typesz = SIZEOF_64BIT; location = &mem.u64; sprintf(readtype, "64-bit %s", addrtype); per_line = ENTRIES_64; if (machine_type("IA64")) mem.limit64 = kt->end; break; case DISPLAY_32: ascii_start = ASCII_START_32; typesz = SIZEOF_32BIT; location = &mem.u32; sprintf(readtype, "32-bit %s", addrtype); per_line = ENTRIES_32; break; case DISPLAY_16: ascii_start = ASCII_START_16; typesz = SIZEOF_16BIT; location = &mem.u16; sprintf(readtype, "16-bit %s", addrtype); per_line = ENTRIES_16; break; case DISPLAY_8: ascii_start = ASCII_START_8; typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "8-bit %s", addrtype); per_line = ENTRIES_8; break; case DISPLAY_ASCII: typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "ascii"); per_line = 60; displayed = 0; break; } if (flag & NO_ERROR) error_handle = RETURN_ON_ERROR|QUIET; else error_handle = FAULT_ON_ERROR; for (i = a = 0; i < count; i++) { if(!readmem(addr, memtype, location, typesz, readtype, error_handle)) { addr += typesz; lost += 1; continue; } if (!(flag & DISPLAY_ASCII) && (((i - lost) % per_line) == 0)) { if ((i - lost)) { if (flag & ASCII_ENDLINE) { fprintf(fp, " %s", hexchars); } fprintf(fp, "\n"); } fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); hx = 0; BZERO(hexchars, MAX_HEXCHARS_PER_LINE+1); linelen = VADDR_PRLEN + strlen(": "); } switch (flag & DISPLAY_TYPES) { case DISPLAY_64: if ((flag & 
(HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { if ((!mem.limit64 || (mem.u64 <= mem.limit64)) && in_ksymbol_range(mem.u64) && strlen(value_to_symstr(mem.u64, buf, 0))) { fprintf(fp, "%-16s ", buf); linelen += strlen(buf)+1; break; } if ((flag & SLAB_CACHE) && vaddr_to_kmem_cache(mem.u64, slab, !VERBOSE)) { if ((flag & SLAB_CACHE2) || CRASHDEBUG(1)) sprintf(buf, "[%llx:%s]", (ulonglong)mem.u64, slab); else sprintf(buf, "[%s]", slab); fprintf(fp, "%-16s ", buf); linelen += strlen(buf)+1; break; } } if (flag & HEXADECIMAL) { fprintf(fp, hex_64_fmt, LONG_LONG_PRLEN, mem.u64); linelen += (LONG_LONG_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, dec_64_fmt, mem.u64); else if (flag & UDECIMAL) fprintf(fp, dec_u64_fmt, mem.u64); break; case DISPLAY_32: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { if (in_ksymbol_range(mem.u32) && strlen(value_to_symstr(mem.u32, buf, 0))) { fprintf(fp, INT_PRLEN == 16 ? "%-16s " : "%-8s ", buf); linelen += strlen(buf)+1; break; } if ((flag & SLAB_CACHE) && vaddr_to_kmem_cache(mem.u32, slab, !VERBOSE)) { if ((flag & SLAB_CACHE2) || CRASHDEBUG(1)) sprintf(buf, "[%x:%s]", mem.u32, slab); else sprintf(buf, "[%s]", slab); fprintf(fp, INT_PRLEN == 16 ? "%-16s " : "%-8s ", buf); linelen += strlen(buf)+1; break; } } if (flag & NET_ENDIAN) mem.u32 = htonl(mem.u32); if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", INT_PRLEN, mem.u32 ); linelen += (INT_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%12d ", mem.u32 ); else if (flag & UDECIMAL) fprintf(fp, "%12u ", mem.u32 ); break; case DISPLAY_16: if (flag & NET_ENDIAN) mem.u16 = htons(mem.u16); if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", SHORT_PRLEN, mem.u16); linelen += (SHORT_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%5d ", mem.u16); else if (flag & UDECIMAL) fprintf(fp, "%5u ", mem.u16); break; case DISPLAY_8: if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", CHAR_PRLEN, mem.u8); linelen += (CHAR_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%3d ", mem.u8); else if (flag & UDECIMAL) fprintf(fp, "%3u ", mem.u8); break; case DISPLAY_ASCII: if (isprint(mem.u8)) { if ((a % per_line) == 0) { if (displayed && i) fprintf(fp, "\n"); fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); } fprintf(fp, "%c", mem.u8); displayed++; a++; } else { if (count == ASCII_UNLIMITED) return; a = 0; } break; } if (flag & HEXADECIMAL) { char* ptr; switch (flag & DISPLAY_TYPES) { case DISPLAY_64: ptr = (char*)&mem.u64; for (j = 0; j < SIZEOF_64BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_32: ptr = (char*)&mem.u32; for (j = 0; j < (SIZEOF_32BIT); j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_16: ptr = (char*)&mem.u16; for (j = 0; j < SIZEOF_16BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_8: ptr = (char*)&mem.u8; for (j = 0; j < SIZEOF_8BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; } } addr += typesz; } if ((flag & ASCII_ENDLINE) && hx) { pad_line(fp, ascii_start - linelen, ' '); fprintf(fp, " %s", hexchars); } if (lost != count ) fprintf(fp,"\n"); } void display_memory_from_file_offset(ulonglong addr, long count, void *file) { if (file) display_memory(addr, count, 
DISPLAY_RAW, FILEADDR, file); else display_memory(addr, count, DISPLAY_64|ASCII_ENDLINE|HEXADECIMAL, FILEADDR, file); } /* * cmd_wr() is the sister routine of cmd_rd(), used to modify the contents * of memory. Like the "rd" command, the starting address may be entered * either symbolically or by address. The default modification size * is the size of a long data type. Write permission must exist on * /dev/mem. The flags are similar to those used by rd: * * -p address argument is a physical address. * -u address argument is user virtual address (only if ambiguous). * -k address argument is kernel virtual address (only if ambiguous). * -8 write data in an 8-bit value. * -16 write data in a 16-bit value. * -32 write data in a 32-bit value (default on 32-bit machines). * -64 write data in a 64-bit value (default on 64-bit machines). * * Only one value of a given datasize may be modified. */ void cmd_wr(void) { int c; ulonglong value; int addr_entered, value_entered; int memtype; struct memloc mem; ulong addr; void *buf; long size; struct syment *sp; if (DUMPFILE()) error(FATAL, "not allowed on dumpfiles\n"); memtype = 0; buf = NULL; addr = 0; size = sizeof(void*); addr_entered = value_entered = FALSE; while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) { switch(c) { case '8': size = 1; break; case '1': if (!STREQ(optarg, "6")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else size = 2; break; case '3': if (!STREQ(optarg, "2")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else size = 4; break; case '6': if (!STREQ(optarg, "4")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else size = 8; break; case 'p': memtype &= ~(UVADDR|KVADDR|FILEADDR); memtype = PHYSADDR; break; case 'u': memtype &= ~(PHYSADDR|KVADDR|FILEADDR); memtype = UVADDR; break; case 'k': memtype &= ~(PHYSADDR|UVADDR|FILEADDR); memtype = KVADDR; break; case 'f': /* * Unsupported, but can be forcibly implemented * by removing the DUMPFILE() check above and * recompiling. 
*/ if (!pc->dumpfile) error(FATAL, "-f option requires a dumpfile\n"); memtype &= ~(PHYSADDR|UVADDR|KVADDR); memtype = FILEADDR; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind]) { if (*args[optind] == '(') addr = evall(args[optind], FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) addr = htoll(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) addr = sp->value; else { fprintf(fp, "symbol not found: %s\n", args[optind]); fprintf(fp, "possible alternatives:\n"); if (!symbol_query(args[optind], " ", NULL)) fprintf(fp, " (none found)\n"); return; } addr_entered = TRUE; if (args[++optind]) { value = stol(args[optind], FAULT_ON_ERROR, NULL); value_entered = TRUE; switch (size) { case 1: mem.u8 = (uint8_t)value; buf = (void *)&mem.u8; break; case 2: mem.u16 = (uint16_t)value; buf = (void *)&mem.u16; break; case 4: mem.u32 = (uint32_t)value; buf = (void *)&mem.u32; break; case 8: mem.u64 = (uint64_t)value; buf = (void *)&mem.u64; break; } } } if (!addr_entered || !value_entered) cmd_usage(pc->curcmd, SYNOPSIS); if (!memtype) memtype = vaddr_type(addr, CURRENT_CONTEXT()); switch (memtype) { case UVADDR: if (!IS_UVADDR(addr, CURRENT_CONTEXT())) { error(INFO, "invalid user virtual address: %llx\n", addr); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (!IS_KVADDR(addr)) { error(INFO, "invalid kernel virtual address: %llx\n", addr); cmd_usage(pc->curcmd, SYNOPSIS); } break; case PHYSADDR: break; case FILEADDR: break; case AMBIGUOUS: error(INFO, "ambiguous address: %llx (requires -p, -u or -k)\n", addr); cmd_usage(pc->curcmd, SYNOPSIS); } writemem(addr, memtype, buf, size, "write memory", FAULT_ON_ERROR); } char * format_stack_entry(struct bt_info *bt, char *retbuf, ulong value, ulong limit) { char buf[BUFSIZE*2]; char slab[BUFSIZE]; if (BITS32()) { if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) { if ((!limit || (value <= limit)) && in_ksymbol_range(value) && strlen(value_to_symstr(value, buf, 0))) sprintf(retbuf, INT_PRLEN == 16 ? "%-16s" : "%-8s", buf); else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) { if ((bt->flags & BT_FULL_SYM_SLAB2) || CRASHDEBUG(1)) sprintf(buf, "[%lx:%s]", value, slab); else sprintf(buf, "[%s]", slab); sprintf(retbuf, INT_PRLEN == 16 ? "%-16s" : "%-8s", buf); } else sprintf(retbuf, "%08lx", value); } else sprintf(retbuf, "%08lx", value); } else { if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) { if ((!limit || (value <= limit)) && in_ksymbol_range(value) && strlen(value_to_symstr(value, buf, 0))) sprintf(retbuf, "%-16s", buf); else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) { if ((bt->flags & BT_FULL_SYM_SLAB2) || CRASHDEBUG(1)) sprintf(buf, "[%lx:%s]", value, slab); else sprintf(buf, "[%s]", slab); sprintf(retbuf, "%-16s", buf); } else sprintf(retbuf, "%016lx", value); } else sprintf(retbuf, "%016lx", value); } return retbuf; } /* * For processors with "traditional" kernel/user address space distinction. */ int generic_is_kvaddr(ulong addr) { return (addr >= (ulong)(machdep->kvbase)); } /* * NOTE: Perhaps even this generic version should tighten up requirements * by calling uvtop()? */ int generic_is_uvaddr(ulong addr, struct task_context *tc) { return (addr < (ulong)(machdep->kvbase)); } /* * Raw dump of a task's stack, forcing symbolic output. 
*/ void raw_stack_dump(ulong stackbase, ulong size) { display_memory(stackbase, size/sizeof(ulong), HEXADECIMAL|DISPLAY_DEFAULT|SYMBOLIC, KVADDR, NULL); } /* * Raw data dump, with the option of symbolic output. */ void raw_data_dump(ulong addr, long count, int symbolic) { long wordcnt; ulonglong address; int memtype; switch (sizeof(long)) { case SIZEOF_32BIT: wordcnt = count/SIZEOF_32BIT; if (count % SIZEOF_32BIT) wordcnt++; break; case SIZEOF_64BIT: wordcnt = count/SIZEOF_64BIT; if (count % SIZEOF_64BIT) wordcnt++; break; default: break; } if (pc->curcmd_flags & MEMTYPE_FILEADDR) { address = pc->curcmd_private; memtype = FILEADDR; } else if (pc->curcmd_flags & MEMTYPE_UVADDR) { address = (ulonglong)addr; memtype = UVADDR; } else { address = (ulonglong)addr; memtype = KVADDR; } display_memory(address, wordcnt, HEXADECIMAL|DISPLAY_DEFAULT|(symbolic ? SYMBOLIC : ASCII_ENDLINE), memtype, NULL); } /* * Quietly checks the accessibility of a memory location. */ int accessible(ulong kva) { ulong tmp; return(readmem(kva, KVADDR, &tmp, sizeof(ulong), "accessible check", RETURN_ON_ERROR|QUIET)); } /* * readmem() is by far *the* workhorse of this whole program. It reads * memory from /dev/kmem, /dev/mem, the dumpfile or /proc/kcore, whichever * is appropriate: * * addr a user, kernel or physical memory address. * memtype addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR * buffer supplied buffer to read the data into. * size number of bytes to read. * type string describing the request -- helpful when the read fails. * error_handle what to do if the read fails: FAULT_ON_ERROR kills the command * immediately; RETURN_ON_ERROR returns FALSE; QUIET suppresses * the error message. */ #define PRINT_ERROR_MESSAGE ((!(error_handle & QUIET) && !STREQ(pc->curcmd, "search")) || \ (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) || CRASHDEBUG(2)) #define INVALID_UVADDR "invalid user virtual address: %llx type: \"%s\"\n" #define INVALID_KVADDR "invalid kernel virtual address: %llx type: \"%s\"\n" #define SEEK_ERRMSG "seek error: %s address: %llx type: \"%s\"\n" #define READ_ERRMSG "read error: %s address: %llx type: \"%s\"\n" #define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n" #define PAGE_EXCLUDED_ERRMSG "page excluded: %s address: %llx type: \"%s\"\n" #define RETURN_ON_PARTIAL_READ() \ if ((error_handle & RETURN_PARTIAL) && (size < orig_size)) { \ if (CRASHDEBUG(1)) \ error(INFO, "RETURN_PARTIAL: \"%s\" read: %ld of %ld\n",\ type, orig_size - size, orig_size); \ return TRUE; \ } int readmem(ulonglong addr, int memtype, void *buffer, long size, char *type, ulong error_handle) { int fd; long cnt, orig_size; physaddr_t paddr; ulonglong pseudo; char *bufptr; if (CRASHDEBUG(4)) fprintf(fp, "<readmem: %llx, %s, \"%s\", %ld, %s, %lx>\n", addr, memtype_string(memtype, 1), type, size, error_handle_string(error_handle), (ulong)buffer); bufptr = (char *)buffer; orig_size = size; if (size <= 0) { if (PRINT_ERROR_MESSAGE) error(INFO, "invalid size request: %ld type: \"%s\"\n", size, type); goto readmem_error; } fd = REMOTE_MEMSRC() ? pc->sockfd : (ACTIVE() ? pc->mfd : pc->dfd); /* * Screen out any error conditions. 
*/ switch (memtype) { case UVADDR: if (!CURRENT_CONTEXT()) { if (PRINT_ERROR_MESSAGE) error(INFO, "no current user process\n"); goto readmem_error; } if (!IS_UVADDR(addr, CURRENT_CONTEXT())) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto readmem_error; } break; case KVADDR: if (LKCD_DUMPFILE()) addr = fix_lkcd_address(addr); if (!IS_KVADDR(addr)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto readmem_error; } break; case PHYSADDR: case XENMACHADDR: break; case FILEADDR: return generic_read_dumpfile(addr, buffer, size, type, error_handle); } while (size > 0) { switch (memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto readmem_error; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto readmem_error; } break; case PHYSADDR: paddr = addr; break; case XENMACHADDR: pseudo = xen_m2p(addr); if (pseudo == XEN_MACHADDR_NOT_FOUND) { pc->curcmd_flags |= XEN_MACHINE_ADDR; paddr = addr; } else paddr = pseudo | PAGEOFFSET(addr); break; } /* * Compute bytes till end of page. */ cnt = PAGESIZE() - PAGEOFFSET(paddr); if (cnt > size) cnt = size; if (CRASHDEBUG(4)) fprintf(fp, "<%s: addr: %llx paddr: %llx cnt: %ld>\n", readmem_function_name(), addr, (unsigned long long)paddr, cnt); if (memtype == KVADDR) pc->curcmd_flags |= MEMTYPE_KVADDR; else pc->curcmd_flags &= ~MEMTYPE_KVADDR; switch (READMEM(fd, bufptr, cnt, (memtype == PHYSADDR) || (memtype == XENMACHADDR) ? 0 : addr, paddr)) { case SEEK_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; case READ_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type); if ((pc->flags & DEVMEM) && (kt->flags & PRE_KERNEL_INIT) && !(error_handle & NO_DEVMEM_SWITCH) && devmem_is_restricted() && switch_to_proc_kcore()) { error_handle &= ~QUIET; return(readmem(addr, memtype, bufptr, size, type, error_handle)); } goto readmem_error; case PAGE_EXCLUDED: RETURN_ON_PARTIAL_READ(); if (PRINT_ERROR_MESSAGE) error(INFO, PAGE_EXCLUDED_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; default: break; } addr += cnt; bufptr += cnt; size -= cnt; } return TRUE; readmem_error: switch (error_handle) { case (FAULT_ON_ERROR): case (QUIET|FAULT_ON_ERROR): if (pc->flags & IN_FOREACH) RESUME_FOREACH(); RESTART(); case (RETURN_ON_ERROR): case (RETURN_PARTIAL|RETURN_ON_ERROR): case (QUIET|RETURN_ON_ERROR): break; } return FALSE; } /* * Accept anything... */ int generic_verify_paddr(physaddr_t paddr) { return TRUE; } /* * Read from /dev/mem. */ int read_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int readcnt; if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } /* * /dev/mem disallows anything >= __pa(high_memory) * * However it will allow 64-bit lseeks to anywhere, and when followed * by pulling a 32-bit address from the 64-bit file position, it * quietly returns faulty data from the (wrapped-around) address. 
if (vt->high_memory && (paddr >= (physaddr_t)(VTOP(vt->high_memory)))) { readcnt = 0; errno = 0; goto try_dev_kmem; } if (lseek(fd, (off_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; next_read: errno = 0; readcnt = read(fd, bufptr, cnt); if ((readcnt != cnt) && CRASHDEBUG(4)) { if (errno) perror("/dev/mem"); error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n", paddr, cnt, readcnt, readcnt); } try_dev_kmem: /* * On 32-bit intel architectures high memory can only be accessed * via vmalloc'd addresses. However, /dev/mem returns 0 bytes, and * non-reserved memory pages can't be mmap'd, so the only alternative * is to read it from /dev/kmem. */ if ((readcnt != cnt) && BITS32() && !readcnt && !errno && IS_VMALLOC_ADDR(addr)) readcnt = read_dev_kmem(addr, bufptr, cnt); /* * The 2.6 valid_phys_addr_range() can potentially shorten the * count of a legitimate read request. So far this has only been * seen on an ia64 where a kernel page straddles an EFI segment. */ if ((readcnt != cnt) && readcnt && (machdep->flags & DEVMEMRD) && !errno) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n", paddr, cnt, readcnt, readcnt); cnt -= readcnt; bufptr += readcnt; goto next_read; } if (readcnt != cnt) return READ_ERROR; return readcnt; } /* * Write to /dev/mem. */ int write_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return WRITE_ERROR; } if (lseek(fd, (off_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; if (write(fd, bufptr, cnt) != cnt) return WRITE_ERROR; return cnt; } /* * The first required reads of memory are done in kernel_init(), * so if there's a fatal read error of /dev/mem, display a warning * message if it appears that CONFIG_STRICT_DEVMEM is in effect. * On x86 and x86_64, only the first 256 pages of physical memory * are accessible: * * #ifdef CONFIG_STRICT_DEVMEM * int devmem_is_allowed(unsigned long pagenr) * { * if (pagenr <= 256) * return 1; * if (!page_is_ram(pagenr)) * return 1; * return 0; * } * #endif * * It would probably suffice to simply check for the existence of * devmem_is_allowed(), but on x86 and x86_64 verify pfn 256 reads OK, * and 257 fails. * * Update: a patch has been posted to LKML to fix the off-by-one error * by changing "<= 256" to "< 256": * * https://lkml.org/lkml/2012/8/28/357 * * The X86/X86_64 lower-boundary pfn check below has been changed * (preemptively) from 256 to 255. * * In any case, if that x86/x86_64 check fails to prove CONFIG_STRICT_DEVMEM * is configured, then the function will check that "jiffies" can be read, * as is done for the other architectures. * */ static int devmem_is_restricted(void) { long tmp; int restricted; /* * Check for pre-CONFIG_STRICT_DEVMEM kernels. 
if (!kernel_symbol_exists("devmem_is_allowed")) { if (machine_type("ARM") || machine_type("ARM64") || machine_type("X86") || machine_type("X86_64") || machine_type("PPC") || machine_type("PPC64")) return FALSE; } restricted = FALSE; if (STREQ(pc->live_memsrc, "/dev/mem")) { if (machine_type("X86") || machine_type("X86_64")) { if (readmem(255*PAGESIZE(), PHYSADDR, &tmp, sizeof(long), "devmem_is_allowed - pfn 255", QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH) && !(readmem(257*PAGESIZE(), PHYSADDR, &tmp, sizeof(long), "devmem_is_allowed - pfn 257", QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH))) restricted = TRUE; } if (kernel_symbol_exists("jiffies") && !readmem(symbol_value("jiffies"), KVADDR, &tmp, sizeof(ulong), "devmem_is_allowed - jiffies", QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH)) restricted = TRUE; if (restricted && CRASHDEBUG(1)) error(INFO, "this kernel may be configured with CONFIG_STRICT_DEVMEM," " which\n renders /dev/mem unusable as a live memory " "source.\n"); } return restricted; } static int switch_to_proc_kcore(void) { close(pc->mfd); if (file_exists("/proc/kcore", NULL)) { if (CRASHDEBUG(1)) error(INFO, "trying /proc/kcore as an alternative to /dev/mem\n\n"); } else return FALSE; if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0) { error(INFO, "/proc/kcore: %s\n", strerror(errno)); return FALSE; } if (!proc_kcore_init(fp, pc->mfd)) { error(INFO, "/proc/kcore: initialization failed\n"); return FALSE; } pc->flags &= ~DEVMEM; pc->flags |= PROC_KCORE; pc->readmem = read_proc_kcore; pc->writemem = write_proc_kcore; pc->live_memsrc = "/proc/kcore"; return TRUE; } /* * Read from memory driver. */ int read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (pc->curcmd_flags & XEN_MACHINE_ADDR) return READ_ERROR; if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } lseek(fd, (loff_t)paddr, SEEK_SET); if (read(fd, bufptr, cnt) != cnt) return READ_ERROR; return cnt; } /* * Write to memory driver. */ int write_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (!(MEMORY_DRIVER_DEVICE_MODE & S_IWUSR)) return (error(FATAL, "cannot write to %s!\n", pc->live_memsrc)); if (lseek(fd, (loff_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; if (write(fd, bufptr, cnt) != cnt) return WRITE_ERROR; return cnt; } /* * Read from an MCLX formatted dumpfile. */ int read_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (vas_lseek((ulong)paddr, SEEK_SET)) return SEEK_ERROR; if (vas_read((void *)bufptr, cnt) != cnt) return READ_ERROR; return cnt; } /* * Write to an MCLX formatted dumpfile. This modifies the buffered * copy only; if it gets flushed, the modification is lost. */ int write_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (vas_lseek((ulong)paddr, SEEK_SET)) return SEEK_ERROR; if (vas_write((void *)bufptr, cnt) != cnt) return WRITE_ERROR; return cnt; } /* * Read from an LKCD formatted dumpfile. */ int read_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { set_lkcd_fp(fp); if (!lkcd_lseek(paddr)) return SEEK_ERROR; if (lkcd_read((void *)bufptr, cnt) != cnt) return READ_ERROR; return cnt; } /* * Write to an LKCD formatted dumpfile. (dummy routine -- not allowed) */ int write_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return (error(FATAL, "cannot write to an LKCD compressed dump!\n")); } /* * Read from network daemon. 
*/ int read_daemon(int fd, void *bufptr, int cnt, ulong vaddr, physaddr_t paddr) { if (remote_memory_read(pc->rmfd, bufptr, cnt, paddr, -1) == cnt) return cnt; if (!IS_VMALLOC_ADDR(vaddr) || DUMPFILE()) return READ_ERROR; /* * On 32-bit architectures w/memory above ~936MB, * that memory can only be accessed via vmalloc'd * addresses. However, /dev/mem returns 0 bytes, * and non-reserved memory pages can't be mmap'd, so * the only alternative is to read it from /dev/kmem. */ if (BITS32() && remote_memory_read(pc->rkfd, bufptr, cnt, vaddr, -1) == cnt) return cnt; return READ_ERROR; } /* * Write to network daemon. */ int write_daemon(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { return (error(FATAL, "writing to daemon not supported yet [TBD]\n")); } /* * Turn the memtype bitmask into a string. */ static char *memtype_string(int memtype, int debug) { static char membuf[40]; switch (memtype) { case UVADDR: sprintf(membuf, debug ? "UVADDR" : "user virtual"); break; case KVADDR: sprintf(membuf, debug ? "KVADDR" : "kernel virtual"); break; case PHYSADDR: sprintf(membuf, debug ? "PHYSADDR" : "physical"); break; case XENMACHADDR: sprintf(membuf, debug ? "XENMACHADDR" : "xen machine"); break; case FILEADDR: sprintf(membuf, debug ? "FILEADDR" : "dumpfile"); break; default: if (debug) sprintf(membuf, "0x%x (?)", memtype); else sprintf(membuf, "unknown"); break; } return membuf; } /* * Turn the error_handle bitmask into a string, * Note: FAULT_ON_ERROR == 0 */ static char *error_handle_string(ulong error_handle) { static char ebuf[20]; int others; sprintf(ebuf, "("); others = 0; if (error_handle & RETURN_ON_ERROR) sprintf(&ebuf[strlen(ebuf)], "%sROE", others++ ? "|" : ""); if (error_handle & FAULT_ON_ERROR) sprintf(&ebuf[strlen(ebuf)], "%sFOE", others++ ? "|" : ""); if (error_handle & QUIET) sprintf(&ebuf[strlen(ebuf)], "%sQ", others++ ? "|" : ""); if (error_handle & HEX_BIAS) sprintf(&ebuf[strlen(ebuf)], "%sHB", others++ ? "|" : ""); if (error_handle & RETURN_PARTIAL) sprintf(&ebuf[strlen(ebuf)], "%sRP", others++ ? "|" : ""); if (error_handle & NO_DEVMEM_SWITCH) sprintf(&ebuf[strlen(ebuf)], "%sNDS", others++ ? "|" : ""); strcat(ebuf, ")"); return ebuf; } /* * Sister routine to readmem(). */ int writemem(ulonglong addr, int memtype, void *buffer, long size, char *type, ulong error_handle) { int fd; long cnt; physaddr_t paddr; char *bufptr; if (CRASHDEBUG(1)) fprintf(fp, "writemem: %llx, %s, \"%s\", %ld, %s %lx\n", addr, memtype_string(memtype, 1), type, size, error_handle_string(error_handle), (ulong)buffer); if (size < 0) { if (PRINT_ERROR_MESSAGE) error(INFO, "invalid size request: %ld\n", size); goto writemem_error; } bufptr = (char *)buffer; fd = ACTIVE() ? pc->mfd : pc->dfd; /* * Screen out any error conditions. 
*/ switch (memtype) { case UVADDR: if (!CURRENT_CONTEXT()) { if (PRINT_ERROR_MESSAGE) error(INFO, "no current user process\n"); goto writemem_error; } if (!IS_UVADDR(addr, CURRENT_CONTEXT())) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto writemem_error; } break; case KVADDR: if (!IS_KVADDR(addr)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto writemem_error; } break; case PHYSADDR: break; case FILEADDR: return generic_write_dumpfile(addr, buffer, size, type, error_handle); } while (size > 0) { switch (memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto writemem_error; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto writemem_error; } break; case PHYSADDR: paddr = addr; break; } /* * Compute bytes till end of page. */ cnt = PAGESIZE() - PAGEOFFSET(paddr); if (cnt > size) cnt = size; switch (pc->writemem(fd, bufptr, cnt, addr, paddr)) { case SEEK_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(memtype, 0), addr, type); goto writemem_error; case WRITE_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, WRITE_ERRMSG, memtype_string(memtype, 0), addr, type); goto writemem_error; default: break; } addr += cnt; bufptr += cnt; size -= cnt; } return TRUE; writemem_error: switch (error_handle) { case (FAULT_ON_ERROR): case (QUIET|FAULT_ON_ERROR): RESTART(); case (RETURN_ON_ERROR): case (QUIET|RETURN_ON_ERROR): break; } return FALSE; } /* * When /dev/mem won't allow access, try /dev/kmem. */ static ssize_t read_dev_kmem(ulong vaddr, char *bufptr, long cnt) { ssize_t readcnt; if (pc->kfd < 0) { if ((pc->kfd = open("/dev/kmem", O_RDONLY)) < 0) return 0; } if (lseek(pc->kfd, vaddr, SEEK_SET) == -1) return 0; readcnt = read(pc->kfd, bufptr, cnt); if (readcnt != cnt) readcnt = 0; return readcnt; } /* * Generic dumpfile read/write functions to handle FILEADDR * memtype arguments to readmem() and writemem(). These are * not to be confused with pc->readmem/writemem plug-ins. */ static int generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type, ulong error_handle) { int fd; int retval; retval = TRUE; if (!pc->dumpfile) error(FATAL, "command requires a dumpfile\n"); if ((fd = open(pc->dumpfile, O_RDONLY)) < 0) error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno)); if (lseek(fd, addr, SEEK_SET) == -1) { if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } else if (read(fd, buffer, size) != size) { if (PRINT_ERROR_MESSAGE) error(INFO, READ_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } close(fd); return retval; } static int generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type, ulong error_handle) { int fd; int retval; retval = TRUE; if (!pc->dumpfile) error(FATAL, "command requires a dumpfile\n"); if ((fd = open(pc->dumpfile, O_WRONLY)) < 0) error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno)); if (lseek(fd, addr, SEEK_SET) == -1) { if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } else if (write(fd, buffer, size) != size) { if (PRINT_ERROR_MESSAGE) error(INFO, WRITE_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } close(fd); return retval; } /* * Translates a kernel virtual address to its physical address. 
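 * A minimal quiet translation looks like (illustrative):
 *
 *	physaddr_t paddr;
 *
 *	if (kvtop(NULL, kvaddr, &paddr, 0))
 *		fprintf(fp, "%lx -> %llx\n", kvaddr, (ulonglong)paddr);
 *
 * A NULL task context falls back to CURRENT_CONTEXT().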
cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. */ int kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose) { physaddr_t unused; return (machdep->kvtop(tc ? tc : CURRENT_CONTEXT(), kvaddr, paddr ? paddr : &unused, verbose)); }
/* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. */ int uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { return(machdep->uvtop(tc, vaddr, paddr, verbose)); }
/* * The vtop command does a verbose translation of a user or kernel virtual * address into its physical address. The pte translation is shown by * passing the VERBOSE flag to kvtop() or uvtop(). If it's a user virtual * address, the vm_area_struct data containing the page is displayed. * Lastly, the mem_map[] page data containing the address is displayed. */ void cmd_vtop(void) { int c; ulong vaddr, context; int others; ulong vtop_flags, loop_vtop_flags; struct task_context *tc; vtop_flags = loop_vtop_flags = 0; tc = NULL; while ((c = getopt(argcnt, args, "ukc:")) != EOF) { switch(c) { case 'c': switch (str_to_context(optarg, &context, &tc)) { case STR_PID: case STR_TASK: vtop_flags |= USE_USER_PGD; break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); break; } break; case 'u': vtop_flags |= UVADDR; break; case 'k': vtop_flags |= KVADDR; break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); if (!tc && !(tc = CURRENT_CONTEXT())) error(FATAL, "no current user process\n"); if ((vtop_flags & (UVADDR|KVADDR)) == (UVADDR|KVADDR)) error(FATAL, "-u and -k options are mutually exclusive\n"); others = 0; while (args[optind]) { vaddr = htol(args[optind], FAULT_ON_ERROR, NULL); if (!(vtop_flags & (UVADDR|KVADDR))) { switch (vaddr_type(vaddr, tc)) { case UVADDR: loop_vtop_flags = UVADDR; break; case KVADDR: loop_vtop_flags = KVADDR; break; case AMBIGUOUS: error(FATAL, "ambiguous address: %lx (requires -u or -k)\n", vaddr); break; } } else loop_vtop_flags = 0; if (others++) fprintf(fp, "\n"); do_vtop(vaddr, tc, vtop_flags | loop_vtop_flags); if (REMOTE() && CRASHDEBUG(1)) { ulong paddr = remote_vtop(tc->processor, vaddr); if (paddr) fprintf(fp, "rvtop(%lx)=%lx\n", vaddr, paddr); } optind++; } }
/* * Do the work for cmd_vtop(), or less likely, foreach(). 
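 * Callers are expected to have resolved the address type first, e.g.
 * do_vtop(vaddr, tc, KVADDR) or do_vtop(vaddr, tc, UVADDR|USE_USER_PGD)
 * (illustrative invocations).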
*/ void do_vtop(ulong vaddr, struct task_context *tc, ulong vtop_flags) { physaddr_t paddr; ulong vma, page; int page_exists; struct meminfo meminfo; char buf1[BUFSIZE]; char buf2[BUFSIZE]; int memtype = 0; switch (vtop_flags & (UVADDR|KVADDR)) { case UVADDR: memtype = UVADDR; break; case KVADDR: memtype = KVADDR; break; case (UVADDR|KVADDR): error(FATAL, "-u and -k options are mutually exclusive\n"); break; default: switch (vaddr_type(vaddr, tc)) { case UVADDR: memtype = UVADDR; break; case KVADDR: memtype = KVADDR; break; case AMBIGUOUS: error(FATAL, "ambiguous address: %lx (requires -u or -k)\n", vaddr); break; } break; } page_exists = paddr = 0; switch (memtype) { case UVADDR: fprintf(fp, "%s %s\n", mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"), mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL")); if (!IN_TASK_VMA(tc->task, vaddr)) { fprintf(fp, "%s (not accessible)\n\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr))); return; } if (!uvtop(tc, vaddr, &paddr, 0)) { fprintf(fp, "%s %s\n\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ? "(page not available)" : "(not mapped)"); page_exists = FALSE; } else { fprintf(fp, "%s %s\n\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr))); page_exists = TRUE; } uvtop(tc, vaddr, &paddr, VERBOSE); fprintf(fp, "\n"); vma = vm_area_dump(tc->task, UVADDR, vaddr, 0); if (!page_exists) { if (swap_location(paddr, buf1)) fprintf(fp, "\nSWAP: %s\n", buf1); else if (vma_file_offset(vma, vaddr, buf1)) fprintf(fp, "\nFILE: %s\n", buf1); } break; case KVADDR: fprintf(fp, "%s %s\n", mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"), mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL")); if (!IS_KVADDR(vaddr)) { fprintf(fp, "%-8lx (not a kernel virtual address)\n\n", vaddr); return; } if (vtop_flags & USE_USER_PGD) { if (!uvtop(tc, vaddr, &paddr, 0)) { fprintf(fp, "%s %s\n\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ? "(page not available)" : "(not mapped)"); page_exists = FALSE; } else { fprintf(fp, "%s %s\n\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr))); page_exists = TRUE; } uvtop(tc, vaddr, &paddr, VERBOSE); } else { if (!kvtop(tc, vaddr, &paddr, 0)) { fprintf(fp, "%s %s\n\n", mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ? "(page not available)" : "(not mapped)"); page_exists = FALSE; } else { fprintf(fp, "%s %s\n\n", mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr))); page_exists = TRUE; } kvtop(tc, vaddr, &paddr, VERBOSE); } break; } fprintf(fp, "\n"); if (page_exists && phys_to_page(paddr, &page)) { if ((pc->flags & DEVMEM) && (paddr >= VTOP(vt->high_memory))) return; BZERO(&meminfo, sizeof(struct meminfo)); meminfo.flags = ADDRESS_SPECIFIED; meminfo.spec_addr = paddr; meminfo.memtype = PHYSADDR; dump_mem_map(&meminfo); } } /* * Runs PTOV() on the physical address argument or translates * a per-cpu offset and cpu specifier. 
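 *
 * Example usage (illustrative values; column widths vary by
 * architecture):
 *
 *	crash> ptov 56e000
 *	VIRTUAL   PHYSICAL
 *	c056e000  56e000
 *
 *	crash> ptov 1f0f0:0,2
 *	PER-CPU OFFSET: 1f0f0
 *	  CPU    VIRTUAL
 *	  [0]    ff1e70f0
 *	  [2]    ff1ff0f0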
*/ void cmd_ptov(void) { int c, len, unknown; ulong vaddr; physaddr_t paddr, paddr_test; char buf1[BUFSIZE]; char buf2[BUFSIZE]; int others; char *cpuspec; ulong *cpus; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); others = 0; cpuspec = NULL; cpus = NULL; while (args[optind]) { cpuspec = strchr(args[optind], ':'); if (cpuspec) { *cpuspec++ = NULLCHAR; cpus = get_cpumask_buf(); if (STREQ(cpuspec, "")) SET_BIT(cpus, CURRENT_CONTEXT()->processor); else make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL); } paddr = htoll(args[optind], FAULT_ON_ERROR, NULL); if (cpuspec) { sprintf(buf1, "[%d]", kt->cpus-1); len = strlen(buf1) + 2; fprintf(fp, "%sPER-CPU OFFSET: %llx\n", others++ ? "\n" : "", (ulonglong)paddr); fprintf(fp, " %s %s\n", mkstring(buf1, len, LJUST, "CPU"), mkstring(buf2, VADDR_PRLEN, LJUST, "VIRTUAL")); for (c = 0; c < kt->cpus; c++) { if (!NUM_IN_BITMAP(cpus, c)) continue; vaddr = paddr + kt->__per_cpu_offset[c]; sprintf(buf1, "[%d]", c); fprintf(fp, " %s%lx", mkstring(buf2, len, LJUST, buf1), vaddr); if (hide_offline_cpu(c)) fprintf(fp, " [OFFLINE]\n"); else fprintf(fp, "\n"); } FREEBUF(cpus); } else { vaddr = PTOV(paddr); unknown = BITS32() && (!kvtop(0, vaddr, &paddr_test, 0) || (paddr_test != paddr)); fprintf(fp, "%s%s %s\n", others++ ? "\n" : "", mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"), mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL")); fprintf(fp, "%s %s\n", unknown ? mkstring(buf1, VADDR_PRLEN, LJUST, "unknown") : mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX, MKSTR(&paddr))); } optind++; } } /* * Runs PTOB() on the page frame number to get the page address. */ void cmd_ptob(void) { ulonglong value; optind = 1; if (!args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { value = stoll(args[optind], FAULT_ON_ERROR, NULL); fprintf(fp, "%llx: %llx\n", value, PTOB(value)); optind++; } } /* * Runs BTOP() on the address to get the page frame number. */ void cmd_btop(void) { ulonglong value; optind = 1; if (!args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { value = htoll(args[optind], FAULT_ON_ERROR, NULL); fprintf(fp, "%llx: %llx\n", value, BTOP(value)); optind++; } } /* * This command displays basic virtual memory information of a context, * consisting of a pointer to its mm_struct, its RSS and total virtual * memory size; and a list of pointers to each vm_area_struct, its starting * and ending address, and vm_flags value. The argument can be a task * address or a PID number; if no args, the current context is used. 
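 *
 * Example usage (illustrative):
 *
 *	crash> vm             (VM summary of the current context)
 *	crash> vm -v 1234     (dump each vm_area_struct of PID 1234)
 *	crash> vm -p          (translate each page of each VMA)
 *	crash> vm -f 3875     (decode a vm_flags value)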
*/ void cmd_vm(void) { int c; ulong flag; ulong value; ulong single_vma; ulonglong llvalue; struct task_context *tc; struct reference reference, *ref; unsigned int radix; int subsequent; flag = 0; single_vma = 0; radix = 0; ref = NULL; BZERO(&reference, sizeof(struct reference)); while ((c = getopt(argcnt, args, "f:pmvR:P:xdM:")) != EOF) { switch(c) { case 'M': pc->curcmd_private = htoll(optarg, FAULT_ON_ERROR, NULL); pc->curcmd_flags |= MM_STRUCT_FORCE; break; case 'f': if (flag) argerrs++; else { llvalue = htoll(optarg, FAULT_ON_ERROR, NULL); do_vm_flags(llvalue); return; } break; case 'p': if (flag) argerrs++; else flag |= PHYSADDR; break; case 'm': if (flag) argerrs++; else flag |= PRINT_MM_STRUCT; break; case 'v': if (flag) argerrs++; else flag |= PRINT_VMA_STRUCTS; break; case 'R': if (ref) { error(INFO, "only one -R option allowed\n"); argerrs++; } else if (flag && !(flag & PHYSADDR)) argerrs++; else { ref = &reference; ref->str = optarg; flag |= PHYSADDR; } break; case 'P': if (flag) argerrs++; else { flag |= PRINT_SINGLE_VMA; single_vma = htol(optarg, FAULT_ON_ERROR, NULL); } break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (radix == 10) flag |= PRINT_RADIX_10; else if (radix == 16) flag |= PRINT_RADIX_16; if (!args[optind]) { if (!ref) print_task_header(fp, CURRENT_CONTEXT(), 0); vm_area_dump(CURRENT_TASK(), flag, single_vma, ref); return; } subsequent = 0; while (args[optind]) { switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) { if (!ref) print_task_header(fp, tc, subsequent++); vm_area_dump(tc->task, flag, single_vma, ref); } break; case STR_TASK: if (!ref) print_task_header(fp, tc, subsequent++); vm_area_dump(tc->task, flag, single_vma, ref); break; case STR_INVALID: error(INFO, "%sinvalid task or pid value: %s\n", subsequent++ ? "\n" : "", args[optind]); break; } optind++; } } /* * Translate a vm_flags value. */ #define VM_READ 0x00000001ULL /* currently active flags */ #define VM_WRITE 0x00000002ULL #define VM_EXEC 0x00000004ULL #define VM_SHARED 0x00000008ULL #define VM_MAYREAD 0x00000010ULL /* limits for mprotect() etc */ #define VM_MAYWRITE 0x00000020ULL #define VM_MAYEXEC 0x00000040ULL #define VM_MAYSHARE 0x00000080ULL #define VM_GROWSDOWN 0x00000100ULL /* general info on the segment */ #define VM_GROWSUP 0x00000200ULL #define VM_NOHUGEPAGE 0x00000200ULL /* MADV_NOHUGEPAGE marked this vma */ #define VM_SHM 0x00000400ULL /* shared memory area, don't swap out */ #define VM_PFNMAP 0x00000400ULL #define VM_DENYWRITE 0x00000800ULL /* ETXTBSY on write attempts.. 
*/ #define VM_EXECUTABLE 0x00001000ULL #define VM_LOCKED 0x00002000ULL #define VM_IO 0x00004000ULL /* Memory mapped I/O or similar */ #define VM_SEQ_READ 0x00008000ULL /* App will access data sequentially */ #define VM_RAND_READ 0x00010000ULL /* App will not benefit from clustered reads */ #define VM_DONTCOPY 0x00020000ULL /* Do not copy this vma on fork */ #define VM_DONTEXPAND 0x00040000ULL /* Cannot expand with mremap() */ #define VM_RESERVED 0x00080000ULL /* Don't unmap it from swap_out */ #define VM_BIGPAGE 0x00100000ULL /* bigpage mappings, no pte's */ #define VM_BIGMAP 0x00200000ULL /* user wants bigpage mapping */ #define VM_WRITECOMBINED 0x00100000ULL /* Write-combined */ #define VM_NONCACHED 0x00200000ULL /* Noncached access */ #define VM_HUGETLB 0x00400000ULL /* Huge tlb Page*/ #define VM_ACCOUNT 0x00100000ULL /* Memory is a vm accounted object */ #define VM_NONLINEAR 0x00800000ULL /* Is non-linear (remap_file_pages) */ #define VM_MAPPED_COPY 0x01000000ULL /* T if mapped copy of data (nommu mmap) */ #define VM_HUGEPAGE 0x01000000ULL /* MADV_HUGEPAGE marked this vma */ #define VM_INSERTPAGE 0x02000000ULL /* The vma has had "vm_insert_page()" done on it */ #define VM_ALWAYSDUMP 0x04000000ULL /* Always include in core dumps */ #define VM_CAN_NONLINEAR 0x08000000ULL /* Has ->fault & does nonlinear pages */ #define VM_MIXEDMAP 0x10000000ULL /* Can contain "struct page" and pure PFN pages */ #define VM_SAO 0x20000000ULL /* Strong Access Ordering (powerpc) */ #define VM_PFN_AT_MMAP 0x40000000ULL /* PFNMAP vma that is fully mapped at mmap time */ #define VM_MERGEABLE 0x80000000ULL /* KSM may merge identical pages */ static void do_vm_flags(ulonglong flags) { int others; others = 0; fprintf(fp, "%llx: (", flags); if (flags & VM_READ) { fprintf(fp, "READ"); others++; } if (flags & VM_WRITE) fprintf(fp, "%sWRITE", others++ ? "|" : ""); if (flags & VM_EXEC) fprintf(fp, "%sEXEC", others++ ? "|" : ""); if (flags & VM_SHARED) fprintf(fp, "%sSHARED", others++ ? "|" : ""); if (flags & VM_MAYREAD) fprintf(fp, "%sMAYREAD", others++ ? "|" : ""); if (flags & VM_MAYWRITE) fprintf(fp, "%sMAYWRITE", others++ ? "|" : ""); if (flags & VM_MAYEXEC) fprintf(fp, "%sMAYEXEC", others++ ? "|" : ""); if (flags & VM_MAYSHARE) fprintf(fp, "%sMAYSHARE", others++ ? "|" : ""); if (flags & VM_GROWSDOWN) fprintf(fp, "%sGROWSDOWN", others++ ? "|" : ""); if (kernel_symbol_exists("expand_upwards")) { if (flags & VM_GROWSUP) fprintf(fp, "%sGROWSUP", others++ ? "|" : ""); } else if (flags & VM_NOHUGEPAGE) fprintf(fp, "%sNOHUGEPAGE", others++ ? "|" : ""); if (flags & VM_SHM) { if (THIS_KERNEL_VERSION > LINUX(2,6,17)) fprintf(fp, "%sPFNMAP", others++ ? "|" : ""); else fprintf(fp, "%sSHM", others++ ? "|" : ""); } if (flags & VM_DENYWRITE) fprintf(fp, "%sDENYWRITE", others++ ? "|" : ""); if (flags & VM_EXECUTABLE) fprintf(fp, "%sEXECUTABLE", others++ ? "|" : ""); if (flags & VM_LOCKED) fprintf(fp, "%sLOCKED", others++ ? "|" : ""); if (flags & VM_IO) fprintf(fp, "%sIO", others++ ? "|" : ""); if (flags & VM_SEQ_READ) fprintf(fp, "%sSEQ_READ", others++ ? "|" : ""); if (flags & VM_RAND_READ) fprintf(fp, "%sRAND_READ", others++ ? "|" : ""); if (flags & VM_DONTCOPY) fprintf(fp, "%sDONTCOPY", others++ ? "|" : ""); if (flags & VM_DONTEXPAND) fprintf(fp, "%sDONTEXPAND", others++ ? "|" : ""); if (flags & VM_RESERVED) fprintf(fp, "%sRESERVED", others++ ? "|" : ""); if (symbol_exists("nr_bigpages") && (THIS_KERNEL_VERSION == LINUX(2,4,9))) { if (flags & VM_BIGPAGE) fprintf(fp, "%sBIGPAGE", others++ ? 
"|" : ""); if (flags & VM_BIGMAP) fprintf(fp, "%sBIGMAP", others++ ? "|" : ""); } else { if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) && (flags & VM_WRITECOMBINED)) fprintf(fp, "%sWRITECOMBINED", others++ ? "|" : ""); if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) && (flags & VM_NONCACHED)) fprintf(fp, "%sNONCACHED", others++ ? "|" : ""); if (flags & VM_HUGETLB) fprintf(fp, "%sHUGETLB", others++ ? "|" : ""); if (flags & VM_ACCOUNT) fprintf(fp, "%sACCOUNT", others++ ? "|" : ""); } if (flags & VM_NONLINEAR) fprintf(fp, "%sNONLINEAR", others++ ? "|" : ""); if (flags & VM_HUGEPAGE) { if (MEMBER_EXISTS("mm_struct", "pmd_huge_pte")) fprintf(fp, "%sHUGEPAGE", others++ ? "|" : ""); else fprintf(fp, "%sMAPPED_COPY", others++ ? "|" : ""); } if (flags & VM_INSERTPAGE) fprintf(fp, "%sINSERTPAGE", others++ ? "|" : ""); if (flags & VM_ALWAYSDUMP) fprintf(fp, "%sALWAYSDUMP", others++ ? "|" : ""); if (flags & VM_CAN_NONLINEAR) fprintf(fp, "%sCAN_NONLINEAR", others++ ? "|" : ""); if (flags & VM_MIXEDMAP) fprintf(fp, "%sMIXEDMAP", others++ ? "|" : ""); if (flags & VM_SAO) fprintf(fp, "%sSAO", others++ ? "|" : ""); if (flags & VM_PFN_AT_MMAP) fprintf(fp, "%sPFN_AT_MMAP", others++ ? "|" : ""); if (flags & VM_MERGEABLE) fprintf(fp, "%sMERGEABLE", others++ ? "|" : ""); fprintf(fp, ")\n"); } /* * Read whatever size vm_area_struct.vm_flags happens to be into a ulonglong. */ static ulonglong get_vm_flags(char *vma_buf) { ulonglong vm_flags = 0; if (SIZE(vm_area_struct_vm_flags) == sizeof(short)) vm_flags = USHORT(vma_buf + OFFSET(vm_area_struct_vm_flags)); else if (SIZE(vm_area_struct_vm_flags) == sizeof(long)) vm_flags = ULONG(vma_buf+ OFFSET(vm_area_struct_vm_flags)); else if (SIZE(vm_area_struct_vm_flags) == sizeof(long long)) vm_flags = ULONGLONG(vma_buf+ OFFSET(vm_area_struct_vm_flags)); else error(INFO, "questionable vm_area_struct.vm_flags size: %d\n", SIZE(vm_area_struct_vm_flags)); return vm_flags; } static void vm_cleanup(void *arg) { struct task_context *tc; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; tc = (struct task_context *)arg; tc->mm_struct = 0; } static int is_valid_mm(ulong mm) { char kbuf[BUFSIZE]; char *p; int mm_count; if (!(p = vaddr_to_kmem_cache(mm, kbuf, VERBOSE))) goto bailout; if (!STRNEQ(p, "mm_struct")) goto bailout; readmem(mm + OFFSET(mm_struct_mm_count), KVADDR, &mm_count, sizeof(int), "mm_struct mm_count", FAULT_ON_ERROR); if (mm_count == 0) error(FATAL, "stale mm_struct address\n"); return mm_count; bailout: error(FATAL, "invalid mm_struct address\n"); return 0; } /* * vm_area_dump() primarily does the work for cmd_vm(), but is also called * from IN_TASK_VMA(), do_vtop(), and foreach(). 
How it behaves depends * upon the flag and ref arguments: * * UVADDR do_vtop() when dumping the VMA for a uvaddr * UVADDR|VERIFY_ADDR IN_TASK_VMA() macro checks if a uvaddr is in a VMA * PHYSADDR cmd_vm() or foreach(vm) for -p and -R options * PRINT_MM_STRUCT cmd_vm() or foreach(vm) for -m option * PRINT_VMA_STRUCTS cmd_vm() or foreach(vm) for -v option * PRINT_INODES open_files_dump() backdoors foreach(vm) * * ref cmd_vm() or foreach(vm) for -R option that searches * for references -- and only then does a display */ #define PRINT_VM_DATA() \ { \ fprintf(fp, "%s %s ", \ mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, "MM"), \ mkstring(buf5, VADDR_PRLEN, CENTER|LJUST, "PGD")); \ fprintf(fp, "%s %s\n", \ mkstring(buf4, 6, CENTER|LJUST, "RSS"), \ mkstring(buf5, 8, CENTER|LJUST, "TOTAL_VM")); \ \ fprintf(fp, "%s %s ", \ mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, \ MKSTR(tm->mm_struct_addr)), \ mkstring(buf5, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, \ MKSTR(tm->pgd_addr))); \ \ sprintf(buf4, "%ldk", (tm->rss * PAGESIZE())/1024); \ sprintf(buf5, "%ldk", (tm->total_vm * PAGESIZE())/1024); \ fprintf(fp, "%s %s\n", \ mkstring(buf4, 6, CENTER|LJUST, NULL), \ mkstring(buf5, 8, CENTER|LJUST, NULL)); \ } #define PRINT_VMA_DATA() \ fprintf(fp, "%s%s%s%s%s %6llx%s%s\n", \ mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, MKSTR(vma)), \ space(MINSPACE), \ mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)), \ space(MINSPACE), \ mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)), \ vm_flags, space(MINSPACE), buf1); #define FILENAME_COMPONENT(P,C) \ ((STREQ((P), "/") && STREQ((C), "/")) || \ (!STREQ((C), "/") && strstr((P),(C)))) #define VM_REF_SEARCH (0x1) #define VM_REF_DISPLAY (0x2) #define VM_REF_NUMBER (0x4) #define VM_REF_VMA (0x8) #define VM_REF_PAGE (0x10) #define VM_REF_HEADER (0x20) #define DO_REF_SEARCH(X) ((X) && ((X)->cmdflags & VM_REF_SEARCH)) #define DO_REF_DISPLAY(X) ((X) && ((X)->cmdflags & VM_REF_DISPLAY)) #define VM_REF_CHECK_HEXVAL(X,V) \ (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->hexval == (V))) #define VM_REF_CHECK_DECVAL(X,V) \ (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->decval == (V))) #define VM_REF_CHECK_STRING(X,S) \ (DO_REF_SEARCH(X) && (string_exists(S)) && FILENAME_COMPONENT((S),(X)->str)) #define VM_REF_FOUND(X) ((X) && ((X)->cmdflags & VM_REF_HEADER)) ulong vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref) { struct task_context *tc; ulong vma; ulong vm_start; ulong vm_end; ulong vm_next, vm_mm; char *dentry_buf, *vma_buf, *file_buf; ulonglong vm_flags; ulong vm_file, inode; ulong dentry, vfsmnt; ulong single_vma; unsigned int radix; int single_vma_found; int found; struct task_mem_usage task_mem_usage, *tm; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char vma_header[BUFSIZE]; tc = task_to_context(task); tm = &task_mem_usage; get_task_mem_usage(task, tm); single_vma = 0; single_vma_found = FALSE; if (flag & PRINT_SINGLE_VMA) { single_vma = vaddr; vaddr = 0; } if (flag & PRINT_RADIX_10) radix = 10; else if (flag & PRINT_RADIX_16) radix = 16; else radix = 0; if (ref) { ref->cmdflags = VM_REF_SEARCH; if (IS_A_NUMBER(ref->str)) { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); if (decimal(ref->str, 0)) ref->decval = dtol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= VM_REF_NUMBER; } } if (VM_REF_CHECK_HEXVAL(ref, tm->mm_struct_addr) || VM_REF_CHECK_HEXVAL(ref, tm->pgd_addr)) { print_task_header(fp, tc, 0); PRINT_VM_DATA(); fprintf(fp, "\n"); 
return (ulong)NULL; } if (!(flag & (UVADDR|PRINT_MM_STRUCT|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) && !DO_REF_SEARCH(ref)) PRINT_VM_DATA(); if (!tm->mm_struct_addr) { if (pc->curcmd_flags & MM_STRUCT_FORCE) { if (!is_valid_mm(pc->curcmd_private)) return (ulong)NULL; tc->mm_struct = tm->mm_struct_addr = pc->curcmd_private; /* * tc->mm_struct is changed, use vm_cleanup to * restore it. */ pc->cmd_cleanup_arg = (void *)tc; pc->cmd_cleanup = vm_cleanup; } else return (ulong)NULL; } if (flag & PRINT_MM_STRUCT) { dump_struct("mm_struct", tm->mm_struct_addr, radix); return (ulong)NULL; } readmem(tm->mm_struct_addr + OFFSET(mm_struct_mmap), KVADDR, &vma, sizeof(void *), "mm_struct mmap", FAULT_ON_ERROR); sprintf(vma_header, "%s%s%s%s%s FLAGS%sFILE\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "VMA"), space(MINSPACE), mkstring(buf2, UVADDR_PRLEN, CENTER|RJUST, "START"), space(MINSPACE), mkstring(buf3, UVADDR_PRLEN, CENTER|RJUST, "END"), space(MINSPACE)); if (!(flag & (PHYSADDR|VERIFY_ADDR|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) && !DO_REF_SEARCH(ref)) fprintf(fp, "%s", vma_header); for (found = FALSE; vma; vma = vm_next) { if ((flag & PHYSADDR) && !DO_REF_SEARCH(ref)) fprintf(fp, "%s", vma_header); inode = 0; BZERO(buf1, BUFSIZE); vma_buf = fill_vma_cache(vma); vm_mm = ULONG(vma_buf + OFFSET(vm_area_struct_vm_mm)); vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next)); vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); vm_flags = get_vm_flags(vma_buf); vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); if (flag & PRINT_SINGLE_VMA) { if (vma != single_vma) continue; fprintf(fp, "%s", vma_header); single_vma_found = TRUE; } if (flag & PRINT_VMA_STRUCTS) { dump_struct("vm_area_struct", vma, radix); continue; } if (vm_file && !(flag & VERIFY_ADDR)) { file_buf = fill_file_cache(vm_file); dentry = ULONG(file_buf + OFFSET(file_f_dentry)); dentry_buf = NULL; if (dentry) { dentry_buf = fill_dentry_cache(dentry); if (VALID_MEMBER(file_f_vfsmnt)) { vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); get_pathname(dentry, buf1, BUFSIZE, 1, vfsmnt); } else { get_pathname(dentry, buf1, BUFSIZE, 1, 0); } } if ((flag & PRINT_INODES) && dentry) { inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); } } if (!(flag & UVADDR) || ((flag & UVADDR) && ((vaddr >= vm_start) && (vaddr < vm_end)))) { found = TRUE; if (flag & VERIFY_ADDR) return vma; if (DO_REF_SEARCH(ref)) { if (VM_REF_CHECK_HEXVAL(ref, vma) || VM_REF_CHECK_HEXVAL(ref, (ulong)vm_flags) || VM_REF_CHECK_STRING(ref, buf1)) { if (!(ref->cmdflags & VM_REF_HEADER)) { print_task_header(fp, tc, 0); PRINT_VM_DATA(); ref->cmdflags |= VM_REF_HEADER; } if (!(ref->cmdflags & VM_REF_VMA) || (ref->cmdflags & VM_REF_PAGE)) { fprintf(fp, "%s", vma_header); ref->cmdflags |= VM_REF_VMA; ref->cmdflags &= ~VM_REF_PAGE; ref->ref1 = vma; } PRINT_VMA_DATA(); } if (vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref)) { if (!(ref->cmdflags & VM_REF_HEADER)) { print_task_header(fp, tc, 0); PRINT_VM_DATA(); ref->cmdflags |= VM_REF_HEADER; } if (!(ref->cmdflags & VM_REF_VMA) || (ref->ref1 != vma)) { fprintf(fp, "%s", vma_header); PRINT_VMA_DATA(); ref->cmdflags |= VM_REF_VMA; ref->ref1 = vma; } ref->cmdflags |= VM_REF_DISPLAY; vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref); ref->cmdflags &= ~VM_REF_DISPLAY; } continue; } if (inode) { fprintf(fp, "%lx%s%s%s%s%s%6llx%s%lx %s\n", vma, space(MINSPACE), mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)), space(MINSPACE), mkstring(buf3, 
UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)), space(MINSPACE), vm_flags, space(MINSPACE), inode, buf1); } else { PRINT_VMA_DATA(); if (flag & (PHYSADDR|PRINT_SINGLE_VMA)) vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref); } if (flag & UVADDR) return vma; } } if (flag & VERIFY_ADDR) return (ulong)NULL; if ((flag & PRINT_SINGLE_VMA) && !single_vma_found) fprintf(fp, "(not found)\n"); if ((flag & UVADDR) && !found) fprintf(fp, "(not found)\n"); if (VM_REF_FOUND(ref)) fprintf(fp, "\n"); return (ulong)NULL; } static int vm_area_page_dump(ulong vma, ulong task, ulong start, ulong end, ulong mm, struct reference *ref) { physaddr_t paddr; ulong offs; char *p1, *p2; int display; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE*2]; char buf4[BUFSIZE]; if (mm == symbol_value("init_mm")) return FALSE; if (!ref || DO_REF_DISPLAY(ref)) fprintf(fp, "%s %s\n", mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), LJUST, "PHYSICAL")); if (DO_REF_DISPLAY(ref)) { start = ref->ref2; } while (start < end) { display = DO_REF_SEARCH(ref) ? FALSE : TRUE; if (VM_REF_CHECK_HEXVAL(ref, start)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } if (uvtop(task_to_context(task), start, &paddr, 0)) { sprintf(buf3, "%s %s\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST|LONGLONG_HEX, MKSTR(&paddr))); if (VM_REF_CHECK_HEXVAL(ref, paddr)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } } else if (paddr && swap_location(paddr, buf1)) { sprintf(buf3, "%s SWAP: %s\n", mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)), buf1); if (DO_REF_SEARCH(ref)) { if (VM_REF_CHECK_DECVAL(ref, THIS_KERNEL_VERSION >= LINUX(2,6,0) ? __swp_offset(paddr) : SWP_OFFSET(paddr))) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } strcpy(buf4, buf3); p1 = strstr(buf4, "SWAP:") + strlen("SWAP: "); p2 = strstr(buf4, " OFFSET:"); *p2 = NULLCHAR; if (VM_REF_CHECK_STRING(ref, p1)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } } } else if (vma_file_offset(vma, start, buf1)) { sprintf(buf3, "%s FILE: %s\n", mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)), buf1); if (DO_REF_SEARCH(ref)) { extract_hex(strstr(buf3, "OFFSET:") + strlen("OFFSET: "), &offs, 0, 0); if (VM_REF_CHECK_HEXVAL(ref, offs)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } } } else { sprintf(buf3, "%s (not mapped)\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start))); } if (display) fprintf(fp, "%s", buf3); start += PAGESIZE(); } return FALSE; } /* * Cache the passed-in vm_area_struct. */ char * fill_vma_cache(ulong vma) { int i; char *cache; vt->vma_cache_fills++; for (i = 0; i < VMA_CACHE; i++) { if (vt->cached_vma[i] == vma) { vt->cached_vma_hits[i]++; cache = vt->vma_cache + (SIZE(vm_area_struct)*i); return(cache); } } cache = vt->vma_cache + (SIZE(vm_area_struct)*vt->vma_cache_index); readmem(vma, KVADDR, cache, SIZE(vm_area_struct), "fill_vma_cache", FAULT_ON_ERROR); vt->cached_vma[vt->vma_cache_index] = vma; vt->vma_cache_index = (vt->vma_cache_index+1) % VMA_CACHE; return(cache); } /* * If active, clear the vm_area_struct references. 
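 *
 * (Callers of fill_vma_cache() get a pointer into this shared
 * round-robin cache, e.g.:
 *
 *	vma_buf = fill_vma_cache(vma);
 *	vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
 *
 * On a live system the cached copies can go stale between commands,
 * hence this flush; dumpfile contents cannot change, so the DUMPFILE()
 * check below leaves the cache intact.)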
*/ void clear_vma_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < VMA_CACHE; i++) { vt->cached_vma[i] = 0; vt->cached_vma_hits[i] = 0; } vt->vma_cache_fills = 0; vt->vma_cache_index = 0; }
/* * Check whether an address is a user stack address based * upon its vm_area_struct flags. */ int in_user_stack(ulong task, ulong vaddr) { ulong vma; ulonglong vm_flags; char *vma_buf; if ((vma = vm_area_dump(task, UVADDR|VERIFY_ADDR, vaddr, 0))) { vma_buf = fill_vma_cache(vma); vm_flags = get_vm_flags(vma_buf); if (vm_flags & VM_GROWSDOWN) return TRUE; else if (kernel_symbol_exists("expand_upwards") && (vm_flags & VM_GROWSUP)) return TRUE; /* * per-thread stack */ if ((vm_flags & (VM_READ|VM_WRITE)) == (VM_READ|VM_WRITE)) return TRUE; } return FALSE; }
/* * Set the values of filepages and anonpages * according to the MM_FILEPAGES and MM_ANONPAGES enumerators. */ static void rss_page_types_init(void) { long anonpages, filepages; if (VALID_MEMBER(mm_struct_rss)) return; if (VALID_MEMBER(mm_struct_rss_stat)) { if (!enumerator_value("MM_FILEPAGES", &filepages) || !enumerator_value("MM_ANONPAGES", &anonpages)) { filepages = 0; anonpages = 1; } tt->filepages = filepages; tt->anonpages = anonpages; } }
static struct tgid_context * tgid_quick_search(ulong tgid) { struct tgid_context *last, *next; tt->tgid_searches++; if (!(last = tt->last_tgid)) return NULL; if (tgid == last->tgid) { tt->tgid_cache_hits++; return last; } next = last + 1; if ((next < (tt->tgid_array + RUNNING_TASKS())) && (tgid == next->tgid)) { tt->tgid_cache_hits++; return next; } return NULL; }
static void collect_page_member_data(char *optlist, struct meminfo *mi) { int i; int members; char buf[BUFSIZE]; char *memberlist[MAXARGS]; struct struct_member_data *page_member_cache, *pmd; if ((count_chars(optlist, ',')+1) > MAXARGS) error(FATAL, "too many members in comma-separated list\n"); if ((LASTCHAR(optlist) == ',') || (LASTCHAR(optlist) == '.')) error(FATAL, "invalid format: %s\n", optlist); strcpy(buf, optlist); replace_string(optlist, ",", ' '); if (!(members = parse_line(optlist, memberlist))) error(FATAL, "invalid page struct member list format: %s\n", buf); page_member_cache = (struct struct_member_data *) GETBUF(sizeof(struct struct_member_data) * members); for (i = 0, pmd = page_member_cache; i < members; i++, pmd++) { pmd->structure = "page"; pmd->member = memberlist[i]; if (!fill_struct_member_data(pmd)) error(FATAL, "invalid %s struct member: %s\n", pmd->structure, pmd->member); if (CRASHDEBUG(1)) { fprintf(fp, " structure: %s\n", pmd->structure); fprintf(fp, " member: %s\n", pmd->member); fprintf(fp, " type: %ld\n", pmd->type); fprintf(fp, " unsigned_type: %ld\n", pmd->unsigned_type); fprintf(fp, " length: %ld\n", pmd->length); fprintf(fp, " offset: %ld\n", pmd->offset); fprintf(fp, " bitpos: %ld\n", pmd->bitpos); fprintf(fp, " bitsize: %ld%s", pmd->bitsize, members > 1 ? 
"\n\n" : "\n"); } } mi->nr_members = members; mi->page_member_cache = page_member_cache; } static int get_bitfield_data(struct integer_data *bd) { int pos, size; uint32_t tmpvalue32; uint64_t tmpvalue64; uint32_t mask32; uint64_t mask64; struct struct_member_data *pmd; pmd = bd->pmd; pos = bd->pmd->bitpos; size = bd->pmd->bitsize; if (pos == 0 && size == 0) { bd->bitfield_value = bd->value; return TRUE; } switch (__BYTE_ORDER) { case __LITTLE_ENDIAN: switch (pmd->length) { case 4: tmpvalue32 = (uint32_t)bd->value; tmpvalue32 >>= pos; mask32 = (1 << size) - 1; tmpvalue32 &= mask32; bd->bitfield_value = (ulong)tmpvalue32; break; case 8: tmpvalue64 = (uint64_t)bd->value; tmpvalue64 >>= pos; mask64 = (1UL << size) - 1; tmpvalue64 &= mask64; bd->bitfield_value = tmpvalue64; break; default: return FALSE; } break; case __BIG_ENDIAN: switch (pmd->length) { case 4: tmpvalue32 = (uint32_t)bd->value; tmpvalue32 <<= pos; tmpvalue32 >>= (32-size); mask32 = (1 << size) - 1; tmpvalue32 &= mask32; bd->bitfield_value = (ulong)tmpvalue32; break; case 8: tmpvalue64 = (uint64_t)bd->value; tmpvalue64 <<= pos; tmpvalue64 >>= (64-size); mask64 = (1UL << size) - 1; tmpvalue64 &= mask64; bd->bitfield_value = tmpvalue64; break; default: return FALSE; } break; } return TRUE; } static int show_page_member_data(char *pcache, ulong pp, struct meminfo *mi, char *outputbuffer) { int bufferindex, i, c, cnt, radix, struct_intbuf[10]; ulong longbuf, struct_longbuf[10]; unsigned char boolbuf; void *voidptr; ushort shortbuf; struct struct_member_data *pmd; struct integer_data integer_data; bufferindex = 0; pmd = mi->page_member_cache; bufferindex += sprintf(outputbuffer + bufferindex, "%lx ", pp); for (i = 0; i < mi->nr_members; pmd++, i++) { switch (pmd->type) { case TYPE_CODE_PTR: voidptr = VOID_PTR(pcache + pmd->offset); bufferindex += sprintf(outputbuffer + bufferindex, VADDR_PRLEN == 8 ? "%08lx " : "%016lx ", (ulong)voidptr); break; case TYPE_CODE_INT: switch (pmd->length) { case 1: integer_data.value = UCHAR(pcache + pmd->offset); break; case 2: integer_data.value = USHORT(pcache + pmd->offset); break; case 4: integer_data.value = UINT(pcache + pmd->offset); break; case 8: if (BITS32()) goto unsupported; integer_data.value = ULONG(pcache + pmd->offset); break; default: goto unsupported; } integer_data.pmd = pmd; if (get_bitfield_data(&integer_data)) longbuf = integer_data.bitfield_value; else goto unsupported; if (STREQ(pmd->member, "flags")) radix = 16; else if (STRNEQ(pmd->member, "_count") || STRNEQ(pmd->member, "_mapcount")) radix = 10; else radix = *gdb_output_radix; if (pmd->unsigned_type) { if (pmd->length == sizeof(ulonglong)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%lu " : "%016lx ", longbuf); else if (pmd->length == sizeof(int)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%08x ", (uint)longbuf); else if (pmd->length == sizeof(short)) { bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%04x ", (ushort)longbuf); } else if (pmd->length == sizeof(char)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%02x ", (unsigned char)longbuf); } else { if (pmd->length == sizeof(ulonglong)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%ld " : "%016lx", longbuf); else if (pmd->length == sizeof(int)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%08x ", (int)longbuf); else if (pmd->length == sizeof(short)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? 
"%d " : "%04x ", (short)longbuf); else if (pmd->length == sizeof(char)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%02x ", (char)longbuf); } break; case TYPE_CODE_STRUCT: if (STRNEQ(pmd->member, "_count") || STRNEQ(pmd->member, "_mapcount")) { BCOPY(pcache+pmd->offset, (char *)&struct_intbuf[0], pmd->length); bufferindex += sprintf(outputbuffer + bufferindex, "%d ", struct_intbuf[0]); } else if ((pmd->length % sizeof(long)) == 0) { BCOPY(pcache+pmd->offset, (char *)&struct_longbuf[0], pmd->length); cnt = pmd->length / sizeof(long); for (c = 0; c < cnt; c++) { bufferindex += sprintf(outputbuffer + bufferindex, BITS32() ? "%08lx%s" : "%016lx%s", struct_longbuf[c], (c+1) < cnt ? "," : ""); } bufferindex += sprintf(outputbuffer + bufferindex, " "); } else if ((pmd->length % sizeof(int)) == 0) { BCOPY(pcache+pmd->offset, (char *)&struct_intbuf[0], pmd->length); cnt = pmd->length / sizeof(int); for (c = 0; c < cnt; c++) { bufferindex += sprintf(outputbuffer + bufferindex, "%08x%s", struct_intbuf[c], (c+1) < cnt ? "," : ""); } } else if (pmd->length == sizeof(short)) { BCOPY(pcache+pmd->offset, (char *)&shortbuf, pmd->length); bufferindex += sprintf(outputbuffer + bufferindex, "%04x ", shortbuf); } else goto unsupported; break; case TYPE_CODE_BOOL: radix = *gdb_output_radix; boolbuf = UCHAR(pcache + pmd->offset); if (boolbuf <= 1) bufferindex += sprintf(outputbuffer + bufferindex, "%s ", boolbuf ? "true" : "false"); else bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d" : "%x ", boolbuf); break; default: unsupported: error(FATAL, "unsupported page member reference: %s.%s\n", pmd->structure, pmd->member); break; } } return bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } /* * Fill in the task_mem_usage structure with the RSS, virtual memory size, * percent of physical memory being used, and the mm_struct address. */ void get_task_mem_usage(ulong task, struct task_mem_usage *tm) { struct task_context *tc; long rss = 0; BZERO(tm, sizeof(struct task_mem_usage)); if (IS_ZOMBIE(task) || IS_EXITING(task)) return; tc = task_to_context(task); if (!tc || !tc->mm_struct) /* probably a kernel thread */ return; tm->mm_struct_addr = tc->mm_struct; if (!task_mm(task, TRUE)) return; if (VALID_MEMBER(mm_struct_rss)) /* * mm_struct.rss or mm_struct._rss exist. */ tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss)); else { /* * Latest kernels have mm_struct.mm_rss_stat[]. 
*/ if (VALID_MEMBER(mm_struct_rss_stat)) { long anonpages, filepages; anonpages = tt->anonpages; filepages = tt->filepages; rss += LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) + OFFSET(mm_rss_stat_count) + (filepages * sizeof(long))); rss += LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) + OFFSET(mm_rss_stat_count) + (anonpages * sizeof(long))); } /* Check whether SPLIT_RSS_COUNTING is enabled */ if (VALID_MEMBER(task_struct_rss_stat)) { int sync_rss; struct tgid_context tgid, *tgid_array, *tg, *first, *last; tgid_array = tt->tgid_array; tgid.tgid = task_tgid(task); if (!(tg = tgid_quick_search(tgid.tgid))) tg = (struct tgid_context *)bsearch(&tgid, tgid_array, RUNNING_TASKS(), sizeof(struct tgid_context), sort_by_tgid); if (tg) { /* find the first element which has the same tgid */ first = tg; while ((first > tgid_array) && ((first - 1)->tgid == first->tgid)) first--; /* find the last element which has the same tgid */ last = tg; while ((last < (tgid_array + (RUNNING_TASKS() - 1))) && (last->tgid == (last + 1)->tgid)) last++; while (first <= last) { /* count 0 -> filepages */ if (!readmem(first->task + OFFSET(task_struct_rss_stat) + OFFSET(task_rss_stat_count), KVADDR, &sync_rss, sizeof(int), "task_struct rss_stat MM_FILEPAGES", RETURN_ON_ERROR)) { /* advance past an unreadable task so the loop cannot spin on it */ first++; continue; } rss += sync_rss; /* count 1 -> anonpages */ if (!readmem(first->task + OFFSET(task_struct_rss_stat) + OFFSET(task_rss_stat_count) + sizeof(int), KVADDR, &sync_rss, sizeof(int), "task_struct rss_stat MM_ANONPAGES", RETURN_ON_ERROR)) { /* advance past an unreadable task so the loop cannot spin on it */ first++; continue; } rss += sync_rss; if (first == last) break; first++; } tt->last_tgid = last; } } /* * mm_struct._anon_rss and mm_struct._file_rss should exist. */ if (VALID_MEMBER(mm_struct_anon_rss)) rss += LONG(tt->mm_struct + OFFSET(mm_struct_anon_rss)); if (VALID_MEMBER(mm_struct_file_rss)) rss += LONG(tt->mm_struct + OFFSET(mm_struct_file_rss)); tm->rss = (unsigned long)rss; } tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd)); if (is_kernel_thread(task) && !tm->rss) return; tm->pct_physmem = ((double)(tm->rss*100)) / ((double)(MIN(vt->total_pages, vt->num_physpages ? vt->num_physpages : vt->total_pages))); }
/* * cmd_kmem() is designed as a multi-purpose kernel memory investigator with * the flag argument sending it off in a multitude of areas. To date, the * following options are defined: * * -f displays the contents of the system free_area[] array headers; * also verifies that the page count equals nr_free_pages * -F same as -f, but also dumps all pages linked to that header. * -p displays basic information about each page in the system * mem_map[] array. * -s displays kmalloc() slab data. * -S same as -s, but displays all kmalloc() objects. * -v displays the vmlist entries. * -c displays the number of pages in the page_hash_table. * -C displays all entries in the page_hash_table. * -i displays informational data shown by /proc/meminfo. * -h hugepage information from hstates[] array * * -P forces address to be defined as a physical address * address when used with -f, the address can be either a page pointer * or a physical address; the free_area header containing the page * (if any) is displayed. * When used with -p, the address can be either a page pointer or a * physical address; its basic mem_map page information is displayed. * When used with -c, the page_hash_table entry containing the * page pointer is displayed. 
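 *
 * Example invocations (illustrative):
 *
 *	crash> kmem -i               (/proc/meminfo-style summary)
 *	crash> kmem -s kmalloc-256   (one kmem_cache, by name)
 *	crash> kmem -s list          (list all kmem_cache names)
 *	crash> kmem -p c101196c      (mem_map data for one page)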
*/ /* Note: VERBOSE is 0x1, ADDRESS_SPECIFIED is 0x2 */ #define GET_TOTALRAM_PAGES (ADDRESS_SPECIFIED << 1) #define GET_SHARED_PAGES (ADDRESS_SPECIFIED << 2) #define GET_FREE_PAGES (ADDRESS_SPECIFIED << 3) #define GET_FREE_HIGHMEM_PAGES (ADDRESS_SPECIFIED << 4) #define GET_ZONE_SIZES (ADDRESS_SPECIFIED << 5) #define GET_HIGHEST (ADDRESS_SPECIFIED << 6) #define GET_BUFFERS_PAGES (ADDRESS_SPECIFIED << 7) #define GET_SLAB_PAGES (ADDRESS_SPECIFIED << 8) #define GET_PHYS_TO_VMALLOC (ADDRESS_SPECIFIED << 9) #define GET_ACTIVE_LIST (ADDRESS_SPECIFIED << 10) #define GET_INACTIVE_LIST (ADDRESS_SPECIFIED << 11) #define GET_INACTIVE_CLEAN (ADDRESS_SPECIFIED << 12) /* obsolete */ #define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */ #define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14) #define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15) #define GET_VMLIST_COUNT (ADDRESS_SPECIFIED << 16) #define GET_VMLIST (ADDRESS_SPECIFIED << 17) #define SLAB_DATA_NOSAVE (ADDRESS_SPECIFIED << 18) #define GET_SLUB_SLABS (ADDRESS_SPECIFIED << 19) #define GET_SLUB_OBJECTS (ADDRESS_SPECIFIED << 20) #define VMLIST_VERIFY (ADDRESS_SPECIFIED << 21) #define SLAB_FIRST_NODE (ADDRESS_SPECIFIED << 22) #define CACHE_SET (ADDRESS_SPECIFIED << 23) #define SLAB_OVERLOAD_PAGE_PTR (ADDRESS_SPECIFIED << 24) #define SLAB_BITFIELD (ADDRESS_SPECIFIED << 25) #define SLAB_GATHER_FAILURE (ADDRESS_SPECIFIED << 26) #define GET_SLAB_ROOT_CACHES (ADDRESS_SPECIFIED << 27) #define GET_ALL \ (GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES) void cmd_kmem(void) { int i; int c; int sflag, Sflag, pflag, fflag, Fflag, vflag, zflag, oflag, gflag; int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag, hflag; int rflag; struct meminfo meminfo; ulonglong value[MAXARGS]; char buf[BUFSIZE]; char *p1; int spec_addr, escape; spec_addr = 0; sflag = Sflag = pflag = fflag = Fflag = Pflag = zflag = oflag = 0; vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = Vflag = 0; gflag = hflag = rflag = 0; escape = FALSE; BZERO(&meminfo, sizeof(struct meminfo)); BZERO(&value[0], sizeof(ulonglong)*MAXARGS); pc->curcmd_flags &= ~HEADER_PRINTED; while ((c = getopt(argcnt, args, "gI:sSrFfm:pvczCinl:L:PVoh")) != EOF) { switch(c) { case 'V': Vflag = 1; break; case 'n': nflag = 1; break; case 'z': zflag = 1; break; case 'i': iflag = 1; break; case 'h': hflag = 1; break; case 'C': Cflag = 1, cflag = 0;; break; case 'c': cflag = 1, Cflag = 0; break; case 'v': vflag = 1; break; case 's': sflag = 1; Sflag = rflag = 0; break; case 'S': Sflag = 1; sflag = rflag = 0; break; case 'r': rflag = 1; sflag = Sflag = 0; break; case 'F': Fflag = 1; fflag = 0; break;; case 'f': fflag = 1; Fflag = 0; break;; case 'p': pflag = 1; break; case 'm': pflag = 1; collect_page_member_data(optarg, &meminfo); break; case 'I': meminfo.ignore = optarg; break; case 'l': if (STREQ(optarg, "a")) { meminfo.flags |= GET_ACTIVE_LIST; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "i")) { meminfo.flags |= GET_INACTIVE_LIST; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "ic")) { meminfo.flags |= GET_INACTIVE_CLEAN; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "id")) { meminfo.flags |= GET_INACTIVE_DIRTY; lflag = 1; Lflag = 0; } else argerrs++; break; case 'L': if (STREQ(optarg, "a")) { meminfo.flags |= GET_ACTIVE_LIST; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "i")) { meminfo.flags |= GET_INACTIVE_LIST; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "ic")) { meminfo.flags |= GET_INACTIVE_CLEAN; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "id")) { meminfo.flags |= 
GET_INACTIVE_DIRTY; Lflag = 1; lflag = 0; } else argerrs++; break; case 'P': Pflag = 1; break; case 'o': oflag = 1; break; case 'g': gflag = 1; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((sflag + Sflag + pflag + fflag + Fflag + Vflag + oflag + vflag + Cflag + cflag + iflag + lflag + Lflag + gflag + hflag + rflag) > 1) { error(INFO, "only one flag allowed!\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (sflag || Sflag || rflag || !(vt->flags & KMEM_CACHE_INIT)) kmem_cache_init(); while (args[optind]) { if (hexadecimal(args[optind], 0)) { value[spec_addr++] = htoll(args[optind], FAULT_ON_ERROR, NULL); } else { if (meminfo.reqname) error(FATAL, "only one kmem_cache reference is allowed\n"); meminfo.reqname = args[optind]; if (args[optind][0] == '\\') { meminfo.reqname = &args[optind][1]; escape = TRUE; } else meminfo.reqname = args[optind]; if (!sflag && !Sflag && !rflag) cmd_usage(pc->curcmd, SYNOPSIS); } optind++; } for (i = 0; i < spec_addr; i++) { if (Pflag) meminfo.memtype = PHYSADDR; else meminfo.memtype = IS_KVADDR(value[i]) ? KVADDR : PHYSADDR; if (fflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_free_pages(&meminfo); fflag++; } if (pflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; dump_mem_map(&meminfo); pflag++; } if (sflag || Sflag) { if (vt->flags & KMEM_CACHE_UNAVAIL) error(FATAL, "kmem cache slab subsystem not available\n"); meminfo.flags = Sflag ? VERBOSE : 0; if (meminfo.memtype == PHYSADDR) { if (value[i] < VTOP(vt->high_memory)) { value[i] = PTOV(value[i]); meminfo.memtype = KVADDR; } else error(WARNING, "cannot make virtual-to-physical translation: %llx\n", value[i]); } if ((p1 = is_kmem_cache_addr(value[i], buf))) { if (meminfo.reqname) error(FATAL, "only one kmem_cache reference is allowed\n"); meminfo.reqname = p1; meminfo.cache = value[i]; meminfo.flags |= CACHE_SET; if ((i+1) == spec_addr) { /* done? */ if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_kmem_cache(&meminfo); } meminfo.flags &= ~CACHE_SET; } else { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (Sflag && (vt->flags & KMALLOC_SLUB)) meminfo.flags |= VERBOSE; if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_kmem_cache(&meminfo); } if (sflag) sflag++; if (Sflag) Sflag++; } if (vflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; dump_vmlist(&meminfo); vflag++; } if (cflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); dump_page_hash_table(&meminfo); cflag++; } if (lflag) { meminfo.spec_addr = value[i]; meminfo.flags |= (ADDRESS_SPECIFIED|VERBOSE); if (meminfo.calls++) fprintf(fp, "\n"); dump_page_lists(&meminfo); lflag++; } if (gflag) { if (i) fprintf(fp, "\n"); dump_page_flags(value[i]); gflag++; } /* * no value arguments allowed! 
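 * (e.g. "kmem -i c1000000" draws the error below)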
*/ if (zflag || nflag || iflag || Fflag || Cflag || Lflag || Vflag || oflag || hflag || rflag) { error(INFO, "no address arguments allowed with this option\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (!(sflag + Sflag + pflag + fflag + vflag + cflag + lflag + Lflag + gflag)) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); else kmem_cache_init(); kmem_search(&meminfo); } } if (iflag == 1) dump_kmeminfo(); if (pflag == 1) dump_mem_map(&meminfo); if (fflag == 1) vt->dump_free_pages(&meminfo); if (Fflag == 1) { meminfo.flags = VERBOSE; vt->dump_free_pages(&meminfo); } if (hflag == 1) dump_hstates(); if (sflag == 1 || rflag == 1) { if (rflag) { if (!((vt->flags & KMALLOC_SLUB) && (vt->flags & SLAB_ROOT_CACHES))) option_not_supported('r'); meminfo.flags = GET_SLAB_ROOT_CACHES; } if (!escape && STREQ(meminfo.reqname, "list")) kmem_cache_list(&meminfo); else if (vt->flags & KMEM_CACHE_UNAVAIL) error(FATAL, "kmem cache slab subsystem not available\n"); else vt->dump_kmem_cache(&meminfo); } if (Sflag == 1) { if (STREQ(meminfo.reqname, "list")) kmem_cache_list(&meminfo); else if (vt->flags & KMEM_CACHE_UNAVAIL) error(FATAL, "kmem cache slab subsystem not available\n"); else { meminfo.flags = VERBOSE; vt->dump_kmem_cache(&meminfo); } } if (vflag == 1) dump_vmlist(&meminfo); if (Cflag == 1) { meminfo.flags = VERBOSE; dump_page_hash_table(&meminfo); } if (cflag == 1) dump_page_hash_table(&meminfo); if (nflag == 1) dump_memory_nodes(MEMORY_NODES_DUMP); if (zflag == 1) dump_zone_stats(); if (lflag == 1) { dump_page_lists(&meminfo); } if (Lflag == 1) { meminfo.flags |= VERBOSE; dump_page_lists(&meminfo); } if (Vflag == 1) { dump_vm_stat(NULL, NULL, 0); dump_page_states(); dump_vm_event_state(); } if (oflag == 1) dump_per_cpu_offsets(); if (gflag == 1) dump_page_flags(0); if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + Vflag + zflag + oflag + cflag + Cflag + iflag + nflag + lflag + Lflag + gflag + hflag + rflag + meminfo.calls)) cmd_usage(pc->curcmd, SYNOPSIS); } static void PG_reserved_flag_init(void) { ulong pageptr; int count; ulong vaddr, flags; char *buf; if (enumerator_value("PG_reserved", (long *)&flags)) { vt->PG_reserved = 1 << flags; if (CRASHDEBUG(2)) fprintf(fp, "PG_reserved (enum): %lx\n", vt->PG_reserved); return; } vaddr = kt->stext; if (!vaddr) { if (kernel_symbol_exists("sys_read")) vaddr = symbol_value("sys_read"); else if (kernel_symbol_exists("__x64_sys_read")) vaddr = symbol_value("__x64_sys_read"); } if (!phys_to_page((physaddr_t)VTOP(vaddr), &pageptr)) return; buf = (char *)GETBUF(SIZE(page)); if (!readmem(pageptr, KVADDR, buf, SIZE(page), "reserved page", RETURN_ON_ERROR|QUIET)) { FREEBUF(buf); return; } flags = ULONG(buf + OFFSET(page_flags)); count = INT(buf + OFFSET(page_count)); if (count_bits_long(flags) == 1) vt->PG_reserved = flags; else vt->PG_reserved = 1 << (ffsl(flags)-1); if (count == -1) vt->flags |= PGCNT_ADJ; if (CRASHDEBUG(2)) fprintf(fp, "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n", vaddr, pageptr, flags, vt->PG_reserved); FREEBUF(buf); } static void PG_slab_flag_init(void) { int bit; ulong pageptr; ulong vaddr, flags, flags2; char buf[BUFSIZE]; /* safe for a page struct */ /* * Set the old defaults in case all else fails. 
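 * The fallback order below: use the PG_slab pageflags enumerator when
 * the kernel exports one; otherwise hardwire the historical bit
 * position (10 for pre-2.6 kernels with page.pte, 7 for 2.6 and
 * later); otherwise probe the page backing vm_area_cachep -- known to
 * be a slab page -- and take its lowest set flag bit.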
*/ if (enumerator_value("PG_slab", (long *)&flags)) { vt->PG_slab = flags; if (CRASHDEBUG(2)) fprintf(fp, "PG_slab (enum): %lx\n", vt->PG_slab); } else if (VALID_MEMBER(page_pte)) { if (THIS_KERNEL_VERSION < LINUX(2,6,0)) vt->PG_slab = 10; else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) vt->PG_slab = 7; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { vt->PG_slab = 7; } else { if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) && phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) && readmem(pageptr, KVADDR, buf, SIZE(page), "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) { flags = ULONG(buf + OFFSET(page_flags)); if ((bit = ffsl(flags))) { vt->PG_slab = bit - 1; if (CRASHDEBUG(2)) fprintf(fp, "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n", vaddr, pageptr, flags, vt->PG_slab); } } } if (VALID_MEMBER(page_compound_head)) { if (CRASHDEBUG(2)) fprintf(fp, "PG_head_tail_mask: (UNUSED): page.compound_head exists!\n"); } else if (vt->flags & KMALLOC_SLUB) { /* * PG_slab and the following are hardwired for * kernels prior to the pageflags enumerator. */ #define PG_compound 14 /* Part of a compound page */ #define PG_reclaim 17 /* To be reclaimed asap */ vt->PG_head_tail_mask = ((1L << PG_compound) | (1L << PG_reclaim)); if (enumerator_value("PG_tail", (long *)&flags)) vt->PG_head_tail_mask = (1L << flags); else if (enumerator_value("PG_compound", (long *)&flags) && enumerator_value("PG_reclaim", (long *)&flags2)) { vt->PG_head_tail_mask = ((1L << flags) | (1L << flags2)); if (CRASHDEBUG(2)) fprintf(fp, "PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask); } else if (vt->flags & PAGEFLAGS) { vt->PG_head_tail_mask = 0; error(WARNING, "SLUB: cannot determine how compound pages are linked\n\n"); } } else { if (enumerator_value("PG_tail", (long *)&flags)) vt->PG_head_tail_mask = (1L << flags); else if (enumerator_value("PG_compound", (long *)&flags) && enumerator_value("PG_reclaim", (long *)&flags2)) { vt->PG_head_tail_mask = ((1L << flags) | (1L << flags2)); if (CRASHDEBUG(2)) fprintf(fp, "PG_head_tail_mask: %lx (PG_compound|PG_reclaim)\n", vt->PG_head_tail_mask); } else if (vt->flags & PAGEFLAGS) error(WARNING, "SLAB: cannot determine how compound pages are linked\n\n"); } if (!vt->PG_slab) error(INFO, "cannot determine PG_slab bit value\n"); } /* * dump_mem_map() displays basic data about each entry in the mem_map[] * array, or if an address is specified, just the mem_map[] entry for that * address. Specified addresses can either be physical address or page * structure pointers. */ /* Page flag bit values */ #define v22_PG_locked 0 #define v22_PG_error 1 #define v22_PG_referenced 2 #define v22_PG_dirty 3 #define v22_PG_uptodate 4 #define v22_PG_free_after 5 #define v22_PG_decr_after 6 #define v22_PG_swap_unlock_after 7 #define v22_PG_DMA 8 #define v22_PG_Slab 9 #define v22_PG_swap_cache 10 #define v22_PG_skip 11 #define v22_PG_reserved 31 #define v24_PG_locked 0 #define v24_PG_error 1 #define v24_PG_referenced 2 #define v24_PG_uptodate 3 #define v24_PG_dirty 4 #define v24_PG_decr_after 5 #define v24_PG_active 6 #define v24_PG_inactive_dirty 7 #define v24_PG_slab 8 #define v24_PG_swap_cache 9 #define v24_PG_skip 10 #define v24_PG_inactive_clean 11 #define v24_PG_highmem 12 #define v24_PG_checked 13 /* kill me in 2.5.. 
*/ #define v24_PG_bigpage 14 /* bits 21-30 unused */ #define v24_PG_arch_1 30 #define v24_PG_reserved 31 #define v26_PG_private 12 #define PGMM_CACHED (512) static void dump_mem_map_SPARSEMEM(struct meminfo *mi) { ulong i; long total_pages; int others, page_not_mapped, phys_not_mapped, page_mapping; ulong pp, ppend; physaddr_t phys, physend; ulong tmp, reserved, shared, slabs; ulong PG_reserved_flag; long buffers; ulong inode, offset, flags, mapping, index; uint count; int print_hdr, pg_spec, phys_spec, done; int v22; char hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char *page_cache; char *pcache; ulong section, section_nr, nr_mem_sections, section_size; long buffersize; char *outputbuffer; int bufferindex; buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); char style1[100]; char style2[100]; char style3[100]; char style4[100]; sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. 
page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; nr_mem_sections = NR_MEM_SECTIONS(); bufferindex = 0; /* * Iterate over all possible sections */ for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) { if (CRASHDEBUG(2)) fprintf(fp, "section_nr = %ld\n", section_nr); /* * If we are looking up a specific address, jump directly * to the section with that page */ if (mi->flags & ADDRESS_SPECIFIED) { ulong pfn; physaddr_t tmp; if (pg_spec) { if (!page_to_phys(mi->spec_addr, &tmp)) return; pfn = tmp >> PAGESHIFT(); } else pfn = mi->spec_addr >> PAGESHIFT(); section_nr = pfn_to_section_nr(pfn); } if (!(section = valid_section_nr(section_nr))) { #ifdef NOTDEF break; /* On a real sparsemem system we need to check * every section as gaps may exist. But this * can be slow. If we know we don't have gaps * just stop validating sections when we * get to the end of the valid ones. * In the future find a way to short circuit * this loop. 
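 * As a partial short circuit, the code below does break out of the
 * section walk when a specific address was requested, because in that
 * case the target section was already computed directly from the pfn:
 *
 *	section_nr = pfn_to_section_nr(pfn);
 *
 * so hitting an invalid section means the lookup can simply stop.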
*/ #endif if (mi->flags & ADDRESS_SPECIFIED) break; continue; } if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } pp = section_mem_map_addr(section, 0); pp = sparse_decode_mem_map(pp, section_nr); phys = (physaddr_t) section_nr * PAGES_PER_SECTION() * PAGESIZE(); section_size = PAGES_PER_SECTION(); for (i = 0; i < section_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if ((flags >> vt->PG_slab) & 1) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } static void dump_mem_map(struct meminfo *mi) { long i, n; long total_pages; int others, page_not_mapped, 
phys_not_mapped, page_mapping; ulong pp, ppend; physaddr_t phys, physend; ulong tmp, reserved, shared, slabs; ulong PG_reserved_flag; long buffers; ulong inode, offset, flags, mapping, index; ulong node_size; uint count; int print_hdr, pg_spec, phys_spec, done; int v22; struct node_table *nt; char hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char *page_cache; char *pcache; long buffersize; char *outputbuffer; int bufferindex; char style1[100]; char style2[100]; char style3[100]; char style4[100]; if (IS_SPARSEMEM()) { dump_mem_map_SPARSEMEM(mi); return; } buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; bufferindex = 0; for (n = 0; n < vt->numnodes; n++) { if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s%s", n ? 
"\n" : "", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } nt = &vt->node_table[n]; total_pages += nt->size; pp = nt->mem_map; phys = nt->start_paddr; if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) node_size = vt->max_mapnr; else node_size = nt->size; for (i = 0; i < node_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if ((flags >> vt->PG_slab) & 1) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } /* * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the * passed-in buffer. 
The mem_map array is normally guaranteed to be * readable except in the case of virtual mem_map usage. When V_MEM_MAP * is in place, read all pages consumed by PGMM_CACHED page structures * that are currently mapped, leaving the unmapped ones just zeroed out. */ static void fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache) { long size, cnt; ulong addr; char *bufptr; /* * Try to read it in one fell swoop. */ if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, "page struct cache", RETURN_ON_ERROR|QUIET)) return; /* * Break it into page-size-or-less requests, warning if it's * not a virtual mem_map. */ size = SIZE(page) * PGMM_CACHED; addr = pp; bufptr = page_cache; while (size > 0) { /* * Compute bytes till end of page. */ cnt = PAGESIZE() - PAGEOFFSET(addr); if (cnt > size) cnt = size; if (!readmem(addr, KVADDR, bufptr, cnt, "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { BZERO(bufptr, cnt); if (!((vt->flags & V_MEM_MAP) || (machdep->flags & VMEMMAP)) && ((addr+cnt) < ppend)) error(WARNING, "mem_map[] from %lx to %lx not accessible\n", addr, addr+cnt); } addr += cnt; bufptr += cnt; size -= cnt; } } static void dump_hstates() { char *hstate; int i, len, order; long nr, free; ulong vaddr; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (!kernel_symbol_exists("hstates")) { error(INFO, "hstates[] array does not exist\n"); option_not_supported('h'); } if (INVALID_SIZE(hstate) || INVALID_MEMBER(hstate_order) || INVALID_MEMBER(hstate_name) || INVALID_MEMBER(hstate_nr_huge_pages) || INVALID_MEMBER(hstate_free_huge_pages)) { error(INFO, "hstate structure or members have changed\n"); option_not_supported('h'); } fprintf(fp, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "HSTATE")); fprintf(fp, " SIZE FREE TOTAL NAME\n"); len = get_array_length("hstates", NULL, 0); hstate = GETBUF(SIZE(hstate)); for (i = 0; i < len; i++) { vaddr = symbol_value("hstates") + (SIZE(hstate) * i); if (!readmem(vaddr, KVADDR, hstate, SIZE(hstate), "hstate", RETURN_ON_ERROR)) break; order = INT(hstate + OFFSET(hstate_order)); if (!order) continue; fprintf(fp, "%lx ", vaddr); pages_to_size(1 << order, buf1); shift_string_left(first_space(buf1), 1); fprintf(fp, "%s ", mkstring(buf2, 5, RJUST, buf1)); free = LONG(hstate + OFFSET(hstate_free_huge_pages)); sprintf(buf1, "%ld", free); fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1)); nr = LONG(hstate + OFFSET(hstate_nr_huge_pages)); sprintf(buf1, "%ld", nr); fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1)); fprintf(fp, "%s\n", hstate + OFFSET(hstate_name)); } FREEBUF(hstate); } static void page_flags_init(void) { if (!page_flags_init_from_pageflag_names()) page_flags_init_from_pageflags_enum(); PG_reserved_flag_init(); PG_slab_flag_init(); } static int page_flags_init_from_pageflag_names(void) { int i, len; char *buffer, *nameptr; char namebuf[BUFSIZE]; ulong mask; void *name; MEMBER_OFFSET_INIT(trace_print_flags_mask, "trace_print_flags", "mask"); MEMBER_OFFSET_INIT(trace_print_flags_name, "trace_print_flags", "name"); STRUCT_SIZE_INIT(trace_print_flags, "trace_print_flags"); if (INVALID_SIZE(trace_print_flags) || INVALID_MEMBER(trace_print_flags_mask) || INVALID_MEMBER(trace_print_flags_name) || !kernel_symbol_exists("pageflag_names") || !(len = get_array_length("pageflag_names", NULL, 0))) return FALSE; buffer = GETBUF(SIZE(trace_print_flags) * len); if (!readmem(symbol_value("pageflag_names"), KVADDR, buffer, SIZE(trace_print_flags) * len, "pageflag_names array", RETURN_ON_ERROR)) { FREEBUF(buffer); return FALSE; } if (!(vt->pageflags_data = (struct 
pageflags_data *) malloc(sizeof(struct pageflags_data) * len))) { error(INFO, "cannot malloc pageflags_data cache\n"); FREEBUF(buffer); return FALSE; } if (CRASHDEBUG(1)) fprintf(fp, "pageflags from pageflag_names: \n"); for (i = 0; i < len; i++) { mask = ULONG(buffer + (SIZE(trace_print_flags)*i) + OFFSET(trace_print_flags_mask)); name = VOID_PTR(buffer + (SIZE(trace_print_flags)*i) + OFFSET(trace_print_flags_name)); if ((mask == -1UL) && !name) { /* Linux 3.5 and earlier */ len--; break; } if ((mask == 0UL) && !name) { /* Linux 4.6 and later */ len--; break; } if (!read_string((ulong)name, namebuf, BUFSIZE-1)) { error(INFO, "failed to read pageflag_names entry (i: %d name: \"%s\" mask: %ld)\n", i, name, mask); goto pageflags_fail; } if (!(nameptr = (char *)malloc(strlen(namebuf)+1))) { error(INFO, "cannot malloc pageflag_names space\n"); goto pageflags_fail; } strcpy(nameptr, namebuf); vt->pageflags_data[i].name = nameptr; vt->pageflags_data[i].mask = mask; if (CRASHDEBUG(1)) { fprintf(fp, " %08lx %s\n", vt->pageflags_data[i].mask, vt->pageflags_data[i].name); } } FREEBUF(buffer); vt->nr_pageflags = len; vt->flags |= PAGEFLAGS; return TRUE; pageflags_fail: FREEBUF(buffer); free(vt->pageflags_data); vt->pageflags_data = NULL; return FALSE; } static int page_flags_init_from_pageflags_enum(void) { int c; int p, len; char *nameptr; char buf[BUFSIZE]; char *arglist[MAXARGS]; if (!(vt->pageflags_data = (struct pageflags_data *) malloc(sizeof(struct pageflags_data) * 32))) { error(INFO, "cannot malloc pageflags_data cache\n"); return FALSE; } p = 0; pc->flags2 |= ALLOW_FP; open_tmpfile(); if (dump_enumerator_list("pageflags")) { rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (!strstr(buf, " = ")) continue; c = parse_line(buf, arglist); if (strstr(arglist[0], "__NR_PAGEFLAGS")) { len = atoi(arglist[2]); if (!len || (len > 32)) goto enum_fail; vt->nr_pageflags = len; break; } if (!(nameptr = (char *)malloc(strlen(arglist[0])))) { error(INFO, "cannot malloc pageflags name space\n"); goto enum_fail; } strcpy(nameptr, arglist[0] + strlen("PG_")); vt->pageflags_data[p].name = nameptr; vt->pageflags_data[p].mask = 1 << atoi(arglist[2]); p++; } } else goto enum_fail; close_tmpfile(); pc->flags2 &= ~ALLOW_FP; if (CRASHDEBUG(1)) { fprintf(fp, "pageflags from enum: \n"); for (p = 0; p < vt->nr_pageflags; p++) fprintf(fp, " %08lx %s\n", vt->pageflags_data[p].mask, vt->pageflags_data[p].name); } vt->flags |= PAGEFLAGS; return TRUE; enum_fail: close_tmpfile(); pc->flags2 &= ~ALLOW_FP; for (c = 0; c < p; c++) free(vt->pageflags_data[c].name); free(vt->pageflags_data); vt->pageflags_data = NULL; vt->nr_pageflags = 0; return FALSE; } static int translate_page_flags(char *buffer, ulong flags) { char buf[BUFSIZE]; int i, others; sprintf(buf, "%lx", flags); if (flags) { for (i = others = 0; i < vt->nr_pageflags; i++) { if (flags & vt->pageflags_data[i].mask) sprintf(&buf[strlen(buf)], "%s%s", others++ ? "," : " ", vt->pageflags_data[i].name); } } strcat(buf, "\n"); strcpy(buffer, buf); return(strlen(buf)); } /* * Display the mem_map data for a single page. */ int dump_inode_page(ulong page) { struct meminfo meminfo; if (!is_page_ptr(page, NULL)) return 0; BZERO(&meminfo, sizeof(struct meminfo)); meminfo.spec_addr = page; meminfo.memtype = KVADDR; meminfo.flags = ADDRESS_SPECIFIED; dump_mem_map(&meminfo); return meminfo.retval; } /* * dump_page_hash_table() displays the entries in each page_hash_table. 
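 * This is the back end of "kmem -c" ("kmem -C" also dumps each populated
 * hash bucket).  An illustrative session, with a fabricated count:
 *
 *	crash> kmem -c
 *	page_cache_size: 26805 (verified)
 *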
*/ #define PGHASH_CACHED (1024) static void dump_page_hash_table(struct meminfo *hi) { int i; int len, entry_len; ulong page_hash_table, head; struct list_data list_data, *ld; struct gnu_request req; long total_cached; long page_cache_size; ulong this_addr, searchpage; int errflag, found, cnt, populated, verbose; uint ival; ulong buffer_pages; char buf[BUFSIZE]; char hash_table[BUFSIZE]; char *pcache, *pghash_cache; if (!vt->page_hash_table) { if (hi->flags & VERBOSE) option_not_supported('C'); if (symbol_exists("nr_pagecache")) { buffer_pages = nr_blockdev_pages(); get_symbol_data("nr_pagecache", sizeof(int), &ival); page_cache_size = (ulong)ival; page_cache_size -= buffer_pages; fprintf(fp, "page cache size: %ld\n", page_cache_size); if (hi->flags & ADDRESS_SPECIFIED) option_not_supported('c'); } else option_not_supported('c'); return; } ld = &list_data; if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) { verbose = TRUE; searchpage = hi->spec_addr; } else if (hi->flags & VERBOSE) { verbose = TRUE; searchpage = 0; } else { verbose = FALSE; searchpage = 0; } if (vt->page_hash_table_len == 0) error(FATAL, "cannot determine size of page_hash_table\n"); page_hash_table = vt->page_hash_table; len = vt->page_hash_table_len; entry_len = VALID_STRUCT(page_cache_bucket) ? SIZE(page_cache_bucket) : sizeof(void *); populated = 0; if (CRASHDEBUG(1)) fprintf(fp, "page_hash_table length: %d\n", len); get_symbol_type("page_cache_size", NULL, &req); if (req.length == sizeof(int)) { get_symbol_data("page_cache_size", sizeof(int), &ival); page_cache_size = (long)ival; } else get_symbol_data("page_cache_size", sizeof(long), &page_cache_size); pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); if (searchpage) open_tmpfile(); hq_open(); for (i = total_cached = 0; i < len; i++, page_hash_table += entry_len) { if ((i % PGHASH_CACHED) == 0) { readmem(page_hash_table, KVADDR, pghash_cache, entry_len * PGHASH_CACHED, "page hash cache", FAULT_ON_ERROR); } pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); if (VALID_STRUCT(page_cache_bucket)) pcache += OFFSET(page_cache_bucket_chain); head = ULONG(pcache); if (!head) continue; if (verbose) fprintf(fp, "page_hash_table[%d]\n", i); if (CRASHDEBUG(1)) populated++; BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = head; ld->searchfor = searchpage; ld->member_offset = OFFSET(page_next_hash); cnt = do_list(ld); total_cached += cnt; if (ld->searchfor) break; if (received_SIGINT()) restart(0); } hq_close(); fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "", page_cache_size); if (page_cache_size != total_cached) fprintf(fp, "(found %ld)\n", total_cached); else fprintf(fp, "(verified)\n"); if (CRASHDEBUG(1)) fprintf(fp, "heads containing page(s): %d\n", populated); if (searchpage) { rewind(pc->tmpfile); found = FALSE; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:")) continue; if (strstr(buf, "page_hash_table")) { strcpy(hash_table, strip_linefeeds(buf)); continue; } errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (this_addr == searchpage) { found = TRUE; break; } } close_tmpfile(); if (found) { fprintf(fp, "%s\n", hash_table); fprintf(fp, "%lx\n", searchpage); hi->retval = TRUE; } } } /* * dump_free_pages() displays basic data about pages currently resident * in the free_area[] memory lists. If the flags contain the VERBOSE * bit, each page slab base address is dumped. If an address is specified, * only the free_area[] data containing that page is displayed, along with * the page slab base address. Specified addresses can either be physical * addresses or page structure pointers.
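 * This is the legacy back end of "kmem -f" for kernels predating memory
 * nodes and zones (note the FATAL sanity checks below).  An illustrative
 * search for the free block spanning a page, with a fabricated address:
 *
 *	crash> kmem -f c02eb000
 *
 * which replays the verbose free-list dump from a tmpfile to locate the
 * AREA entry whose physical range covers that page.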
*/ char *free_area_hdr1 = \ "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr2 = \ "AREA SIZE FREE_AREA_STRUCT\n"; static void dump_free_pages(struct meminfo *fi) { int i; int order; ulong free_area; char *free_area_buf; ulong *pp; int nr_mem_lists; struct list_data list_data, *ld; long cnt, total_free, chunk_size; int nr_free_pages; char buf[BUFSIZE]; char last_free[BUFSIZE]; char last_free_hdr[BUFSIZE]; int verbose, errflag, found; physaddr_t searchphys; ulong this_addr; physaddr_t this_phys; int do_search; ulong kfp, offset; int flen, dimension; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (nr_mem_lists == 0) error(FATAL, "cannot determine size/dimensions of free_area\n"); if (dimension) error(FATAL, "dump_free_pages called with multidimensional free area\n"); ld = &list_data; total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); kfp = free_area = symbol_value("free_area"); flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); readmem(free_area, KVADDR, free_area_buf, SIZE(free_area_struct) * nr_mem_lists, "free_area_struct", FAULT_ON_ERROR); if (do_search) open_tmpfile(); if (!verbose) fprintf(fp, "%s", free_area_hdr1); hq_open(); for (i = 0; i < nr_mem_lists; i++) { pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); chunk_size = power(2, i); if (verbose) fprintf(fp, "%s", free_area_hdr2); fprintf(fp, "%3d ", i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? "\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } hq_close(); fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:")) continue; if (STRNEQ(buf, "AREA")) { strcpy(last_free_hdr, buf); continue; } if (strstr(buf, "k")) { strcpy(last_free, buf); chunk_size = power(2, order) * PAGESIZE(); order++; continue; } errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { order--; fprintf(fp, "%s", last_free_hdr); fprintf(fp, "%s", last_free); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ?
"in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages on kernels with a multi-dimensional free_area array. */ char *free_area_hdr5 = \ " AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr6 = \ " AREA SIZE FREE_AREA_STRUCT\n"; static void dump_multidimensional_free_pages(struct meminfo *fi) { int i, j; struct list_data list_data, *ld; long cnt, total_free; ulong kfp, free_area; physaddr_t searchphys; int flen, errflag, verbose, nr_free_pages; int nr_mem_lists, dimension, order, do_search; ulong sum, found, offset; char *free_area_buf, *p; ulong *pp; long chunk_size; ulong this_addr; physaddr_t this_phys; char buf[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_multidimensional_free_pages called with (NODES|ZONES)\n"); ld = &list_data; if (SIZE(free_area_struct) % sizeof(ulong)) error(FATAL, "free_area_struct not long-word aligned?\n"); total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_multidimensional_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? TRUE : FALSE; flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (!nr_mem_lists || !dimension) error(FATAL, "cannot determine free_area dimensions\n"); free_area_buf = GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension); kfp = free_area = symbol_value("free_area"); readmem(free_area, KVADDR, free_area_buf, (SIZE(free_area_struct) * nr_mem_lists) * dimension, "free_area arrays", FAULT_ON_ERROR); if (do_search) open_tmpfile(); hq_open(); for (i = sum = found = 0; i < dimension; i++) { if (!verbose) fprintf(fp, "%s", free_area_hdr5); pp = (ulong *)(free_area_buf + ((SIZE(free_area_struct)*nr_mem_lists)*i)); for (j = 0; j < nr_mem_lists; j++) { if (verbose) fprintf(fp, "%s", free_area_hdr6); sprintf(buf, "[%d][%d]", i, j); fprintf(fp, "%7s ", buf); chunk_size = power(2, j); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? 
"\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); pp += (SIZE(free_area_struct)/sizeof(ulong)); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } fprintf(fp, "\n"); } hq_close(); fprintf(fp, "nr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages in newer kernels that have zones. This is a work in * progress, because although the framework for memory nodes has been laid * down, complete support has not been put in place. */ static char *zone_hdr = "ZONE NAME SIZE FREE"; static void dump_free_pages_zones_v1(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t this_phys, searchphys; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v1: no memtype specified\n"); } do_search = TRUE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; chunk_size = 0; zone_size_offset = 0; if (VALID_MEMBER(zone_struct_size)) zone_size_offset = OFFSET(zone_struct_size); else if (VALID_MEMBER(zone_struct_memsize)) zone_size_offset = OFFSET(zone_struct_memsize); else error(FATAL, "zone_struct has neither size nor memsize field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone_struct); continue; } if ((i == 0) && (vt->flags & NODES)) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? "\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); readmem(node_zones+OFFSET(zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) found += dump_zone_free_area(node_zones+ OFFSET(zone_struct_free_area), vt->nr_free_areas, verbose, NULL); node_zones += SIZE(zone_struct); } } hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:")) continue; if (STRNEQ(buf, "NODE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile);
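/* each NODE/ZONE/AREA header in the replayed tmpfile output is followed by its data line; the most recent ones are saved so they can be echoed if the page search below succeeds */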
strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Callback function for free-list search for a specific page. */ struct free_page_callback_data { ulong searchpage; long chunk_size; ulong page; int found; }; static int free_page_callback(void *page, void *arg) { struct free_page_callback_data *cbd = arg; ulong first_page, last_page; first_page = (ulong)page; last_page = first_page + (cbd->chunk_size * SIZE(page)); if ((cbd->searchpage >= first_page) && (cbd->searchpage <= last_page)) { cbd->page = (ulong)page; cbd->found = TRUE; return TRUE; } return FALSE; } /* * Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone * and free_area related data structure changes. */ static void dump_free_pages_zones_v2(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t phys, this_phys, searchphys, end_paddr; ulong searchpage; struct free_page_callback_data callback_data; ulong pp; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_pfn; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v2 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v2: no memtype specified\n"); } if (!phys_to_page(searchphys, &searchpage)) { error(INFO, "cannot determine page for %lx\n", fi->spec_addr); return; } do_search = TRUE; callback_data.searchpage = searchpage; callback_data.found = FALSE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; zone_size_offset = 0; chunk_size = 0; this_addr = 0; if (VALID_MEMBER(zone_spanned_pages)) zone_size_offset = OFFSET(zone_spanned_pages); else error(FATAL, "zone struct has no spanned_pages field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1) && STREQ(buf, "HighMem")) vt->ZONE_HIGHMEM = i; if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone); continue; } if ((i == 0) && ((vt->flags & NODES) || (vt->numnodes > 1))) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? "\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); if (VALID_MEMBER(zone_zone_mem_map)) { readmem(node_zones+OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); } readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (!VALID_MEMBER(zone_zone_mem_map)) { if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) { zone_mem_map = 0; if (size) { phys = PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (vt->flags & FLATMEM) { zone_mem_map = 0; if (size) zone_mem_map = nt->mem_map + (zone_start_pfn * SIZE(page)); } else error(FATAL, "\ncannot determine zone mem_map: TBD\n"); } if (zone_mem_map) zone_start_mapnr = (zone_mem_map - nt->mem_map) / SIZE(page); else zone_start_mapnr = 0; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) { if (do_search) { end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((searchphys >= nt->start_paddr) && (searchphys < end_paddr)) found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, &callback_data); if (callback_data.found) goto done_search; 
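/* a successful callback match short-circuits the remaining zones and nodes; only zones whose physical span covers searchphys are walked at all */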
} else found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, NULL); } node_zones += SIZE(zone); } } done_search: hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "<readmem:")) continue; if (STRNEQ(buf, "NODE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages)", ordinal(offset+1, buf), chunk_size/PAGESIZE()); } fi->retval = TRUE; fprintf(fp, "\n"); } } static char * page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH"; /* * Display info about the non-free pages in each zone. */ static int dump_zone_page_usage(void) { int i, n; ulong value, node_zones; struct node_table *nt; ulong inactive_dirty_pages, inactive_clean_pages, active_pages; ulong free_pages, pages_min, pages_low, pages_high; char namebuf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) || !VALID_MEMBER(zone_struct_inactive_clean_pages) || !VALID_MEMBER(zone_struct_active_pages) || !VALID_MEMBER(zone_struct_pages_min) || !VALID_MEMBER(zone_struct_pages_low) || !VALID_MEMBER(zone_struct_pages_high)) return FALSE; fprintf(fp, "\n"); for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); if ((vt->numnodes > 1) && (vt->flags & NODES)) { fprintf(fp, "%sNODE\n %2d\n", n ?
"\n" : "", nt->node_id); } fprintf(fp, "%s\n", page_usage_hdr); for (i = 0; i < vt->nr_zones; i++) { readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &free_pages, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); readmem(node_zones+ OFFSET(zone_struct_inactive_dirty_pages), KVADDR, &inactive_dirty_pages, sizeof(ulong), "node_zones inactive_dirty_pages", FAULT_ON_ERROR); readmem(node_zones+ OFFSET(zone_struct_inactive_clean_pages), KVADDR, &inactive_clean_pages, sizeof(ulong), "node_zones inactive_clean_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_active_pages), KVADDR, &active_pages, sizeof(ulong), "node_zones active_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_min), KVADDR, &pages_min, sizeof(ulong), "node_zones pages_min", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_low), KVADDR, &pages_low, sizeof(ulong), "node_zones pages_low", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_pages_high), KVADDR, &pages_high, sizeof(ulong), "node_zones pages_high", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf1, BUFSIZE-1)) sprintf(namebuf, "%-8s", buf1); else sprintf(namebuf, "(unknown)"); sprintf(buf2, "%ld/%ld/%ld", pages_min, pages_low, pages_high); fprintf(fp, "%3d %s %7ld %7ld %15ld %15ld %s\n", i, namebuf, free_pages, active_pages, inactive_dirty_pages, inactive_clean_pages, mkstring(buf3, strlen("MIN/LOW/HIGH"), CENTER, buf2)); node_zones += SIZE(zone_struct); } } return TRUE; } /* * Dump the num "order" contents of the zone_t free_area array. */ char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n"; char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; static int dump_zone_free_area(ulong free_area, int num, ulong verbose, struct free_page_callback_data *callback_data) { int i, j; long chunk_size; int flen, total_free, cnt; char buf[BUFSIZE]; ulong free_area_buf[3]; char *free_area_buf2; char *free_list_buf; ulong free_list; struct list_data list_data, *ld; int list_count; ulong *free_ptr; list_count = 0; free_list_buf = free_area_buf2 = NULL; if (VALID_STRUCT(free_area_struct)) { if (SIZE(free_area_struct) != (3 * sizeof(ulong))) error(FATAL, "unrecognized free_area_struct size: %ld\n", SIZE(free_area_struct)); list_count = 1; } else if (VALID_STRUCT(free_area)) { if (SIZE(free_area) == (3 * sizeof(ulong))) list_count = 1; else { list_count = MEMBER_SIZE("free_area", "free_list")/SIZE(list_head); free_area_buf2 = GETBUF(SIZE(free_area)); free_list_buf = GETBUF(SIZE(list_head)); readmem(free_area, KVADDR, free_area_buf2, SIZE(free_area), "free_area struct", FAULT_ON_ERROR); } } else error(FATAL, "neither free_area_struct or free_area structures exist\n"); ld = &list_data; if (!verbose) fprintf(fp, "%s", free_area_hdr4); total_free = 0; flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); if (list_count > 1) goto multiple_lists; for (i = 0; i < num; i++, free_area += SIZE_OPTION(free_area_struct, free_area)) { if (verbose) fprintf(fp, "%s", free_area_hdr3); fprintf(fp, "%3d ", i); chunk_size = power(2, i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, " %7s ", buf); readmem(free_area, KVADDR, free_area_buf, sizeof(ulong) * 3, "free_area_struct", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_area))); if (free_area_buf[0] == free_area) { if (verbose) fprintf(fp, "\n"); else fprintf(fp, "%6d %6d\n", 0, 0); continue; } if (verbose) 
fprintf(fp, "\n"); BZERO(ld, sizeof(struct list_data)); ld->flags = verbose | RETURN_ON_DUPLICATE; ld->start = free_area_buf[0]; ld->end = free_area; if (VALID_MEMBER(page_list_next)) ld->list_head_offset = OFFSET(page_list); else if (VALID_MEMBER(page_lru)) ld->list_head_offset = OFFSET(page_lru)+ OFFSET(list_head_next); else error(FATAL, "neither page.list or page.lru exist?\n"); cnt = do_list(ld); if (cnt < 0) { error(pc->curcmd_flags & IGNORE_ERRORS ? INFO : FATAL, "corrupted free list from free_area_struct: %lx\n", free_area); if (pc->curcmd_flags & IGNORE_ERRORS) break; } if (!verbose) fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); total_free += (cnt * chunk_size); } return total_free; multiple_lists: for (i = 0; i < num; i++, free_area += SIZE_OPTION(free_area_struct, free_area)) { readmem(free_area, KVADDR, free_area_buf2, SIZE(free_area), "free_area struct", FAULT_ON_ERROR); for (j = 0, free_list = free_area; j < list_count; j++, free_list += SIZE(list_head)) { if (verbose) fprintf(fp, "%s", free_area_hdr3); fprintf(fp, "%3d ", i); chunk_size = power(2, i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, " %7s ", buf); readmem(free_list, KVADDR, free_list_buf, SIZE(list_head), "free_area free_list", FAULT_ON_ERROR); fprintf(fp, "%s ", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(free_list))); free_ptr = (ulong *)free_list_buf; if (*free_ptr == free_list) { if (verbose) fprintf(fp, "\n"); else fprintf(fp, "%6d %6d\n", 0, 0); continue; } if (verbose) fprintf(fp, "\n"); BZERO(ld, sizeof(struct list_data)); ld->flags = verbose | RETURN_ON_DUPLICATE; ld->start = *free_ptr; ld->end = free_list; ld->list_head_offset = OFFSET(page_lru) + OFFSET(list_head_next); if (callback_data) { ld->flags &= ~VERBOSE; ld->flags |= (LIST_CALLBACK|CALLBACK_RETURN); ld->callback_func = free_page_callback; ld->callback_data = (void *)callback_data; callback_data->chunk_size = chunk_size; } cnt = do_list(ld); if (cnt < 0) { error(pc->curcmd_flags & IGNORE_ERRORS ? INFO : FATAL, "corrupted free list %d from free_area struct: %lx\n", j, free_area); if (pc->curcmd_flags & IGNORE_ERRORS) goto bailout; } if (callback_data && callback_data->found) { fprintf(fp, "%lx\n", callback_data->page); goto bailout; } if (!verbose) fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size); total_free += (cnt * chunk_size); } } bailout: FREEBUF(free_area_buf2); FREEBUF(free_list_buf); return total_free; } /* * dump_kmeminfo displays basic memory use information typically shown * by /proc/meminfo, and then some... 
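 *
 * A purely illustrative sketch of the "kmem -i" display this function
 * produces -- the figures below are hypothetical and the exact rows
 * present depend on the kernel version:
 *
 *	              PAGES        TOTAL      PERCENTAGE
 *	 TOTAL MEM   505786         1.9 GB         ----
 *	      FREE    28085       109.7 MB    5% of TOTAL MEM
 *	      USED   477701         1.8 GB   94% of TOTAL MEM
 *	    SHARED    45166       176.4 MB    8% of TOTAL MEM
 *	   BUFFERS    12715        49.7 MB    2% of TOTAL MEM
 *	    CACHED   183748       717.8 MB   36% of TOTAL MEM
 *	      SLAB    37506       146.5 MB    7% of TOTAL MEM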
*/ char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n"; static void dump_kmeminfo(void) { int i, len; ulong totalram_pages; ulong freeram_pages; ulong used_pages; ulong shared_pages; ulong buffer_pages; ulong subtract_buffer_pages; ulong totalswap_pages, totalused_pages; ulong totalhigh_pages; ulong freehighmem_pages; ulong totallowmem_pages; ulong freelowmem_pages; ulong allowed; long committed; ulong overcommit_kbytes = 0; int overcommit_ratio; ulong hugetlb_total_pages, hugetlb_total_free_pages = 0; int done_hugetlb_calc = 0; long nr_file_pages, nr_slab; ulong swapper_space_nrpages; ulong pct; uint tmp; struct meminfo meminfo; struct gnu_request req; long page_cache_size; ulong get_totalram; ulong get_buffers; ulong get_slabs; char buf[BUFSIZE]; BZERO(&meminfo, sizeof(struct meminfo)); meminfo.flags = GET_ALL; dump_mem_map(&meminfo); get_totalram = meminfo.get_totalram; shared_pages = meminfo.get_shared; get_buffers = meminfo.get_buffers; get_slabs = meminfo.get_slabs; /* * If vm_stat array exists, override page search info. */ if (vm_stat_init()) { if (dump_vm_stat("NR_SLAB", &nr_slab, 0)) get_slabs = nr_slab; else if (dump_vm_stat("NR_SLAB_RECLAIMABLE", &nr_slab, 0)) { get_slabs = nr_slab; if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0)) get_slabs += nr_slab; } } fprintf(fp, "%s", kmeminfo_hdr); /* * Get total RAM based upon how the various versions of si_meminfo() * have done it, latest to earliest: * * Prior to 2.3.36, count all mem_map pages minus the reserved ones. * From 2.3.36 onwards, use "totalram_pages" if set. */ if (symbol_exists("totalram_pages") || symbol_exists("_totalram_pages")) { totalram_pages = vt->totalram_pages ? vt->totalram_pages : get_totalram; } else totalram_pages = get_totalram; fprintf(fp, "%13s %7ld %11s ----\n", "TOTAL MEM", totalram_pages, pages_to_size(totalram_pages, buf)); /* * Get free pages from dump_free_pages() or its associates. * Used pages are a free-bee... */ meminfo.flags = GET_FREE_PAGES; vt->dump_free_pages(&meminfo); freeram_pages = meminfo.retval; pct = (freeram_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "FREE", freeram_pages, pages_to_size(freeram_pages, buf), pct); used_pages = totalram_pages - freeram_pages; pct = (used_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "USED", used_pages, pages_to_size(used_pages, buf), pct); /* * Get shared pages from dump_mem_map(). Note that this is done * differently than the kernel -- it just tallies the non-reserved * pages that have a count of greater than 1. */ pct = (shared_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "SHARED", shared_pages, pages_to_size(shared_pages, buf), pct); subtract_buffer_pages = 0; if (symbol_exists("buffermem_pages")) { get_symbol_data("buffermem_pages", sizeof(int), &tmp); buffer_pages = (ulong)tmp; } else if (symbol_exists("buffermem")) { get_symbol_data("buffermem", sizeof(int), &tmp); buffer_pages = BTOP(tmp); } else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) && symbol_exists("nr_blockdev_pages")) { subtract_buffer_pages = buffer_pages = nr_blockdev_pages(); } else buffer_pages = 0; pct = (buffer_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "BUFFERS", buffer_pages, pages_to_size(buffer_pages, buf), pct); if (CRASHDEBUG(1)) error(NOTE, "pages with buffers: %ld\n", get_buffers); /* * page_cache_size has evolved from a long to an atomic_t to * not existing at all. 
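 *
 * On kernels where only the NR_FILE_PAGES vm_stat entry is available,
 * the CACHED value reported below effectively becomes:
 *
 *	CACHED = NR_FILE_PAGES - (sum of swapper_space nrpages) - BUFFERS
 *
 * mirroring the way /proc/meminfo derives its "Cached" figure.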
*/ if (symbol_exists("page_cache_size")) { get_symbol_type("page_cache_size", NULL, &req); if (req.length == sizeof(int)) { get_symbol_data("page_cache_size", sizeof(int), &tmp); page_cache_size = (long)tmp; } else get_symbol_data("page_cache_size", sizeof(long), &page_cache_size); page_cache_size -= subtract_buffer_pages; } else if (symbol_exists("nr_pagecache")) { get_symbol_data("nr_pagecache", sizeof(int), &tmp); page_cache_size = (long)tmp; page_cache_size -= subtract_buffer_pages; } else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages, 0)) { char *swapper_space = GETBUF(SIZE(address_space)); swapper_space_nrpages = 0; if (symbol_exists("nr_swapper_spaces") && (len = get_array_length("nr_swapper_spaces", NULL, 0))) { char *nr_swapper_space = GETBUF(len * sizeof(unsigned int)); readmem(symbol_value("nr_swapper_spaces"), KVADDR, nr_swapper_space, len * sizeof(unsigned int), "nr_swapper_space", RETURN_ON_ERROR); for (i = 0; i < len; i++) { int j; unsigned long sa; unsigned int banks = UINT(nr_swapper_space + (i * sizeof(unsigned int))); if (!banks) continue; readmem(symbol_value("swapper_spaces") + (i * sizeof(void *)),KVADDR, &sa, sizeof(void *), "swapper_space", RETURN_ON_ERROR); if (!sa) continue; for (j = 0; j < banks; j++) { readmem(sa + j * SIZE(address_space), KVADDR, swapper_space, SIZE(address_space), "swapper_space", RETURN_ON_ERROR); swapper_space_nrpages += ULONG(swapper_space + OFFSET(address_space_nrpages)); } } FREEBUF(nr_swapper_space); } else if (symbol_exists("swapper_spaces") && (len = get_array_length("swapper_spaces", NULL, 0))) { for (i = 0; i < len; i++) { if (!readmem(symbol_value("swapper_spaces") + i * SIZE(address_space), KVADDR, swapper_space, SIZE(address_space), "swapper_space", RETURN_ON_ERROR)) break; swapper_space_nrpages += ULONG(swapper_space + OFFSET(address_space_nrpages)); } } else if (symbol_exists("swapper_space") && readmem(symbol_value("swapper_space"), KVADDR, swapper_space, SIZE(address_space), "swapper_space", RETURN_ON_ERROR)) swapper_space_nrpages = ULONG(swapper_space + OFFSET(address_space_nrpages)); page_cache_size = nr_file_pages - swapper_space_nrpages - buffer_pages; FREEBUF(swapper_space); } else page_cache_size = 0; pct = (page_cache_size * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "CACHED", page_cache_size, pages_to_size(page_cache_size, buf), pct); /* * Although /proc/meminfo doesn't show it, show how much memory * the slabs take up. */ pct = (get_slabs * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "SLAB", get_slabs, pages_to_size(get_slabs, buf), pct); if (symbol_exists("totalhigh_pages") || symbol_exists("_totalhigh_pages")) { totalhigh_pages = vt->totalhigh_pages; pct = totalhigh_pages ? (totalhigh_pages * 100)/totalram_pages : 0; fprintf(fp, "\n%13s %7ld %11s %3ld%% of TOTAL MEM\n", "TOTAL HIGH", totalhigh_pages, pages_to_size(totalhigh_pages, buf), pct); meminfo.flags = GET_FREE_HIGHMEM_PAGES; vt->dump_free_pages(&meminfo); freehighmem_pages = meminfo.retval; pct = freehighmem_pages ? 
(freehighmem_pages * 100)/totalhigh_pages : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HIGH\n", "FREE HIGH", freehighmem_pages, pages_to_size(freehighmem_pages, buf), pct); totallowmem_pages = totalram_pages - totalhigh_pages; pct = (totallowmem_pages * 100)/totalram_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n", "TOTAL LOW", totallowmem_pages, pages_to_size(totallowmem_pages, buf), pct); freelowmem_pages = freeram_pages - freehighmem_pages; pct = (freelowmem_pages * 100)/totallowmem_pages; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LOW\n", "FREE LOW", freelowmem_pages, pages_to_size(freelowmem_pages, buf), pct); } if (get_hugetlb_total_pages(&hugetlb_total_pages, &hugetlb_total_free_pages)) { done_hugetlb_calc = 1; fprintf(fp, "\n%13s %7ld %11s ----\n", "TOTAL HUGE", hugetlb_total_pages, pages_to_size(hugetlb_total_pages, buf)); pct = hugetlb_total_free_pages ? (hugetlb_total_free_pages * 100) / hugetlb_total_pages : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HUGE\n", "HUGE FREE", hugetlb_total_free_pages, pages_to_size(hugetlb_total_free_pages, buf), pct); } /* * get swap data from dump_swap_info(). */ fprintf(fp, "\n"); if (symbol_exists("swapper_space") || symbol_exists("swapper_spaces")) { if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages, &totalused_pages)) { fprintf(fp, "%13s %7ld %11s ----\n", "TOTAL SWAP", totalswap_pages, pages_to_size(totalswap_pages, buf)); pct = totalswap_pages ? (totalused_pages * 100) / totalswap_pages : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n", "SWAP USED", totalused_pages, pages_to_size(totalused_pages, buf), pct); pct = totalswap_pages ? ((totalswap_pages - totalused_pages) * 100) / totalswap_pages : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n", "SWAP FREE", totalswap_pages - totalused_pages, pages_to_size(totalswap_pages - totalused_pages, buf), pct); } else error(INFO, "swap_info[%ld].swap_map at %lx is inaccessible\n", totalused_pages, totalswap_pages); } /* * Show committed memory */ if (kernel_symbol_exists("sysctl_overcommit_memory")) { fprintf(fp, "\n"); if (kernel_symbol_exists("sysctl_overcommit_kbytes")) get_symbol_data("sysctl_overcommit_kbytes", sizeof(ulong), &overcommit_kbytes); if (overcommit_kbytes) allowed = overcommit_kbytes >> (machdep->pageshift - 10); else { get_symbol_data("sysctl_overcommit_ratio", sizeof(int), &overcommit_ratio); if (!done_hugetlb_calc) goto bailout; allowed = ((totalram_pages - hugetlb_total_pages) * overcommit_ratio / 100); } if (symbol_exists("vm_committed_as")) { if (INVALID_MEMBER(percpu_counter_count)) goto bailout; readmem(symbol_value("vm_committed_as") + OFFSET(percpu_counter_count), KVADDR, &committed, sizeof(long), "percpu_counter count", FAULT_ON_ERROR); /* Ensure always positive */ if (committed < 0) committed = 0; } else { if (INVALID_MEMBER(atomic_t_counter)) goto bailout; readmem(symbol_value("vm_committed_space") + OFFSET(atomic_t_counter), KVADDR, &committed, sizeof(int), "atomic_t counter", FAULT_ON_ERROR); } allowed += totalswap_pages; fprintf(fp, "%13s %7ld %11s ----\n", "COMMIT LIMIT", allowed, pages_to_size(allowed, buf)); if (allowed) { pct = committed ? ((committed * 100) / allowed) : 0; fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LIMIT\n", "COMMITTED", committed, pages_to_size(committed, buf), pct); } else fprintf(fp, "%13s %7ld %11s ----\n", "COMMITTED", committed, pages_to_size(committed, buf)); } bailout: dump_zone_page_usage(); } /* * Emulate 2.6 nr_blockdev_pages() function. 
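 *
 * The 2.6-era kernel routine being emulated looks roughly like this
 * (locking details vary across versions):
 *
 *	long nr_blockdev_pages(void)
 *	{
 *		struct block_device *bdev;
 *		long ret = 0;
 *		spin_lock(&bdev_lock);
 *		list_for_each_entry(bdev, &all_bdevs, bd_list)
 *			ret += bdev->bd_inode->i_mapping->nrpages;
 *		spin_unlock(&bdev_lock);
 *		return ret;
 *	}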
*/ static ulong nr_blockdev_pages(void) { struct list_data list_data, *ld; int i, bdevcnt; ulong inode, address_space; ulong nrpages; char *block_device_buf, *inode_buf, *address_space_buf; ld = &list_data; BZERO(ld, sizeof(struct list_data)); get_symbol_data("all_bdevs", sizeof(void *), &ld->start); if (empty_list(ld->start)) return 0; ld->flags |= LIST_ALLOCATE; ld->end = symbol_value("all_bdevs"); ld->list_head_offset = OFFSET(block_device_bd_list); block_device_buf = GETBUF(SIZE(block_device)); inode_buf = GETBUF(SIZE(inode)); address_space_buf = GETBUF(SIZE(address_space)); bdevcnt = do_list(ld); /* * go through the block_device list, emulating: * * ret += bdev->bd_inode->i_mapping->nrpages; */ for (i = nrpages = 0; i < bdevcnt; i++) { readmem(ld->list_ptr[i], KVADDR, block_device_buf, SIZE(block_device), "block_device buffer", FAULT_ON_ERROR); inode = ULONG(block_device_buf + OFFSET(block_device_bd_inode)); readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer", FAULT_ON_ERROR); address_space = ULONG(inode_buf + OFFSET(inode_i_mapping)); readmem(address_space, KVADDR, address_space_buf, SIZE(address_space), "address_space buffer", FAULT_ON_ERROR); nrpages += ULONG(address_space_buf + OFFSET(address_space_nrpages)); } FREEBUF(ld->list_ptr); FREEBUF(block_device_buf); FREEBUF(inode_buf); FREEBUF(address_space_buf); return nrpages; } /* * dump_vmlist() displays information from the vmlist. */ static void dump_vmlist(struct meminfo *vi) { char buf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; ulong vmlist; ulong addr, size, next, pcheck, count, verified; physaddr_t paddr; int mod_vmlist; if (vt->flags & USE_VMAP_AREA) { dump_vmap_area(vi); return; } get_symbol_data("vmlist", sizeof(void *), &vmlist); next = vmlist; count = verified = 0; mod_vmlist = kernel_symbol_exists("mod_vmlist"); while (next) { if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) && !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC| GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) { fprintf(fp, "%s ", mkstring(buf, MAX(strlen("VM_STRUCT"), VADDR_PRLEN), CENTER|LJUST, "VM_STRUCT")); fprintf(fp, "%s SIZE\n", mkstring(buf, (VADDR_PRLEN * 2) + strlen(" - "), CENTER|LJUST, "ADDRESS RANGE")); pc->curcmd_flags |= HEADER_PRINTED; } readmem(next+OFFSET(vm_struct_addr), KVADDR, &addr, sizeof(void *), "vmlist addr", FAULT_ON_ERROR); readmem(next+OFFSET(vm_struct_size), KVADDR, &size, sizeof(ulong), "vmlist size", FAULT_ON_ERROR); if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) { /* * Preceding GET_VMLIST_COUNT set vi->retval. 
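 *
 * Callers therefore make two passes -- a sketch (real callers fill in
 * the remaining meminfo fields and allocate with GETBUF()):
 *
 *	vi->flags = GET_VMLIST_COUNT;
 *	dump_vmlist(vi);
 *	vi->vmlist = (void *)GETBUF(vi->retval * sizeof(*vi->vmlist));
 *	vi->flags = GET_VMLIST;
 *	dump_vmlist(vi);
 *
 * so that at most vi->retval entries are ever filled in below.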
*/ if (vi->flags & GET_VMLIST) { if (count < vi->retval) { vi->vmlist[count].addr = addr; vi->vmlist[count].size = size; } } count++; goto next_entry; } if (!(vi->flags & ADDRESS_SPECIFIED) || ((vi->memtype == KVADDR) && ((vi->spec_addr >= addr) && (vi->spec_addr < (addr+size))))) { if (vi->flags & VMLIST_VERIFY) { verified++; break; } fprintf(fp, "%s%s %s - %s %6ld\n", mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(next)), space(MINSPACE-1), mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(addr)), mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(addr+size)), size); } if ((vi->flags & ADDRESS_SPECIFIED) && (vi->memtype == PHYSADDR)) { for (pcheck = addr; pcheck < (addr+size); pcheck += PAGESIZE()) { if (!kvtop(NULL, pcheck, &paddr, 0)) continue; if ((vi->spec_addr >= paddr) && (vi->spec_addr < (paddr+PAGESIZE()))) { if (vi->flags & GET_PHYS_TO_VMALLOC) { vi->retval = pcheck + PAGEOFFSET(paddr); return; } else fprintf(fp, "%s%s %s - %s %6ld\n", mkstring(buf, VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(next)), space(MINSPACE-1), mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(addr)), mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(addr+size)), size); break; } } } next_entry: readmem(next+OFFSET(vm_struct_next), KVADDR, &next, sizeof(void *), "vmlist next", FAULT_ON_ERROR); if (!next && mod_vmlist) { get_symbol_data("mod_vmlist", sizeof(void *), &next); mod_vmlist = FALSE; } } if (vi->flags & GET_HIGHEST) vi->retval = addr+size; if (vi->flags & GET_VMLIST_COUNT) vi->retval = count; if (vi->flags & VMLIST_VERIFY) vi->retval = verified; } static void dump_vmap_area(struct meminfo *vi) { int i, cnt; ulong start, end, vm_struct, flags, vm; struct list_data list_data, *ld; char *vmap_area_buf; ulong size, pcheck, count, verified; physaddr_t paddr; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; #define VM_VM_AREA 0x4 /* mm/vmalloc.c */ vmap_area_buf = GETBUF(SIZE(vmap_area)); start = count = verified = size = 0; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags = LIST_HEAD_FORMAT|LIST_HEAD_POINTER|LIST_ALLOCATE; get_symbol_data("vmap_area_list", sizeof(void *), &ld->start); ld->list_head_offset = OFFSET(vmap_area_list); ld->end = symbol_value("vmap_area_list"); cnt = do_list(ld); if (cnt < 0) { FREEBUF(vmap_area_buf); error(WARNING, "invalid/corrupt vmap_area_list\n"); vi->retval = 0; return; } for (i = 0; i < cnt; i++) { if (!(pc->curcmd_flags & HEADER_PRINTED) && (i == 0) && !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC| GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) { fprintf(fp, "%s ", mkstring(buf1, MAX(strlen("VMAP_AREA"), VADDR_PRLEN), CENTER|LJUST, "VMAP_AREA")); fprintf(fp, "%s ", mkstring(buf1, MAX(strlen("VM_STRUCT"), VADDR_PRLEN), CENTER|LJUST, "VM_STRUCT")); fprintf(fp, "%s SIZE\n", mkstring(buf1, (VADDR_PRLEN * 2) + strlen(" - "), CENTER|LJUST, "ADDRESS RANGE")); pc->curcmd_flags |= HEADER_PRINTED; } readmem(ld->list_ptr[i], KVADDR, vmap_area_buf, SIZE(vmap_area), "vmap_area struct", FAULT_ON_ERROR); if (VALID_MEMBER(vmap_area_flags)) { flags = ULONG(vmap_area_buf + OFFSET(vmap_area_flags)); if (flags != VM_VM_AREA) continue; } else { vm = ULONG(vmap_area_buf + OFFSET(vmap_area_vm)); if (!vm) continue; } start = ULONG(vmap_area_buf + OFFSET(vmap_area_va_start)); end = ULONG(vmap_area_buf + OFFSET(vmap_area_va_end)); vm_struct = ULONG(vmap_area_buf + OFFSET(vmap_area_vm)); size = end - start; if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) { /* * Preceding GET_VMLIST_COUNT set vi->retval. 
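 *
 * (Same two-pass count-then-fill contract as dump_vmlist() above.)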
*/ if (vi->flags & GET_VMLIST) { if (count < vi->retval) { vi->vmlist[count].addr = start; vi->vmlist[count].size = size; } } count++; continue; } if (!(vi->flags & ADDRESS_SPECIFIED) || ((vi->memtype == KVADDR) && ((vi->spec_addr >= start) && (vi->spec_addr < (start+size))))) { if (vi->flags & VMLIST_VERIFY) { verified++; break; } fprintf(fp, "%s%s %s%s %s - %s %7ld\n", mkstring(buf1,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(ld->list_ptr[i])), space(MINSPACE-1), mkstring(buf2,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(vm_struct)), space(MINSPACE-1), mkstring(buf3, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(start)), mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(start+size)), size); } if ((vi->flags & ADDRESS_SPECIFIED) && (vi->memtype == PHYSADDR)) { for (pcheck = start; pcheck < (start+size); pcheck += PAGESIZE()) { if (!kvtop(NULL, pcheck, &paddr, 0)) continue; if ((vi->spec_addr >= paddr) && (vi->spec_addr < (paddr+PAGESIZE()))) { if (vi->flags & GET_PHYS_TO_VMALLOC) { vi->retval = pcheck + PAGEOFFSET(paddr); FREEBUF(ld->list_ptr); return; } else fprintf(fp, "%s%s %s%s %s - %s %7ld\n", mkstring(buf1,VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(ld->list_ptr[i])), space(MINSPACE-1), mkstring(buf2, VADDR_PRLEN, LONG_HEX|CENTER|LJUST, MKSTR(vm_struct)), space(MINSPACE-1), mkstring(buf3, VADDR_PRLEN, LONG_HEX|RJUST, MKSTR(start)), mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(start+size)), size); break; } } } } FREEBUF(vmap_area_buf); FREEBUF(ld->list_ptr); if (vi->flags & GET_HIGHEST) vi->retval = start+size; if (vi->flags & GET_VMLIST_COUNT) vi->retval = count; if (vi->flags & VMLIST_VERIFY) vi->retval = verified; } /* * dump_page_lists() displays information from the active_list, * inactive_dirty_list and inactive_clean_list from each zone. */ static int dump_page_lists(struct meminfo *mi) { int i, c, n, retval; ulong node_zones, pgdat; struct node_table *nt; struct list_data list_data, *ld; char buf[BUFSIZE]; ulong value; ulong inactive_clean_pages, inactive_clean_list; int nr_active_pages, nr_inactive_pages; int nr_inactive_dirty_pages; ld = &list_data; retval = FALSE; nr_active_pages = nr_inactive_dirty_pages = -1; BZERO(ld, sizeof(struct list_data)); ld->list_head_offset = OFFSET(page_lru); if (mi->flags & ADDRESS_SPECIFIED) ld->searchfor = mi->spec_addr; else if (mi->flags & VERBOSE) ld->flags |= VERBOSE; if (mi->flags & GET_ACTIVE_LIST) { if (!symbol_exists("active_list")) error(FATAL, "active_list does not exist in this kernel\n"); if (symbol_exists("nr_active_pages")) get_symbol_data("nr_active_pages", sizeof(int), &nr_active_pages); else error(FATAL, "nr_active_pages does not exist in this kernel\n"); ld->end = symbol_value("active_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "active_list:\n"); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_active_pages: %d ", mi->flags & VERBOSE ? 
"\n" : "", nr_active_pages); if (c != nr_active_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_LIST) { if (!symbol_exists("inactive_list")) error(FATAL, "inactive_list does not exist in this kernel\n"); if (symbol_exists("nr_inactive_pages")) get_symbol_data("nr_inactive_pages", sizeof(int), &nr_inactive_pages); else error(FATAL, "nr_inactive_pages does not exist in this kernel\n"); ld->end = symbol_value("inactive_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "inactive_list:\n"); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_inactive_pages: %d ", mi->flags & VERBOSE ? "\n" : "", nr_inactive_pages); if (c != nr_inactive_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_DIRTY) { if (!symbol_exists("inactive_dirty_list")) error(FATAL, "inactive_dirty_list does not exist in this kernel\n"); if (symbol_exists("nr_inactive_dirty_pages")) get_symbol_data("nr_inactive_dirty_pages", sizeof(int), &nr_inactive_dirty_pages); else error(FATAL, "nr_inactive_dirty_pages does not exist in this kernel\n"); ld->end = symbol_value("inactive_dirty_list"); readmem(ld->end, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (mi->flags & VERBOSE) fprintf(fp, "%sinactive_dirty_list:\n", mi->flags & GET_ACTIVE_LIST ? "\n" : ""); if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { fprintf(fp, "%snr_inactive_dirty_pages: %d ", mi->flags & VERBOSE ?
"\n" : "", nr_inactive_dirty_pages); if (c != nr_inactive_dirty_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } } if (mi->flags & GET_INACTIVE_CLEAN) { if (INVALID_MEMBER(zone_struct_inactive_clean_list)) error(FATAL, "inactive_clean_list(s) do not exist in this kernel\n"); get_symbol_data("pgdat_list", sizeof(void *), &pgdat); if ((mi->flags & VERBOSE) && (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY))) fprintf(fp, "\n"); for (n = 0; pgdat; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "zone_struct name", FAULT_ON_ERROR); if (!read_string(value, buf, BUFSIZE-1)) sprintf(buf, "(unknown) "); if (mi->flags & VERBOSE) { if (vt->numnodes > 1) fprintf(fp, "NODE %d ", n); fprintf(fp, "\"%s\" inactive_clean_list:\n", buf); } readmem(node_zones + OFFSET(zone_struct_inactive_clean_pages), KVADDR, &inactive_clean_pages, sizeof(ulong), "inactive_clean_pages", FAULT_ON_ERROR); readmem(node_zones + OFFSET(zone_struct_inactive_clean_list), KVADDR, &inactive_clean_list, sizeof(ulong), "inactive_clean_list", FAULT_ON_ERROR); ld->start = inactive_clean_list; ld->end = node_zones + OFFSET(zone_struct_inactive_clean_list); if (mi->flags & ADDRESS_SPECIFIED) ld->searchfor = mi->spec_addr; if (ld->start == ld->end) { c = 0; ld->searchfor = 0; if (mi->flags & VERBOSE) fprintf(fp, "(empty)\n"); } else { hq_open(); c = do_list(ld); hq_close(); } if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) { fprintf(fp, "%lx\n", ld->searchfor); retval = TRUE; } else { if (vt->numnodes > 1) fprintf(fp, "NODE %d ", n); fprintf(fp, "\"%s\" ", buf); fprintf(fp, "inactive_clean_pages: %ld ", inactive_clean_pages); if (c != inactive_clean_pages) fprintf(fp, "(found %d)\n", c); else fprintf(fp, "(verified)\n"); } node_zones += SIZE(zone_struct); } readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } } return retval; } /* * Check whether an address is a kmem_cache_t address, and if so, return * a pointer to the static buffer containing its name string. Otherwise * return NULL on failure. */ #define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n" static char * is_kmem_cache_addr(ulong vaddr, char *kbuf) { ulong cache, cache_cache, name; long next_offset, name_offset; if (vt->flags & KMEM_CACHE_UNAVAIL) { error(INFO, "kmem cache slab subsystem not available\n"); return NULL; } if (vt->flags & KMALLOC_SLUB) return is_kmem_cache_addr_common(vaddr, kbuf); if ((vt->flags & KMALLOC_COMMON) && !symbol_exists("cache_cache")) return is_kmem_cache_addr_common(vaddr, kbuf); name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); cache = cache_cache = symbol_value("cache_cache"); do { if (cache == vaddr) { if (vt->kmem_cache_namelen) { readmem(cache+name_offset, KVADDR, kbuf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(cache+name_offset, KVADDR, &name, sizeof(name), "name", FAULT_ON_ERROR); if (!read_string(name, kbuf, BUFSIZE-1)) { if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); else error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(kbuf, "(unknown)"); } } return kbuf; } readmem(cache+next_offset, KVADDR, &cache, sizeof(long), "kmem_cache_s next", FAULT_ON_ERROR); if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) cache -= next_offset; } while (cache != cache_cache); return NULL; } /* * Note same functionality as above, but instead it just * dumps all slab cache names and their addresses. */ static void kmem_cache_list(struct meminfo *mi) { ulong cache, cache_cache, name; long next_offset, name_offset; char *cache_buf; int has_cache_chain; ulong cache_chain; char buf[BUFSIZE]; if (vt->flags & KMEM_CACHE_UNAVAIL) { error(INFO, "kmem cache slab subsystem not available\n"); return; } if (vt->flags & (KMALLOC_SLUB|KMALLOC_COMMON)) { kmem_cache_list_common(mi); return; } if (symbol_exists("cache_chain")) { has_cache_chain = TRUE; cache_chain = symbol_value("cache_chain"); } else { has_cache_chain = FALSE; cache_chain = 0; } name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); cache = cache_cache = symbol_value("cache_cache"); cache_buf = GETBUF(SIZE(kmem_cache_s)); do { readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", FAULT_ON_ERROR); if (vt->kmem_cache_namelen) { BCOPY(cache_buf+name_offset, buf, vt->kmem_cache_namelen); } else { name = ULONG(cache_buf + name_offset); if (!read_string(name, buf, BUFSIZE-1)) { if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); else error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(buf, "(unknown)"); } } fprintf(fp, "%lx %s\n", cache, buf); cache = ULONG(cache_buf + next_offset); if (has_cache_chain && (cache == cache_chain)) readmem(cache, KVADDR, &cache, sizeof(char *), "cache_chain", FAULT_ON_ERROR); if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) cache -= next_offset; } while (cache != cache_cache); FREEBUF(cache_buf); } /* * Translate an address to its physical page number, verify that the * page in fact belongs to the slab subsystem, and if so, return the * name of the cache to which it belongs. 
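 *
 * The translation chain, in outline:
 *
 *	vaddr --kvtop()--> paddr --phys_to_page()--> page
 *	page.slab / page.next / page.list.next / page.lru.next
 *	      --> kmem_cache --is_kmem_cache_addr()--> cache name
 *
 * using whichever page member exists in the running kernel.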
*/ static char * vaddr_to_kmem_cache(ulong vaddr, char *buf, int verbose) { physaddr_t paddr; ulong page, cache, page_flags; if (!kvtop(NULL, vaddr, &paddr, 0)) { if (verbose) error(WARNING, "cannot make virtual-to-physical translation: %lx\n", vaddr); return NULL; } if (!phys_to_page(paddr, &page)) { if (verbose) error(WARNING, "cannot find mem_map page for address: %lx\n", vaddr); return NULL; } if (vt->PG_slab) { readmem(page+OFFSET(page_flags), KVADDR, &page_flags, sizeof(ulong), "page.flags", FAULT_ON_ERROR); if (!(page_flags & (1 << vt->PG_slab))) { if (((vt->flags & KMALLOC_SLUB) || VALID_MEMBER(page_compound_head)) || ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab) && VALID_MEMBER(page_first_page))) { readmem(compound_head(page)+OFFSET(page_flags), KVADDR, &page_flags, sizeof(ulong), "page.flags", FAULT_ON_ERROR); if (!(page_flags & (1 << vt->PG_slab))) return NULL; } else return NULL; } } if ((vt->flags & KMALLOC_SLUB) || ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab) && (VALID_MEMBER(page_compound_head) || VALID_MEMBER(page_first_page)))) { readmem(compound_head(page)+OFFSET(page_slab), KVADDR, &cache, sizeof(void *), "page.slab", FAULT_ON_ERROR); } else if (VALID_MEMBER(page_next)) readmem(page+OFFSET(page_next), KVADDR, &cache, sizeof(void *), "page.next", FAULT_ON_ERROR); else if (VALID_MEMBER(page_list_next)) readmem(page+OFFSET(page_list_next), KVADDR, &cache, sizeof(void *), "page.list.next", FAULT_ON_ERROR); else if (VALID_MEMBER(page_lru)) readmem(page+OFFSET(page_lru)+OFFSET(list_head_next), KVADDR, &cache, sizeof(void *), "page.lru.next", FAULT_ON_ERROR); else error(FATAL, "cannot determine slab cache from page struct\n"); return(is_kmem_cache_addr(cache, buf)); } static char * is_slab_overload_page(ulong vaddr, ulong *page_head, char *buf) { ulong cache; char *p; if ((vt->flags & SLAB_OVERLOAD_PAGE) && is_page_ptr(vaddr, NULL) && VALID_MEMBER(page_slab) && (VALID_MEMBER(page_compound_head) || VALID_MEMBER(page_first_page))) { readmem(compound_head(vaddr)+OFFSET(page_slab), KVADDR, &cache, sizeof(void *), "page.slab", FAULT_ON_ERROR); p = is_kmem_cache_addr(cache, buf); if (p) *page_head = compound_head(vaddr); return p; } return NULL; } /* * Translate an address to its physical page number, verify that the * page in fact belongs to the slab subsystem, and if so, return the * address of the slab to which it belongs. 
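 *
 * Same chain as vaddr_to_kmem_cache() above; on SLUB and overload-page
 * kernels the compound page head itself serves as the slab, while the
 * older layouts follow the "backwards" member (page.slab_page,
 * page.prev, page.list.prev or page.lru.prev) instead.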
*/ static ulong vaddr_to_slab(ulong vaddr) { physaddr_t paddr; ulong page; ulong slab; if (!kvtop(NULL, vaddr, &paddr, 0)) { error(WARNING, "cannot make virtual-to-physical translation: %lx\n", vaddr); return 0; } if (!phys_to_page(paddr, &page)) { error(WARNING, "cannot find mem_map page for address: %lx\n", vaddr); return 0; } slab = 0; if ((vt->flags & KMALLOC_SLUB) || VALID_MEMBER(page_compound_head)) slab = compound_head(page); else if (vt->flags & SLAB_OVERLOAD_PAGE) slab = compound_head(page); else if ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab_page)) readmem(page+OFFSET(page_slab_page), KVADDR, &slab, sizeof(void *), "page.slab_page", FAULT_ON_ERROR); else if (VALID_MEMBER(page_prev)) readmem(page+OFFSET(page_prev), KVADDR, &slab, sizeof(void *), "page.prev", FAULT_ON_ERROR); else if (VALID_MEMBER(page_list_prev)) readmem(page+OFFSET(page_list_prev), KVADDR, &slab, sizeof(void *), "page.list.prev", FAULT_ON_ERROR); else if (VALID_MEMBER(page_lru)) readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev), KVADDR, &slab, sizeof(void *), "page.lru.prev", FAULT_ON_ERROR); else error(FATAL, "unknown definition of struct page?\n"); return slab; } /* * Initialize any data required for scouring the kmalloc subsystem more * efficiently. */ char slab_hdr[100] = { 0 }; char kmem_cache_hdr[100] = { 0 }; char free_inuse_hdr[100] = { 0 }; static void kmem_cache_init(void) { ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; long cache_count, num_offset, next_offset; char *cache_buf; if (vt->flags & KMEM_CACHE_UNAVAIL) return; if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME)) return; if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) return; please_wait("gathering kmem slab cache data"); if (!strlen(slab_hdr)) { if (vt->flags & KMALLOC_SLUB) sprintf(slab_hdr, "SLAB%sMEMORY%sNODE TOTAL ALLOCATED FREE\n", space(VADDR_PRLEN > 8 ? 14 : 6), space(VADDR_PRLEN > 8 ? 12 : 4)); else sprintf(slab_hdr, "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", space(VADDR_PRLEN > 8 ? 14 : 6), space(VADDR_PRLEN > 8 ? 12 : 4)); } if (!strlen(kmem_cache_hdr)) sprintf(kmem_cache_hdr, "CACHE%s OBJSIZE ALLOCATED TOTAL SLABS SSIZE NAME\n", space(VADDR_PRLEN > 8 ? 12 : 4)); if (!strlen(free_inuse_hdr)) sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); if (vt->flags & KMALLOC_SLUB) { kmem_cache_init_slub(); please_wait_done(); return; } num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); max_cnum = max_limit = max_cpus = cache_count = tmp2 = 0; /* * Pre-2.6 versions used the "cache_cache" as the head of the * slab chain list. 2.6 uses the "cache_chain" list_head. * In 3.6 SLAB and SLUB use the "slab_caches" list_head. 
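 *
 * So, for example, on a 3.6 or later kernel the walk below starts at:
 *
 *	slab_caches --> kmem_cache.next --> kmem_cache.next --> ...
 *
 * with next_offset subtracted from each embedded list_head to recover
 * the containing kmem_cache address.
 */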
*/ if (vt->flags & PERCPU_KMALLOC_V2) { if (kernel_symbol_exists("cache_chain")) { get_symbol_data("cache_chain", sizeof(ulong), &cache); cache_end = symbol_value("cache_chain"); } else if (kernel_symbol_exists("slab_caches")) { vt->flags |= KMALLOC_COMMON; get_symbol_data("slab_caches", sizeof(ulong), &cache); cache_end = symbol_value("slab_caches"); } else { error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); return; } cache -= next_offset; } else cache = cache_end = symbol_value("cache_cache"); if (!(pc->flags & RUNTIME)) { if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); } cache_buf = GETBUF(SIZE(kmem_cache_s)); hq_open(); do { cache_count++; if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) { FREEBUF(cache_buf); vt->flags |= KMEM_CACHE_UNAVAIL; error(INFO, "%sunable to initialize kmem slab cache subsystem\n\n", DUMPFILE() ? "\n" : ""); hq_close(); return; } if (!hq_enter(cache)) { error(WARNING, "%sduplicate kmem_cache entry in cache list: %lx\n", DUMPFILE() ? "\n" : "", cache); error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); vt->flags |= KMEM_CACHE_UNAVAIL; hq_close(); return; } tmp = (ulong)(UINT(cache_buf + num_offset)); if (tmp > max_cnum) max_cnum = tmp; if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) max_limit = tmp; /* * Recognize and bail out on any max_cpudata_limit() failures. */ if (vt->flags & KMEM_CACHE_UNAVAIL) { FREEBUF(cache_buf); hq_close(); return; } if (tmp2 > max_cpus) max_cpus = tmp2; cache = ULONG(cache_buf + next_offset); switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { case PERCPU_KMALLOC_V1: cache -= next_offset; break; case PERCPU_KMALLOC_V2: if (cache != cache_end) cache -= next_offset; break; } } while (cache != cache_end); hq_close(); FREEBUF(cache_buf); vt->kmem_max_c_num = max_cnum; vt->kmem_max_limit = max_limit; vt->kmem_max_cpus = max_cpus; vt->kmem_cache_count = cache_count; if (CRASHDEBUG(2)) { fprintf(fp, "kmem_cache_init:\n"); fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); } if (!(vt->flags & KMEM_CACHE_INIT)) { if (vt->flags & PERCPU_KMALLOC_V1) ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, kmem_cache_s_name, "kmem_cache_s.name", NULL, sizeof(char)); else if (vt->flags & PERCPU_KMALLOC_V2) vt->kmem_cache_namelen = 0; else ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, kmem_cache_s_c_name, "kmem_cache_s.c_name", NULL, 0); } please_wait_done(); vt->flags |= KMEM_CACHE_INIT; } static ulong kmem_cache_nodelists(ulong cache) { ulong nodelists = 0; if (vt->flags & NODELISTS_IS_PTR) { /* * nodelists is pointer to the array */ if (!readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &nodelists, sizeof(ulong), "nodelists pointer", RETURN_ON_ERROR)) error(WARNING, "cannot read kmem_cache nodelists pointer"); return nodelists; } else return cache+OFFSET(kmem_cache_s_lists); } static int kmem_cache_downsize(void) { char *cache_buf; ulong kmem_cache; uint buffer_size, object_size; int nr_node_ids; int nr_cpu_ids; if (vt->flags & KMALLOC_SLUB) { if (kernel_symbol_exists("kmem_cache") && VALID_MEMBER(kmem_cache_objsize) && try_get_symbol_data("kmem_cache", sizeof(ulong), &kmem_cache) && readmem(kmem_cache + OFFSET(kmem_cache_objsize), KVADDR, &object_size, sizeof(int), "kmem_cache objsize/object_size", RETURN_ON_ERROR)) { ASSIGN_SIZE(kmem_cache) = object_size; if 
(CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache)); } if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache)) return TRUE; else return FALSE; } if ((THIS_KERNEL_VERSION < LINUX(2,6,22)) || !(vt->flags & PERCPU_KMALLOC_V2_NODES) || (!kernel_symbol_exists("cache_cache") && !kernel_symbol_exists("kmem_cache_boot")) || (!MEMBER_EXISTS("kmem_cache", "buffer_size") && !MEMBER_EXISTS("kmem_cache", "size"))) { return FALSE; } if (vt->flags & NODELISTS_IS_PTR) { /* * More recent kernels have kmem_cache.array[] sized * by the number of cpus plus the number of nodes. */ if (kernel_symbol_exists("kmem_cache_boot") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("kmem_cache_boot") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "kmem_cache_boot object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else if (kernel_symbol_exists("cache_cache") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("cache_cache") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "cache_cache object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else object_size = 0; /* * Older kernels have kmem_cache.array[] sized by * the number of cpus; real value is nr_cpu_ids, * but fallback is kt->cpus. */ if (kernel_symbol_exists("nr_cpu_ids")) get_symbol_data("nr_cpu_ids", sizeof(int), &nr_cpu_ids); else nr_cpu_ids = kt->cpus; ARRAY_LENGTH(kmem_cache_s_array) = nr_cpu_ids; if (!object_size) ASSIGN_SIZE(kmem_cache_s) = OFFSET(kmem_cache_s_array) + sizeof(ulong) * nr_cpu_ids; if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s)); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } else if (vt->flags & SLAB_CPU_CACHE) { if (kernel_symbol_exists("kmem_cache_boot") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("kmem_cache_boot") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "kmem_cache_boot object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else { object_size = OFFSET(kmem_cache_node) + (sizeof(void *) * vt->kmem_cache_len_nodes); ASSIGN_SIZE(kmem_cache_s) = object_size; } if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s)); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } cache_buf = GETBUF(SIZE(kmem_cache_s)); if (!readmem(symbol_value("cache_cache"), KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) { FREEBUF(cache_buf); return FALSE; } buffer_size = UINT(cache_buf + MEMBER_OFFSET("kmem_cache", "buffer_size")); if (buffer_size < SIZE(kmem_cache_s)) { if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; if (buffer_size >= (uint)(OFFSET(kmem_cache_s_lists) + (sizeof(void *) * vt->kmem_cache_len_nodes))) ASSIGN_SIZE(kmem_cache_s) = buffer_size; else error(WARNING, "questionable cache_cache.buffer_size: %d\n", buffer_size); if (CRASHDEBUG(1)) { fprintf(fp, "\nkmem_cache_downsize: %ld to %d\n", STRUCT_SIZE("kmem_cache"), buffer_size); fprintf(fp, "kmem_cache_downsize: nr_node_ids: %ld\n", vt->kmem_cache_len_nodes); } FREEBUF(cache_buf); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; 
} FREEBUF(cache_buf); return FALSE; } /* * Stash a list of presumably-corrupted slab cache addresses. */ static void mark_bad_slab_cache(ulong cache) { size_t sz; if (vt->nr_bad_slab_caches) { sz = sizeof(ulong) * (vt->nr_bad_slab_caches + 1); if (!(vt->bad_slab_caches = realloc(vt->bad_slab_caches, sz))) { error(INFO, "cannot realloc bad_slab_caches array\n"); vt->nr_bad_slab_caches = 0; return; } } else { if (!(vt->bad_slab_caches = (ulong *)malloc(sizeof(ulong)))) { error(INFO, "cannot malloc bad_slab_caches array\n"); return; } } vt->bad_slab_caches[vt->nr_bad_slab_caches++] = cache; } static int bad_slab_cache(ulong cache) { int i; for (i = 0; i < vt->nr_bad_slab_caches; i++) { if (vt->bad_slab_caches[i] == cache) return TRUE; } return FALSE; } /* * Determine the largest cpudata limit for a given cache. */ static ulong max_cpudata_limit(ulong cache, ulong *cpus) { int i; ulong cpudata[NR_CPUS]; int limit; ulong max_limit; ulong shared, percpu_ptr; ulong *start_address; if (vt->flags & PERCPU_KMALLOC_V2_NODES) goto kmem_cache_s_array_nodes; if (vt->flags & PERCPU_KMALLOC_V2) goto kmem_cache_s_array; if (INVALID_MEMBER(kmem_cache_s_cpudata)) { *cpus = 0; return 0; } if (!readmem(cache+OFFSET(kmem_cache_s_cpudata), KVADDR, &cpudata[0], sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), "cpudata array", RETURN_ON_ERROR)) goto bail_out; for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && cpudata[i]; i++) { if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit), KVADDR, &limit, sizeof(int), "cpucache limit", RETURN_ON_ERROR)) goto bail_out; if (limit > max_limit) max_limit = limit; } *cpus = i; return max_limit; kmem_cache_s_array: if (!readmem(cache+OFFSET(kmem_cache_s_array), KVADDR, &cpudata[0], sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), "array cache array", RETURN_ON_ERROR)) goto bail_out; for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && cpudata[i]; i++) { if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR, &limit, sizeof(int), "array cache limit", RETURN_ON_ERROR)) goto bail_out; if (limit > max_limit) max_limit = limit; } /* * If the shared list can be accessed, check its size as well. 
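 *
 * i.e. follow kmem_cache.lists.shared to the shared array_cache and
 * fold its "limit" into the running maximum, since the buffers sized
 * by vt->kmem_max_limit must also hold shared-cache entries later.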
*/ if (VALID_MEMBER(kmem_list3_shared) && VALID_MEMBER(kmem_cache_s_lists) && readmem(cache+OFFSET(kmem_cache_s_lists)+OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) && readmem(shared+OFFSET(array_cache_limit), KVADDR, &limit, sizeof(int), "shared array_cache limit", RETURN_ON_ERROR|QUIET)) { if (limit > max_limit) max_limit = limit; } *cpus = i; return max_limit; kmem_cache_s_array_nodes: if (CRASHDEBUG(3)) fprintf(fp, "kmem_cache: %lx\n", cache); if (vt->flags & SLAB_CPU_CACHE) { if (!readmem(cache+OFFSET(kmem_cache_cpu_cache), KVADDR, &percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache", RETURN_ON_ERROR)) goto bail_out; for (i = 0; i < kt->cpus; i++) cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i]; } else { if (!readmem(cache+OFFSET(kmem_cache_s_array), KVADDR, &cpudata[0], sizeof(ulong) * MIN(NR_CPUS, ARRAY_LENGTH(kmem_cache_s_array)), "array cache array", RETURN_ON_ERROR)) goto bail_out; } for (i = max_limit = 0; i < kt->cpus; i++) { if (check_offline_cpu(i)) continue; if (!cpudata[i]) break; if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR, &limit, sizeof(int), "array cache limit", RETURN_ON_ERROR)) { error(INFO, "kmem_cache: %lx: invalid array_cache pointer: %lx\n", cache, cpudata[i]); mark_bad_slab_cache(cache); return max_limit; } if (CRASHDEBUG(3)) fprintf(fp, " array limit[%d]: %d\n", i, limit); if ((unsigned int)limit > INT_MAX) error(INFO, "kmem_cache: %lx: invalid array limit[%d]: %d\n", cache, i, limit); else if (limit > max_limit) max_limit = limit; } *cpus = i; /* * Check the shared list of all the nodes. */ start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (VALID_MEMBER(kmem_list3_shared) && VALID_MEMBER(kmem_cache_s_lists) && readmem(kmem_cache_nodelists(cache), KVADDR, &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) { for (i = 0; i < vt->kmem_cache_len_nodes; i++) { if (start_address[i] == 0) continue; if (readmem(start_address[i] + OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET)) { if (!shared) break; } else continue; if (readmem(shared + OFFSET(array_cache_limit), KVADDR, &limit, sizeof(int), "shared array_cache limit", RETURN_ON_ERROR|QUIET)) { if (CRASHDEBUG(3)) fprintf(fp, " shared node limit[%d]: %d\n", i, limit); if ((unsigned int)limit > INT_MAX) error(INFO, "kmem_cache: %lx: shared node limit[%d]: %d\n", cache, i, limit); else if (limit > max_limit) max_limit = limit; break; } } } FREEBUF(start_address); return max_limit; bail_out: vt->flags |= KMEM_CACHE_UNAVAIL; error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); *cpus = 0; return 0; } /* * Determine whether the current slab cache is contained in * the comma-separated list from a "kmem -I list1,list2 ..." * command entry. */ static int ignore_cache(struct meminfo *si, char *name) { int i, argc; char *p1; char *arglist[MAXARGS]; char buf[BUFSIZE]; if (!si->ignore) return FALSE; strcpy(buf, si->ignore); p1 = buf; while (*p1) { if (*p1 == ',') *p1 = ' '; p1++; } argc = parse_line(buf, arglist); for (i = 0; i < argc; i++) { if (STREQ(name, arglist[i])) return TRUE; } return FALSE; } /* * dump_kmem_cache() displays basic information about kmalloc() slabs. * At this point, only kmem_cache_s structure data for each slab is dumped. * * TBD: Given a specified physical address, and determine which slab it came * from, and whether it's in use or not. 
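 *
 * When an address is specified, a hit is classified with one of the
 * KMEM_* values below (slab header, bufctl, free object, in-use
 * object, cpu-cached object, ...) and reported beneath the matching
 * cache and slab lines.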
*/ #define SLAB_C_MAGIC 0x4F17A36DUL #define SLAB_MAGIC_ALLOC 0xA5C32F2BUL /* slab is alive */ #define SLAB_MAGIC_DESTROYED 0xB2F23C5AUL /* slab has been destroyed */ #define SLAB_CFLGS_BUFCTL 0x020000UL /* bufctls in own cache */ #define SLAB_CFLGS_OBJFREELIST 0x40000000UL /* Freelist as an object */ #define KMEM_SLAB_ADDR (1) #define KMEM_BUFCTL_ADDR (2) #define KMEM_OBJECT_ADDR_FREE (3) #define KMEM_OBJECT_ADDR_INUSE (4) #define KMEM_OBJECT_ADDR_CACHED (5) #define KMEM_ON_SLAB (6) #define KMEM_OBJECT_ADDR_SHARED (7) #define KMEM_SLAB_OVERLOAD_PAGE (8) #define KMEM_SLAB_FREELIST (9) #define DUMP_KMEM_CACHE_TAG(addr, name, tag) \ fprintf(fp, "%lx %-43s %s\n", addr, tag, name) #define DUMP_KMEM_CACHE_INFO() dump_kmem_cache_info(si) static void dump_kmem_cache_info(struct meminfo *si) { char b1[BUFSIZE]; ulong objsize, allocated, total; if (si->flags & SLAB_GATHER_FAILURE) error(INFO, "%s: cannot gather relevant slab data\n", si->curname); objsize = (vt->flags & KMALLOC_SLUB) ? si->objsize : si->size; fprintf(fp, "%s %8ld ", mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), objsize); if (si->flags & SLAB_GATHER_FAILURE) { fprintf(fp, "%9s %8s %5s ", "?", "?", "?"); } else { allocated = (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) ? si->inuse - si->cpucached_cache : si->inuse; total = (vt->flags & KMALLOC_SLUB) ? si->inuse + si->free : si->num_slabs * si->c_num; fprintf(fp, "%9ld %8ld %5ld ", allocated, total, si->num_slabs); } fprintf(fp, "%4ldk %s\n", si->slabsize/1024, si->curname); } #define DUMP_SLAB_INFO() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ ulong allocated, freeobjs, slab; \ if (vt->flags & SLAB_OVERLOAD_PAGE) \ slab = si->slab - OFFSET(page_lru); \ else \ slab = si->slab; \ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \ allocated = si->s_inuse - si->cpucached_slab; \ freeobjs = si->c_num - allocated - si->cpucached_slab; \ } else { \ allocated = si->s_inuse; \ freeobjs = si->c_num - si->s_inuse; \ } \ fprintf(fp, "%s %s %5ld %9ld %4ld\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ si->c_num, allocated, \ vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? 
\ freeobjs + si->cpucached_slab : freeobjs); \ } static void dump_kmem_cache(struct meminfo *si) { char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name, magic; int cnt; char *p1; if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_c_nextp), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr); reqname = p1; } else reqname = si->reqname; si->cache_buf = GETBUF(SIZE(kmem_cache_s)); do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", FAULT_ON_ERROR); if (vt->kmem_cache_namelen) { BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), buf, vt->kmem_cache_namelen); } else { name = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_name)); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[IGNORED]"); goto next_cache; } si->curname = buf; if (CRASHDEBUG(1)) fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); console("cache: %lx %s\n", si->cache, si->curname); magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); if (magic == SLAB_C_MAGIC) { si->size = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_org_size)); if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: c_org_size: %ld\n", si->curname, si->size); si->errors++; } } si->c_flags = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_flags)); si->c_offset = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_offset)); si->order = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_gfporder)); si->c_num = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_num)); do_slab_chain(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) DUMP_KMEM_CACHE_INFO(); if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { si->slab = (si->flags & ADDRESS_SPECIFIED) ?
vaddr_to_slab(si->spec_addr) : 0; do_slab_chain(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO(); fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_slab_t)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; } break; } } } else { error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", si->curname, magic); si->errors++; } next_cache: si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); } while (si->cache != cache_cache); FREEBUF(si->cache_buf); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); } /* * dump_kmem_cache() adapted for newer percpu slab format. */ static void dump_kmem_cache_percpu_v1(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name; int cnt; uint tmp_val; /* Temporary variable used to read a sizeof(uint) value that is then assigned to a ulong; reading the smaller size directly sidesteps endianness issues. */ char *p1; if (!(vt->flags & PERCPU_KMALLOC_V1)) error(FATAL, "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); for (i = 0; i < vt->kmem_max_cpus; i++) si->cpudata[i] = (ulong *) GETBUF(vt->kmem_max_limit * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr); reqname = p1; } else reqname = si->reqname; do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ?
"\n" : "", kmem_cache_hdr); if (vt->kmem_cache_namelen) { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, buf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, &name, sizeof(ulong), "name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[IGNORED]"); goto next_cache; } si->curname = buf; readmem(si->cache+OFFSET(kmem_cache_s_objsize), KVADDR, &tmp_val, sizeof(uint), "objsize", FAULT_ON_ERROR); si->size = (ulong)tmp_val; if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: objsize: %ld\n", si->curname, si->size); si->errors++; } } readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); si->c_flags = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_gfporder), KVADDR, &tmp_val, sizeof(uint), "gfporder", FAULT_ON_ERROR); si->order = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_num), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s num", FAULT_ON_ERROR); si->c_num = (ulong)tmp_val; do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { DUMP_KMEM_CACHE_INFO(); if (CRASHDEBUG(3)) dump_struct("kmem_cache_s", si->cache, 0); } if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { gather_cpudata_list_v1(si); si->slab = (si->flags & ADDRESS_SPECIFIED) ? vaddr_to_slab(si->spec_addr) : 0; do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO(); fprintf(fp, "%s", slab_hdr); gather_slab_cached_count(si); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp,"(kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(slab_s)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_CACHED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (cpu %d cache)\n", si->container ? si->container : (ulong)si->spec_addr, si->cpu); break; } break; } } next_cache: readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &si->cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); si->cache -= OFFSET(kmem_cache_s_next); } while (si->cache != cache_cache); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); FREEBUF(si->kmem_bufctl); for (i = 0; i < vt->kmem_max_cpus; i++) FREEBUF(si->cpudata[i]); } /* * Updated for 2.6 slab substructure. 
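 *
 * In the 2.6 layout each kmem_cache carries per-cpu array_caches and,
 * on NUMA-aware kernels, per-node kmem_list3 slab lists, so the
 * walkthrough below dispatches to per-node and overload-page variants
 * as well as the classic slab chain.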
*/ static void dump_kmem_cache_percpu_v2(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_end; ulong name, page_head; int cnt; uint tmp_val; /* Used as temporary variable to read sizeof(int) and assigned to ulong variable. We are doing this to mask the endian issue */ char *p1; if (!(vt->flags & PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); if (vt->flags & SLAB_OVERLOAD_PAGE) { si->freelist = si->kmem_bufctl; si->freelist_index_size = slab_freelist_index_size(); } for (i = 0; i < vt->kmem_max_cpus; i++) si->cpudata[i] = (ulong *) GETBUF(vt->kmem_max_limit * sizeof(ulong)); if(vt->flags & PERCPU_KMALLOC_V2_NODES) si->shared_array_cache = (ulong *) GETBUF(vt->kmem_cache_len_nodes * (vt->kmem_max_limit+1) * sizeof(ulong)); else si->shared_array_cache = (ulong *) GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &cache_end, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); else { if (vt->flags & KMALLOC_COMMON) { get_symbol_data("slab_caches", sizeof(ulong), &si->cache); si->cache -= OFFSET(kmem_cache_s_next); cache_end = symbol_value("slab_caches"); } else { get_symbol_data("cache_chain", sizeof(ulong), &si->cache); si->cache -= OFFSET(kmem_cache_s_next); cache_end = symbol_value("cache_chain"); } } if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_overload_page(si->spec_addr, &page_head, kbuf))) { si->flags |= SLAB_OVERLOAD_PAGE_PTR; si->spec_addr = page_head; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); if (vt->kmem_cache_namelen) { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, buf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, &name, sizeof(ulong), "name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[IGNORED]"); goto next_cache; } if (bad_slab_cache(si->cache)) { DUMP_KMEM_CACHE_TAG(si->cache, buf, "[INVALID/CORRUPTED]"); goto next_cache; } si->curname = buf; readmem(si->cache+OFFSET(kmem_cache_s_objsize), KVADDR, &tmp_val, sizeof(uint), "objsize", FAULT_ON_ERROR); si->size = (ulong)tmp_val; if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: objsize: %ld\n", si->curname, si->size); si->errors++; } } readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); si->c_flags = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_gfporder), KVADDR, &tmp_val, sizeof(uint), "gfporder", FAULT_ON_ERROR); si->order = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_num), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s num", FAULT_ON_ERROR); si->c_num = (ulong)tmp_val; if (vt->flags & PERCPU_KMALLOC_V2_NODES) { if (vt->flags & SLAB_OVERLOAD_PAGE) do_slab_chain_slab_overload_page(SLAB_GET_COUNTS, si); else do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); } else do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { DUMP_KMEM_CACHE_INFO(); if (CRASHDEBUG(3)) dump_struct("kmem_cache_s", si->cache, 0); } if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) gather_cpudata_list_v2(si); si->slab = (si->flags & ADDRESS_SPECIFIED) ? vaddr_to_slab(si->spec_addr) : 0; if (vt->flags & PERCPU_KMALLOC_V2_NODES) { if (vt->flags & SLAB_OVERLOAD_PAGE) do_slab_chain_slab_overload_page(SLAB_WALKTHROUGH, si); else do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); } else do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO(); fprintf(fp, "%s", slab_hdr); gather_slab_cached_count(si); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp,"(kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(slab)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_SLAB_FREELIST: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(on-slab freelist)\n"); break; case KMEM_SLAB_OVERLOAD_PAGE: si->flags &= ~ADDRESS_SPECIFIED; dump_slab_objects_percpu(si); si->flags |= ADDRESS_SPECIFIED; break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_CACHED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (cpu %d cache)\n", si->container ? 
si->container : (ulong)si->spec_addr, si->cpu); break; case KMEM_OBJECT_ADDR_SHARED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (shared cache)\n", si->container ? si->container : (ulong)si->spec_addr); break; } break; } } next_cache: readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &si->cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); if (si->cache != cache_end) si->cache -= OFFSET(kmem_cache_s_next); } while (si->cache != cache_end); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); FREEBUF(si->kmem_bufctl); for (i = 0; i < vt->kmem_max_cpus; i++) FREEBUF(si->cpudata[i]); FREEBUF(si->shared_array_cache); } /* * Walk through the slab chain hanging off a kmem_cache_s structure, * gathering basic statistics. * * TBD: Given a specified physical address, determine whether it's in this * slab chain, and whether it's in use or not. */ #define INSLAB(obj, si) \ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) static void do_slab_chain(int cmd, struct meminfo *si) { ulong tmp, magic; ulong kmem_slab_end; char *kmem_slab_s_buf; si->slabsize = (power(2, si->order) * PAGESIZE()); kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); switch (cmd) { case SLAB_GET_COUNTS: si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); if (slab_data_saved(si)) return; si->num_slabs = si->inuse = 0; if (si->slab == kmem_slab_end) return; kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); do { if (received_SIGINT()) { FREEBUF(kmem_slab_s_buf); restart(0); } readmem(si->slab, KVADDR, kmem_slab_s_buf, SIZE(kmem_slab_s), "kmem_slab_s buffer", FAULT_ON_ERROR); magic = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_magic)); if (magic == SLAB_MAGIC_ALLOC) { tmp = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_inuse)); si->inuse += tmp; si->num_slabs++; } else { fprintf(fp, "\"%s\" cache: invalid s_magic: %lx\n", si->curname, magic); si->errors++; FREEBUF(kmem_slab_s_buf); return; } si->slab = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_nextp)); } while (si->slab != kmem_slab_end); FREEBUF(kmem_slab_s_buf); save_slab_data(si); break; case SLAB_WALKTHROUGH: if (!si->slab) si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); if (si->slab == kmem_slab_end) return; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); do { if (received_SIGINT()) { FREEBUF(kmem_slab_s_buf); restart(0); } readmem(si->slab, KVADDR, kmem_slab_s_buf, SIZE(kmem_slab_s), "kmem_slab_s buffer", FAULT_ON_ERROR); dump_slab(si); if (si->found) { FREEBUF(kmem_slab_s_buf); return; } si->slab = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_nextp)); } while (si->slab != kmem_slab_end); FREEBUF(kmem_slab_s_buf); break; } } /* * do_slab_chain() adapted for newer percpu slab format. 
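 *
 * A minimal worked example of the containment test below, assuming
 * 4K pages and an order-1 (0x2000-byte) slab allocated at 0xc5a40000
 * whose first object (s_mem) is colour-offset to 0xc5a40040:
 *
 *	SLAB_BASE(0xc5a40040)        -> 0xc5a40000
 *	0xc5a419c0 & ~(0x2000 - 1)   -> 0xc5a40000
 *
 * so INSLAB_PERCPU(0xc5a419c0, si) is true and the object is
 * attributed to this slab.  The addresses are illustrative only.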
*/ #define SLAB_BASE(X) (PTOB(BTOP(X))) #define INSLAB_PERCPU(obj, si) \ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem)) #define SLAB_CHAINS (3) static char *slab_chain_name_v1[] = {"full", "partial", "free"}; static void do_slab_chain_percpu_v1(long cmd, struct meminfo *si) { int i, tmp, s; int list_borked; char *slab_s_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; list_borked = 0; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; if (VALID_MEMBER(kmem_cache_s_slabs)) { slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs); slab_chains[1] = 0; slab_chains[2] = 0; } else { slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full); slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial); slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free); } if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "full: %lx partial: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } switch (cmd) { case SLAB_GET_COUNTS: si->flags |= SLAB_GET_COUNTS; si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; gather_cpudata_list_v1(si); slab_s_buf = GETBUF(SIZE(slab_s)); for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v1[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_s_buf); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_s_buf); restart(0); } if (!verify_slab_v1(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_s_list); readmem(si->slab, KVADDR, slab_s_buf, SIZE(slab_s), "slab_s buffer", FAULT_ON_ERROR); tmp = INT(slab_s_buf + OFFSET(slab_s_inuse)); si->inuse += tmp; if (ACTIVE()) gather_cpudata_list_v1(si); si->s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_s_buf + OFFSET(slab_s_list)); si->slab -= OFFSET(slab_s_list); /* * Check for slab transition. 
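 * A corrupted pointer can carry the walk off one list and onto the
 * head of another; landing on a different slab_chains[] entry than
 * the one being traversed is reported as a chain inconsistency.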
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v1[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } FREEBUF(slab_s_buf); if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= SLAB_WALKTHROUGH; si->flags &= ~SLAB_GET_COUNTS; for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v1[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) restart(0); if (!verify_slab_v1(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_s_list); dump_slab_percpu_v1(si); if (si->found) { return; } readmem(si->slab+OFFSET(slab_s_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_s_list); } while (si->slab != slab_chains[s] && !list_borked); } break; } } /* * Try to preclude any attempt to translate a bogus slab structure. */ static int verify_slab_v1(struct meminfo *si, ulong last, int s) { char slab_s_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v1[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_s_buf, SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *) (slab_s_buf + OFFSET(slab_s_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v1; switch (s) { case 0: /* full -- but can be one singular list */ if (VALID_MEMBER(kmem_cache_s_slabs_full) && (inuse != si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v1: s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? FALSE : TRUE); } /* * Updated for 2.6 slab substructure. 
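 *
 * Relative to the v1 code above, the three slab lists now hang off
 * kmem_list3 and are walked {partial, full, free} (slab_chain_name_v2
 * below) rather than {full, partial, free}, so the per-list inuse
 * sanity checks in verify_slab_v2() shift cases accordingly.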
*/ static char *slab_chain_name_v2[] = {"partial", "full", "free"}; static void do_slab_chain_percpu_v2(long cmd, struct meminfo *si) { int i, tmp, s; int list_borked; char *slab_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; list_borked = 0; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_full); slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_free); if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } switch (cmd) { case SLAB_GET_COUNTS: si->flags |= SLAB_GET_COUNTS; si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; gather_cpudata_list_v2(si); slab_buf = GETBUF(SIZE(slab)); for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_buf); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_buf); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); tmp = INT(slab_buf + OFFSET(slab_inuse)); si->inuse += tmp; if (ACTIVE()) gather_cpudata_list_v2(si); si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_buf + OFFSET(slab_list)); si->slab -= OFFSET(slab_list); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } FREEBUF(slab_buf); if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= SLAB_WALKTHROUGH; si->flags &= ~SLAB_GET_COUNTS; for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) restart(0); if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); dump_slab_percpu_v2(si); if (si->found) { return; } readmem(si->slab+OFFSET(slab_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_list); } while (si->slab != slab_chains[s] && !list_borked); } break; } } /* * Added To Traverse the Nodelists */ static void do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si) { int i, tmp, s, node; int list_borked; char *slab_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; ulong *start_address; int index; list_borked = 0; slab_buf = NULL; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) error(INFO, "cannot read kmem_cache nodelists array"); switch (cmd) { case SLAB_GET_COUNTS: si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE); si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; slab_buf = GETBUF(SIZE(slab)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_buf); FREEBUF(start_address); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_buf); FREEBUF(start_address); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; 
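/*
 * With list_borked set, the continue below re-tests
 * the do-while condition and abandons this chain
 * rather than following a suspect pointer further.
 */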
continue; } last = si->slab - OFFSET(slab_list); readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); tmp = INT(slab_buf + OFFSET(slab_inuse)); si->inuse += tmp; si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_buf + OFFSET(slab_list)); si->slab -= OFFSET(slab_list); /* * Check for slab transition. (Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } } if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); si->flags &= ~SLAB_GET_COUNTS; slab_buf = GETBUF(SIZE(slab)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: " "bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(slab_buf); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); dump_slab_percpu_v2(si); if (si->found) { FREEBUF(start_address); FREEBUF(slab_buf); return; } readmem(si->slab+OFFSET(slab_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_list); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(slab_buf); FREEBUF(start_address); } static int slab_freelist_index_size(void) { struct datatype_member datatype, *dm; dm = &datatype; BZERO(dm, sizeof(*dm)); dm->name = "freelist_idx_t"; if (is_typedef(dm->name)) return DATATYPE_SIZE(dm); if (CRASHDEBUG(1)) error(INFO, "freelist_idx_t does not exist\n"); return sizeof(int); } static void do_slab_chain_slab_overload_page(long cmd, struct meminfo *si) { int i, tmp, s, node; int list_borked; char *page_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; ulong *start_address; int index; list_borked = 0; page_buf = NULL; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, 
&start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) error(INFO, "cannot read kmem_cache nodelists array"); switch (cmd) { case SLAB_GET_COUNTS: si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE); si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; page_buf = GETBUF(SIZE(page)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad page/slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(page_buf); FREEBUF(start_address); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(page_buf); FREEBUF(start_address); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", FAULT_ON_ERROR); tmp = INT(page_buf + OFFSET(page_active)); si->inuse += tmp; si->s_mem = ULONG(page_buf + OFFSET(page_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(page_buf + OFFSET(page_lru)); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } } if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: if (si->flags & SLAB_OVERLOAD_PAGE_PTR) { specified_slab = si->spec_addr; si->slab = si->spec_addr + OFFSET(page_lru); } else { specified_slab = si->slab; if (si->slab) si->slab += OFFSET(page_lru); } si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); si->flags &= ~SLAB_GET_COUNTS; page_buf = GETBUF(SIZE(page)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: " "bad page/slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", FAULT_ON_ERROR); si->s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(page_buf); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; dump_slab_overload_page(si); if (si->found) { FREEBUF(start_address); FREEBUF(page_buf); return; } readmem(si->slab, KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(page_buf); FREEBUF(start_address); } /* * Try to preclude any attempt to translate a bogus slab structure. 
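 *
 * The sanity checks amount to: the slab's list_head pointers must be
 * accessible kernel addresses (with prev pointing back at the element
 * we arrived from), the inuse count must not exceed c_num and must
 * match the list the slab was found on (partial: 0 < inuse < c_num,
 * full: inuse == c_num, free: inuse == 0), and s_mem must be a
 * readable kernel address.  The per-list checks are skipped when the
 * walk starts from a caller-specified slab (last == 0).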
*/ static int verify_slab_v2(struct meminfo *si, ulong last, int s) { char slab_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_buf + OFFSET(slab_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* full */ if (inuse != si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? 
FALSE : TRUE); } static int verify_slab_overload_page(struct meminfo *si, ulong last, int s) { char *page_buf; struct kernel_list_head *list_head; unsigned int active; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; page_buf = GETBUF(SIZE(page)); errcnt = 0; if (!readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); FREEBUF(page_buf); return FALSE; } list_head = (struct kernel_list_head *)(page_buf + OFFSET(page_lru)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: page/slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: page/slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } active = UINT(page_buf + OFFSET(page_active)); if (active > si->c_num) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((active == 0) || (active == si->c_num)) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 1: /* full */ if (active != si->c_num) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 2: /* free */ if (active > 0) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: page/slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; FREEBUF(page_buf); return(errcnt ? FALSE : TRUE); } /* * If it's a dumpfile, save the essential slab data to avoid re-reading * the whole slab chain more than once. This may seem like overkill, but * if the problem is a memory leak, or just the over-use of the buffer_head * cache, it's painful to wait each time subsequent kmem -s or -i commands * simply need the basic slab counts. 
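 *
 * The mechanics: do_slab_chain*(SLAB_GET_COUNTS) ends with
 * save_slab_data(), which stashes {cache_addr, num_slabs, inuse,
 * cpucached_cache} per cache in vt->slab_data, and later passes
 * short-circuit through slab_data_saved().  Nothing is cached on a
 * live system, where the counts would go stale immediately.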
*/ struct slab_data { ulong cache_addr; int num_slabs; int inuse; ulong cpucached_cache; }; #define NO_SLAB_DATA ((void *)(-1)) static void save_slab_data(struct meminfo *si) { int i; if (si->flags & SLAB_DATA_NOSAVE) { si->flags &= ~SLAB_DATA_NOSAVE; return; } if (ACTIVE()) return; if (vt->slab_data == NO_SLAB_DATA) return; if (!vt->slab_data) { if (!(vt->slab_data = (struct slab_data *) malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { error(INFO, "cannot malloc slab_data table"); vt->slab_data = NO_SLAB_DATA; return; } for (i = 0; i < vt->kmem_cache_count; i++) { vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; vt->slab_data[i].num_slabs = 0; vt->slab_data[i].inuse = 0; vt->slab_data[i].cpucached_cache = 0; } } for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == si->cache) break; if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { vt->slab_data[i].cache_addr = si->cache; vt->slab_data[i].num_slabs = si->num_slabs; vt->slab_data[i].inuse = si->inuse; vt->slab_data[i].cpucached_cache = si->cpucached_cache; break; } } } static int slab_data_saved(struct meminfo *si) { int i; if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) return FALSE; for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == si->cache) { si->inuse = vt->slab_data[i].inuse; si->num_slabs = vt->slab_data[i].num_slabs; si->cpucached_cache = vt->slab_data[i].cpucached_cache; return TRUE; } } return FALSE; } static void dump_saved_slab_data(void) { int i; if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) return; for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) break; fprintf(fp, " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", vt->slab_data[i].cache_addr, vt->slab_data[i].inuse, vt->slab_data[i].num_slabs, vt->slab_data[i].cpucached_cache); } } /* * Dump the contents of a kmem slab. */ static void dump_slab(struct meminfo *si) { si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); si->s_mem = PTOB(BTOP(si->s_mem)); if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) { si->found = KMEM_SLAB_ADDR; return; } if (INSLAB(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... */ else return; } si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse)); si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects(si); } /* * dump_slab() adapted for newer percpu slab format. */ static void dump_slab_percpu_v1(struct meminfo *si) { int tmp; readmem(si->slab+OFFSET(slab_s_s_mem), KVADDR, &si->s_mem, sizeof(ulong), "s_mem", FAULT_ON_ERROR); /* * Include the array of kmem_bufctl_t's appended to slab. */ tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num); if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB_PERCPU(si->slab, si) && (si->spec_addr >= si->slab) && (si->spec_addr < (si->slab+tmp))) { if (si->spec_addr >= (si->slab + SIZE(slab_s))) si->found = KMEM_BUFCTL_ADDR; else si->found = KMEM_SLAB_ADDR; } else if (INSLAB_PERCPU(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... 
*/ else return; } readmem(si->slab+OFFSET(slab_s_inuse), KVADDR, &tmp, sizeof(int), "inuse", FAULT_ON_ERROR); si->s_inuse = tmp; readmem(si->slab+OFFSET(slab_s_free), KVADDR, &si->free, SIZE(kmem_bufctl_t), "kmem_bufctl_t", FAULT_ON_ERROR); gather_slab_free_list_percpu(si); gather_slab_cached_count(si); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects_percpu(si); } /* * Updated for 2.6 slab substructure. */ static void dump_slab_percpu_v2(struct meminfo *si) { int tmp; readmem(si->slab+OFFSET(slab_s_mem), KVADDR, &si->s_mem, sizeof(ulong), "s_mem", FAULT_ON_ERROR); /* * Include the array of kmem_bufctl_t's appended to slab. */ tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num); if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB_PERCPU(si->slab, si) && (si->spec_addr >= si->slab) && (si->spec_addr < (si->slab+tmp))) { if (si->spec_addr >= (si->slab + SIZE(slab))) si->found = KMEM_BUFCTL_ADDR; else si->found = KMEM_SLAB_ADDR; } else if (INSLAB_PERCPU(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... */ else return; } readmem(si->slab+OFFSET(slab_inuse), KVADDR, &tmp, sizeof(int), "inuse", FAULT_ON_ERROR); si->s_inuse = tmp; readmem(si->slab+OFFSET(slab_free), KVADDR, &si->free, SIZE(kmem_bufctl_t), "kmem_bufctl_t", FAULT_ON_ERROR); gather_slab_free_list_percpu(si); gather_slab_cached_count(si); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects_percpu(si); } static void dump_slab_overload_page(struct meminfo *si) { int tmp; ulong slab_overload_page, freelist; slab_overload_page = si->slab - OFFSET(page_lru); readmem(slab_overload_page + OFFSET(page_s_mem), KVADDR, &si->s_mem, sizeof(ulong), "page.s_mem", FAULT_ON_ERROR); readmem(slab_overload_page + OFFSET(page_freelist), KVADDR, &freelist, sizeof(ulong), "page.freelist", FAULT_ON_ERROR); if (si->flags & ADDRESS_SPECIFIED) { if ((si->spec_addr >= slab_overload_page) && (si->spec_addr < (slab_overload_page+SIZE(page)))) { si->found = KMEM_SLAB_OVERLOAD_PAGE; } else if (INSLAB_PERCPU(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... */ else return; } readmem(slab_overload_page + OFFSET(page_active), KVADDR, &tmp, sizeof(int), "active", FAULT_ON_ERROR); si->s_inuse = tmp; gather_slab_free_list_slab_overload_page(si); gather_slab_cached_count(si); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects_percpu(si); } /* * Gather the free objects in a slab into the si->addrlist, checking for * specified addresses that are in-slab kmem_bufctls, and making error checks * along the way. Object address checks are deferred to dump_slab_objects(). */ #define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size))) static void gather_slab_free_list(struct meminfo *si) { ulong *next, obj; ulong expected, cnt; BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); if (!si->s_freep) return; cnt = 0; expected = si->c_num - si->s_inuse; next = si->s_freep; do { if (cnt == si->c_num) { error(INFO, "\"%s\" cache: too many objects found in slab free list\n", si->curname); si->errors++; return; } /* * Off-slab kmem_bufctls are contained in arrays of object * pointers that point to: * 1. next kmem_bufctl (or NULL) if the object is free. * 2. to the object if it the object is in use. * * On-slab kmem_bufctls resides just after the object itself, * and point to: * 1. next kmem_bufctl (or NULL) if object is free. * 2. 
the containing slab if the object is in use. */ if (si->c_flags & SLAB_CFLGS_BUFCTL) obj = si->s_mem + ((next - si->s_index) * si->c_offset); else obj = (ulong)next - si->c_offset; si->addrlist[cnt] = obj; if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB(next, si) && (si->spec_addr >= (ulong)next) && (si->spec_addr < (ulong)(next + 1))) { si->found = KMEM_BUFCTL_ADDR; return; } } cnt++; if (!INSLAB(obj, si)) { error(INFO, "\"%s\" cache: address not contained within slab: %lx\n", si->curname, obj); si->errors++; } readmem((ulong)next, KVADDR, &next, sizeof(void *), "s_freep chain entry", FAULT_ON_ERROR); } while (next); if (cnt != expected) { error(INFO, "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } } /* * gather_slab_free_list() adapted for newer percpu slab format. */ #define BUFCTL_END 0xffffFFFF static void gather_slab_free_list_percpu(struct meminfo *si) { int i; ulong obj; ulong expected, cnt; int free_index; ulong kmembp; short *kbp; BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); if (CRASHDEBUG(1)) fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n", si->slab, si->s_inuse, si->c_num); if (si->s_inuse == si->c_num ) return; kmembp = si->slab + SIZE_OPTION(slab_s, slab); readmem((ulong)kmembp, KVADDR, si->kmem_bufctl, SIZE(kmem_bufctl_t) * si->c_num, "kmem_bufctl array", FAULT_ON_ERROR); if (CRASHDEBUG(1)) { for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) && (i < si->c_num); i++) fprintf(fp, "%d ", si->kmem_bufctl[i]); for (kbp = (short *)&si->kmem_bufctl[0], i = 0; (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num); i++) fprintf(fp, "%d ", *(kbp + i)); fprintf(fp, "\n"); } cnt = 0; expected = si->c_num - si->s_inuse; if (SIZE(kmem_bufctl_t) == sizeof(int)) { for (free_index = si->free; free_index != BUFCTL_END; free_index = si->kmem_bufctl[free_index]) { if (cnt == si->c_num) { error(INFO, "\"%s\" cache: too many objects found in slab free list\n", si->curname); si->errors++; return; } obj = si->s_mem + (free_index*si->size); si->addrlist[cnt] = obj; cnt++; } } else if (SIZE(kmem_bufctl_t) == sizeof(short)) { kbp = (short *)&si->kmem_bufctl[0]; for (free_index = si->free; free_index != BUFCTL_END; free_index = (int)*(kbp + free_index)) { if (cnt == si->c_num) { error(INFO, "\"%s\" cache: too many objects found in slab free list\n", si->curname); si->errors++; return; } obj = si->s_mem + (free_index*si->size); si->addrlist[cnt] = obj; cnt++; } } else error(FATAL, "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n", SIZE(kmem_bufctl_t)); if (cnt != expected) { error(INFO, "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } } static void gather_slab_free_list_slab_overload_page(struct meminfo *si) { int i, active, start_offset; ulong obj, objnr, cnt, freelist; unsigned char *ucharptr; unsigned short *ushortptr; unsigned int *uintptr; unsigned int cache_flags, overload_active; ulong slab_overload_page; if (CRASHDEBUG(1)) fprintf(fp, "slab page: %lx active: %ld si->c_num: %ld\n", si->slab - OFFSET(page_lru), si->s_inuse, si->c_num); if (si->s_inuse == si->c_num ) return; slab_overload_page = si->slab - OFFSET(page_lru); readmem(slab_overload_page + OFFSET(page_freelist), KVADDR, &freelist, sizeof(void *), "page freelist", FAULT_ON_ERROR); readmem(freelist, KVADDR, si->freelist, si->freelist_index_size * si->c_num, "freelist array", FAULT_ON_ERROR); readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &cache_flags, sizeof(uint), 
"kmem_cache_s flags", FAULT_ON_ERROR); readmem(slab_overload_page + OFFSET(page_active), KVADDR, &overload_active, sizeof(uint), "active", FAULT_ON_ERROR); BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1)); cnt = objnr = 0; ucharptr = NULL; ushortptr = NULL; uintptr = NULL; active = si->s_inuse; /* * On an OBJFREELIST slab, the object might have been recycled * and everything before the active count can be random data. */ start_offset = 0; if (cache_flags & SLAB_CFLGS_OBJFREELIST) start_offset = overload_active; switch (si->freelist_index_size) { case 1: ucharptr = (unsigned char *)si->freelist + start_offset; break; case 2: ushortptr = (unsigned short *)si->freelist + start_offset; break; case 4: uintptr = (unsigned int *)si->freelist + start_offset; break; } for (i = start_offset; i < si->c_num; i++) { switch (si->freelist_index_size) { case 1: objnr = (ulong)*ucharptr++; break; case 2: objnr = (ulong)*ushortptr++; break; case 4: objnr = (ulong)*uintptr++; break; } if (objnr >= si->c_num) { error(INFO, "\"%s\" cache: invalid/corrupt freelist entry: %ld\n", si->curname, objnr); si->errors++; } if (i >= active) { obj = si->s_mem + (objnr * si->size); si->addrlist[cnt++] = obj; if (CRASHDEBUG(1)) fprintf(fp, "%ld ", objnr); } else if (CRASHDEBUG(1)) fprintf(fp, "[%ld] ", objnr); } if (CRASHDEBUG(1)) fprintf(fp, "\n"); } /* * Dump the FREE, [ALLOCATED] and objects of a slab. */ #define DUMP_SLAB_OBJECT() \ for (j = on_free_list = 0; j < si->c_num; j++) { \ if (obj == si->addrlist[j]) { \ on_free_list = TRUE; \ break; \ } \ } \ \ if (on_free_list) { \ if (!(si->flags & ADDRESS_SPECIFIED)) \ fprintf(fp, " %lx\n", obj); \ if (si->flags & ADDRESS_SPECIFIED) { \ if (INOBJECT(si->spec_addr, obj)) { \ si->found = \ KMEM_OBJECT_ADDR_FREE; \ si->container = obj; \ return; \ } \ } \ } else { \ if (!(si->flags & ADDRESS_SPECIFIED)) \ fprintf(fp, " [%lx]\n", obj); \ cnt++; \ if (si->flags & ADDRESS_SPECIFIED) { \ if (INOBJECT(si->spec_addr, obj)) { \ si->found = \ KMEM_OBJECT_ADDR_INUSE; \ si->container = obj; \ return; \ } \ } \ } static void dump_slab_objects(struct meminfo *si) { int i, j; ulong *next; int on_free_list; ulong cnt, expected; ulong bufctl, obj; gather_slab_free_list(si); if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) return; cnt = 0; expected = si->s_inuse; si->container = 0; if (CRASHDEBUG(1)) for (i = 0; i < si->c_num; i++) { fprintf(fp, "si->addrlist[%d]: %lx\n", i, si->addrlist[i]); } if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s", free_inuse_hdr); /* For on-slab bufctls, c_offset is the distance between the start of * an obj and its related bufctl. For off-slab bufctls, c_offset is * the distance between objs in the slab. */ if (si->c_flags & SLAB_CFLGS_BUFCTL) { for (i = 0, next = si->s_index; i < si->c_num; i++, next++) { obj = si->s_mem + ((next - si->s_index) * si->c_offset); DUMP_SLAB_OBJECT(); } } else { /* * Get the "real" s_mem, i.e., without the offset stripped off. * It contains the address of the first object. 
*/ readmem(si->slab+OFFSET(kmem_slab_s_s_mem), KVADDR, &obj, sizeof(ulong), "s_mem", FAULT_ON_ERROR); for (i = 0; i < si->c_num; i++) { DUMP_SLAB_OBJECT(); if (si->flags & ADDRESS_SPECIFIED) { bufctl = obj + si->c_offset; if ((si->spec_addr >= bufctl) && (si->spec_addr < (bufctl + SIZE(kmem_bufctl_t)))) { si->found = KMEM_BUFCTL_ADDR; return; } } obj += (si->c_offset + SIZE(kmem_bufctl_t)); } } if (cnt != expected) { error(INFO, "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } } /* * dump_slab_objects() adapted for newer percpu slab format. */ static void dump_slab_objects_percpu(struct meminfo *si) { int i, j; int on_free_list, on_cpudata_list, on_shared_list; ulong cnt, expected; ulong obj, freelist; if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB)) if (!(si->found & KMEM_SLAB_OVERLOAD_PAGE)) return; cnt = 0; expected = si->s_inuse; si->container = 0; if (CRASHDEBUG(1)) for (i = 0; i < si->c_num; i++) { fprintf(fp, "si->addrlist[%d]: %lx\n", i, si->addrlist[i]); } if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s", free_inuse_hdr); for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { on_free_list = FALSE; on_cpudata_list = FALSE; on_shared_list = FALSE; for (j = 0; j < si->c_num; j++) { if (obj == si->addrlist[j]) { on_free_list = TRUE; break; } } on_cpudata_list = check_cpudata_list(si, obj); on_shared_list = check_shared_list(si, obj); if (on_free_list && on_cpudata_list) { error(INFO, "\"%s\" cache: object %lx on both free and cpu %d lists\n", si->curname, obj, si->cpu); si->errors++; } if (on_free_list && on_shared_list) { error(INFO, "\"%s\" cache: object %lx on both free and shared lists\n", si->curname, obj); si->errors++; } if (on_cpudata_list && on_shared_list) { error(INFO, "\"%s\" cache: object %lx on both cpu %d and shared lists\n", si->curname, obj, si->cpu); si->errors++; } if (on_free_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx\n", obj); if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_FREE; si->container = obj; return; } } } else if (on_cpudata_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx (cpu %d cache)\n", obj, si->cpu); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_CACHED; si->container = obj; return; } } } else if (on_shared_list) { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " %lx (shared cache)\n", obj); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_SHARED; si->container = obj; return; } } } else { if (!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, " [%lx]\n", obj); cnt++; if (si->flags & ADDRESS_SPECIFIED) { if (INOBJECT(si->spec_addr, obj)) { si->found = KMEM_OBJECT_ADDR_INUSE; si->container = obj; return; } } } } if (cnt != expected) { error(INFO, "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n", si->curname, expected, cnt); si->errors++; } if ((si->flags & ADDRESS_SPECIFIED) && (vt->flags & SLAB_OVERLOAD_PAGE)) { readmem(si->slab - OFFSET(page_lru) + OFFSET(page_freelist), KVADDR, &freelist, sizeof(ulong), "page.freelist", FAULT_ON_ERROR); if ((si->spec_addr >= freelist) && (si->spec_addr < si->s_mem)) si->found = KMEM_SLAB_FREELIST; } } /* * Determine how many of the "inuse" slab objects are actually cached * in the kmem_cache_s header. Set the per-slab count and update the * cumulative per-cache count. 
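 * Each object in the slab is tested against both the per-cpu
 * array_cache contents and the shared list; an object found on both
 * is counted once and flags the cache SLAB_DATA_NOSAVE.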
With the addition of the shared list * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat * misleading. But they both are types of objects that are cached * in the kmem_cache_s header, just not necessarily per-cpu. */ static void gather_slab_cached_count(struct meminfo *si) { int i; ulong obj; int in_cpudata, in_shared; si->cpucached_slab = 0; for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) { in_cpudata = in_shared = 0; if (check_cpudata_list(si, obj)) { in_cpudata = TRUE; si->cpucached_slab++; if (si->flags & SLAB_GET_COUNTS) { si->cpucached_cache++; } } if (check_shared_list(si, obj)) { in_shared = TRUE; if (!in_cpudata) { si->cpucached_slab++; if (si->flags & SLAB_GET_COUNTS) { si->cpucached_cache++; } } } if (in_cpudata && in_shared) { si->flags |= SLAB_DATA_NOSAVE; if (!(si->flags & VERBOSE)) error(INFO, "\"%s\" cache: object %lx on both cpu %d and shared lists\n", si->curname, obj, si->cpu); } } } /* * Populate the percpu object list for a given slab. */ static void gather_cpudata_list_v1(struct meminfo *si) { int i, j; int avail; ulong cpudata[NR_CPUS]; if (INVALID_MEMBER(kmem_cache_s_cpudata)) return; readmem(si->cache+OFFSET(kmem_cache_s_cpudata), KVADDR, &cpudata[0], sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), "cpudata array", FAULT_ON_ERROR); for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && cpudata[i]; i++) { BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); readmem(cpudata[i]+OFFSET(cpucache_s_avail), KVADDR, &avail, sizeof(int), "cpucache avail", FAULT_ON_ERROR); if (!avail) continue; if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; } if (CRASHDEBUG(2)) fprintf(fp, "%s: cpu[%d] avail: %d\n", si->curname, i, avail); readmem(cpudata[i]+SIZE(cpucache_s), KVADDR, si->cpudata[i], sizeof(void *) * avail, "cpucache avail", FAULT_ON_ERROR); if (CRASHDEBUG(2)) for (j = 0; j < avail; j++) fprintf(fp, " %lx\n", si->cpudata[i][j]); } } /* * Updated for 2.6 slab percpu data structure, this also gathers * the shared array_cache list as well. */ static void gather_cpudata_list_v2(struct meminfo *si) { int i, j; int avail; ulong cpudata[NR_CPUS]; ulong shared; readmem(si->cache+OFFSET(kmem_cache_s_array), KVADDR, &cpudata[0], sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array), "array_cache array", FAULT_ON_ERROR); for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) && cpudata[i]; i++) { BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); readmem(cpudata[i]+OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), "array cache avail", FAULT_ON_ERROR); if (!avail) continue; if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; } if (CRASHDEBUG(2)) fprintf(fp, "%s: cpu[%d] avail: %d\n", si->curname, i, avail); readmem(cpudata[i]+SIZE(array_cache), KVADDR, si->cpudata[i], sizeof(void *) * avail, "array_cache avail", FAULT_ON_ERROR); if (CRASHDEBUG(2)) for (j = 0; j < avail; j++) fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); } /* * If the shared list contains anything, gather them as well. 
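 * The shared array_cache hangs off kmem_cache.lists.shared, and its
 * object pointers sit immediately past the array_cache header, i.e.
 * at shared + SIZE(array_cache), avail entries in all.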
*/ BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit); if (!VALID_MEMBER(kmem_list3_shared) || !VALID_MEMBER(kmem_cache_s_lists) || !readmem(si->cache+OFFSET(kmem_cache_s_lists)+ OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || !readmem(shared+OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), "shared array_cache avail", RETURN_ON_ERROR|QUIET) || !avail) return; if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; return; } if (CRASHDEBUG(2)) fprintf(fp, "%s: shared avail: %d\n", si->curname, avail); readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache, sizeof(void *) * avail, "shared array_cache avail", FAULT_ON_ERROR); if (CRASHDEBUG(2)) for (j = 0; j < avail; j++) fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); } /* * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache */ static void gather_cpudata_list_v2_nodes(struct meminfo *si, int index) { int i, j; int avail; ulong cpudata[NR_CPUS]; ulong shared, percpu_ptr; ulong *start_address; start_address = (ulong *) GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (vt->flags & SLAB_CPU_CACHE) { readmem(si->cache+OFFSET(kmem_cache_cpu_cache), KVADDR, &percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache", FAULT_ON_ERROR); for (i = 0; i < vt->kmem_max_cpus; i++) cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i]; } else { readmem(si->cache+OFFSET(kmem_cache_s_array), KVADDR, &cpudata[0], sizeof(ulong) * vt->kmem_max_cpus, "array_cache array", FAULT_ON_ERROR); } for (i = 0; (i < vt->kmem_max_cpus) && cpudata[i] && !(index); i++) { if (si->cpudata[i]) BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); else continue; readmem(cpudata[i]+OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), "array cache avail", FAULT_ON_ERROR); if (!avail) continue; if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: array_cache.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; continue; } if (CRASHDEBUG(2)) fprintf(fp, "%s: cpu[%d] avail: %d\n", si->curname, i, avail); readmem(cpudata[i]+SIZE(array_cache), KVADDR, si->cpudata[i], sizeof(void *) * avail, "array_cache avail", FAULT_ON_ERROR); if (CRASHDEBUG(2)) for (j = 0; j < avail; j++) fprintf(fp, " %lx (cpu %d)\n", si->cpudata[i][j], i); } /* * If the shared list contains anything, gather them as well. 
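 * Unlike the single-list case above, the per-node shared lists are
 * accumulated into one flat si->shared_array_cache array:
 * SLAB_FIRST_NODE zeroes the array and resets current_cache_index,
 * and each node then appends its avail entries at that index.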
*/ if (si->flags & SLAB_FIRST_NODE) { BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit * vt->kmem_cache_len_nodes); si->current_cache_index = 0; } if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes , "array nodelist array", RETURN_ON_ERROR) || !readmem(start_address[index] + OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *), "kmem_list3 shared", RETURN_ON_ERROR|QUIET) || !shared || !readmem(shared + OFFSET(array_cache_avail), KVADDR, &avail, sizeof(int), "shared array_cache avail", RETURN_ON_ERROR|QUIET) || !avail) { FREEBUF(start_address); return; } if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; FREEBUF(start_address); return; } if (CRASHDEBUG(2)) fprintf(fp, "%s: shared avail: %d\n", si->curname, avail); readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache + si->current_cache_index, sizeof(void *) * avail, "shared array_cache avail", FAULT_ON_ERROR); if ((si->current_cache_index + avail) > (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) { error(INFO, "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n", si->curname, si->current_cache_index + avail, vt->kmem_max_limit * vt->kmem_cache_len_nodes); si->errors++; FREEBUF(start_address); return; } if (CRASHDEBUG(2)) for (j = si->current_cache_index; j < (si->current_cache_index + avail); j++) fprintf(fp, " %lx (shared list)\n", si->shared_array_cache[j]); si->current_cache_index += avail; FREEBUF(start_address); } /* * Check whether a given address is contained in the previously-gathered * percpu object cache. */ static int check_cpudata_list(struct meminfo *si, ulong obj) { int i, j; for (i = 0; i < vt->kmem_max_cpus; i++) { for (j = 0; si->cpudata[i][j]; j++) if (si->cpudata[i][j] == obj) { si->cpu = i; return TRUE; } } return FALSE; } /* * Check whether a given address is contained in the previously-gathered * shared object cache. */ static int check_shared_list(struct meminfo *si, ulong obj) { int i; if (INVALID_MEMBER(kmem_list3_shared) || !si->shared_array_cache) return FALSE; for (i = 0; si->shared_array_cache[i]; i++) { if (si->shared_array_cache[i] == obj) return TRUE; } return FALSE; } /* * Search the various memory subsystems for instances of this address. * Start with the most specific areas, ending up with at least the * mem_map page data. */ static void kmem_search(struct meminfo *mi) { struct syment *sp; struct meminfo tmp_meminfo; char buf[BUFSIZE]; ulong vaddr, orig_flags; physaddr_t paddr; ulong offset; ulong task; ulong show_flags; struct task_context *tc; vaddr = 0; pc->curcmd_flags &= ~HEADER_PRINTED; pc->curcmd_flags |= IGNORE_ERRORS; switch (mi->memtype) { case KVADDR: vaddr = mi->spec_addr; break; case PHYSADDR: vaddr = mi->spec_addr < VTOP(vt->high_memory) ? PTOV(mi->spec_addr) : BADADDR; break; } orig_flags = mi->flags; mi->retval = 0; /* * Check first for a possible symbolic display of the virtual * address associated with mi->spec_addr or PTOV(mi->spec_addr). */ if (((vaddr >= kt->stext) && (vaddr <= kt->end)) || IS_MODULE_VADDR(mi->spec_addr)) { if ((sp = value_search(vaddr, &offset))) { show_flags = SHOW_LINENUM | SHOW_RADIX(); if (module_symbol(sp->value, NULL, NULL, NULL, 0)) show_flags |= SHOW_MODULE; show_symbol(sp, offset, show_flags); fprintf(fp, "\n"); } } /* * Check for a valid mapped address. 
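 * For a vmalloc address this verifies it against the vmlist, prints
 * the matching region, then rewrites spec_addr as the translated
 * physical address so the mem_map lookup at the bottom runs against
 * the backing page.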
*/ if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) { if (kvtop(NULL, mi->spec_addr, &paddr, 0)) { mi->flags = orig_flags | VMLIST_VERIFY; dump_vmlist(mi); if (mi->retval) { mi->flags = orig_flags; dump_vmlist(mi); fprintf(fp, "\n"); mi->spec_addr = paddr; mi->memtype = PHYSADDR; goto mem_map; } } } /* * If the address is physical, check whether it's in vmalloc space. */ if (mi->memtype == PHYSADDR) { mi->flags = orig_flags; mi->flags |= GET_PHYS_TO_VMALLOC; mi->retval = 0; dump_vmlist(mi); mi->flags &= ~GET_PHYS_TO_VMALLOC; if (mi->retval) { if ((sp = value_search(mi->retval, &offset))) { show_symbol(sp, offset, SHOW_LINENUM | SHOW_RADIX()); fprintf(fp, "\n"); } dump_vmlist(mi); fprintf(fp, "\n"); goto mem_map; } } /* * Check whether the containing page belongs to the slab subsystem. */ mi->flags = orig_flags; mi->retval = 0; if ((vaddr != BADADDR) && vaddr_to_kmem_cache(vaddr, buf, VERBOSE)) { BZERO(&tmp_meminfo, sizeof(struct meminfo)); tmp_meminfo.spec_addr = vaddr; tmp_meminfo.memtype = KVADDR; tmp_meminfo.flags = mi->flags; vt->dump_kmem_cache(&tmp_meminfo); fprintf(fp, "\n"); } if ((vaddr != BADADDR) && is_slab_page(mi, buf)) { BZERO(&tmp_meminfo, sizeof(struct meminfo)); tmp_meminfo.spec_addr = vaddr; tmp_meminfo.memtype = KVADDR; tmp_meminfo.flags = mi->flags; vt->dump_kmem_cache(&tmp_meminfo); fprintf(fp, "\n"); } /* * Check free list. */ mi->flags = orig_flags; mi->retval = 0; vt->dump_free_pages(mi); if (mi->retval) fprintf(fp, "\n"); if (vt->page_hash_table) { /* * Check the page cache. */ mi->flags = orig_flags; mi->retval = 0; dump_page_hash_table(mi); if (mi->retval) fprintf(fp, "\n"); } /* * Check whether it's a current task or stack address. */ if ((mi->memtype == KVADDR) && (task = vaddr_in_task_struct(vaddr)) && (tc = task_to_context(task))) { show_context(tc); fprintf(fp, "\n"); } else if ((mi->memtype == KVADDR) && (task = stkptr_to_task(vaddr)) && (tc = task_to_context(task))) { show_context(tc); fprintf(fp, "\n"); } mem_map: mi->flags = orig_flags; pc->curcmd_flags &= ~HEADER_PRINTED; if (vaddr != BADADDR) dump_mem_map(mi); else mi->retval = FALSE; if (!mi->retval) fprintf(fp, "%llx: %s address not found in mem map\n", mi->spec_addr, memtype_string(mi->memtype, 0)); } int generic_is_page_ptr(ulong addr, physaddr_t *phys) { return FALSE; } /* * Determine whether an address is a page pointer from the mem_map[] array. * If the caller requests it, return the associated physical address. 
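 *
 * For example, with FLATMEM, a mem_map[] base of c1000000 and a
 * 32-byte page struct (hypothetical numbers), c1000020 is page #1
 * and yields physical address nt->start_paddr + 1 * PAGESIZE(),
 * while c1000030 lands mid-structure and fails the modulo test.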
 */
int
is_page_ptr(ulong addr, physaddr_t *phys)
{
	int n;
	ulong ppstart, ppend;
	struct node_table *nt;
	ulong pgnum, node_size;
	ulong nr, sec_addr;
	ulong nr_mem_sections;
	ulong coded_mem_map, mem_map, end_mem_map;
	physaddr_t section_paddr;

	if (machdep->is_page_ptr(addr, phys))
		return TRUE;

	if (IS_SPARSEMEM()) {
		nr_mem_sections = vt->max_mem_section_nr+1;
		for (nr = 0; nr < nr_mem_sections; nr++) {
			if ((sec_addr = valid_section_nr(nr))) {
				coded_mem_map = section_mem_map_addr(sec_addr, 0);
				mem_map = sparse_decode_mem_map(coded_mem_map, nr);
				end_mem_map = mem_map + (PAGES_PER_SECTION() * SIZE(page));

				if ((addr >= mem_map) && (addr < end_mem_map)) {
					if ((addr - mem_map) % SIZE(page))
						return FALSE;
					if (phys) {
						section_paddr = PTOB(section_nr_to_pfn(nr));
						pgnum = (addr - mem_map) / SIZE(page);
						*phys = section_paddr +
							((physaddr_t)pgnum * PAGESIZE());
					}
					return TRUE;
				}
			}
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		ppstart = nt->mem_map;
		ppend = ppstart + (node_size * SIZE(page));

		if ((addr < ppstart) || (addr >= ppend))
			continue;

		/*
		 * We're in the mem_map range -- but is it a page pointer?
		 */
		if ((addr - ppstart) % SIZE(page))
			return FALSE;

		if (phys) {
			pgnum = (addr - nt->mem_map) / SIZE(page);
			*phys = ((physaddr_t)pgnum * PAGESIZE()) + nt->start_paddr;
		}

		return TRUE;
	}

	return FALSE;

#ifdef PRE_NODES
	ppstart = vt->mem_map;
	ppend = ppstart + (vt->total_pages * vt->page_struct_len);

	if ((addr < ppstart) || (addr >= ppend))
		return FALSE;

	if ((addr - ppstart) % vt->page_struct_len)
		return FALSE;

	return TRUE;
#endif
}

/*
 * Return the physical address associated with this page pointer.
 */
static int
page_to_phys(ulong pp, physaddr_t *phys)
{
	return(is_page_ptr(pp, phys));
}

/*
 * Return the page pointer associated with this physical address.
 */
int
phys_to_page(physaddr_t phys, ulong *pp)
{
	int n;
	ulong pgnum;
	struct node_table *nt;
	physaddr_t pstart, pend;
	ulong node_size;

	if (IS_SPARSEMEM()) {
		ulong map;
		map = pfn_to_map(phys >> PAGESHIFT());
		if (map) {
			*pp = map;
			return TRUE;
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		pstart = nt->start_paddr;
		pend = pstart + ((ulonglong)node_size * PAGESIZE());

		if ((phys < pstart) || (phys >= pend))
			continue;

		/*
		 * We're in the physical range -- calculate the page.
		 */
		pgnum = BTOP(phys - pstart);
		*pp = nt->mem_map + (pgnum * SIZE(page));

		return TRUE;
	}

	return FALSE;

#ifdef PRE_NODES
	if (phys >= (vt->total_pages * PAGESIZE()))
		return FALSE;

	pgnum = PTOB(BTOP(phys)) / PAGESIZE();
	*pp = vt->mem_map + (pgnum * vt->page_struct_len);

	return TRUE;
#endif
}

/*
 * Fill the caller's buffer with up to maxlen non-NULL bytes
 * starting from kvaddr, returning the number of consecutive
 * non-NULL bytes found.  If the buffer gets filled with
 * maxlen bytes without a NULL, then the caller is responsible
 * for handling it.
 */
int
read_string(ulong kvaddr, char *buf, int maxlen)
{
	int i;

	BZERO(buf, maxlen);

	readmem(kvaddr, KVADDR, buf, maxlen,
		"read_string characters", QUIET|RETURN_ON_ERROR);

	for (i = 0; i < maxlen; i++) {
		if (buf[i] == NULLCHAR) {
			BZERO(&buf[i], maxlen-i);
			break;
		}
	}

	return i;
}

/*
 * "help -v" output
 */
void
dump_vm_table(int verbose)
{
	int i;
	struct node_table *nt;
	int others;
	ulong *up;

	others = 0;
	fprintf(fp, " flags: %lx %s(",
		vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : "");
	if (vt->flags & NODES)
		fprintf(fp, "%sNODES", others++ ? "|" : "");
	if (vt->flags & NODES_ONLINE)
		fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : "");
	if (vt->flags & ZONES)
		fprintf(fp, "%sZONES", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V1)
		fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2)
		fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : "");
	if (vt->flags & COMMON_VADDR)
		fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_INIT)
		fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : "");
	if (vt->flags & V_MEM_MAP)
		fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_UNAVAIL)
		fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : "");
	if (vt->flags & DISCONTIGMEM)
		fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : "");
	if (vt->flags & FLATMEM)
		fprintf(fp, "%sFLATMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM)
		fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM_EX)
		fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_DELAY)
		fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");
	if (vt->flags & VM_STAT)
		fprintf(fp, "%sVM_STAT", others++ ? "|" : "");
	if (vt->flags & KMALLOC_SLUB)
		fprintf(fp, "%sKMALLOC_SLUB", others++ ? "|" : "");
	if (vt->flags & KMALLOC_COMMON)
		fprintf(fp, "%sKMALLOC_COMMON", others++ ? "|" : "");
	if (vt->flags & SLAB_OVERLOAD_PAGE)
		fprintf(fp, "%sSLAB_OVERLOAD_PAGE", others++ ? "|" : "");
	if (vt->flags & SLAB_CPU_CACHE)
		fprintf(fp, "%sSLAB_CPU_CACHE", others++ ? "|" : "");
	if (vt->flags & SLAB_ROOT_CACHES)
		fprintf(fp, "%sSLAB_ROOT_CACHES", others++ ? "|" : "");
	if (vt->flags & USE_VMAP_AREA)
		fprintf(fp, "%sUSE_VMAP_AREA", others++ ? "|" : "");
	if (vt->flags & CONFIG_NUMA)
		fprintf(fp, "%sCONFIG_NUMA", others++ ? "|" : "");
	if (vt->flags & VM_EVENT)
		fprintf(fp, "%sVM_EVENT", others++ ? "|" : "");
	if (vt->flags & PGCNT_ADJ)
		fprintf(fp, "%sPGCNT_ADJ", others++ ? "|" : "");
	if (vt->flags & PAGEFLAGS)
		fprintf(fp, "%sPAGEFLAGS", others++ ? "|" : "");
	if (vt->flags & SWAPINFO_V1)
		fprintf(fp, "%sSWAPINFO_V1", others++ ? "|" : "");
	if (vt->flags & SWAPINFO_V2)
		fprintf(fp, "%sSWAPINFO_V2", others++ ? "|" : "");
	if (vt->flags & NODELISTS_IS_PTR)
		fprintf(fp, "%sNODELISTS_IS_PTR", others++ ? "|" : "");
	if (vt->flags & VM_INIT)
		fprintf(fp, "%sVM_INIT", others++ ? "|" : "");
	fprintf(fp, ")\n");
	if (vt->kernel_pgd[0] == vt->kernel_pgd[1])
		fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n",
			vt->kernel_pgd[0]);
	else {
		fprintf(fp, " kernel_pgd[NR_CPUS]: ");
		for (i = 0; i < NR_CPUS; i++) {
			if ((i % 4) == 0)
				fprintf(fp, "\n ");
			fprintf(fp, "%lx ", vt->kernel_pgd[i]);
		}
		fprintf(fp, "\n");
	}
	fprintf(fp, " high_memory: %lx\n", vt->high_memory);
	fprintf(fp, " vmalloc_start: %lx\n", vt->vmalloc_start);
	fprintf(fp, " mem_map: %lx\n", vt->mem_map);
	fprintf(fp, " total_pages: %ld\n", vt->total_pages);
	fprintf(fp, " max_mapnr: %ld\n", vt->max_mapnr);
	fprintf(fp, " totalram_pages: %ld\n", vt->totalram_pages);
	fprintf(fp, " totalhigh_pages: %ld\n", vt->totalhigh_pages);
	fprintf(fp, " num_physpages: %ld\n", vt->num_physpages);
	fprintf(fp, " page_hash_table: %lx\n", vt->page_hash_table);
	fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len);
	fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
	fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit);
	fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
	fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count);
	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
	fprintf(fp, "kmem_cache_len_nodes: %ld\n", vt->kmem_cache_len_nodes);
	fprintf(fp, " nr_bad_slab_caches: %d\n", vt->nr_bad_slab_caches);
	if (!vt->nr_bad_slab_caches)
		fprintf(fp, " bad_slab_caches: (unused)\n");
	else {
		for (i = 0; i < vt->nr_bad_slab_caches; i++) {
			fprintf(fp, " bad_slab_caches[%d]: %lx\n",
				i, vt->bad_slab_caches[i]);
		}
	}
	fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen);
	fprintf(fp, " numnodes: %d\n", vt->numnodes);
	fprintf(fp, " nr_zones: %d\n", vt->nr_zones);
	fprintf(fp, " nr_free_areas: %d\n", vt->nr_free_areas);
	for (i = 0; i < vt->numnodes; i++) {
		nt = &vt->node_table[i];
		fprintf(fp, " node_table[%d]: \n", i);
		fprintf(fp, " id: %d\n", nt->node_id);
		fprintf(fp, " pgdat: %lx\n", nt->pgdat);
		fprintf(fp, " size: %ld\n", nt->size);
		fprintf(fp, " present: %ld\n", nt->present);
		fprintf(fp, " mem_map: %lx\n", nt->mem_map);
		fprintf(fp, " start_paddr: %llx\n", nt->start_paddr);
		fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr);
	}

	fprintf(fp, " dump_free_pages: ");
	if (vt->dump_free_pages == dump_free_pages)
		fprintf(fp, "dump_free_pages()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v1)
		fprintf(fp, "dump_free_pages_zones_v1()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v2)
		fprintf(fp, "dump_free_pages_zones_v2()\n");
	else if (vt->dump_free_pages == dump_multidimensional_free_pages)
		fprintf(fp, "dump_multidimensional_free_pages()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages);
	fprintf(fp, " dump_kmem_cache: ");
	if (vt->dump_kmem_cache == dump_kmem_cache)
		fprintf(fp, "dump_kmem_cache()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1)
		fprintf(fp, "dump_kmem_cache_percpu_v1()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2)
		fprintf(fp, "dump_kmem_cache_percpu_v2()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_slub)
		fprintf(fp, "dump_kmem_cache_slub()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache);
	fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data);
	if (verbose)
		dump_saved_slab_data();
	fprintf(fp, " cpu_slab_type: %d\n", vt->cpu_slab_type);
	fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles);
	fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read);
	fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct);
	fprintf(fp, " mem_sec: %lx\n", (ulong)vt->mem_sec);
	fprintf(fp, " mem_section: %lx\n",
(ulong)vt->mem_section); fprintf(fp, " max_mem_section_nr: %ld\n", (ulong)vt->max_mem_section_nr); fprintf(fp, " ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM); fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len); if (vt->node_online_map_len) { fprintf(fp, " node_online_map: "); up = (ulong *)vt->node_online_map; for (i = 0; i < vt->node_online_map_len; i++) { fprintf(fp, "%s%lx", i ? ", " : "[", *up); up++; } fprintf(fp, "]\n"); } else { fprintf(fp, " node_online_map: (unused)\n"); } fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items); fprintf(fp, " vm_stat_items: %s", (vt->flags & VM_STAT) ? "\n" : "(not used)\n"); for (i = 0; i < vt->nr_vm_stat_items; i++) fprintf(fp, " [%d] %s\n", i, vt->vm_stat_items[i]); fprintf(fp, " nr_vm_event_items: %d\n", vt->nr_vm_event_items); fprintf(fp, " vm_event_items: %s", (vt->flags & VM_EVENT) ? "\n" : "(not used)\n"); for (i = 0; i < vt->nr_vm_event_items; i++) fprintf(fp, " [%d] %s\n", i, vt->vm_event_items[i]); fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved); fprintf(fp, " PG_slab: %ld (%lx)\n", vt->PG_slab, (ulong)1 << vt->PG_slab); fprintf(fp, " PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask); fprintf(fp, " nr_pageflags: %d\n", vt->nr_pageflags); fprintf(fp, " pageflags_data: %s\n", vt->nr_pageflags ? "" : "(not used)"); for (i = 0; i < vt->nr_pageflags; i++) { fprintf(fp, " %s[%d] %08lx: %s\n", i < 10 ? " " : "", i, vt->pageflags_data[i].mask, vt->pageflags_data[i].name); } dump_vma_cache(VERBOSE); } /* * Calculate the amount of memory referenced in the kernel-specific "nodes". */ uint64_t total_node_memory() { int i; struct node_table *nt; uint64_t total; for (i = total = 0; i < vt->numnodes; i++) { nt = &vt->node_table[i]; if (CRASHDEBUG(1)) { console("node_table[%d]: \n", i); console(" id: %d\n", nt->node_id); console(" pgdat: %lx\n", nt->pgdat); console(" size: %ld\n", nt->size); console(" present: %ld\n", nt->present); console(" mem_map: %lx\n", nt->mem_map); console(" start_paddr: %lx\n", nt->start_paddr); console(" start_mapnr: %ld\n", nt->start_mapnr); } if (nt->present) total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); else total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); } return total; } /* * Dump just the vm_area_struct cache table data so that it can be * called from above or for debug purposes. */ void dump_vma_cache(ulong verbose) { int i; ulong vhits; if (!verbose) goto show_hits; for (i = 0; i < VMA_CACHE; i++) fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", i, vt->cached_vma[i], vt->cached_vma_hits[i]); fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); fflush(fp); show_hits: if (vt->vma_cache_fills) { for (i = vhits = 0; i < VMA_CACHE; i++) vhits += vt->cached_vma_hits[i]; fprintf(fp, "%s vma hit rate: %2ld%% (%ld of %ld)\n", verbose ? "" : " ", (vhits * 100)/vt->vma_cache_fills, vhits, vt->vma_cache_fills); } } /* * Guess at the "real" amount of physical memory installed, formatting * it in a MB or GB based string. 
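 *
 * The rounding heuristic below only kicks in when the system total
 * is within 64MB of the next gigabyte: e.g. a reported 1008MB
 * becomes "1 GB" (16MB presumed consumed by firmware or the kernel
 * itself), while 768MB is too far away and is displayed verbatim.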
*/ char * get_memory_size(char *buf) { uint64_t total; ulong next_gig; #ifdef OLDWAY ulong mbs, gbs; #endif total = machdep->memory_size(); if ((next_gig = roundup(total, GIGABYTES(1)))) { if ((next_gig - total) <= MEGABYTES(64)) total = next_gig; } return (pages_to_size((ulong)(total/PAGESIZE()), buf)); #ifdef OLDWAY gbs = (ulong)(total/GIGABYTES(1)); mbs = (ulong)(total/MEGABYTES(1)); if (gbs) mbs = (total % GIGABYTES(1))/MEGABYTES(1); if (total%MEGABYTES(1)) mbs++; if (gbs) sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs); else sprintf(buf, "%ld MB", mbs); return buf; #endif } /* * For use by architectures not having machine-specific manners for * best determining physical memory size. */ uint64_t generic_memory_size(void) { if (machdep->memsize) return machdep->memsize; return (machdep->memsize = total_node_memory()); } /* * Determine whether a virtual address is user or kernel or ambiguous. */ int vaddr_type(ulong vaddr, struct task_context *tc) { int memtype, found; if (!tc) tc = CURRENT_CONTEXT(); memtype = found = 0; if (machdep->is_uvaddr(vaddr, tc)) { memtype |= UVADDR; found++; } if (machdep->is_kvaddr(vaddr)) { memtype |= KVADDR; found++; } if (found == 1) return memtype; else return AMBIGUOUS; } /* * Determine the first valid user space address */ static int address_space_start(struct task_context *tc, ulong *addr) { ulong vma; char *vma_buf; if (!tc->mm_struct) return FALSE; fill_mm_struct(tc->mm_struct); vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); if (!vma) return FALSE; vma_buf = fill_vma_cache(vma); *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); return TRUE; } int generic_get_kvaddr_ranges(struct vaddr_range *rp) { int cnt; if (XEN_HYPER_MODE()) return 0; cnt = 0; rp[cnt].type = KVADDR_UNITY_MAP; rp[cnt].start = machdep->kvbase; rp[cnt++].end = vt->vmalloc_start; rp[cnt].type = KVADDR_VMALLOC; rp[cnt].start = vt->vmalloc_start; rp[cnt++].end = (ulong)(-1); return cnt; } /* * Search for a given value between a starting and ending address range, * applying an optional mask for "don't care" bits. As an alternative * to entering the starting address value, -k means "start of kernel address * space". For processors with ambiguous user/kernel address spaces, * -u or -k must be used (with or without -s) as a differentiator. */ void cmd_search(void) { int i, c, memtype, ranges, context, max; ulonglong start, end; ulong value, mask, len; ulong uvaddr_start, uvaddr_end; ulong kvaddr_start, kvaddr_end, range_end; int sflag, Kflag, Vflag, pflag, Tflag, tflag; struct searchinfo searchinfo; struct syment *sp; struct node_table *nt; struct vaddr_range vaddr_ranges[MAX_KVADDR_RANGES]; struct vaddr_range *vrp; struct task_context *tc; #define vaddr_overflow(ADDR) (BITS32() && ((ADDR) > 0xffffffffULL)) #define uint_overflow(VALUE) ((VALUE) > 0xffffffffUL) #define ushort_overflow(VALUE) ((VALUE) > 0xffffUL) context = max = 0; start = end = 0; value = mask = sflag = pflag = Kflag = Vflag = memtype = len = Tflag = tflag = 0; kvaddr_start = kvaddr_end = 0; uvaddr_start = UNINITIALIZED; uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; BZERO(&searchinfo, sizeof(struct searchinfo)); vrp = &vaddr_ranges[0]; ranges = machdep->get_kvaddr_ranges(vrp); if (CRASHDEBUG(1)) { fprintf(fp, "kvaddr ranges:\n"); for (i = 0; i < ranges; i++) { fprintf(fp, " [%d] %lx %lx ", i, vrp[i].start, vrp[i].end); switch (vrp[i].type) { case KVADDR_UNITY_MAP: fprintf(fp, "KVADDR_UNITY_MAP\n"); break; case KVADDR_START_MAP: fprintf(fp, "KVADDR_START_MAP\n"); break; case KVADDR_VMALLOC: fprintf(fp, "KVADDR_VMALLOC\n"); break; case KVADDR_MODULES: fprintf(fp, "KVADDR_MODULES\n"); break; case KVADDR_VMEMMAP: fprintf(fp, "KVADDR_VMEMMAP\n"); break; } } } searchinfo.mode = SEARCH_ULONG; /* default search */ while ((c = getopt(argcnt, args, "Ttl:ukKVps:e:v:m:hwcx:")) != EOF) { switch(c) { case 'u': if (XEN_HYPER_MODE()) error(FATAL, "-u option is not applicable to the " "Xen hypervisor\n"); if (is_kernel_thread(CURRENT_TASK()) || !task_mm(CURRENT_TASK(), TRUE)) error(FATAL, "current context has no user address space\n"); if (!sflag) { address_space_start(CURRENT_CONTEXT(), &uvaddr_start); start = (ulonglong)uvaddr_start; } memtype = UVADDR; sflag++; break; case 'p': if (XEN_HYPER_MODE()) error(FATAL, "-p option is not applicable to the " "Xen hypervisor\n"); memtype = PHYSADDR; if (!sflag) { nt = &vt->node_table[0]; start = nt->start_paddr; } sflag++; break; case 'V': case 'K': case 'k': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (!sflag) start = vrp[0].start; memtype = KVADDR; sflag++; if (c == 'K') Kflag++; else if (c == 'V') Vflag++; break; case 's': if ((sp = symbol_search(optarg))) start = (ulonglong)sp->value; else start = htoll(optarg, FAULT_ON_ERROR, NULL); sflag++; break; case 'e': if ((sp = symbol_search(optarg))) end = (ulonglong)sp->value; else end = htoll(optarg, FAULT_ON_ERROR, NULL); if (!end) error(FATAL, "invalid ending address: 0\n"); break; case 'l': len = stol(optarg, FAULT_ON_ERROR, NULL); break; case 'm': mask = htol(optarg, FAULT_ON_ERROR, NULL); break; case 'h': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"h\"\n"); searchinfo.mode = SEARCH_USHORT; break; case 'w': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"w\"\n"); searchinfo.mode = SEARCH_UINT; break; case 'c': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search type with \"c\"\n"); searchinfo.mode = SEARCH_CHARS; break; case 'x': context = dtoi(optarg, FAULT_ON_ERROR, NULL); break; case 'T': case 't': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (c == 'T') Tflag++; else if (c == 't') tflag++; if (tflag && Tflag) error(FATAL, "-t and -T options are mutually exclusive\n"); break; default: argerrs++; break; } } if ((tflag || Tflag) && (memtype || start || end || len)) error(FATAL, "-%c option cannot be used with other " "memory-selection options\n", tflag ? 't' : 'T'); if (XEN_HYPER_MODE()) { memtype = KVADDR; if (!sflag) error(FATAL, "the \"-s start\" option is required for" " the Xen hypervisor\n"); } else if (!memtype) { memtype = KVADDR; if (!tflag && !sflag++) start = vrp[0].start; } if (argerrs || (!sflag && !tflag) || !args[optind] || (len && end) || !memtype) cmd_usage(pc->curcmd, SYNOPSIS); searchinfo.memtype = memtype; /* * Verify starting address. 
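 *
 * By this point a typical invocation such as
 *
 *	crash> search -k c0123456
 *	crash> search -u -m ff deadbe00
 *
 * (hypothetical values) has start defaulted to the first kernel
 * virtual range or to the first user vma, so all that remains is to
 * reject addresses that overflow or fall in the wrong address space.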
 */
	switch (memtype) {
	case UVADDR:
		if (vaddr_overflow(start) ||
		    !IS_UVADDR((ulong)start, CURRENT_CONTEXT())) {
			error(INFO, "invalid user virtual address: %llx\n",
				start);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case KVADDR:
		if (tflag)
			break;
		if (vaddr_overflow(start) ||
		    !IS_KVADDR((ulong)start)) {
			error(INFO, "invalid kernel virtual address: %llx\n",
				(ulonglong)start);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case AMBIGUOUS:
		error(INFO,
		    "ambiguous virtual address: %llx (requires -u or -k)\n",
			(ulonglong)start);
		cmd_usage(pc->curcmd, SYNOPSIS);
	}

	/*
	 * Set up ending address if necessary.
	 */
	if (!end && !len && !tflag) {
		switch (memtype) {
		case UVADDR:
			end = (ulonglong)uvaddr_end;
			break;

		case KVADDR:
			if (XEN_HYPER_MODE())
				end = (ulong)(-1);
			else {
				range_end = 0;
				for (i = 0; i < ranges; i++) {
					if (vrp[i].end > range_end)
						range_end = vrp[i].end;
				}
				end = (ulonglong)range_end;
			}
			break;

		case PHYSADDR:
			nt = &vt->node_table[vt->numnodes-1];
			end = nt->start_paddr + (nt->size * PAGESIZE());
			break;
		}
	} else if (len)
		end = start + len;

	/*
	 * Final verification and per-type start/end variable setting.
	 */
	switch (memtype) {
	case UVADDR:
		uvaddr_start = (ulong)start;
		if (end > (ulonglong)uvaddr_end) {
			error(INFO,
			    "ending address %llx is in kernel space\n",
				end);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		if (end < (ulonglong)uvaddr_end)
			uvaddr_end = (ulong)end;
		if (uvaddr_end < uvaddr_start) {
			error(INFO,
			    "ending address %lx is below starting address %lx\n",
				uvaddr_end, uvaddr_start);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case KVADDR:
		if (tflag)
			break;
		kvaddr_start = (ulong)start;
		kvaddr_end = (ulong)end;
		if (kvaddr_end < kvaddr_start) {
			error(INFO,
			    "ending address %lx is below starting address %lx\n",
				kvaddr_end, kvaddr_start);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case PHYSADDR:
		if (end < start) {
			error(INFO,
			    "ending address %llx is below starting address %llx\n",
				(ulonglong)end, (ulonglong)start);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;
	}

	if (mask) {
		switch (searchinfo.mode) {
		case SEARCH_ULONG:
			searchinfo.s_parms.s_ulong.mask = mask;
			break;
		case SEARCH_UINT:
			searchinfo.s_parms.s_uint.mask = mask;
			break;
		case SEARCH_USHORT:
			searchinfo.s_parms.s_ushort.mask = mask;
			break;
		case SEARCH_CHARS:
			error(INFO, "mask ignored on string search\n");
			break;
		}
	}

	if (context) {
		switch (searchinfo.mode) {
		case SEARCH_ULONG:
			max = PAGESIZE()/sizeof(long);
			break;
		case SEARCH_UINT:
			max = PAGESIZE()/sizeof(int);
			break;
		case SEARCH_USHORT:
			max = PAGESIZE()/sizeof(short);
			break;
		case SEARCH_CHARS:
			error(FATAL, "-x option is not allowed with -c\n");
			break;
		}

		if (context > max)
			error(FATAL,
			    "context value %d is too large: maximum is %d\n",
				context, max);

		searchinfo.context = context;
	}

	searchinfo.vcnt = 0;
	searchinfo.val = UNUSED;
	while (args[optind]) {
		switch (searchinfo.mode) {
		case SEARCH_ULONG:
			if (can_eval(args[optind])) {
				value = eval(args[optind], FAULT_ON_ERROR, NULL);
				searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] =
					mask ? NULL : args[optind];
			} else if (symbol_exists(args[optind])) {
				value = symbol_value(args[optind]);
				searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] =
					mask ? NULL : args[optind];
			} else
				value = htol(args[optind], FAULT_ON_ERROR, NULL);

			searchinfo.s_parms.s_ulong.value[searchinfo.vcnt] = value;
			searchinfo.vcnt++;
			break;

		case SEARCH_UINT:
			if (can_eval(args[optind])) {
				value = eval(args[optind], FAULT_ON_ERROR, NULL);
				searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ?
NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.value[searchinfo.vcnt] = value; if (uint_overflow(value)) error(FATAL, "value too large for -w option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_USHORT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.value[searchinfo.vcnt] = value; if (ushort_overflow(value)) error(FATAL, "value too large for -h option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_CHARS: /* parser can deliver empty strings */ if (strlen(args[optind])) { searchinfo.s_parms.s_chars.value[searchinfo.vcnt] = args[optind]; searchinfo.s_parms.s_chars.len[searchinfo.vcnt] = strlen(args[optind]); searchinfo.vcnt++; } break; } optind++; } if (!searchinfo.vcnt) cmd_usage(pc->curcmd, SYNOPSIS); switch (memtype) { case PHYSADDR: searchinfo.paddr_start = start; searchinfo.paddr_end = end; search_physical(&searchinfo); break; case UVADDR: searchinfo.vaddr_start = uvaddr_start; searchinfo.vaddr_end = uvaddr_end; search_virtual(&searchinfo); break; case KVADDR: if (XEN_HYPER_MODE()) { searchinfo.vaddr_start = kvaddr_start; searchinfo.vaddr_end = kvaddr_end; search_virtual(&searchinfo); break; } if (tflag || Tflag) { searchinfo.tasks_found = 0; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (Tflag && !is_task_active(tc->task)) continue; searchinfo.vaddr_start = GET_STACKBASE(tc->task); searchinfo.vaddr_end = GET_STACKTOP(tc->task); searchinfo.task_context = tc; searchinfo.do_task_header = TRUE; search_virtual(&searchinfo); } break; } for (i = 0; i < ranges; i++) { if ((kvaddr_start >= vrp[i].end) || (kvaddr_end <= vrp[i].start)) continue; switch (vrp[i].type) { case KVADDR_UNITY_MAP: case KVADDR_START_MAP: if (Vflag) continue; break; case KVADDR_VMALLOC: case KVADDR_MODULES: case KVADDR_VMEMMAP: if (Kflag) continue; break; } pc->curcmd_private = vrp[i].type; searchinfo.vaddr_start = kvaddr_start > vrp[i].start ? kvaddr_start : vrp[i].start; searchinfo.vaddr_end = (kvaddr_end < vrp[i].end) ? kvaddr_end : vrp[i].end; search_virtual(&searchinfo); } break; } } /* * Do the work for cmd_search(). */ static char * show_opt_string(struct searchinfo *si) { char *opt_string; int index; index = (si->val == UNUSED) ? 
si->vcnt : si->val; switch (si->mode) { case SEARCH_USHORT: opt_string = si->s_parms.s_ushort.opt_string[index]; break; case SEARCH_UINT: opt_string = si->s_parms.s_uint.opt_string[index]; break; case SEARCH_ULONG: default: opt_string = si->s_parms.s_ulong.opt_string[index]; break; } if (!opt_string) return ""; else if (FIRSTCHAR(opt_string) == '(') return opt_string; else { sprintf(si->buf, "(%s)", opt_string); return si->buf; } } #define SEARCHMASK(X) ((X) | mask) static void display_with_pre_and_post(void *bufptr, ulonglong addr, struct searchinfo *si) { int ctx, memtype, t, amount; ulonglong addr_d; ulong flag; char buf[BUFSIZE]; ctx = si->context; memtype = si->memtype; flag = HEXADECIMAL|NO_ERROR|ASCII_ENDLINE; switch (si->mode) { case SEARCH_USHORT: t = sizeof(ushort); break; case SEARCH_UINT: t = sizeof(uint); break; case SEARCH_ULONG: default: t = sizeof(ulong); break; } switch (t) { case 8: flag |= DISPLAY_64; break; case 4: flag |= DISPLAY_32; break; case 2: flag |= DISPLAY_16; break; } amount = ctx * t; addr_d = addr - amount; display_memory(addr_d, ctx, flag, memtype, NULL); BZERO(buf, BUFSIZE); fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); switch(si->mode) { case SEARCH_ULONG: fprintf(fp, "%lx %s\n", *((ulong *)bufptr), show_opt_string(si)); break; case SEARCH_UINT: fprintf(fp, "%x %s\n", *((uint *)bufptr), show_opt_string(si)); break; case SEARCH_USHORT: fprintf(fp, "%x %s\n", *((ushort *)bufptr), show_opt_string(si)); break; } addr_d = addr + t; display_memory(addr_d, ctx, flag, memtype, NULL); fprintf(fp, "\n"); } static ulong search_ulong(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; ulong mask = si->s_parms.s_ulong.mask; for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*bufptr) == SEARCHMASK(si->s_parms.s_ulong.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(bufptr, addr, si); else fprintf(fp, "%lx: %lx %s\n", addr, *bufptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_ulong_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; ulong mask = si->s_parms.s_ulong.mask; for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*bufptr) == SEARCHMASK(si->s_parms.s_ulong.value[si->val])) { if (si->context) display_with_pre_and_post(bufptr, addr, si); else fprintf(fp, "%llx: %lx %s\n", addr, *bufptr, show_opt_string(si)); } } } return addr; } static ulong search_uint(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(int)); uint *ptr = (uint *)bufptr; uint mask = si->s_parms.s_uint.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_uint.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%lx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_uint_p(ulong *bufptr, ulonglong addr, int 
longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(int)); uint *ptr = (uint *)bufptr; uint mask = si->s_parms.s_uint.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_uint.value[si->val])) { if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%llx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } static ulong search_ushort(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(short)); ushort *ptr = (ushort *)bufptr; ushort mask = si->s_parms.s_ushort.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_ushort.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%lx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_ushort_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(short)); ushort *ptr = (ushort *)bufptr; ushort mask = si->s_parms.s_ushort.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_ushort.value[si->val])) { if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%llx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* * String search "memory" to remember possible matches that cross * page (or search buffer) boundaries. * The cross_match zone is the last strlen-1 chars of the page for * each of the possible targets. */ struct cross_match { int cnt; /* possible hits in the cross_match zone */ ulong addr; /* starting addr of crossing match zone for this target */ ulonglong addr_p; /* for physical search */ char hit[BUFSIZE]; /* array of hit locations in the crossing match zone */ /* This should really be the much-smaller MAXARGLEN, but * no one seems to be enforcing that in the parser. */ } cross[MAXARGS]; ulong cross_match_next_addr; /* the expected starting value of the next page */ ulonglong cross_match_next_addr_p; /* the expected starting value of the next physical page */ #define CHARS_CTX 56 static void report_match(struct searchinfo *si, ulong addr, char *ptr1, int len1, char *ptr2, int len2) { int i; if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } fprintf(fp, "%lx: ", addr); for (i = 0; i < len1; i++) { if (isprint(ptr1[i])) fprintf(fp, "%c", ptr1[i]); else fprintf(fp, "."); } for (i = 0; i < len2; i++) { if (isprint(ptr2[i])) fprintf(fp, "%c", ptr2[i]); else fprintf(fp, "."); } fprintf(fp, "\n"); } static ulong search_chars(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i, j; int len; char *target; int charcnt = longcnt * sizeof(long); char *ptr = (char *)bufptr; /* is this the first page of this search? 
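 *
 * Cross-page bookkeeping, by example: searching for "crash" when a
 * page ends in ...cra sets cross[j].hit[1] for that target; if the
 * next page read then begins with "sh", the hit is reported at the
 * remembered address where "cra" began, one page back.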
*/ if (si->s_parms.s_chars.started_flag == 0) { for (j = 0; j < si->vcnt; j++) { cross[j].cnt = 0; /* no hits */ } cross_match_next_addr = (ulong)-1; /* no page match for first page */ si->s_parms.s_chars.started_flag++; } if (cross_match_next_addr == addr) { for (j = 0; j < si->vcnt; j++) { if (cross[j].cnt) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; for (i = 0; i < len - 1; i++) { if (cross[j].hit[i] && !strncmp(&target[len - 1 - i], ptr, i + 1)) report_match(si, cross[j].addr + i, target, len, &ptr[i+1], CHARS_CTX - len); } } } } /* set up for possible cross matches on this page */ cross_match_next_addr = addr + charcnt; for (j = 0; j < si->vcnt; j++) { len = si->s_parms.s_chars.len[j]; cross[j].cnt = 0; cross[j].addr = addr + longcnt * sizeof(long) - (len - 1); for (i = 0; i < len - 1; i++) cross[j].hit[i] = 0; } for (i = 0; i < charcnt; i++, ptr++, addr++) { for (j = 0; j < si->vcnt; j++) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; if ((i + len) > charcnt) { /* check for cross match */ if (!strncmp(target, ptr, charcnt - i)) { cross[j].hit[len + i - charcnt - 1] = 1; cross[j].cnt++; } } else { if (!strncmp(target, ptr, len)) { int slen = CHARS_CTX; if ((i + CHARS_CTX) > charcnt) slen = charcnt - i; report_match(si, addr, ptr, slen, (char *)0, 0); } } } } return addr; } static void report_match_p(ulonglong addr, char *ptr1, int len1, char *ptr2, int len2) { int i; fprintf(fp, "%llx: ", addr); for (i = 0; i < len1; i++) { if (isprint(ptr1[i])) fprintf(fp, "%c", ptr1[i]); else fprintf(fp, "."); } for (i = 0; i < len2; i++) { if (isprint(ptr2[i])) fprintf(fp, "%c", ptr2[i]); else fprintf(fp, "."); } fprintf(fp, "\n"); } static ulonglong search_chars_p(ulong *bufptr, ulonglong addr_p, int longcnt, struct searchinfo *si) { int i, j; int len; char *target; int charcnt = longcnt * sizeof(long); char *ptr = (char *)bufptr; /* is this the first page of this search? 
*/ if (si->s_parms.s_chars.started_flag == 0) { for (j = 0; j < si->vcnt; j++) { cross[j].cnt = 0; /* no hits */ } cross_match_next_addr_p = (ulonglong)-1; /* no page match for first page */ si->s_parms.s_chars.started_flag++; } if (cross_match_next_addr_p == addr_p) { for (j = 0; j < si->vcnt; j++) { if (cross[j].cnt) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; for (i = 0; i < len - 1; i++) { if (cross[j].hit[i] && !strncmp(&target[len - 1 - i], ptr, i + 1)) report_match_p(cross[j].addr_p + i, target, len, &ptr[i+1], CHARS_CTX - len); } } } } /* set up for possible cross matches on this page */ cross_match_next_addr_p = addr_p + charcnt; for (j = 0; j < si->vcnt; j++) { len = si->s_parms.s_chars.len[j]; cross[j].cnt = 0; cross[j].addr_p = addr_p + longcnt * sizeof(long) - (len - 1); for (i = 0; i < len - 1; i++) cross[j].hit[i] = 0; } for (i = 0; i < charcnt; i++, ptr++, addr_p++) { for (j = 0; j < si->vcnt; j++) { target = si->s_parms.s_chars.value[j]; len = si->s_parms.s_chars.len[j]; if ((i + len) > charcnt) { /* check for cross match */ if (!strncmp(target, ptr, charcnt - i)) { cross[j].hit[len + i - charcnt - 1] = 1; cross[j].cnt++; } } else { if (!strncmp(target, ptr, len)) { int slen = CHARS_CTX; if ((i + CHARS_CTX) > charcnt) slen = charcnt - i; report_match_p(addr_p, ptr, slen, (char *)0, 0); } } } } return addr_p; } static void search_virtual(struct searchinfo *si) { ulong start, end; ulong pp, next, *ubp; int wordcnt, lastpage; ulong page; physaddr_t paddr; char *pagebuf; ulong pct, pages_read, pages_checked; time_t begin, finish; start = si->vaddr_start; end = si->vaddr_end; pages_read = pages_checked = 0; begin = finish = 0; pagebuf = GETBUF(PAGESIZE()); if (start & (sizeof(long)-1)) { start &= ~(sizeof(long)-1); error(INFO, "rounding down start address to: %lx\n", start); } if (CRASHDEBUG(1)) { begin = time(NULL); fprintf(fp, "search_virtual: start: %lx end: %lx\n", start, end); } next = start; for (pp = VIRTPAGEBASE(start); next < end; next = pp) { pages_checked++; lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end)); if (LKCD_DUMPFILE()) set_lkcd_nohash(); /* * Keep it virtual for Xen hypervisor. 
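 *
 * (There is no mem_map/phys_to_page sanity check to apply against
 * the hypervisor, so its pages are read through their virtual
 * addresses directly and the uvtop/kvtop translation below is
 * bypassed.)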
*/ if (XEN_HYPER_MODE()) { if (!readmem(pp, KVADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { if (CRASHDEBUG(1)) fprintf(fp, "search suspended at: %lx\n", pp); goto done; } goto virtual; } switch (si->memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || !phys_to_page(paddr, &page)) { if (!next_upage(CURRENT_CONTEXT(), pp, &pp)) goto done; continue; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) || !phys_to_page(paddr, &page)) { if (!next_kpage(pp, &pp)) goto done; continue; } break; } if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { pp += PAGESIZE(); continue; } virtual: pages_read++; ubp = (ulong *)&pagebuf[next - pp]; if (lastpage) { if (end == (ulong)(-1)) wordcnt = PAGESIZE()/sizeof(long); else wordcnt = (end - next)/sizeof(long); } else wordcnt = (PAGESIZE() - (next - pp))/sizeof(long); switch (si->mode) { case SEARCH_ULONG: next = search_ulong(ubp, next, wordcnt, si); break; case SEARCH_UINT: next = search_uint(ubp, next, wordcnt, si); break; case SEARCH_USHORT: next = search_ushort(ubp, next, wordcnt, si); break; case SEARCH_CHARS: next = search_chars(ubp, next, wordcnt, si); break; default: /* unimplemented search type */ next += wordcnt * (sizeof(long)); break; } if (CRASHDEBUG(1)) if ((pp % (1024*1024)) == 0) console("%lx\n", pp); pp += PAGESIZE(); } done: if (CRASHDEBUG(1)) { finish = time(NULL); pct = (pages_read * 100)/pages_checked; fprintf(fp, "search_virtual: read %ld (%ld%%) of %ld pages checked in %ld seconds\n", pages_read, pct, pages_checked, finish - begin); } FREEBUF(pagebuf); } static void search_physical(struct searchinfo *si) { ulonglong start_in, end_in; ulong *ubp; int wordcnt, lastpage; ulonglong pnext, ppp; char *pagebuf; ulong pct, pages_read, pages_checked; time_t begin, finish; ulong page; start_in = si->paddr_start; end_in = si->paddr_end; pages_read = pages_checked = 0; begin = finish = 0; pagebuf = GETBUF(PAGESIZE()); if (start_in & (sizeof(ulonglong)-1)) { start_in &= ~(sizeof(ulonglong)-1); error(INFO, "rounding down start address to: %llx\n", (ulonglong)start_in); } if (CRASHDEBUG(1)) { begin = time(NULL); fprintf(fp, "search_physical: start: %llx end: %llx\n", start_in, end_in); } pnext = start_in; for (ppp = PHYSPAGEBASE(start_in); pnext < end_in; pnext = ppp) { pages_checked++; lastpage = (PHYSPAGEBASE(pnext) == PHYSPAGEBASE(end_in)); if (LKCD_DUMPFILE()) set_lkcd_nohash(); if (!phys_to_page(ppp, &page) || !readmem(ppp, PHYSADDR, pagebuf, PAGESIZE(), "search page", RETURN_ON_ERROR|QUIET)) { if (!next_physpage(ppp, &ppp)) break; continue; } pages_read++; ubp = (ulong *)&pagebuf[pnext - ppp]; if (lastpage) { if (end_in == (ulonglong)(-1)) wordcnt = PAGESIZE()/sizeof(long); else wordcnt = (end_in - pnext)/sizeof(long); } else wordcnt = (PAGESIZE() - (pnext - ppp))/sizeof(long); switch (si->mode) { case SEARCH_ULONG: pnext = search_ulong_p(ubp, pnext, wordcnt, si); break; case SEARCH_UINT: pnext = search_uint_p(ubp, pnext, wordcnt, si); break; case SEARCH_USHORT: pnext = search_ushort_p(ubp, pnext, wordcnt, si); break; case SEARCH_CHARS: pnext = search_chars_p(ubp, pnext, wordcnt, si); break; default: /* unimplemented search type */ pnext += wordcnt * (sizeof(long)); break; } ppp += PAGESIZE(); } if (CRASHDEBUG(1)) { finish = time(NULL); pct = (pages_read * 100)/pages_checked; fprintf(fp, "search_physical: read %ld (%ld%%) of %ld pages checked in %ld seconds\n", pages_read, pct, pages_checked, finish - begin); } FREEBUF(pagebuf); } /* * Return the next 
mapped user virtual address page that comes after * the passed-in address. */ static int next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr) { ulong vma, total_vm; char *vma_buf; ulong vm_start, vm_end; ulong vm_next; if (!tc->mm_struct) return FALSE; fill_mm_struct(tc->mm_struct); vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); if (!vma || (total_vm == 0)) return FALSE; vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ for ( ; vma; vma = vm_next) { vma_buf = fill_vma_cache(vma); vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next)); if (vaddr <= vm_start) { *nextvaddr = vm_start; return TRUE; } if ((vaddr > vm_start) && (vaddr < vm_end)) { *nextvaddr = vaddr; return TRUE; } } return FALSE; } /* * Return the next mapped kernel virtual address in the vmlist * that is equal to or comes after the passed-in address. * Prevent repeated calls to dump_vmlist() by only doing it * one time for dumpfiles, or one time per (active) command. */ static int next_vmlist_vaddr(ulong vaddr, ulong *nextvaddr) { int i, retval; ulong cnt; struct meminfo meminfo, *mi; static int count = 0; static struct vmlist *vmlist = NULL; static ulong cmdgencur = BADVAL; /* * Search the stashed vmlist if possible. */ if (vmlist && ACTIVE()) { if (pc->cmdgencur != cmdgencur) { free(vmlist); vmlist = NULL; } } if (vmlist) { for (i = 0, retval = FALSE; i < count; i++) { if (vaddr <= vmlist[i].addr) { *nextvaddr = vmlist[i].addr; retval = TRUE; break; } if (vaddr < (vmlist[i].addr + vmlist[i].size)) { *nextvaddr = vaddr; retval = TRUE; break; } } return retval; } mi = &meminfo; BZERO(mi, sizeof(struct meminfo)); mi->flags = GET_VMLIST_COUNT; dump_vmlist(mi); cnt = mi->retval; if (!cnt) return FALSE; mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*cnt); mi->flags = GET_VMLIST; dump_vmlist(mi); for (i = 0, retval = FALSE; i < cnt; i++) { if (vaddr <= mi->vmlist[i].addr) { *nextvaddr = mi->vmlist[i].addr; retval = TRUE; break; } if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) { *nextvaddr = vaddr; retval = TRUE; break; } } if (!vmlist) { vmlist = (struct vmlist *) malloc(sizeof(struct vmlist)*cnt); if (vmlist) { BCOPY(mi->vmlist, vmlist, sizeof(struct vmlist)*cnt); count = cnt; cmdgencur = pc->cmdgencur; } } FREEBUF(mi->vmlist); return retval; } /* * Determine whether a virtual address is inside a vmlist segment. */ int in_vmlist_segment(ulong vaddr) { ulong next; if (next_vmlist_vaddr(vaddr, &next) && (vaddr == next)) return TRUE; return FALSE; } /* * Return the next kernel module virtual address that is * equal to or comes after the passed-in address. */ static int next_module_vaddr(ulong vaddr, ulong *nextvaddr) { int i; ulong start, end; struct load_module *lm; for (i = 0; i < st->mods_installed; i++) { lm = &st->load_modules[i]; start = lm->mod_base; end = lm->mod_base + lm->mod_size; if (vaddr >= end) continue; /* * Either below or in this module. */ if (vaddr < start) *nextvaddr = start; else *nextvaddr = vaddr; return TRUE; } return FALSE; } /* * Return the next kernel virtual address page in a designated * kernel virtual address range that comes after the passed-in, * untranslatable, address. 
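 *
 * The skip strategy depends on the range type that cmd_search()
 * stashed in pc->curcmd_private: start-map and vmemmap ranges just
 * advance one page, while unity-map, vmalloc and module ranges ask
 * next_identity_mapping(), the vmlist, or the module list for the
 * next mapped segment.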
*/ static int next_kpage(ulong vaddr, ulong *nextvaddr) { ulong vaddr_orig; vaddr_orig = vaddr; vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE(); /* first possible page */ if (vaddr < vaddr_orig) /* wrapped back to zero? */ return FALSE; switch (pc->curcmd_private) { case KVADDR_UNITY_MAP: return next_identity_mapping(vaddr, nextvaddr); case KVADDR_VMALLOC: return next_vmlist_vaddr(vaddr, nextvaddr); case KVADDR_VMEMMAP: *nextvaddr = vaddr; return TRUE; case KVADDR_START_MAP: *nextvaddr = vaddr; return TRUE; case KVADDR_MODULES: return next_module_vaddr(vaddr, nextvaddr); } return FALSE; } /* * Return the next physical address page that comes after * the passed-in, unreadable, address. */ static int next_physpage(ulonglong paddr, ulonglong *nextpaddr) { int n; ulonglong node_start; ulonglong node_end; struct node_table *nt; for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_start = nt->start_paddr; node_end = nt->start_paddr + (nt->size * PAGESIZE()); if (paddr >= node_end) continue; if (paddr < node_start) { *nextpaddr = node_start; return TRUE; } if (paddr < node_end) { *nextpaddr = paddr + PAGESIZE(); return TRUE; } } return FALSE; } static int get_hugetlb_total_pages(ulong *nr_total_pages, ulong *nr_total_free_pages) { ulong hstate_p, vaddr; int i, len; ulong nr_huge_pages; ulong free_huge_pages; uint horder; *nr_total_pages = *nr_total_free_pages = 0; if (kernel_symbol_exists("hstates")) { if (INVALID_SIZE(hstate) || INVALID_MEMBER(hstate_order) || INVALID_MEMBER(hstate_nr_huge_pages) || INVALID_MEMBER(hstate_free_huge_pages)) return FALSE; len = get_array_length("hstates", NULL, 0); hstate_p = symbol_value("hstates"); for (i = 0; i < len; i++) { vaddr = hstate_p + (SIZE(hstate) * i); readmem(vaddr + OFFSET(hstate_order), KVADDR, &horder, sizeof(uint), "hstate_order", FAULT_ON_ERROR); if (!horder) continue; readmem(vaddr + OFFSET(hstate_nr_huge_pages), KVADDR, &nr_huge_pages, sizeof(ulong), "hstate_nr_huge_pages", FAULT_ON_ERROR); readmem(vaddr + OFFSET(hstate_free_huge_pages), KVADDR, &free_huge_pages, sizeof(ulong), "hstate_free_huge_pages", FAULT_ON_ERROR); *nr_total_pages += nr_huge_pages * (1 << horder); *nr_total_free_pages += free_huge_pages * (1 << horder); } } else if (kernel_symbol_exists("nr_huge_pages")) { unsigned long hpage_shift = 21; if ((machine_type("X86") && !(machdep->flags & PAE))) hpage_shift = 22; get_symbol_data("nr_huge_pages", sizeof(ulong), &nr_huge_pages); get_symbol_data("free_huge_pages", sizeof(ulong), &free_huge_pages); *nr_total_pages = nr_huge_pages * ((1 << hpage_shift) / machdep->pagesize); *nr_total_free_pages = free_huge_pages * ((1 << hpage_shift) / machdep->pagesize); } return TRUE; } /* * Display swap statistics. */ void cmd_swap(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); dump_swap_info(VERBOSE, NULL, NULL); } /* * Do the work for cmd_swap(). 
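 *
 * Size columns are reported in kilobytes: "pages <<= (PAGESHIFT() - 10)"
 * turns a page count into kB (a shift by 2 with 4K pages), so a
 * 262144-page swap device prints as 1048576k, and PCT is then
 * used-kB * 100 / total-kB.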
*/ #define SWP_USED 1 #define SWAP_MAP_BAD 0x8000 char *swap_info_hdr = \ "SWAP_INFO_STRUCT TYPE SIZE USED PCT PRI FILENAME\n"; static int dump_swap_info(ulong swapflags, ulong *totalswap_pages, ulong *totalused_pages) { int i, j; int swap_device, prio; ulong pages, usedswap; ulong flags, swap_file, max, swap_map, pct; ulong vfsmnt; ulong swap_info, swap_info_ptr; ushort *smap; ulong inuse_pages, totalswap, totalused; char *devname; char buf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; if (!symbol_exists("nr_swapfiles")) error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n"); if (!symbol_exists("swap_info")) error(FATAL, "swap_info doesn't exist in this kernel!\n"); swap_info_init(); swap_info = symbol_value("swap_info"); if (swapflags & VERBOSE) fprintf(fp, "%s", swap_info_hdr); totalswap = totalused = 0; for (i = 0; i < vt->nr_swapfiles; i++, swap_info += (vt->flags & SWAPINFO_V1 ? SIZE(swap_info_struct) : sizeof(void *))) { if (vt->flags & SWAPINFO_V2) { if (!readmem(swap_info, KVADDR, &swap_info_ptr, sizeof(void *), "swap_info pointer", QUIET|RETURN_ON_ERROR)) continue; if (!swap_info_ptr) continue; fill_swap_info(swap_info_ptr); } else fill_swap_info(swap_info); if (MEMBER_SIZE("swap_info_struct", "flags") == sizeof(uint)) flags = UINT(vt->swap_info_struct + OFFSET(swap_info_struct_flags)); else flags = ULONG(vt->swap_info_struct + OFFSET(swap_info_struct_flags)); if (!(flags & SWP_USED)) continue; swap_file = ULONG(vt->swap_info_struct + OFFSET(swap_info_struct_swap_file)); swap_device = INT(vt->swap_info_struct + OFFSET_OPTION(swap_info_struct_swap_device, swap_info_struct_old_block_size)); pages = INT(vt->swap_info_struct + OFFSET(swap_info_struct_pages)); totalswap += pages; pages <<= (PAGESHIFT() - 10); inuse_pages = 0; if (MEMBER_SIZE("swap_info_struct", "prio") == sizeof(short)) prio = SHORT(vt->swap_info_struct + OFFSET(swap_info_struct_prio)); else prio = INT(vt->swap_info_struct + OFFSET(swap_info_struct_prio)); if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int)) max = UINT(vt->swap_info_struct + OFFSET(swap_info_struct_max)); else max = ULONG(vt->swap_info_struct + OFFSET(swap_info_struct_max)); if (VALID_MEMBER(swap_info_struct_inuse_pages)) { if (MEMBER_SIZE("swap_info_struct", "inuse_pages") == sizeof(int)) inuse_pages = UINT(vt->swap_info_struct + OFFSET(swap_info_struct_inuse_pages)); else inuse_pages = ULONG(vt->swap_info_struct + OFFSET(swap_info_struct_inuse_pages)); } swap_map = ULONG(vt->swap_info_struct + OFFSET(swap_info_struct_swap_map)); if (swap_file) { if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) { vfsmnt = ULONG(vt->swap_info_struct + OFFSET(swap_info_struct_swap_vfsmnt)); get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt); } else if (VALID_MEMBER (swap_info_struct_old_block_size)) { devname = vfsmount_devname(file_to_vfsmnt(swap_file), buf1, BUFSIZE); get_pathname(file_to_dentry(swap_file), buf, BUFSIZE, 1, file_to_vfsmnt(swap_file)); if ((STREQ(devname, "devtmpfs") || STREQ(devname, "udev")) && !STRNEQ(buf, "/dev/")) string_insert("/dev", buf); } else { get_pathname(swap_file, buf, BUFSIZE, 1, 0); } } else sprintf(buf, "(unknown)"); smap = NULL; if (vt->flags & SWAPINFO_V1) { smap = (ushort *)GETBUF(sizeof(ushort) * max); if (!readmem(swap_map, KVADDR, smap, sizeof(ushort) * max, "swap_info swap_map data", RETURN_ON_ERROR|QUIET)) { if (swapflags & RETURN_ON_ERROR) { *totalswap_pages = swap_map; *totalused_pages = i; FREEBUF(smap); return FALSE; } else error(FATAL, 
"swap_info[%d].swap_map at %lx is inaccessible\n", i, swap_map); } } usedswap = 0; if (smap) { for (j = 0; j < max; j++) { switch (smap[j]) { case SWAP_MAP_BAD: case 0: continue; default: usedswap++; } } FREEBUF(smap); } else usedswap = inuse_pages; totalused += usedswap; usedswap <<= (PAGESHIFT() - 10); pct = (usedswap * 100)/pages; if (swapflags & VERBOSE) { sprintf(buf1, "%lx", (vt->flags & SWAPINFO_V2) ? swap_info_ptr : swap_info); sprintf(buf2, "%ldk", pages); sprintf(buf3, "%ldk", usedswap); sprintf(buf4, "%2ld%%", pct); sprintf(buf5, "%d", prio); fprintf(fp, "%s %s %s %s %s %s %s\n", mkstring(buf1, MAX(VADDR_PRLEN, strlen("SWAP_INFO_STRUCT")), CENTER|LJUST, NULL), swap_device ? "PARTITION" : " FILE ", mkstring(buf2, 10, CENTER|RJUST, NULL), mkstring(buf3, 10, CENTER|RJUST, NULL), mkstring(buf4, 4, CENTER|RJUST, NULL), mkstring(buf5, 4, RJUST, NULL), buf); } } if (totalswap_pages) *totalswap_pages = totalswap; if (totalused_pages) *totalused_pages = totalused; return TRUE; } /* * Determine the swap_info_struct usage. */ static void swap_info_init(void) { struct gnu_request *req; if (vt->flags & (SWAPINFO_V1|SWAPINFO_V2)) return; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); if ((get_symbol_type("swap_info", NULL, req) == TYPE_CODE_ARRAY) && ((req->target_typecode == TYPE_CODE_PTR) || (req->target_typecode == TYPE_CODE_STRUCT))) { switch (req->target_typecode) { case TYPE_CODE_STRUCT: vt->flags |= SWAPINFO_V1; break; case TYPE_CODE_PTR: vt->flags |= SWAPINFO_V2; break; } } else { if (THIS_KERNEL_VERSION >= LINUX(2,6,33)) vt->flags |= SWAPINFO_V2; else vt->flags |= SWAPINFO_V1; } FREEBUF(req); } /* * Translate a PTE into a swap device and offset string. */ char * swap_location(ulonglong pte, char *buf) { char swapdev[BUFSIZE]; if (!pte) return NULL; if (!symbol_exists("nr_swapfiles") || !symbol_exists("swap_info")) return NULL; if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) sprintf(buf, "%s OFFSET: %lld", get_swapdev(__swp_type(pte), swapdev), (ulonglong)__swp_offset(pte)); else sprintf(buf, "%s OFFSET: %llx", get_swapdev(SWP_TYPE(pte), swapdev), (ulonglong)SWP_OFFSET(pte)); return buf; } /* * Given the type field from a PTE, return the name of the swap device. */ static char * get_swapdev(ulong type, char *buf) { unsigned int i, swap_info_len; ulong swap_info, swap_info_ptr, swap_file; struct syment *sp; ulong vfsmnt; char *devname; char buf1[BUFSIZE]; swap_info_init(); swap_info = symbol_value("swap_info"); swap_info_len = (i = ARRAY_LENGTH(swap_info)) ? i : get_array_length("swap_info", NULL, 0); /* * Even though the swap_info[] array is declared statically as: * * struct swap_info_struct *swap_info[MAX_SWAPFILES]; * * the dimension may not be shown by the debuginfo data, * for example: * * struct swap_info_struct *swap_info[28]; * or * struct swap_info_struct *swap_info[]; * * In that case, calculate its length by checking the next * symbol's value. 
	if ((swap_info_len == 0) && (vt->flags & SWAPINFO_V2) &&
	    (sp = next_symbol("swap_info", NULL)))
		swap_info_len = (sp->value - swap_info) / sizeof(void *);

	sprintf(buf, "(unknown swap location)");

	if (type >= swap_info_len)
		return buf;

	switch (vt->flags & (SWAPINFO_V1|SWAPINFO_V2)) {
	case SWAPINFO_V1:
		swap_info += type * SIZE(swap_info_struct);
		fill_swap_info(swap_info);
		break;

	case SWAPINFO_V2:
		swap_info += type * sizeof(void *);
		if (!readmem(swap_info, KVADDR, &swap_info_ptr,
		    sizeof(void *), "swap_info pointer",
		    RETURN_ON_ERROR|QUIET))
			return buf;
		if (!swap_info_ptr)
			return buf;
		fill_swap_info(swap_info_ptr);
		break;
	}

	swap_file = ULONG(vt->swap_info_struct +
		OFFSET(swap_info_struct_swap_file));

	if (swap_file) {
		if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
			vfsmnt = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_swap_vfsmnt));
			get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
		} else if (VALID_MEMBER(swap_info_struct_old_block_size)) {
			devname = vfsmount_devname(file_to_vfsmnt(swap_file),
				buf1, BUFSIZE);
			get_pathname(file_to_dentry(swap_file),
				buf, BUFSIZE, 1, file_to_vfsmnt(swap_file));
			if ((STREQ(devname, "devtmpfs") ||
			     STREQ(devname, "udev")) &&
			    !STRNEQ(buf, "/dev/"))
				string_insert("/dev", buf);
		} else {
			get_pathname(swap_file, buf, BUFSIZE, 1, 0);
		}
	}

	return buf;
}

/*
 * If not currently stashed, cache the passed-in swap_info_struct.
 */
static void
fill_swap_info(ulong swap_info)
{
	if (vt->last_swap_read == swap_info)
		return;

	if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *)
	    malloc(SIZE(swap_info_struct))))
		error(FATAL, "cannot malloc swap_info_struct space\n");

	readmem(swap_info, KVADDR, vt->swap_info_struct,
		SIZE(swap_info_struct), "fill_swap_info", FAULT_ON_ERROR);

	vt->last_swap_read = swap_info;
}

/*
 * If active, clear the cached swap_info reference.
 */
void
clear_swap_info_cache(void)
{
	if (ACTIVE())
		vt->last_swap_read = 0;
}

/*
 * Translate a vm_area_struct and virtual address into a filename
 * and offset string.
 */

#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change! */

static char *
vma_file_offset(ulong vma, ulong vaddr, char *buf)
{
	ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset;
	ulong vfsmnt;
	char file[BUFSIZE];
	char *vma_buf, *file_buf;

	if (!vma)
		return NULL;

	vma_buf = fill_vma_cache(vma);

	vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));

	if (!vm_file)
		goto no_file_offset;

	file_buf = fill_file_cache(vm_file);
	dentry = ULONG(file_buf + OFFSET(file_f_dentry));

	if (!dentry)
		goto no_file_offset;

	file[0] = NULLCHAR;
	if (VALID_MEMBER(file_f_vfsmnt)) {
		vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt));
		get_pathname(dentry, file, BUFSIZE, 1, vfsmnt);
	} else
		get_pathname(dentry, file, BUFSIZE, 1, 0);

	if (!strlen(file))
		goto no_file_offset;

	vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));

	vm_offset = vm_pgoff = 0xdeadbeef;

	if (VALID_MEMBER(vm_area_struct_vm_offset))
		vm_offset = ULONG(vma_buf +
			OFFSET(vm_area_struct_vm_offset));
	else if (VALID_MEMBER(vm_area_struct_vm_pgoff))
		vm_pgoff = ULONG(vma_buf +
			OFFSET(vm_area_struct_vm_pgoff));
	else
		goto no_file_offset;

	offset = 0;
	if (vm_offset != 0xdeadbeef)
		offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset;
	else if (vm_pgoff != 0xdeadbeef) {
		offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff;
		offset <<= PAGE_CACHE_SHIFT;
	}

	sprintf(buf, "%s OFFSET: %lx", file, offset);

	return buf;

no_file_offset:
	return NULL;
}

/*
 * Translate a PTE into its physical address and flags.
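 *
 * Each argument is parsed as a hex PTE value and handed to the
 * architecture's translate_pte() handler, so a session might look
 * like (hypothetical value):
 *
 *	crash> pte 2f86067
 *
 * with the handler decoding the physical page plus the per-arch
 * flag bits, or the swap type and offset if the page is not present.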
*/ void cmd_pte(void) { int c; ulonglong pte; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { pte = htoll(args[optind], FAULT_ON_ERROR, NULL); machdep->translate_pte((ulong)pte, NULL, pte); optind++; } } static char *node_zone_hdr = "ZONE NAME SIZE"; /* * On systems supporting memory nodes, display the basic per-node data. */ static void dump_memory_nodes(int initialize) { int i, j; int n, id, node, flen, slen, badaddr; ulong node_mem_map; ulong temp_node_start_paddr; ulonglong node_start_paddr; ulong node_start_pfn; ulong node_start_mapnr; ulong node_spanned_pages, node_present_pages; ulong free_pages, zone_size, node_size, cum_zone_size; ulong zone_start_paddr, zone_start_mapnr, zone_mem_map; physaddr_t phys; ulong pp; ulong zone_start_pfn; ulong bdata; ulong pgdat; ulong node_zones; ulong value; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; struct node_table *nt; node = slen = 0; if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) { nt = &vt->node_table[0]; nt->node_id = 0; if (symbol_exists("contig_page_data")) nt->pgdat = symbol_value("contig_page_data"); else nt->pgdat = 0; nt->size = vt->total_pages; nt->mem_map = vt->mem_map; nt->start_paddr = 0; nt->start_mapnr = 0; if (CRASHDEBUG(1)) { fprintf(fp, "node_table[%d]: \n", 0); fprintf(fp, " id: %d\n", nt->node_id); fprintf(fp, " pgdat: %lx\n", nt->pgdat); fprintf(fp, " size: %ld\n", nt->size); fprintf(fp, " present: %ld\n", nt->present); fprintf(fp, " mem_map: %lx\n", nt->mem_map); fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr); } return; } if (initialize) { pgdat = UNINITIALIZED; /* * This order may have to change based upon architecture... 
*/ if (symbol_exists("pgdat_list") && (VALID_MEMBER(pglist_data_node_next) || VALID_MEMBER(pglist_data_pgdat_next))) { get_symbol_data("pgdat_list", sizeof(void *), &pgdat); vt->flags &= ~NODES_ONLINE; } else if (vt->flags & NODES_ONLINE) { if ((node = next_online_node(0)) < 0) { error(WARNING, "cannot determine first node from node_online_map\n\n"); return; } if (!(pgdat = next_online_pgdat(node))) { error(WARNING, "cannot determine pgdat list for this kernel/architecture\n\n"); return; } } } else pgdat = vt->node_table[0].pgdat; if (initialize && (pgdat == UNINITIALIZED)) { error(WARNING, "cannot initialize pgdat list\n\n"); return; } for (n = 0, badaddr = FALSE; pgdat; n++) { if (n >= vt->numnodes) error(FATAL, "numnodes out of sync with pgdat_list?\n"); nt = &vt->node_table[n]; readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id, sizeof(int), "pglist node_id", FAULT_ON_ERROR); if (VALID_MEMBER(pglist_data_node_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &node_mem_map, sizeof(ulong), "node_mem_map", FAULT_ON_ERROR); } else { node_mem_map = BADADDR; badaddr = TRUE; } if (VALID_MEMBER(pglist_data_node_start_paddr)) { readmem(pgdat+OFFSET(pglist_data_node_start_paddr), KVADDR, &temp_node_start_paddr, sizeof(ulong), "pglist node_start_paddr", FAULT_ON_ERROR); node_start_paddr = temp_node_start_paddr; } else if (VALID_MEMBER(pglist_data_node_start_pfn)) { readmem(pgdat+OFFSET(pglist_data_node_start_pfn), KVADDR, &node_start_pfn, sizeof(ulong), "pglist node_start_pfn", FAULT_ON_ERROR); node_start_mapnr = node_start_pfn; node_start_paddr = PTOB(node_start_pfn); if (badaddr && IS_SPARSEMEM()) { if (!verify_pfn(node_start_pfn)) error(WARNING, "questionable node_start_pfn: %lx\n", node_start_pfn); phys = PTOB(node_start_pfn); if (phys_to_page(phys, &pp)) node_mem_map = pp; } } else error(INFO, "cannot determine zone starting physical address\n"); if (VALID_MEMBER(pglist_data_node_start_mapnr)) readmem(pgdat+OFFSET(pglist_data_node_start_mapnr), KVADDR, &node_start_mapnr, sizeof(ulong), "pglist node_start_mapnr", FAULT_ON_ERROR); if (VALID_MEMBER(pglist_data_node_size)) readmem(pgdat+OFFSET(pglist_data_node_size), KVADDR, &node_size, sizeof(ulong), "pglist node_size", FAULT_ON_ERROR); else if (VALID_MEMBER(pglist_data_node_spanned_pages)) { readmem(pgdat+OFFSET(pglist_data_node_spanned_pages), KVADDR, &node_spanned_pages, sizeof(ulong), "pglist node_spanned_pages", FAULT_ON_ERROR); node_size = node_spanned_pages; } else error(INFO, "cannot determine zone size\n"); if (VALID_MEMBER(pglist_data_node_present_pages)) readmem(pgdat+OFFSET(pglist_data_node_present_pages), KVADDR, &node_present_pages, sizeof(ulong), "pglist node_present_pages", FAULT_ON_ERROR); else node_present_pages = 0; if (VALID_MEMBER(pglist_data_bdata)) readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR, &bdata, sizeof(ulong), "pglist bdata", FAULT_ON_ERROR); else bdata = BADADDR; if (initialize) { nt->node_id = id; nt->pgdat = pgdat; if (VALID_MEMBER(zone_struct_memsize)) nt->size = 0; /* initialize below */ else nt->size = node_size; nt->present = node_present_pages; nt->mem_map = node_mem_map; nt->start_paddr = node_start_paddr; nt->start_mapnr = node_start_mapnr; if (CRASHDEBUG(1)) { fprintf(fp, "node_table[%d]: \n", n); fprintf(fp, " id: %d\n", nt->node_id); fprintf(fp, " pgdat: %lx\n", nt->pgdat); fprintf(fp, " size: %ld\n", nt->size); fprintf(fp, " present: %ld\n", nt->present); fprintf(fp, " mem_map: %lx\n", nt->mem_map); fprintf(fp, " start_paddr: %llx\n", nt->start_paddr); fprintf(fp, " 
start_mapnr: %ld\n", nt->start_mapnr); } } if (!initialize) { if (n) { fprintf(fp, "\n"); pad_line(fp, slen, '-'); } flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA")); fprintf(fp, "%sNODE %s %s %s %s\n", n ? "\n\n" : "", mkstring(buf1, 8, CENTER, "SIZE"), mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"), mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"), mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES")); node_zones = pgdat + OFFSET(pglist_data_node_zones); sprintf(buf5, " %2d %s %s %s %s\n", id, mkstring(buf1, 8, CENTER|LJUST|LONG_DEC, MKSTR(node_size)), mkstring(buf2, flen, CENTER|LJUST|LONG_HEX, MKSTR(pgdat)), bdata == BADADDR ? mkstring(buf3, flen, CENTER, "----") : mkstring(buf3, flen, CENTER|LONG_HEX, MKSTR(bdata)), mkstring(buf4, flen, CENTER|LJUST|LONG_HEX, MKSTR(node_zones))); fprintf(fp, "%s", buf5); j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) + count_leading_spaces(buf4); for (i = 1; i < vt->nr_zones; i++) { node_zones += SIZE_OPTION(zone_struct, zone); INDENT(j); fprintf(fp, "%lx\n", node_zones); } fprintf(fp, "%s START_PADDR START_MAPNR\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(node_mem_map)), mkstring(buf2, strlen(" START_PADDR "), CENTER|LONGLONG_HEX|RJUST, MKSTR(&node_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(node_start_mapnr))); sprintf(buf2, "%s %s START_PADDR START_MAPNR", node_zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|RJUST, "MEM_MAP")); slen = strlen(buf2); fprintf(fp, "\n%s\n", buf2); } node_zones = pgdat + OFFSET(pglist_data_node_zones); cum_zone_size = 0; for (i = 0; i < vt->nr_zones; i++) { if (CRASHDEBUG(7)) fprintf(fp, "zone %d at %lx\n", i, node_zones); if (VALID_MEMBER(zone_struct_size)) readmem(node_zones+OFFSET(zone_struct_size), KVADDR, &zone_size, sizeof(ulong), "zone_struct size", FAULT_ON_ERROR); else if (VALID_MEMBER(zone_struct_memsize)) { readmem(node_zones+OFFSET(zone_struct_memsize), KVADDR, &zone_size, sizeof(ulong), "zone_struct memsize", FAULT_ON_ERROR); nt->size += zone_size; } else if (VALID_MEMBER(zone_spanned_pages)) { readmem(node_zones+ OFFSET(zone_spanned_pages), KVADDR, &zone_size, sizeof(ulong), "zone spanned_pages", FAULT_ON_ERROR); } else error(FATAL, "zone_struct has neither size nor memsize field\n"); readmem(node_zones+ OFFSET_OPTION(zone_struct_free_pages, zone_free_pages), KVADDR, &free_pages, sizeof(ulong), "zone[_struct] free_pages", FAULT_ON_ERROR); readmem(node_zones+OFFSET_OPTION(zone_struct_name, zone_name), KVADDR, &value, sizeof(void *), "zone[_struct] name", FAULT_ON_ERROR); if (!read_string(value, buf1, BUFSIZE-1)) sprintf(buf1, "(unknown) "); if (VALID_STRUCT(zone_struct)) { if (VALID_MEMBER(zone_struct_zone_start_paddr)) { readmem(node_zones+OFFSET (zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); } else { readmem(node_zones+ OFFSET(zone_struct_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); } readmem(node_zones+ OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); } else { readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (IS_SPARSEMEM()) { zone_mem_map = 0; zone_start_mapnr = 0; if (zone_size) { phys = 
PTOB(zone_start_pfn); zone_start_mapnr = phys/PAGESIZE(); } } else if (!(vt->flags & NODES) && INVALID_MEMBER(zone_zone_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &zone_mem_map, sizeof(void *), "contig_page_data mem_map", FAULT_ON_ERROR); if (zone_size) zone_mem_map += cum_zone_size * SIZE(page); } else readmem(node_zones+ OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); if (zone_mem_map) zone_start_mapnr = (zone_mem_map - node_mem_map) / SIZE(page); else if (!IS_SPARSEMEM()) zone_start_mapnr = 0; } if (IS_SPARSEMEM()) { zone_mem_map = 0; if (zone_size) { phys = PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (!(vt->flags & NODES) && INVALID_MEMBER(zone_struct_zone_mem_map) && INVALID_MEMBER(zone_zone_mem_map)) { readmem(pgdat+OFFSET(pglist_data_node_mem_map), KVADDR, &zone_mem_map, sizeof(void *), "contig_page_data mem_map", FAULT_ON_ERROR); if (zone_size) zone_mem_map += cum_zone_size * SIZE(page); else zone_mem_map = 0; } else readmem(node_zones+ OFFSET_OPTION(zone_struct_zone_mem_map, zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); if (!initialize) { fprintf(fp, " %2d %-9s %7ld ", i, buf1, zone_size); cum_zone_size += zone_size; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), LONG_HEX|RJUST,MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); } node_zones += SIZE_OPTION(zone_struct, zone); } if (initialize) { if (vt->flags & NODES_ONLINE) { if ((node = next_online_node(node+1)) < 0) pgdat = 0; else if (!(pgdat = next_online_pgdat(node))) { error(WARNING, "cannot determine pgdat list for this kernel/architecture (node %d)\n\n", node); pgdat = 0; } } else readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } else { if ((n+1) < vt->numnodes) pgdat = vt->node_table[n+1].pgdat; else pgdat = 0; } } if (n != vt->numnodes) { if (CRASHDEBUG(2)) error(NOTE, "changing numnodes from %d to %d\n", vt->numnodes, n); vt->numnodes = n; } if (IS_SPARSEMEM()) { dump_mem_sections(initialize); dump_memory_blocks(initialize); } } /* * At least verify that page-shifted physical address. 
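 *  Any bit set at or above machdep->max_physmem_bits means the pfn
 *  cannot be a real page frame; e.g., with max_physmem_bits of 46
 *  on a 64-bit machine, the mask built below covers bits 46 through
 *  63, and any pfn whose PTOB() value intersects it is rejected.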
*/ static int verify_pfn(ulong pfn) { int i; physaddr_t mask; if (!machdep->max_physmem_bits) return TRUE; mask = 0; for (i = machdep->max_physmem_bits; i < machdep->bits; i++) mask |= ((physaddr_t)1 << i); if (mask & PTOB(pfn)) return FALSE; return TRUE; } static void dump_zone_stats(void) { int i, n; ulong pgdat, node_zones; char *zonebuf; char buf1[BUFSIZE]; int ivalue; ulong value1; ulong value2; ulong value3; ulong value4; ulong value5; ulong value6; long min, low, high; value1 = value2 = value3 = value4 = value5 = value6 = 0; min = low = high = 0; pgdat = vt->node_table[0].pgdat; zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone)); vm_stat_init(); for (n = 0; pgdat; n++) { node_zones = pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (!readmem(node_zones, KVADDR, zonebuf, SIZE_OPTION(zone_struct, zone), "zone buffer", FAULT_ON_ERROR)) break; value1 = ULONG(zonebuf + OFFSET_OPTION(zone_struct_name, zone_name)); if (!read_string(value1, buf1, BUFSIZE-1)) sprintf(buf1, "(unknown) "); if (VALID_MEMBER(zone_struct_size)) value1 = value6 = ULONG(zonebuf + OFFSET(zone_struct_size)); else if (VALID_MEMBER(zone_struct_memsize)) { value1 = value6 = ULONG(zonebuf + OFFSET(zone_struct_memsize)); } else if (VALID_MEMBER(zone_spanned_pages)) { value1 = ULONG(zonebuf + OFFSET(zone_spanned_pages)); value6 = ULONG(zonebuf + OFFSET(zone_present_pages)); } else error(FATAL, "zone struct has unknown size field\n"); if (VALID_MEMBER(zone_watermark)) { if (!enumerator_value("WMARK_MIN", &min) || !enumerator_value("WMARK_LOW", &low) || !enumerator_value("WMARK_HIGH", &high)) { min = 0; low = 1; high = 2; } value2 = ULONG(zonebuf + OFFSET(zone_watermark) + (sizeof(long) * min)); value3 = ULONG(zonebuf + OFFSET(zone_watermark) + (sizeof(long) * low)); value4 = ULONG(zonebuf + OFFSET(zone_watermark) + (sizeof(long) * high)); } else { value2 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_min, zone_struct_pages_min)); value3 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_low, zone_struct_pages_low)); value4 = ULONG(zonebuf + OFFSET_OPTION(zone_pages_high, zone_struct_pages_high)); } value5 = ULONG(zonebuf + OFFSET_OPTION(zone_free_pages, zone_struct_free_pages)); fprintf(fp, "NODE: %d ZONE: %d ADDR: %lx NAME: \"%s\"\n", n, i, node_zones, buf1); if (!value1) { fprintf(fp, " [unpopulated]\n"); goto next_zone; } fprintf(fp, " SIZE: %ld", value1); if (value6 < value1) fprintf(fp, " PRESENT: %ld", value6); fprintf(fp, " MIN/LOW/HIGH: %ld/%ld/%ld", value2, value3, value4); if (VALID_MEMBER(zone_vm_stat)) dump_vm_stat("NR_FREE_PAGES", (long *)&value5, node_zones + OFFSET(zone_vm_stat)); if (VALID_MEMBER(zone_nr_active) && VALID_MEMBER(zone_nr_inactive)) { value1 = ULONG(zonebuf + OFFSET(zone_nr_active)); value2 = ULONG(zonebuf + OFFSET(zone_nr_inactive)); fprintf(fp, "\n NR_ACTIVE: %ld NR_INACTIVE: %ld FREE: %ld\n", value1, value2, value5); if (VALID_MEMBER(zone_vm_stat)) { fprintf(fp, " VM_STAT:\n"); dump_vm_stat(NULL, NULL, node_zones + OFFSET(zone_vm_stat)); } } else if (VALID_MEMBER(zone_vm_stat) && dump_vm_stat("NR_ACTIVE", (long *)&value1, node_zones + OFFSET(zone_vm_stat)) && dump_vm_stat("NR_INACTIVE", (long *)&value2, node_zones + OFFSET(zone_vm_stat))) { fprintf(fp, "\n VM_STAT:\n"); dump_vm_stat(NULL, NULL, node_zones + OFFSET(zone_vm_stat)); } else { if (VALID_MEMBER(zone_vm_stat)) { fprintf(fp, "\n VM_STAT:\n"); dump_vm_stat(NULL, NULL, node_zones + OFFSET(zone_vm_stat)); } else fprintf(fp, " FREE: %ld\n", value5); } if (VALID_MEMBER(zone_all_unreclaimable)) { ivalue = UINT(zonebuf + 
OFFSET(zone_all_unreclaimable)); fprintf(fp, " ALL_UNRECLAIMABLE: %s ", ivalue ? "yes" : "no"); } else if (VALID_MEMBER(zone_flags) && enumerator_value("ZONE_ALL_UNRECLAIMABLE", (long *)&value1)) { value2 = ULONG(zonebuf + OFFSET(zone_flags)); value3 = value2 & (1 << value1); fprintf(fp, " ALL_UNRECLAIMABLE: %s ", value3 ? "yes" : "no"); } if (VALID_MEMBER(zone_pages_scanned)) { value1 = ULONG(zonebuf + OFFSET(zone_pages_scanned)); fprintf(fp, "PAGES_SCANNED: %lu ", value1); } fprintf(fp, "\n"); next_zone: fprintf(fp, "\n"); node_zones += SIZE_OPTION(zone_struct, zone); } if ((n+1) < vt->numnodes) pgdat = vt->node_table[n+1].pgdat; else pgdat = 0; } FREEBUF(zonebuf); } /* * Gather essential information regarding each memory node. */ static void node_table_init(void) { int n; ulong pgdat; /* * Override numnodes -- some kernels may leave it at 1 on a system * with multiple memory nodes. */ if ((vt->flags & NODES) && (VALID_MEMBER(pglist_data_node_next) || VALID_MEMBER(pglist_data_pgdat_next))) { get_symbol_data("pgdat_list", sizeof(void *), &pgdat); for (n = 0; pgdat; n++) { readmem(pgdat + OFFSET_OPTION(pglist_data_node_next, pglist_data_pgdat_next), KVADDR, &pgdat, sizeof(void *), "pglist_data node_next", FAULT_ON_ERROR); } if (n != vt->numnodes) { if (CRASHDEBUG(2)) error(NOTE, "changing numnodes from %d to %d\n", vt->numnodes, n); vt->numnodes = n; } } else vt->flags &= ~NODES; if (!(vt->node_table = (struct node_table *) malloc(sizeof(struct node_table) * vt->numnodes))) error(FATAL, "cannot malloc node_table %s(%d nodes)", vt->numnodes > 1 ? "array " : "", vt->numnodes); BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes); dump_memory_nodes(MEMORY_NODES_INITIALIZE); qsort((void *)vt->node_table, (size_t)vt->numnodes, sizeof(struct node_table), compare_node_data); if (CRASHDEBUG(2)) dump_memory_nodes(MEMORY_NODES_DUMP); } /* * The comparison function must return an integer less than, * equal to, or greater than zero if the first argument is * considered to be respectively less than, equal to, or * greater than the second. If two members compare as equal, * their order in the sorted array is undefined. */ static int compare_node_data(const void *v1, const void *v2) { struct node_table *t1, *t2; t1 = (struct node_table *)v1; t2 = (struct node_table *)v2; return (t1->node_id < t2->node_id ? -1 : t1->node_id == t2->node_id ? 0 : 1); } /* * Depending upon the processor, and whether we're running live or on a * dumpfile, get the system page size. 
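 *
 *  The dumpfile type is selected via pc->flags & MEMORY_SOURCES and
 *  that format's own page-size accessor is called; live sources such
 *  as /dev/mem or /proc/kcore simply fall back to getpagesize().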
*/ uint memory_page_size(void) { uint psz; if (machdep->pagesize) return machdep->pagesize; if (REMOTE_MEMSRC()) return remote_page_size(); switch (pc->flags & MEMORY_SOURCES) { case DISKDUMP: psz = diskdump_page_size(); break; case XENDUMP: psz = xendump_page_size(); break; case KDUMP: psz = kdump_page_size(); break; case NETDUMP: psz = netdump_page_size(); break; case MCLXCD: psz = (uint)mclx_page_size(); break; case LKCD: #if 0 /* REMIND: */ psz = lkcd_page_size(); /* dh_dump_page_size is HW page size; should add dh_page_size */ #else psz = (uint)getpagesize(); #endif break; case DEVMEM: case MEMMOD: case CRASHBUILTIN: case KVMDUMP: case PROC_KCORE: case LIVE_RAMDUMP: psz = (uint)getpagesize(); break; case S390D: psz = s390_page_size(); break; case SADUMP: psz = sadump_page_size(); break; case VMWARE_VMSS: psz = vmware_vmss_page_size(); break; default: psz = 0; error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", pc->flags & MEMORY_SOURCES); } return psz; } /* * If the page size cannot be determined by the dumpfile (like kdump), * and the processor default cannot be used, allow the force-feeding * of a crash command-line page size option. */ void force_page_size(char *s) { int k, err; ulong psize; k = 1; err = FALSE; psize = 0; switch (LASTCHAR(s)) { case 'k': case 'K': LASTCHAR(s) = NULLCHAR; if (!decimal(s, 0)) { err = TRUE; break; } k = 1024; /* FALLTHROUGH */ default: if (decimal(s, 0)) psize = dtol(s, QUIET|RETURN_ON_ERROR, &err); else if (hexadecimal(s, 0)) psize = htol(s, QUIET|RETURN_ON_ERROR, &err); else err = TRUE; break; } if (err) error(INFO, "invalid page size: %s\n", s); else machdep->pagesize = psize * k; } /* * Return the vmalloc address referenced by the first vm_struct * on the vmlist. This can normally be used by the machine-specific * xxx_vmalloc_start() routines. */ ulong first_vmalloc_address(void) { static ulong vmalloc_start = 0; ulong vm_struct, vmap_area; if (DUMPFILE() && vmalloc_start) return vmalloc_start; if (vt->flags & USE_VMAP_AREA) { get_symbol_data("vmap_area_list", sizeof(void *), &vmap_area); if (!vmap_area) return 0; if (!readmem(vmap_area - OFFSET(vmap_area_list) + OFFSET(vmap_area_va_start), KVADDR, &vmalloc_start, sizeof(void *), "first vmap_area va_start", RETURN_ON_ERROR)) non_matching_kernel(); } else if (kernel_symbol_exists("vmlist")) { get_symbol_data("vmlist", sizeof(void *), &vm_struct); if (!vm_struct) return 0; if (!readmem(vm_struct+OFFSET(vm_struct_addr), KVADDR, &vmalloc_start, sizeof(void *), "first vmlist addr", RETURN_ON_ERROR)) non_matching_kernel(); } return vmalloc_start; } /* * Return the highest vmalloc address in the vmlist. */ ulong last_vmalloc_address(void) { struct meminfo meminfo; static ulong vmalloc_limit = 0; if (!vmalloc_limit || ACTIVE()) { BZERO(&meminfo, sizeof(struct meminfo)); meminfo.memtype = KVADDR; meminfo.spec_addr = 0; meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST); dump_vmlist(&meminfo); vmalloc_limit = meminfo.retval; } return vmalloc_limit; } /* * Determine whether an identity-mapped virtual address * refers to an existant physical page, and if not bump * it up to the next node. 
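 *
 *  Returns TRUE with *nextvaddr set to the incoming address when it
 *  falls within a node's physical range, or to the PTOV() of the
 *  lowest node starting above it; FALSE if no such node exists.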
*/ static int next_identity_mapping(ulong vaddr, ulong *nextvaddr) { int n, retval; struct node_table *nt; ulonglong paddr, pstart, psave, pend; ulong node_size; paddr = VTOP(vaddr); psave = 0; retval = FALSE; for (n = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) node_size = vt->max_mapnr; else node_size = nt->size; pstart = nt->start_paddr; pend = pstart + ((ulonglong)node_size * PAGESIZE()); /* * Check the next node. */ if (paddr >= pend) continue; /* * Bump up to the next node, but keep looking in * case of non-sequential nodes. */ if (paddr < pstart) { if (psave && (psave < pstart)) continue; *nextvaddr = PTOV(pstart); psave = pstart; retval = TRUE; continue; } /* * We're in the physical range. */ *nextvaddr = vaddr; retval = TRUE; break; } return retval; } /* * Return the L1 cache size in bytes, which can be found stored in the * cache_cache. */ int l1_cache_size(void) { ulong cache; ulong c_align; int colour_off; int retval; retval = -1; if (VALID_MEMBER(kmem_cache_s_c_align)) { cache = symbol_value("cache_cache"); readmem(cache+OFFSET(kmem_cache_s_c_align), KVADDR, &c_align, sizeof(ulong), "c_align", FAULT_ON_ERROR); retval = (int)c_align; } else if (VALID_MEMBER(kmem_cache_s_colour_off)) { cache = symbol_value("cache_cache"); readmem(cache+OFFSET(kmem_cache_s_colour_off), KVADDR, &colour_off, sizeof(int), "colour_off", FAULT_ON_ERROR); retval = colour_off; } return retval; } /* * Multi-purpose routine used to query/control dumpfile memory usage. */ int dumpfile_memory(int cmd) { int retval; retval = 0; switch (cmd) { case DUMPFILE_MEM_USED: if (REMOTE_DUMPFILE()) retval = remote_memory_used(); else if (pc->flags & NETDUMP) retval = netdump_memory_used(); else if (pc->flags & KDUMP) retval = kdump_memory_used(); else if (pc->flags & XENDUMP) retval = xendump_memory_used(); else if (pc->flags & KVMDUMP) retval = kvmdump_memory_used(); else if (pc->flags & DISKDUMP) retval = diskdump_memory_used(); else if (pc->flags & LKCD) retval = lkcd_memory_used(); else if (pc->flags & MCLXCD) retval = vas_memory_used(); else if (pc->flags & S390D) retval = s390_memory_used(); else if (pc->flags & SADUMP) retval = sadump_memory_used(); break; case DUMPFILE_FREE_MEM: if (REMOTE_DUMPFILE()) retval = remote_free_memory(); else if (pc->flags & NETDUMP) retval = netdump_free_memory(); else if (pc->flags & KDUMP) retval = kdump_free_memory(); else if (pc->flags & XENDUMP) retval = xendump_free_memory(); else if (pc->flags & KVMDUMP) retval = kvmdump_free_memory(); else if (pc->flags & DISKDUMP) retval = diskdump_free_memory(); else if (pc->flags & LKCD) retval = lkcd_free_memory(); else if (pc->flags & MCLXCD) retval = vas_free_memory(NULL); else if (pc->flags & S390D) retval = s390_free_memory(); else if (pc->flags & SADUMP) retval = sadump_free_memory(); break; case DUMPFILE_MEM_DUMP: if (REMOTE_DUMPFILE()) retval = remote_memory_dump(0); else if (pc->flags & NETDUMP) retval = netdump_memory_dump(fp); else if (pc->flags & KDUMP) retval = kdump_memory_dump(fp); else if (pc->flags & XENDUMP) retval = xendump_memory_dump(fp); else if (pc->flags & KVMDUMP) retval = kvmdump_memory_dump(fp); else if (pc->flags & DISKDUMP) retval = diskdump_memory_dump(fp); else if (pc->flags & LKCD) retval = lkcd_memory_dump(set_lkcd_fp(fp)); else if (pc->flags & MCLXCD) retval = vas_memory_dump(fp); else if (pc->flags & S390D) retval = s390_memory_dump(fp); else if (pc->flags & PROC_KCORE) retval = kcore_memory_dump(fp); else if (pc->flags & SADUMP) retval = 
sadump_memory_dump(fp); else if (pc->flags & VMWARE_VMSS) retval = vmware_vmss_memory_dump(fp); break; case DUMPFILE_ENVIRONMENT: if (pc->flags & LKCD) { set_lkcd_fp(fp); dump_lkcd_environment(0); } else if (pc->flags & REM_LKCD) retval = remote_memory_dump(VERBOSE); break; } return retval; } /* * Functions for sparse mem support */ ulong sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) { return coded_mem_map + (section_nr_to_pfn(section_nr) * SIZE(page)); } void sparse_mem_init(void) { ulong addr; ulong mem_section_size; int len, dimension, mem_section_is_ptr; if (!IS_SPARSEMEM()) return; MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section", "section_mem_map"); if (!MAX_PHYSMEM_BITS()) error(FATAL, "CONFIG_SPARSEMEM kernels not supported for this architecture\n"); /* * The kernel's mem_section changed from array to pointer in this commit: * * commit 83e3c48729d9ebb7af5a31a504f3fd6aff0348c4 * mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y */ mem_section_is_ptr = get_symbol_type("mem_section", NULL, NULL) == TYPE_CODE_PTR ? TRUE : FALSE; if (((len = get_array_length("mem_section", &dimension, 0)) == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || mem_section_is_ptr || !dimension) vt->flags |= SPARSEMEM_EX; if (IS_SPARSEMEM_EX()) { machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME(); mem_section_size = sizeof(void *) * NR_SECTION_ROOTS(); } else { machdep->sections_per_root = _SECTIONS_PER_ROOT(); mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS(); } if (CRASHDEBUG(1)) { fprintf(fp, "PAGESIZE=%d\n",PAGESIZE()); fprintf(fp,"mem_section_size = %ld\n", mem_section_size); fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS()); fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS()); fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() ); fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK()); fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION()); if (!mem_section_is_ptr && IS_SPARSEMEM_EX() && !len) error(WARNING, "SPARSEMEM_EX: questionable section values\n"); } if (!(vt->mem_sec = (void *)malloc(mem_section_size))) error(FATAL, "cannot malloc mem_sec cache\n"); if (!(vt->mem_section = (char *)malloc(SIZE(mem_section)))) error(FATAL, "cannot malloc mem_section cache\n"); if (mem_section_is_ptr) get_symbol_data("mem_section", sizeof(void *), &addr); else addr = symbol_value("mem_section"); readmem(addr, KVADDR, vt->mem_sec, mem_section_size, "memory section root table", FAULT_ON_ERROR); } char * read_mem_section(ulong addr) { if ((addr == 0) || !IS_KVADDR(addr)) return 0; readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section), "memory section", FAULT_ON_ERROR); return vt->mem_section; } ulong nr_to_section(ulong nr) { ulong addr; ulong *mem_sec = vt->mem_sec; if (IS_SPARSEMEM_EX()) { if (SECTION_NR_TO_ROOT(nr) >= NR_SECTION_ROOTS()) { if (!STREQ(pc->curcmd, "rd") && !STREQ(pc->curcmd, "search") && !STREQ(pc->curcmd, "kmem")) error(WARNING, "sparsemem: invalid section number: %ld\n", nr); return 0; } } if (IS_SPARSEMEM_EX()) { if ((mem_sec[SECTION_NR_TO_ROOT(nr)] == 0) || !IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)])) return 0; addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); } else addr = symbol_value("mem_section") + (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) + (nr & SECTION_ROOT_MASK())) * SIZE(mem_section); if (!IS_KVADDR(addr)) return 0; return addr; } /* * We use the lower bits of the mem_map pointer to store * a little bit of information. 
The pointer is calculated * as mem_map - section_nr_to_pfn(pnum). The result is * aligned to the minimum alignment of the two values: * 1. All mem_map arrays are page-aligned. * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT * lowest bits. PFN_SECTION_SHIFT is arch-specific * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the * worst combination is powerpc with 256k pages, * which results in PFN_SECTION_SHIFT equal 6. * To sum it up, at least 6 bits are available. */ #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_IS_ONLINE (1UL<<2) #define SECTION_IS_EARLY (1UL<<3) #define SECTION_MAP_LAST_BIT (1UL<<4) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) int valid_section(ulong addr) { char *mem_section; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) & SECTION_MARKED_PRESENT); return 0; } int section_has_mem_map(ulong addr) { char *mem_section; ulong kernel_version_bit; if (THIS_KERNEL_VERSION >= LINUX(2,6,24)) kernel_version_bit = SECTION_HAS_MEM_MAP; else kernel_version_bit = SECTION_MARKED_PRESENT; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) & kernel_version_bit); return 0; } ulong section_mem_map_addr(ulong addr, int raw) { char *mem_section; ulong map; if ((mem_section = read_mem_section(addr))) { map = ULONG(mem_section + OFFSET(mem_section_section_mem_map)); if (!raw) map &= SECTION_MAP_MASK; return map; } return 0; } ulong valid_section_nr(ulong nr) { ulong addr = nr_to_section(nr); if (valid_section(addr)) return addr; return 0; } ulong pfn_to_map(ulong pfn) { ulong section, page_offset; ulong section_nr; ulong coded_mem_map, mem_map; section_nr = pfn_to_section_nr(pfn); if (!(section = valid_section_nr(section_nr))) return 0; if (section_has_mem_map(section)) { page_offset = pfn - section_nr_to_pfn(section_nr); coded_mem_map = section_mem_map_addr(section, 0); mem_map = sparse_decode_mem_map(coded_mem_map, section_nr) + (page_offset * SIZE(page)); return mem_map; } return 0; } static void fill_mem_section_state(ulong state, char *buf) { int bufidx = 0; memset(buf, 0, sizeof(*buf) * BUFSIZE); if (state & SECTION_MARKED_PRESENT) bufidx += sprintf(buf + bufidx, "%s", "P"); if (state & SECTION_HAS_MEM_MAP) bufidx += sprintf(buf + bufidx, "%s", "M"); if (state & SECTION_IS_ONLINE) bufidx += sprintf(buf + bufidx, "%s", "O"); if (state & SECTION_IS_EARLY) bufidx += sprintf(buf + bufidx, "%s", "E"); } void dump_mem_sections(int initialize) { ulong nr, max, addr; ulong nr_mem_sections; ulong coded_mem_map, mem_map, pfn; char statebuf[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; nr_mem_sections = NR_MEM_SECTIONS(); if (initialize) { for (nr = max = 0; nr < nr_mem_sections ; nr++) { if (valid_section_nr(nr)) max = nr; } vt->max_mem_section_nr = max; return; } fprintf(fp, "\n"); pad_line(fp, BITS32() ? 
59 : 67, '-'); fprintf(fp, "\n\nNR %s %s %s %s PFN\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"), mkstring(buf2, MAX(VADDR_PRLEN,strlen("CODED_MEM_MAP")), CENTER|LJUST, "CODED_MEM_MAP"), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP"), mkstring(buf4, strlen("STATE"), CENTER, "STATE")); for (nr = 0; nr < nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr, 0); mem_map = sparse_decode_mem_map(coded_mem_map,nr); pfn = section_nr_to_pfn(nr); fill_mem_section_state(section_mem_map_addr(addr, 1), statebuf); fprintf(fp, "%2ld %s %s %s %s %s\n", nr, mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(addr)), mkstring(buf2, MAX(VADDR_PRLEN, strlen("CODED_MEM_MAP")), CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)), mkstring(buf3, VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(mem_map)), mkstring(buf4, strlen("STATE"), CENTER, statebuf), pc->output_radix == 10 ? mkstring(buf5, VADDR_PRLEN, LONG_DEC|LJUST, MKSTR(pfn)) : mkstring(buf5, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(pfn))); } } } #define MEM_ONLINE (1<<0) #define MEM_GOING_OFFLINE (1<<1) #define MEM_OFFLINE (1<<2) #define MEM_GOING_ONLINE (1<<3) #define MEM_CANCEL_ONLINE (1<<4) #define MEM_CANCEL_OFFLINE (1<<5) static void fill_memory_block_state(ulong memblock, char *buf) { ulong state; memset(buf, 0, sizeof(*buf) * BUFSIZE); readmem(memblock + OFFSET(memory_block_state), KVADDR, &state, sizeof(void *), "memory_block state", FAULT_ON_ERROR); switch (state) { case MEM_ONLINE: sprintf(buf, "%s", "ONLINE"); break; case MEM_GOING_OFFLINE: sprintf(buf, "%s", "GOING_OFFLINE"); break; case MEM_OFFLINE: sprintf(buf, "%s", "OFFLINE"); break; case MEM_GOING_ONLINE: sprintf(buf, "%s", "GOING_ONLINE"); break; case MEM_CANCEL_ONLINE: sprintf(buf, "%s", "CANCEL_ONLINE"); break; case MEM_CANCEL_OFFLINE: sprintf(buf, "%s", "CANCEL_OFFLINE"); break; default: sprintf(buf, "%s", "UNKNOWN"); } } static ulong pfn_to_phys(ulong pfn) { return pfn << PAGESHIFT(); } static void fill_memory_block_name(ulong memblock, char *name) { ulong kobj, value; memset(name, 0, sizeof(*name) * BUFSIZE); kobj = memblock + OFFSET(memory_block_dev) + OFFSET(device_kobj); readmem(kobj + OFFSET(kobject_name), KVADDR, &value, sizeof(void *), "kobject name", FAULT_ON_ERROR); read_string(value, name, BUFSIZE-1); } static void fill_memory_block_parange(ulong saddr, ulong eaddr, char *parange) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; memset(parange, 0, sizeof(*parange) * BUFSIZE); if (eaddr == ULLONG_MAX) sprintf(parange, "%s", mkstring(buf1, PADDR_PRLEN*2 + 3, CENTER|LONG_HEX, MKSTR(saddr))); else sprintf(parange, "%s - %s", mkstring(buf1, PADDR_PRLEN, RJUST|LONG_HEX, MKSTR(saddr)), mkstring(buf2, PADDR_PRLEN, RJUST|LONG_HEX, MKSTR(eaddr))); } static void fill_memory_block_srange(ulong start_sec, char *srange) { memset(srange, 0, sizeof(*srange) * BUFSIZE); sprintf(srange, "%lu", start_sec); } static void print_memory_block(ulong memory_block) { ulong start_sec, end_sec, nid; ulong memblock_size, mbs, start_addr, end_addr = (ulong)ULLONG_MAX; char statebuf[BUFSIZE]; char srangebuf[BUFSIZE]; char parangebuf[BUFSIZE]; char name[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; char buf7[BUFSIZE]; readmem(memory_block + OFFSET(memory_block_start_section_nr), KVADDR, &start_sec, sizeof(void *), "memory_block start_section_nr", FAULT_ON_ERROR); start_addr = pfn_to_phys(section_nr_to_pfn(start_sec)); if (symbol_exists("memory_block_size_probed")) { memblock_size = 
symbol_value("memory_block_size_probed"); readmem(memblock_size, KVADDR, &mbs, sizeof(ulong), "memory_block_size_probed", FAULT_ON_ERROR); end_addr = start_addr + mbs - 1; } else if (MEMBER_EXISTS("memory_block", "end_section_nr")) { readmem(memory_block + OFFSET(memory_block_end_section_nr), KVADDR, &end_sec, sizeof(void *), "memory_block end_section_nr", FAULT_ON_ERROR); end_addr = pfn_to_phys(section_nr_to_pfn(end_sec + 1)) - 1; } fill_memory_block_state(memory_block, statebuf); fill_memory_block_name(memory_block, name); fill_memory_block_parange(start_addr, end_addr, parangebuf); fill_memory_block_srange(start_sec, srangebuf); if (MEMBER_EXISTS("memory_block", "nid")) { readmem(memory_block + OFFSET(memory_block_nid), KVADDR, &nid, sizeof(void *), "memory_block nid", FAULT_ON_ERROR); fprintf(fp, " %s %s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(memory_block)), mkstring(buf2, 12, CENTER, name), parangebuf, mkstring(buf5, strlen("NODE"), CENTER|LONG_DEC, MKSTR(nid)), mkstring(buf6, strlen("OFFLINE"), LJUST, statebuf), mkstring(buf7, 12, LJUST, srangebuf)); } else fprintf(fp, " %s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(memory_block)), mkstring(buf2, 10, CENTER, name), parangebuf, mkstring(buf5, strlen("OFFLINE"), LJUST, statebuf), mkstring(buf6, 12, LJUST, srangebuf)); } static void init_memory_block_offset(void) { MEMBER_OFFSET_INIT(bus_type_p, "bus_type", "p"); MEMBER_OFFSET_INIT(subsys_private_klist_devices, "subsys_private", "klist_devices"); MEMBER_OFFSET_INIT(klist_k_list, "klist", "k_list"); MEMBER_OFFSET_INIT(klist_node_n_node, "klist_node", "n_node"); MEMBER_OFFSET_INIT(device_kobj, "device", "kobj"); MEMBER_OFFSET_INIT(kobject_name, "kobject", "name"); MEMBER_OFFSET_INIT(device_private_knode_bus, "device_private", "knode_bus"); MEMBER_OFFSET_INIT(device_private_device, "device_private", "device"); MEMBER_OFFSET_INIT(memory_block_dev, "memory_block", "dev"); MEMBER_OFFSET_INIT(memory_block_start_section_nr, "memory_block", "start_section_nr"); MEMBER_OFFSET_INIT(memory_block_end_section_nr, "memory_block", "end_section_nr"); MEMBER_OFFSET_INIT(memory_block_state, "memory_block", "state"); if (MEMBER_EXISTS("memory_block", "nid")) MEMBER_OFFSET_INIT(memory_block_nid, "memory_block", "nid"); } static void init_memory_block(struct list_data *ld, int *klistcnt, ulong **klistbuf) { ulong memory_subsys = symbol_value("memory_subsys"); ulong private, klist, start; init_memory_block_offset(); readmem(memory_subsys + OFFSET(bus_type_p), KVADDR, &private, sizeof(void *), "memory_subsys.private", FAULT_ON_ERROR); klist = private + OFFSET(subsys_private_klist_devices) + OFFSET(klist_k_list); BZERO(ld, sizeof(struct list_data)); readmem(klist, KVADDR, &start, sizeof(void *), "klist klist", FAULT_ON_ERROR); ld->start = start; ld->end = klist; ld->list_head_offset = OFFSET(klist_node_n_node) + OFFSET(device_private_knode_bus); hq_open(); *klistcnt = do_list(ld); *klistbuf = (ulong *)GETBUF(*klistcnt * sizeof(ulong)); *klistcnt = retrieve_list(*klistbuf, *klistcnt); hq_close(); } void dump_memory_blocks(int initialize) { ulong memory_block, device; ulong *klistbuf; int klistcnt, i; struct list_data list_data; char mb_hdr[BUFSIZE]; char paddr_hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char buf6[BUFSIZE]; if ((!STRUCT_EXISTS("memory_block")) || (!symbol_exists("memory_subsys"))) return; if (initialize) return; init_memory_block(&list_data, &klistcnt, &klistbuf); if 
((symbol_exists("memory_block_size_probed")) || (MEMBER_EXISTS("memory_block", "end_section_nr"))) sprintf(paddr_hdr, "%s", "PHYSICAL RANGE"); else sprintf(paddr_hdr, "%s", "PHYSICAL START"); if (MEMBER_EXISTS("memory_block", "nid")) sprintf(mb_hdr, "\n%s %s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_BLOCK"), mkstring(buf2, 10, CENTER, "NAME"), mkstring(buf3, PADDR_PRLEN*2 + 2, CENTER, paddr_hdr), mkstring(buf4, strlen("NODE"), CENTER, "NODE"), mkstring(buf5, strlen("OFFLINE"), LJUST, "STATE"), mkstring(buf6, 12, LJUST, "START_SECTION_NO")); else sprintf(mb_hdr, "\n%s %s %s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_BLOCK"), mkstring(buf2, 10, CENTER, "NAME"), mkstring(buf3, PADDR_PRLEN*2, CENTER, paddr_hdr), mkstring(buf4, strlen("OFFLINE"), LJUST, "STATE"), mkstring(buf5, 12, LJUST, "START_SECTION_NO")); fprintf(fp, "%s", mb_hdr); for (i = 0; i < klistcnt; i++) { readmem(klistbuf[i] + OFFSET(device_private_device), KVADDR, &device, sizeof(void *), "device_private device", FAULT_ON_ERROR); memory_block = device - OFFSET(memory_block_dev); print_memory_block(memory_block); } FREEBUF(klistbuf); } void list_mem_sections(void) { ulong nr,addr; ulong nr_mem_sections = NR_MEM_SECTIONS(); ulong coded_mem_map; for (nr = 0; nr <= nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr, 0); fprintf(fp, "nr=%ld section = %lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n", nr, addr, coded_mem_map, section_nr_to_pfn(nr), sparse_decode_mem_map(coded_mem_map,nr)); } } } /* * For kernels containing the node_online_map or node_states[], * return the number of online node bits set. */ static int get_nodes_online(void) { int i, len, online; struct gnu_request req; ulong *maskptr; long N_ONLINE; ulong mapaddr; if (!symbol_exists("node_online_map") && !symbol_exists("node_states")) return 0; len = mapaddr = 0; if (symbol_exists("node_online_map")) { if (LKCD_KERNTYPES()) { if ((len = STRUCT_SIZE("nodemask_t")) < 0) error(FATAL, "cannot determine type nodemask_t\n"); mapaddr = symbol_value("node_online_map"); } else { len = get_symbol_type("node_online_map", NULL, &req) == TYPE_CODE_UNDEF ? sizeof(ulong) : req.length; mapaddr = symbol_value("node_online_map"); } } else if (symbol_exists("node_states")) { if ((get_symbol_type("node_states", NULL, &req) != TYPE_CODE_ARRAY) || !(len = get_array_length("node_states", NULL, 0)) || !enumerator_value("N_ONLINE", &N_ONLINE)) return 0; len = req.length / len; mapaddr = symbol_value("node_states") + (N_ONLINE * len); } if (!(vt->node_online_map = (ulong *)malloc(len))) error(FATAL, "cannot malloc node_online_map\n"); if (!readmem(mapaddr, KVADDR, (void *)&vt->node_online_map[0], len, "node_online_map", QUIET|RETURN_ON_ERROR)) error(FATAL, "cannot read node_online_map/node_states\n"); vt->node_online_map_len = len/sizeof(ulong); online = 0; maskptr = (ulong *)vt->node_online_map; for (i = 0; i < vt->node_online_map_len; i++, maskptr++) online += count_bits_long(*maskptr); if (CRASHDEBUG(1)) { fprintf(fp, "node_online_map: ["); for (i = 0; i < vt->node_online_map_len; i++) fprintf(fp, "%s%lx", i ? ", " : "", vt->node_online_map[i]); fprintf(fp, "] -> nodes online: %d\n", online); } if (online) vt->numnodes = online; return online; } /* * Return the next node index, with "first" being the first acceptable node. 
*/ static int next_online_node(int first) { int i, j, node; ulong mask, *maskptr; if ((first/BITS_PER_LONG) >= vt->node_online_map_len) return -1; maskptr = (ulong *)vt->node_online_map; for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) { mask = *maskptr; for (j = 0; j < BITS_PER_LONG; j++, node++) { if (mask & 1) { if (node >= first) return node; } mask >>= 1; } } return -1; } /* * Modify appropriately for architecture/kernel nuances. */ static ulong next_online_pgdat(int node) { char buf[BUFSIZE]; ulong pgdat; /* * Default -- look for type: struct pglist_data node_data[] */ if (LKCD_KERNTYPES()) { if (!kernel_symbol_exists("node_data")) goto pgdat2; /* * Just index into node_data[] without checking that it is * an array; kerntypes have no such symbol information. */ } else { if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY) goto pgdat2; open_tmpfile(); sprintf(buf, "whatis node_data"); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); goto pgdat2; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); if ((!strstr(buf, "struct pglist_data *") && !strstr(buf, "pg_data_t *")) || (count_chars(buf, '[') != 1) || (count_chars(buf, ']') != 1)) goto pgdat2; } if (!readmem(symbol_value("node_data") + (node * sizeof(void *)), KVADDR, &pgdat, sizeof(void *), "node_data", RETURN_ON_ERROR) || !IS_KVADDR(pgdat)) goto pgdat2; return pgdat; pgdat2: if (LKCD_KERNTYPES()) { if (!kernel_symbol_exists("pgdat_list")) goto pgdat3; } else { if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY) goto pgdat3; open_tmpfile(); sprintf(buf, "whatis pgdat_list"); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); goto pgdat3; } rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (STRNEQ(buf, "type = ")) break; } close_tmpfile(); if ((!strstr(buf, "struct pglist_data *") && !strstr(buf, "pg_data_t *")) || (count_chars(buf, '[') != 1) || (count_chars(buf, ']') != 1)) goto pgdat3; } if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)), KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) || !IS_KVADDR(pgdat)) goto pgdat3; return pgdat; pgdat3: if (symbol_exists("contig_page_data") && (node == 0)) return symbol_value("contig_page_data"); return 0; } /* * Make the vm_stat[] array contents easily accessible. */ static int vm_stat_init(void) { char buf[BUFSIZE]; char *arglist[MAXARGS]; int i, count, stringlen, total; int c ATTRIBUTE_UNUSED; struct gnu_request *req; char *start; long enum_value, zone_cnt = -1, node_cnt = -1; int split_vmstat = 0, ni = 0; if (vt->flags & VM_STAT) return TRUE; if ((vt->nr_vm_stat_items == -1) || (!symbol_exists("vm_stat") && !symbol_exists("vm_zone_stat"))) goto bailout; /* * look for type: type = atomic_long_t [] */ if (LKCD_KERNTYPES()) { if ((!symbol_exists("vm_stat") && !symbol_exists("vm_zone_stat"))) goto bailout; /* * Just assume that vm_stat is an array; there is * no symbol info in a kerntypes file. 
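 *
 *  Later kernels split the single vm_stat[] array into separate
 *  vm_zone_stat[]/vm_node_stat[] arrays, and subsequently added
 *  vm_numa_stat[]; the code below detects each layout and stitches
 *  the enumerators back together into one vm_stat_items list.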
*/ } else { if (symbol_exists("vm_stat") && get_symbol_type("vm_stat", NULL, NULL) == TYPE_CODE_ARRAY) { vt->nr_vm_stat_items = get_array_length("vm_stat", NULL, 0); } else if (symbol_exists("vm_zone_stat") && get_symbol_type("vm_zone_stat", NULL, NULL) == TYPE_CODE_ARRAY) { if (symbol_exists("vm_numa_stat") && get_array_length("vm_numa_stat", NULL, 0)) { vt->nr_vm_stat_items = get_array_length("vm_zone_stat", NULL, 0) + get_array_length("vm_node_stat", NULL, 0) + ARRAY_LENGTH(vm_numa_stat); split_vmstat = 2; enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); enumerator_value("NR_VM_NODE_STAT_ITEMS", &node_cnt); } else { vt->nr_vm_stat_items = get_array_length("vm_zone_stat", NULL, 0) + get_array_length("vm_node_stat", NULL, 0); split_vmstat = 1; enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); } } else { goto bailout; } } open_tmpfile(); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_GET_DATATYPE; req->name = "zone_stat_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); if (split_vmstat >= 1) { req->command = GNU_GET_DATATYPE; req->name = "node_stat_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); } if (split_vmstat == 2) { req->command = GNU_GET_DATATYPE; req->name = "numa_stat_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); } FREEBUF(req); stringlen = 1; count = -1; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; clean_line(buf); c = parse_line(buf, arglist); if ((!split_vmstat && STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) || ((split_vmstat == 1) && STREQ(arglist[0], "NR_VM_NODE_STAT_ITEMS")) || ((split_vmstat == 2) && STREQ(arglist[0], "NR_VM_NUMA_STAT_ITEMS"))) { if (LKCD_KERNTYPES()) vt->nr_vm_stat_items = MAX(atoi(arglist[2]), count); break; } else if ((split_vmstat == 1) && STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) { continue; } else if ((split_vmstat == 2) && STREQ(arglist[0], "NR_VM_NODE_STAT_ITEMS")) { continue; } else { stringlen += strlen(arglist[0]) + 1; count++; } } total = stringlen + (sizeof(void *) * vt->nr_vm_stat_items); if (!(vt->vm_stat_items = (char **)malloc(total))) { close_tmpfile(); error(FATAL, "cannot malloc vm_stat_items cache\n"); } BZERO(vt->vm_stat_items, total); start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items]; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; c = parse_line(buf, arglist); if (!enumerator_value(arglist[0], &enum_value)) { close_tmpfile(); goto bailout; } i = ni + enum_value; if (!ni && (enum_value == zone_cnt)) { ni = zone_cnt; continue; } else if ((ni == zone_cnt) && (enum_value == node_cnt)) { ni += node_cnt; continue; } if (i < vt->nr_vm_stat_items) { vt->vm_stat_items[i] = start; strcpy(start, arglist[0]); start += strlen(arglist[0]) + 1; } } close_tmpfile(); vt->flags |= VM_STAT; return TRUE; bailout: vt->nr_vm_stat_items = -1; return FALSE; } /* * Either dump all vm_stat entries, or return the value of * the specified vm_stat item. Use the global counter unless * a zone-specific address is passed. 
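 *
 *  For example, dump_vm_stat("NR_FREE_PAGES", &retval, 0) returns
 *  the global free-page count, whereas passing a zone's vm_stat
 *  array address as the third argument restricts the read to that
 *  zone's counters.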
*/ static int dump_vm_stat(char *item, long *retval, ulong zone) { char *buf; ulong *vp; ulong location; int i, maxlen, len, node_start = -1, numa_start = 1; long total_cnt, zone_cnt = 0, node_cnt = 0, numa_cnt = 0; int split_vmstat = 0; if (!vm_stat_init()) { if (!item) if (CRASHDEBUG(1)) error(INFO, "vm_stat not available in this kernel\n"); return FALSE; } buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items); if (symbol_exists("vm_node_stat") && symbol_exists("vm_zone_stat") && symbol_exists("vm_numa_stat") && ARRAY_LENGTH(vm_numa_stat)) split_vmstat = 2; else if (symbol_exists("vm_node_stat") && symbol_exists("vm_zone_stat")) split_vmstat = 1; else location = zone ? zone : symbol_value("vm_stat"); if (split_vmstat == 1) { enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); location = zone ? zone : symbol_value("vm_zone_stat"); readmem(location, KVADDR, buf, sizeof(ulong) * zone_cnt, "vm_zone_stat", FAULT_ON_ERROR); if (!zone) { location = symbol_value("vm_node_stat"); enumerator_value("NR_VM_NODE_STAT_ITEMS", &node_cnt); readmem(location, KVADDR, buf + (sizeof(ulong) * zone_cnt), sizeof(ulong) * node_cnt, "vm_node_stat", FAULT_ON_ERROR); } node_start = zone_cnt; total_cnt = zone_cnt + node_cnt; } else if (split_vmstat == 2) { enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zone_cnt); location = zone ? zone : symbol_value("vm_zone_stat"); readmem(location, KVADDR, buf, sizeof(ulong) * zone_cnt, "vm_zone_stat", FAULT_ON_ERROR); if (!zone) { location = symbol_value("vm_node_stat"); enumerator_value("NR_VM_NODE_STAT_ITEMS", &node_cnt); readmem(location, KVADDR, buf + (sizeof(ulong) * zone_cnt), sizeof(ulong) * node_cnt, "vm_node_stat", FAULT_ON_ERROR); } node_start = zone_cnt; if (!zone) { location = symbol_value("vm_numa_stat"); enumerator_value("NR_VM_NUMA_STAT_ITEMS", &numa_cnt); readmem(location, KVADDR, buf + (sizeof(ulong) * (zone_cnt+node_cnt)), sizeof(ulong) * numa_cnt, "vm_numa_stat", FAULT_ON_ERROR); } numa_start = zone_cnt+node_cnt; total_cnt = zone_cnt + node_cnt + numa_cnt; } else { readmem(location, KVADDR, buf, sizeof(ulong) * vt->nr_vm_stat_items, "vm_stat", FAULT_ON_ERROR); total_cnt = vt->nr_vm_stat_items; } if (!item) { if (!zone) { if (symbol_exists("vm_zone_stat")) fprintf(fp, " VM_ZONE_STAT:\n"); else fprintf(fp, " VM_STAT:\n"); } for (i = maxlen = 0; i < total_cnt; i++) if ((len = strlen(vt->vm_stat_items[i])) > maxlen) maxlen = len; vp = (ulong *)buf; for (i = 0; i < total_cnt; i++) { if (!zone) { if ((i == node_start) && symbol_exists("vm_node_stat")) fprintf(fp, "\n VM_NODE_STAT:\n"); if ((i == numa_start) && symbol_exists("vm_numa_stat") && ARRAY_LENGTH(vm_numa_stat)) fprintf(fp, "\n VM_NUMA_STAT:\n"); } fprintf(fp, "%s%s: %ld\n", space(maxlen - strlen(vt->vm_stat_items[i])), vt->vm_stat_items[i], vp[i]); } return TRUE; } vp = (ulong *)buf; for (i = 0; i < total_cnt; i++) { if (STREQ(vt->vm_stat_items[i], item)) { *retval = vp[i]; return TRUE; } } return FALSE; } /* * Dump the cumulative totals of the per_cpu__page_states counters. 
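 *
 *  The member names are harvested from a gdb "ptype struct
 *  page_state" pass, after which each cpu's per-cpu instance is
 *  dumped and the per-member values are summed across all cpus.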
*/ int dump_page_states(void) { struct syment *sp; ulong addr, value; int i, c, fd, len, instance, members; char buf[BUFSIZE]; char *arglist[MAXARGS]; struct entry { char *name; ulong value; } *entry_list; struct stat stat; char *namebuf, *nameptr; if (!(sp = per_cpu_symbol_search("per_cpu__page_states"))) { if (CRASHDEBUG(1)) error(INFO, "per_cpu__page_states" "not available in this kernel\n"); return FALSE; } instance = members = len = 0; sprintf(buf, "ptype struct page_state"); open_tmpfile(); if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) { close_tmpfile(); return FALSE; } fflush(pc->tmpfile); fd = fileno(pc->tmpfile); fstat(fd, &stat); namebuf = GETBUF(stat.st_size); nameptr = namebuf; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "struct page_state") || strstr(buf, "}")) continue; members++; } entry_list = (struct entry *) GETBUF(sizeof(struct entry) * members); rewind(pc->tmpfile); i = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "struct page_state") || strstr(buf, "}")) continue; strip_ending_char(strip_linefeeds(buf), ';'); c = parse_line(buf, arglist); strcpy(nameptr, arglist[c-1]); entry_list[i].name = nameptr; if (strlen(nameptr) > len) len = strlen(nameptr); nameptr += strlen(nameptr)+2; i++; } close_tmpfile(); open_tmpfile(); for (c = 0; c < kt->cpus; c++) { addr = sp->value + kt->__per_cpu_offset[c]; dump_struct("page_state", addr, RADIX(16)); } i = 0; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "struct page_state")) { instance++; i = 0; continue; } if (strstr(buf, "}")) continue; strip_linefeeds(buf); extract_hex(buf, &value, ',', TRUE); entry_list[i].value += value; i++; } close_tmpfile(); fprintf(fp, " PAGE_STATES:\n"); for (i = 0; i < members; i++) { sprintf(buf, "%s", entry_list[i].name); fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0)); fprintf(fp, ": %ld\n", entry_list[i].value); } FREEBUF(namebuf); FREEBUF(entry_list); return TRUE; } /* * Dump the cumulative totals of the per_cpu__vm_event_state * counters. 
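 *
 *  Each cpu's vm_event_states array is read and accumulated into a
 *  cumulative[] array, which is then printed one enumerator per line.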
*/ static int dump_vm_event_state(void) { int i, c, maxlen, len; struct syment *sp; ulong addr; ulong *events, *cumulative; if (!vm_event_state_init()) return FALSE; events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2); cumulative = &events[vt->nr_vm_event_items]; sp = per_cpu_symbol_search("per_cpu__vm_event_states"); for (c = 0; c < kt->cpus; c++) { addr = sp->value + kt->__per_cpu_offset[c]; if (CRASHDEBUG(1)) { fprintf(fp, "[%d]: %lx\n", c, addr); dump_struct("vm_event_state", addr, RADIX(16)); } readmem(addr, KVADDR, events, sizeof(ulong) * vt->nr_vm_event_items, "vm_event_states buffer", FAULT_ON_ERROR); for (i = 0; i < vt->nr_vm_event_items; i++) cumulative[i] += events[i]; } fprintf(fp, "\n VM_EVENT_STATES:\n"); for (i = maxlen = 0; i < vt->nr_vm_event_items; i++) if ((len = strlen(vt->vm_event_items[i])) > maxlen) maxlen = len; for (i = 0; i < vt->nr_vm_event_items; i++) fprintf(fp, "%s%s: %lu\n", space(maxlen - strlen(vt->vm_event_items[i])), vt->vm_event_items[i], cumulative[i]); FREEBUF(events); return TRUE; } static int vm_event_state_init(void) { int i, stringlen, total; int c ATTRIBUTE_UNUSED; long count, enum_value; struct gnu_request *req; char *arglist[MAXARGS]; char buf[BUFSIZE]; char *start; if (vt->flags & VM_EVENT) return TRUE; if ((vt->nr_vm_event_items == -1) || !per_cpu_symbol_search("per_cpu__vm_event_states")) goto bailout; if (!enumerator_value("NR_VM_EVENT_ITEMS", &count)) return FALSE; vt->nr_vm_event_items = count; open_tmpfile(); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_GET_DATATYPE; req->name = "vm_event_item"; req->flags = GNU_PRINT_ENUMERATORS; gdb_interface(req); FREEBUF(req); stringlen = 1; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; clean_line(buf); c = parse_line(buf, arglist); if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS")) break; else stringlen += strlen(arglist[0]); } total = stringlen + vt->nr_vm_event_items + (sizeof(void *) * vt->nr_vm_event_items); if (!(vt->vm_event_items = (char **)malloc(total))) { close_tmpfile(); error(FATAL, "cannot malloc vm_event_items cache\n"); } BZERO(vt->vm_event_items, total); start = (char *)&vt->vm_event_items[vt->nr_vm_event_items]; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (strstr(buf, "{") || strstr(buf, "}")) continue; c = parse_line(buf, arglist); if (enumerator_value(arglist[0], &enum_value)) i = enum_value; else { close_tmpfile(); goto bailout; } if (i < vt->nr_vm_event_items) { vt->vm_event_items[i] = start; strcpy(start, arglist[0]); start += strlen(arglist[0]) + 1; } } close_tmpfile(); vt->flags |= VM_EVENT; return TRUE; bailout: vt->nr_vm_event_items = -1; return FALSE; } /* * Dump the per-cpu offset values that are used to * resolve per-cpu symbol values. */ static void dump_per_cpu_offsets(void) { int c; char buf[BUFSIZE]; fprintf(fp, "PER-CPU OFFSET VALUES:\n"); for (c = 0; c < kt->cpus; c++) { sprintf(buf, "CPU %d", c); fprintf(fp, "%7s: %lx", buf, kt->__per_cpu_offset[c]); if (hide_offline_cpu(c)) fprintf(fp, " [OFFLINE]\n"); else fprintf(fp, "\n"); } } /* * Dump the value(s) of a page->flags bitmap. 
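 *
 *  Called with zero, every PG_ enumerator and its bit value is
 *  listed; called with a nonzero flags value, only the set bits are
 *  decoded, and "(none found)" is printed if no enumerator matches.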
*/ void dump_page_flags(ulonglong flags) { int c ATTRIBUTE_UNUSED; int sz, val, found, largest, longest, header_printed; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char header[BUFSIZE]; char *arglist[MAXARGS]; ulonglong tmpflag; found = longest = largest = header_printed = 0; open_tmpfile(); if (dump_enumerator_list("pageflags")) { rewind(pc->tmpfile); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (strstr(buf1, " = ")) { c = parse_line(buf1, arglist); if ((sz = strlen(arglist[0])) > longest) longest = sz; if (strstr(arglist[0], "PG_") && ((val = atoi(arglist[2])) > largest)) largest = val; } } } else error(FATAL, "enum pageflags does not exist in this kernel\n"); largest = (largest+1)/4 + 1; sprintf(header, "%s BIT VALUE\n", mkstring(buf1, longest, LJUST, "PAGE-FLAG")); rewind(pc->tmpfile); if (flags) fprintf(pc->saved_fp, "FLAGS: %llx\n", flags); fprintf(pc->saved_fp, "%s%s", flags ? " " : "", header); while (fgets(buf1, BUFSIZE, pc->tmpfile)) { if (strstr(buf1, " = ") && strstr(buf1, "PG_")) { c = parse_line(buf1, arglist); val = atoi(arglist[2]); tmpflag = 1ULL << val; if (!flags || (flags & tmpflag)) { fprintf(pc->saved_fp, "%s%s %2d %.*lx\n", flags ? " " : "", mkstring(buf2, longest, LJUST, arglist[0]), val, largest, (ulong)(1ULL << val)); if (flags & tmpflag) found++; } } } if (flags && !found) fprintf(pc->saved_fp, " (none found)\n"); close_tmpfile(); } /* * Support for slub.c slab cache. */ static void kmem_cache_init_slub(void) { if (vt->flags & KMEM_CACHE_INIT) return; if (CRASHDEBUG(1) && !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) error(WARNING, "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", vt->numnodes); if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); vt->flags |= KMEM_CACHE_INIT; } static void kmem_cache_list_common(struct meminfo *mi) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; if (mi->flags & GET_SLAB_ROOT_CACHES) cnt = get_kmem_cache_root_list(&cache_list); else cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { fprintf(fp, "%lx ", cache_list[i]); readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)\n"); fprintf(fp, "%s\n", buf); } FREEBUF(cache_list); } static void dump_kmem_cache_slub(struct meminfo *si) { int i; ulong name, oo; unsigned int size, objsize, objects, order, offset; char *reqname, *p1; char kbuf[BUFSIZE]; char buf[BUFSIZE]; if (INVALID_MEMBER(kmem_cache_node_nr_slabs)) { error(INFO, "option requires kmem_cache_node.nr_slabs member!\n" "(the kernel must be built with CONFIG_SLUB_DEBUG)\n"); return; } order = objects = 0; if (si->flags & GET_SLAB_ROOT_CACHES) si->cache_count = get_kmem_cache_root_list(&si->cache_list); else si->cache_count = get_kmem_cache_list(&si->cache_list); si->cache_buf = GETBUF(SIZE(kmem_cache)); if (VALID_MEMBER(page_objects) && OFFSET(page_objects) == OFFSET(page_inuse)) si->flags |= SLAB_BITFIELD; if (!si->reqname && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_page(si, kbuf))) { si->flags |= VERBOSE; si->slab = (ulong)si->spec_addr; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); goto bailout; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for 
address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; for (i = 0; i < si->cache_count; i++) { BZERO(si->cache_buf, SIZE(kmem_cache)); if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR|RETURN_PARTIAL)) goto next_cache; name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)"); if (reqname) { if (!STREQ(reqname, buf)) continue; fprintf(fp, "%s", kmem_cache_hdr); } if (ignore_cache(si, buf)) { DUMP_KMEM_CACHE_TAG(si->cache_list[i], buf, "[IGNORED]"); goto next_cache; } objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset)); if (VALID_MEMBER(kmem_cache_objects)) { objects = UINT(si->cache_buf + OFFSET(kmem_cache_objects)); order = UINT(si->cache_buf + OFFSET(kmem_cache_order)); } else if (VALID_MEMBER(kmem_cache_oo)) { oo = ULONG(si->cache_buf + OFFSET(kmem_cache_oo)); objects = oo_objects(oo); order = oo_order(oo); } else error(FATAL, "cannot determine " "kmem_cache objects/order values\n"); si->cache = si->cache_list[i]; si->curname = buf; si->objsize = objsize; si->size = size; si->objects = objects; si->slabsize = (PAGESIZE() << order); si->inuse = si->num_slabs = 0; si->slab_offset = offset; si->random = VALID_MEMBER(kmem_cache_random) ? ULONG(si->cache_buf + OFFSET(kmem_cache_random)) : 0; if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) || !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si)) si->flags |= SLAB_GATHER_FAILURE; /* accumulate children's slabinfo */ if (si->flags & GET_SLAB_ROOT_CACHES) { struct meminfo *mi; int j; char buf2[BUFSIZE]; mi = (struct meminfo *)GETBUF(sizeof(struct meminfo)); memcpy(mi, si, sizeof(struct meminfo)); mi->cache_count = get_kmem_cache_child_list(&mi->cache_list, si->cache_list[i]); if (!mi->cache_count) goto no_children; mi->cache_buf = GETBUF(SIZE(kmem_cache)); for (j = 0; j < mi->cache_count; j++) { BZERO(mi->cache_buf, SIZE(kmem_cache)); if (!readmem(mi->cache_list[j], KVADDR, mi->cache_buf, SIZE(kmem_cache), "kmem_cache buffer", RETURN_ON_ERROR|RETURN_PARTIAL)) continue; name = ULONG(mi->cache_buf + OFFSET(kmem_cache_name)); if (!read_string(name, buf2, BUFSIZE-1)) sprintf(buf2, "(unknown)"); objsize = UINT(mi->cache_buf + OFFSET(kmem_cache_objsize)); size = UINT(mi->cache_buf + OFFSET(kmem_cache_size)); offset = UINT(mi->cache_buf + OFFSET(kmem_cache_offset)); if (VALID_MEMBER(kmem_cache_objects)) { objects = UINT(mi->cache_buf + OFFSET(kmem_cache_objects)); order = UINT(mi->cache_buf + OFFSET(kmem_cache_order)); } else if (VALID_MEMBER(kmem_cache_oo)) { oo = ULONG(mi->cache_buf + OFFSET(kmem_cache_oo)); objects = oo_objects(oo); order = oo_order(oo); } else error(FATAL, "cannot determine " "kmem_cache objects/order values\n"); mi->cache = mi->cache_list[j]; mi->curname = buf2; mi->objsize = objsize; mi->size = size; mi->objects = objects; mi->slabsize = (PAGESIZE() << order); mi->inuse = mi->num_slabs = 0; mi->slab_offset = offset; mi->random = VALID_MEMBER(kmem_cache_random) ? 
ULONG(mi->cache_buf + OFFSET(kmem_cache_random)) : 0; if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, mi) || !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, mi)) { si->flags |= SLAB_GATHER_FAILURE; continue; } si->inuse += mi->inuse; si->free += mi->free; si->num_slabs += mi->num_slabs; if (CRASHDEBUG(1)) dump_kmem_cache_info(mi); } FREEBUF(mi->cache_buf); FREEBUF(mi->cache_list); no_children: FREEBUF(mi); } DUMP_KMEM_CACHE_INFO(); if (si->flags & SLAB_GATHER_FAILURE) { si->flags &= ~SLAB_GATHER_FAILURE; goto next_cache; } if (si->flags & ADDRESS_SPECIFIED) { if (!si->slab) si->slab = vaddr_to_slab(si->spec_addr); do_slab_slub(si, VERBOSE); } else if (si->flags & VERBOSE) { do_kmem_cache_slub(si); if (!reqname && ((i+1) < si->cache_count)) fprintf(fp, "%s", kmem_cache_hdr); } next_cache: if (reqname) break; } bailout: FREEBUF(si->cache_list); FREEBUF(si->cache_buf); } static ushort slub_page_objects(struct meminfo *si, ulong page) { ulong objects_vaddr; ushort objects; /* * Pre-2.6.27, the object count and order were fixed in the * kmem_cache structure. Now they may change, say if a high * order slab allocation fails, so the per-slab object count * is kept in the slab. */ if (VALID_MEMBER(page_objects)) { objects_vaddr = page + OFFSET(page_objects); if (si->flags & SLAB_BITFIELD) objects_vaddr += sizeof(ushort); if (!readmem(objects_vaddr, KVADDR, &objects, sizeof(ushort), "page.objects", RETURN_ON_ERROR)) return 0; /* * Strip page.frozen bit. */ if (si->flags & SLAB_BITFIELD) { if (__BYTE_ORDER == __LITTLE_ENDIAN) { objects <<= 1; objects >>= 1; } if (__BYTE_ORDER == __BIG_ENDIAN) objects >>= 1; } if (CRASHDEBUG(1) && (objects != si->objects)) error(NOTE, "%s: slab: %lx oo objects: %ld " "slab objects: %d\n", si->curname, page, si->objects, objects); if (objects == (ushort)(-1)) { error(INFO, "%s: slab: %lx invalid page.objects: -1\n", si->curname, page); return 0; } } else objects = (ushort)si->objects; return objects; } static short count_cpu_partial(struct meminfo *si, int cpu) { short cpu_partial_inuse, cpu_partial_objects, free_objects; ulong cpu_partial; free_objects = 0; if (VALID_MEMBER(kmem_cache_cpu_partial) && VALID_MEMBER(page_objects)) { readmem(ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu] + OFFSET(kmem_cache_cpu_partial), KVADDR, &cpu_partial, sizeof(ulong), "kmem_cache_cpu.partial", RETURN_ON_ERROR); while (cpu_partial) { if (!is_page_ptr(cpu_partial, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, cpu_partial); return 0; } if (!readmem(cpu_partial + OFFSET(page_inuse), KVADDR, &cpu_partial_inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) return 0; if (cpu_partial_inuse == -1) return 0; cpu_partial_objects = slub_page_objects(si, cpu_partial); if (!cpu_partial_objects) return 0; free_objects += cpu_partial_objects - cpu_partial_inuse; readmem(cpu_partial + OFFSET(page_next), KVADDR, &cpu_partial, sizeof(ulong), "page.next", RETURN_ON_ERROR); } } return free_objects; } /* * Emulate the total count calculation done by the * slab_objects() sysfs function in slub.c. */ static int get_kmem_cache_slub_data(long cmd, struct meminfo *si) { int i, n, node; ulong total_objects, total_slabs, free_objects; ulong cpu_slab_ptr, node_ptr, cpu_freelist, orig_slab; ulong node_nr_partial, node_nr_slabs, node_total_objects; int full_slabs, objects, node_total_avail; long p; short inuse; ulong *nodes, *per_cpu; struct node_table *nt; /* * nodes[n] is not being used (for now) * per_cpu[n] is a count of cpu_slab pages per node. 
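* The per-node arithmetic below mirrors the kernel's slab_objects()
* sysfs routine: full_slabs = nr_slabs - per_cpu[node] - nr_partial,
* and when kmem_cache_node.total_objects is unavailable each full slab
* is assumed to hold si->objects objects. Hypothetical example:
* nr_slabs = 10 with 3 partial slabs and 2 cpu_slab pages on a node
* leaves 5 full slabs, contributing 5 * si->objects to the total.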
*/ nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes); per_cpu = nodes + vt->numnodes; total_slabs = total_objects = free_objects = cpu_freelist = 0; node_total_avail = VALID_MEMBER(kmem_cache_node_total_objects) ? TRUE : FALSE; for (i = 0; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); if (!cpu_slab_ptr) continue; if ((node = page_to_nid(cpu_slab_ptr)) < 0) goto bailout; switch (cmd) { case GET_SLUB_OBJECTS: { /* For better error report, set cur slab to si->slab. */ orig_slab = si->slab; si->slab = cpu_slab_ptr; if (!readmem(cpu_slab_ptr + OFFSET(page_inuse), KVADDR, &inuse, sizeof(short), "page inuse", RETURN_ON_ERROR)) { si->slab = orig_slab; return FALSE; } objects = slub_page_objects(si, cpu_slab_ptr); if (!objects) { si->slab = orig_slab; return FALSE; } free_objects += objects - inuse; free_objects += count_free_objects(si, cpu_freelist); free_objects += count_cpu_partial(si, i); if (!node_total_avail) total_objects += inuse; total_slabs++; si->slab = orig_slab; } break; case GET_SLUB_SLABS: total_slabs++; break; } per_cpu[node]++; } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *) * nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (!node_ptr) continue; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) goto bailout; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) goto bailout; if (node_total_avail) { if (!readmem(node_ptr + OFFSET(kmem_cache_node_total_objects), KVADDR, &node_total_objects, sizeof(ulong), "kmem_cache_node total_objects", RETURN_ON_ERROR)) goto bailout; } switch (cmd) { case GET_SLUB_OBJECTS: if ((p = count_partial(node_ptr, si, &free_objects)) < 0) return FALSE; if (!node_total_avail) total_objects += p; total_slabs += node_nr_partial; break; case GET_SLUB_SLABS: total_slabs += node_nr_partial; break; } full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; objects = si->objects; switch (cmd) { case GET_SLUB_OBJECTS: if (node_total_avail) total_objects += node_total_objects; else total_objects += (full_slabs * objects); total_slabs += full_slabs; break; case GET_SLUB_SLABS: total_slabs += full_slabs; break; } if (!(vt->flags & CONFIG_NUMA)) break; } switch (cmd) { case GET_SLUB_OBJECTS: if (!node_total_avail) si->inuse = total_objects; else si->inuse = total_objects - free_objects; if (VALID_MEMBER(page_objects) && node_total_avail) si->free = free_objects; else si->free = (total_slabs * si->objects) - si->inuse; break; case GET_SLUB_SLABS: si->num_slabs = total_slabs; break; } FREEBUF(nodes); return TRUE; bailout: FREEBUF(nodes); return FALSE; } static void do_cpu_partial_slub(struct meminfo *si, int cpu) { ulong cpu_slab_ptr; void *partial; cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu]; readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_partial), KVADDR, &partial, sizeof(void *), "kmem_cache_cpu.partial", RETURN_ON_ERROR); fprintf(fp, "CPU %d PARTIAL:\n%s", cpu, partial ? "" : " (empty)\n"); /* * kmem_cache_cpu.partial points to the first page of per cpu partial * list. 
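* The chain is singly linked through page.next and is NULL-terminated:
*
*	kmem_cache_cpu.partial -> page.next -> page.next -> ... -> NULL
*
* so the loop below simply follows page.next, sanity-checking each
* entry with is_page_ptr() before handing it to do_slab_slub().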
*/ while (partial) { si->slab = (ulong)partial; if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, si->slab); break; } if (!do_slab_slub(si, VERBOSE)) break; readmem((ulong)partial + OFFSET(page_next), KVADDR, &partial, sizeof(void *), "page.next", RETURN_ON_ERROR); } } static void do_kmem_cache_slub(struct meminfo *si) { int i, n; ulong cpu_slab_ptr, node_ptr; ulong node_nr_partial, node_nr_slabs; ulong *per_cpu; struct node_table *nt; per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes); for (i = 0; i < kt->cpus; i++) { if (hide_offline_cpu(i)) { fprintf(fp, "CPU %d [OFFLINE]\n", i); continue; } cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[i]; fprintf(fp, "CPU %d KMEM_CACHE_CPU:\n %lx\n", i, cpu_slab_ptr); cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL); fprintf(fp, "CPU %d SLAB:\n%s", i, cpu_slab_ptr ? "" : " (empty)\n"); if (cpu_slab_ptr) { if ((n = page_to_nid(cpu_slab_ptr)) >= 0) per_cpu[n]++; si->slab = cpu_slab_ptr; if (!do_slab_slub(si, VERBOSE)) continue; } if (VALID_MEMBER(kmem_cache_cpu_partial)) do_cpu_partial_slub(si, i); if (received_SIGINT()) restart(0); } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *)* nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (node_ptr) { if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) break; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) break; } else node_nr_partial = node_nr_slabs = 0; fprintf(fp, "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n"); fprintf(fp, "%lx%s", node_ptr, space(VADDR_PRLEN > 8 ? 
2 : 10)); fprintf(fp, "%4d %5ld %7ld %7ld\n", n, node_nr_slabs, node_nr_partial, per_cpu[n]); do_node_lists_slub(si, node_ptr, n); if (!(vt->flags & CONFIG_NUMA)) break; } fprintf(fp, "\n"); FREEBUF(per_cpu); } #define DUMP_SLAB_INFO_SLUB() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ fprintf(fp, " %s %s %4d %5d %9d %4d\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \ node, objects, inuse, objects - inuse); \ } static int do_slab_slub(struct meminfo *si, int verbose) { physaddr_t paddr; ulong vaddr; ushort inuse, objects; ulong freelist, cpu_freelist, cpu_slab_ptr; int i, free_objects, cpu_slab, is_free, node; ulong p, q; #define SLAB_RED_ZONE 0x00000400UL ulong flags, red_left_pad; if (!si->slab) { if (CRASHDEBUG(1)) error(INFO, "-S option not supported for CONFIG_SLUB\n"); return FALSE; } if (!page_to_phys(si->slab, &paddr)) { error(INFO, "%s: invalid slab address: %lx\n", si->curname, si->slab); return FALSE; } node = page_to_nid(si->slab); vaddr = PTOV(paddr); if (verbose) fprintf(fp, " %s", slab_hdr); if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) return FALSE; if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist, sizeof(void *), "page.freelist", RETURN_ON_ERROR)) return FALSE; objects = slub_page_objects(si, si->slab); if (!objects) return FALSE; if (!verbose) { DUMP_SLAB_INFO_SLUB(); return TRUE; } cpu_freelist = 0; for (i = 0, cpu_slab = -1; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); if (!cpu_slab_ptr) continue; if (cpu_slab_ptr == si->slab) { cpu_slab = i; /* * Later slub scheme uses the per-cpu freelist * so count the free objects by hand. */ if ((free_objects = count_free_objects(si, cpu_freelist)) < 0) return FALSE; /* * If the object is freed on a foreign cpu, the * object is linked to page->freelist. */ if (freelist) free_objects += objects - inuse; inuse = objects - free_objects; break; } } DUMP_SLAB_INFO_SLUB(); fprintf(fp, " %s", free_inuse_hdr); #define PAGE_MAPPING_ANON 1 if (CRASHDEBUG(8)) { fprintf(fp, "< SLUB: free list START: >\n"); i = 0; for (q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q & PAGE_MAPPING_ANON) { fprintf(fp, "< SLUB: free list END: %lx (%d found) >\n", q, i); break; } fprintf(fp, " %lx\n", q); i++; } if (!q) fprintf(fp, "< SLUB: free list END (%d found) >\n", i); } red_left_pad = 0; if (VALID_MEMBER(kmem_cache_red_left_pad)) { flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags)); if (flags & SLAB_RED_ZONE) red_left_pad = ULONG(si->cache_buf + OFFSET(kmem_cache_red_left_pad)); } for (p = vaddr; p < vaddr + objects * si->size; p += si->size) { hq_open(); is_free = FALSE; /* Search for the object on both the freelist and cpu_freelist */ ulong lists[] = { freelist, cpu_freelist, }; for (i = 0; i < sizeof(lists) / sizeof(lists[0]); i++) { for (is_free = 0, q = lists[i]; q; q = get_freepointer(si, (void *)q)) { if (q == BADADDR) { hq_close(); return FALSE; } if (q & PAGE_MAPPING_ANON) break; if ((p + red_left_pad) == q) { is_free = TRUE; goto found_object; } if (!hq_enter(q)) { hq_close(); error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n", si->curname, si->slab, q); return FALSE; } } } found_object: hq_close(); if (si->flags & ADDRESS_SPECIFIED) { if ((si->spec_addr < p) || (si->spec_addr >= (p + si->size))) { if (!(si->flags & VERBOSE)) continue; } } fprintf(fp, " %s%lx%s", is_free ? " " : "[", pc->flags2 & REDZONE ? p : p + red_left_pad, is_free ? 
" " : "]"); if (is_free && (cpu_slab >= 0)) fprintf(fp, "(cpu %d cache)", cpu_slab); fprintf(fp, "\n"); } return TRUE; } static int count_free_objects(struct meminfo *si, ulong freelist) { int c; ulong q; hq_open(); c = 0; for (q = freelist; q; q = get_freepointer(si, (void *)q)) { if (q & PAGE_MAPPING_ANON) break; if (!hq_enter(q)) { error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n", si->curname, si->slab, q); break; } c++; } hq_close(); return c; } static ulong freelist_ptr(struct meminfo *si, ulong ptr, ulong ptr_addr) { if (si->random) /* CONFIG_SLAB_FREELIST_HARDENED */ return (ptr ^ si->random ^ ptr_addr); else return ptr; } static ulong get_freepointer(struct meminfo *si, void *object) { ulong vaddr, nextfree; vaddr = (ulong)(object + si->slab_offset); if (!readmem(vaddr, KVADDR, &nextfree, sizeof(void *), "get_freepointer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: slab: %lx invalid freepointer: %lx\n", si->curname, si->slab, vaddr); return BADADDR; } return (freelist_ptr(si, nextfree, vaddr)); } static void do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node) { ulong next, last, list_head, flags; int first; if (!node_ptr) return; list_head = node_ptr + OFFSET(kmem_cache_node_partial); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node partial", RETURN_ON_ERROR)) return; fprintf(fp, "NODE %d PARTIAL:\n%s", node, next == list_head ? " (empty)\n" : ""); first = 0; while (next != list_head) { si->slab = last = next - OFFSET(page_lru); if (first++ == 0) fprintf(fp, " %s", slab_hdr); if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, si->slab); return; } if (!do_slab_slub(si, !VERBOSE)) return; if (received_SIGINT()) restart(0); if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) return; if (!IS_KVADDR(next) || ((next != list_head) && !is_page_ptr(next - OFFSET(page_lru), NULL))) { error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", si->curname, last, next); return; } } #define SLAB_STORE_USER (0x00010000UL) flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags)); if (INVALID_MEMBER(kmem_cache_node_full) || !(flags & SLAB_STORE_USER)) { fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node); return; } list_head = node_ptr + OFFSET(kmem_cache_node_full); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node full", RETURN_ON_ERROR)) return; fprintf(fp, "NODE %d FULL:\n%s", node, next == list_head ? 
" (empty)\n" : ""); first = 0; while (next != list_head) { si->slab = next - OFFSET(page_lru); if (first++ == 0) fprintf(fp, " %s", slab_hdr); if (!is_page_ptr(si->slab, NULL)) { error(INFO, "%s: invalid full list slab pointer: %lx\n", si->curname, si->slab); return; } if (!do_slab_slub(si, !VERBOSE)) return; if (received_SIGINT()) restart(0); if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) return; if (!IS_KVADDR(next)) { error(INFO, "%s: full list slab: %lx page.lru.next: %lx\n", si->curname, si->slab, next); return; } } } static char * is_kmem_cache_addr_common(ulong vaddr, char *kbuf) { int i, cnt; ulong *cache_list; ulong name; int found; cnt = get_kmem_cache_list(&cache_list); for (i = 0, found = FALSE; i < cnt; i++) { if (cache_list[i] != vaddr) continue; if (!readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", RETURN_ON_ERROR)) break; if (!read_string(name, kbuf, BUFSIZE-1)) sprintf(kbuf, "(unknown)"); found = TRUE; break; } FREEBUF(cache_list); return (found ? kbuf : NULL); } /* * Kernel-config-neutral page-to-node evaluator. */ static int page_to_nid(ulong page) { int i; physaddr_t paddr; struct node_table *nt; physaddr_t end_paddr; if (!page_to_phys(page, &paddr)) { error(INFO, "page_to_nid: invalid page: %lx\n", page); return -1; } for (i = 0; i < vt->numnodes; i++) { nt = &vt->node_table[i]; end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((paddr >= nt->start_paddr) && (paddr < end_paddr)) return i; } error(INFO, "page_to_nid: cannot determine node for pages: %lx\n", page); return -1; } /* * Allocate and fill the passed-in buffer with a list of * the current kmem_cache structures. */ static int get_kmem_cache_list(ulong **cache_buf) { int cnt; ulong vaddr; struct list_data list_data, *ld; get_symbol_data("slab_caches", sizeof(void *), &vaddr); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_list); ld->end = symbol_value("slab_caches"); if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } static int get_kmem_cache_root_list(ulong **cache_buf) { int cnt; ulong vaddr; struct list_data list_data, *ld; get_symbol_data("slab_root_caches", sizeof(void *), &vaddr); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_memcg_params) + OFFSET(memcg_cache_params___root_caches_node); ld->end = symbol_value("slab_root_caches"); if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } static int get_kmem_cache_child_list(ulong **cache_buf, ulong root) { int cnt; ulong vaddr, children; struct list_data list_data, *ld; children = root + OFFSET(kmem_cache_memcg_params) + OFFSET(memcg_cache_params_children); readmem(children, KVADDR, &vaddr, sizeof(ulong), "kmem_cache.memcg_params.children", FAULT_ON_ERROR); /* * When no children, since there is the difference of offset * of children list between root and child, do_list returns * an incorrect cache_buf[0]. So we determine wheather it has * children or not with the value of list_head.next. 
*/ if (children == vaddr) return 0; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_memcg_params) + OFFSET(memcg_cache_params_children_node); ld->end = children; if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } /* * Get the address of the head page of a compound page. */ static ulong compound_head(ulong page) { ulong flags, first_page, compound_head; first_page = page; if (VALID_MEMBER(page_compound_head)) { if (readmem(page+OFFSET(page_compound_head), KVADDR, &compound_head, sizeof(ulong), "page.compound_head", RETURN_ON_ERROR)) { if (compound_head & 1) first_page = compound_head - 1; } } else if (readmem(page+OFFSET(page_flags), KVADDR, &flags, sizeof(ulong), "page.flags", RETURN_ON_ERROR)) { if ((flags & vt->PG_head_tail_mask) == vt->PG_head_tail_mask) readmem(page+OFFSET(page_first_page), KVADDR, &first_page, sizeof(ulong), "page.first_page", RETURN_ON_ERROR); } return first_page; } long count_partial(ulong node, struct meminfo *si, ulong *free) { ulong list_head, next, last; short inuse, objects; ulong total_inuse; ulong count = 0; total_inuse = 0; list_head = node + OFFSET(kmem_cache_node_partial); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node.partial", RETURN_ON_ERROR)) return -1; hq_open(); while (next != list_head) { if (!readmem(next - OFFSET(page_lru) + OFFSET(page_inuse), KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) { hq_close(); return -1; } last = next - OFFSET(page_lru); if (inuse == -1) { error(INFO, "%s: partial list slab: %lx invalid page.inuse: -1\n", si->curname, last); break; } total_inuse += inuse; if (VALID_MEMBER(page_objects)) { objects = slub_page_objects(si, last); if (!objects) { hq_close(); return -1; } *free += objects - inuse; } if (!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) { hq_close(); return -1; } if (!IS_KVADDR(next) || ((next != list_head) && !is_page_ptr(next - OFFSET(page_lru), NULL))) { error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", si->curname, last, next); break; } /* * Keep track of the last 1000 entries, and check * whether the list has recursed back onto itself. 
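* (hq_close()/hq_open() flushes the hash queue, so the hq_enter() test
* below detects duplicates within a sliding window of up to 1000 list
* entries; a shorter cycle is still caught when the revisited page
* reappears inside the current window.)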
*/ if ((++count % 1000) == 0) { hq_close(); hq_open(); } if (!hq_enter(next)) { error(INFO, "%s: partial list slab: %lx duplicate slab entry: %lx\n", si->curname, last, next); hq_close(); return -1; } } hq_close(); return total_inuse; } char * is_slab_page(struct meminfo *si, char *buf) { int i, cnt; ulong page_slab, page_flags, name; ulong *cache_list; char *retval; if (!(vt->flags & KMALLOC_SLUB)) return NULL; if (!is_page_ptr((ulong)si->spec_addr, NULL)) return NULL; if (!readmem(si->spec_addr + OFFSET(page_flags), KVADDR, &page_flags, sizeof(ulong), "page.flags", RETURN_ON_ERROR|QUIET)) return NULL; if (!(page_flags & (1 << vt->PG_slab))) return NULL; if (!readmem(si->spec_addr + OFFSET(page_slab), KVADDR, &page_slab, sizeof(ulong), "page.slab", RETURN_ON_ERROR|QUIET)) return NULL; retval = NULL; cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { if (page_slab == cache_list[i]) { if (!readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", QUIET|RETURN_ON_ERROR)) goto bailout; if (!read_string(name, buf, BUFSIZE-1)) goto bailout; retval = buf; break; } } bailout: FREEBUF(cache_list); return retval; } /* * Figure out which of the kmem_cache.cpu_slab declarations * is used by this kernel, and return a pointer to the slab * page being used. Return the kmem_cache_cpu.freelist pointer * if requested. */ static ulong get_cpu_slab_ptr(struct meminfo *si, int cpu, ulong *cpu_freelist) { ulong cpu_slab_ptr, page, freelist; if (cpu_freelist) *cpu_freelist = 0; switch (vt->cpu_slab_type) { case TYPE_CODE_STRUCT: cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab) + OFFSET(kmem_cache_cpu_page)); if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) *cpu_freelist = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab) + OFFSET(kmem_cache_cpu_freelist)); break; case TYPE_CODE_ARRAY: cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu)); if (cpu_slab_ptr && cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) { if (readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_freelist), KVADDR, &freelist, sizeof(void *), "kmem_cache_cpu.freelist", RETURN_ON_ERROR)) *cpu_freelist = freelist; } if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) { if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page), KVADDR, &page, sizeof(void *), "kmem_cache_cpu.page", RETURN_ON_ERROR)) cpu_slab_ptr = 0; else cpu_slab_ptr = page; } break; case TYPE_CODE_PTR: cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu]; if (cpu_slab_ptr && cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) { if (readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_freelist), KVADDR, &freelist, sizeof(void *), "kmem_cache_cpu.freelist", RETURN_ON_ERROR)) *cpu_freelist = freelist; } if (cpu_slab_ptr && VALID_MEMBER(kmem_cache_cpu_page)) { if (!readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_page), KVADDR, &page, sizeof(void *), "kmem_cache_cpu.page", RETURN_ON_ERROR)) cpu_slab_ptr = 0; else cpu_slab_ptr = page; } break; default: cpu_slab_ptr = 0; error(FATAL, "cannot determine location of kmem_cache.cpu_slab page\n"); } return cpu_slab_ptr; } /* * In 2.6.27 kmem_cache.order and kmem_cache.objects were merged * into the kmem_cache.oo, a kmem_cache_order_objects structure. * oo_order() and oo_objects() emulate the kernel functions * of the same name. 
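* The packing is "order in the upper bits, object count in the low
* 16 bits". Hypothetical example: an oo value of 0x00030020 decodes
* as order 3 (a 2^3 == 8 page slab) holding 0x20 (32) objects.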
*/ static unsigned int oo_order(ulong oo) { return (oo >> 16); } static unsigned int oo_objects(ulong oo) { return (oo & ((1 << 16) - 1)); } #ifdef NOT_USED ulong slab_to_kmem_cache_node(struct meminfo *si, ulong slab_page) { int node; ulong node_ptr; if (vt->flags & CONFIG_NUMA) { node = page_to_nid(slab_page); node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *)*node)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); return node_ptr; } ulong get_kmem_cache_by_name(char *request) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; char *cache_buf; ulong found; cnt = get_kmem_cache_list(&cache_list); cache_buf = GETBUF(SIZE(kmem_cache)); found = 0; for (i = 0; i < cnt; i++) { readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) continue; if (STREQ(buf, request)) { found = cache_list[i]; break; } } FREEBUF(cache_list); FREEBUF(cache_buf); return found; } #endif /* NOT_USED */ crash-7.2.8/sparc64.c0000664000000000000000000007601613614623427013000 0ustar rootroot/* sparc64.c - core analysis suite * * Copyright (C) 2016 Oracle Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifdef SPARC64 #include "defs.h" #include #include #include #include /* TT (Trap Type) is encoded into magic pt_regs field */ #define MAGIC_TT_MASK (0x1ff) static const unsigned long not_valid_pte = ~0UL; static struct machine_specific sparc64_machine_specific; static unsigned long sparc64_ksp_offset; static unsigned long __va(unsigned long paddr) { return paddr + PAGE_OFFSET; } static unsigned long __pa(unsigned long vaddr) { return vaddr - PAGE_OFFSET; } static void sparc64_parse_cmdline_args(void) { } /* This interface might not be required. */ static void sparc64_clear_machdep_cache(void) { } /* * "mach" command output. 
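* A trimmed sketch of the layout produced below (values are
* illustrative only):
*
*	 MACHINE TYPE: sparc64
*	  MEMORY SIZE: 16 GB
*	         CPUS: 64
*	    PAGE SIZE: 8192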
*/ static void sparc64_display_machine_stats(void) { int c; struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %ld\n", PAGE_SIZE); fprintf(fp, " KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, " KERNEL VMALLOC BASE: %lx\n", SPARC64_VMALLOC_START); fprintf(fp, " KERNEL MODULES BASE: %lx\n", SPARC64_MODULES_VADDR); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); fprintf(fp, "HARD IRQ STACK SIZE: %ld\n", THREAD_SIZE); fprintf(fp, " HARD IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->hardirq_ctx[c]) continue; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->hardirq_ctx[c]); } fprintf(fp, "SOFT IRQ STACK SIZE: %ld\n", THREAD_SIZE); fprintf(fp, " SOFT IRQ STACKS:\n"); for (c = 0; c < kt->cpus; c++) { if (!tt->softirq_ctx[c]) continue; sprintf(buf, "CPU %d", c); fprintf(fp, "%19s: %lx\n", buf, tt->softirq_ctx[c]); } } static void sparc64_display_memmap(void) { unsigned long iomem_resource; unsigned long resource; unsigned long start, end, nameptr; int size = STRUCT_SIZE("resource"); char *buf; char name[32]; buf = GETBUF(size); iomem_resource = symbol_value("iomem_resource"); readmem(iomem_resource + MEMBER_OFFSET("resource", "child"), KVADDR, &resource, sizeof(resource), "iomem_resource", FAULT_ON_ERROR); fprintf(fp, " PHYSICAL ADDRESS RANGE TYPE\n"); while (resource) { readmem(resource, KVADDR, buf, size, "resource", FAULT_ON_ERROR); start = ULONG(buf + MEMBER_OFFSET("resource", "start")); end = ULONG(buf + MEMBER_OFFSET("resource", "end")); nameptr = ULONG(buf + MEMBER_OFFSET("resource", "name")); readmem(nameptr, KVADDR, name, sizeof(name), "resource.name", FAULT_ON_ERROR); fprintf(fp, "%016lx - %016lx %-32s\n", start, end, name); resource = ULONG(buf + MEMBER_OFFSET("resource", "sibling")); } FREEBUF(buf); } static void sparc64_cmd_mach(void) { int c; int mflag = 0; while ((c = getopt(argcnt, args, "cdmx")) != EOF) { switch (c) { case 'm': mflag++; sparc64_display_memmap(); break; case 'c': fprintf(fp, "SPARC64: '-%c' option is not supported\n", c); return; case 'd': case 'x': /* Just ignore these */ break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (!mflag) sparc64_display_machine_stats(); } struct sparc64_mem_ranges { unsigned long start; unsigned long end; }; #define NR_PHYS_RANGES (128) static unsigned int nr_phys_ranges; struct sparc64_mem_ranges phys_ranges[NR_PHYS_RANGES]; #define NR_IMAGE_RANGES (16) static unsigned int nr_kimage_ranges; struct sparc64_mem_ranges kimage_ranges[NR_IMAGE_RANGES]; /* There are three live cases: * one) normal kernel * two) --load-panic kernel * and * three) --load kernel * One and two can be treated the same because the kernel is physically * contiguous. Three isn't contiguous. The kernel is allocated in order * nine allocation pages. We don't handle case three yet. 
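* (Handling case three would require tracking each scattered
* allocation as its own range; the phys_ranges[]/kimage_ranges[]
* tables above, filled from /proc/iomem below, can only describe
* physically contiguous spans.)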
*/ static int sparc64_phys_live_valid(unsigned long paddr) { unsigned int nr; int rc = FALSE; for (nr = 0; nr != nr_phys_ranges; nr++) { if (paddr >= phys_ranges[nr].start && paddr < phys_ranges[nr].end) { rc = TRUE; break; } } return rc; } static int sparc64_phys_kdump_valid(unsigned long paddr) { return TRUE; } static int sparc64_verify_paddr(unsigned long paddr) { int rc; if (ACTIVE()) rc = sparc64_phys_live_valid(paddr); else rc = sparc64_phys_kdump_valid(paddr); return rc; } static void sparc64_phys_base_live_limits(void) { if (nr_phys_ranges >= NR_PHYS_RANGES) error(FATAL, "sparc64_phys_base_live_limits: " "NR_PHYS_RANGES exceeded.\n"); else if (nr_kimage_ranges >= NR_IMAGE_RANGES) error(FATAL, "sparc64_phys_base_live_limits: " "NR_IMAGE_RANGES exceeded.\n"); } static void sparc64_phys_base_live_valid(void) { if (!nr_phys_ranges) error(FATAL, "No physical memory ranges."); else if (!nr_kimage_ranges) error(FATAL, "No vmlinux memory ranges."); } static void sparc64_phys_base_live(void) { char line[BUFSIZE]; FILE *fp; fp = fopen("/proc/iomem", "r"); if (fp == NULL) error(FATAL, "Can't open /proc/iomem. We can't proceed."); while (fgets(line, sizeof(line), fp) != 0) { unsigned long start, end; int count, consumed; char *ch; sparc64_phys_base_live_limits(); count = sscanf(line, "%lx-%lx : %n", &start, &end, &consumed); if (count != 2) continue; ch = line + consumed; if (memcmp(ch, "System RAM\n", 11) == 0) { end = end + 1; phys_ranges[nr_phys_ranges].start = start; phys_ranges[nr_phys_ranges].end = end; nr_phys_ranges++; } else if ((memcmp(ch, "Kernel code\n", 12) == 0) || (memcmp(ch, "Kernel data\n", 12) == 0) || (memcmp(ch, "Kernel bss\n", 11) == 0)) { kimage_ranges[nr_kimage_ranges].start = start; kimage_ranges[nr_kimage_ranges].end = end; nr_kimage_ranges++; } } (void) fclose(fp); sparc64_phys_base_live_valid(); } static void sparc64_phys_base_kdump(void) { } static void sparc64_phys_base(void) { if (ACTIVE()) return sparc64_phys_base_live(); else return sparc64_phys_base_kdump(); } static unsigned long kva_start, kva_end; static unsigned long kpa_start, kpa_end; static void sparc64_kimage_limits_live(void) { kpa_start = kimage_ranges[0].start; kpa_end = kpa_start + (kva_end - kva_start); } static void sparc64_kimage_limits_kdump(void) { unsigned long phys_base; if (DISKDUMP_DUMPFILE()) { if (diskdump_phys_base(&phys_base)) { kpa_start = phys_base | (kva_start & 0xffff); kpa_end = kpa_start + (kva_end - kva_start); return; } } fprintf(stderr, "Can't determine phys_base\n"); } static unsigned long kimage_va_translate(unsigned long addr) { unsigned long paddr = (addr - kva_start) + kpa_start; return paddr; } static int kimage_va_range(unsigned long addr) { if (addr >= kva_start && addr < kva_end) return TRUE; else return FALSE; } static void sparc64_kimage_limits(void) { kva_start = symbol_value("_stext"); kva_end = symbol_value("_end"); if (ACTIVE()) sparc64_kimage_limits_live(); else sparc64_kimage_limits_kdump(); } static int sparc64_is_linear_mapped(unsigned long vaddr) { return (vaddr & PAGE_OFFSET) == PAGE_OFFSET; } static unsigned long pte_to_pa(unsigned long pte) { unsigned long paddr = pte & _PAGE_PFN_MASK; return paddr; } static unsigned long fetch_page_table_level(unsigned long pte_kva, unsigned long vaddr, unsigned int shift, unsigned int mask, const char *name, int verbose) { unsigned int pte_index = (vaddr >> shift) & mask; unsigned long page_table[PTES_PER_PAGE]; unsigned long pte = 0UL; int rc; rc = readmem(pte_kva, KVADDR, page_table, sizeof(page_table), (char *)name, RETURN_ON_ERROR); if (!rc) goto out; pte = page_table[pte_index]; if (verbose) fprintf(fp, "%s(0x%.16lx) fetch of pte @index[0x%.4x]=0x%.16lx\n", name, pte_kva, pte_index, pte); out: return pte; } static unsigned long pmd_is_huge(unsigned long pmd, unsigned long vaddr, int verbose) { unsigned long hpage_mask; unsigned long paddr = 0UL; if ((pmd & PAGE_PMD_HUGE) == 0UL) goto out; hpage_mask = ~((1UL << HPAGE_SHIFT) - 1UL); paddr = pte_to_pa(pmd) + (vaddr & ~hpage_mask); if (verbose) fprintf(fp, "Huge Page/THP pmd=0x%.16lx paddr=0x%.16lx\n", pmd, paddr); out: return paddr; } static unsigned long sparc64_page_table_walk(unsigned long pgd, unsigned long vaddr, int verbose) { static const char *pgd_text = "pgd fetch"; static const char *pud_text = "pud fetch"; static const char *pmd_text = "pmd fetch"; static const char *pte_text = "pte fetch"; unsigned long kva = pgd; unsigned long paddr; unsigned long pte; if (!sparc64_is_linear_mapped(kva)) error(FATAL, "sparc64_page_table_walk: pgd must be identity mapped" " but isn't (0x%lx).", pgd); pte = fetch_page_table_level(kva, vaddr, PGDIR_SHIFT, PTES_PER_PAGE_MASK, pgd_text, verbose); if (!pte) goto bad; kva = __va(pte); pte = fetch_page_table_level(kva, vaddr, PUD_SHIFT, PTES_PER_PAGE_MASK, pud_text, verbose); if (!pte) goto bad; kva = __va(pte); pte = fetch_page_table_level(kva, vaddr, PMD_SHIFT, PTES_PER_PAGE_MASK, pmd_text, verbose); if (!pte) goto bad; /* Check for a huge/THP page */ paddr = pmd_is_huge(pte, vaddr, verbose); if (paddr) goto out; kva = __va(pte); pte = fetch_page_table_level(kva, vaddr, PAGE_SHIFT, PTRS_PER_PTE - 1, pte_text, verbose); if ((pte & _PAGE_VALID) == 0UL) goto bad; paddr = pte_to_pa(pte); paddr = paddr | (vaddr & ~PAGE_MASK); out: return paddr; bad: return not_valid_pte; } static void sparc64_init_kernel_pgd(void) { int cpu, rc; ulong v; v = symbol_value("init_mm"); rc = readmem(v + OFFSET(mm_struct_pgd), KVADDR, &v, sizeof(v), "init_mm.pgd", RETURN_ON_ERROR); if (!rc) { error(WARNING, "Cannot determine pgd location.\n"); goto out; } for (cpu = 0; cpu < NR_CPUS; cpu++) vt->kernel_pgd[cpu] = v; out: return; } static int sparc64_get_smp_cpus(void) { int ncpu = MAX(get_cpus_online(), get_highest_cpu_online() + 1); return ncpu; } static ulong sparc64_vmalloc_start(void) { return SPARC64_VMALLOC_START; } int sparc64_IS_VMALLOC_ADDR(ulong vaddr) { return (vaddr >= SPARC64_VMALLOC_START) && (vaddr < machdep->machspec->vmalloc_end); } static void pt_clear_cache(void) { machdep->last_pgd_read = 0UL; machdep->last_pud_read = 0UL; machdep->last_pmd_read = 0UL; machdep->last_ptbl_read = 0UL; } static void pt_level_alloc(char **lvl, char *name) { size_t sz = PAGE_SIZE; void *pointer = malloc(sz); if (!pointer) error(FATAL, name); *lvl = pointer; } static int sparc64_verify_symbol(const char *name, unsigned long value, char type) { return TRUE; } static int sparc64_verify_line_number(unsigned long pc, unsigned long low, unsigned long high) { return TRUE; } static int sparc64_dis_filter(ulong vaddr, char *inbuf, unsigned int radix) { return FALSE; } struct eframe { struct sparc_stackf sf; struct pt_regs pr; }; /* Need to handle hardirq and softirq stacks. 
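* kstack_valid() below only accepts a 16-byte aligned sp within
* [stackbase + sizeof(thread_info), stacktop - sizeof(struct eframe)],
* so frames living on the separate hardirq/softirq stacks
* (tt->hardirq_ctx[cpu] / tt->softirq_ctx[cpu]) are currently rejected.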
*/ static int kstack_valid(struct bt_info *bt, unsigned long sp) { unsigned long thread_info = SIZE(thread_info); unsigned long base = bt->stackbase + thread_info; unsigned long top = bt->stacktop - sizeof(struct eframe); int rc = FALSE; if (sp & (16U - 1)) goto out; if ((sp >= base) && (sp <= top)) rc = TRUE; out: return rc; } static void sparc64_print_eframe(struct bt_info *bt) { struct eframe k_entry; struct pt_regs *regs = &k_entry.pr; unsigned long efp; unsigned int tt; int rc; struct reg_window window; unsigned long rw; efp = bt->stkptr + STACK_BIAS - TRACEREG_SZ - STACKFRAME_SZ; if (!kstack_valid(bt, efp)) goto try_stacktop; rc = readmem(efp, KVADDR, &k_entry, sizeof(k_entry), "Stack frame and pt_regs.", RETURN_ON_ERROR); if (rc && ((regs->magic & ~MAGIC_TT_MASK) == PT_REGS_MAGIC)) goto print_frame; try_stacktop: efp = bt->stacktop - sizeof(struct eframe); rc = readmem(efp, KVADDR, &k_entry, sizeof(k_entry), "Stack frame and pt_regs.", RETURN_ON_ERROR); if (!rc) goto out; /* Kernel thread or not in kernel any longer? */ if ((regs->magic & ~MAGIC_TT_MASK) != PT_REGS_MAGIC) goto out; print_frame: tt = regs->magic & MAGIC_TT_MASK; fprintf(fp, "TSTATE=0x%lx TT=0x%x TPC=0x%lx TNPC=0x%lx\n", regs->tstate, tt, regs->tpc, regs->tnpc); fprintf(fp, " g0=0x%.16lx g1=0x%.16lx g2=0x%.16lx\n", regs->u_regs[0], regs->u_regs[1], regs->u_regs[2]); fprintf(fp, " g3=0x%.16lx g4=0x%.16lx g5=0x%.16lx\n", regs->u_regs[3], regs->u_regs[4], regs->u_regs[5]); #define ___INS (8) fprintf(fp, " g6=0x%.16lx g7=0x%.16lx\n", regs->u_regs[6], regs->u_regs[7]); fprintf(fp, " o0=0x%.16lx o1=0x%.16lx o2=0x%.16lx\n", regs->u_regs[___INS+0], regs->u_regs[___INS+1], regs->u_regs[___INS+2]); fprintf(fp, " o3=0x%.16lx o4=0x%.16lx o5=0x%.16lx\n", regs->u_regs[___INS+3], regs->u_regs[___INS+4], regs->u_regs[___INS+5]); fprintf(fp, " sp=0x%.16lx ret_pc=0x%.16lx\n", regs->u_regs[___INS+6], regs->u_regs[___INS+7]); #undef ___INS rw = bt->stkptr + STACK_BIAS; if (!kstack_valid(bt, rw)) goto out; rc = readmem(rw, KVADDR, &window, sizeof(window), "Register window.", RETURN_ON_ERROR); if (!rc) goto out; fprintf(fp, " l0=0x%.16lx l1=0x%.16lx l2=0x%.16lx\n", window.locals[0], window.locals[1], window.locals[2]); fprintf(fp, " l3=0x%.16lx l4=0x%.16lx l5=0x%.16lx\n", window.locals[3], window.locals[4], window.locals[5]); fprintf(fp, " l6=0x%.16lx l7=0x%.16lx\n", window.locals[6], window.locals[7]); fprintf(fp, " i0=0x%.16lx i1=0x%.16lx i2=0x%.16lx\n", window.ins[0], window.ins[1], window.ins[2]); fprintf(fp, " i3=0x%.16lx i4=0x%.16lx i5=0x%.16lx\n", window.ins[3], window.ins[4], window.ins[5]); fprintf(fp, " i6=0x%.16lx i7=0x%.16lx\n", window.ins[6], window.ins[7]); out: return; } static int sparc64_eframe_search(struct bt_info *bt) { sparc64_print_eframe(bt); return 0; } static void sparc64_print_frame(struct bt_info *bt, int cnt, unsigned long ip, unsigned long ksp) { char *symbol = closest_symbol(ip); fprintf(fp, "#%d [%lx] %s at %lx\n", cnt, ksp, symbol, ip); if (bt->flags & BT_LINE_NUMBERS) { char buf[BUFSIZE]; get_line_number(ip, buf, FALSE); if (strlen(buf)) fprintf(fp, "\t%s\n", buf); } } static void sparc64_back_trace(struct bt_info *bt) { unsigned long ip = bt->instptr; unsigned long ksp = bt->stkptr; struct reg_window window; int cnt = 0; int rc; do { if (!kstack_valid(bt, ksp + STACK_BIAS)) break; rc = readmem(ksp + STACK_BIAS, KVADDR, &window, sizeof(window), "KSP window fetch.", RETURN_ON_ERROR); if (!rc) goto out; sparc64_print_frame(bt, cnt, ip, ksp); ksp = window.ins[6]; ip = window.ins[7]; cnt++; } while (cnt != 50); 
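	/*
	 * Each sparc64 stack frame saves the caller's frame pointer and
	 * return address in the register window's %i6/%i7 slots
	 * (window.ins[6] and window.ins[7]), so each iteration above
	 * rewinds exactly one call; the cnt != 50 test is a runaway
	 * guard against corrupt stacks.
	 */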
sparc64_print_eframe(bt); out: return; } static ulong sparc64_processor_speed(void) { int cpu; unsigned long clock_tick; struct syment *sp; if (!MEMBER_EXISTS("cpuinfo_sparc", "clock_tick")) { error(WARNING, "sparc64 expects clock_tick\n"); return 0UL; } sp = per_cpu_symbol_search("__cpu_data"); if (!sp) return 0UL; for (cpu = 0; cpu < kt->cpus; cpu++) { if (!in_cpu_map(ONLINE, cpu)) continue; if (!readmem(sp->value + kt->__per_cpu_offset[cpu] + MEMBER_OFFSET("cpuinfo_sparc", "clock_tick"), KVADDR, &clock_tick, sizeof(clock_tick), "clock_tick", QUIET|RETURN_ON_ERROR)) continue; return clock_tick/1000000; } return 0UL; } static ulong sparc64_get_task_pgd(ulong task) { struct task_context *tc = task_to_context(task); ulong pgd = NO_TASK; if (!tc) goto out; readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(unsigned long), "User pgd.", RETURN_ON_ERROR); out: return pgd; } static int sparc64_uvtop(struct task_context *tc, ulong va, physaddr_t *ppaddr, int verbose) { unsigned long pgd = sparc64_get_task_pgd(tc->task); unsigned long paddr; int rc = FALSE; if (pgd == NO_TASK) goto out; paddr = sparc64_page_table_walk(pgd, va, verbose); /* For now not_valid_pte skips checking for swap pte. */ if (paddr == not_valid_pte) { *ppaddr = 0UL; goto out; } *ppaddr = paddr; rc = TRUE; out: return rc; } static unsigned long sparc64_vmalloc_translate(unsigned long vaddr, int verbose) { unsigned long paddr = sparc64_page_table_walk(vt->kernel_pgd[0], vaddr, verbose); return paddr; } static unsigned long sparc64_linear_translate(unsigned long vaddr) { unsigned long paddr = __pa(vaddr); if (sparc64_verify_paddr(paddr) == FALSE) error(FATAL, "sparc64_linear_translate: This physical address" " (0x%lx) is invalid.", paddr); return paddr; } static int sparc64_is_vmalloc_mapped(unsigned long vaddr) { struct machine_specific *ms = &sparc64_machine_specific; int rc = 0; if ((vaddr >= SPARC64_MODULES_VADDR && vaddr < SPARC64_MODULES_END) || (vaddr >= SPARC64_VMALLOC_START && vaddr < ms->vmalloc_end)) rc = 1; return rc; } static int sparc64_is_kvaddr(ulong vaddr) { return kimage_va_range(vaddr) || sparc64_is_linear_mapped(vaddr) || sparc64_is_vmalloc_mapped(vaddr); } static int sparc64_kvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { unsigned long phys_addr; int rc = FALSE; if (kimage_va_range(vaddr)) { phys_addr = kimage_va_translate(vaddr); } else if (sparc64_is_vmalloc_mapped(vaddr)) { phys_addr = sparc64_vmalloc_translate(vaddr, verbose); if (phys_addr == not_valid_pte) goto out; } else if (sparc64_is_linear_mapped(vaddr)) { phys_addr = sparc64_linear_translate(vaddr); } else { error(WARNING, "This is an invalid kernel virtual address=0x%lx.", vaddr); goto out; } *paddr = phys_addr; rc = TRUE; out: return rc; } static int sparc64_is_task_addr(ulong task) { int rc = FALSE; int cpu; if (sparc64_is_linear_mapped(task) || kimage_va_range(task)) rc = TRUE; else { for (cpu = 0; cpu < kt->cpus; cpu++) if (task == tt->idle_threads[cpu]) { rc = TRUE; break; } } return rc; } static int sparc64_is_uvaddr(ulong vaddr, struct task_context *tc) { return vaddr < SPARC64_USERSPACE_TOP; } static const char *pte_page_size(unsigned long pte) { static const char *_4Mb = "4Mb"; static const char *_64Kb = "64Kb"; static const char *_8Kb = "8Kb"; static const char *_ns = "Not Supported"; const char *result; switch (pte & _PAGE_SZALL_4V) { case _PAGE_SZ8K_4V: result = _8Kb; break; case _PAGE_SZ64K_4V: result = _64Kb; break; case _PAGE_SZ4MB_4V: result = _4Mb; break; default: result = _ns; } 
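	/*
	 * _PAGE_SZALL_4V masks the sun4v PTE page-size field; only the
	 * 8K, 64K and 4Mb encodings are decoded above, so any other
	 * size the hardware may support (e.g. 512K or 256Mb hugepages)
	 * reports "Not Supported".
	 */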
return result; } static int sparc64_translate_pte(unsigned long pte, void *physaddr, ulonglong unused) { unsigned long paddr = pte_to_pa(pte); int rc = FALSE; int cnt = 0; /* Once again not handling swap pte.*/ if ((pte & _PAGE_VALID) == 0UL) goto out; if (pte & _PAGE_NFO_4V) fprintf(fp, "%sNoFaultOn", cnt++ ? "|" : ""); if (pte & _PAGE_MODIFIED_4V) fprintf(fp, "%sModified", cnt++ ? "|" : ""); if (pte & _PAGE_ACCESSED_4V) fprintf(fp, "%sAccessed", cnt++ ? "|" : ""); if (pte & _PAGE_READ_4V) fprintf(fp, "%sReadSoftware", cnt++ ? "|" : ""); if (pte & _PAGE_WRITE_4V) fprintf(fp, "%sWriteSoftware", cnt++ ? "|" : ""); if (pte & _PAGE_P_4V) fprintf(fp, "%sPriv", cnt++ ? "|" : ""); if (pte & _PAGE_EXEC_4V) fprintf(fp, "%sExecute", cnt++ ? "|" : ""); if (pte & _PAGE_W_4V) fprintf(fp, "%sWritable", cnt++ ? "|" : ""); if (pte & _PAGE_PRESENT_4V) fprintf(fp, "%sPresent", cnt++ ? "|" : ""); fprintf(fp, "|PageSize(%s)\n", pte_page_size(pte)); if (physaddr) *(unsigned long *)physaddr = paddr; rc = TRUE; out: return rc; } static void sparc64_get_frame(struct bt_info *bt, unsigned long *r14, unsigned long *r15) { unsigned long ksp_offset = sparc64_ksp_offset + bt->tc->thread_info; unsigned long ksp; int rc; /* We need thread_info's ksp. This is the stack for sleeping threads * and captured during switch_to. The rest is fetchable from there. */ rc = readmem(ksp_offset, KVADDR, &ksp, sizeof(ksp), "KSP Fetch.", RETURN_ON_ERROR); if (!rc) goto out; *r14 = ksp; *r15 = symbol_value("switch_to_pc"); out: return; } static void sparc64_get_dumpfile_stack_frame(struct bt_info *bt, unsigned long *psp, unsigned long *ppc) { unsigned long *pt_regs; pt_regs = (unsigned long *)bt->machdep; if (!pt_regs) { fprintf(fp, "0x%lx: registers not saved\n", bt->task); *psp = *ppc = 0UL; return; } /* pt_regs can be unaligned */ BCOPY(&pt_regs[30], psp, sizeof(ulong)); BCOPY(&pt_regs[33], ppc, sizeof(ulong)); } static void sparc64_get_stack_frame(struct bt_info *bt, unsigned long *pcp, unsigned long *psp) { unsigned long r14, r15; if (DUMPFILE() && is_task_active(bt->task)) sparc64_get_dumpfile_stack_frame(bt, &r14, &r15); else sparc64_get_frame(bt, &r14, &r15); if (pcp) *pcp = r15; if (psp) *psp = r14; } static int sparc64_get_kvaddr_ranges(struct vaddr_range *vrp) { struct machine_specific *ms = &sparc64_machine_specific; vrp[0].type = KVADDR_UNITY_MAP; vrp[0].start = ms->page_offset; vrp[0].end = ~0ULL; vrp[1].type = KVADDR_VMALLOC; vrp[1].start = SPARC64_VMALLOC_START; vrp[1].end = ms->vmalloc_end; vrp[2].type = KVADDR_START_MAP; vrp[2].start = symbol_value("_start"); vrp[2].end = symbol_value("_end"); vrp[3].type = KVADDR_MODULES; vrp[3].start = SPARC64_MODULES_VADDR; vrp[3].end = SPARC64_MODULES_END; return 4; } static void sparc64_get_crash_notes(void) { unsigned long *notes_ptrs, size, crash_notes_address; int ret; if (!symbol_exists("crash_notes")) { error(WARNING, "Could not retrieve crash_notes.\n"); goto out; } crash_notes_address = symbol_value("crash_notes"); size = kt->cpus * sizeof(notes_ptrs[0]); notes_ptrs = (unsigned long *) GETBUF(size); ret = readmem(crash_notes_address, KVADDR, notes_ptrs, size, "crash_notes", RETURN_ON_ERROR); if (!ret) goto out2; out2: FREEBUF(notes_ptrs); out: return; } static void sparc64_init_kstack_info(void) { sparc64_ksp_offset = MEMBER_OFFSET("thread_info", "ksp"); } static void sparc64_init_irq_stacks(void) { void *irq_stack; unsigned long stack_size; stack_size = get_array_length("hardirq_stack", NULL, 0) * sizeof(unsigned long); irq_stack = malloc(stack_size); if (!irq_stack) error(FATAL, "malloc failure in sparc64_init_irq_stacks"); get_symbol_data("hardirq_stack", stack_size, irq_stack); tt->hardirq_ctx = irq_stack; stack_size = get_array_length("softirq_stack", NULL, 0) * sizeof(unsigned long); irq_stack = malloc(stack_size); if (!irq_stack) error(FATAL, "malloc failure in sparc64_init_irq_stacks"); get_symbol_data("softirq_stack", stack_size, irq_stack); tt->softirq_ctx = irq_stack; } static void sparc64_init_vmemmap_info(void) { struct machine_specific *ms = &sparc64_machine_specific; unsigned long page_struct_size = STRUCT_SIZE("page"); /* * vmemmap memory is addressed as vmalloc memory, so we * treat it as an extension of the latter. */ ms->vmalloc_end += ((1UL << (machdep->max_physmem_bits - PAGE_SHIFT)) * page_struct_size); } static void sparc64_init_cpu_info(void) { unsigned long trap_block, per_cpu_base_offset, per_cpu_base; unsigned long trap_per_cpu; int cpu; if (!symbol_exists("trap_block")) error(FATAL, "sparc64 requires trap_block symbol.\n"); trap_block = symbol_value("trap_block"); if (!MEMBER_EXISTS("trap_per_cpu", "__per_cpu_base")) error(FATAL, "sparc64 requires __per_cpu_base.\n"); trap_per_cpu = STRUCT_SIZE("trap_per_cpu"); per_cpu_base_offset = MEMBER_OFFSET("trap_per_cpu", "__per_cpu_base"); for (cpu = 0; cpu < NR_CPUS; cpu++, trap_block = trap_block + trap_per_cpu) { if (!in_cpu_map(POSSIBLE, cpu)) continue; readmem(trap_block + per_cpu_base_offset, KVADDR, &per_cpu_base, sizeof(per_cpu_base), "sparc64: per_cpu_base", FAULT_ON_ERROR); kt->__per_cpu_offset[cpu] = per_cpu_base; } } void sparc64_init(int when) { struct machine_specific *ms = &sparc64_machine_specific; switch (when) { case SETUP_ENV: machdep->process_elf_notes = process_elf64_notes; break; case PRE_SYMTAB: machdep->machspec = ms; machdep->verify_paddr = sparc64_verify_paddr; machdep->verify_symbol = sparc64_verify_symbol; machdep->verify_line_number = sparc64_verify_line_number; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->flags |= MACHDEP_BT_TEXT; if (machdep->cmdline_args[0]) sparc64_parse_cmdline_args(); break; case PRE_GDB: machdep->max_physmem_bits = _MAX_PHYSMEM_BITS; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~((ulonglong) machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; machdep->eframe_search = sparc64_eframe_search; machdep->back_trace = sparc64_back_trace; machdep->processor_speed = sparc64_processor_speed; machdep->uvtop = sparc64_uvtop; machdep->kvtop = sparc64_kvtop; machdep->get_task_pgd = sparc64_get_task_pgd; machdep->dump_irq = generic_dump_irq; machdep->get_stack_frame = sparc64_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = sparc64_translate_pte; machdep->memory_size = generic_memory_size; machdep->vmalloc_start = sparc64_vmalloc_start; machdep->is_task_addr = sparc64_is_task_addr; machdep->is_kvaddr = sparc64_is_kvaddr; machdep->is_uvaddr = sparc64_is_uvaddr; machdep->dis_filter = sparc64_dis_filter; machdep->get_smp_cpus = sparc64_get_smp_cpus; machdep->clear_machdep_cache = sparc64_clear_machdep_cache; machdep->get_kvaddr_ranges = sparc64_get_kvaddr_ranges; machdep->cmd_mach = sparc64_cmd_mach; machdep->init_kernel_pgd = sparc64_init_kernel_pgd; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->get_irq_affinity = generic_get_irq_affinity; machdep->show_interrupts = generic_show_interrupts; pt_level_alloc(&machdep->pgd, "Can't malloc pgd 
space."); pt_level_alloc(&machdep->pud, "Can't malloc pud space."); pt_level_alloc(&machdep->pmd, "Can't malloc pmd space."); pt_level_alloc(&machdep->ptbl, "Can't malloc ptbl space."); pt_clear_cache(); sparc64_phys_base(); sparc64_kimage_limits(); break; case POST_GDB: get_symbol_data("PAGE_OFFSET", sizeof(unsigned long), &ms->page_offset); machdep->kvbase = symbol_value("_stext"); machdep->identity_map_base = (ulong) PAGE_OFFSET; machdep->ptrs_per_pgd = PTRS_PER_PGD; get_symbol_data("VMALLOC_END", sizeof(unsigned long), &ms->vmalloc_end); machdep->section_size_bits = _SECTION_SIZE_BITS; if (kernel_symbol_exists("nr_irqs")) get_symbol_data("nr_irqs", sizeof(unsigned int), &machdep->nr_irqs); sparc64_init_vmemmap_info(); sparc64_init_cpu_info(); sparc64_init_kstack_info(); sparc64_init_irq_stacks(); break; case POST_VM: if (!ACTIVE()) sparc64_get_crash_notes(); break; case POST_INIT: break; case LOG_ONLY: machdep->machspec = ms; machdep->kvbase = kt->vmcoreinfo._stext_SYMBOL; break; } } void sparc64_dump_machdep_table(ulong arg) { int i, others; others = 0; fprintf(fp, " flags: %lx (", machdep->flags); if (machdep->flags & MACHDEP_BT_TEXT) fprintf(fp, "%sMACHDEP_BT_TEXT", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " kvbase: %lx\n", machdep->kvbase); fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base); fprintf(fp, " pagesize: %d\n", machdep->pagesize); fprintf(fp, " pageshift: %d\n", machdep->pageshift); fprintf(fp, " pagemask: %llx\n", machdep->pagemask); fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset); fprintf(fp, " stacksize: %ld\n", machdep->stacksize); fprintf(fp, " hz: %d\n", machdep->hz); fprintf(fp, " mhz: %ld\n", machdep->mhz); fprintf(fp, " memsize: %ld (0x%lx)\n", machdep->memsize, machdep->memsize); fprintf(fp, " bits: %d\n", machdep->bits); fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs); fprintf(fp, " eframe_search: sparc64_eframe_search()\n"); fprintf(fp, " back_trace: sparc64_back_trace()\n"); fprintf(fp, " processor_speed: sparc64_processor_speed()\n"); fprintf(fp, " uvtop: sparc64_uvtop()\n"); fprintf(fp, " kvtop: sparc64_kvtop()\n"); fprintf(fp, " get_task_pgd: sparc64_get_task_pgd()\n"); fprintf(fp, " dump_irq: generic_dump_irq()\n"); fprintf(fp, " get_stack_frame: sparc64_get_stack_frame()\n"); fprintf(fp, " get_stackbase: generic_get_stackbase()\n"); fprintf(fp, " get_stacktop: generic_get_stacktop()\n"); fprintf(fp, " translate_pte: sparc64_translate_pte()\n"); fprintf(fp, " memory_size: generic_memory_size()\n"); fprintf(fp, " vmalloc_start: sparc64_vmalloc_start()\n"); fprintf(fp, " is_task_addr: sparc64_is_task_addr()\n"); fprintf(fp, " verify_symbol: sparc64_verify_symbol()\n"); fprintf(fp, " dis_filter: sparc64_dis_filter()\n"); fprintf(fp, " cmd_mach: sparc64_cmd_mach()\n"); fprintf(fp, " get_smp_cpus: sparc64_get_smp_cpus()\n"); fprintf(fp, " is_kvaddr: sparc64_is_kvaddr()\n"); fprintf(fp, " is_uvaddr: sparc64_is_uvaddr()\n"); fprintf(fp, " verify_paddr: sparc64_verify_paddr()\n"); fprintf(fp, " get_kvaddr_ranges: sparc64_get_kvaddr_ranges()\n"); fprintf(fp, " get_irq_affinity: generic_get_irq_affinity()\n"); fprintf(fp, " show_interrupts: generic_show_interrupts()\n"); fprintf(fp, " xendump_p2m_create: NULL\n"); fprintf(fp, "xen_kdump_p2m_create: NULL\n"); fprintf(fp, " line_number_hooks: NULL\n"); fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read); fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read); fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read); fprintf(fp, "clear_machdep_cache: 
sparc64_clear_machdep_cache()\n"); fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd); fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd); fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl); fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd); fprintf(fp, " section_size_bits: %ld\n", machdep->section_size_bits); fprintf(fp, " max_physmem_bits: %ld\n", machdep->max_physmem_bits); fprintf(fp, " sections_per_root: %ld\n", machdep->sections_per_root); for (i = 0; i < MAX_MACHDEP_ARGS; i++) { fprintf(fp, " cmdline_args[%d]: %s\n", i, machdep->cmdline_args[i] ? machdep->cmdline_args[i] : "(unused)"); } fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec); fprintf(fp, " page_offset: %lx\n", machdep->machspec->page_offset); fprintf(fp, " vmalloc_end: %lx\n", machdep->machspec->vmalloc_end); } #endif /* SPARC64 */ crash-7.2.8/defs.h0000664000000000000000000065066213614623427012451 0ustar rootroot/* defs.h - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2020 David Anderson * Copyright (C) 2002-2020 Red Hat, Inc. All rights reserved. * Copyright (C) 2002 Silicon Graphics, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #ifndef GDB_COMMON #include #include #include #include #include #include #include #undef basename #if !defined(__USE_GNU) #define __USE_GNU #include #undef __USE_GNU #else #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* backtrace() */ #include #ifdef LZO #include #endif #ifdef SNAPPY #include #endif #ifndef ATTRIBUTE_UNUSED #define ATTRIBUTE_UNUSED __attribute__ ((__unused__)) #endif #undef TRUE #undef FALSE #define TRUE (1) #define FALSE (0) #define STR(x) #x #ifndef offsetof # define offsetof(TYPE, MEMBER) ((ulong)&((TYPE *)0)->MEMBER) #endif #if !defined(X86) && !defined(X86_64) && !defined(ALPHA) && !defined(PPC) && \ !defined(IA64) && !defined(PPC64) && !defined(S390) && !defined(S390X) && \ !defined(ARM) && !defined(ARM64) && !defined(MIPS) && !defined(SPARC64) #ifdef __alpha__ #define ALPHA #endif #ifdef __i386__ #define X86 #endif #ifdef __powerpc64__ #define PPC64 #else #ifdef __powerpc__ #define PPC #endif #endif #ifdef __ia64__ #define IA64 #endif #ifdef __s390__ #define S390 #endif #ifdef __s390x__ #define S390X #endif #ifdef __x86_64__ #define X86_64 #endif #ifdef __arm__ #define ARM #endif #ifdef __aarch64__ #define ARM64 #endif #ifdef __mipsel__ #define MIPS #endif #ifdef __sparc_v9__ #define SPARC64 #endif #endif #ifdef X86 #define NR_CPUS (256) #endif #ifdef X86_64 #define NR_CPUS (8192) #endif #ifdef ALPHA #define NR_CPUS (64) #endif #ifdef PPC #define NR_CPUS (32) #endif #ifdef IA64 #define NR_CPUS (4096) #endif #ifdef PPC64 #define NR_CPUS (2048) #endif #ifdef S390 #define NR_CPUS (512) #endif #ifdef S390X #define NR_CPUS (512) #endif #ifdef ARM #define NR_CPUS (32) #endif #ifdef ARM64 #define NR_CPUS (4096) /* TBD */ #endif #ifdef MIPS #define NR_CPUS (32) #endif #ifdef SPARC64 #define NR_CPUS (4096) #endif #define NR_DEVICE_DUMPS (64) /* Some architectures 
require memory accesses to be aligned. */ #if defined(SPARC64) #define NEED_ALIGNED_MEM_ACCESS #endif #define BUFSIZE (1500) #define NULLCHAR ('\0') #define MAXARGS (100) /* max number of arguments to one function */ #define MAXARGLEN (40) /* max length of argument */ #define HIST_BLKSIZE (4096) static inline int string_exists(char *s) { return (s ? TRUE : FALSE); } #define STREQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ (strcmp((char *)(A), (char *)(B)) == 0)) #define STRNEQ(A, B) (string_exists((char *)A) && string_exists((char *)B) && \ (strncmp((char *)(A), (char *)(B), strlen((char *)(B))) == 0)) #define BZERO(S, N) (memset(S, NULLCHAR, N)) #define BCOPY(S, D, C) (memcpy(D, S, C)) #define BNEG(S, N) (memset(S, 0xff, N)) #define BEEP() fprintf(stderr, "%c", 0x7) #define LASTCHAR(s) (s[strlen(s)-1]) #define FIRSTCHAR(s) (s[0]) #define QUOTED_STRING(s) ((FIRSTCHAR(s) == '"') && (LASTCHAR(s) == '"')) #define SINGLE_QUOTED_STRING(s) ((FIRSTCHAR(s) == '\'') && (LASTCHAR(s) == '\'')) #define PATHEQ(A, B) ((A) && (B) && (pathcmp((char *)(A), (char *)(B)) == 0)) #ifdef roundup #undef roundup #endif #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) typedef uint64_t physaddr_t; #define PADDR_NOT_AVAILABLE (0x1ULL) #define KCORE_USE_VADDR (-1ULL) typedef unsigned long long int ulonglong; struct number_option { ulong num; ulonglong ll_num; ulong retflags; }; /* * program_context flags */ #define LIVE_SYSTEM (0x1ULL) #define TTY (0x2ULL) #define RUNTIME (0x4ULL) #define IN_FOREACH (0x8ULL) #define MCLXCD (0x10ULL) #define CMDLINE_IFILE (0x20ULL) #define MFD_RDWR (0x40ULL) #define KVMDUMP (0x80ULL) #define SILENT (0x100ULL) #define SADUMP (0x200ULL) #define HASH (0x400ULL) #define SCROLL (0x800ULL) #define NO_CONSOLE (0x1000ULL) #define RUNTIME_IFILE (0x2000ULL) #define DROP_CORE (0x4000ULL) #define LKCD (0x8000ULL) #define GDB_INIT (0x10000ULL) #define IN_GDB (0x20000ULL) #define RCLOCAL_IFILE (0x40000ULL) #define RCHOME_IFILE (0x80000ULL) #define VMWARE_VMSS (0x100000ULL) #define READLINE (0x200000ULL) #define _SIGINT_ (0x400000ULL) #define IN_RESTART (0x800000ULL) #define KERNEL_DEBUG_QUERY (0x1000000ULL) #define DEVMEM (0x2000000ULL) #define REM_LIVE_SYSTEM (0x4000000ULL) #define NAMELIST_LOCAL (0x8000000ULL) #define LIVE_RAMDUMP (0x10000000ULL) #define NAMELIST_SAVED (0x20000000ULL) #define DUMPFILE_SAVED (0x40000000ULL) #define UNLINK_NAMELIST (0x80000000ULL) #define NAMELIST_UNLINKED (0x100000000ULL) #define REM_MCLXCD (0x200000000ULL) #define REM_LKCD (0x400000000ULL) #define NAMELIST_NO_GZIP (0x800000000ULL) #define UNLINK_MODULES (0x1000000000ULL) #define S390D (0x2000000000ULL) #define REM_S390D (0x4000000000ULL) #define SYSRQ (0x8000000000ULL) #define KDUMP (0x10000000000ULL) #define NETDUMP (0x20000000000ULL) #define REM_NETDUMP (0x40000000000ULL) #define SYSMAP (0x80000000000ULL) #define SYSMAP_ARG (0x100000000000ULL) #define MEMMOD (0x200000000000ULL) #define MODPRELOAD (0x400000000000ULL) #define DISKDUMP (0x800000000000ULL) #define DATADEBUG (0x1000000000000ULL) #define FINDKERNEL (0x2000000000000ULL) #define VERSION_QUERY (0x4000000000000ULL) #define READNOW (0x8000000000000ULL) #define NOCRASHRC (0x10000000000000ULL) #define INIT_IFILE (0x20000000000000ULL) #define XENDUMP (0x40000000000000ULL) #define XEN_HYPER (0x80000000000000ULL) #define XEN_CORE (0x100000000000000ULL) #define PLEASE_WAIT (0x200000000000000ULL) #define IFILE_ERROR (0x400000000000000ULL) #define KERNTYPES (0x800000000000000ULL) #define MINIMAL_MODE (0x1000000000000000ULL) #define 
CRASHBUILTIN (0x2000000000000000ULL) #define PRELOAD_EXTENSIONS \ (0x4000000000000000ULL) #define PROC_KCORE (0x8000000000000000ULL) #define ACTIVE() (pc->flags & LIVE_SYSTEM) #define LOCAL_ACTIVE() ((pc->flags & (LIVE_SYSTEM|LIVE_RAMDUMP)) == LIVE_SYSTEM) #define DUMPFILE() (!(pc->flags & LIVE_SYSTEM)) #define LIVE() (pc->flags2 & LIVE_DUMP || pc->flags & LIVE_SYSTEM) #define MEMORY_SOURCES (NETDUMP|KDUMP|MCLXCD|LKCD|DEVMEM|S390D|MEMMOD|DISKDUMP|XENDUMP|CRASHBUILTIN|KVMDUMP|PROC_KCORE|SADUMP|VMWARE_VMSS|LIVE_RAMDUMP) #define DUMPFILE_TYPES (DISKDUMP|NETDUMP|KDUMP|MCLXCD|LKCD|S390D|XENDUMP|KVMDUMP|SADUMP|VMWARE_VMSS|LIVE_RAMDUMP) #define REMOTE() (pc->flags2 & REMOTE_DAEMON) #define REMOTE_ACTIVE() (pc->flags & REM_LIVE_SYSTEM) #define REMOTE_DUMPFILE() \ (pc->flags & (REM_NETDUMP|REM_MCLXCD|REM_LKCD|REM_S390D)) #define REMOTE_MEMSRC() (REMOTE_ACTIVE() || REMOTE_PAUSED() || REMOTE_DUMPFILE()) #define LKCD_DUMPFILE() (pc->flags & (LKCD|REM_LKCD)) #define NETDUMP_DUMPFILE() (pc->flags & (NETDUMP|REM_NETDUMP)) #define DISKDUMP_DUMPFILE() (pc->flags & DISKDUMP) #define KDUMP_DUMPFILE() (pc->flags & KDUMP) #define XENDUMP_DUMPFILE() (pc->flags & XENDUMP) #define XEN_HYPER_MODE() (pc->flags & XEN_HYPER) #define SYSRQ_TASK(X) ((pc->flags & SYSRQ) && is_task_active(X)) #define XEN_CORE_DUMPFILE() (pc->flags & XEN_CORE) #define LKCD_KERNTYPES() (pc->flags & KERNTYPES) #define KVMDUMP_DUMPFILE() (pc->flags & KVMDUMP) #define SADUMP_DUMPFILE() (pc->flags & SADUMP) #define VMSS_DUMPFILE() (pc->flags & VMWARE_VMSS) #define QEMU_MEM_DUMP_NO_VMCOREINFO() \ ((pc->flags2 & (QEMU_MEM_DUMP_ELF|QEMU_MEM_DUMP_COMPRESSED)) && !(pc->flags2 & VMCOREINFO)) #define NETDUMP_LOCAL (0x1) /* netdump_data flags */ #define NETDUMP_REMOTE (0x2) #define VMCORE_VALID() (nd->flags & (NETDUMP_LOCAL|NETDUMP_REMOTE|KDUMP_LOCAL)) #define NETDUMP_ELF32 (0x4) #define NETDUMP_ELF64 (0x8) #define PARTIAL_DUMP (0x10) /* netdump or diskdump */ #define KDUMP_ELF32 (0x20) #define KDUMP_ELF64 (0x40) #define KDUMP_LOCAL (0x80) #define KCORE_LOCAL (0x100) #define KCORE_ELF32 (0x200) #define KCORE_ELF64 (0x400) #define QEMU_MEM_DUMP_KDUMP_BACKUP \ (0x800) #define KVMDUMP_LOCAL (0x1) #define KVMDUMP_VALID() (kvm->flags & (KVMDUMP_LOCAL)) #define DUMPFILE_FORMAT(flags) ((flags) & \ (NETDUMP_ELF32|NETDUMP_ELF64|KDUMP_ELF32|KDUMP_ELF64)) #define DISKDUMP_LOCAL (0x1) #define KDUMP_CMPRS_LOCAL (0x2) #define ERROR_EXCLUDED (0x4) #define ZERO_EXCLUDED (0x8) #define DUMPFILE_SPLIT (0x10) #define NO_ELF_NOTES (0x20) #define LZO_SUPPORTED (0x40) #define SNAPPY_SUPPORTED (0x80) #define DISKDUMP_VALID() (dd->flags & DISKDUMP_LOCAL) #define KDUMP_CMPRS_VALID() (dd->flags & KDUMP_CMPRS_LOCAL) #define KDUMP_SPLIT() (dd->flags & DUMPFILE_SPLIT) #define XENDUMP_LOCAL (0x1) #define XENDUMP_VALID() (xd->flags & XENDUMP_LOCAL) #define SADUMP_LOCAL (0x1) #define SADUMP_DISKSET (0x2) #define SADUMP_MEDIA (0x4) #define SADUMP_ZERO_EXCLUDED (0x8) #define SADUMP_KDUMP_BACKUP (0x10) #define SADUMP_VALID() (sd->flags & SADUMP_LOCAL) #define CRASHDEBUG(x) (pc->debug >= (x)) #define CRASHDEBUG_SUSPEND(X) { pc->debug_save = pc->debug; pc->debug = X; } #define CRASHDEBUG_RESTORE() { pc->debug = pc->debug_save; } #define VERBOSE (0x1) #define ADDRESS_SPECIFIED (0x2) #define FAULT_ON_ERROR (0x1) #define RETURN_ON_ERROR (0x2) #define QUIET (0x4) #define HEX_BIAS (0x8) #define LONG_LONG (0x10) #define RETURN_PARTIAL (0x20) #define NO_DEVMEM_SWITCH (0x40) #define SEEK_ERROR (-1) #define READ_ERROR (-2) #define WRITE_ERROR (-3) #define PAGE_EXCLUDED (-4) #define RESTART() 
(longjmp(pc->main_loop_env, 1)) #define RESUME_FOREACH() (longjmp(pc->foreach_loop_env, 1)) #define INFO (1) #define FATAL (2) #define FATAL_RESTART (3) #define WARNING (4) #define NOTE (5) #define CONT (6) #define FATAL_ERROR(x) (((x) == FATAL) || ((x) == FATAL_RESTART)) #define CONSOLE_OFF(x) ((x) = console_off()) #define CONSOLE_ON(x) (console_on(x)) #define RADIX(X) (X) #define NUM_HEX (0x1) #define NUM_DEC (0x2) #define NUM_EXPR (0x4) #define NUM_ANY (NUM_HEX|NUM_DEC|NUM_EXPR) /* * program context redirect flags */ #define FROM_COMMAND_LINE (0x1) #define FROM_INPUT_FILE (0x2) #define REDIRECT_NOT_DONE (0x4) #define REDIRECT_TO_PIPE (0x8) #define REDIRECT_TO_STDPIPE (0x10) #define REDIRECT_TO_FILE (0x20) #define REDIRECT_FAILURE (0x40) #define REDIRECT_SHELL_ESCAPE (0x80) #define REDIRECT_SHELL_COMMAND (0x100) #define REDIRECT_PID_KNOWN (0x200) #define REDIRECT_MULTI_PIPE (0x400) #define PIPE_OPTIONS (FROM_COMMAND_LINE | FROM_INPUT_FILE | REDIRECT_TO_PIPE | \ REDIRECT_TO_STDPIPE | REDIRECT_TO_FILE) #define DEFAULT_REDHAT_DEBUG_LOCATION "/usr/lib/debug/lib/modules" #define MEMORY_DRIVER_MODULE "crash" #define MEMORY_DRIVER_DEVICE "/dev/crash" #define MEMORY_DRIVER_DEVICE_MODE (S_IFCHR|S_IRUSR) /* * structure definitions */ struct program_context { char *program_name; /* this program's name */ char *program_path; /* unadulterated argv[0] */ char *program_version; /* this program's version */ char *gdb_version; /* embedded gdb version */ char *prompt; /* this program's prompt */ unsigned long long flags; /* flags from above */ char *namelist; /* linux namelist */ char *dumpfile; /* dumpfile or /dev/kmem */ char *live_memsrc; /* live memory driver */ char *system_map; /* get symbol values from System.map */ char *namelist_debug; /* namelist containing debug data */ char *debuginfo_file; /* separate debuginfo file */ char *memory_module; /* alternative to mem.c driver */ char *memory_device; /* alternative to /dev/[k]mem device */ char *machine_type; /* machine's processor type */ char *editing_mode; /* readline vi or emacs */ char *server; /* network daemon */ char *server_memsrc; /* memory source on server */ char *server_namelist; /* kernel namelist on server */ int nfd; /* linux namelist fd */ int mfd; /* /dev/mem fd */ int kfd; /* /dev/kmem fd */ int dfd; /* dumpfile fd */ int confd; /* console fd */ int sockfd; /* network daemon socket */ ushort port; /* network daemon port */ int rmfd; /* remote server memory source fd */ int rkfd; /* remote server /dev/kmem fd */ ulong program_pid; /* program pid */ ulong server_pid; /* server pid */ ulong rcvbufsize; /* client-side receive buffer size */ char *home; /* user's home directory */ char command_line[BUFSIZE]; /* possibly parsed input command line */ char orig_line[BUFSIZE]; /* original input line */ char *readline; /* pointer to last readline() return */ char my_tty[10]; /* real tty name (shown by ps -ef) */ ulong debug; /* level of debug */ ulong debug_save; /* saved level for debug-suspend */ char *console; /* current debug console device */ char *redhat_debug_loc; /* location of matching debug objects */ int pipefd[2]; /* output pipe file descriptors */ FILE *nullfp; /* bitbucket */ FILE *stdpipe; /* standard pipe for output */ FILE *pipe; /* command line specified pipe */ FILE *ofile; /* command line specified output file */ FILE *ifile; /* command line specified input file */ FILE *ifile_pipe; /* output pipe specified from file */ FILE *ifile_ofile; /* output file specified from file */ FILE *symfile; /* symbol table data file */ 
FILE *symfile2; /* alternate access to above */ FILE *tmpfile; /* tmpfile for selective data output */ FILE *saved_fp; /* for printing while parsing tmpfile */ FILE *tmp_fp; /* stored tmpfile pointer */ char *input_file; /* input file specified at invocation */ FILE *tmpfile2; /* tmpfile2 does not use save_fp! */ int eoc_index; /* end of redirected command index */ int scroll_command; /* default scroll command for output */ #define SCROLL_NONE 0 #define SCROLL_LESS 1 #define SCROLL_MORE 2 #define SCROLL_CRASHPAGER 3 ulong redirect; /* per-cmd origin and output flags */ pid_t stdpipe_pid; /* per-cmd standard output pipe's pid */ pid_t pipe_pid; /* per-cmd output pipe's pid */ pid_t pipe_shell_pid; /* per-cmd output pipe's shell pid */ char pipe_command[BUFSIZE]; /* pipe command line */ struct command_table_entry *cmd_table; /* linux/xen command table */ char *curcmd; /* currently-executing command */ char *lastcmd; /* previously-executed command */ ulong cmdgencur; /* current command generation number */ ulong curcmd_flags; /* general purpose per-command flag */ #define XEN_MACHINE_ADDR (0x1) #define REPEAT (0x2) #define IDLE_TASK_SHOWN (0x4) #define TASK_SPECIFIED (0x8) #define MEMTYPE_UVADDR (0x10) #define MEMTYPE_FILEADDR (0x20) #define HEADER_PRINTED (0x40) #define BAD_INSTRUCTION (0x80) #define UD2A_INSTRUCTION (0x100) #define IRQ_IN_USE (0x200) #define NO_MODIFY (0x400) #define IGNORE_ERRORS (0x800) #define FROM_RCFILE (0x1000) #define MEMTYPE_KVADDR (0x2000) #define MOD_SECTIONS (0x4000) #define MOD_READNOW (0x8000) #define MM_STRUCT_FORCE (0x10000) #define CPUMASK (0x20000) #define PARTIAL_READ_OK (0x40000) ulonglong curcmd_private; /* general purpose per-command info */ int cur_gdb_cmd; /* current gdb command */ int last_gdb_cmd; /* previously-executed gdb command */ int sigint_cnt; /* number of ignored SIGINTs */ struct gnu_request *cur_req; /* current gdb gnu_request */ struct sigaction sigaction; /* general usage sigaction. */ struct sigaction gdb_sigaction; /* gdb's SIGINT sigaction. 
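 *
 * Hedged sketch, not original text: the jmp_buf targets that follow
 * pair with the RESTART()/RESUME_FOREACH() macros defined earlier,
 * roughly:
 *
 *	if (setjmp(pc->main_loop_env))
 *		;	// re-entered here after error(FATAL, ...)
 *	// ...read and execute the next command...
 *
 * The actual setjmp() call sites live elsewhere in the suite; this
 * only shows the direction of the longjmp.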
*/ jmp_buf main_loop_env; /* longjmp target default */ jmp_buf foreach_loop_env; /* longjmp target within foreach */ jmp_buf gdb_interface_env; /* longjmp target for gdb error catch */ struct termios termios_orig; /* non-raw settings */ struct termios termios_raw; /* while gathering command input */ int ncmds; /* number of commands in menu */ char **cmdlist; /* current list of available commands */ int cmdlistsz; /* space available in cmdlist */ unsigned output_radix; /* current gdb output_radix */ void *sbrk; /* current sbrk value */ struct extension_table *curext; /* extension being loaded */ int (*readmem)(int, void *, int, ulong, physaddr_t); /* memory access */ int (*writemem)(int, void *, int, ulong, physaddr_t);/* memory access */ ulong ifile_in_progress; /* original xxx_IFILE flags */ off_t ifile_offset; /* current offset into input file */ char *runtime_ifile_cmd; /* runtime command using input file */ char *kvmdump_mapfile; /* storage of physical to file offsets */ ulonglong flags2; /* flags overrun */ #define FLAT (0x01ULL) #define ELF_NOTES (0x02ULL) #define GET_OSRELEASE (0x04ULL) #define REMOTE_DAEMON (0x08ULL) #define ERASEINFO_DATA (0x10ULL) #define GDB_CMD_MODE (0x20ULL) #define LIVE_DUMP (0x40ULL) #define FLAT_FORMAT() (pc->flags2 & FLAT) #define ELF_NOTES_VALID() (pc->flags2 & ELF_NOTES) #define RADIX_OVERRIDE (0x80ULL) #define QEMU_MEM_DUMP_ELF (0x100ULL) #define GET_LOG (0x200ULL) #define VMCOREINFO (0x400ULL) #define ALLOW_FP (0x800ULL) #define REM_PAUSED_F (0x1000ULL) #define RAMDUMP (0x2000ULL) #define REMOTE_PAUSED() (pc->flags2 & REM_PAUSED_F) #define OFFLINE_HIDE (0x4000ULL) #define INCOMPLETE_DUMP (0x8000ULL) #define is_incomplete_dump() (pc->flags2 & INCOMPLETE_DUMP) #define QEMU_MEM_DUMP_COMPRESSED (0x10000ULL) #define SNAP (0x20000ULL) #define EXCLUDED_VMEMMAP (0x40000ULL) #define is_excluded_vmemmap() (pc->flags2 & EXCLUDED_VMEMMAP) #define MEMSRC_LOCAL (0x80000ULL) #define REDZONE (0x100000ULL) char *cleanup; char *namelist_orig; char *namelist_debug_orig; FILE *args_ifile; /* per-command args input file */ void (*cmd_cleanup)(void *); /* per-command cleanup function */ void *cmd_cleanup_arg; /* optional cleanup function argument */ ulong scope; /* optional text context address */ ulong nr_hash_queues; /* hash queue head count */ char *(*read_vmcoreinfo)(const char *); FILE *error_fp; /* error() message direction */ char *error_path; /* stderr path information */ }; #define READMEM pc->readmem typedef void (*cmd_func_t)(void); struct command_table_entry { /* one for each command in menu */ char *name; cmd_func_t func; char **help_data; ulong flags; }; struct args_input_file { int index; int args_used; int is_gdb_cmd; int in_expression; int start; int resume; char *fileptr; }; #define REFRESH_TASK_TABLE (0x1) /* command_table_entry flags */ #define HIDDEN_COMMAND (0x2) #define CLEANUP (0x4) /* for extensions only */ #define MINIMAL (0x8) /* * A linked list of extension table structures keeps track of the current * set of shared library extensions. 
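 *
 * A minimal sketch of how an entry comes to exist, assuming the usual
 * crash extension entry points (illustrative, not copied from the
 * registration code; "mycmd", cmd_mycmd and help_mycmd are
 * hypothetical names):
 *
 *	static struct command_table_entry my_cmds[] = {
 *		{ "mycmd", cmd_mycmd, help_mycmd, 0 },
 *		{ NULL }
 *	};
 *
 *	void _init(void)	// invoked when the library is dlopen()ed
 *	{
 *		register_extension(my_cmds);
 *	}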
*/ struct extension_table { void *handle; /* handle from dlopen() */ char *filename; /* name of shared library */ struct command_table_entry *command_table; /* list of commands */ ulong flags; /* registration flags */ struct extension_table *next, *prev; /* bookkeeping */ }; #define REGISTERED (0x1) /* extension_table flags */ #define DUPLICATE_COMMAND_NAME (0x2) #define NO_MINIMAL_COMMANDS (0x4) struct new_utsname { char sysname[65]; char nodename[65]; char release[65]; char version[65]; char machine[65]; char domainname[65]; }; #define NO_MODULE_ACCESS (0x1) #define TVEC_BASES_V1 (0x2) #define GCC_3_2 (0x4) #define GCC_3_2_3 (0x8) #define GCC_2_96 (0x10) #define RA_SEEK (0x20) #define NO_RA_SEEK (0x40) #define KALLSYMS_V1 (0x80) #define NO_KALLSYMS (0x100) #define PER_CPU_OFF (0x200) #define SMP (0x400) #define GCC_3_3_2 (0x800) #define KMOD_V1 (0x1000) #define KMOD_V2 (0x2000) #define KALLSYMS_V2 (0x2000) #define TVEC_BASES_V2 (0x4000) #define GCC_3_3_3 (0x8000) #define USE_OLD_BT (0x10000) #define USE_OPT_BT (0x10000) #define ARCH_XEN (0x20000) #define NO_IKCONFIG (0x40000) #define DWARF_UNWIND (0x80000) #define NO_DWARF_UNWIND (0x100000) #define DWARF_UNWIND_MEMORY (0x200000) #define DWARF_UNWIND_EH_FRAME (0x400000) #define DWARF_UNWIND_CAPABLE (DWARF_UNWIND_MEMORY|DWARF_UNWIND_EH_FRAME) #define DWARF_UNWIND_MODULES (0x800000) #define BUGVERBOSE_OFF (0x1000000) #define RELOC_SET (0x2000000) #define RELOC_FORCE (0x4000000) #define ARCH_OPENVZ (0x8000000) #define ARCH_PVOPS (0x10000000) #define PRE_KERNEL_INIT (0x20000000) #define ARCH_PVOPS_XEN (0x40000000) #define GCC_VERSION_DEPRECATED (GCC_3_2|GCC_3_2_3|GCC_2_96|GCC_3_3_2|GCC_3_3_3) /* flags2 */ #define RELOC_AUTO (0x1ULL) #define KASLR (0x2ULL) #define KASLR_CHECK (0x4ULL) #define GET_TIMESTAMP (0x8ULL) #define TVEC_BASES_V3 (0x10ULL) #define TIMER_BASES (0x20ULL) #define IRQ_DESC_TREE_RADIX (0x40ULL) #define IRQ_DESC_TREE_XARRAY (0x80ULL) #define XEN() (kt->flags & ARCH_XEN) #define OPENVZ() (kt->flags & ARCH_OPENVZ) #define PVOPS() (kt->flags & ARCH_PVOPS) #define PVOPS_XEN() (kt->flags & ARCH_PVOPS_XEN) #define XEN_MACHINE_TO_MFN(m) ((ulonglong)(m) >> PAGESHIFT()) #define XEN_PFN_TO_PSEUDO(p) ((ulonglong)(p) << PAGESHIFT()) #define XEN_MFN_NOT_FOUND (~0UL) #define XEN_PFNS_PER_PAGE (PAGESIZE()/sizeof(ulong)) #define XEN_FOREIGN_FRAME (1UL << (BITS()-1)) #define XEN_MACHADDR_NOT_FOUND (~0ULL) #define XEN_P2M_PER_PAGE (PAGESIZE() / sizeof(unsigned long)) #define XEN_P2M_MID_PER_PAGE (PAGESIZE() / sizeof(unsigned long *)) #define XEN_P2M_TOP_PER_PAGE (PAGESIZE() / sizeof(unsigned long **)) struct kernel_table { /* kernel data */ ulong flags; ulong stext; ulong etext; ulong stext_init; ulong etext_init; ulong init_begin; ulong init_end; ulong end; int cpus; char *cpus_override; void (*display_bh)(void); ulong module_list; ulong kernel_module; int mods_installed; struct timespec date; char proc_version[BUFSIZE]; struct new_utsname utsname; uint kernel_version[3]; uint gcc_version[3]; int runq_siblings; int kernel_NR_CPUS; long __per_cpu_offset[NR_CPUS]; long *__rq_idx; long *__cpu_idx; ulong *cpu_flags; #define POSSIBLE (0x1) #define PRESENT (0x2) #define ONLINE (0x4) #define NMI (0x8) #define POSSIBLE_MAP (POSSIBLE) #define PRESENT_MAP (PRESENT) #define ONLINE_MAP (ONLINE) #define ACTIVE_MAP (0x10) int BUG_bytes; ulong xen_flags; #define WRITABLE_PAGE_TABLES (0x1) #define SHADOW_PAGE_TABLES (0x2) #define CANONICAL_PAGE_TABLES (0x4) #define XEN_SUSPEND (0x8) char *m2p_page; ulong phys_to_machine_mapping; ulong p2m_table_size; #define 
P2M_MAPPING_CACHE (512) struct p2m_mapping_cache { ulong mapping; ulong pfn; ulong start; ulong end; } p2m_mapping_cache[P2M_MAPPING_CACHE]; #define P2M_MAPPING_PAGE_PFN(c) \ (PVOPS_XEN() ? kt->p2m_mapping_cache[c].pfn : \ (((kt->p2m_mapping_cache[c].mapping - kt->phys_to_machine_mapping)/PAGESIZE()) \ * XEN_PFNS_PER_PAGE)) ulong last_mapping_read; ulong p2m_cache_index; ulong p2m_pages_searched; ulong p2m_mfn_cache_hits; ulong p2m_page_cache_hits; ulong relocate; char *module_tree; struct pvops_xen_info { int p2m_top_entries; ulong p2m_top; ulong p2m_mid_missing; ulong p2m_missing; } pvops_xen; int highest_irq; #define IKCONFIG_AVAIL 0x1 /* kernel contains ikconfig data */ #define IKCONFIG_LOADED 0x2 /* ikconfig data is currently loaded */ int ikconfig_flags; int ikconfig_ents; char *hypervisor; struct vmcoreinfo_data { ulong log_buf_SYMBOL; ulong log_end_SYMBOL; ulong log_buf_len_SYMBOL; ulong logged_chars_SYMBOL; ulong log_first_idx_SYMBOL; ulong log_next_idx_SYMBOL; long log_SIZE; long log_ts_nsec_OFFSET; long log_len_OFFSET; long log_text_len_OFFSET; long log_dict_len_OFFSET; ulong phys_base_SYMBOL; ulong _stext_SYMBOL; } vmcoreinfo; ulonglong flags2; char *source_tree; }; /* * Aid for the two versions of the kernel's module list linkage. */ #define NEXT_MODULE(next_module, modbuf) \ { \ switch (kt->flags & (KMOD_V1|KMOD_V2)) \ { \ case KMOD_V1: \ next_module = ULONG(modbuf + OFFSET(module_next)); \ break; \ case KMOD_V2: \ next_module = ULONG(modbuf + OFFSET(module_list)); \ if (next_module != kt->kernel_module) \ next_module -= OFFSET(module_list); \ break; \ } \ } #define THIS_KERNEL_VERSION ((kt->kernel_version[0] << 16) + \ (kt->kernel_version[1] << 8) + \ (kt->kernel_version[2])) #define LINUX(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z)) #define THIS_GCC_VERSION ((kt->gcc_version[0] << 16) + \ (kt->gcc_version[1] << 8) + \ (kt->gcc_version[2])) #define GCC(x,y,z) (((uint)(x) << 16) + ((uint)(y) << 8) + (uint)(z)) #define IS_KERNEL_STATIC_TEXT(x) (((ulong)(x) >= kt->stext) && \ ((ulong)(x) < kt->etext)) #define TASK_COMM_LEN 16 /* task command name length including NULL */ struct task_context { /* context stored for each task */ ulong task; ulong thread_info; ulong pid; char comm[TASK_COMM_LEN+1]; int processor; ulong ptask; ulong mm_struct; struct task_context *tc_next; }; struct tgid_context { /* tgid and task stored for each task */ ulong tgid; ulong task; }; struct task_table { /* kernel/local task table data */ struct task_context *current; struct task_context *context_array; void (*refresh_task_table)(void); ulong flags; ulong task_start; ulong task_end; void *task_local; int max_tasks; int nr_threads; ulong running_tasks; ulong retries; ulong panicmsg; int panic_processor; ulong *idle_threads; ulong *panic_threads; ulong *active_set; ulong *panic_ksp; ulong *hardirq_ctx; ulong *hardirq_tasks; ulong *softirq_ctx; ulong *softirq_tasks; ulong panic_task; ulong this_task; int pidhash_len; ulong pidhash_addr; ulong last_task_read; ulong last_thread_info_read; ulong last_mm_read; char *task_struct; char *thread_info; char *mm_struct; ulong init_pid_ns; struct tgid_context *tgid_array; struct tgid_context *last_tgid; ulong tgid_searches; ulong tgid_cache_hits; long filepages; long anonpages; ulong stack_end_magic; ulong pf_kthread; ulong pid_radix_tree; int callbacks; struct task_context **context_by_task; /* task_context sorted by task addr */ ulong pid_xarray; }; #define TASK_INIT_DONE (0x1) #define TASK_ARRAY_EXISTS (0x2) #define PANIC_TASK_NOT_FOUND (0x4) #define 
TASK_REFRESH (0x8) #define TASK_REFRESH_OFF (0x10) #define PANIC_KSP (0x20) #define ACTIVE_SET (0x40) #define POPULATE_PANIC (0x80) #define PIDHASH (0x100) #define PID_HASH (0x200) #define THREAD_INFO (0x400) #define IRQSTACKS (0x800) #define TIMESPEC (0x1000) #define NO_TIMESPEC (0x2000) #define ACTIVE_ONLY (0x4000) #define START_TIME_NSECS (0x8000) #define THREAD_INFO_IN_TASK (0x10000) #define PID_RADIX_TREE (0x20000) #define INDEXED_CONTEXTS (0x40000) #define PID_XARRAY (0x80000) #define TASK_SLUSH (20) #define NO_PROC_ID 0xFF /* No processor magic marker (from kernel) */ /* * Global "tt" points to task_table */ #define CURRENT_CONTEXT() (tt->current) #define CURRENT_TASK() (tt->current->task) #define CURRENT_PID() (tt->current->pid) #define CURRENT_COMM() (tt->current->comm) #define RUNNING_TASKS() (tt->running_tasks) #define FIRST_CONTEXT() (tt->context_array) #define NO_PID ((ulong)-1) #define NO_TASK (0) #define IS_TASK_ADDR(X) (machdep->is_task_addr(X)) #define GET_STACKBASE(X) (machdep->get_stackbase(X)) #define GET_STACKTOP(X) (machdep->get_stacktop(X)) #define STACKSIZE() (machdep->stacksize) #define LONGS_PER_STACK (machdep->stacksize/sizeof(ulong)) #define INSTACK(X,BT) \ (((ulong)(X) >= (BT)->stackbase) && ((ulong)(X) < (BT)->stacktop)) #define ALIGNED_STACK_OFFSET(task) ((ulong)(task) & (STACKSIZE()-1)) #define BITS() (machdep->bits) #define BITS32() (machdep->bits == 32) #define BITS64() (machdep->bits == 64) #define IS_KVADDR(X) (machdep->is_kvaddr(X)) #define IS_UVADDR(X,C) (machdep->is_uvaddr(X,C)) #define PID_ALIVE(x) (kill(x, 0) == 0) struct kernel_list_head { struct kernel_list_head *next, *prev; }; struct stack_hook { ulong esp; ulong eip; }; struct bt_info { ulong task; ulonglong flags; ulong instptr; ulong stkptr; ulong bptr; ulong stackbase; ulong stacktop; char *stackbuf; struct task_context *tc; struct stack_hook *hp; struct stack_hook *textlist; struct reference *ref; ulong frameptr; char *call_target; void *machdep; ulong debug; ulong eframe_ip; ulong radix; ulong *cpumask; }; #define STACK_OFFSET_TYPE(OFF) \ (((ulong)(OFF) > STACKSIZE()) ? 
\ (ulong)((ulong)(OFF) - (ulong)(bt->stackbase)) : (ulong)(OFF)) #define GET_STACK_ULONG(OFF) \ *((ulong *)((char *)(&bt->stackbuf[(ulong)(STACK_OFFSET_TYPE(OFF))]))) #define GET_STACK_DATA(OFF, LOC, SZ) memcpy((void *)(LOC), \ (void *)(&bt->stackbuf[(ulong)STACK_OFFSET_TYPE(OFF)]), (size_t)(SZ)) struct machine_specific; /* uniquely defined below each machine's area */ struct xendump_data; struct xen_kdump_data; struct vaddr_range { ulong start; ulong end; ulong type; #define KVADDR_UNITY_MAP (1) #define KVADDR_VMALLOC (2) #define KVADDR_VMEMMAP (3) #define KVADDR_START_MAP (4) #define KVADDR_MODULES (5) #define MAX_KVADDR_RANGES KVADDR_MODULES }; #define MAX_MACHDEP_ARGS 5 /* for --machdep/-m machine-specific args */ struct machdep_table { ulong flags; ulong kvbase; ulong identity_map_base; uint pagesize; uint pageshift; ulonglong pagemask; ulong pageoffset; ulong stacksize; uint hz; ulong mhz; int bits; int nr_irqs; uint64_t memsize; int (*eframe_search)(struct bt_info *); void (*back_trace)(struct bt_info *); ulong (*processor_speed)(void); int (*uvtop)(struct task_context *, ulong, physaddr_t *, int); int (*kvtop)(struct task_context *, ulong, physaddr_t *, int); ulong (*get_task_pgd)(ulong); void (*dump_irq)(int); void (*get_stack_frame)(struct bt_info *, ulong *, ulong *); ulong (*get_stackbase)(ulong); ulong (*get_stacktop)(ulong); int (*translate_pte)(ulong, void *, ulonglong); uint64_t (*memory_size)(void); ulong (*vmalloc_start)(void); int (*is_task_addr)(ulong); int (*verify_symbol)(const char *, ulong, char); int (*dis_filter)(ulong, char *, unsigned int); int (*get_smp_cpus)(void); int (*is_kvaddr)(ulong); int (*is_uvaddr)(ulong, struct task_context *); int (*verify_paddr)(uint64_t); void (*cmd_mach)(void); void (*init_kernel_pgd)(void); struct syment *(*value_to_symbol)(ulong, ulong *); struct line_number_hook { char *func; char **file; } *line_number_hooks; ulong last_pgd_read; ulong last_pud_read; ulong last_pmd_read; ulong last_ptbl_read; char *pgd; char *pud; char *pmd; char *ptbl; int ptrs_per_pgd; char *cmdline_args[MAX_MACHDEP_ARGS]; struct machine_specific *machspec; ulong section_size_bits; ulong max_physmem_bits; ulong sections_per_root; int (*xendump_p2m_create)(struct xendump_data *); ulong (*xendump_panic_task)(struct xendump_data *); void (*get_xendump_regs)(struct xendump_data *, struct bt_info *, ulong *, ulong *); void (*clear_machdep_cache)(void); int (*xen_kdump_p2m_create)(struct xen_kdump_data *); int (*in_alternate_stack)(int, ulong); void (*dumpfile_init)(int, void *); void (*process_elf_notes)(void *, unsigned long); int (*get_kvaddr_ranges)(struct vaddr_range *); int (*verify_line_number)(ulong, ulong, ulong); void (*get_irq_affinity)(int); void (*show_interrupts)(int, ulong *); int (*is_page_ptr)(ulong, physaddr_t *); }; /* * Processor-common flags; processor-specific flags use the lower bits * as defined in their processor-specific files below. (see KSYMS_START defs). 
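 *
 * For example, sparc64_init() earlier in this archive advertises a
 * capability by setting one of these bits, and generic code tests it
 * the same way:
 *
 *	machdep->flags |= MACHDEP_BT_TEXT;
 *	...
 *	if (machdep->flags & MACHDEP_BT_TEXT)
 *		...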
*/ #define HWRESET (0x80000000) #define OMIT_FRAME_PTR (0x40000000) #define FRAMESIZE_DEBUG (0x20000000) #define MACHDEP_BT_TEXT (0x10000000) #define DEVMEMRD (0x8000000) #define INIT (0x4000000) #define VM_4_LEVEL (0x2000000) #define MCA (0x1000000) #define PAE (0x800000) #define VMEMMAP (0x400000) extern struct machdep_table *machdep; #ifndef HZ #define HZ sysconf(_SC_CLK_TCK) #endif #define IS_LAST_PGD_READ(pgd) ((ulong)(pgd) == machdep->last_pgd_read) #define IS_LAST_PMD_READ(pmd) ((ulong)(pmd) == machdep->last_pmd_read) #define IS_LAST_PTBL_READ(ptbl) ((ulong)(ptbl) == machdep->last_ptbl_read) #define IS_LAST_PUD_READ(pud) ((ulong)(pud) == machdep->last_pud_read) #define FILL_PGD(PGD, TYPE, SIZE) \ if (!IS_LAST_PGD_READ(PGD)) { \ readmem((ulonglong)((ulong)(PGD)), TYPE, machdep->pgd, \ SIZE, "pgd page", FAULT_ON_ERROR); \ machdep->last_pgd_read = (ulong)(PGD); \ } #define FILL_PUD(PUD, TYPE, SIZE) \ if (!IS_LAST_PUD_READ(PUD)) { \ readmem((ulonglong)((ulong)(PUD)), TYPE, machdep->pud, \ SIZE, "pud page", FAULT_ON_ERROR); \ machdep->last_pud_read = (ulong)(PUD); \ } #define FILL_PMD(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->last_pmd_read = (ulong)(PMD); \ } #define FILL_PTBL(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->last_ptbl_read = (ulong)(PTBL); \ } #define SETUP_ENV (0) #define PRE_SYMTAB (1) #define PRE_GDB (2) #define POST_GDB (3) #define POST_INIT (4) #define POST_VM (5) #define LOG_ONLY (6) #define POST_RELOC (7) #define FOREACH_BT (1) #define FOREACH_VM (2) #define FOREACH_TASK (3) #define FOREACH_SET (4) #define FOREACH_FILES (5) #define FOREACH_NET (6) #define FOREACH_TEST (7) #define FOREACH_VTOP (8) #define FOREACH_SIG (9) #define FOREACH_PS (10) #define MAX_FOREACH_KEYWORDS (10) #define MAX_FOREACH_TASKS (50) #define MAX_FOREACH_PIDS (50) #define MAX_FOREACH_COMMS (50) #define MAX_FOREACH_ARGS (50) #define MAX_REGEX_ARGS (10) #define FOREACH_CMD (0x1) #define FOREACH_r_FLAG (0x2) #define FOREACH_s_FLAG (0x4) #define FOREACH_S_FLAG (0x8) #define FOREACH_i_FLAG (0x10) #define FOREACH_e_FLAG (0x20) #define FOREACH_g_FLAG (0x40) #define FOREACH_l_FLAG (0x80) #define FOREACH_p_FLAG (0x100) #define FOREACH_t_FLAG (0x200) #define FOREACH_u_FLAG (0x400) #define FOREACH_m_FLAG (0x800) #define FOREACH_v_FLAG (0x1000) #define FOREACH_KERNEL (0x2000) #define FOREACH_USER (0x4000) #define FOREACH_SPECIFIED (0x8000) #define FOREACH_ACTIVE (0x10000) #define FOREACH_k_FLAG (0x20000) #define FOREACH_c_FLAG (0x40000) #define FOREACH_f_FLAG (0x80000) #define FOREACH_o_FLAG (0x100000) #define FOREACH_T_FLAG (0x200000) #define FOREACH_F_FLAG (0x400000) #define FOREACH_x_FLAG (0x800000) #define FOREACH_d_FLAG (0x1000000) #define FOREACH_STATE (0x2000000) #define FOREACH_a_FLAG (0x4000000) #define FOREACH_G_FLAG (0x8000000) #define FOREACH_F_FLAG2 (0x10000000) #define FOREACH_y_FLAG (0x20000000) #define FOREACH_GLEADER (0x40000000) #define FOREACH_PS_EXCLUSIVE \ (FOREACH_g_FLAG|FOREACH_a_FLAG|FOREACH_t_FLAG|FOREACH_c_FLAG|FOREACH_p_FLAG|FOREACH_l_FLAG|FOREACH_r_FLAG|FOREACH_m_FLAG) struct foreach_data { ulong flags; int keyword_array[MAX_FOREACH_KEYWORDS]; ulong task_array[MAX_FOREACH_TASKS]; char *comm_array[MAX_FOREACH_COMMS]; ulong pid_array[MAX_FOREACH_PIDS]; ulong arg_array[MAX_FOREACH_ARGS]; struct regex_info { char *pattern; regex_t regex; } regex_info[MAX_REGEX_ARGS]; ulong 
state; char *reference; int keys; int pids; int tasks; int comms; int args; int regexs; int policy; }; struct reference { char *str; ulong cmdflags; ulong hexval; ulong decval; ulong ref1; ulong ref2; void *refp; }; struct offset_table { /* stash of commonly-used offsets */ long list_head_next; /* add new entries to end of table */ long list_head_prev; long task_struct_pid; long task_struct_state; long task_struct_comm; long task_struct_mm; long task_struct_tss; long task_struct_thread; long task_struct_active_mm; long task_struct_tss_eip; long task_struct_tss_esp; long task_struct_tss_ksp; long task_struct_processor; long task_struct_p_pptr; long task_struct_parent; long task_struct_has_cpu; long task_struct_cpus_runnable; long task_struct_thread_eip; long task_struct_thread_esp; long task_struct_thread_ksp; long task_struct_next_task; long task_struct_files; long task_struct_fs; long task_struct_pidhash_next; long task_struct_next_run; long task_struct_flags; long task_struct_sig; long task_struct_signal; long task_struct_blocked; long task_struct_sigpending; long task_struct_pending; long task_struct_sigqueue; long task_struct_sighand; long task_struct_start_time; long task_struct_times; long task_struct_utime; long task_struct_stime; long task_struct_cpu; long task_struct_run_list; long task_struct_pgrp; long task_struct_tgid; long task_struct_namespace; long task_struct_pids; long task_struct_last_run; long task_struct_timestamp; long task_struct_thread_info; long task_struct_nsproxy; long task_struct_rlim; long thread_info_task; long thread_info_cpu; long thread_info_previous_esp; long thread_info_flags; long nsproxy_mnt_ns; long mnt_namespace_root; long mnt_namespace_list; long pid_link_pid; long pid_hash_chain; long hlist_node_next; long hlist_node_pprev; long pid_pid_chain; long thread_struct_eip; long thread_struct_esp; long thread_struct_ksp; long thread_struct_fph; long thread_struct_rip; long thread_struct_rsp; long thread_struct_rsp0; long tms_tms_utime; long tms_tms_stime; long signal_struct_count; long signal_struct_action; long signal_struct_shared_pending; long signal_struct_rlim; long k_sigaction_sa; long sigaction_sa_handler; long sigaction_sa_flags; long sigaction_sa_mask; long sigpending_head; long sigpending_list; long sigpending_signal; long signal_queue_next; long signal_queue_info; long sigqueue_next; long sigqueue_list; long sigqueue_info; long sighand_struct_action; long siginfo_si_signo; long thread_struct_cr3; long thread_struct_ptbr; long thread_struct_pg_tables; long switch_stack_r26; long switch_stack_b0; long switch_stack_ar_bspstore; long switch_stack_ar_pfs; long switch_stack_ar_rnat; long switch_stack_pr; long cpuinfo_ia64_proc_freq; long cpuinfo_ia64_unimpl_va_mask; long cpuinfo_ia64_unimpl_pa_mask; long device_node_type; long device_node_allnext; long device_node_properties; long property_name; long property_value; long property_next; long machdep_calls_setup_residual; long RESIDUAL_VitalProductData; long VPD_ProcessorHz; long bd_info_bi_intfreq; long hwrpb_struct_cycle_freq; long hwrpb_struct_processor_offset; long hwrpb_struct_processor_size; long percpu_struct_halt_PC; long percpu_struct_halt_ra; long percpu_struct_halt_pv; long mm_struct_mmap; long mm_struct_pgd; long mm_struct_rss; long mm_struct_anon_rss; long mm_struct_file_rss; long mm_struct_total_vm; long mm_struct_start_code; long mm_struct_arg_start; long mm_struct_arg_end; long mm_struct_env_start; long mm_struct_env_end; long vm_area_struct_vm_mm; long vm_area_struct_vm_next; long 
vm_area_struct_vm_end; long vm_area_struct_vm_start; long vm_area_struct_vm_flags; long vm_area_struct_vm_file; long vm_area_struct_vm_offset; long vm_area_struct_vm_pgoff; long vm_struct_addr; long vm_struct_size; long vm_struct_next; long module_size_of_struct; long module_next; long module_size; long module_name; long module_nsyms; long module_syms; long module_flags; long module_num_syms; long module_list; long module_gpl_syms; long module_num_gpl_syms; long module_module_core; long module_core_size; long module_core_text_size; long module_num_symtab; long module_symtab; long module_strtab; long module_kallsyms_start; long kallsyms_header_sections; long kallsyms_header_section_off; long kallsyms_header_symbols; long kallsyms_header_symbol_off; long kallsyms_header_string_off; long kallsyms_symbol_section_off; long kallsyms_symbol_symbol_addr; long kallsyms_symbol_name_off; long kallsyms_section_start; long kallsyms_section_size; long kallsyms_section_name_off; long page_next; long page_prev; long page_next_hash; long page_list; long page_list_next; long page_list_prev; long page_inode; long page_offset; long page_count; long page_flags; long page_mapping; long page_index; long page_buffers; long page_lru; long page_pte; long swap_info_struct_swap_file; long swap_info_struct_swap_vfsmnt; long swap_info_struct_flags; long swap_info_struct_swap_map; long swap_info_struct_swap_device; long swap_info_struct_prio; long swap_info_struct_max; long swap_info_struct_pages; long swap_info_struct_old_block_size; long block_device_bd_inode; long block_device_bd_list; long block_device_bd_disk; long irq_desc_t_status; long irq_desc_t_handler; long irq_desc_t_chip; long irq_desc_t_action; long irq_desc_t_depth; long irqdesc_action; long irqdesc_ctl; long irqdesc_level; long irqaction_handler; long irqaction_flags; long irqaction_mask; long irqaction_name; long irqaction_dev_id; long irqaction_next; long hw_interrupt_type_typename; long hw_interrupt_type_startup; long hw_interrupt_type_shutdown; long hw_interrupt_type_handle; long hw_interrupt_type_enable; long hw_interrupt_type_disable; long hw_interrupt_type_ack; long hw_interrupt_type_end; long hw_interrupt_type_set_affinity; long irq_chip_typename; long irq_chip_startup; long irq_chip_shutdown; long irq_chip_enable; long irq_chip_disable; long irq_chip_ack; long irq_chip_end; long irq_chip_set_affinity; long irq_chip_mask; long irq_chip_mask_ack; long irq_chip_unmask; long irq_chip_eoi; long irq_chip_retrigger; long irq_chip_set_type; long irq_chip_set_wake; long irq_cpustat_t___softirq_active; long irq_cpustat_t___softirq_mask; long fdtable_max_fds; long fdtable_max_fdset; long fdtable_open_fds; long fdtable_fd; long files_struct_fdt; long files_struct_max_fds; long files_struct_max_fdset; long files_struct_open_fds; long files_struct_fd; long files_struct_open_fds_init; long file_f_dentry; long file_f_vfsmnt; long file_f_count; long file_f_path; long path_mnt; long path_dentry; long fs_struct_root; long fs_struct_pwd; long fs_struct_rootmnt; long fs_struct_pwdmnt; long dentry_d_inode; long dentry_d_parent; long dentry_d_name; long dentry_d_covers; long dentry_d_iname; long qstr_len; long qstr_name; long inode_i_mode; long inode_i_op; long inode_i_sb; long inode_u; long inode_i_flock; long inode_i_fop; long inode_i_mapping; long address_space_nrpages; long vfsmount_mnt_next; long vfsmount_mnt_devname; long vfsmount_mnt_dirname; long vfsmount_mnt_sb; long vfsmount_mnt_list; long vfsmount_mnt_mountpoint; long vfsmount_mnt_parent; long 
namespace_root; long namespace_list; long super_block_s_dirty; long super_block_s_type; long super_block_s_files; long file_system_type_name; long nlm_file_f_file; long file_lock_fl_owner; long nlm_host_h_exportent; long svc_client_cl_ident; long kmem_cache_s_c_nextp; long kmem_cache_s_c_name; long kmem_cache_s_c_num; long kmem_cache_s_c_org_size; long kmem_cache_s_c_flags; long kmem_cache_s_c_offset; long kmem_cache_s_c_firstp; long kmem_cache_s_c_gfporder; long kmem_cache_s_c_magic; long kmem_cache_s_num; long kmem_cache_s_next; long kmem_cache_s_name; long kmem_cache_s_objsize; long kmem_cache_s_flags; long kmem_cache_s_gfporder; long kmem_cache_s_slabs; long kmem_cache_s_slabs_full; long kmem_cache_s_slabs_partial; long kmem_cache_s_slabs_free; long kmem_cache_s_cpudata; long kmem_cache_s_c_align; long kmem_cache_s_colour_off; long cpucache_s_avail; long cpucache_s_limit; long kmem_cache_s_array; long array_cache_avail; long array_cache_limit; long kmem_cache_s_lists; long kmem_list3_slabs_partial; long kmem_list3_slabs_full; long kmem_list3_slabs_free; long kmem_list3_free_objects; long kmem_list3_shared; long kmem_slab_s_s_nextp; long kmem_slab_s_s_freep; long kmem_slab_s_s_inuse; long kmem_slab_s_s_mem; long kmem_slab_s_s_index; long kmem_slab_s_s_offset; long kmem_slab_s_s_magic; long slab_s_list; long slab_s_s_mem; long slab_s_inuse; long slab_s_free; long slab_list; long slab_s_mem; long slab_inuse; long slab_free; long net_device_next; long net_device_name; long net_device_type; long net_device_addr_len; long net_device_ip_ptr; long net_device_dev_list; long net_dev_base_head; long device_next; long device_name; long device_type; long device_ip_ptr; long device_addr_len; long socket_sk; long sock_daddr; long sock_rcv_saddr; long sock_dport; long sock_sport; long sock_num; long sock_type; long sock_family; long sock_common_skc_family; long sock_sk_type; long inet_sock_inet; long inet_opt_daddr; long inet_opt_rcv_saddr; long inet_opt_dport; long inet_opt_sport; long inet_opt_num; long ipv6_pinfo_rcv_saddr; long ipv6_pinfo_daddr; long timer_list_list; long timer_list_next; long timer_list_entry; long timer_list_expires; long timer_list_function; long timer_vec_root_vec; long timer_vec_vec; long tvec_root_s_vec; long tvec_s_vec; long tvec_t_base_s_tv1; long wait_queue_task; long wait_queue_next; long __wait_queue_task; long __wait_queue_head_task_list; long __wait_queue_task_list; long pglist_data_node_zones; long pglist_data_node_mem_map; long pglist_data_node_start_paddr; long pglist_data_node_start_mapnr; long pglist_data_node_size; long pglist_data_node_id; long pglist_data_node_next; long pglist_data_nr_zones; long pglist_data_node_start_pfn; long pglist_data_pgdat_next; long pglist_data_node_present_pages; long pglist_data_node_spanned_pages; long pglist_data_bdata; long page_cache_bucket_chain; long zone_struct_free_pages; long zone_struct_free_area; long zone_struct_zone_pgdat; long zone_struct_name; long zone_struct_size; long zone_struct_memsize; long zone_struct_zone_start_pfn; long zone_struct_zone_start_paddr; long zone_struct_zone_start_mapnr; long zone_struct_zone_mem_map; long zone_struct_inactive_clean_pages; long zone_struct_inactive_clean_list; long zone_struct_inactive_dirty_pages; long zone_struct_active_pages; long zone_struct_pages_min; long zone_struct_pages_low; long zone_struct_pages_high; long zone_free_pages; long zone_free_area; long zone_zone_pgdat; long zone_zone_mem_map; long zone_name; long zone_spanned_pages; long zone_zone_start_pfn; long 
zone_pages_min; long zone_pages_low; long zone_pages_high; long zone_vm_stat; long neighbour_next; long neighbour_primary_key; long neighbour_ha; long neighbour_dev; long neighbour_nud_state; long neigh_table_hash_buckets; long neigh_table_key_len; long in_device_ifa_list; long in_ifaddr_ifa_next; long in_ifaddr_ifa_address; long pci_dev_global_list; long pci_dev_next; long pci_dev_bus; long pci_dev_devfn; long pci_dev_class; long pci_dev_device; long pci_dev_vendor; long pci_bus_number; long resource_entry_t_from; long resource_entry_t_num; long resource_entry_t_name; long resource_entry_t_next; long resource_name; long resource_start; long resource_end; long resource_sibling; long resource_child; long runqueue_curr; long runqueue_idle; long runqueue_active; long runqueue_expired; long runqueue_arrays; long runqueue_cpu; long cpu_s_idle; long cpu_s_curr; long prio_array_nr_active; long prio_array_queue; long user_regs_struct_ebp; long user_regs_struct_esp; long user_regs_struct_rip; long user_regs_struct_cs; long user_regs_struct_eflags; long user_regs_struct_rsp; long user_regs_struct_ss; long e820map_nr_map; long e820entry_addr; long e820entry_size; long e820entry_type; long char_device_struct_next; long char_device_struct_name; long char_device_struct_fops; long char_device_struct_major; long gendisk_major; long gendisk_disk_name; long gendisk_fops; long blk_major_name_next; long blk_major_name_major; long blk_major_name_name; long radix_tree_root_height; long radix_tree_root_rnode; long x8664_pda_pcurrent; long x8664_pda_data_offset; long x8664_pda_kernelstack; long x8664_pda_irqrsp; long x8664_pda_irqstackptr; long x8664_pda_level4_pgt; long x8664_pda_cpunumber; long x8664_pda_me; long tss_struct_ist; long mem_section_section_mem_map; long vcpu_guest_context_user_regs; long cpu_user_regs_eip; long cpu_user_regs_esp; long cpu_user_regs_rip; long cpu_user_regs_rsp; long unwind_table_core; long unwind_table_init; long unwind_table_address; long unwind_table_size; long unwind_table_link; long unwind_table_name; long rq_cfs; long rq_rt; long rq_nr_running; long cfs_rq_rb_leftmost; long cfs_rq_nr_running; long cfs_rq_tasks_timeline; long task_struct_se; long sched_entity_run_node; long rt_rq_active; long kmem_cache_size; long kmem_cache_objsize; long kmem_cache_offset; long kmem_cache_order; long kmem_cache_local_node; long kmem_cache_objects; long kmem_cache_inuse; long kmem_cache_align; long kmem_cache_name; long kmem_cache_list; long kmem_cache_node; long kmem_cache_cpu_slab; long page_inuse; /* long page_offset; use "old" page->offset */ long page_slab; long page_first_page; long page_freelist; long kmem_cache_node_nr_partial; long kmem_cache_node_nr_slabs; long kmem_cache_node_partial; long kmem_cache_node_full; long pid_numbers; long upid_nr; long upid_ns; long upid_pid_chain; long pid_tasks; long kmem_cache_cpu_freelist; long kmem_cache_cpu_page; long kmem_cache_cpu_node; long kmem_cache_flags; long zone_nr_active; long zone_nr_inactive; long zone_all_unreclaimable; long zone_present_pages; long zone_flags; long zone_pages_scanned; long pcpu_info_vcpu; long pcpu_info_idle; long vcpu_struct_rq; long task_struct_sched_info; long sched_info_last_arrival; long page_objects; long kmem_cache_oo; long char_device_struct_cdev; long char_device_struct_baseminor; long cdev_ops; long probe_next; long probe_dev; long probe_data; long kobj_map_probes; long task_struct_prio; long zone_watermark; long module_sect_attrs; long module_sect_attrs_attrs; long module_sect_attrs_nsections; long 
module_sect_attr_mattr; long module_sect_attr_name; long module_sect_attr_address; long module_attribute_attr; long attribute_owner; long module_sect_attr_attr; long module_sections_attrs; long swap_info_struct_inuse_pages; long s390_lowcore_psw_save_area; long mm_struct_rss_stat; long mm_rss_stat_count; long module_module_init; long module_init_text_size; long cpu_context_save_fp; long cpu_context_save_sp; long cpu_context_save_pc; long elf_prstatus_pr_pid; long elf_prstatus_pr_reg; long irq_desc_t_name; long thread_info_cpu_context; long unwind_table_list; long unwind_table_start; long unwind_table_stop; long unwind_table_begin_addr; long unwind_table_end_addr; long unwind_idx_addr; long unwind_idx_insn; long signal_struct_nr_threads; long module_init_size; long module_percpu; long radix_tree_node_slots; long s390_stack_frame_back_chain; long s390_stack_frame_r14; long user_regs_struct_eip; long user_regs_struct_rax; long user_regs_struct_eax; long user_regs_struct_rbx; long user_regs_struct_ebx; long user_regs_struct_rcx; long user_regs_struct_ecx; long user_regs_struct_rdx; long user_regs_struct_edx; long user_regs_struct_rsi; long user_regs_struct_esi; long user_regs_struct_rdi; long user_regs_struct_edi; long user_regs_struct_ds; long user_regs_struct_es; long user_regs_struct_fs; long user_regs_struct_gs; long user_regs_struct_rbp; long user_regs_struct_r8; long user_regs_struct_r9; long user_regs_struct_r10; long user_regs_struct_r11; long user_regs_struct_r12; long user_regs_struct_r13; long user_regs_struct_r14; long user_regs_struct_r15; long sched_entity_cfs_rq; long sched_entity_my_q; long sched_entity_on_rq; long task_struct_on_rq; long cfs_rq_curr; long irq_desc_t_irq_data; long irq_desc_t_kstat_irqs; long irq_desc_t_affinity; long irq_data_chip; long irq_data_affinity; long kernel_stat_irqs; long socket_alloc_vfs_inode; long class_devices; long class_p; long class_private_devices; long device_knode_class; long device_node; long gendisk_dev; long gendisk_kobj; long gendisk_part0; long gendisk_queue; long hd_struct_dev; long klist_k_list; long klist_node_n_klist; long klist_node_n_node; long kobject_entry; long kset_list; long request_list_count; long request_queue_in_flight; long request_queue_rq; long subsys_private_klist_devices; long subsystem_kset; long mount_mnt_parent; long mount_mnt_mountpoint; long mount_mnt_list; long mount_mnt_devname; long mount_mnt; long task_struct_exit_state; long timekeeper_xtime; long file_f_op; long file_private_data; long hstate_order; long hugetlbfs_sb_info_hstate; long idr_layer_ary; long idr_layer_layer; long idr_layers; long idr_top; long ipc_id_ary_p; long ipc_ids_entries; long ipc_ids_max_id; long ipc_ids_ipcs_idr; long ipc_ids_in_use; long ipc_namespace_ids; long kern_ipc_perm_deleted; long kern_ipc_perm_key; long kern_ipc_perm_mode; long kern_ipc_perm_uid; long kern_ipc_perm_id; long kern_ipc_perm_seq; long nsproxy_ipc_ns; long shmem_inode_info_swapped; long shmem_inode_info_vfs_inode; long shm_file_data_file; long shmid_kernel_shm_file; long shmid_kernel_shm_nattch; long shmid_kernel_shm_perm; long shmid_kernel_shm_segsz; long shmid_kernel_id; long sem_array_sem_perm; long sem_array_sem_id; long sem_array_sem_nsems; long msg_queue_q_perm; long msg_queue_q_id; long msg_queue_q_cbytes; long msg_queue_q_qnum; long super_block_s_fs_info; long rq_timestamp; long radix_tree_node_height; long rb_root_rb_node; long rb_node_rb_left; long rb_node_rb_right; long rt_prio_array_queue; long task_struct_rt; long sched_rt_entity_run_list; long 
log_ts_nsec; long log_len; long log_text_len; long log_dict_len; long log_level; long log_flags_level; long timekeeper_xtime_sec; long neigh_table_hash_mask; long sched_rt_entity_my_q; long neigh_table_hash_shift; long neigh_table_nht_ptr; long task_group_parent; long task_group_css; long cgroup_subsys_state_cgroup; long cgroup_dentry; long task_group_rt_rq; long rt_rq_tg; long task_group_cfs_rq; long cfs_rq_tg; long task_group_siblings; long task_group_children; long task_group_cfs_bandwidth; long cfs_rq_throttled; long task_group_rt_bandwidth; long rt_rq_rt_throttled; long rt_rq_highest_prio; long rt_rq_rt_nr_running; long vmap_area_va_start; long vmap_area_va_end; long vmap_area_list; long vmap_area_flags; long vmap_area_vm; long hrtimer_cpu_base_clock_base; long hrtimer_clock_base_offset; long hrtimer_clock_base_active; long hrtimer_clock_base_first; long hrtimer_clock_base_get_time; long hrtimer_base_first; long hrtimer_base_pending; long hrtimer_base_get_time; long hrtimer_node; long hrtimer_list; long hrtimer_softexpires; long hrtimer_expires; long hrtimer_function; long timerqueue_head_next; long timerqueue_node_expires; long timerqueue_node_node; long ktime_t_tv64; long ktime_t_sec; long ktime_t_nsec; long module_taints; long module_gpgsig_ok; long module_license_gplok; long tnt_bit; long tnt_true; long tnt_false; long task_struct_thread_context_fp; long task_struct_thread_context_sp; long task_struct_thread_context_pc; long page_slab_page; long trace_print_flags_mask; long trace_print_flags_name; long task_struct_rss_stat; long task_rss_stat_count; long page_s_mem; long page_active; long hstate_nr_huge_pages; long hstate_free_huge_pages; long hstate_name; long cgroup_kn; long kernfs_node_name; long kernfs_node_parent; long kmem_cache_cpu_partial; long kmem_cache_cpu_cache; long nsproxy_net_ns; long atomic_t_counter; long percpu_counter_count; long mm_struct_mm_count; long task_struct_thread_reg29; long task_struct_thread_reg31; long pt_regs_regs; long pt_regs_cp0_badvaddr; long address_space_page_tree; long page_compound_head; long irq_desc_irq_data; long kmem_cache_node_total_objects; long timer_base_vectors; long request_queue_mq_ops; long request_queue_queue_ctx; long blk_mq_ctx_rq_dispatched; long blk_mq_ctx_rq_completed; long task_struct_stack; long tnt_mod; long radix_tree_node_shift; long kmem_cache_red_left_pad; long inactive_task_frame_ret_addr; long sk_buff_head_next; long sk_buff_head_qlen; long sk_buff_next; long sk_buff_len; long sk_buff_data; long nlmsghdr_nlmsg_type; long module_arch; long mod_arch_specific_num_orcs; long mod_arch_specific_orc_unwind_ip; long mod_arch_specific_orc_unwind; long task_struct_policy; long kmem_cache_random; long pid_namespace_idr; long idr_idr_rt; long bpf_prog_aux; long bpf_prog_type; long bpf_prog_tag; long bpf_prog_jited_len; long bpf_prog_bpf_func; long bpf_prog_len; long bpf_prog_insnsi; long bpf_prog_pages; long bpf_map_map_type; long bpf_map_map_flags; long bpf_map_pages; long bpf_map_key_size; long bpf_map_value_size; long bpf_map_max_entries; long bpf_map_user; long bpf_map_name; long bpf_prog_aux_used_map_cnt; long bpf_prog_aux_used_maps; long bpf_prog_aux_load_time; long bpf_prog_aux_user; long user_struct_uid; long idr_cur; long kmem_cache_memcg_params; long memcg_cache_params___root_caches_node; long memcg_cache_params_children; long memcg_cache_params_children_node; long task_struct_pid_links; long kernel_symbol_value; long pci_dev_dev; long pci_dev_hdr_type; long pci_dev_pcie_flags_reg; long pci_bus_node; long 
pci_bus_devices; long pci_bus_dev; long pci_bus_children; long pci_bus_parent; long pci_bus_self; long device_kobj; long kobject_name; long memory_block_dev; long memory_block_start_section_nr; long memory_block_end_section_nr; long memory_block_state; long memory_block_nid; long mem_section_pageblock_flags; long bus_type_p; long device_private_device; long device_private_knode_bus; long xarray_xa_head; long xa_node_slots; long xa_node_shift; long hd_struct_dkstats; long disk_stats_in_flight; long cpu_context_save_r7; long dentry_d_sb; long device_private_knode_class; long timerqueue_head_rb_root; long rb_root_cached_rb_leftmost; }; struct size_table { /* stash of commonly-used sizes */ long page; long free_area_struct; long zone_struct; long free_area; long zone; long kmem_slab_s; long kmem_cache_s; long kmem_bufctl_t; long slab_s; long slab; long cpucache_s; long array_cache; long swap_info_struct; long mm_struct; long vm_area_struct; long pglist_data; long page_cache_bucket; long pt_regs; long task_struct; long thread_info; long softirq_state; long desc_struct; long umode_t; long dentry; long files_struct; long fdtable; long fs_struct; long file; long inode; long vfsmount; long super_block; long irqdesc; long module; long list_head; long hlist_node; long hlist_head; long irq_cpustat_t; long cpuinfo_x86; long cpuinfo_ia64; long timer_list; long timer_vec_root; long timer_vec; long tvec_root_s; long tvec_s; long tvec_t_base_s; long wait_queue; long __wait_queue; long device; long net_device; long sock; long signal_struct; long sigpending_signal; long signal_queue; long sighand_struct; long sigqueue; long k_sigaction; long resource_entry_t; long resource; long runqueue; long irq_desc_t; long task_union; long thread_union; long prio_array; long user_regs_struct; long switch_stack; long vm_area_struct_vm_flags; long e820map; long e820entry; long cpu_s; long pgd_t; long kallsyms_header; long kallsyms_symbol; long kallsyms_section; long irq_ctx; long block_device; long blk_major_name; long gendisk; long address_space; long char_device_struct; long inet_sock; long in6_addr; long socket; long spinlock_t; long radix_tree_root; long radix_tree_node; long x8664_pda; long ppc64_paca; long gate_struct; long tss_struct; long task_struct_start_time; long cputime_t; long mem_section; long pid_link; long unwind_table; long rlimit; long kmem_cache; long kmem_cache_node; long upid; long kmem_cache_cpu; long cfs_rq; long pcpu_info; long vcpu_struct; long cdev; long probe; long kobj_map; long page_flags; long module_sect_attr; long task_struct_utime; long task_struct_stime; long cpu_context_save; long elf_prstatus; long note_buf; long unwind_idx; long softirq_action; long irq_data; long s390_stack_frame; long percpu_data; long sched_entity; long kernel_stat; long subsystem; long class_private; long rq_in_flight; long class_private_devices; long mount; long hstate; long ipc_ids; long shmid_kernel; long sem_array; long msg_queue; long log; long log_level; long rt_rq; long task_group; long vmap_area; long hrtimer_clock_base; long hrtimer_base; long tnt; long trace_print_flags; long task_struct_flags; long timer_base; long taint_flag; long nlmsghdr; long nlmsghdr_nlmsg_type; long sk_buff_head_qlen; long sk_buff_len; long orc_entry; long task_struct_policy; long pid; long bpf_prog; long bpf_prog_aux; long bpf_map; long bpf_insn; long xarray; long xa_node; }; struct array_table { int kmem_cache_s_name; int kmem_cache_s_c_name; int kmem_cache_s_array; int kmem_cache_s_cpudata; int irq_desc; int irq_action; int 
log_buf; int timer_vec_vec; int timer_vec_root_vec; int tvec_s_vec; int tvec_root_s_vec; int page_hash_table; int net_device_name; int neigh_table_hash_buckets; int neighbour_ha; int swap_info; int pglist_data_node_zones; int zone_struct_free_area; int zone_free_area; int free_area; int free_area_DIMENSION; int prio_array_queue; int height_to_maxindex; int pid_hash; int kmem_cache_node; int kmem_cache_cpu_slab; int rt_prio_array_queue; int height_to_maxnodes; int task_struct_rlim; int signal_struct_rlim; int vm_numa_stat; }; /* * The following set of macros use gdb to determine structure, union, * or member sizes/offsets. They should be used only during initialization * of the offset_table or size_table, or with data structures whose names * or members are only known/specified during runtime. */ #define MEMBER_SIZE_REQUEST ((struct datatype_member *)(-1)) #define ANON_MEMBER_OFFSET_REQUEST ((struct datatype_member *)(-2)) #define MEMBER_TYPE_REQUEST ((struct datatype_member *)(-3)) #define STRUCT_SIZE_REQUEST ((struct datatype_member *)(-4)) #define MEMBER_TYPE_NAME_REQUEST ((struct datatype_member *)(-5)) #define STRUCT_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST) #define UNION_SIZE(X) datatype_info((X), NULL, STRUCT_SIZE_REQUEST) #define STRUCT_EXISTS(X) (datatype_info((X), NULL, STRUCT_SIZE_REQUEST) >= 0) #define DATATYPE_SIZE(X) datatype_info((X)->name, NULL, (X)) #define MEMBER_OFFSET(X,Y) datatype_info((X), (Y), NULL) #define MEMBER_EXISTS(X,Y) (datatype_info((X), (Y), NULL) >= 0) #define MEMBER_SIZE(X,Y) datatype_info((X), (Y), MEMBER_SIZE_REQUEST) #define MEMBER_TYPE(X,Y) datatype_info((X), (Y), MEMBER_TYPE_REQUEST) #define MEMBER_TYPE_NAME(X,Y) ((char *)datatype_info((X), (Y), MEMBER_TYPE_NAME_REQUEST)) #define ANON_MEMBER_OFFSET(X,Y) datatype_info((X), (Y), ANON_MEMBER_OFFSET_REQUEST) /* * The following set of macros can only be used with pre-initialized fields * in the offset table, size table or array_table. */ #define OFFSET(X) (OFFSET_verify(offset_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X)) #define SIZE(X) (SIZE_verify(size_table.X, (char *)__FUNCTION__, __FILE__, __LINE__, #X)) #define INVALID_OFFSET (-1) #define INVALID_MEMBER(X) (offset_table.X == INVALID_OFFSET) #define INVALID_SIZE(X) (size_table.X == -1) #define VALID_SIZE(X) (size_table.X >= 0) #define VALID_STRUCT(X) (size_table.X >= 0) #define VALID_MEMBER(X) (offset_table.X >= 0) #define ARRAY_LENGTH(X) (array_table.X) #define ASSIGN_OFFSET(X) (offset_table.X) #define ASSIGN_SIZE(X) (size_table.X) #define OFFSET_OPTION(X,Y) (OFFSET_option(offset_table.X, offset_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y)) #define SIZE_OPTION(X,Y) (SIZE_option(size_table.X, size_table.Y, (char *)__FUNCTION__, __FILE__, __LINE__, #X, #Y)) #define MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = MEMBER_OFFSET(Y, Z)) #define STRUCT_SIZE_INIT(X, Y) (ASSIGN_SIZE(X) = STRUCT_SIZE(Y)) #define ARRAY_LENGTH_INIT(A, B, C, D, E) ((A) = get_array_length(C, D, E)) #define ARRAY_LENGTH_INIT_ALT(A, B, C, D, E) ((A) = get_array_length_alt(B, C, D, E)) #define MEMBER_SIZE_INIT(X, Y, Z) (ASSIGN_SIZE(X) = MEMBER_SIZE(Y, Z)) #define ANON_MEMBER_OFFSET_INIT(X, Y, Z) (ASSIGN_OFFSET(X) = ANON_MEMBER_OFFSET(Y, Z)) /* * For use with non-debug kernels. */ struct builtin_debug_table { char *release; char *machine_type; struct offset_table *offset_table; struct size_table *size_table; struct array_table *array_table; }; /* * Facilitators for pulling correctly-sized data out of a buffer at a * known address.
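 *
 * A minimal usage sketch (an illustration, not part of the original header):
 * assuming "tcb" is a hypothetical task_struct image previously filled in
 * via readmem(), a member is pulled out at its debuginfo-derived offset
 * with the loaders defined below, e.g.
 *
 *     ulong mm = ULONG(tcb + OFFSET(task_struct_mm));
 *     int pid = INT(tcb + OFFSET(task_struct_pid));
 *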
*/ #ifdef NEED_ALIGNED_MEM_ACCESS #define DEF_LOADER(TYPE) \ static inline TYPE \ load_##TYPE (char *addr) \ { \ TYPE ret; \ size_t i = sizeof(TYPE); \ while (i--) \ ((char *)&ret)[i] = addr[i]; \ return ret; \ } DEF_LOADER(int); DEF_LOADER(uint); DEF_LOADER(long); DEF_LOADER(ulong); DEF_LOADER(ulonglong); DEF_LOADER(ushort); DEF_LOADER(short); typedef void *pointer_t; DEF_LOADER(pointer_t); #define LOADER(TYPE) load_##TYPE #define INT(ADDR) LOADER(int) ((char *)(ADDR)) #define UINT(ADDR) LOADER(uint) ((char *)(ADDR)) #define LONG(ADDR) LOADER(long) ((char *)(ADDR)) #define ULONG(ADDR) LOADER(ulong) ((char *)(ADDR)) #define ULONGLONG(ADDR) LOADER(ulonglong) ((char *)(ADDR)) #define ULONG_PTR(ADDR) ((ulong *) (LOADER(pointer_t) ((char *)(ADDR)))) #define USHORT(ADDR) LOADER(ushort) ((char *)(ADDR)) #define SHORT(ADDR) LOADER(short) ((char *)(ADDR)) #define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) #define VOID_PTR(ADDR) ((void *) (LOADER(pointer_t) ((char *)(ADDR)))) #else #define INT(ADDR) *((int *)((char *)(ADDR))) #define UINT(ADDR) *((uint *)((char *)(ADDR))) #define LONG(ADDR) *((long *)((char *)(ADDR))) #define ULONG(ADDR) *((ulong *)((char *)(ADDR))) #define ULONGLONG(ADDR) *((ulonglong *)((char *)(ADDR))) #define ULONG_PTR(ADDR) *((ulong **)((char *)(ADDR))) #define USHORT(ADDR) *((ushort *)((char *)(ADDR))) #define SHORT(ADDR) *((short *)((char *)(ADDR))) #define UCHAR(ADDR) *((unsigned char *)((char *)(ADDR))) #define VOID_PTR(ADDR) *((void **)((char *)(ADDR))) #endif /* NEED_ALIGNED_MEM_ACCESS */ struct node_table { int node_id; ulong pgdat; ulong mem_map; ulong size; ulong present; ulonglong start_paddr; ulong start_mapnr; }; struct meminfo; struct slab_data; #define VMA_CACHE (20) struct vm_table { /* kernel VM-related data */ ulong flags; ulong kernel_pgd[NR_CPUS]; ulong high_memory; ulong vmalloc_start; ulong mem_map; long total_pages; ulong totalram_pages; ulong totalhigh_pages; ulong num_physpages; ulong max_mapnr; ulong kmem_max_c_num; ulong kmem_max_limit; ulong kmem_max_cpus; ulong kmem_cache_count; ulong kmem_cache_len_nodes; ulong PG_reserved; ulong PG_slab; ulong PG_head_tail_mask; int kmem_cache_namelen; ulong page_hash_table; int page_hash_table_len; int paddr_prlen; int numnodes; int nr_zones; int nr_free_areas; struct node_table *node_table; void (*dump_free_pages)(struct meminfo *); void (*dump_kmem_cache)(struct meminfo *); struct slab_data *slab_data; uint nr_swapfiles; ulong last_swap_read; char *swap_info_struct; char *vma_cache; ulong cached_vma[VMA_CACHE]; ulong cached_vma_hits[VMA_CACHE]; int vma_cache_index; ulong vma_cache_fills; void *mem_sec; char *mem_section; int ZONE_HIGHMEM; ulong *node_online_map; int node_online_map_len; int nr_vm_stat_items; char **vm_stat_items; int cpu_slab_type; int nr_vm_event_items; char **vm_event_items; int nr_bad_slab_caches; ulong *bad_slab_caches; int nr_pageflags; struct pageflags_data { ulong mask; char *name; } *pageflags_data; ulong max_mem_section_nr; }; #define NODES (0x1) #define ZONES (0x2) #define PERCPU_KMALLOC_V1 (0x4) #define COMMON_VADDR (0x8) #define KMEM_CACHE_INIT (0x10) #define V_MEM_MAP (0x20) #define PERCPU_KMALLOC_V2 (0x40) #define KMEM_CACHE_UNAVAIL (0x80) #define FLATMEM (0x100) #define DISCONTIGMEM (0x200) #define SPARSEMEM (0x400) #define SPARSEMEM_EX (0x800) #define PERCPU_KMALLOC_V2_NODES (0x1000) #define KMEM_CACHE_DELAY (0x2000) #define NODES_ONLINE (0x4000) #define VM_STAT (0x8000) #define KMALLOC_SLUB (0x10000) #define CONFIG_NUMA (0x20000) #define VM_EVENT (0x40000) #define 
PGCNT_ADJ (0x80000) #define VM_INIT (0x100000) #define SWAPINFO_V1 (0x200000) #define SWAPINFO_V2 (0x400000) #define NODELISTS_IS_PTR (0x800000) #define KMALLOC_COMMON (0x1000000) #define USE_VMAP_AREA (0x2000000) #define PAGEFLAGS (0x4000000) #define SLAB_OVERLOAD_PAGE (0x8000000) #define SLAB_CPU_CACHE (0x10000000) #define SLAB_ROOT_CACHES (0x20000000) #define IS_FLATMEM() (vt->flags & FLATMEM) #define IS_DISCONTIGMEM() (vt->flags & DISCONTIGMEM) #define IS_SPARSEMEM() (vt->flags & SPARSEMEM) #define IS_SPARSEMEM_EX() (vt->flags & SPARSEMEM_EX) #define COMMON_VADDR_SPACE() (vt->flags & COMMON_VADDR) #define PADDR_PRLEN (vt->paddr_prlen) struct datatype_member { /* minimal definition of a structure/union */ char *name; /* and possibly a member within it */ char *member; ulong type; long size; long member_offset; long member_size; int member_typecode; ulong flags; char *tagname; /* tagname and value for enums */ long value; ulong vaddr; }; #define union_name struct_name struct list_data { /* generic structure used by do_list() to walk */ ulong flags; /* through linked lists in the kernel */ ulong start; long member_offset; long list_head_offset; ulong end; ulong searchfor; char **structname; int structname_args; char *header; ulong *list_ptr; int (*callback_func)(void *, void *); void *callback_data; long struct_list_offset; }; #define LIST_OFFSET_ENTERED (VERBOSE << 1) #define LIST_START_ENTERED (VERBOSE << 2) #define LIST_HEAD_FORMAT (VERBOSE << 3) #define LIST_HEAD_POINTER (VERBOSE << 4) #define RETURN_ON_DUPLICATE (VERBOSE << 5) #define RETURN_ON_LIST_ERROR (VERBOSE << 6) #define LIST_STRUCT_RADIX_10 (VERBOSE << 7) #define LIST_STRUCT_RADIX_16 (VERBOSE << 8) #define LIST_HEAD_REVERSE (VERBOSE << 9) #define LIST_ALLOCATE (VERBOSE << 10) #define LIST_CALLBACK (VERBOSE << 11) #define CALLBACK_RETURN (VERBOSE << 12) #define LIST_PARSE_MEMBER (VERBOSE << 13) #define LIST_READ_MEMBER (VERBOSE << 14) #define LIST_BRENT_ALGO (VERBOSE << 15) struct tree_data { ulong flags; ulong start; long node_member_offset; char **structname; int structname_args; int count; }; #define TREE_ROOT_OFFSET_ENTERED (VERBOSE << 1) #define TREE_NODE_OFFSET_ENTERED (VERBOSE << 2) #define TREE_NODE_POINTER (VERBOSE << 3) #define TREE_POSITION_DISPLAY (VERBOSE << 4) #define TREE_STRUCT_RADIX_10 (VERBOSE << 5) #define TREE_STRUCT_RADIX_16 (VERBOSE << 6) #define TREE_PARSE_MEMBER (VERBOSE << 7) #define TREE_READ_MEMBER (VERBOSE << 8) #define TREE_LINEAR_ORDER (VERBOSE << 9) #define ALIAS_RUNTIME (1) #define ALIAS_RCLOCAL (2) #define ALIAS_RCHOME (3) #define ALIAS_BUILTIN (4) struct alias_data { /* command alias storage */ struct alias_data *next; char *alias; int argcnt; int size; int origin; char *args[MAXARGS]; char argbuf[1]; }; struct rb_node { unsigned long rb_parent_color; #define RB_RED 0 #define RB_BLACK 1 struct rb_node *rb_right; struct rb_node *rb_left; }; struct rb_root { struct rb_node *rb_node; }; #define NUMBER_STACKFRAMES 4 #define SAVE_RETURN_ADDRESS(retaddr) \ { \ int i; \ int saved_stacks; \ \ saved_stacks = backtrace((void **)retaddr, NUMBER_STACKFRAMES); \ \ /* explicitly zero out the invalid addresses */ \ for (i = saved_stacks; i < NUMBER_STACKFRAMES; i++) \ retaddr[i] = 0; \ } #endif /* !GDB_COMMON */ #define SYMBOL_NAME_USED (0x1) #define MODULE_SYMBOL (0x2) #define IS_MODULE_SYMBOL(SYM) ((SYM)->flags & MODULE_SYMBOL) struct syment { ulong value; char *name; struct syment *val_hash_next; struct syment *name_hash_next; char type; unsigned char cnt; unsigned char flags; unsigned char pad2; };
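/*
 * Illustrative sketch, not part of the original header: a linear walk of
 * one value-hash chain built from struct syment above.  The real lookups
 * go through value_search()/symbol_search(); "head" is a hypothetical
 * caller-supplied chain head (e.g. one st->symval_hash[] bucket).
 */
static inline struct syment *
syment_chain_find_sketch(struct syment *head, ulong value)
{
	struct syment *sp;

	for (sp = head; sp; sp = sp->val_hash_next)
		if (sp->value == value)
			return sp;
	return NULL;
}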
#define NAMESPACE_INIT (1) #define NAMESPACE_REUSE (2) #define NAMESPACE_FREE (3) #define NAMESPACE_INSTALL (4) #define NAMESPACE_COMPLETE (5) struct symbol_namespace { char *address; size_t size; long index; long cnt; }; struct downsized { char *name; struct downsized *next; }; #define SYMVAL_HASH (512) #define SYMVAL_HASH_INDEX(vaddr) \ (((vaddr) >> machdep->pageshift) % SYMVAL_HASH) #define SYMNAME_HASH (512) #define SYMNAME_HASH_INDEX(name) \ ((name[0] ^ (name[strlen(name)-1] * name[strlen(name)/2])) % SYMNAME_HASH) #define PATCH_KERNEL_SYMBOLS_START ((char *)(1)) #define PATCH_KERNEL_SYMBOLS_STOP ((char *)(2)) #ifndef GDB_COMMON struct symbol_table_data { ulong flags; #ifdef GDB_5_3 struct _bfd *bfd; #else struct bfd *bfd; #endif struct sec *sections; struct syment *symtable; struct syment *symend; long symcnt; ulong syment_size; struct symval_hash_chain { struct syment *val_hash_head; struct syment *val_hash_last; } symval_hash[SYMVAL_HASH]; double val_hash_searches; double val_hash_iterations; struct syment *symname_hash[SYMNAME_HASH]; struct symbol_namespace kernel_namespace; struct syment *ext_module_symtable; struct syment *ext_module_symend; long ext_module_symcnt; struct symbol_namespace ext_module_namespace; int mods_installed; struct load_module *current; struct load_module *load_modules; off_t dwarf_eh_frame_file_offset; ulong dwarf_eh_frame_size; ulong first_ksymbol; ulong __per_cpu_start; ulong __per_cpu_end; off_t dwarf_debug_frame_file_offset; ulong dwarf_debug_frame_size; ulong first_section_start; ulong last_section_end; ulong _stext_vmlinux; struct downsized downsized; ulong divide_error_vmlinux; ulong idt_table_vmlinux; ulong saved_command_line_vmlinux; ulong pti_init_vmlinux; ulong kaiser_init_vmlinux; int kernel_symbol_type; }; /* flags for st */ #define KERNEL_SYMS (0x1) #define MODULE_SYMS (0x2) #define LOAD_MODULE_SYMS (0x4) #define INSMOD_BUILTIN (0x8) #define GDB_SYMS_PATCHED (0x10) #define GDB_PATCHED() (st->flags & GDB_SYMS_PATCHED) #define NO_SEC_LOAD (0x20) #define NO_SEC_CONTENTS (0x40) #define FORCE_DEBUGINFO (0x80) #define CRC_MATCHES (0x100) #define ADD_SYMBOL_FILE (0x200) #define USE_OLD_ADD_SYM (0x400) #define PERCPU_SYMS (0x800) #define MODSECT_UNKNOWN (0x1000) #define MODSECT_V1 (0x2000) #define MODSECT_V2 (0x4000) #define MODSECT_V3 (0x8000) #define MODSECT_VMASK (MODSECT_V1|MODSECT_V2|MODSECT_V3) #define NO_STRIP (0x10000) #define NO_LINE_NUMBERS() ((st->flags & GDB_SYMS_PATCHED) && !(kt->flags2 & KASLR)) #endif /* !GDB_COMMON */ #define ALL_MODULES (0) #define MAX_MOD_NAMELIST (256) #define MAX_MOD_NAME (64) #define MAX_MOD_SEC_NAME (64) #define MOD_EXT_SYMS (0x1) #define MOD_LOAD_SYMS (0x2) #define MOD_REMOTE (0x4) #define MOD_KALLSYMS (0x8) #define MOD_INITRD (0x10) #define MOD_NOPATCH (0x20) #define MOD_INIT (0x40) #define MOD_DO_READNOW (0x80) #define SEC_FOUND (0x10000) struct mod_section_data { #if defined(GDB_5_3) || defined(GDB_6_0) struct sec *section; #else struct bfd_section *section; #endif char name[MAX_MOD_SEC_NAME]; ulong offset; ulong size; int priority; int flags; }; struct load_module { ulong mod_base; ulong module_struct; long mod_size; char mod_namelist[MAX_MOD_NAMELIST]; char mod_name[MAX_MOD_NAME]; ulong mod_flags; struct syment *mod_symtable; struct syment *mod_symend; long mod_ext_symcnt; struct syment *mod_ext_symtable; struct syment *mod_ext_symend; long mod_load_symcnt; struct syment *mod_load_symtable; struct syment *mod_load_symend; long mod_symalloc; struct symbol_namespace mod_load_namespace; ulong 
mod_size_of_struct; ulong mod_text_start; ulong mod_etext_guess; ulong mod_rodata_start; ulong mod_data_start; ulong mod_bss_start; int mod_sections; struct mod_section_data *mod_section_data; ulong mod_init_text_size; ulong mod_init_module_ptr; ulong mod_init_size; struct syment *mod_init_symtable; struct syment *mod_init_symend; ulong mod_percpu; ulong mod_percpu_size; struct objfile *loaded_objfile; }; #define IN_MODULE(A,L) \ (((ulong)(A) >= (L)->mod_base) && ((ulong)(A) < ((L)->mod_base+(L)->mod_size))) #define IN_MODULE_INIT(A,L) \ (((ulong)(A) >= (L)->mod_init_module_ptr) && ((ulong)(A) < ((L)->mod_init_module_ptr+(L)->mod_init_size))) #define IN_MODULE_PERCPU(A,L) \ (((ulong)(A) >= (L)->mod_percpu) && ((ulong)(A) < ((L)->mod_percpu+(L)->mod_percpu_size))) #define MODULE_PERCPU_SYMS_LOADED(L) ((L)->mod_percpu && (L)->mod_percpu_size) #ifndef GDB_COMMON #define KVADDR (0x1) #define UVADDR (0x2) #define PHYSADDR (0x4) #define XENMACHADDR (0x8) #define FILEADDR (0x10) #define AMBIGUOUS (~0) #define USE_USER_PGD (UVADDR << 2) #define VERIFY_ADDR (0x8) /* vm_area_dump() flags -- must follow */ #define PRINT_INODES (0x10) /* KVADDR, UVADDR, and PHYSADDR */ #define PRINT_MM_STRUCT (0x20) #define PRINT_VMA_STRUCTS (0x40) #define PRINT_SINGLE_VMA (0x80) #define PRINT_RADIX_10 (0x100) #define PRINT_RADIX_16 (0x200) #define PRINT_NRPAGES (0x400) #define MIN_PAGE_SIZE (4096) #define PTOB(X) ((ulonglong)(X) << machdep->pageshift) #define BTOP(X) ((ulonglong)(X) >> machdep->pageshift) #define PAGESIZE() (machdep->pagesize) #define PAGESHIFT() (machdep->pageshift) #define PAGEOFFSET(X) (((ulong)(X)) & machdep->pageoffset) #define VIRTPAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PHYSPAGEBASE(X) (((physaddr_t)(X)) & (physaddr_t)machdep->pagemask) /* * Sparse memory stuff * These must follow the definitions in the kernel mmzone.h */ #define SECTION_SIZE_BITS() (machdep->section_size_bits) #define MAX_PHYSMEM_BITS() (machdep->max_physmem_bits) #define SECTIONS_SHIFT() (MAX_PHYSMEM_BITS() - SECTION_SIZE_BITS()) #define PA_SECTION_SHIFT() (SECTION_SIZE_BITS()) #define PFN_SECTION_SHIFT() (SECTION_SIZE_BITS() - PAGESHIFT()) #define NR_MEM_SECTIONS() (1UL << SECTIONS_SHIFT()) #define PAGES_PER_SECTION() (1UL << PFN_SECTION_SHIFT()) #define PAGE_SECTION_MASK() (~(PAGES_PER_SECTION()-1)) #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT()) #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT()) #define SECTIONS_PER_ROOT() (machdep->sections_per_root) /* CONFIG_SPARSEMEM_EXTREME */ #define _SECTIONS_PER_ROOT_EXTREME() (PAGESIZE() / SIZE(mem_section)) /* !CONFIG_SPARSEMEM_EXTREME */ #define _SECTIONS_PER_ROOT() (1) #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT()) #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #define NR_SECTION_ROOTS() (DIV_ROUND_UP(NR_MEM_SECTIONS(), SECTIONS_PER_ROOT())) #define SECTION_ROOT_MASK() (SECTIONS_PER_ROOT() - 1) struct QEMUCPUSegment { uint32_t selector; uint32_t limit; uint32_t flags; uint32_t pad; uint64_t base; }; typedef struct QEMUCPUSegment QEMUCPUSegment; struct QEMUCPUState { uint32_t version; uint32_t size; uint64_t rax, rbx, rcx, rdx, rsi, rdi, rsp, rbp; uint64_t r8, r9, r10, r11, r12, r13, r14, r15; uint64_t rip, rflags; QEMUCPUSegment cs, ds, es, fs, gs, ss; QEMUCPUSegment ldt, tr, gdt, idt; uint64_t cr[5]; }; typedef struct QEMUCPUState QEMUCPUState; /* * Machine specific stuff */ #ifdef ARM #define _32BIT_ #define MACHINE_TYPE "ARM" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) \ 
((unsigned long)(X)-(machdep->machspec->phys_base)+(machdep->kvbase)) #define VTOP(X) \ ((unsigned long)(X)-(machdep->kvbase)+(machdep->machspec->phys_base)) #define IS_VMALLOC_ADDR(X) arm_is_vmalloc_addr((ulong)(X)) #define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define PGDIR_SHIFT (21) #define PTRS_PER_PTE (512) #define PTRS_PER_PGD (2048) #define PGD_OFFSET(vaddr) ((vaddr) >> PGDIR_SHIFT) #define PTE_OFFSET(vaddr) (((vaddr) >> PAGESHIFT()) & (PTRS_PER_PTE - 1)) #define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 28 #define _MAX_PHYSMEM_BITS 32 /*add for LPAE*/ typedef unsigned long long u64; typedef signed int s32; typedef u64 pgd_t; typedef u64 pmd_t; typedef u64 pte_t; #define PMDSIZE() (PAGESIZE()) #define LPAE_PGDIR_SHIFT (30) #define LPAE_PMDIR_SHIFT (21) #define LPAE_PGD_OFFSET(vaddr) ((vaddr) >> LPAE_PGDIR_SHIFT) #define LPAE_PMD_OFFSET(vaddr) (((vaddr) >> LPAE_PMDIR_SHIFT) & \ ((1<<(LPAE_PGDIR_SHIFT-LPAE_PMDIR_SHIFT))-1)) #define _SECTION_SIZE_BITS_LPAE 28 #define _MAX_PHYSMEM_BITS_LPAE 36 /* * #define PTRS_PER_PTE 512 * #define PTRS_PER_PMD 512 * #define PTRS_PER_PGD 4 * */ #define LPAE_PGDIR_SIZE() 32 #define LPAE_PGDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PGDIR_SIZE() - 1)) #define LPAE_PMDIR_SIZE() 4096 #define LPAE_PMDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PMDIR_SIZE() - 1)) #define LPAE_PTEDIR_SIZE() 4096 #define LPAE_PTEDIR_OFFSET(X) (((ulong)(X)) & (LPAE_PTEDIR_SIZE() - 1)) /*section size for LPAE is 2MiB*/ #define LPAE_SECTION_PAGE_MASK (~((MEGABYTES(2))-1)) #define _PHYSICAL_MASK_LPAE ((1ULL << _MAX_PHYSMEM_BITS_LPAE) - 1) #define PAGE_BASE_MASK ((u64)((s32)machdep->pagemask & _PHYSICAL_MASK_LPAE)) #define LPAE_PAGEBASE(X) (((ulonglong)(X)) & PAGE_BASE_MASK) #define LPAE_VTOP(X) \ ((unsigned long long)(unsigned long)(X) - \ (machdep->kvbase) + (machdep->machspec->phys_base)) #define IS_LAST_PGD_READ_LPAE(pgd) ((pgd) == \ machdep->machspec->last_pgd_read_lpae) #define IS_LAST_PMD_READ_LPAE(pmd) ((pmd) == \ machdep->machspec->last_pmd_read_lpae) #define IS_LAST_PTBL_READ_LPAE(ptbl) ((ptbl) == \ machdep->machspec->last_ptbl_read_lpae) #define FILL_PGD_LPAE(PGD, TYPE, SIZE) \ if (!IS_LAST_PGD_READ_LPAE(PGD)) { \ readmem((ulonglong)(PGD), TYPE, machdep->pgd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pgd_read_lpae \ = (ulonglong)(PGD); \ } #define FILL_PMD_LPAE(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ_LPAE(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pmd_read_lpae \ = (ulonglong)(PMD); \ } #define FILL_PTBL_LPAE(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ_LPAE(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->machspec->last_ptbl_read_lpae \ = (ulonglong)(PTBL); \ } #endif /* ARM */ #ifndef EM_AARCH64 #define EM_AARCH64 183 #endif #ifdef ARM64 #define _64BIT_ #define MACHINE_TYPE "ARM64" #define PTOV(X) \ 
((unsigned long)(X)-(machdep->machspec->phys_offset)+(machdep->machspec->page_offset)) #define VTOP(X) arm64_VTOP((ulong)(X)) #define USERSPACE_TOP (machdep->machspec->userspace_top) #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define IS_VMALLOC_ADDR(X) arm64_IS_VMALLOC_ADDR((ulong)(X)) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) /* * 48-bit physical address supported. */ #define PHYS_MASK_SHIFT (48) #define PHYS_MASK (((1UL) << PHYS_MASK_SHIFT) - 1) typedef signed int s32; /* * 3-levels / 4K pages */ #define PTRS_PER_PGD_L3_4K (512) #define PTRS_PER_PMD_L3_4K (512) #define PTRS_PER_PTE_L3_4K (512) #define PGDIR_SHIFT_L3_4K (30) #define PGDIR_SIZE_L3_4K ((1UL) << PGDIR_SHIFT_L3_4K) #define PGDIR_MASK_L3_4K (~(PGDIR_SIZE_L3_4K-1)) #define PMD_SHIFT_L3_4K (21) #define PMD_SIZE_L3_4K (1UL << PMD_SHIFT_L3_4K) #define PMD_MASK_L3_4K (~(PMD_SIZE_L3_4K-1)) /* * 4-levels / 4K pages * 48-bit VA */ #define PTRS_PER_PGD_L4_4K ((1UL) << (48 - 39)) #define PTRS_PER_PUD_L4_4K (512) #define PTRS_PER_PMD_L4_4K (512) #define PTRS_PER_PTE_L4_4K (512) #define PGDIR_SHIFT_L4_4K (39) #define PGDIR_SIZE_L4_4K ((1UL) << PGDIR_SHIFT_L4_4K) #define PGDIR_MASK_L4_4K (~(PGDIR_SIZE_L4_4K-1)) #define PUD_SHIFT_L4_4K (30) #define PUD_SIZE_L4_4K ((1UL) << PUD_SHIFT_L4_4K) #define PUD_MASK_L4_4K (~(PUD_SIZE_L4_4K-1)) #define PMD_SHIFT_L4_4K (21) #define PMD_SIZE_L4_4K (1UL << PMD_SHIFT_L4_4K) #define PMD_MASK_L4_4K (~(PMD_SIZE_L4_4K-1)) #define PGDIR_SIZE_48VA (1UL << ((48 - 39) + 3)) #define PGDIR_MASK_48VA (~(PGDIR_SIZE_48VA - 1)) #define PGDIR_OFFSET_48VA(X) (((ulong)(X)) & (PGDIR_SIZE_48VA - 1)) /* * 3-levels / 64K pages */ #define PTRS_PER_PGD_L3_64K (64) #define PTRS_PER_PMD_L3_64K (8192) #define PTRS_PER_PTE_L3_64K (8192) #define PGDIR_SHIFT_L3_64K (42) #define PGDIR_SIZE_L3_64K ((1UL) << PGDIR_SHIFT_L3_64K) #define PGDIR_MASK_L3_64K (~(PGDIR_SIZE_L3_64K-1)) #define PMD_SHIFT_L3_64K (29) #define PMD_SIZE_L3_64K (1UL << PMD_SHIFT_L3_64K) #define PMD_MASK_L3_64K (~(PMD_SIZE_L3_64K-1)) #define PGDIR_OFFSET_L3_64K(X) (((ulong)(X)) & ((machdep->ptrs_per_pgd * 8) - 1)) /* * 2-levels / 64K pages */ #define PTRS_PER_PGD_L2_64K (8192) #define PTRS_PER_PTE_L2_64K (8192) #define PGDIR_SHIFT_L2_64K (29) #define PGDIR_SIZE_L2_64K ((1UL) << PGDIR_SHIFT_L2_64K) #define PGDIR_MASK_L2_64K (~(PGDIR_SIZE_L2_64K-1)) /* * Software defined PTE bits definition. * (arch/arm64/include/asm/pgtable.h) */ #define PTE_VALID (1UL << 0) #define PTE_DIRTY (1UL << 55) #define PTE_SPECIAL (1UL << 56) /* * Level 3 descriptor (PTE). 
* (arch/arm64/include/asm/pgtable-hwdef.h) */ #define PTE_TYPE_MASK (3UL << 0) #define PTE_TYPE_FAULT (0UL << 0) #define PTE_TYPE_PAGE (3UL << 0) #define PTE_USER (1UL << 6) /* AP[1] */ #define PTE_RDONLY (1UL << 7) /* AP[2] */ #define PTE_SHARED (3UL << 8) /* SH[1:0], inner shareable */ #define PTE_AF (1UL << 10) /* Access Flag */ #define PTE_NG (1UL << 11) /* nG */ #define PTE_PXN (1UL << 53) /* Privileged XN */ #define PTE_UXN (1UL << 54) /* User XN */ #define __swp_type(x) arm64_swp_type(x) #define __swp_offset(x) arm64_swp_offset(x) #define SWP_TYPE(x) __swp_type(x) #define SWP_OFFSET(x) __swp_offset(x) #define KSYMS_START (0x1) #define PHYS_OFFSET (0x2) #define VM_L2_64K (0x4) #define VM_L3_64K (0x8) #define VM_L3_4K (0x10) #define KDUMP_ENABLED (0x20) #define IRQ_STACKS (0x40) #define NEW_VMEMMAP (0x80) #define VM_L4_4K (0x100) #define UNW_4_14 (0x200) /* * Get kimage_voffset from /dev/crash */ #define DEV_CRASH_ARCH_DATA _IOR('c', 1, unsigned long) /* * sources: Documentation/arm64/memory.txt * arch/arm64/include/asm/memory.h * arch/arm64/include/asm/pgtable.h */ #define ARM64_VA_START ((0xffffffffffffffffUL) \ << machdep->machspec->VA_BITS) #define _VA_START(va) ((0xffffffffffffffffUL) - \ ((1UL) << ((va) - 1)) + 1) #define TEXT_OFFSET_MASK (~((MEGABYTES(2UL))-1)) #define ARM64_PAGE_OFFSET ((0xffffffffffffffffUL) \ << (machdep->machspec->VA_BITS - 1)) #define ARM64_PAGE_OFFSET_ACTUAL ((0xffffffffffffffffUL) \ - ((1UL) << machdep->machspec->VA_BITS_ACTUAL) + 1) #define ARM64_USERSPACE_TOP ((1UL) << machdep->machspec->VA_BITS) #define ARM64_USERSPACE_TOP_ACTUAL ((1UL) << machdep->machspec->VA_BITS_ACTUAL) /* only used for v4.6 or later */ #define ARM64_MODULES_VSIZE MEGABYTES(128) #define ARM64_KASAN_SHADOW_SIZE (1UL << (machdep->machspec->VA_BITS - 3)) /* * The following 3 definitions are the original values, but are obsolete * for 3.17 and later kernels because they are now build-time calculations. * They all depend on the kernel's new VMEMMAP_SIZE value, which is dependent * upon the size of struct page. Accordingly, arm64_calc_virtual_memory_ranges() * determines their values at POST_GDB time. 
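 *
 * Worked example of the original (pre-3.17) layout, assuming VA_BITS == 39:
 * ARM64_PAGE_OFFSET is then 0xffffffc000000000, so
 *
 *     ARM64_VMALLOC_END   = 0xffffffc000000000 - 0x400000000 - 64K - 1
 *                         = 0xffffffbbfffeffff
 *     ARM64_VMEMMAP_VADDR = 0xffffffbc00000000
 *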
*/ #define ARM64_VMALLOC_END (ARM64_PAGE_OFFSET - 0x400000000UL - KILOBYTES(64) - 1) #define ARM64_VMEMMAP_VADDR ((ARM64_VMALLOC_END+1) + KILOBYTES(64)) #define ARM64_VMEMMAP_END (ARM64_VMEMMAP_VADDR + GIGABYTES(8UL) - 1) #define ARM64_STACK_SIZE (16384) #define ARM64_IRQ_STACK_SIZE ARM64_STACK_SIZE #define _SECTION_SIZE_BITS 30 #define _MAX_PHYSMEM_BITS 40 #define _MAX_PHYSMEM_BITS_3_17 48 #define _MAX_PHYSMEM_BITS_52 52 typedef unsigned long long __u64; typedef unsigned long long u64; struct arm64_user_pt_regs { __u64 regs[31]; __u64 sp; __u64 pc; __u64 pstate; }; struct arm64_pt_regs { union { struct arm64_user_pt_regs user_regs; struct { u64 regs[31]; u64 sp; u64 pc; u64 pstate; }; }; u64 orig_x0; u64 syscallno; }; /* AArch32 CPSR bits */ #define PSR_MODE32_BIT 0x00000010 #define TIF_SIGPENDING (0) #define display_idt_table() \ error(FATAL, "-d option is not applicable to ARM64 architecture\n") struct machine_specific { ulong flags; ulong userspace_top; ulong page_offset; ulong vmalloc_start_addr; ulong vmalloc_end; ulong vmemmap_vaddr; ulong vmemmap_end; ulong modules_vaddr; ulong modules_end; ulong phys_offset; ulong __exception_text_start; ulong __exception_text_end; struct arm64_pt_regs *panic_task_regs; ulong PTE_PROT_NONE; ulong PTE_FILE; ulong VA_BITS; ulong __SWP_TYPE_BITS; ulong __SWP_TYPE_SHIFT; ulong __SWP_TYPE_MASK; ulong __SWP_OFFSET_BITS; ulong __SWP_OFFSET_SHIFT; ulong __SWP_OFFSET_MASK; ulong crash_kexec_start; ulong crash_kexec_end; ulong crash_save_cpu_start; ulong crash_save_cpu_end; ulong kernel_flags; ulong irq_stack_size; ulong *irq_stacks; char *irq_stackbuf; ulong __irqentry_text_start; ulong __irqentry_text_end; /* for exception vector code */ ulong exp_entry1_start; ulong exp_entry1_end; ulong exp_entry2_start; ulong exp_entry2_end; /* only needed for v4.6 or later kernel */ ulong kimage_voffset; ulong kimage_text; ulong kimage_end; ulong user_eframe_offset; /* for v4.14 or later */ ulong kern_eframe_offset; ulong machine_kexec_start; ulong machine_kexec_end; ulong VA_BITS_ACTUAL; ulong CONFIG_ARM64_VA_BITS; ulong VA_START; }; struct arm64_stackframe { unsigned long fp; unsigned long sp; unsigned long pc; }; #endif /* ARM64 */ #ifdef MIPS #define _32BIT_ #define MACHINE_TYPE "MIPS" #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) ((unsigned long)(X) + 0x80000000lu) #define VTOP(X) ((unsigned long)(X) & 0x1ffffffflu) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define DEFAULT_MODULES_VADDR (machdep->kvbase - 16 * 1024 * 1024) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define __SWP_TYPE_SHIFT 3 #define __SWP_TYPE_BITS 6 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) #define SWP_TYPE(entry) (((entry) >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) #define SWP_OFFSET(entry) ((entry) >> __SWP_OFFSET_SHIFT) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 26 #define _MAX_PHYSMEM_BITS 32 #endif /* MIPS */ #ifdef X86 #define _32BIT_ #define MACHINE_TYPE "X86" #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define KVBASE_MASK 
(0x1ffffff) #define PGDIR_SHIFT_2LEVEL (22) #define PTRS_PER_PTE_2LEVEL (1024) #define PTRS_PER_PGD_2LEVEL (1024) #define PGDIR_SHIFT_3LEVEL (30) #define PTRS_PER_PTE_3LEVEL (512) #define PTRS_PER_PGD_3LEVEL (4) #define PMD_SHIFT (21) /* only used by PAE translators */ #define PTRS_PER_PMD (512) /* only used by PAE translators */ #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_4M 0x080 /* 4 MB page, Pentium+, if present.. */ #define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */ #define _PAGE_PROTNONE (machdep->machspec->page_protnone) #define _PAGE_NX (0x8000000000000000ULL) #define NONPAE_PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define NX_BIT_MASK (0x7fffffffffffffffULL) #define PAE_PAGEBASE(X) (((unsigned long long)(X)) & ((unsigned long long)machdep->pagemask) & NX_BIT_MASK) #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type_PAE(entry) (((entry) >> 32) & 0x1f) #define __swp_type_nonPAE(entry) (((entry) >> 1) & 0x1f) #define __swp_offset_PAE(entry) (((entry) >> 32) >> 5) #define __swp_offset_nonPAE(entry) ((entry) >> 8) #define __swp_type(entry) (machdep->flags & PAE ? \ __swp_type_PAE(entry) : __swp_type_nonPAE(entry)) #define __swp_offset(entry) (machdep->flags & PAE ? \ __swp_offset_PAE(entry) : __swp_offset_nonPAE(entry)) #define TIF_SIGPENDING (2) // CONFIG_X86_PAE #define _SECTION_SIZE_BITS_PAE_ORIG 30 #define _SECTION_SIZE_BITS_PAE_2_6_26 29 #define _MAX_PHYSMEM_BITS_PAE 36 // !CONFIG_X86_PAE #define _SECTION_SIZE_BITS 26 #define _MAX_PHYSMEM_BITS 32 #define IS_LAST_PMD_READ_PAE(pmd) ((ulong)(pmd) == machdep->machspec->last_pmd_read_PAE) #define IS_LAST_PTBL_READ_PAE(ptbl) ((ulong)(ptbl) == machdep->machspec->last_ptbl_read_PAE) #define FILL_PMD_PAE(PMD, TYPE, SIZE) \ if (!IS_LAST_PMD_READ_PAE(PMD)) { \ readmem((ulonglong)(PMD), TYPE, machdep->pmd, \ SIZE, "pmd page", FAULT_ON_ERROR); \ machdep->machspec->last_pmd_read_PAE = (ulonglong)(PMD); \ } #define FILL_PTBL_PAE(PTBL, TYPE, SIZE) \ if (!IS_LAST_PTBL_READ_PAE(PTBL)) { \ readmem((ulonglong)(PTBL), TYPE, machdep->ptbl, \ SIZE, "page table", FAULT_ON_ERROR); \ machdep->machspec->last_ptbl_read_PAE = (ulonglong)(PTBL); \ } #endif /* X86 */ #ifdef X86_64 #define _64BIT_ #define MACHINE_TYPE "X86_64" #define USERSPACE_TOP (machdep->machspec->userspace_top) #define PAGE_OFFSET (machdep->machspec->page_offset) #define VMALLOC_START (machdep->machspec->vmalloc_start_addr) #define VMALLOC_END (machdep->machspec->vmalloc_end) #define VMEMMAP_VADDR (machdep->machspec->vmemmap_vaddr) #define VMEMMAP_END (machdep->machspec->vmemmap_end) #define MODULES_VADDR (machdep->machspec->modules_vaddr) #define MODULES_END (machdep->machspec->modules_end) #define __START_KERNEL_map 0xffffffff80000000UL #define MODULES_LEN (MODULES_END - MODULES_VADDR) #define USERSPACE_TOP_ORIG 0x0000008000000000 #define PAGE_OFFSET_ORIG 0x0000010000000000 #define VMALLOC_START_ADDR_ORIG 0xffffff0000000000 #define VMALLOC_END_ORIG 0xffffff7fffffffff #define MODULES_VADDR_ORIG 0xffffffffa0000000 #define MODULES_END_ORIG 0xffffffffafffffff #define USERSPACE_TOP_2_6_11 0x0000800000000000 #define PAGE_OFFSET_2_6_11 0xffff810000000000 #define VMALLOC_START_ADDR_2_6_11 0xffffc20000000000 #define VMALLOC_END_2_6_11 0xffffe1ffffffffff #define MODULES_VADDR_2_6_11 
0xffffffff88000000 #define MODULES_END_2_6_11 0xfffffffffff00000 #define VMEMMAP_VADDR_2_6_24 0xffffe20000000000 #define VMEMMAP_END_2_6_24 0xffffe2ffffffffff #define MODULES_VADDR_2_6_26 0xffffffffa0000000 #define PAGE_OFFSET_2_6_27 0xffff880000000000 #define MODULES_END_2_6_27 0xffffffffff000000 #define USERSPACE_TOP_XEN 0x0000800000000000 #define PAGE_OFFSET_XEN 0xffff880000000000 #define VMALLOC_START_ADDR_XEN 0xffffc20000000000 #define VMALLOC_END_XEN 0xffffe1ffffffffff #define MODULES_VADDR_XEN 0xffffffff88000000 #define MODULES_END_XEN 0xfffffffffff00000 #define USERSPACE_TOP_XEN_RHEL4 0x0000008000000000 #define PAGE_OFFSET_XEN_RHEL4 0xffffff8000000000 #define VMALLOC_START_ADDR_XEN_RHEL4 0xffffff0000000000 #define VMALLOC_END_XEN_RHEL4 0xffffff7fffffffff #define MODULES_VADDR_XEN_RHEL4 0xffffffffa0000000 #define MODULES_END_XEN_RHEL4 0xffffffffafffffff #define VMALLOC_START_ADDR_2_6_31 0xffffc90000000000 #define VMALLOC_END_2_6_31 0xffffe8ffffffffff #define VMEMMAP_VADDR_2_6_31 0xffffea0000000000 #define VMEMMAP_END_2_6_31 0xffffeaffffffffff #define MODULES_VADDR_2_6_31 0xffffffffa0000000 #define MODULES_END_2_6_31 0xffffffffff000000 #define USERSPACE_TOP_5LEVEL 0x0100000000000000 #define PAGE_OFFSET_5LEVEL 0xff10000000000000 #define VMALLOC_START_ADDR_5LEVEL 0xffa0000000000000 #define VMALLOC_END_5LEVEL 0xffd1ffffffffffff #define MODULES_VADDR_5LEVEL 0xffffffffa0000000 #define MODULES_END_5LEVEL 0xffffffffff5fffff #define VMEMMAP_VADDR_5LEVEL 0xffd4000000000000 #define VMEMMAP_END_5LEVEL 0xffd5ffffffffffff #define PAGE_OFFSET_4LEVEL_4_20 0xffff888000000000 #define PAGE_OFFSET_5LEVEL_4_20 0xff11000000000000 #define VSYSCALL_START 0xffffffffff600000 #define VSYSCALL_END 0xffffffffff601000 #define CPU_ENTRY_AREA_START 0xfffffe0000000000 #define CPU_ENTRY_AREA_END 0xfffffe7fffffffff #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) x86_64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) x86_64_IS_VMALLOC_ADDR((ulong)(X)) /* * the default page table level for x86_64: * 4 level page tables */ #define PGDIR_SHIFT 39 #define PTRS_PER_PGD 512 #define PUD_SHIFT 30 #define PTRS_PER_PUD 512 #define PMD_SHIFT 21 #define PTRS_PER_PMD 512 #define PTRS_PER_PTE 512 /* 5 level page */ #define PGDIR_SHIFT_5LEVEL 48 #define PTRS_PER_PGD_5LEVEL 512 #define P4D_SHIFT 39 #define PTRS_PER_P4D 512 #define __PGDIR_SHIFT (machdep->machspec->pgdir_shift) #define __PTRS_PER_PGD (machdep->machspec->ptrs_per_pgd) #define pgd_index(address) (((address) >> __PGDIR_SHIFT) & (__PTRS_PER_PGD-1)) #define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) #define FILL_TOP_PGD() \ if (!(pc->flags & RUNTIME) || ACTIVE()) { \ FILL_PGD(vt->kernel_pgd[0], KVADDR, PAGESIZE()); \ } #define FILL_TOP_PGD_HYPER() \ unsigned long idle_pg_table = symbol_exists("idle_pg_table_4") ? 
\ symbol_value("idle_pg_table_4") : \ symbol_value("idle_pg_table"); \ FILL_PGD(idle_pg_table, KVADDR, PAGESIZE()); #define IS_LAST_P4D_READ(p4d) ((ulong)(p4d) == machdep->machspec->last_p4d_read) #define FILL_P4D(P4D, TYPE, SIZE) \ if (!IS_LAST_P4D_READ(P4D)) { \ readmem((ulonglong)((ulong)(P4D)), TYPE, machdep->machspec->p4d, \ SIZE, "p4d page", FAULT_ON_ERROR); \ machdep->machspec->last_p4d_read = (ulong)(P4D); \ } /* * PHYSICAL_PAGE_MASK changed (enlarged) between 2.4 and 2.6, so * for safety, use the 2.6 values to generate it. */ #define __PHYSICAL_MASK_SHIFT_XEN 40 #define __PHYSICAL_MASK_SHIFT_2_6 46 #define __PHYSICAL_MASK_SHIFT_5LEVEL 52 #define __PHYSICAL_MASK_SHIFT (machdep->machspec->physical_mask_shift) #define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1) #define __VIRTUAL_MASK_SHIFT 48 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK ) #define _PAGE_BIT_NX 63 #define _PAGE_PRESENT 0x001 #define _PAGE_RW 0x002 #define _PAGE_USER 0x004 #define _PAGE_PWT 0x008 #define _PAGE_PCD 0x010 #define _PAGE_ACCESSED 0x020 #define _PAGE_DIRTY 0x040 #define _PAGE_PSE 0x080 /* 2MB page */ #define _PAGE_FILE 0x040 /* set:pagecache, unset:swap */ #define _PAGE_GLOBAL 0x100 /* Global TLB entry */ #define _PAGE_PROTNONE (machdep->machspec->page_protnone) #define _PAGE_NX (1UL<<_PAGE_BIT_NX) #define SWP_TYPE(entry) (((entry) >> 1) & 0x3f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) x86_64_swp_type(entry) #define __swp_offset(entry) x86_64_swp_offset(entry) #define TIF_SIGPENDING (2) #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define _CPU_PDA_READ2(CPU, BUFFER) \ ((readmem(symbol_value("_cpu_pda"), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (readmem(cpu_pda_addr + ((CPU) * sizeof(void *)), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (cpu_pda_addr) && \ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ "cpu_pda entry", RETURN_ON_ERROR))) #define _CPU_PDA_READ(CPU, BUFFER) \ ((STRNEQ("_cpu_pda", closest_symbol((symbol_value("_cpu_pda") + \ ((CPU) * sizeof(unsigned long)))))) && \ (readmem(symbol_value("_cpu_pda") + ((CPU) * sizeof(void *)), \ KVADDR, &cpu_pda_addr, sizeof(unsigned long), \ "_cpu_pda addr", RETURN_ON_ERROR)) && \ (readmem(cpu_pda_addr, KVADDR, (BUFFER), SIZE(x8664_pda), \ "cpu_pda entry", RETURN_ON_ERROR))) #define CPU_PDA_READ(CPU, BUFFER) \ (STRNEQ("cpu_pda", closest_symbol((symbol_value("cpu_pda") + \ ((CPU) * SIZE(x8664_pda))))) && \ readmem(symbol_value("cpu_pda") + ((CPU) * SIZE(x8664_pda)), \ KVADDR, (BUFFER), SIZE(x8664_pda), "cpu_pda entry", \ RETURN_ON_ERROR)) #define VALID_LEVEL4_PGT_ADDR(X) \ (((X) == VIRTPAGEBASE(X)) && IS_KVADDR(X) && !IS_VMALLOC_ADDR(X)) #define _SECTION_SIZE_BITS 27 #define _MAX_PHYSMEM_BITS 40 #define _MAX_PHYSMEM_BITS_2_6_26 44 #define _MAX_PHYSMEM_BITS_2_6_31 46 #define _MAX_PHYSMEM_BITS_5LEVEL 52 #endif /* X86_64 */ #ifdef ALPHA #define _64BIT_ #define MACHINE_TYPE "ALPHA" #define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) #define KSEG_BASE_48_BIT (0xffff800000000000) #define KSEG_BASE (0xfffffc0000000000) #define _PFN_MASK (0xFFFFFFFF00000000) #define 
VMALLOC_START (0xFFFFFE0000000000) #define MIN_SYMBOL_VALUE (KSEG_BASE_48_BIT) #define PGDIR_SHIFT (PAGESHIFT() + 2*(PAGESHIFT()-3)) #define PMD_SHIFT (PAGESHIFT() + (PAGESHIFT()-3)) #define PTRS_PER_PAGE (1024) #define PTRS_PER_PGD (1UL << (PAGESHIFT()-3)) /* * OSF/1 PAL-code-imposed page table bits */ #define _PAGE_VALID 0x0001 #define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ #define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ #define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ #define _PAGE_ASM 0x0010 #define _PAGE_KRE 0x0100 /* xxx - see below on the "accessed" bit */ #define _PAGE_URE 0x0200 /* xxx */ #define _PAGE_KWE 0x1000 /* used to do the dirty bit in software */ #define _PAGE_UWE 0x2000 /* used to do the dirty bit in software */ /* .. and these are ours ... */ #define _PAGE_DIRTY 0x20000 #define _PAGE_ACCESSED 0x40000 #define SWP_TYPE(entry) (((entry) >> 32) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 40) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #endif /* ALPHA */ #ifdef PPC #define _32BIT_ #define MACHINE_TYPE "PPC" #define PAGEBASE(X) ((X) & machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->kvbase)) #define VTOP(X) ((unsigned long)(X)-(machdep->kvbase)) #define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start) /* Holds the platform specific info for page translation */ struct machine_specific { char *platform; /* page address translation bits */ int pte_size; int pte_rpn_shift; /* page flags */ ulong _page_present; ulong _page_user; ulong _page_rw; ulong _page_guarded; ulong _page_coherent; ulong _page_no_cache; ulong _page_writethru; ulong _page_dirty; ulong _page_accessed; ulong _page_hwwrite; ulong _page_shared; ulong _page_k_rw; /* platform special vtop */ int (*vtop_special)(ulong vaddr, physaddr_t *paddr, int verbose); void *mmu_special; }; /* machdep flags for ppc32 specific */ #define IS_PAE() (machdep->flags & PAE) #define IS_BOOKE() (machdep->flags & CPU_BOOKE) /* Page translation bits */ #define PPC_PLATFORM (machdep->machspec->platform) #define PTE_SIZE (machdep->machspec->pte_size) #define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) #define PAGE_SHIFT (12) #define PTE_T_LOG2 (ffs(PTE_SIZE) - 1) #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) #define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT) #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) #define PTRS_PER_PTE (1 << PTE_SHIFT) /* special vtop */ #define VTOP_SPECIAL (machdep->machspec->vtop_special) #define MMU_SPECIAL (machdep->machspec->mmu_special) /* PFN shifts */ #define BOOKE3E_PTE_RPN_SHIFT (24) /* PAGE flags */ #define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ #define _PAGE_USER (machdep->machspec->_page_user) /* matches one of the PP bits */ #define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ #define _PAGE_GUARDED (machdep->machspec->_page_guarded) #define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) #define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ #define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ #define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ #define _PAGE_HWWRITE (machdep->machspec->_page_hwwrite) /* software: 
_PAGE_RW & _PAGE_DIRTY */ #define _PAGE_SHARED (machdep->machspec->_page_shared) #define _PAGE_K_RW (machdep->machspec->_page_k_rw) /* privilege only write access allowed */ /* Default values for PAGE flags */ #define DEFAULT_PAGE_PRESENT 0x001 #define DEFAULT_PAGE_USER 0x002 #define DEFAULT_PAGE_RW 0x004 #define DEFAULT_PAGE_GUARDED 0x008 #define DEFAULT_PAGE_COHERENT 0x010 #define DEFAULT_PAGE_NO_CACHE 0x020 #define DEFAULT_PAGE_WRITETHRU 0x040 #define DEFAULT_PAGE_DIRTY 0x080 #define DEFAULT_PAGE_ACCESSED 0x100 #define DEFAULT_PAGE_HWWRITE 0x200 #define DEFAULT_PAGE_SHARED 0 /* PPC44x PAGE flags: Values from kernel asm/pte-44x.h */ #define PPC44x_PAGE_PRESENT 0x001 #define PPC44x_PAGE_RW 0x002 #define PPC44x_PAGE_ACCESSED 0x008 #define PPC44x_PAGE_DIRTY 0x010 #define PPC44x_PAGE_USER 0x040 #define PPC44x_PAGE_GUARDED 0x100 #define PPC44x_PAGE_COHERENT 0x200 #define PPC44x_PAGE_NO_CACHE 0x400 #define PPC44x_PAGE_WRITETHRU 0x800 #define PPC44x_PAGE_HWWRITE 0 #define PPC44x_PAGE_SHARED 0 /* BOOK3E */ #define BOOK3E_PAGE_PRESENT 0x000001 #define BOOK3E_PAGE_BAP_SR 0x000004 #define BOOK3E_PAGE_BAP_UR 0x000008 /* User Readable */ #define BOOK3E_PAGE_BAP_SW 0x000010 #define BOOK3E_PAGE_BAP_UW 0x000020 /* User Writable */ #define BOOK3E_PAGE_DIRTY 0x001000 #define BOOK3E_PAGE_ACCESSED 0x040000 #define BOOK3E_PAGE_GUARDED 0x100000 #define BOOK3E_PAGE_COHERENT 0x200000 #define BOOK3E_PAGE_NO_CACHE 0x400000 #define BOOK3E_PAGE_WRITETHRU 0x800000 #define BOOK3E_PAGE_HWWRITE 0 #define BOOK3E_PAGE_SHARED 0 #define BOOK3E_PAGE_USER (BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_BAP_UR) #define BOOK3E_PAGE_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_UW) #define BOOK3E_PAGE_KERNEL_RW (BOOK3E_PAGE_BAP_SW | BOOK3E_PAGE_BAP_SR | BOOK3E_PAGE_DIRTY) /* FSL BOOKE */ #define FSL_BOOKE_PAGE_PRESENT 0x00001 #define FSL_BOOKE_PAGE_USER 0x00002 #define FSL_BOOKE_PAGE_RW 0x00004 #define FSL_BOOKE_PAGE_DIRTY 0x00008 #define FSL_BOOKE_PAGE_ACCESSED 0x00020 #define FSL_BOOKE_PAGE_GUARDED 0x00080 #define FSL_BOOKE_PAGE_COHERENT 0x00100 #define FSL_BOOKE_PAGE_NO_CACHE 0x00200 #define FSL_BOOKE_PAGE_WRITETHRU 0x00400 #define FSL_BOOKE_PAGE_HWWRITE 0 #define FSL_BOOKE_PAGE_SHARED 0 #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define TIF_SIGPENDING (2) #define _SECTION_SIZE_BITS 24 #define _MAX_PHYSMEM_BITS 44 #define STACK_FRAME_OVERHEAD 16 #define STACK_FRAME_LR_SAVE (sizeof(ulong)) #define STACK_FRAME_MARKER (2 * sizeof(ulong)) #define STACK_FRAME_REGS_MARKER 0x72656773 #define PPC_STACK_SIZE 8192 #endif /* PPC */ #ifdef IA64 #define _64BIT_ #define MACHINE_TYPE "IA64" #define PAGEBASE(X) (((unsigned long)(X)) & (unsigned long)machdep->pagemask) #define REGION_SHIFT (61) #define VADDR_REGION(X) ((ulong)(X) >> REGION_SHIFT) #define KERNEL_CACHED_REGION (7) #define KERNEL_UNCACHED_REGION (6) #define KERNEL_VMALLOC_REGION (5) #define USER_STACK_REGION (4) #define USER_DATA_REGION (3) #define USER_TEXT_REGION (2) #define USER_SHMEM_REGION (1) #define USER_IA32_EMUL_REGION (0) #define KERNEL_VMALLOC_BASE ((ulong)KERNEL_VMALLOC_REGION << REGION_SHIFT) #define KERNEL_UNCACHED_BASE ((ulong)KERNEL_UNCACHED_REGION << REGION_SHIFT) #define KERNEL_CACHED_BASE ((ulong)KERNEL_CACHED_REGION << REGION_SHIFT) #define _SECTION_SIZE_BITS 30 #define _MAX_PHYSMEM_BITS 50 /* * As of 2.6, these are no longer straightforward.
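 * (For instance, ia64_PTOV()/ia64_VTOP() must treat region-7 KERNEL_CACHED
 * addresses as identity-mapped while region-5 KERNEL_VMALLOC addresses are
 * resolved through the page tables -- hence functions rather than the
 * simple macros used by other architectures.)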
*/ #define PTOV(X) ia64_PTOV((ulong)(X)) #define VTOP(X) ia64_VTOP((ulong)(X)) #define IS_VMALLOC_ADDR(X) ia64_IS_VMALLOC_ADDR((ulong)(X)) #define SWITCH_STACK_ADDR(X) (ia64_get_switch_stack((ulong)(X))) #define __IA64_UL(x) ((unsigned long)(x)) #define IA64_MAX_PHYS_BITS (50) /* max # of phys address bits (architected) */ /* * How many pointers will a page table level hold expressed in shift */ #define PTRS_PER_PTD_SHIFT (PAGESHIFT()-3) /* * Definitions for fourth level: */ #define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT)) /* * Definitions for third level: * * PMD_SHIFT determines the size of the area a third-level page table * can map. */ #define PMD_SHIFT (PAGESHIFT() + (PTRS_PER_PTD_SHIFT)) #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE-1)) #define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT)) /* * PUD_SHIFT determines the size of the area a second-level page table * can map */ #define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE-1)) #define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT)) /* * Definitions for first level: * * PGDIR_SHIFT determines what a first-level page table entry can map. */ #define PGDIR_SHIFT_4L (PUD_SHIFT + (PTRS_PER_PTD_SHIFT)) #define PGDIR_SHIFT_3L (PMD_SHIFT + (PTRS_PER_PTD_SHIFT)) /* Turns out 4L & 3L PGDIR_SHIFT are the same (for now) */ #define PGDIR_SHIFT PGDIR_SHIFT_4L #define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE-1)) #define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT #define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT) #define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */ #define FIRST_USER_ADDRESS 0 /* * First, define the various bits in a PTE. Note that the PTE format * matches the VHPT short format, the first doubleword of the VHPT long * format, and the first doubleword of the TLB insertion format.
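 *
 * As an illustrative composition (not from the original source), a present,
 * accessed, cacheable kernel page mapped read-write would carry
 * _PAGE_P | _PAGE_A | _PAGE_MA_WB | _PAGE_PL_0 | _PAGE_AR_RW -- the same
 * kind of combination the __ACCESS_BITS/__DIRTY_BITS helpers below encode.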
*/ #define _PAGE_P (1 << 0) /* page present bit */ #define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */ #define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */ #define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */ #define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */ #define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */ #define _PAGE_MA_MASK (0x7 << 2) #define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */ #define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */ #define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */ #define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */ #define _PAGE_PL_MASK (3 << 7) #define _PAGE_AR_R (0 << 9) /* read only */ #define _PAGE_AR_RX (1 << 9) /* read & execute */ #define _PAGE_AR_RW (2 << 9) /* read & write */ #define _PAGE_AR_RWX (3 << 9) /* read, write & execute */ #define _PAGE_AR_R_RW (4 << 9) /* read / read & write */ #define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */ #define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */ #define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */ #define _PAGE_AR_MASK (7 << 9) #define _PAGE_AR_SHIFT 9 #define _PAGE_A (1 << 5) /* page accessed bit */ #define _PAGE_D (1 << 6) /* page dirty bit */ #define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL) #define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */ #define _PAGE_PROTNONE (__IA64_UL(1) << 63) #define _PFN_MASK _PAGE_PPN_MASK #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_A | _PAGE_D) #define _PAGE_SIZE_4K 12 #define _PAGE_SIZE_8K 13 #define _PAGE_SIZE_16K 14 #define _PAGE_SIZE_64K 16 #define _PAGE_SIZE_256K 18 #define _PAGE_SIZE_1M 20 #define _PAGE_SIZE_4M 22 #define _PAGE_SIZE_16M 24 #define _PAGE_SIZE_64M 26 #define _PAGE_SIZE_256M 28 #define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB #define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB #define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED #define EFI_PAGE_SHIFT (12) /* * NOTE: #include'ing creates too many compiler problems, so * this stuff is hardwired here; it's probably etched in stone somewhere. 
*/ struct efi_memory_desc_t { uint32_t type; uint32_t pad; uint64_t phys_addr; uint64_t virt_addr; uint64_t num_pages; uint64_t attribute; } desc; /* Memory types: */ #define EFI_RESERVED_TYPE 0 #define EFI_LOADER_CODE 1 #define EFI_LOADER_DATA 2 #define EFI_BOOT_SERVICES_CODE 3 #define EFI_BOOT_SERVICES_DATA 4 #define EFI_RUNTIME_SERVICES_CODE 5 #define EFI_RUNTIME_SERVICES_DATA 6 #define EFI_CONVENTIONAL_MEMORY 7 #define EFI_UNUSABLE_MEMORY 8 #define EFI_ACPI_RECLAIM_MEMORY 9 #define EFI_ACPI_MEMORY_NVS 10 #define EFI_MEMORY_MAPPED_IO 11 #define EFI_MEMORY_MAPPED_IO_PORT_SPACE 12 #define EFI_PAL_CODE 13 #define EFI_MAX_MEMORY_TYPE 14 /* Attribute values: */ #define EFI_MEMORY_UC 0x0000000000000001 /* uncached */ #define EFI_MEMORY_WC 0x0000000000000002 /* write-coalescing */ #define EFI_MEMORY_WT 0x0000000000000004 /* write-through */ #define EFI_MEMORY_WB 0x0000000000000008 /* write-back */ #define EFI_MEMORY_WP 0x0000000000001000 /* write-protect */ #define EFI_MEMORY_RP 0x0000000000002000 /* read-protect */ #define EFI_MEMORY_XP 0x0000000000004000 /* execute-protect */ #define EFI_MEMORY_RUNTIME 0x8000000000000000 /* range requires runtime mapping */ #define SWP_TYPE(entry) (((entry) >> 1) & 0xff) #define SWP_OFFSET(entry) ((entry) >> 9) #define __swp_type(entry) ((entry >> 2) & 0x7f) #define __swp_offset(entry) ((entry << 1) >> 10) #define TIF_SIGPENDING (1) #define KERNEL_TR_PAGE_SIZE (1 << _PAGE_SIZE_64M) #define KERNEL_TR_PAGE_MASK (~(KERNEL_TR_PAGE_SIZE - 1)) #define UNKNOWN_PHYS_START ((ulong)(-1)) #define DEFAULT_PHYS_START (KERNEL_TR_PAGE_SIZE * 1) #define IA64_GET_STACK_ULONG(OFF) \ ((INSTACK(OFF,bt)) ? (GET_STACK_ULONG(OFF)) : get_init_stack_ulong((unsigned long)OFF)) #endif /* IA64 */ #ifdef PPC64 #define _64BIT_ #define MACHINE_TYPE "PPC64" #define PPC64_64K_PAGE_SIZE 65536 #define PPC64_STACK_SIZE 16384 #define PAGEBASE(X) (((ulong)(X)) & (ulong)machdep->pagemask) #define PTOV(X) ((unsigned long)(X)+(machdep->identity_map_base)) #define VTOP(X) ((unsigned long)(X)-(machdep->identity_map_base)) #define BOOK3E_VMBASE 0x8000000000000000 #define IS_VMALLOC_ADDR(X) machdep->machspec->is_vmaddr(X) #define KERNELBASE machdep->pageoffset #define PGDIR_SHIFT (machdep->pageshift + (machdep->pageshift -3) + (machdep->pageshift - 2)) #define PMD_SHIFT (machdep->pageshift + (machdep->pageshift - 3)) #define PGD_MASK (~((1UL << PGDIR_SHIFT) - 1)) #define PMD_MASK (~((1UL << PMD_SHIFT) - 1)) /* shift to put page number into pte */ #define PTE_RPN_SHIFT_DEFAULT 16 #define PMD_TO_PTEPAGE_SHIFT 2 /* Used for 2.6 or later */ #define PTE_INDEX_SIZE 9 #define PMD_INDEX_SIZE 10 #define PGD_INDEX_SIZE 10 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) #define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) #define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) #define PGD_OFFSET_24(vaddr) ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #define PGD_OFFSET(vaddr) ((vaddr >> PGDIR_SHIFT) & 0x7ff) #define PMD_OFFSET(vaddr) ((vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) /* 4-level page table support */ /* 4K pagesize */ #define PTE_INDEX_SIZE_L4_4K 9 #define PMD_INDEX_SIZE_L4_4K 7 #define PUD_INDEX_SIZE_L4_4K 7 #define PGD_INDEX_SIZE_L4_4K 9 #define PUD_INDEX_SIZE_L4_4K_3_7 9 #define PTE_INDEX_SIZE_RADIX_4K 9 #define PMD_INDEX_SIZE_RADIX_4K 9 #define PUD_INDEX_SIZE_RADIX_4K 9 #define PGD_INDEX_SIZE_RADIX_4K 13 #define PTE_RPN_SHIFT_L4_4K 17 #define PTE_RPN_SHIFT_L4_4K_4_5 18 #define PGD_MASKED_BITS_4K 0 #define PUD_MASKED_BITS_4K 0 #define PMD_MASKED_BITS_4K 0 /* 64K pagesize */ #define PTE_INDEX_SIZE_L4_64K 12 #define 
PMD_INDEX_SIZE_L4_64K 12 #define PUD_INDEX_SIZE_L4_64K 0 #define PGD_INDEX_SIZE_L4_64K 4 #define PTE_INDEX_SIZE_L4_64K_3_10 8 #define PMD_INDEX_SIZE_L4_64K_3_10 10 #define PGD_INDEX_SIZE_L4_64K_3_10 12 #define PMD_INDEX_SIZE_L4_64K_4_6 5 #define PUD_INDEX_SIZE_L4_64K_4_6 5 #define PMD_INDEX_SIZE_L4_64K_4_12 10 #define PUD_INDEX_SIZE_L4_64K_4_12 7 #define PGD_INDEX_SIZE_L4_64K_4_12 8 #define PUD_INDEX_SIZE_L4_64K_4_17 10 #define PTE_INDEX_SIZE_RADIX_64K 5 #define PMD_INDEX_SIZE_RADIX_64K 9 #define PUD_INDEX_SIZE_RADIX_64K 9 #define PGD_INDEX_SIZE_RADIX_64K 13 #define PTE_RPN_SHIFT_L4_64K_V1 32 #define PTE_RPN_SHIFT_L4_64K_V2 30 #define PTE_RPN_SHIFT_L4_BOOK3E_64K 28 #define PTE_RPN_SHIFT_L4_BOOK3E_4K 24 #define PGD_MASKED_BITS_64K 0 #define PUD_MASKED_BITS_64K 0x1ff #define PMD_MASKED_BITS_64K 0x1ff #define PMD_MASKED_BITS_64K_3_11 0xfff #define PMD_MASKED_BITS_BOOK3E_64K_4_5 0x7ff #define PGD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL #define PUD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL #define PMD_MASKED_BITS_64K_4_6 0xc0000000000000ffUL #define PTE_RPN_MASK_DEFAULT 0xffffffffffffffffUL #define PAGE_PA_MAX_L4_4_6 (THIS_KERNEL_VERSION >= LINUX(4,11,0) ? 53 : 57) #define PTE_RPN_MASK_L4_4_6 \ (((1UL << PAGE_PA_MAX_L4_4_6) - 1) & ~((1UL << PAGESHIFT()) - 1)) #define PTE_RPN_SHIFT_L4_4_6 PAGESHIFT() #define PGD_MASKED_BITS_4_7 0xc0000000000000ffUL #define PUD_MASKED_BITS_4_7 0xc0000000000000ffUL #define PMD_MASKED_BITS_4_7 0xc0000000000000ffUL #define PD_HUGE 0x8000000000000000 #define HUGE_PTE_MASK 0x03 #define HUGEPD_SHIFT_MASK 0x3f #define HUGEPD_ADDR_MASK (0x0fffffffffffffffUL & ~HUGEPD_SHIFT_MASK) #define PGD_MASK_L4 \ (THIS_KERNEL_VERSION >= LINUX(3,10,0) ? (machdep->ptrs_per_pgd - 1) : 0x1ff) #define PGD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l4_shift)) & PGD_MASK_L4) #define PUD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l3_shift)) & (machdep->machspec->ptrs_per_l3 - 1)) #define PMD_OFFSET_L4(vaddr) \ ((vaddr >> (machdep->machspec->l2_shift)) & (machdep->machspec->ptrs_per_l2 - 1)) #define _PAGE_PTE (machdep->machspec->_page_pte) /* distinguishes PTEs from pointers */ #define _PAGE_PRESENT (machdep->machspec->_page_present) /* software: pte contains a translation */ #define _PAGE_USER (machdep->machspec->_page_user) /* matches one of the PP bits */ #define _PAGE_RW (machdep->machspec->_page_rw) /* software: user write access allowed */ #define _PAGE_GUARDED (machdep->machspec->_page_guarded) #define _PAGE_COHERENT (machdep->machspec->_page_coherent /* M: enforce memory coherence (SMP systems) */) #define _PAGE_NO_CACHE (machdep->machspec->_page_no_cache) /* I: cache inhibit */ #define _PAGE_WRITETHRU (machdep->machspec->_page_writethru) /* W: cache write-through */ #define _PAGE_DIRTY (machdep->machspec->_page_dirty) /* C: page changed */ #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) /* R: page referenced */ #define PTE_RPN_MASK (machdep->machspec->pte_rpn_mask) #define PTE_RPN_SHIFT (machdep->machspec->pte_rpn_shift) #define TIF_SIGPENDING (2) #define SWP_TYPE(entry) (((entry) >> 1) & 0x7f) #define SWP_OFFSET(entry) ((entry) >> 8) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #define MSR_PR_LG 14 /* Problem State / Privilege Level */ /* Used to find the user or kernel-mode frame*/ #define STACK_FRAME_OVERHEAD 112 #define EXCP_FRAME_MARKER 0x7265677368657265 #define _SECTION_SIZE_BITS 24 #define _MAX_PHYSMEM_BITS 44 #define _MAX_PHYSMEM_BITS_3_7 46 #define _MAX_PHYSMEM_BITS_4_19 47 #define _MAX_PHYSMEM_BITS_4_20 51 
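/*
 * Illustrative sketch, not part of the original header: how one level of a
 * 4-level PPC64 virtual address is turned into a table index.  It mirrors
 * PGD_OFFSET_L4()/PUD_OFFSET_L4()/PMD_OFFSET_L4() above, but takes the
 * shift and table size as explicit (hypothetical) parameters instead of
 * reading them from machdep->machspec.
 */
static inline ulong
ppc64_level_index_sketch(ulong vaddr, ulong level_shift, ulong ptrs_per_level)
{
	return (vaddr >> level_shift) & (ptrs_per_level - 1);
}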
#endif  /* PPC64 */

#ifdef S390
#define _32BIT_
#define MACHINE_TYPE       "S390"

#define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
#define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)

#define PTRS_PER_PTE    1024
#define PTRS_PER_PMD    1
#define PTRS_PER_PGD    512
#define SEGMENT_TABLE_SIZE ((sizeof(ulong)*4) * PTRS_PER_PGD)

#define SWP_TYPE(entry)   (((entry) >> 2) & 0x1f)
#define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffe) | \
			   (((entry) >> 7) & 0x1))
#define __swp_type(entry)   SWP_TYPE(entry)
#define __swp_offset(entry) SWP_OFFSET(entry)

#define TIF_SIGPENDING (2)

#define _SECTION_SIZE_BITS	25
#define _MAX_PHYSMEM_BITS	31

#endif  /* S390 */

#ifdef S390X
#define _64BIT_
#define MACHINE_TYPE       "S390X"

#define PTOV(X)            ((unsigned long)(X)+(machdep->kvbase))
#define VTOP(X)            ((unsigned long)(X)-(machdep->kvbase))
#define IS_VMALLOC_ADDR(X) (vt->vmalloc_start && (ulong)(X) >= vt->vmalloc_start)

#define PTRS_PER_PTE    512
#define PTRS_PER_PMD    1024
#define PTRS_PER_PGD    2048
#define SEGMENT_TABLE_SIZE ((sizeof(ulong)*2) * PTRS_PER_PMD)

#define SWP_TYPE(entry)   (((entry) >> 2) & 0x1f)
#define SWP_OFFSET(entry) ((((entry) >> 11) & 0xfffffffffffffffe) | \
			   (((entry) >> 7) & 0x1))
#define __swp_type(entry)   SWP_TYPE(entry)
#define __swp_offset(entry) SWP_OFFSET(entry)

#define TIF_SIGPENDING (2)

#define _SECTION_SIZE_BITS	28
#define _MAX_PHYSMEM_BITS_OLD	42
#define _MAX_PHYSMEM_BITS_NEW	46

#endif  /* S390X */

#ifdef SPARC64
#define _64BIT_
#define MACHINE_TYPE       "SPARC64"

#define PTOV(X) \
	((unsigned long)(X) + machdep->machspec->page_offset)
#define VTOP(X) \
	((unsigned long)(X) - machdep->machspec->page_offset)

#define PAGE_OFFSET     (machdep->machspec->page_offset)

extern int sparc64_IS_VMALLOC_ADDR(ulong vaddr);
#define IS_VMALLOC_ADDR(X)    sparc64_IS_VMALLOC_ADDR((ulong)(X))
#define PAGE_SHIFT	(13)
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGEBASE(X)	(((ulong)(X)) & (ulong)machdep->pagemask)
#define THREAD_SIZE	(2 * PAGE_SIZE)

/* S3 Core
 * The core supports 48-bit physical addresses.
 * Bit 47 distinguishes memory from I/O: when set to "1" it is I/O.
 */
#define PHYS_MASK_SHIFT   (47)
#define PHYS_MASK         (((1UL) << PHYS_MASK_SHIFT) - 1)

typedef signed int s32;

/*
 * The next two defines are convenience definitions for the normal page table.
 */
#define PTES_PER_PAGE		(1UL << (PAGE_SHIFT - 3))
#define PTES_PER_PAGE_MASK	(PTES_PER_PAGE - 1)

/* 4-level page table */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PMD_BITS	(PAGE_SHIFT - 3)

#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))
#define PUD_BITS	(PAGE_SHIFT - 3)

#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)

#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT - 3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)

#define HPAGE_SHIFT		(23)
/* Down one huge page */
#define SPARC64_USERSPACE_TOP	(-(1UL << HPAGE_SHIFT))
#define PAGE_PMD_HUGE		(0x0100000000000000UL)

/* These are for SUN4V.
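 *
 * Example (illustrative only): given a raw SUN4V TTE value, the bits
 * defined below decode it along these lines:
 *
 *	valid    = (tte & _PAGE_VALID) != 0;
 *	writable = (tte & _PAGE_WRITE_4V) != 0;
 *	pagebase = tte & _PAGE_PADDR_4V;
 *
 * with the low PAGE_SHIFT bits of the physical address supplied by the
 * virtual address itself.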
*/ #define _PAGE_VALID (0x8000000000000000UL) #define _PAGE_NFO_4V (0x4000000000000000UL) #define _PAGE_MODIFIED_4V (0x2000000000000000UL) #define _PAGE_ACCESSED_4V (0x1000000000000000UL) #define _PAGE_READ_4V (0x0800000000000000UL) #define _PAGE_WRITE_4V (0x0400000000000000UL) #define _PAGE_PADDR_4V (0x00FFFFFFFFFFE000UL) #define _PAGE_PFN_MASK (_PAGE_PADDR_4V) #define _PAGE_P_4V (0x0000000000000100UL) #define _PAGE_EXEC_4V (0x0000000000000080UL) #define _PAGE_W_4V (0x0000000000000040UL) #define _PAGE_PRESENT_4V (0x0000000000000010UL) #define _PAGE_SZALL_4V (0x0000000000000007UL) /* There are other page sizes. Some supported. */ #define _PAGE_SZ4MB_4V (0x0000000000000003UL) #define _PAGE_SZ512K_4V (0x0000000000000002UL) #define _PAGE_SZ64K_4V (0x0000000000000001UL) #define _PAGE_SZ8K_4V (0x0000000000000000UL) #define SPARC64_MODULES_VADDR (0x0000000010000000UL) #define SPARC64_MODULES_END (0x00000000f0000000UL) #define SPARC64_VMALLOC_START (0x0000000100000000UL) #define SPARC64_STACK_SIZE 0x4000 /* sparsemem */ #define _SECTION_SIZE_BITS 30 #define _MAX_PHYSMEM_BITS 53 #define STACK_BIAS 2047 struct machine_specific { ulong page_offset; ulong vmalloc_end; }; #define TIF_SIGPENDING (2) #define SWP_OFFSET(E) ((E) >> (PAGE_SHIFT + 8UL)) #define SWP_TYPE(E) (((E) >> PAGE_SHIFT) & 0xffUL) #define __swp_type(E) SWP_TYPE(E) #define __swp_offset(E) SWP_OFFSET(E) #endif /* SPARC64 */ #ifdef PLATFORM #define SWP_TYPE(entry) (error("PLATFORM_SWP_TYPE: TBD\n")) #define SWP_OFFSET(entry) (error("PLATFORM_SWP_OFFSET: TBD\n")) #define __swp_type(entry) SWP_TYPE(entry) #define __swp_offset(entry) SWP_OFFSET(entry) #endif /* PLATFORM */ #define KILOBYTES(x) ((x) * (1024)) #define MEGABYTES(x) ((x) * (1048576)) #define GIGABYTES(x) ((x) * (1073741824)) #define TB_SHIFT (40) #define TERABYTES(x) ((x) * (1UL << TB_SHIFT)) #define MEGABYTE_MASK (MEGABYTES(1)-1) #define SIZEOF_64BIT (8) #define SIZEOF_32BIT (4) #define SIZEOF_16BIT (2) #define SIZEOF_8BIT (1) #ifdef ARM #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef X86 #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef ALPHA #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (11) #endif #ifdef PPC #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef IA64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef S390 #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef S390X #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef X86_64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (10) #endif #ifdef PPC64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #ifdef ARM64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (10) #endif #ifdef MIPS #define MAX_HEXADDR_STRLEN (8) #define UVADDR_PRLEN (8) #endif #ifdef SPARC64 #define MAX_HEXADDR_STRLEN (16) #define UVADDR_PRLEN (16) #endif #define BADADDR ((ulong)(-1)) #define BADVAL ((ulong)(-1)) #define UNUSED (-1) #define UNINITIALIZED (BADVAL) #define BITS_PER_BYTE (8) #define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long)) #define NUM_TO_BIT(x) (1UL<<((x)%BITS_PER_LONG)) #define NUM_IN_BITMAP(bitmap, x) (bitmap[(x)/BITS_PER_LONG] & NUM_TO_BIT(x)) #define SET_BIT(bitmap, x) (bitmap[(x)/BITS_PER_LONG] |= NUM_TO_BIT(x)) /* * precision lengths for fprintf */ #define VADDR_PRLEN (sizeof(char *) == 8 ? 16 : 8) #define LONG_LONG_PRLEN (16) #define LONG_PRLEN (sizeof(long) == 8 ? 16 : 8) #define INT_PRLEN (sizeof(int) == 8 ? 
16 : 8) #define CHAR_PRLEN (2) #define SHORT_PRLEN (4) #define MINSPACE (-100) #define SYNOPSIS (0x1) #define COMPLETE_HELP (0x2) #define PIPE_TO_SCROLL (0x4) #define MUST_HELP (0x8) #define LEFT_JUSTIFY (1) #define RIGHT_JUSTIFY (2) #define CENTER (0x1) #define LJUST (0x2) #define RJUST (0x4) #define LONG_DEC (0x8) #define LONG_HEX (0x10) #define INT_DEC (0x20) #define INT_HEX (0x40) #define LONGLONG_HEX (0x80) #define ZERO_FILL (0x100) #define SLONG_DEC (0x200) #define INIT_TIME (1) #define RUN_TIME (2) /* * IRQ line status. * For kernels up to and including 2.6.17 */ #define IRQ_INPROGRESS_2_6_17 1 /* IRQ handler active - do not enter! */ #define IRQ_DISABLED_2_6_17 2 /* IRQ disabled - do not enter! */ #define IRQ_PENDING_2_6_17 4 /* IRQ pending - replay on enable */ #define IRQ_REPLAY_2_6_17 8 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT_2_6_17 16 /* IRQ is being autodetected */ #define IRQ_WAITING_2_6_17 32 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL_2_6_17 64 /* IRQ level triggered */ #define IRQ_MASKED_2_6_17 128 /* IRQ masked - shouldn't be seen again */ /* * For kernel 2.6.21 and later */ #define IRQ_TYPE_NONE_2_6_21 0x00000000 /* Default, unspecified type */ #define IRQ_TYPE_EDGE_RISING_2_6_21 0x00000001 /* Edge rising type */ #define IRQ_TYPE_EDGE_FALLING_2_6_21 0x00000002 /* Edge falling type */ #define IRQ_TYPE_EDGE_BOTH_2_6_21 (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING) #define IRQ_TYPE_LEVEL_HIGH_2_6_21 0x00000004 /* Level high type */ #define IRQ_TYPE_LEVEL_LOW_2_6_21 0x00000008 /* Level low type */ #define IRQ_TYPE_SENSE_MASK_2_6_21 0x0000000f /* Mask of the above */ #define IRQ_TYPE_PROBE_2_6_21 0x00000010 /* Probing in progress */ #define IRQ_INPROGRESS_2_6_21 0x00000100 /* IRQ handler active - do not enter! */ #define IRQ_DISABLED_2_6_21 0x00000200 /* IRQ disabled - do not enter! */ #define IRQ_PENDING_2_6_21 0x00000400 /* IRQ pending - replay on enable */ #define IRQ_REPLAY_2_6_21 0x00000800 /* IRQ has been replayed but not acked yet */ #define IRQ_AUTODETECT_2_6_21 0x00001000 /* IRQ is being autodetected */ #define IRQ_WAITING_2_6_21 0x00002000 /* IRQ not yet seen - for autodetection */ #define IRQ_LEVEL_2_6_21 0x00004000 /* IRQ level triggered */ #define IRQ_MASKED_2_6_21 0x00008000 /* IRQ masked - shouldn't be seen again */ #define IRQ_PER_CPU_2_6_21 0x00010000 /* IRQ is per CPU */ #define IRQ_NOPROBE_2_6_21 0x00020000 /* IRQ is not valid for probing */ #define IRQ_NOREQUEST_2_6_21 0x00040000 /* IRQ cannot be requested */ #define IRQ_NOAUTOEN_2_6_21 0x00080000 /* IRQ will not be enabled on request irq */ #define IRQ_WAKEUP_2_6_21 0x00100000 /* IRQ triggers system wakeup */ #define IRQ_MOVE_PENDING_2_6_21 0x00200000 /* need to re-target IRQ destination */ #define IRQ_NO_BALANCING_2_6_21 0x00400000 /* IRQ is excluded from balancing */ #define IRQ_SPURIOUS_DISABLED_2_6_21 0x00800000 /* IRQ was disabled by the spurious trap */ #define IRQ_MOVE_PCNTXT_2_6_21 0x01000000 /* IRQ migration from process context */ #define IRQ_AFFINITY_SET_2_6_21 0x02000000 /* IRQ affinity was set from userspace*/ /* * Select proper IRQ value depending on kernel version */ #define IRQ_TYPE_NONE \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_NONE_2_6_21 : 0) #define IRQ_TYPE_EDGE_RISING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_RISING_2_6_21 : 0) #define IRQ_TYPE_EDGE_FALLING \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_EDGE_FALLING_2_6_21 : 0) #define IRQ_TYPE_EDGE_BOTH \ (THIS_KERNEL_VERSION >= LINUX(2,6,21) ? 
	 IRQ_TYPE_EDGE_BOTH_2_6_21 : 0)
#define IRQ_TYPE_LEVEL_HIGH \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_HIGH_2_6_21 : 0)
#define IRQ_TYPE_LEVEL_LOW \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_LEVEL_LOW_2_6_21 : 0)
#define IRQ_TYPE_SENSE_MASK \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_SENSE_MASK_2_6_21 : 0)
#define IRQ_TYPE_PROBE \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_TYPE_PROBE_2_6_21 : 0)
#define IRQ_INPROGRESS \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_INPROGRESS_2_6_21 : IRQ_INPROGRESS_2_6_17)
#define IRQ_DISABLED \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_DISABLED_2_6_21 : IRQ_DISABLED_2_6_17)
#define IRQ_PENDING \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PENDING_2_6_21 : IRQ_PENDING_2_6_17)
#define IRQ_REPLAY \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_REPLAY_2_6_21 : IRQ_REPLAY_2_6_17)
#define IRQ_AUTODETECT \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_AUTODETECT_2_6_21 : IRQ_AUTODETECT_2_6_17)
#define IRQ_WAITING \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAITING_2_6_21 : IRQ_WAITING_2_6_17)
#define IRQ_LEVEL \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_LEVEL_2_6_21 : IRQ_LEVEL_2_6_17)
#define IRQ_MASKED \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MASKED_2_6_21 : IRQ_MASKED_2_6_17)
#define IRQ_PER_CPU \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_PER_CPU_2_6_21 : 0)
#define IRQ_NOPROBE \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOPROBE_2_6_21 : 0)
#define IRQ_NOREQUEST \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOREQUEST_2_6_21 : 0)
#define IRQ_NOAUTOEN \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NOAUTOEN_2_6_21 : 0)
#define IRQ_WAKEUP \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_WAKEUP_2_6_21 : 0)
#define IRQ_MOVE_PENDING \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PENDING_2_6_21 : 0)
#define IRQ_NO_BALANCING \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_NO_BALANCING_2_6_21 : 0)
#define IRQ_SPURIOUS_DISABLED \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_SPURIOUS_DISABLED_2_6_21 : 0)
#define IRQ_MOVE_PCNTXT \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ? IRQ_MOVE_PCNTXT_2_6_21 : 0)
#define IRQ_AFFINITY_SET \
	(THIS_KERNEL_VERSION >= LINUX(2,6,21) ?
IRQ_AFFINITY_SET_2_6_21 : 0) #ifdef ARM #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef X86 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef X86_64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef ALPHA #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x40000000 #endif #ifdef PPC #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef PPC64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000u #endif #ifdef IA64 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef S390 #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #ifdef S390X #define SA_PROBE SA_ONESHOT #define SA_SAMPLE_RANDOM SA_RESTART #define SA_SHIRQ 0x04000000 #define SA_RESTORER 0x04000000 #endif #define ACTION_FLAGS (SA_INTERRUPT|SA_PROBE|SA_SAMPLE_RANDOM|SA_SHIRQ) #endif /* !GDB_COMMON */ /* * Common request structure for BFD or GDB data or commands. */ struct gnu_request { int command; char *buf; FILE *fp; ulong addr; ulong addr2; ulong count; ulong flags; char *name; ulong length; int typecode; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) char *typename; #else char *type_name; #endif char *target_typename; ulong target_length; int target_typecode; int is_typedef; char *member; long member_offset; long member_length; int member_typecode; long value; char *tagname; ulong pc; ulong sp; ulong ra; int curframe; ulong frame; ulong prevsp; ulong prevpc; ulong lastsp; ulong task; ulong debug; struct stack_hook *hookp; struct global_iterator { int finished; int block_index; struct symtab *symtab; struct symbol *sym; struct objfile *obj; } global_iterator; struct load_module *lm; char *member_main_type_name; char *member_main_type_tag_name; char *member_target_type_name; char *member_target_type_tag_name; char *type_tag_name; }; /* * GNU commands */ #define GNU_DATATYPE_INIT (1) #define GNU_DISASSEMBLE (2) #define GNU_GET_LINE_NUMBER (3) #define GNU_PASS_THROUGH (4) #define GNU_GET_DATATYPE (5) #define GNU_COMMAND_EXISTS (6) #define GNU_STACK_TRACE (7) #define GNU_ALPHA_FRAME_OFFSET (8) #define GNU_FUNCTION_NUMARGS (9) #define GNU_RESOLVE_TEXT_ADDR (10) #define GNU_ADD_SYMBOL_FILE (11) #define GNU_DELETE_SYMBOL_FILE (12) #define GNU_VERSION (13) #define GNU_PATCH_SYMBOL_VALUES (14) #define GNU_GET_SYMBOL_TYPE (15) #define GNU_USER_PRINT_OPTION (16) #define GNU_SET_CRASH_BLOCK (17) #define GNU_GET_FUNCTION_RANGE (18) #define GNU_GET_NEXT_DATATYPE (19) #define GNU_LOOKUP_STRUCT_CONTENTS (20) #define GNU_DEBUG_COMMAND (100) /* * GNU flags */ #define GNU_PRINT_LINE_NUMBERS (0x1) #define GNU_FUNCTION_ONLY (0x2) #define GNU_PRINT_ENUMERATORS (0x4) #define GNU_RETURN_ON_ERROR (0x8) #define GNU_COMMAND_FAILED (0x10) #define GNU_FROM_TTY_OFF (0x20) #define GNU_NO_READMEM (0x40) #define GNU_VAR_LENGTH_TYPECODE (0x80) #undef TRUE #undef FALSE #define TRUE (1) #define FALSE (0) #ifdef GDB_COMMON /* * function prototypes required by modified gdb source files. 
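 *
 * These are the hooks that the embedded gdb uses to call back into
 * crash.  Traffic in the other direction goes through the gnu_request
 * structure above; a typical round trip (a sketch only, with error
 * handling elided) looks like:
 *
 *	struct gnu_request *req;
 *
 *	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
 *	req->command = GNU_GET_DATATYPE;
 *	req->name = "task_struct";
 *	req->flags = GNU_RETURN_ON_ERROR;
 *	gdb_interface(req);
 *	if (!(req->flags & GNU_COMMAND_FAILED))
 *		... req->length now holds the overall type size ...
 *	FREEBUF(req);
 *
 * GETBUF(), FREEBUF() and gdb_interface() are declared elsewhere in
 * this file.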
 */
int console(char *, ...);
int gdb_CRASHDEBUG(ulong);
int gdb_readmem_callback(ulong, void *, int, int);
void patch_load_module(struct objfile *objfile, struct minimal_symbol *msymbol);
int patch_kernel_symbol(struct gnu_request *);
struct syment *symbol_search(char *);
int gdb_line_number_callback(ulong, ulong, ulong);
int gdb_print_callback(ulong);
#endif

#ifndef GDB_COMMON
/*
 * WARNING: the following type codes are type_code enums from gdb/gdbtypes.h
 */
enum type_code {
	TYPE_CODE_UNDEF,	/* Not used; catches errors */
	TYPE_CODE_PTR,		/* Pointer type */
	TYPE_CODE_ARRAY,	/* Array type with lower & upper bounds. */
	TYPE_CODE_STRUCT,	/* C struct or Pascal record */
	TYPE_CODE_UNION,	/* C union or Pascal variant part */
	TYPE_CODE_ENUM,		/* Enumeration type */
#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) || defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6)
#if defined(GDB_7_0) || defined(GDB_7_3_1) || defined(GDB_7_6)
	TYPE_CODE_FLAGS,	/* Bit flags type */
#endif
	TYPE_CODE_FUNC,		/* Function type */
	TYPE_CODE_INT,		/* Integer type */

	/* Floating type.  This is *NOT* a complex type.  Beware, there
	   are parts of GDB which bogusly assume that TYPE_CODE_FLT can
	   mean complex. */
	TYPE_CODE_FLT,

	/* Void type.  The length field specifies the length (probably
	   always one) which is used in pointer arithmetic involving
	   pointers to this type, but actually dereferencing such a
	   pointer is invalid; a void type has no length and no actual
	   representation in memory or registers.  A pointer to a void
	   type is a generic pointer. */
	TYPE_CODE_VOID,

	TYPE_CODE_SET,		/* Pascal sets */
	TYPE_CODE_RANGE,	/* Range (integers within spec'd bounds) */

	/*
	 * NOTE: the remainder of the type codes are not listed or used here...
	 */
	TYPE_CODE_BOOL = 20,
#endif
};

/*
 * include/linux/sched.h
 */
#define PF_EXITING  0x00000004  /* getting shut down */
#define PF_KTHREAD  0x00200000  /* I am a kernel thread */

#define SCHED_NORMAL	0
#define SCHED_FIFO	1
#define SCHED_RR	2
#define SCHED_BATCH	3
#define SCHED_ISO	4
#define SCHED_IDLE	5
#define SCHED_DEADLINE	6

extern long _ZOMBIE_;
#define IS_ZOMBIE(task)    (task_state(task) & _ZOMBIE_)
#define IS_EXITING(task)   (task_flags(task) & PF_EXITING)

/*
 * ps command options.
 */
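
/*
 * These PS_* bits are OR-ed into a single flag word as cmd_ps() parses
 * its options.  A minimal sketch (not crash's actual option parser) of
 * how the PS_EXCLUSIVE sanity check falls out of the bitmask layout;
 * count_bits_long() is declared later in this file:
 */
#if 0
static void
ps_option_check_example(ulong flags)
{
	/* At most one of the mutually-exclusive display modes may be set. */
	if (count_bits_long(flags & PS_EXCLUSIVE) > 1)
		error(FATAL, "mutually-exclusive ps options selected\n");
}
#endif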
*/ #define PS_BY_PID (0x1) #define PS_BY_TASK (0x2) #define PS_BY_CMD (0x4) #define PS_SHOW_ALL (0x8) #define PS_PPID_LIST (0x10) #define PS_CHILD_LIST (0x20) #define PS_KERNEL (0x40) #define PS_USER (0x80) #define PS_TIMES (0x100) #define PS_KSTACKP (0x200) #define PS_LAST_RUN (0x400) #define PS_ARGV_ENVP (0x800) #define PS_TGID_LIST (0x1000) #define PS_RLIMIT (0x2000) #define PS_GROUP (0x4000) #define PS_BY_REGEX (0x8000) #define PS_NO_HEADER (0x10000) #define PS_MSECS (0x20000) #define PS_SUMMARY (0x40000) #define PS_POLICY (0x80000) #define PS_ACTIVE (0x100000) #define PS_EXCLUSIVE (PS_TGID_LIST|PS_ARGV_ENVP|PS_TIMES|PS_CHILD_LIST|PS_PPID_LIST|PS_LAST_RUN|PS_RLIMIT|PS_MSECS|PS_SUMMARY|PS_ACTIVE) #define MAX_PS_ARGS (100) /* maximum command-line specific requests */ struct psinfo { int argc; ulong pid[MAX_PS_ARGS]; int type[MAX_PS_ARGS]; ulong task[MAX_PS_ARGS]; char comm[MAX_PS_ARGS][TASK_COMM_LEN+1]; struct regex_data { char *pattern; regex_t regex; } regex_data[MAX_PS_ARGS]; int regexs; ulong *cpus; int policy; }; #define IS_A_NUMBER(X) (decimal(X, 0) || hexadecimal(X, 0)) #define AMBIGUOUS_NUMBER(X) (decimal(X, 0) && hexadecimal(X, 0)) #define is_mclx_compressed_dump(X) (va_server_init((X), 0, 0, 0) == 0) struct task_mem_usage { ulong rss; ulong total_vm; double pct_physmem; ulong mm_struct_addr; ulong pgd_addr; }; /* * Global data (global_data.c) */ extern FILE *fp; extern struct program_context program_context, *pc; extern struct task_table task_table, *tt; extern struct kernel_table kernel_table, *kt; extern struct command_table_entry linux_command_table[]; extern char *args[MAXARGS]; extern int argcnt; extern int argerrs; extern struct offset_table offset_table; extern struct size_table size_table; extern struct array_table array_table; extern struct vm_table vm_table, *vt; extern struct machdep_table *machdep; extern struct symbol_table_data symbol_table_data, *st; extern struct extension_table *extension_table; /* * Generated in build_data.c */ extern char *build_command; extern char *build_data; extern char *build_target; extern char *build_version; extern char *compiler_version; /* * command prototypes */ void cmd_quit(void); /* main.c */ void cmd_mach(void); /* main.c */ void cmd_help(void); /* help.c */ void cmd_test(void); /* test.c */ void cmd_ascii(void); /* tools.c */ void cmd_bpf(void); /* bfp.c */ void cmd_set(void); /* tools.c */ void cmd_eval(void); /* tools.c */ void cmd_list(void); /* tools.c */ void cmd_tree(void); /* tools.c */ void cmd_template(void); /* tools.c */ void cmd_alias(void); /* cmdline.c */ void cmd_repeat(void); /* cmdline.c */ void cmd_rd(void); /* memory.c */ void cmd_wr(void); /* memory.c */ void cmd_ptov(void); /* memory.c */ void cmd_vtop(void); /* memory.c */ void cmd_vm(void); /* memory.c */ void cmd_ptob(void); /* memory.c */ void cmd_btop(void); /* memory.c */ void cmd_kmem(void); /* memory.c */ void cmd_search(void); /* memory.c */ void cmd_swap(void); /* memory.c */ void cmd_pte(void); /* memory.c */ void cmd_ps(void); /* task.c */ void cmd_task(void); /* task.c */ void cmd_foreach(void); /* task.c */ void cmd_runq(void); /* task.c */ void cmd_sig(void); /* task.c */ void cmd_bt(void); /* kernel.c */ void cmd_dis(void); /* kernel.c */ void cmd_mod(void); /* kernel.c */ void cmd_log(void); /* kernel.c */ void cmd_sys(void); /* kernel.c */ void cmd_irq(void); /* kernel.c */ void cmd_timer(void); /* kernel.c */ void cmd_waitq(void); /* kernel.c */ void cmd_sym(void); /* symbols.c */ void cmd_struct(void); /* symbols.c */ void 
cmd_union(void); /* symbols.c */ void cmd_pointer(void); /* symbols.c */ void cmd_whatis(void); /* symbols.c */ void cmd_p(void); /* symbols.c */ void cmd_mount(void); /* filesys.c */ void cmd_files(void); /* filesys.c */ void cmd_fuser(void); /* filesys.c */ void cmd_dev(void); /* dev.c */ void cmd_gdb(void); /* gdb_interface.c */ void cmd_net(void); /* net.c */ void cmd_extend(void); /* extensions.c */ #if defined(S390) || defined(S390X) void cmd_s390dbf(void); #endif void cmd_map(void); /* kvmdump.c */ void cmd_ipcs(void); /* ipcs.c */ /* * main.c */ void main_loop(void); void exec_command(void); struct command_table_entry *get_command_table_entry(char *); void program_usage(int); #define LONG_FORM (1) #define SHORT_FORM (0) void dump_program_context(void); void dump_build_data(void); #ifdef ARM #define machdep_init(X) arm_init(X) #endif #ifdef ARM64 #define machdep_init(X) arm64_init(X) #endif #ifdef X86 #define machdep_init(X) x86_init(X) #endif #ifdef ALPHA #define machdep_init(X) alpha_init(X) #endif #ifdef PPC #define machdep_init(X) ppc_init(X) #endif #ifdef IA64 #define machdep_init(X) ia64_init(X) #endif #ifdef S390 #define machdep_init(X) s390_init(X) #endif #ifdef S390X #define machdep_init(X) s390x_init(X) #endif #ifdef X86_64 #define machdep_init(X) x86_64_init(X) #endif #ifdef PPC64 #define machdep_init(X) ppc64_init(X) #endif #ifdef MIPS #define machdep_init(X) mips_init(X) #endif #ifdef SPARC64 #define machdep_init(X) sparc64_init(X) #endif int clean_exit(int); int untrusted_file(FILE *, char *); char *readmem_function_name(void); char *writemem_function_name(void); char *no_vmcoreinfo(const char *); /* * cmdline.c */ void restart(int); void alias_init(char *); struct alias_data *is_alias(char *); void deallocate_alias(char *); void cmdline_init(void); void set_command_prompt(char *); void exec_input_file(void); void process_command_line(void); void dump_history(void); void resolve_rc_cmd(char *, int); void dump_alias_data(void); int output_open(void); #define output_closed() (!output_open()) void close_output(void); int interruptible(void); int received_SIGINT(void); void debug_redirect(char *); int CRASHPAGER_valid(void); char *setup_scroll_command(void); int minimal_functions(char *); int is_args_input_file(struct command_table_entry *, struct args_input_file *); void exec_args_input_file(struct command_table_entry *, struct args_input_file *); /* * tools.c */ FILE *set_error(char *); int __error(int, char *, ...); #define error __error /* avoid conflict with gdb error() */ int console(char *, ...); void create_console_device(char *); int console_off(void); int console_on(int); int console_verbatim(char *); int whitespace(int); int ascii(int); int ascii_string(char *); int printable_string(char *); char *clean_line(char *); char *strip_line_end(char *); char *strip_linefeeds(char *); char *strip_beginning_whitespace(char *); char *strip_ending_whitespace(char *); char *strip_ending_char(char *, char); char *strip_beginning_char(char *, char); char *strip_comma(char *); char *strip_hex(char *); char *upper_case(const char *, char *); char *first_nonspace(char *); char *first_space(char *); char *replace_string(char *, char *, char); void string_insert(char *, char *); char *strstr_rightmost(char *, char *); char *null_first_space(char *); int parse_line(char *, char **); void print_verbatim(FILE *, char *); char *fixup_percent(char *); int can_eval(char *); ulong eval(char *, int, int *); ulonglong evall(char *, int, int *); int eval_common(char *, int, int *, struct 
number_option *); ulong htol(char *, int, int *); ulong dtol(char *, int, int *); unsigned int dtoi(char *, int, int *); ulong stol(char *, int, int *); ulonglong stoll(char *, int, int *); ulonglong htoll(char *, int, int *); ulonglong dtoll(char *, int, int *); int decimal(char *, int); int hexadecimal(char *, int); int hexadecimal_only(char *, int); ulong convert(char *, int, int *, ulong); void pad_line(FILE *, int, char); #define INDENT(x) pad_line(fp, x, ' ') char *mkstring(char *, int, ulong, const char *); #define MKSTR(X) ((const char *)(X)) int count_leading_spaces(char *); int count_chars(char *, char); long count_buffer_chars(char *, char, long); char *space(int); char *concat_args(char *, int, int); char *shift_string_left(char *, int); char *shift_string_right(char *, int); int bracketed(char *, char *, int); void backspace(int); int do_list(struct list_data *); int do_list_no_hash(struct list_data *); struct radix_tree_ops { void (*entry)(ulong node, ulong slot, const char *path, ulong index, void *private); uint radix; void *private; }; int do_radix_tree_traverse(ulong ptr, int is_root, struct radix_tree_ops *ops); struct xarray_ops { void (*entry)(ulong node, ulong slot, const char *path, ulong index, void *private); uint radix; void *private; }; int do_xarray_traverse(ulong ptr, int is_root, struct xarray_ops *ops); int do_rdtree(struct tree_data *); int do_rbtree(struct tree_data *); int do_xatree(struct tree_data *); int retrieve_list(ulong *, int); long power(long, int); long long ll_power(long long, long long); void hq_init(void); int hq_open(void); int hq_close(void); int hq_enter(ulong); int hq_entry_exists(ulong); int hq_is_open(void); int hq_is_inuse(void); long get_embedded(void); void dump_embedded(char *); char *ordinal(ulong, char *); char *first_nonspace(char *); void dump_hash_table(int); void dump_shared_bufs(void); void drop_core(char *); int extract_hex(char *, ulong *, char, ulong); int count_bits_int(int); int count_bits_long(ulong); int highest_bit_long(ulong); int lowest_bit_long(ulong); void buf_init(void); void sym_buf_init(void); void free_all_bufs(void); char *getbuf(long); void freebuf(char *); char *resizebuf(char *, long, long); char *strdupbuf(char *); #define GETBUF(X) getbuf((long)(X)) #define FREEBUF(X) freebuf((char *)(X)) #define RESIZEBUF(X,Y,Z) (X) = (typeof(X))resizebuf((char *)(X), (long)(Y), (long)(Z)); #define STRDUPBUF(X) strdupbuf((char *)(X)) void sigsetup(int, void *, struct sigaction *, struct sigaction *); #define SIGACTION(s, h, a, o) sigsetup(s, h, a, o) char *convert_time(ulonglong, char *); void stall(ulong); char *pages_to_size(ulong, char *); int clean_arg(void); int empty_list(ulong); int machine_type(char *); int machine_type_mismatch(char *, char *, char *, ulong); void command_not_supported(void); void option_not_supported(int); void please_wait(char *); void please_wait_done(void); int pathcmp(char *, char *); int calculate(char *, ulong *, ulonglong *, ulong); int endian_mismatch(char *, char, ulong); uint16_t swap16(uint16_t, int); uint32_t swap32(uint32_t, int); uint64_t swap64(uint64_t, int); ulong *get_cpumask_buf(void); int make_cpumask(char *, ulong *, int, int *); size_t strlcpy(char *, char *, size_t); struct rb_node *rb_first(struct rb_root *); struct rb_node *rb_parent(struct rb_node *, struct rb_node *); struct rb_node *rb_right(struct rb_node *, struct rb_node *); struct rb_node *rb_left(struct rb_node *, struct rb_node *); struct rb_node *rb_next(struct rb_node *); struct rb_node *rb_last(struct 
rb_root *); /* * symbols.c */ void symtab_init(void); char *check_specified_kernel_debug_file(void); void no_debugging_data(int); void get_text_init_space(void); int is_kernel_text(ulong); int is_kernel_data(ulong); int is_init_data(ulong value); int is_kernel_text_offset(ulong); int is_symbol_text(struct syment *); int is_rodata(ulong, struct syment **); int get_text_function_range(ulong, ulong *, ulong *); void datatype_init(void); struct syment *symbol_search(char *); struct syment *value_search(ulong, ulong *); struct syment *value_search_base_kernel(ulong, ulong *); struct syment *value_search_module(ulong, ulong *); struct syment *symbol_search_next(char *, struct syment *); ulong highest_bss_symbol(void); int in_ksymbol_range(ulong); int module_symbol(ulong, struct syment **, struct load_module **, char *, ulong); #define IS_MODULE_VADDR(X) \ (module_symbol((ulong)(X), NULL, NULL, NULL, *gdb_output_radix)) char *closest_symbol(ulong); ulong closest_symbol_value(ulong); #define SAME_FUNCTION(X,Y) (closest_symbol_value(X) == closest_symbol_value(Y)) void show_symbol(struct syment *, ulong, ulong); #define SHOW_LINENUM (0x1) #define SHOW_SECTION (0x2) #define SHOW_HEX_OFFS (0x4) #define SHOW_DEC_OFFS (0x8) #define SHOW_RADIX() (*gdb_output_radix == 16 ? SHOW_HEX_OFFS : SHOW_DEC_OFFS) #define SHOW_MODULE (0x10) int symbol_name_count(char *); int symbol_query(char *, char *, struct syment **); struct syment *next_symbol(char *, struct syment *); struct syment *prev_symbol(char *, struct syment *); void get_symbol_data(char *, long, void *); int try_get_symbol_data(char *, long, void *); char *value_to_symstr(ulong, char *, ulong); char *value_symbol(ulong); ulong symbol_value(char *); ulong symbol_value_module(char *, char *); struct syment *per_cpu_symbol_search(char *); int symbol_exists(char *s); int kernel_symbol_exists(char *s); struct syment *kernel_symbol_search(char *); ulong symbol_value_from_proc_kallsyms(char *); int get_syment_array(char *, struct syment **, int); void set_temporary_radix(unsigned int, unsigned int *); void restore_current_radix(unsigned int); void dump_struct(char *, ulong, unsigned); void dump_struct_member(char *, ulong, unsigned); void dump_union(char *, ulong, unsigned); void store_module_symbols_v1(ulong, int); void store_module_symbols_v2(ulong, int); int is_datatype_command(void); int is_typedef(char *); int arg_to_datatype(char *, struct datatype_member *, ulong); void dump_symbol_table(void); void dump_struct_table(ulong); void dump_offset_table(char *, ulong); int is_elf_file(char *); int is_kernel(char *); int is_shared_object(char *); int file_elf_version(char *); int is_system_map(char *); int is_compressed_kernel(char *, char **); int select_namelist(char *); int get_array_length(char *, int *, long); int get_array_length_alt(char *, char *, int *, long); int builtin_array_length(char *, int, int *); char *get_line_number(ulong, char *, int); char *get_build_directory(char *); int datatype_exists(char *); int get_function_numargs(ulong); int is_module_name(char *, ulong *, struct load_module **); int is_module_address(ulong, char *); ulong lowest_module_address(void); ulong highest_module_address(void); int load_module_symbols(char *, char *, ulong); void delete_load_module(ulong); ulong gdb_load_module_callback(ulong, char *); char *load_module_filter(char *, int); #define LM_P_FILTER (1) #define LM_DIS_FILTER (2) long datatype_info(char *, char *, struct datatype_member *); int get_symbol_type(char *, char *, struct gnu_request *); int 
get_symbol_length(char *); int text_value_cache(ulong, uint32_t, uint32_t *); int text_value_cache_byte(ulong, unsigned char *); void dump_text_value_cache(int); void clear_text_value_cache(void); void dump_numargs_cache(void); int patch_kernel_symbol(struct gnu_request *); struct syment *generic_machdep_value_to_symbol(ulong, ulong *); long OFFSET_verify(long, char *, char *, int, char *); long SIZE_verify(long, char *, char *, int, char *); long OFFSET_option(long, long, char *, char *, int, char *, char *); long SIZE_option(long, long, char *, char *, int, char *, char *); void dump_trace(void **); int enumerator_value(char *, long *); int dump_enumerator_list(char *); struct load_module *init_module_function(ulong); struct struct_member_data { char *structure; char *member; long type; long unsigned_type; long length; long offset; long bitpos; long bitsize; }; int fill_struct_member_data(struct struct_member_data *); void parse_for_member_extended(struct datatype_member *, ulong); void add_to_downsized(char *); int is_downsized(char *); int is_string(char *, char *); struct syment *symbol_complete_match(const char *, struct syment *); /* * memory.c */ void mem_init(void); void vm_init(void); int readmem(ulonglong, int, void *, long, char *, ulong); int writemem(ulonglong, int, void *, long, char *, ulong); int generic_verify_paddr(uint64_t); int read_dev_mem(int, void *, int, ulong, physaddr_t); int read_memory_device(int, void *, int, ulong, physaddr_t); int read_mclx_dumpfile(int, void *, int, ulong, physaddr_t); int read_lkcd_dumpfile(int, void *, int, ulong, physaddr_t); int read_daemon(int, void *, int, ulong, physaddr_t); int write_dev_mem(int, void *, int, ulong, physaddr_t); int write_memory_device(int, void *, int, ulong, physaddr_t); int write_mclx_dumpfile(int, void *, int, ulong, physaddr_t); int write_lkcd_dumpfile(int, void *, int, ulong, physaddr_t); int write_daemon(int, void *, int, ulong, physaddr_t); int kvtop(struct task_context *, ulong, physaddr_t *, int); int uvtop(struct task_context *, ulong, physaddr_t *, int); void do_vtop(ulong, struct task_context *, ulong); void raw_stack_dump(ulong, ulong); void raw_data_dump(ulong, long, int); int accessible(ulong); ulong vm_area_dump(ulong, ulong, ulong, struct reference *); #define IN_TASK_VMA(TASK,VA) (vm_area_dump((TASK), UVADDR|VERIFY_ADDR, (VA), 0)) char *fill_vma_cache(ulong); void clear_vma_cache(void); void dump_vma_cache(ulong); int generic_is_page_ptr(ulong, physaddr_t *); int is_page_ptr(ulong, physaddr_t *); void dump_vm_table(int); int read_string(ulong, char *, int); void get_task_mem_usage(ulong, struct task_mem_usage *); char *get_memory_size(char *); uint64_t generic_memory_size(void); char *swap_location(ulonglong, char *); void clear_swap_info_cache(void); uint memory_page_size(void); void force_page_size(char *); ulong first_vmalloc_address(void); ulong last_vmalloc_address(void); int in_vmlist_segment(ulong); int phys_to_page(physaddr_t, ulong *); int generic_get_kvaddr_ranges(struct vaddr_range *); int l1_cache_size(void); int dumpfile_memory(int); #define DUMPFILE_MEM_USED (1) #define DUMPFILE_FREE_MEM (2) #define DUMPFILE_MEM_DUMP (3) #define DUMPFILE_ENVIRONMENT (4) uint64_t total_node_memory(void); int generic_is_kvaddr(ulong); int generic_is_uvaddr(ulong, struct task_context *); void fill_stackbuf(struct bt_info *); void alter_stackbuf(struct bt_info *); int vaddr_type(ulong, struct task_context *); char *format_stack_entry(struct bt_info *bt, char *, ulong, ulong); int in_user_stack(ulong, 
ulong); int dump_inode_page(ulong); ulong valid_section_nr(ulong); void display_memory_from_file_offset(ulonglong, long, void *); /* * filesys.c */ void fd_init(void); void vfs_init(void); int is_a_tty(char *); int file_exists(char *, struct stat *); int file_readable(char *); int is_directory(char *); char *search_directory_tree(char *, char *, int); void open_tmpfile(void); void close_tmpfile(void); void open_tmpfile2(void); void set_tmpfile2(FILE *); void close_tmpfile2(void); void open_files_dump(ulong, int, struct reference *); void get_pathname(ulong, char *, int, int, ulong); ulong *get_mount_list(int *, struct task_context *); char *vfsmount_devname(ulong, char *, int); ulong file_to_dentry(ulong); ulong file_to_vfsmnt(ulong); int get_proc_version(void); int file_checksum(char *, long *); void dump_filesys_table(int); char *fill_file_cache(ulong); void clear_file_cache(void); char *fill_dentry_cache(ulong); void clear_dentry_cache(void); char *fill_inode_cache(ulong); void clear_inode_cache(void); int monitor_memory(long *, long *, long *, long *); int is_readable(char *); struct list_pair { ulong index; void *value; }; #define radix_tree_pair list_pair ulong do_radix_tree(ulong, int, struct list_pair *); #define RADIX_TREE_COUNT (1) #define RADIX_TREE_SEARCH (2) #define RADIX_TREE_DUMP (3) #define RADIX_TREE_GATHER (4) #define RADIX_TREE_DUMP_CB (5) /* * from: "include/linux/radix-tree.h" */ #define RADIX_TREE_ENTRY_MASK 3UL #define RADIX_TREE_EXCEPTIONAL_ENTRY 2 ulong do_xarray(ulong, int, struct list_pair *); #define XARRAY_COUNT (1) #define XARRAY_SEARCH (2) #define XARRAY_DUMP (3) #define XARRAY_GATHER (4) #define XARRAY_DUMP_CB (5) #define XARRAY_TAG_MASK (3UL) #define XARRAY_TAG_INTERNAL (2UL) int file_dump(ulong, ulong, ulong, int, int); #define DUMP_FULL_NAME 0x1 #define DUMP_INODE_ONLY 0x2 #define DUMP_DENTRY_ONLY 0x4 #define DUMP_EMPTY_FILE 0x8 #define DUMP_FILE_NRPAGES 0x10 #endif /* !GDB_COMMON */ int same_file(char *, char *); #ifndef GDB_COMMON int cleanup_memory_driver(void); /* * help.c */ #define HELP_COLUMNS 5 #define START_OF_HELP_DATA(X) "START_OF_HELP_DATA" X #define END_OF_HELP_DATA "END_OF_HELP_DATA" void help_init(void); void cmd_usage(char *, int); void display_version(void); void display_help_screen(char *); #ifdef ARM #define dump_machdep_table(X) arm_dump_machdep_table(X) #endif #ifdef ARM64 #define dump_machdep_table(X) arm64_dump_machdep_table(X) #endif #ifdef X86 #define dump_machdep_table(X) x86_dump_machdep_table(X) #endif #ifdef ALPHA #define dump_machdep_table(X) alpha_dump_machdep_table(X) #endif #ifdef PPC #define dump_machdep_table(X) ppc_dump_machdep_table(X) #endif #ifdef IA64 #define dump_machdep_table(X) ia64_dump_machdep_table(X) #endif #ifdef S390 #define dump_machdep_table(X) s390_dump_machdep_table(X) #endif #ifdef S390X #define dump_machdep_table(X) s390x_dump_machdep_table(X) #endif #ifdef X86_64 #define dump_machdep_table(X) x86_64_dump_machdep_table(X) #endif #ifdef PPC64 #define dump_machdep_table(X) ppc64_dump_machdep_table(X) #endif #ifdef MIPS #define dump_machdep_table(X) mips_dump_machdep_table(X) #endif #ifdef SPARC64 #define dump_machdep_table(X) sparc64_dump_machdep_table(X) #endif extern char *help_pointer[]; extern char *help_alias[]; extern char *help_ascii[]; extern char *help_bpf[]; extern char *help_bt[]; extern char *help_btop[]; extern char *help_dev[]; extern char *help_dis[]; extern char *help_eval[]; extern char *help_exit[]; extern char *help_extend[]; extern char *help_files[]; extern char *help_foreach[]; 
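
/*
 * Extension modules supply the same kind of NULL-terminated help array
 * for the commands they register (see register_extension() below).
 * A sketch, following the layout used by the bundled extension modules;
 * the command name "mycmd" is made up:
 */
#if 0
char *help_mycmd[] = {
	"mycmd",			 /* command name */
	"one-line description of mycmd", /* short description */
	"[argument ...]",		 /* argument synopsis */
	"  This longer text is what the 'help mycmd' command displays.",
	NULL
};
#endif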
extern char *help_fuser[]; extern char *help_gdb[]; extern char *help_help[]; extern char *help_irq[]; extern char *help_kmem[]; extern char *help__list[]; extern char *help_tree[]; extern char *help_log[]; extern char *help_mach[]; extern char *help_mod[]; extern char *help_mount[]; extern char *help_net[]; extern char *help_p[]; extern char *help_ps[]; extern char *help_pte[]; extern char *help_ptob[]; extern char *help_ptov[]; extern char *help_quit[]; extern char *help_rd[]; extern char *help_repeat[]; extern char *help_runq[]; extern char *help_ipcs[]; extern char *help_search[]; extern char *help_set[]; extern char *help_sig[]; extern char *help_struct[]; extern char *help_swap[]; extern char *help_sym[]; extern char *help_sys[]; extern char *help_task[]; extern char *help_timer[]; extern char *help_union[]; extern char *help_vm[]; extern char *help_vtop[]; extern char *help_waitq[]; extern char *help_whatis[]; extern char *help_wr[]; #if defined(S390) || defined(S390X) extern char *help_s390dbf[]; #endif extern char *help_map[]; /* * task.c */ void task_init(void); int set_context(ulong, ulong); void show_context(struct task_context *); ulong pid_to_task(ulong); ulong task_to_pid(ulong); int task_exists(ulong); int is_kernel_thread(ulong); int is_idle_thread(ulong); void get_idle_threads(ulong *, int); char *task_state_string(ulong, char *, int); ulong task_flags(ulong); ulong task_state(ulong); ulong task_mm(ulong, int); ulong task_tgid(ulong); ulonglong task_last_run(ulong); ulong vaddr_in_task_struct(ulong); int comm_exists(char *); struct task_context *task_to_context(ulong); struct task_context *pid_to_context(ulong); struct task_context *tgid_to_context(ulong); ulong stkptr_to_task(ulong); ulong task_to_thread_info(ulong); ulong task_to_stackbase(ulong); int str_to_context(char *, ulong *, struct task_context **); #define STR_PID (0x1) #define STR_TASK (0x2) #define STR_INVALID (0x4) char *get_panicmsg(char *); char *task_cpu(int, char *, int); void print_task_header(FILE *, struct task_context *, int); ulong get_active_task(int); int is_task_active(ulong); int is_panic_thread(ulong); int get_panic_ksp(struct bt_info *, ulong *); void foreach(struct foreach_data *); int pid_exists(ulong); #define TASKS_PER_PID(x) pid_exists(x) char *fill_task_struct(ulong); #define IS_LAST_TASK_READ(task) ((ulong)(task) == tt->last_task_read) char *fill_thread_info(ulong); #define IS_LAST_THREAD_INFO_READ(ti) ((ulong)(ti) == tt->last_thread_info_read) char *fill_mm_struct(ulong); #define IS_LAST_MM_READ(mm) ((ulong)(mm) == tt->last_mm_read) void do_task(ulong, ulong, struct reference *, unsigned int); void clear_task_cache(void); int get_active_set(void); void clear_active_set(void); void do_sig(ulong, ulong, struct reference *); void modify_signame(int, char *, char *); ulong generic_get_stackbase(ulong); ulong generic_get_stacktop(ulong); void dump_task_table(int); void sort_context_array(void); void sort_tgid_array(void); int sort_by_tgid(const void *, const void *); int in_irq_ctx(ulonglong, int, ulong); void check_stack_overflow(void); /* * extensions.c */ void register_extension(struct command_table_entry *); void dump_extension_table(int); void load_extension(char *); void unload_extension(char *); void preload_extensions(void); /* Hooks for sial */ unsigned long get_curtask(void); char *crash_global_cmd(void); struct command_table_entry *crash_cmd_table(void); /* * kernel.c */ void kernel_init(void); void module_init(void); void verify_version(void); void verify_spinlock(void); void 
non_matching_kernel(void); struct load_module *modref_to_load_module(char *); int load_module_symbols_helper(char *); void unlink_module(struct load_module *); int check_specified_module_tree(char *, char *); int is_system_call(char *, ulong); void generic_dump_irq(int); void generic_get_irq_affinity(int); void generic_show_interrupts(int, ulong *); int generic_dis_filter(ulong, char *, unsigned int); int kernel_BUG_encoding_bytes(void); void display_sys_stats(void); char *get_uptime(char *, ulonglong *); void clone_bt_info(struct bt_info *, struct bt_info *, struct task_context *); void dump_kernel_table(int); void dump_bt_info(struct bt_info *, char *where); void dump_log(int); #define SHOW_LOG_LEVEL (0x1) #define SHOW_LOG_DICT (0x2) #define SHOW_LOG_TEXT (0x4) #define SHOW_LOG_AUDIT (0x8) void set_cpu(int); void clear_machdep_cache(void); struct stack_hook *gather_text_list(struct bt_info *); int get_cpus_online(void); int get_cpus_active(void); int get_cpus_present(void); int get_cpus_possible(void); int check_offline_cpu(int); int hide_offline_cpu(int); int get_highest_cpu_online(void); int get_highest_cpu_present(void); int get_cpus_to_display(void); void get_log_from_vmcoreinfo(char *file); int in_cpu_map(int, int); void paravirt_init(void); void print_stack_text_syms(struct bt_info *, ulong, ulong); void back_trace(struct bt_info *); int in_alternate_stack(int, ulong); ulong cpu_map_addr(const char *type); #define BT_RAW (0x1ULL) #define BT_SYMBOLIC_ARGS (0x2ULL) #define BT_FULL (0x4ULL) #define BT_TEXT_SYMBOLS (0x8ULL) #define BT_TEXT_SYMBOLS_PRINT (0x10ULL) #define BT_TEXT_SYMBOLS_NOPRINT (0x20ULL) #define BT_USE_GDB (0x40ULL) #define BT_EXCEPTION_FRAME (0x80ULL) #define BT_LINE_NUMBERS (0x100ULL) #define BT_USER_EFRAME (0x200ULL) #define BT_INCOMPLETE_USER_EFRAME (BT_USER_EFRAME) #define BT_SAVE_LASTSP (0x400ULL) #define BT_FROM_EXCEPTION (0x800ULL) #define BT_FROM_CALLFRAME (0x1000ULL) #define BT_EFRAME_SEARCH (0x2000ULL) #define BT_SPECULATE (0x4000ULL) #define BT_FRAMESIZE_DISABLE (BT_SPECULATE) #define BT_RESCHEDULE (0x8000ULL) #define BT_SCHEDULE (BT_RESCHEDULE) #define BT_RET_FROM_SMP_FORK (0x10000ULL) #define BT_STRACE (0x20000ULL) #define BT_KDUMP_ADJUST (BT_STRACE) #define BT_KSTACKP (0x40000ULL) #define BT_LOOP_TRAP (0x80000ULL) #define BT_BUMP_FRAME_LEVEL (0x100000ULL) #define BT_EFRAME_COUNT (0x200000ULL) #define BT_CPU_IDLE (0x400000ULL) #define BT_WRAP_TRAP (0x800000ULL) #define BT_KERNEL_THREAD (0x1000000ULL) #define BT_ERROR_MASK (BT_LOOP_TRAP|BT_WRAP_TRAP|BT_KERNEL_THREAD|BT_CPU_IDLE) #define BT_UNWIND_ERROR (0x2000000ULL) #define BT_OLD_BACK_TRACE (0x4000000ULL) #define BT_OPT_BACK_TRACE (0x4000000ULL) #define BT_FRAMESIZE_DEBUG (0x8000000ULL) #define BT_CONTEXT_SWITCH (0x10000000ULL) #define BT_HARDIRQ (0x20000000ULL) #define BT_SOFTIRQ (0x40000000ULL) #define BT_CHECK_CALLER (0x80000000ULL) #define BT_NO_CHECK_CALLER (0x100000000ULL) #define BT_EXCEPTION_STACK (0x200000000ULL) #define BT_IRQSTACK (0x400000000ULL) #define BT_DUMPFILE_SEARCH (0x800000000ULL) #define BT_EFRAME_SEARCH2 (0x1000000000ULL) #define BT_START (0x2000000000ULL) #define BT_TEXT_SYMBOLS_ALL (0x4000000000ULL) #define BT_XEN_STOP_THIS_CPU (0x8000000000ULL) #define BT_THREAD_GROUP (0x10000000000ULL) #define BT_SAVE_EFRAME_IP (0x20000000000ULL) #define BT_FULL_SYM_SLAB (0x40000000000ULL) #define BT_KDUMP_ELF_REGS (0x80000000000ULL) #define BT_USER_SPACE (0x100000000000ULL) #define BT_KERNEL_SPACE (0x200000000000ULL) #define BT_FULL_SYM_SLAB2 (0x400000000000ULL) #define BT_EFRAME_TARGET 
(0x800000000000ULL) #define BT_CPUMASK (0x1000000000000ULL) #define BT_SHOW_ALL_REGS (0x2000000000000ULL) #define BT_REGS_NOT_FOUND (0x4000000000000ULL) #define BT_SYMBOL_OFFSET (BT_SYMBOLIC_ARGS) #define BT_REF_HEXVAL (0x1) #define BT_REF_SYMBOL (0x2) #define BT_REF_FOUND (0x4) #define BT_REFERENCE_CHECK(X) ((X)->ref) #define BT_REFERENCE_FOUND(X) ((X)->ref && ((X)->ref->cmdflags & BT_REF_FOUND)) #define NO_MODULES() \ (!kt->module_list || (kt->module_list == kt->kernel_module)) #define USER_EFRAME_ADDR(task) \ ((ulong)task + UNION_SIZE("task_union") - SIZE(pt_regs)) struct remote_file { char *filename; char *local; int fd; int flags; int type; long csum; off_t size; }; #define REMOTE_VERBOSE (O_RDWR << 1) #define REMOTE_COPY_DONE (REMOTE_VERBOSE << 1) #define TYPE_ELF (REMOTE_VERBOSE << 2) #define TYPE_DEVMEM (REMOTE_VERBOSE << 3) #define TYPE_MCLXCD (REMOTE_VERBOSE << 4) #define TYPE_LKCD (REMOTE_VERBOSE << 5) #define TYPE_S390D (REMOTE_VERBOSE << 6) #define TYPE_NETDUMP (REMOTE_VERBOSE << 7) ulonglong xen_m2p(ulonglong); void read_in_kernel_config(int); #define IKCFG_INIT (0) #define IKCFG_READ (1) #define IKCFG_SETUP (2) #define IKCFG_FREE (3) int get_kernel_config(char *, char **); enum { IKCONFIG_N, IKCONFIG_Y, IKCONFIG_M, IKCONFIG_STR, }; #define MAGIC_START "IKCFG_ST" #define MAGIC_END "IKCFG_ED" #define MAGIC_SIZE (sizeof(MAGIC_START) - 1) /* * dev.c */ void dev_init(void); void dump_dev_table(void); void devdump_extract(void *, ulonglong, char *, FILE *); void devdump_info(void *, ulonglong, FILE *); /* * ipcs.c */ void ipcs_init(void); ulong idr_find(ulong, int); #ifdef ARM void arm_init(int); void arm_dump_machdep_table(ulong); int arm_is_vmalloc_addr(ulong); void arm_dump_backtrace_entry(struct bt_info *, int, ulong, ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to ARM architecture\n") struct arm_pt_regs { ulong uregs[18]; }; #define ARM_cpsr uregs[16] #define ARM_pc uregs[15] #define ARM_lr uregs[14] #define ARM_sp uregs[13] #define ARM_ip uregs[12] #define ARM_fp uregs[11] #define ARM_r10 uregs[10] #define ARM_r9 uregs[9] #define ARM_r8 uregs[8] #define ARM_r7 uregs[7] #define ARM_r6 uregs[6] #define ARM_r5 uregs[5] #define ARM_r4 uregs[4] #define ARM_r3 uregs[3] #define ARM_r2 uregs[2] #define ARM_r1 uregs[1] #define ARM_r0 uregs[0] #define ARM_ORIG_r0 uregs[17] #define KSYMS_START (0x1) #define PHYS_BASE (0x2) #define PGTABLE_V2 (0x4) #define IDMAP_PGD (0x8) #define KVBASE_MASK (0x1ffffff) struct machine_specific { ulong phys_base; ulong vmalloc_start_addr; ulong modules_vaddr; ulong modules_end; ulong kernel_text_start; ulong kernel_text_end; ulong exception_text_start; ulong exception_text_end; ulonglong last_pgd_read_lpae; ulonglong last_pmd_read_lpae; ulonglong last_ptbl_read_lpae; struct arm_pt_regs *crash_task_regs; int unwind_index_prel31; }; int init_unwind_tables(void); void unwind_backtrace(struct bt_info *); #endif /* ARM */ /* * arm64.c */ #ifdef ARM64 void arm64_init(int); void arm64_dump_machdep_table(ulong); ulong arm64_VTOP(ulong); int arm64_IS_VMALLOC_ADDR(ulong); ulong arm64_swp_type(ulong); ulong arm64_swp_offset(ulong); #endif /* * alpha.c */ #ifdef ALPHA void alpha_init(int); void alpha_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to alpha architecture\n") #define HWRESET_TASK(X) ((machdep->flags & HWRESET) && is_task_active(X) && \ (task_to_context(X)->processor == 0)) #endif /* * x86.c */ #ifdef X86 void x86_init(int); void x86_dump_machdep_table(ulong); void 
x86_display_idt_table(void); #define display_idt_table() x86_display_idt_table() #define KSYMS_START (0x1) void x86_dump_eframe_common(struct bt_info *bt, ulong *, int); char *x86_function_called_by(ulong); struct syment *x86_jmp_error_code(ulong); struct syment *x86_text_lock_jmp(ulong, ulong *); struct machine_specific { ulong *idt_table; ulong entry_tramp_start; ulong entry_tramp_end; physaddr_t entry_tramp_start_phys; ulonglong last_pmd_read_PAE; ulonglong last_ptbl_read_PAE; ulong page_protnone; int max_numnodes; ulong *remap_start_vaddr; ulong *remap_end_vaddr; ulong *remap_start_pfn; }; struct syment *x86_is_entry_tramp_address(ulong, ulong *); #endif /* * x86_64.c */ #ifdef X86_64 void x86_64_init(int); void x86_64_dump_machdep_table(ulong); ulong x86_64_PTOV(ulong); ulong x86_64_VTOP(ulong); int x86_64_IS_VMALLOC_ADDR(ulong); ulong x86_64_swp_type(ulong); ulong x86_64_swp_offset(ulong); void x86_64_display_idt_table(void); #define display_idt_table() x86_64_display_idt_table() long x86_64_exception_frame(ulong, ulong, char *, struct bt_info *, FILE *); #define EFRAME_INIT (0) struct x86_64_pt_regs_offsets { long r15; long r14; long r13; long r12; long rbp; long rbx; /* arguments: non interrupts/non tracing syscalls only save upto here*/ long r11; long r10; long r9; long r8; long rax; long rcx; long rdx; long rsi; long rdi; long orig_rax; /* end of arguments */ /* cpu exception frame or undefined */ long rip; long cs; long eflags; long rsp; long ss; }; #define MAX_EXCEPTION_STACKS 7 #define NMI_STACK (machdep->machspec->stkinfo.NMI_stack_index) struct x86_64_stkinfo { ulong ebase[NR_CPUS][MAX_EXCEPTION_STACKS]; int esize[MAX_EXCEPTION_STACKS]; ulong ibase[NR_CPUS]; int isize; int NMI_stack_index; char *exception_stacks[MAX_EXCEPTION_STACKS]; }; typedef struct __attribute__((__packed__)) { signed short sp_offset; signed short bp_offset; unsigned int sp_reg:4; unsigned int bp_reg:4; unsigned int type:2; unsigned int end:1; } kernel_orc_entry; struct ORC_data { int module_ORC; uint lookup_num_blocks; ulong __start_orc_unwind_ip; ulong __stop_orc_unwind_ip; ulong __start_orc_unwind; ulong __stop_orc_unwind; ulong orc_lookup; ulong ip_entry; ulong orc_entry; kernel_orc_entry kernel_orc_entry; }; #define ORC_TYPE_CALL 0 #define ORC_TYPE_REGS 1 #define ORC_TYPE_REGS_IRET 2 #define UNWIND_HINT_TYPE_SAVE 3 #define UNWIND_HINT_TYPE_RESTORE 4 #define ORC_REG_UNDEFINED 0 #define ORC_REG_PREV_SP 1 #define ORC_REG_DX 2 #define ORC_REG_DI 3 #define ORC_REG_BP 4 #define ORC_REG_SP 5 #define ORC_REG_R10 6 #define ORC_REG_R13 7 #define ORC_REG_BP_INDIRECT 8 #define ORC_REG_SP_INDIRECT 9 #define ORC_REG_MAX 15 struct machine_specific { ulong userspace_top; ulong page_offset; ulong vmalloc_start_addr; ulong vmalloc_end; ulong vmemmap_vaddr; ulong vmemmap_end; ulong modules_vaddr; ulong modules_end; ulong phys_base; char *pml4; char *upml; ulong last_upml_read; ulong last_pml4_read; char *irqstack; ulong irq_eframe_link; struct x86_64_pt_regs_offsets pto; struct x86_64_stkinfo stkinfo; ulong *current; ulong *crash_nmi_rsp; ulong vsyscall_page; ulong thread_return; ulong page_protnone; ulong GART_start; ulong GART_end; ulong kernel_image_size; ulong physical_mask_shift; ulong pgdir_shift; char *p4d; ulong last_p4d_read; struct ORC_data orc; ulong irq_stack_gap; ulong kpti_entry_stack; ulong kpti_entry_stack_size; ulong ptrs_per_pgd; ulong cpu_entry_area_start; ulong cpu_entry_area_end; ulong page_offset_force; }; #define KSYMS_START (0x1) #define PT_REGS_INIT (0x2) #define VM_ORIG (0x4) #define 
VM_2_6_11 (0x8) #define VM_XEN (0x10) #define NO_TSS (0x20) #define SCHED_TEXT (0x40) #define PHYS_BASE (0x80) #define VM_XEN_RHEL4 (0x100) #define FRAMEPOINTER (0x200) #define GART_REGION (0x400) #define NESTED_NMI (0x800) #define RANDOMIZED (0x1000) #define VM_5LEVEL (0x2000) #define ORC (0x4000) #define KPTI (0x8000) #define L1TF (0x10000) #define VM_FLAGS (VM_ORIG|VM_2_6_11|VM_XEN|VM_XEN_RHEL4|VM_5LEVEL) #define _2MB_PAGE_MASK (~((MEGABYTES(2))-1)) #endif #if defined(X86) || defined(X86_64) /* * unwind_x86_32_64.c */ void init_unwind_table(void); int dwarf_backtrace(struct bt_info *, int, ulong); void dwarf_debug(struct bt_info *); int dwarf_print_stack_entry(struct bt_info *, int); #endif /* * ppc64.c */ /* * This structure was copied from kernel source * in include/asm-ppc/ptrace.h */ struct ppc64_pt_regs { long gpr[32]; long nip; long msr; long orig_gpr3; /* Used for restarting system calls */ long ctr; long link; long xer; long ccr; long mq; /* 601 only (not used at present) */ /* Used on APUS to hold IPL value. */ long trap; /* Reason for being here */ long dar; /* Fault registers */ long dsisr; long result; /* Result of a system call */ }; struct ppc64_elf_siginfo { int si_signo; int si_code; int si_errno; }; struct ppc64_elf_prstatus { struct ppc64_elf_siginfo pr_info; short pr_cursig; unsigned long pr_sigpend; unsigned long pr_sighold; pid_t pr_pid; pid_t pr_ppid; pid_t pr_pgrp; pid_t pr_sid; struct timeval pr_utime; struct timeval pr_stime; struct timeval pr_cutime; struct timeval pr_cstime; struct ppc64_pt_regs pr_reg; int pr_fpvalid; }; #ifdef PPC64 struct ppc64_opal { uint64_t base; uint64_t entry; uint64_t size; }; struct ppc64_vmemmap { unsigned long phys; unsigned long virt; }; /* * Used to store the HW interrupt stack. It is only for 2.4. */ struct machine_specific { ulong hwintrstack[NR_CPUS]; char *hwstackbuf; uint hwstacksize; uint l4_index_size; uint l3_index_size; uint l2_index_size; uint l1_index_size; uint ptrs_per_l4; uint ptrs_per_l3; uint ptrs_per_l2; uint ptrs_per_l1; uint l4_shift; uint l3_shift; uint l2_shift; uint l1_shift; uint pte_rpn_shift; ulong pte_rpn_mask; ulong pgd_masked_bits; ulong pud_masked_bits; ulong pmd_masked_bits; int vmemmap_cnt; int vmemmap_psize; ulong vmemmap_base; struct ppc64_vmemmap *vmemmap_list; ulong _page_pte; ulong _page_present; ulong _page_user; ulong _page_rw; ulong _page_guarded; ulong _page_coherent; ulong _page_no_cache; ulong _page_writethru; ulong _page_dirty; ulong _page_accessed; int (*is_kvaddr)(ulong); int (*is_vmaddr)(ulong); struct ppc64_opal opal; }; void ppc64_init(int); void ppc64_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to PowerPC architecture\n") #define KSYMS_START (0x1) #define VM_ORIG (0x2) #define VMEMMAP_AWARE (0x4) #define BOOK3E (0x8) #define PHYS_ENTRY_L4 (0x10) #define SWAP_ENTRY_L4 (0x20) /* * The flag bit for radix MMU in cpu_spec.mmu_features * in the kernel is also 0x40. 
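 *
 * ppc64 initialization mirrors that bit into machdep->flags, so
 * MMU-dependent code paths reduce to a plain flag test, e.g.:
 *
 *	if (machdep->flags & RADIX_MMU)
 *		... use the radix page-table geometry ...
 *	else
 *		... assume the hash-MMU layout ...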
*/ #define RADIX_MMU (0x40) #define OPAL_FW (0x80) #define REGION_SHIFT (60UL) #define REGION_ID(addr) (((unsigned long)(addr)) >> REGION_SHIFT) #define VMEMMAP_REGION_ID (0xfUL) #endif /* * ppc.c */ #ifdef PPC void ppc_init(int); void ppc_dump_machdep_table(ulong); void ppc_relocate_nt_prstatus_percpu(void **, uint *); #define display_idt_table() \ error(FATAL, "-d option is not applicable to PowerPC architecture\n") #define KSYMS_START (0x1) /* This should match PPC_FEATURE_BOOKE from include/asm-powerpc/cputable.h */ #define CPU_BOOKE (0x00008000) #else #define ppc_relocate_nt_prstatus_percpu(X,Y) do {} while (0) #endif /* * lkcd_fix_mem.c */ struct _dump_header_asm_s; struct _dump_header_s; ulong get_lkcd_switch_stack(ulong); int fix_addr_v8(struct _dump_header_asm_s *); int lkcd_dump_init_v8_arch(struct _dump_header_s *dh); int fix_addr_v7(int); int get_lkcd_regs_for_cpu_arch(int cpu, ulong *eip, ulong *esp); int lkcd_get_kernel_start_v8(ulong *addr); /* * lkcd_v8.c */ int get_lkcd_regs_for_cpu_v8(struct bt_info *bt, ulong *eip, ulong *esp); /* * ia64.c */ #ifdef IA64 void ia64_init(int); void ia64_dump_machdep_table(ulong); void ia64_dump_line_number(ulong); ulong ia64_get_switch_stack(ulong); void ia64_exception_frame(ulong, struct bt_info *bt); ulong ia64_PTOV(ulong); ulong ia64_VTOP(ulong); int ia64_IS_VMALLOC_ADDR(ulong); #define display_idt_table() \ error(FATAL, "-d option TBD on ia64 architecture\n"); int ia64_in_init_stack(ulong addr); int ia64_in_mca_stack_hyper(ulong addr, struct bt_info *bt); physaddr_t ia64_xen_kdump_p2m(struct xen_kdump_data *xkd, physaddr_t pseudo); #define OLD_UNWIND (0x1) /* CONFIG_IA64_NEW_UNWIND not turned on */ #define NEW_UNWIND (0x2) /* CONFIG_IA64_NEW_UNWIND turned on */ #define NEW_UNW_V1 (0x4) #define NEW_UNW_V2 (0x8) #define NEW_UNW_V3 (0x10) #define UNW_OUT_OF_SYNC (0x20) /* shared data structures out of sync */ #define UNW_READ (0x40) /* kernel unw has been read successfully */ #define MEM_LIMIT (0x80) #define UNW_PTREGS (0x100) #define UNW_R0 (0x200) #undef IA64_RBS_OFFSET #undef IA64_STK_OFFSET #define IA64_RBS_OFFSET ((SIZE(task_struct) + 15) & ~15) #define IA64_STK_OFFSET (STACKSIZE()) struct machine_specific { ulong cpu_data_address; ulong unimpl_va_mask; ulong unimpl_pa_mask; long unw_tables_offset; long unw_kernel_table_offset; long unw_pt_regs_offsets; int script_index; struct unw_script *script_cache; ulong script_cache_fills; ulong script_cache_hits; void *unw; ulong mem_limit; ulong kernel_region; ulong kernel_start; ulong phys_start; ulong vmalloc_start; char *ia64_memmap; uint64_t efi_memmap_size; uint64_t efi_memdesc_size; void (*unwind_init)(void); void (*unwind)(struct bt_info *); void (*dump_unwind_stats)(void); int (*unwind_debug)(ulong); int ia64_init_stack_size; }; /* * unwind.c */ void unwind_init_v1(void); void unwind_v1(struct bt_info *); void dump_unwind_stats_v1(void); int unwind_debug_v1(ulong); void unwind_init_v2(void); void unwind_v2(struct bt_info *); void dump_unwind_stats_v2(void); int unwind_debug_v2(ulong); void unwind_init_v3(void); void unwind_v3(struct bt_info *); void dump_unwind_stats_v3(void); int unwind_debug_v3(ulong); #endif /* IA64 */ /* * s390.c */ #ifdef S390 void s390_init(int); void s390_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to S390 architecture\n") #define KSYMS_START (0x1) #endif /* * s390_dump.c */ int is_s390_dump(char *); FILE* s390_dump_init(char *); int read_s390_dumpfile(int, void *, int, ulong, physaddr_t); int 
write_s390_dumpfile(int, void *, int, ulong, physaddr_t); uint s390_page_size(void); int s390_memory_used(void); int s390_free_memory(void); int s390_memory_dump(FILE *); ulong get_s390_panic_task(void); void get_s390_panicmsg(char *); /* * s390x.c */ #ifdef S390X void s390x_init(int); void s390x_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to S390X architecture\n") #define KSYMS_START (0x1) #endif /* * mips.c */ void mips_display_regs_from_elf_notes(int, FILE *); #ifdef MIPS void mips_init(int); void mips_dump_machdep_table(ulong); #define display_idt_table() \ error(FATAL, "-d option is not applicable to MIPS architecture\n") struct mips_regset { ulong regs[45]; }; struct mips_pt_regs_main { ulong regs[32]; ulong cp0_status; ulong hi; ulong lo; }; struct mips_pt_regs_cp0 { ulong cp0_badvaddr; ulong cp0_cause; ulong cp0_epc; }; #define KSYMS_START (0x1) #define PHYS_BASE (0x2) #define KVBASE_MASK (0x1ffffff) struct machine_specific { ulong phys_base; ulong vmalloc_start_addr; ulong modules_vaddr; ulong modules_end; ulong _page_present; ulong _page_read; ulong _page_write; ulong _page_accessed; ulong _page_modified; ulong _page_global; ulong _page_valid; ulong _page_no_read; ulong _page_no_exec; ulong _page_dirty; ulong _pfn_shift; #define _PAGE_PRESENT (machdep->machspec->_page_present) #define _PAGE_READ (machdep->machspec->_page_read) #define _PAGE_WRITE (machdep->machspec->_page_write) #define _PAGE_ACCESSED (machdep->machspec->_page_accessed) #define _PAGE_MODIFIED (machdep->machspec->_page_modified) #define _PAGE_GLOBAL (machdep->machspec->_page_global) #define _PAGE_VALID (machdep->machspec->_page_valid) #define _PAGE_NO_READ (machdep->machspec->_page_no_read) #define _PAGE_NO_EXEC (machdep->machspec->_page_no_exec) #define _PAGE_DIRTY (machdep->machspec->_page_dirty) #define _PFN_SHIFT (machdep->machspec->_pfn_shift) struct mips_regset *crash_task_regs; }; #endif /* MIPS */ /* * sparc64.c */ #ifdef SPARC64 void sparc64_init(int); void sparc64_dump_machdep_table(ulong); int sparc64_vmalloc_addr(ulong); #define display_idt_table() \ error(FATAL, "The -d option is not applicable to sparc64.\n") #endif /* * netdump.c */ int is_netdump(char *, ulong); uint netdump_page_size(void); int read_netdump(int, void *, int, ulong, physaddr_t); int write_netdump(int, void *, int, ulong, physaddr_t); int netdump_free_memory(void); int netdump_memory_used(void); int netdump_init(char *, FILE *); ulong get_netdump_panic_task(void); ulong get_netdump_switch_stack(ulong); FILE *set_netdump_fp(FILE *); int netdump_memory_dump(FILE *); void get_netdump_regs(struct bt_info *, ulong *, ulong *); int is_partial_netdump(void); void get_netdump_regs_x86(struct bt_info *, ulong *, ulong *); void get_netdump_regs_x86_64(struct bt_info *, ulong *, ulong *); void dump_registers_for_elf_dumpfiles(void); struct vmcore_data; struct vmcore_data *get_kdump_vmcore_data(void); int read_kdump(int, void *, int, ulong, physaddr_t); int write_kdump(int, void *, int, ulong, physaddr_t); int is_kdump(char *, ulong); int kdump_init(char *, FILE *); ulong get_kdump_panic_task(void); uint kdump_page_size(void); int kdump_free_memory(void); int kdump_memory_used(void); int kdump_memory_dump(FILE *); void get_kdump_regs(struct bt_info *, ulong *, ulong *); void xen_kdump_p2m_mfn(char *); int is_sadump_xen(void); void set_xen_phys_start(char *); ulong xen_phys_start(void); int xen_major_version(void); int xen_minor_version(void); int get_netdump_arch(void); int 
exist_regs_in_elf_notes(struct task_context *); void *get_regs_from_elf_notes(struct task_context *); void map_cpus_to_prstatus(void); int kdump_phys_base(ulong *); int kdump_set_phys_base(ulong); int arm_kdump_phys_base(ulong *); int arm_kdump_phys_end(ulong *); int is_proc_kcore(char *, ulong); int proc_kcore_init(FILE *, int); int read_proc_kcore(int, void *, int, ulong, physaddr_t); int write_proc_kcore(int, void *, int, ulong, physaddr_t); int kcore_memory_dump(FILE *); void dump_registers_for_qemu_mem_dump(void); void kdump_backup_region_init(void); void display_regs_from_elf_notes(int, FILE *); void display_ELF_note(int, int, void *, FILE *); void *netdump_get_prstatus_percpu(int); int kdump_kaslr_check(void); void display_vmcoredd_note(void *ptr, FILE *ofp); QEMUCPUState *kdump_get_qemucpustate(int); void kdump_device_dump_info(FILE *); void kdump_device_dump_extract(int, char *, FILE *); #define PRSTATUS_NOTE (1) #define QEMU_NOTE (2) /* * ramdump.c */ int is_ramdump(char *pattern); char *ramdump_to_elf(void); void ramdump_elf_output_file(char *opt); void ramdump_cleanup(void); int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr); void show_ramdump_files(void); void dump_ramdump_data(void); int is_ramdump_image(void); /* * diskdump.c */ int is_diskdump(char *); uint diskdump_page_size(void); int read_diskdump(int, void *, int, ulong, physaddr_t); int write_diskdump(int, void *, int, ulong, physaddr_t); int diskdump_free_memory(void); int diskdump_memory_used(void); int diskdump_init(char *, FILE *); ulong get_diskdump_panic_task(void); ulong get_diskdump_switch_stack(ulong); int diskdump_memory_dump(FILE *); FILE *set_diskdump_fp(FILE *); void get_diskdump_regs(struct bt_info *, ulong *, ulong *); int diskdump_phys_base(unsigned long *); int diskdump_set_phys_base(unsigned long); extern ulong *diskdump_flags; int is_partial_diskdump(void); int get_dump_level(void); int dumpfile_is_split(void); void show_split_dumpfiles(void); void x86_process_elf_notes(void *, unsigned long); void *diskdump_get_prstatus_percpu(int); void map_cpus_to_prstatus_kdump_cmprs(void); void diskdump_display_regs(int, FILE *); void process_elf32_notes(void *, ulong); void process_elf64_notes(void *, ulong); void dump_registers_for_compressed_kdump(void); int diskdump_kaslr_check(void); QEMUCPUState *diskdump_get_qemucpustate(int); void diskdump_device_dump_info(FILE *); void diskdump_device_dump_extract(int, char *, FILE *); /* * makedumpfile.c */ void check_flattened_format(char *file); int is_flattened_format(char *file); int read_flattened_format(int fd, off_t offset, void *buf, size_t size); void dump_flat_header(FILE *); /* * xendump.c */ int is_xendump(char *); int read_xendump(int, void *, int, ulong, physaddr_t); int write_xendump(int, void *, int, ulong, physaddr_t); uint xendump_page_size(void); int xendump_free_memory(void); int xendump_memory_used(void); int xendump_init(char *, FILE *); int xendump_memory_dump(FILE *); ulong get_xendump_panic_task(void); void get_xendump_regs(struct bt_info *, ulong *, ulong *); char *xc_core_mfn_to_page(ulong, char *); int xc_core_mfn_to_page_index(ulong); void xendump_panic_hook(char *); int read_xendump_hyper(int, void *, int, ulong, physaddr_t); struct xendump_data *get_xendump_data(void); /* * kvmdump.c */ int is_kvmdump(char *); int is_kvmdump_mapfile(char *); int kvmdump_init(char *, FILE *); int read_kvmdump(int, void *, int, ulong, physaddr_t); int write_kvmdump(int, void *, int, ulong, physaddr_t); int 
kvmdump_free_memory(void); int kvmdump_memory_used(void); int kvmdump_memory_dump(FILE *); void get_kvmdump_regs(struct bt_info *, ulong *, ulong *); ulong get_kvmdump_panic_task(void); int kvmdump_phys_base(unsigned long *); void kvmdump_display_regs(int, FILE *); void set_kvmhost_type(char *); void set_kvm_iohole(char *); struct kvm_register_set { union { uint32_t cs; uint32_t ss; uint32_t ds; uint32_t es; uint32_t fs; uint32_t gs; uint64_t ip; uint64_t flags; uint64_t regs[16]; } x86; }; int get_kvm_register_set(int, struct kvm_register_set *); /* * sadump.c */ int is_sadump(char *); uint sadump_page_size(void); int read_sadump(int, void *, int, ulong, physaddr_t); int write_sadump(int, void *, int, ulong, physaddr_t); int sadump_init(char *, FILE *); int sadump_is_diskset(void); ulong get_sadump_panic_task(void); ulong get_sadump_switch_stack(ulong); int sadump_memory_used(void); int sadump_free_memory(void); int sadump_memory_dump(FILE *); FILE *set_sadump_fp(FILE *); void get_sadump_regs(struct bt_info *bt, ulong *ipp, ulong *spp); void sadump_display_regs(int, FILE *); int sadump_phys_base(ulong *); int sadump_set_phys_base(ulong); void sadump_show_diskset(void); int sadump_is_zero_excluded(void); void sadump_set_zero_excluded(void); void sadump_unset_zero_excluded(void); struct sadump_data; struct sadump_data *get_sadump_data(void); int sadump_calc_kaslr_offset(ulong *); int sadump_get_cr3_idtr(ulong *, ulong *); /* * qemu.c */ int qemu_init(char *); /* * qemu-load.c */ int is_qemu_vm_file(char *); void dump_qemu_header(FILE *); /* * net.c */ void net_init(void); void dump_net_table(void); void dump_sockets_workhorse(ulong, ulong, struct reference *); /* * remote.c */ int is_remote_daemon(char *); physaddr_t get_remote_phys_base(physaddr_t, physaddr_t); physaddr_t remote_vtop(int, physaddr_t); int get_remote_regs(struct bt_info *, ulong *, ulong *); physaddr_t get_remote_cr3(int); void remote_fd_init(void); int get_remote_file(struct remote_file *); uint remote_page_size(void); int find_remote_module_objfile(struct load_module *lm, char *, char *); int remote_free_memory(void); int remote_memory_dump(int); int remote_memory_used(void); void remote_exit(void); int remote_execute(void); void remote_clear_pipeline(void); int remote_memory_read(int, char *, int, physaddr_t, int); /* * vmware_vmss.c */ int is_vmware_vmss(char *filename); int vmware_vmss_init(char *filename, FILE *ofp); uint vmware_vmss_page_size(void); int read_vmware_vmss(int, void *, int, ulong, physaddr_t); int write_vmware_vmss(int, void *, int, ulong, physaddr_t); void vmware_vmss_display_regs(int, FILE *); void get_vmware_vmss_regs(struct bt_info *, ulong *, ulong *); int vmware_vmss_memory_dump(FILE *); void dump_registers_for_vmss_dump(void); int vmware_vmss_valid_regs(struct bt_info *); int vmware_vmss_get_cr3_idtr(ulong *, ulong *); int vmware_vmss_phys_base(ulong *phys_base); int vmware_vmss_set_phys_base(ulong); /* * kaslr_helper.c */ int calc_kaslr_offset(ulong *, ulong *); /* * gnu_binutils.c */ /* NO LONGER IN USE */ /* * test.c */ void cmd_template(void); void foreach_test(ulong, ulong); /* * va_server.c */ int mclx_page_size(void); int vas_memory_used(void); int vas_memory_dump(FILE *); int vas_free_memory(char *); void set_vas_debug(ulong); size_t vas_write(void *, size_t); int va_server_init(char *, ulong *, ulong *, ulong *); size_t vas_read(void *, size_t); int vas_lseek(ulong, int); /* * lkcd_x86_trace.c */ int lkcd_x86_back_trace(struct bt_info *, int, FILE *); /* * lkcd_common.c */ int 
lkcd_dump_init(FILE *, int, char *); ulong get_lkcd_panic_task(void); void get_lkcd_panicmsg(char *); int is_lkcd_compressed_dump(char *); void dump_lkcd_environment(ulong); int lkcd_lseek(physaddr_t); long lkcd_read(void *, long); void set_lkcd_debug(ulong); FILE *set_lkcd_fp(FILE *); uint lkcd_page_size(void); int lkcd_memory_used(void); int lkcd_memory_dump(FILE *); int lkcd_free_memory(void); void lkcd_print(char *, ...); void set_remote_lkcd_panic_data(ulong, char *); void set_lkcd_nohash(void); int lkcd_load_dump_page_header(void *, ulong); void lkcd_dumpfile_complaint(uint32_t, uint32_t, int); int set_mb_benchmark(ulong); ulonglong fix_lkcd_address(ulonglong); int lkcd_get_kernel_start(ulong *addr); int get_lkcd_regs_for_cpu(struct bt_info *bt, ulong *eip, ulong *esp); /* * lkcd_v1.c */ int lkcd_dump_init_v1(FILE *, int); void dump_dump_page_v1(char *, void *); void dump_lkcd_environment_v1(ulong); uint32_t get_dp_size_v1(void); uint32_t get_dp_flags_v1(void); uint64_t get_dp_address_v1(void); /* * lkcd_v2_v3.c */ int lkcd_dump_init_v2_v3(FILE *, int); void dump_dump_page_v2_v3(char *, void *); void dump_lkcd_environment_v2_v3(ulong); uint32_t get_dp_size_v2_v3(void); uint32_t get_dp_flags_v2_v3(void); uint64_t get_dp_address_v2_v3(void); /* * lkcd_v5.c */ int lkcd_dump_init_v5(FILE *, int); void dump_dump_page_v5(char *, void *); void dump_lkcd_environment_v5(ulong); uint32_t get_dp_size_v5(void); uint32_t get_dp_flags_v5(void); uint64_t get_dp_address_v5(void); /* * lkcd_v7.c */ int lkcd_dump_init_v7(FILE *, int, char *); void dump_dump_page_v7(char *, void *); void dump_lkcd_environment_v7(ulong); uint32_t get_dp_size_v7(void); uint32_t get_dp_flags_v7(void); uint64_t get_dp_address_v7(void); /* * lkcd_v8.c */ int lkcd_dump_init_v8(FILE *, int, char *); void dump_dump_page_v8(char *, void *); void dump_lkcd_environment_v8(ulong); uint32_t get_dp_size_v8(void); uint32_t get_dp_flags_v8(void); uint64_t get_dp_address_v8(void); #ifdef LKCD_COMMON /* * Until they differ across versions, these remain usable in the common * routines in lkcd_common.c */ #define LKCD_DUMP_MAGIC_NUMBER (0xa8190173618f23edULL) #define LKCD_DUMP_MAGIC_LIVE (0xa8190173618f23cdULL) #define LKCD_DUMP_V1 (0x1) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V2 (0x2) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V3 (0x3) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V5 (0x5) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V6 (0x6) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V7 (0x7) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V8 (0x8) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V9 (0x9) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_V10 (0xa) /* DUMP_VERSION_NUMBER */ #define LKCD_DUMP_VERSION_NUMBER_MASK (0xf) #define LKCD_DUMP_RAW (0x1) /* DUMP_[DH_]RAW */ #define LKCD_DUMP_COMPRESSED (0x2) /* DUMP_[DH_]COMPRESSED */ #define LKCD_DUMP_END (0x4) /* DUMP_[DH_]END */ #define LKCD_DUMP_COMPRESS_NONE (0x0) /* DUMP_COMPRESS_NONE */ #define LKCD_DUMP_COMPRESS_RLE (0x1) /* DUMP_COMPRESS_RLE */ #define LKCD_DUMP_COMPRESS_GZIP (0x2) /* DUMP_COMPRESS_GZIP */ #define LKCD_DUMP_MCLX_V0 (0x80000000) /* MCLX mod of LKCD */ #define LKCD_DUMP_MCLX_V1 (0x40000000) /* Extra page header data */ #define LKCD_OFFSET_TO_FIRST_PAGE (65536) #define MCLX_PAGE_HEADERS (4096) #define MCLX_V1_PAGE_HEADER_CACHE ((sizeof(uint64_t)) * MCLX_PAGE_HEADERS) /* * lkcd_load_dump_page_header() return values */ #define LKCD_DUMPFILE_OK (0) #define LKCD_DUMPFILE_EOF (1) #define LKCD_DUMPFILE_END (2) /* * Common handling of LKCD dump environment */ #define 
LKCD_CACHED_PAGES (16) #define LKCD_PAGE_HASH (32) #define LKCD_DUMP_HEADER_ONLY (1) /* arguments to lkcd_dump_environment */ #define LKCD_DUMP_PAGE_ONLY (2) #define LKCD_VALID (0x1) /* flags */ #define LKCD_REMOTE (0x2) #define LKCD_NOHASH (0x4) #define LKCD_MCLX (0x8) #define LKCD_BAD_DUMP (0x10) struct page_hash_entry { uint32_t pg_flags; uint64_t pg_addr; off_t pg_hdr_offset; struct page_hash_entry *next; }; struct page_desc { off_t offset; /* lseek offset in dump file */ }; struct physmem_zone { uint64_t start; struct page_desc *pages; }; struct fix_addrs { ulong task; ulong saddr; ulong sw; }; struct lkcd_environment { int fd; /* dumpfile file descriptor */ ulong flags; /* flags from above */ ulong debug; /* shadow of pc->debug */ FILE *fp; /* abstracted fp for fprintf */ void *dump_header; /* header stash, v1 or v2 */ void *dump_header_asm; /* architecture specific header for v2 */ void *dump_header_asm_smp; /* architecture specific header for v7 & v8 */ void *dump_page; /* current page header holder */ uint32_t version; /* version number of this dump */ uint32_t page_size; /* size of a Linux memory page */ int page_shift; /* byte address to page */ int bits; /* processor bitsize */ ulong panic_task; /* panic task address */ char *panic_string; /* pointer to stashed panic string */ uint32_t compression; /* compression type */ uint32_t (*get_dp_size)(void); /* returns current page's dp_size */ uint32_t (*get_dp_flags)(void); /* returns current page's dp_flags */ uint64_t (*get_dp_address)(void); /* returns current page's dp_address*/ size_t page_header_size; /* size of version's page header */ unsigned long curpos; /* offset into current page */ uint64_t curpaddr; /* current page's physical address */ off_t curhdroffs; /* current page's header offset */ char *curbufptr; /* pointer to uncompressed page buffer */ uint64_t kvbase; /* physical-to-LKCD page address format*/ char *page_cache_buf; /* base of cached buffer pages */ char *compressed_page; /* copy of compressed page data */ int evict_index; /* next page to evict */ ulong evictions; /* total evictions done */ struct page_cache_hdr { /* header for each cached page */ uint32_t pg_flags; uint64_t pg_addr; char *pg_bufptr; ulong pg_hit_count; } page_cache_hdr[LKCD_CACHED_PAGES]; struct page_hash_entry *page_hash; ulong total_pages; ulong benchmark_pages; ulong benchmarks_done; off_t *mb_hdr_offsets; ulong total_reads; ulong cached_reads; ulong hashed_reads; ulong hashed; ulong compressed; ulong raw; /* lkcd_v7 additions */ char *dumpfile_index; /* array of offsets for each page */ int ifd; /* index file for dump (LKCD V7+) */ long memory_pages; /* Maximum index of dump pages */ off_t page_offset_max; /* Offset of page with greatest offset seen so far */ long page_index_max; /* Index of page with greatest offset seen so far */ off_t *page_offsets; /* Pointer to huge array with seek offsets */ /* NB: There are no holes in the array */ struct physmem_zone *zones; /* Array of physical memory zones */ int num_zones; /* Number of zones initialized */ int max_zones; /* Size of the zones array */ long zoned_offsets; /* Number of stored page offsets */ uint64_t zone_mask; int zone_shift; int fix_addr_num; /* Number of active stacks to switch to saved values */ struct fix_addrs *fix_addr; /* Array of active stacks to switch to saved values */ }; #define ZONE_ALLOC 128 #define ZONE_SIZE (MEGABYTES(512)) #define MEGABYTE_ALIGNED(vaddr) (!((uint64_t)(vaddr) & MEGABYTE_MASK)) #define LKCD_PAGE_HASH_INDEX(paddr) \ (((paddr) >> lkcd->page_shift) %
LKCD_PAGE_HASH) #define LKCD_PAGES_PER_MEGABYTE() (MEGABYTES(1) / lkcd->page_size) #define LKCD_PAGE_MEGABYTE(page) ((page) / LKCD_PAGES_PER_MEGABYTE()) #define LKCD_BENCHMARKS_DONE() (lkcd->benchmarks_done >= lkcd->benchmark_pages) #define LKCD_VALID_PAGE(flags) ((flags) & LKCD_VALID) extern struct lkcd_environment *lkcd; #define LKCD_DEBUG(x) (lkcd->debug >= (x)) #undef BITS #undef BITS32 #undef BITS64 #define BITS() (lkcd->bits) #define BITS32() (lkcd->bits == 32) #define BITS64() (lkcd->bits == 64) #endif /* LKCD_COMMON */ /* * gdb_interface.c */ void gdb_main_loop(int, char **); void display_gdb_banner(void); void get_gdb_version(void); void gdb_session_init(void); void gdb_interface(struct gnu_request *); int gdb_pass_through(char *, FILE *, ulong); int gdb_readmem_callback(ulong, void *, int, int); int gdb_line_number_callback(ulong, ulong, ulong); int gdb_print_callback(ulong); void gdb_error_hook(void); void restore_gdb_sanity(void); int is_gdb_command(int, ulong); char *gdb_command_string(int, char *, int); void dump_gnu_request(struct gnu_request *, int); int gdb_CRASHDEBUG(ulong); void dump_gdb_data(void); void update_gdb_hooks(void); void gdb_readnow_warning(void); int gdb_set_crash_scope(ulong, char *); extern int *gdb_output_format; extern unsigned int *gdb_print_max; extern int *gdb_prettyprint_structs; extern int *gdb_prettyprint_arrays; extern int *gdb_repeat_count_threshold; extern int *gdb_stop_print_at_null; extern unsigned int *gdb_output_radix; /* * gdb/top.c */ extern void execute_command (char *, int); #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) extern void (*command_loop_hook)(void); extern void (*error_hook)(void); #else extern void (*deprecated_command_loop_hook)(void); /* * gdb/exceptions.c */ extern void (*error_hook)(void); #endif /* * gdb/symtab.c */ extern void gdb_command_funnel(struct gnu_request *); /* * gdb/symfile.c */ #if defined(GDB_6_0) || defined(GDB_6_1) struct objfile; extern void (*target_new_objfile_hook)(struct objfile *); #endif /* * gdb/valprint.c */ extern unsigned output_radix; #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) extern int output_format; extern int prettyprint_structs; extern int prettyprint_arrays; extern int repeat_count_threshold; extern unsigned int print_max; extern int stop_print_at_null; #endif #ifdef GDB_7_6 /* * gdb/cleanups.c */ struct cleanup; extern struct cleanup *all_cleanups(void); extern void do_cleanups(struct cleanup *); #else /* * gdb/utils.c */ extern void do_cleanups(void *); #endif /* * gdb/version.c */ extern char *version; /* * gdb/disasm.c */ #ifdef GDB_5_3 extern int gdb_disassemble_from_exec; #endif /* * readline/readline.c */ #ifdef GDB_5_3 extern char *readline(char *); #else extern char *readline(const char *); #endif extern int rl_editing_mode; /* * readline/history.c */ extern int history_offset; /* * external gdb routines */ extern int gdb_main_entry(int, char **); #ifdef GDB_5_3 extern unsigned long calc_crc32(unsigned long, unsigned char *, size_t); #else extern unsigned long gnu_debuglink_crc32 (unsigned long, unsigned char *, size_t); #endif extern int have_partial_symbols(void); extern int have_full_symbols(void); #if defined(X86) || defined(X86_64) || defined(IA64) #define XEN_HYPERVISOR_ARCH #endif #endif /* !GDB_COMMON */ crash-7.2.8/unwind.h0000664000000000000000000005305613614623427013026 0ustar rootroot/* * Copyright (C) 1999-2000 Hewlett-Packard Co * Copyright (C) 1999-2000 David Mosberger-Tang */ /* * Copyright (C) 1998, 1999 Hewlett-Packard Co * 
Copyright (C) 1998, 1999 David Mosberger-Tang */ /* * unwind.h * * Copyright (C) 2002, 2003, 2004, 2005 David Anderson * Copyright (C) 2002, 2003, 2004, 2005 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Adapted from: * * include/asm-ia64/fpu.h (kernel-2.4.18-6.23) * include/asm-ia64/unwind.h (kernel-2.4.18-6.23) */ #ifndef _ASM_IA64_FPU_H #define _ASM_IA64_FPU_H struct ia64_fpreg { union { unsigned long bits[2]; } u; } __attribute__ ((aligned (16))); #endif /* _ASM_IA64_FPU_H */ #ifndef _ASM_IA64_UNWIND_H #define _ASM_IA64_UNWIND_H /* * A simple API for unwinding kernel stacks. This is used for * debugging and error reporting purposes. The kernel doesn't need * full-blown stack unwinding with all the bells and whistles, so there * is not much point in implementing the full IA-64 unwind API (though * it would of course be possible to implement the kernel API on top * of it). */ struct task_struct; /* forward declaration */ struct switch_stack; /* forward declaration */ enum unw_application_register { UNW_AR_BSP, UNW_AR_BSPSTORE, UNW_AR_PFS, UNW_AR_RNAT, UNW_AR_UNAT, UNW_AR_LC, UNW_AR_EC, UNW_AR_FPSR, UNW_AR_RSC, UNW_AR_CCV, UNW_AR_CSD, UNW_AR_SSD }; /* * The following declarations are private to the unwind * implementation: */ struct unw_stack { unsigned long limit; unsigned long top; }; #define UNW_FLAG_INTERRUPT_FRAME (1UL << 0) /* * No user of this module should ever access this structure directly * as it is subject to change. It is declared here solely so we can * use automatic variables. */ struct unw_frame_info { struct unw_stack regstk; struct unw_stack memstk; unsigned int flags; short hint; short prev_script; /* current frame info: */ unsigned long bsp; /* backing store pointer value */ unsigned long sp; /* stack pointer value */ unsigned long psp; /* previous sp value */ unsigned long ip; /* instruction pointer value */ unsigned long pr; /* current predicate values */ unsigned long *cfm_loc; /* cfm save location (or NULL) */ #if defined(UNWIND_V2) || defined(UNWIND_V3) unsigned long pt; /* struct pt_regs location */ #endif struct task_struct *task; struct switch_stack *sw; /* preserved state: */ unsigned long *bsp_loc; /* previous bsp save location */ unsigned long *bspstore_loc; unsigned long *pfs_loc; unsigned long *rnat_loc; unsigned long *rp_loc; unsigned long *pri_unat_loc; unsigned long *unat_loc; unsigned long *pr_loc; unsigned long *lc_loc; unsigned long *fpsr_loc; struct unw_ireg { unsigned long *loc; struct unw_ireg_nat { long type : 3; /* enum unw_nat_type */ signed long off : 61; /* NaT word is at loc+nat.off */ } nat; } r4, r5, r6, r7; unsigned long *b1_loc, *b2_loc, *b3_loc, *b4_loc, *b5_loc; struct ia64_fpreg *f2_loc, *f3_loc, *f4_loc, *f5_loc, *fr_loc[16]; }; /* * The official API follows below: */ /* * Initialize unwind support.
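* * An illustrative sketch, not from the original header, of how this API is * typically driven (process_ip() is a hypothetical consumer): * * struct unw_frame_info info; * unsigned long ip; * * unw_init_from_blocked_task(&info, task); * do { * unw_get_ip(&info, &ip); * process_ip(ip); * } while (unw_unwind(&info) >= 0); * * unw_init() itself is called once at startup; unw_unwind() returns a * negative value once no further frame can be recovered.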
*/ extern void unw_init (void); extern void unw_create_gate_table (void); extern void *unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp, const void *table_start, const void *table_end); extern void unw_remove_unwind_table (void *handle); /* * Prepare to unwind blocked task t. */ #ifndef REDHAT extern void unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t); extern void unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw); #endif /* !REDHAT */ /* * Prepare to unwind the currently running thread. */ extern void unw_init_running (void (*callback)(struct unw_frame_info *info, void *arg), void *arg); /* * Unwind to the previous frame. Returns 0 if successful, negative * number in case of an error. */ #ifndef REDHAT extern int unw_unwind (struct unw_frame_info *info); #endif /* !REDHAT */ /* * Unwind until the return pointer is in user-land (or until an error * occurs). Returns 0 if successful, negative number in case of * error. */ extern int unw_unwind_to_user (struct unw_frame_info *info); #define unw_is_intr_frame(info) (((info)->flags & UNW_FLAG_INTERRUPT_FRAME) != 0) static inline int unw_get_ip (struct unw_frame_info *info, unsigned long *valp) { *valp = (info)->ip; return 0; } static inline int unw_get_sp (struct unw_frame_info *info, unsigned long *valp) { *valp = (info)->sp; return 0; } static inline int unw_get_psp (struct unw_frame_info *info, unsigned long *valp) { *valp = (info)->psp; return 0; } static inline int unw_get_bsp (struct unw_frame_info *info, unsigned long *valp) { *valp = (info)->bsp; return 0; } static inline int unw_get_cfm (struct unw_frame_info *info, unsigned long *valp) { *valp = *(info)->cfm_loc; return 0; } static inline int unw_set_cfm (struct unw_frame_info *info, unsigned long val) { *(info)->cfm_loc = val; return 0; } static inline int unw_get_rp (struct unw_frame_info *info, unsigned long *val) { if (!info->rp_loc) return -1; *val = *info->rp_loc; return 0; } #ifdef UNWIND_V1 extern int unw_access_gr_v1 (struct unw_frame_info *, int, unsigned long *, char *, int); extern int unw_access_br_v1 (struct unw_frame_info *, int, unsigned long *, int); extern int unw_access_fr_v1 (struct unw_frame_info *, int, struct ia64_fpreg *, int); extern int unw_access_ar_v1 (struct unw_frame_info *, int, unsigned long *, int); extern int unw_access_pr_v1 (struct unw_frame_info *, unsigned long *, int); #define unw_access_gr unw_access_gr_v1 #define unw_access_br unw_access_br_v1 #define unw_access_fr unw_access_fr_v1 #define unw_access_ar unw_access_ar_v1 #define unw_access_pr unw_access_pr_v1 #endif #ifdef UNWIND_V2 extern int unw_access_gr_v2 (struct unw_frame_info *, int, unsigned long *, char *, int); extern int unw_access_br_v2 (struct unw_frame_info *, int, unsigned long *, int); extern int unw_access_fr_v2 (struct unw_frame_info *, int, struct ia64_fpreg *, int); extern int unw_access_ar_v2 (struct unw_frame_info *, int, unsigned long *, int); extern int unw_access_pr_v2 (struct unw_frame_info *, unsigned long *, int); #define unw_access_gr unw_access_gr_v2 #define unw_access_br unw_access_br_v2 #define unw_access_fr unw_access_fr_v2 #define unw_access_ar unw_access_ar_v2 #define unw_access_pr unw_access_pr_v2 #endif #ifdef UNWIND_V3 extern int unw_access_gr_v3 (struct unw_frame_info *, int, unsigned long *, char *, int); extern int unw_access_br_v3 (struct unw_frame_info *, int, unsigned long *, int); extern int unw_access_fr_v3 (struct unw_frame_info *, int,
struct ia64_fpreg *, int); extern int unw_access_ar_v3 (struct unw_frame_info *, int, unsigned long *, int); extern int unw_access_pr_v3 (struct unw_frame_info *, unsigned long *, int); #define unw_access_gr unw_access_gr_v3 #define unw_access_br unw_access_br_v3 #define unw_access_fr unw_access_fr_v3 #define unw_access_ar unw_access_ar_v3 #define unw_access_pr unw_access_pr_v3 #endif static inline int unw_set_gr (struct unw_frame_info *i, int n, unsigned long v, char nat) { return unw_access_gr(i, n, &v, &nat, 1); } static inline int unw_set_br (struct unw_frame_info *i, int n, unsigned long v) { return unw_access_br(i, n, &v, 1); } static inline int unw_set_fr (struct unw_frame_info *i, int n, struct ia64_fpreg v) { return unw_access_fr(i, n, &v, 1); } static inline int unw_set_ar (struct unw_frame_info *i, int n, unsigned long v) { return unw_access_ar(i, n, &v, 1); } static inline int unw_set_pr (struct unw_frame_info *i, unsigned long v) { return unw_access_pr(i, &v, 1); } #define unw_get_gr(i,n,v,nat) unw_access_gr(i,n,v,nat,0) #define unw_get_br(i,n,v) unw_access_br(i,n,v,0) #define unw_get_fr(i,n,v) unw_access_fr(i,n,v,0) #define unw_get_ar(i,n,v) unw_access_ar(i,n,v,0) #define unw_get_pr(i,v) unw_access_pr(i,v,0) #ifdef UNWIND_V1 struct switch_stack { unsigned long caller_unat; /* user NaT collection register (preserved) */ unsigned long ar_fpsr; /* floating-point status register */ struct ia64_fpreg f2; /* preserved */ struct ia64_fpreg f3; /* preserved */ struct ia64_fpreg f4; /* preserved */ struct ia64_fpreg f5; /* preserved */ struct ia64_fpreg f10; /* scratch, but untouched by kernel */ struct ia64_fpreg f11; /* scratch, but untouched by kernel */ struct ia64_fpreg f12; /* scratch, but untouched by kernel */ struct ia64_fpreg f13; /* scratch, but untouched by kernel */ struct ia64_fpreg f14; /* scratch, but untouched by kernel */ struct ia64_fpreg f15; /* scratch, but untouched by kernel */ struct ia64_fpreg f16; /* preserved */ struct ia64_fpreg f17; /* preserved */ struct ia64_fpreg f18; /* preserved */ struct ia64_fpreg f19; /* preserved */ struct ia64_fpreg f20; /* preserved */ struct ia64_fpreg f21; /* preserved */ struct ia64_fpreg f22; /* preserved */ struct ia64_fpreg f23; /* preserved */ struct ia64_fpreg f24; /* preserved */ struct ia64_fpreg f25; /* preserved */ struct ia64_fpreg f26; /* preserved */ struct ia64_fpreg f27; /* preserved */ struct ia64_fpreg f28; /* preserved */ struct ia64_fpreg f29; /* preserved */ struct ia64_fpreg f30; /* preserved */ struct ia64_fpreg f31; /* preserved */ unsigned long r4; /* preserved */ unsigned long r5; /* preserved */ unsigned long r6; /* preserved */ unsigned long r7; /* preserved */ unsigned long b0; /* so we can force a direct return in copy_thread */ unsigned long b1; unsigned long b2; unsigned long b3; unsigned long b4; unsigned long b5; unsigned long ar_pfs; /* previous function state */ unsigned long ar_lc; /* loop counter (preserved) */ unsigned long ar_unat; /* NaT bits for r4-r7 */ unsigned long ar_rnat; /* RSE NaT collection register */ unsigned long ar_bspstore; /* RSE dirty base (preserved) */ unsigned long pr; /* 64 predicate registers (1 bit each) */ }; struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function 
state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b6; /* scratch */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long r14; /* scratch */ unsigned long r15; /* scratch */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long b0; /* return pointer (bp) */ unsigned long b7; /* scratch */ /* * Floating point registers that the kernel considers * scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ }; #endif /* UNWIND_V1 */ #ifdef UNWIND_V2 struct switch_stack { unsigned long caller_unat; /* user NaT collection register (preserved) */ unsigned long ar_fpsr; /* floating-point status register */ struct ia64_fpreg f2; /* preserved */ struct ia64_fpreg f3; /* preserved */ struct ia64_fpreg f4; /* preserved */ struct ia64_fpreg f5; /* preserved */ struct ia64_fpreg f10; /* scratch, but untouched by kernel */ struct ia64_fpreg f11; /* scratch, but untouched by kernel */ struct ia64_fpreg f12; /* scratch, but untouched by kernel */ struct ia64_fpreg f13; /* scratch, but untouched by kernel */ struct ia64_fpreg f14; /* scratch, but untouched by kernel */ struct ia64_fpreg f15; /* scratch, but untouched by kernel */ struct ia64_fpreg f16; /* preserved */ struct ia64_fpreg f17; /* preserved */ struct ia64_fpreg f18; /* preserved */ struct ia64_fpreg f19; /* preserved */ struct ia64_fpreg f20; /* preserved */ struct ia64_fpreg f21; /* preserved */ struct ia64_fpreg f22; /* preserved */ struct ia64_fpreg f23; /* preserved */ struct ia64_fpreg f24; /* preserved */ struct ia64_fpreg f25; /* preserved */ struct ia64_fpreg f26; /* preserved */ struct ia64_fpreg f27; /* preserved */ struct ia64_fpreg f28; /* preserved */ struct ia64_fpreg f29; /* preserved */ struct ia64_fpreg f30; /* preserved */ struct ia64_fpreg f31; /* preserved */ unsigned long r4; /* preserved */ unsigned long r5; /* preserved */ unsigned long r6; /* preserved */ unsigned long r7; /* preserved */ unsigned long b0; /* so we can force a direct return in copy_thread */ unsigned long b1; unsigned long b2; unsigned long b3; unsigned long b4; unsigned long b5; unsigned long ar_pfs; /* previous function state */ 
unsigned long ar_lc; /* loop counter (preserved) */ unsigned long ar_unat; /* NaT bits for r4-r7 */ unsigned long ar_rnat; /* RSE NaT collection register */ unsigned long ar_bspstore; /* RSE dirty base (preserved) */ unsigned long pr; /* 64 predicate registers (1 bit each) */ }; struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b6; /* scratch */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long r14; /* scratch */ unsigned long r15; /* scratch */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long b0; /* return pointer (bp) */ unsigned long b7; /* scratch */ /* * Floating point registers that the kernel considers * scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ }; #endif /* UNWIND_V2 */ #ifdef UNWIND_V3 struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate 
registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. */ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * This structure contains the additional registers that need to be * preserved across a context switch. This generally consists of * "preserved" registers. */ struct switch_stack { unsigned long caller_unat; /* user NaT collection register (preserved) */ unsigned long ar_fpsr; /* floating-point status register */ struct ia64_fpreg f2; /* preserved */ struct ia64_fpreg f3; /* preserved */ struct ia64_fpreg f4; /* preserved */ struct ia64_fpreg f5; /* preserved */ struct ia64_fpreg f12; /* scratch, but untouched by kernel */ struct ia64_fpreg f13; /* scratch, but untouched by kernel */ struct ia64_fpreg f14; /* scratch, but untouched by kernel */ struct ia64_fpreg f15; /* scratch, but untouched by kernel */ struct ia64_fpreg f16; /* preserved */ struct ia64_fpreg f17; /* preserved */ struct ia64_fpreg f18; /* preserved */ struct ia64_fpreg f19; /* preserved */ struct ia64_fpreg f20; /* preserved */ struct ia64_fpreg f21; /* preserved */ struct ia64_fpreg f22; /* preserved */ struct ia64_fpreg f23; /* preserved */ struct ia64_fpreg f24; /* preserved */ struct ia64_fpreg f25; /* preserved */ struct ia64_fpreg f26; /* preserved */ struct ia64_fpreg f27; /* preserved */ struct ia64_fpreg f28; /* preserved */ struct ia64_fpreg f29; /* preserved */ struct ia64_fpreg f30; /* preserved */ struct ia64_fpreg f31; /* preserved */ unsigned long r4; /* preserved */ unsigned long r5; /* preserved */ unsigned long r6; /* preserved */ unsigned long r7; /* preserved */ unsigned long b0; /* so we can force a direct return in copy_thread */ unsigned long b1; unsigned long b2; unsigned long b3; unsigned long b4; unsigned long b5; unsigned long ar_pfs; /* previous function state */ unsigned long ar_lc; /* loop counter (preserved) */ unsigned long ar_unat; /* NaT bits for r4-r7 */ unsigned long ar_rnat; /* RSE NaT collection register */ unsigned long ar_bspstore; /* RSE dirty base (preserved) */ unsigned long pr; /* 64 predicate registers (1 bit each) */ }; #endif /* UNWIND_V3 */ #endif /* _ASM_UNWIND_H */ crash-7.2.8/test.c0000775000000000000000000000475513614623427012475 0ustar
rootroot/* test.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2011 David Anderson * Copyright (C) 2002, 2003, 2004, 2005, 2011 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include <getopt.h> static struct option test_long_options[] = { {"no", no_argument, 0, 0}, {"req", required_argument, 0, 0}, {0, 0, 0, 0} }; /* * Test your stuff here first if you'd like. If anything's being done * below in this routine, consider it leftover trash... */ void cmd_test(void) { int c; int option_index; while ((c = getopt_long(argcnt, args, "", test_long_options, &option_index)) != EOF) { switch(c) { case 0: if (STREQ(test_long_options[option_index].name, "no")) fprintf(fp, "no argument\n"); if (STREQ(test_long_options[option_index].name, "req")) fprintf(fp, "required argument: %s\n", optarg); break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { ; optind++; } } /* * Scratch routine for testing a feature on a per-task basis by entering * the "foreach test" command. Like cmd_test(), anything that's being done * below in this routine can be considered trash. */ void foreach_test(ulong task, ulong flags) { } /* * Template for building a new command. */ void cmd_template(void) { int c; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { ; optind++; } } crash-7.2.8/kaslr_helper.c0000664000000000000000000003157413614623427014165 0ustar rootroot/* * kaslr_helper - helper for kaslr offset calculation * * Copyright (c) 2011 FUJITSU LIMITED * Copyright (c) 2018 Red Hat Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Authors: HATAYAMA Daisuke * INDOH Takao * Sergio Lopez */ #include "defs.h" #include #include #ifdef X86_64 /* * Get address of vector0 interrupt handler (Divide Error) from Interrupt * Descriptor Table.
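* * Illustration added for clarity (not in the original comment): a 64-bit IDT * gate scatters the handler address across three fields, so after reading the * 16-byte descriptor for vector 0 with readmem(), the virtual address is * reassembled as * * addr = ((ulong)offset_high << 32) + ((ulong)offset_middle << 16) + offset_low; * * which is exactly what the return statement below computes.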
*/ static ulong get_vec0_addr(ulong idtr) { struct gate_struct64 { uint16_t offset_low; uint16_t segment; uint32_t ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; uint16_t offset_middle; uint32_t offset_high; uint32_t zero1; } __attribute__((packed)) gate; readmem(idtr, PHYSADDR, &gate, sizeof(gate), "idt_table", FAULT_ON_ERROR); return ((ulong)gate.offset_high << 32) + ((ulong)gate.offset_middle << 16) + gate.offset_low; } /* * Parse a string of [size[KMG] ]offset[KMG] * Imported from the Linux kernel (lib/cmdline.c) */ static ulong memparse(char *ptr, char **retptr) { char *endptr; unsigned long long ret = strtoull(ptr, &endptr, 0); switch (*endptr) { case 'E': case 'e': ret <<= 10; case 'P': case 'p': ret <<= 10; case 'T': case 't': ret <<= 10; case 'G': case 'g': ret <<= 10; case 'M': case 'm': ret <<= 10; case 'K': case 'k': ret <<= 10; endptr++; default: break; } if (retptr) *retptr = endptr; return ret; } /* * Find "elfcorehdr=" in the kernel boot parameters and return the address * of elfcorehdr. */ static ulong get_elfcorehdr(ulong kaslr_offset) { char cmdline[BUFSIZE], *ptr; ulong cmdline_vaddr; ulong cmdline_paddr; ulong buf_vaddr, buf_paddr; char *end; ulong elfcorehdr_addr = 0, elfcorehdr_size = 0; int verbose = CRASHDEBUG(1)? 1: 0; cmdline_vaddr = st->saved_command_line_vmlinux + kaslr_offset; if (!kvtop(NULL, cmdline_vaddr, &cmdline_paddr, verbose)) return 0; if (CRASHDEBUG(1)) { fprintf(fp, "cmdline vaddr=%lx\n", cmdline_vaddr); fprintf(fp, "cmdline paddr=%lx\n", cmdline_paddr); } if (!readmem(cmdline_paddr, PHYSADDR, &buf_vaddr, sizeof(ulong), "saved_command_line", RETURN_ON_ERROR)) return 0; if (!kvtop(NULL, buf_vaddr, &buf_paddr, verbose)) return 0; if (CRASHDEBUG(1)) { fprintf(fp, "cmdline buffer vaddr=%lx\n", buf_vaddr); fprintf(fp, "cmdline buffer paddr=%lx\n", buf_paddr); } memset(cmdline, 0, BUFSIZE); if (!readmem(buf_paddr, PHYSADDR, cmdline, BUFSIZE, "saved_command_line", RETURN_ON_ERROR)) return 0; ptr = strstr(cmdline, "elfcorehdr="); if (!ptr) return 0; if (CRASHDEBUG(1)) fprintf(fp, "2nd kernel detected\n"); ptr += strlen("elfcorehdr="); elfcorehdr_addr = memparse(ptr, &end); if (*end == '@') { elfcorehdr_size = elfcorehdr_addr; elfcorehdr_addr = memparse(end + 1, &end); } if (CRASHDEBUG(1)) { fprintf(fp, "elfcorehdr_addr=%lx\n", elfcorehdr_addr); fprintf(fp, "elfcorehdr_size=%lx\n", elfcorehdr_size); } return elfcorehdr_addr; } /* * Get vmcoreinfo from elfcorehdr.
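* (An illustrative sketch of the assumed layout, added by way of example: the * core header's PT_NOTE segment is scanned note by note until an Elf64_Nhdr * named "VMCOREINFO" is found; the returned address and length describe that * note's descriptor, i.e. the key=value text parsed by the caller.)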
* Some code is imported from the Linux kernel (fs/proc/vmcore.c) */ static int get_vmcoreinfo(ulong elfcorehdr, ulong *addr, int *len) { unsigned char e_ident[EI_NIDENT]; Elf64_Ehdr ehdr; Elf64_Phdr phdr; Elf64_Nhdr nhdr; ulong ptr; ulong nhdr_offset = 0; int i; if (!readmem(elfcorehdr, PHYSADDR, e_ident, EI_NIDENT, "EI_NIDENT", RETURN_ON_ERROR)) return FALSE; if (e_ident[EI_CLASS] != ELFCLASS64) { error(INFO, "Only ELFCLASS64 is supported\n"); return FALSE; } if (!readmem(elfcorehdr, PHYSADDR, &ehdr, sizeof(ehdr), "Elf64_Ehdr", RETURN_ON_ERROR)) return FALSE; /* Sanity Check */ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || (ehdr.e_type != ET_CORE) || ehdr.e_ident[EI_CLASS] != ELFCLASS64 || ehdr.e_ident[EI_VERSION] != EV_CURRENT || ehdr.e_version != EV_CURRENT || ehdr.e_ehsize != sizeof(Elf64_Ehdr) || ehdr.e_phentsize != sizeof(Elf64_Phdr) || ehdr.e_phnum == 0) { error(INFO, "Invalid elf header\n"); return FALSE; } ptr = elfcorehdr + ehdr.e_phoff; for (i = 0; i < ehdr.e_phnum; i++) { ulong offset; char name[16]; if (!readmem(ptr, PHYSADDR, &phdr, sizeof(phdr), "Elf64_Phdr", RETURN_ON_ERROR)) return FALSE; ptr += sizeof(phdr); if (phdr.p_type != PT_NOTE) continue; offset = phdr.p_offset; if (!readmem(offset, PHYSADDR, &nhdr, sizeof(nhdr), "Elf64_Nhdr", RETURN_ON_ERROR)) return FALSE; offset += DIV_ROUND_UP(sizeof(Elf64_Nhdr), sizeof(Elf64_Word))* sizeof(Elf64_Word); memset(name, 0, sizeof(name)); if (!readmem(offset, PHYSADDR, name, sizeof(name), "Elf64_Nhdr name", RETURN_ON_ERROR)) return FALSE; if(!strcmp(name, "VMCOREINFO")) { nhdr_offset = offset; break; } } if (!nhdr_offset) return FALSE; *addr = nhdr_offset + DIV_ROUND_UP(nhdr.n_namesz, sizeof(Elf64_Word))* sizeof(Elf64_Word); *len = nhdr.n_descsz; if (CRASHDEBUG(1)) { fprintf(fp, "vmcoreinfo addr=%lx\n", *addr); fprintf(fp, "vmcoreinfo len=%d\n", *len); } return TRUE; } static int qemu_get_cr3_idtr(ulong *cr3, ulong *idtr) { QEMUCPUState *cpustat; if (DISKDUMP_DUMPFILE()) { cpustat = diskdump_get_qemucpustate(0); } else if (KDUMP_DUMPFILE()) { cpustat = kdump_get_qemucpustate(0); } else { return FALSE; } if (!cpustat) { return FALSE; } *cr3 = cpustat->cr[3]; *idtr = cpustat->idt.base; return TRUE; } /* * Check if the current kaslr_offset/phys_base is for the 1st kernel or the 2nd * kernel. If we are in the 2nd kernel, get kaslr_offset/phys_base from vmcoreinfo. * * 1. Get the command line and try to retrieve the "elfcorehdr=" boot parameter * 2. If "elfcorehdr=" is not found in the command line, we are in the 1st kernel. * There is nothing to do. * 3. If "elfcorehdr=" is found, we are in the 2nd kernel. Find vmcoreinfo * using "elfcorehdr=" and retrieve kaslr_offset/phys_base from vmcoreinfo.
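* * A hypothetical example (invented values, illustration only): a 2nd kernel * booted with "elfcorehdr=0x7f000000" leads step 3 to read the ELF core * header at physical address 0x7f000000, walk its PT_NOTE segment to the * VMCOREINFO note, and take kaslr_offset from its "KERNELOFFSET=" line and * phys_base from its "NUMBER(phys_base)=" line.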
*/ static int get_kaslr_offset_from_vmcoreinfo(ulong orig_kaslr_offset, ulong *kaslr_offset, ulong *phys_base) { ulong elfcorehdr_addr = 0; ulong vmcoreinfo_addr; int vmcoreinfo_len; char *buf, *pos; int ret = FALSE; /* Find "elfcorehdr=" in the kernel boot parameters */ elfcorehdr_addr = get_elfcorehdr(orig_kaslr_offset); if (!elfcorehdr_addr) return FALSE; /* Get vmcoreinfo from the address of "elfcorehdr=" */ if (!get_vmcoreinfo(elfcorehdr_addr, &vmcoreinfo_addr, &vmcoreinfo_len)) return FALSE; if (!vmcoreinfo_len) return FALSE; if (CRASHDEBUG(1)) fprintf(fp, "Find vmcoreinfo in kdump memory\n"); buf = GETBUF(vmcoreinfo_len); if (!readmem(vmcoreinfo_addr, PHYSADDR, buf, vmcoreinfo_len, "vmcoreinfo", RETURN_ON_ERROR)) goto quit; /* Get phys_base from vmcoreinfo */ pos = strstr(buf, "NUMBER(phys_base)="); if (!pos) goto quit; *phys_base = strtoull(pos + strlen("NUMBER(phys_base)="), NULL, 0); /* Get kaslr_offset from vmcoreinfo */ pos = strstr(buf, "KERNELOFFSET="); if (!pos) goto quit; *kaslr_offset = strtoull(pos + strlen("KERNELOFFSET="), NULL, 16); ret = TRUE; quit: FREEBUF(buf); return ret; } /* * Calculate kaslr_offset and phys_base * * kaslr_offset: * The difference between the original address in System.map or vmlinux and * the actual address placed randomly by the kaslr feature. To be more accurate, * kaslr_offset = actual address - original address * * phys_base: * Physical address where the kernel is placed. In other words, it's the * physical address of __START_KERNEL_map. This is also decided randomly by * kaslr. * * kaslr_offset and phys_base are calculated as follows: * * kaslr_offset: * 1) Get the IDTR and CR3 values from the dump header. * 2) Get the virtual address of the IDT from the IDTR value * --- (A) * 3) Translate (A) to a physical address using CR3, the upper 52 bits * of which point to the top of the page table. * --- (B) * 4) Get the address of the vector0 (Divide Error) interrupt handler from * the IDT, which is pointed to by (B). * --- (C) * 5) Get the address of the symbol "divide_error" from vmlinux * --- (D) * * Now we have two addresses: * (C)-> Actual address of "divide_error" * (D)-> Original address of "divide_error" in the vmlinux * * kaslr_offset can be calculated as the difference between these two * values. * * phys_base: * 1) Get the IDT virtual address from vmlinux * --- (E) * * So phys_base can be calculated using the relationship of the directly * mapped address. * * phys_base = * Physical address(B) - * (Virtual address(E) + kaslr_offset - __START_KERNEL_map) * * Note that the address (A) cannot be used instead of (E) because (A) is * not a direct map address, it's a fixed map address. * * This solution works in almost every case, but does not work in the * following cases. * * 1) If the dump is captured at an early stage of kernel boot, IDTR points * to the early IDT table (early_idts) instead of the normal IDT (idt_table). * 2) If the dump is captured while kdump is working, IDTR points to the * IDT table of the 2nd kernel, not the 1st kernel. * * The current implementation does not support case 1); it needs * enhancement in the future. For case 2), get kaslr_offset and * phys_base as follows. * * 1) Get kaslr_offset and phys_base using the above solution. * 2) Get the kernel boot parameters from "saved_command_line" * 3) If "elfcorehdr=" is not included in the boot parameters, we are in the * 1st kernel and there is nothing more to do. * 4) If "elfcorehdr=" is included in the boot parameters, we are in the 2nd * kernel. Retrieve vmcoreinfo from the address of "elfcorehdr=" and * get kaslr_offset and phys_base from vmcoreinfo.
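* * A worked example with invented numbers (illustration only, taking the * conventional x86_64 __START_KERNEL_map value of 0xffffffff80000000): if * "divide_error" sits at 0xffffffff81000b60 in vmlinux (D) but the IDT's * vector 0 entry yields 0xffffffff8e200b60 (C), then * * kaslr_offset = (C) - (D) = 0xd200000 * * and if the IDT translates to physical address 0x6d610000 (B) while * "idt_table" sits at 0xffffffff82410000 (E) in vmlinux, then * * phys_base = (B) - ((E) + kaslr_offset - __START_KERNEL_map) * = 0x6d610000 - 0xf610000 * = 0x5e000000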
*/ #define PTI_USER_PGTABLE_BIT PAGE_SHIFT #define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) #define CR3_PCID_MASK 0xFFFull int calc_kaslr_offset(ulong *kaslr_offset, ulong *phys_base) { uint64_t cr3 = 0, idtr = 0, pgd = 0, idtr_paddr; ulong divide_error_vmcore; ulong kaslr_offset_kdump, phys_base_kdump; int ret = FALSE; int verbose = CRASHDEBUG(1)? 1: 0; if (!machine_type("X86_64")) return FALSE; if (SADUMP_DUMPFILE()) { if (!sadump_get_cr3_idtr(&cr3, &idtr)) return FALSE; } else if (QEMU_MEM_DUMP_NO_VMCOREINFO()) { if (!qemu_get_cr3_idtr(&cr3, &idtr)) return FALSE; } else if (VMSS_DUMPFILE()) { if (!vmware_vmss_get_cr3_idtr(&cr3, &idtr)) return FALSE; } else return FALSE; if (st->pti_init_vmlinux || st->kaiser_init_vmlinux) pgd = cr3 & ~(CR3_PCID_MASK|PTI_USER_PGTABLE_MASK); else pgd = cr3 & ~CR3_PCID_MASK; /* * Set up for kvtop. * * calc_kaslr_offset() is called before machdep_init(PRE_GDB), so some * variables are not initialized yet. Set them up here to call kvtop(). * * TODO: XEN and 5-level are not supported */ vt->kernel_pgd[0] = pgd; machdep->last_pgd_read = vt->kernel_pgd[0]; machdep->machspec->physical_mask_shift = __PHYSICAL_MASK_SHIFT_2_6; machdep->machspec->pgdir_shift = PGDIR_SHIFT; machdep->machspec->ptrs_per_pgd = PTRS_PER_PGD; if (!readmem(pgd, PHYSADDR, machdep->pgd, PAGESIZE(), "pgd", RETURN_ON_ERROR)) goto quit; /* Convert virtual address of IDT table to physical address */ if (!kvtop(NULL, idtr, &idtr_paddr, verbose)) goto quit; /* Now we can calculate kaslr_offset and phys_base */ divide_error_vmcore = get_vec0_addr(idtr_paddr); *kaslr_offset = divide_error_vmcore - st->divide_error_vmlinux; *phys_base = idtr_paddr - (st->idt_table_vmlinux + *kaslr_offset - __START_KERNEL_map); if (CRASHDEBUG(1)) { fprintf(fp, "calc_kaslr_offset: idtr=%lx\n", idtr); fprintf(fp, "calc_kaslr_offset: pgd=%lx\n", pgd); fprintf(fp, "calc_kaslr_offset: idtr(phys)=%lx\n", idtr_paddr); fprintf(fp, "calc_kaslr_offset: divide_error(vmlinux): %lx\n", st->divide_error_vmlinux); fprintf(fp, "calc_kaslr_offset: divide_error(vmcore): %lx\n", divide_error_vmcore); } /* * Check if current kaslr_offset/phys_base is for 1st kernel or 2nd * kernel. If we are in 2nd kernel, get kaslr_offset/phys_base * from vmcoreinfo */ if (get_kaslr_offset_from_vmcoreinfo( *kaslr_offset, &kaslr_offset_kdump, &phys_base_kdump)) { *kaslr_offset = kaslr_offset_kdump; *phys_base = phys_base_kdump; } else if (CRASHDEBUG(1)) { fprintf(fp, "kaslr_helper: failed to determine which kernel was running at crash,\n"); fprintf(fp, "kaslr_helper: assuming the kdump 1st kernel.\n"); } if (CRASHDEBUG(1)) { fprintf(fp, "calc_kaslr_offset: kaslr_offset=%lx\n", *kaslr_offset); fprintf(fp, "calc_kaslr_offset: phys_base=%lx\n", *phys_base); } ret = TRUE; quit: vt->kernel_pgd[0] = 0; machdep->last_pgd_read = 0; return ret; } #else int calc_kaslr_offset(ulong *kaslr_offset, ulong *phys_page) { return FALSE; } #endif /* X86_64 */ crash-7.2.8/tools.c0000664000000000000000000051544313614623427012650 0ustar rootroot/* tools.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2019 David Anderson * Copyright (C) 2002-2019 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version.
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include static void print_number(struct number_option *, int, int); static long alloc_hq_entry(void); struct hq_entry; static void dealloc_hq_entry(struct hq_entry *); static void show_options(void); static void dump_struct_members(struct list_data *, int, ulong); static void rbtree_iteration(ulong, struct tree_data *, char *); static void dump_struct_members_for_tree(struct tree_data *, int, ulong); struct req_entry { char *arg, *name, **member; int *is_str, *is_ptr; ulong *width, *offset; int count; }; static void print_value(struct req_entry *, unsigned int, ulong, unsigned int); static struct req_entry *fill_member_offsets(char *); static void dump_struct_members_fast(struct req_entry *, int, ulong); FILE * set_error(char *target) { FILE *tmp_fp = NULL; char *tmp_str = NULL; if (STREQ(target, pc->error_path)) return pc->error_fp; tmp_str = malloc(strlen(target) + 1); if (tmp_str == NULL) return NULL; strcpy(tmp_str, target); if (STREQ(target, "default")) tmp_fp = stdout; else if (STREQ(target, "redirect")) tmp_fp = fp; else { tmp_fp = fopen(target, "a"); if (tmp_fp == NULL) { error(INFO, "invalid path: %s\n", target); return NULL; } } if (pc->error_fp != NULL && pc->error_fp != stdout && pc->error_fp != fp) fclose(pc->error_fp); if (pc->error_path) free(pc->error_path); pc->error_fp = tmp_fp; pc->error_path = tmp_str; return pc->error_fp; } /* * General purpose error reporting routine. Type INFO prints the message * and returns. Type FATAL aborts the command in progress, and longjmps * back to the appropriate recovery location. If a FATAL occurs during * program initialization, exit() is called. * * The idea is to get the message out so that it is seen by the user * regardless of how the command output may be piped or redirected. * Besides stderr, check whether the output is going to a file or pipe, and * if so, intermingle the error message there as well. */ int __error(int type, char *fmt, ...) { int end_of_line, new_line; char buf[BUFSIZE]; char *spacebuf; void *retaddr[NUMBER_STACKFRAMES] = { 0 }; va_list ap; if (STREQ(pc->error_path, "redirect")) pc->error_fp = fp; if (CRASHDEBUG(1) || (pc->flags & DROP_CORE)) { SAVE_RETURN_ADDRESS(retaddr); console("error() trace: %lx => %lx => %lx => %lx\n", retaddr[3], retaddr[2], retaddr[1], retaddr[0]); } va_start(ap, fmt); (void)vsnprintf(buf, BUFSIZE, fmt, ap); va_end(ap); if (!fmt && FATAL_ERROR(type)) { fprintf(pc->error_fp, "\n"); clean_exit(1); } end_of_line = FATAL_ERROR(type) && !(pc->flags & RUNTIME); if ((new_line = (buf[0] == '\n'))) shift_string_left(buf, 1); else if (pc->flags & PLEASE_WAIT) new_line = TRUE; if (type == CONT) spacebuf = space(strlen(pc->curcmd)); else spacebuf = NULL; if (pc->stdpipe && (STREQ(pc->error_path, "default") || STREQ(pc->error_path, "redirect"))) { fprintf(pc->stdpipe, "%s%s%s %s%s", new_line ? "\n" : "", type == CONT ? spacebuf : pc->curcmd, type == CONT ? " " : ":", type == WARNING ? "WARNING: " : type == NOTE ? "NOTE: " : "", buf); fflush(pc->stdpipe); } else { fprintf(pc->error_fp, "%s%s%s %s%s", new_line || end_of_line ? "\n" : "", type == WARNING ? "WARNING" : type == NOTE ? "NOTE" : type == CONT ? spacebuf : pc->curcmd, type == CONT ? " " : ":", buf, end_of_line ? 
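		/* end_of_line: a fatal error raised before RUNTIME closes
		   its own output line, since initialization then aborts via
		   clean_exit()/remote_exit() instead of returning to a
		   command prompt */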
"\n" : ""); fflush(pc->error_fp); } if ((STREQ(pc->error_path, "default")) && (fp != stdout) && (fp != pc->stdpipe) && (fp != pc->tmpfile)) { fprintf(fp, "%s%s%s %s", new_line ? "\n" : "", type == WARNING ? "WARNING" : type == NOTE ? "NOTE" : type == CONT ? spacebuf : pc->curcmd, type == CONT ? " " : ":", buf); fflush(fp); } if ((pc->flags & DROP_CORE) && (type != NOTE)) { dump_trace(retaddr); SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL); drop_core("DROP_CORE flag set: forcing a segmentation fault\n"); } switch (type) { case FATAL: if (pc->flags & IN_FOREACH) RESUME_FOREACH(); /* FALLTHROUGH */ case FATAL_RESTART: if (pc->flags & RUNTIME) RESTART(); else { if (REMOTE()) remote_exit(); clean_exit(1); } default: case INFO: case NOTE: case WARNING: return FALSE; } } /* * Parse a line into tokens, populate the passed-in argv[] array, and return * the count of arguments found. This function modifies the passed-string * by inserting a NULL character at the end of each token. Expressions * encompassed by parentheses, and strings encompassed by apostrophes, are * collected into single tokens. */ int parse_line(char *str, char *argv[]) { int i, j, k; int string; int expression; for (i = 0; i < MAXARGS; i++) argv[i] = NULL; clean_line(str); if (str == NULL || strlen(str) == 0) return(0); i = j = k = 0; string = FALSE; expression = 0; /* * Special handling for when the first character is a '"'. */ if (str[0] == '"') { next: do { i++; } while ((str[i] != NULLCHAR) && (str[i] != '"')); switch (str[i]) { case NULLCHAR: argv[j] = &str[k]; return j+1; case '"': argv[j++] = &str[k+1]; str[i++] = NULLCHAR; if (str[i] == '"') { k = i; goto next; } break; } } else argv[j++] = str; while (TRUE) { if (j == MAXARGS) error(FATAL, "too many arguments in string!\n"); while (str[i] != ' ' && str[i] != '\t' && str[i] != NULLCHAR) { i++; } switch (str[i]) { case ' ': case '\t': str[i++] = NULLCHAR; while (str[i] == ' ' || str[i] == '\t') { i++; } if (str[i] == '"') { str[i] = ' '; string = TRUE; i++; } /* * Make an expression encompassed by a set of parentheses * a single argument. Also account for embedded sets. */ if (!string && str[i] == '(') { argv[j++] = &str[i]; expression = 1; while (expression > 0) { i++; switch (str[i]) { case '(': expression++; break; case ')': expression--; break; case NULLCHAR: case '\n': expression = -1; break; default: break; } } if (expression == 0) { i++; continue; } } if (str[i] != NULLCHAR && str[i] != '\n') { argv[j++] = &str[i]; if (string) { string = FALSE; while (str[i] != '"' && str[i] != NULLCHAR) i++; if (str[i] == '"') str[i] = ' '; } break; } /* else fall through */ case '\n': str[i] = NULLCHAR; /* keep falling... */ case NULLCHAR: argv[j] = NULLCHAR; return(j); } } } /* * Defuse controversy re: extensions to ctype.h */ int whitespace(int c) { return ((c == ' ') ||(c == '\t')); } int ascii(int c) { return ((c >= 0) && ( c <= 0x7f)); } /* * Strip line-ending whitespace and linefeeds. */ char * strip_line_end(char *line) { strip_linefeeds(line); strip_ending_whitespace(line); return(line); } /* * Strip line-beginning and line-ending whitespace and linefeeds. */ char * clean_line(char *line) { strip_beginning_whitespace(line); strip_linefeeds(line); strip_ending_whitespace(line); return(line); } /* * Strip line-ending linefeeds in a string. 
*/ char * strip_linefeeds(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == '\n') { *p = NULLCHAR; if (--p < line) break; } return(line); } /* * Strip a specified line-ending character in a string. */ char * strip_ending_char(char *line, char c) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); if (*p == c) *p = NULLCHAR; return(line); } /* * Strip a specified line-beginning character in a string. */ char * strip_beginning_char(char *line, char c) { if (line == NULL || strlen(line) == 0) return(line); if (FIRSTCHAR(line) == c) shift_string_left(line, 1); return(line); } /* * Strip line-ending whitespace. */ char * strip_ending_whitespace(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == ' ' || *p == '\t') { *p = NULLCHAR; if (p == line) break; p--; } return(line); } /* * Strip line-beginning whitespace. */ char * strip_beginning_whitespace(char *line) { char buf[BUFSIZE]; char *p; if (line == NULL || strlen(line) == 0) return(line); strcpy(buf, line); p = &buf[0]; while (*p == ' ' || *p == '\t') p++; strcpy(line, p); return(line); } /* * End line at first comma found. */ char * strip_comma(char *line) { char *p; if ((p = strstr(line, ","))) *p = NULLCHAR; return(line); } /* * Strip the 0x from the beginning of a hexadecimal value string. */ char * strip_hex(char *line) { if (STRNEQ(line, "0x")) shift_string_left(line, 2); return(line); } /* * Turn a string into upper-case. */ char * upper_case(const char *s, char *buf) { const char *p1; char *p2; p1 = s; p2 = buf; while (*p1) { *p2 = toupper(*p1); p1++, p2++; } *p2 = NULLCHAR; return(buf); } /* * Return pointer to first non-space/tab in a string. */ char * first_nonspace(char *s) { return(s + strspn(s, " \t")); } /* * Return pointer to first space/tab in a string. If none are found, * return a pointer to the string terminating NULL. */ char * first_space(char *s) { return(s + strcspn(s, " \t")); } /* * Replace the first space/tab found in a string with a NULL character. */ char * null_first_space(char *s) { char *p1; p1 = first_space(s); if (*p1) *p1 = NULLCHAR; return s; } /* * Replace any instances of the characters in string c that are found in * string s with the character passed in r. */ char * replace_string(char *s, char *c, char r) { int i, j; for (i = 0; s[i]; i++) { for (j = 0; c[j]; j++) { if (s[i] == c[j]) s[i] = r; } } return s; } void string_insert(char *insert, char *where) { char *p; p = GETBUF(strlen(insert) + strlen(where) + 1); sprintf(p, "%s%s", insert, where); strcpy(where, p); FREEBUF(p); } /* * Find the rightmost instance of a substring in a string. */ char * strstr_rightmost(char *s, char *lookfor) { char *next, *last, *p; for (p = s, last = NULL; *p; p++) { if (!(next = strstr(p, lookfor))) break; last = p = next; } return last; } /* * Prints a string verbatim, allowing strings with % signs to be displayed * without printf conversions. */ void print_verbatim(FILE *filep, char *line) { int i; for (i = 0; i < strlen(line); i++) { fputc(line[i], filep); fflush(filep); } } char * fixup_percent(char *s) { char *p1; if ((p1 = strstr(s, "%")) == NULL) return s; s[strlen(s)+1] = NULLCHAR; memmove(p1+1, p1, strlen(p1)); *p1 = '%'; return s; } /* * Convert an indeterminate number string to either a hexadecimal or decimal * long value. Translate with a bias towards decimal unless HEX_BIAS is set. 
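 *
 * For example, stol("10", FAULT_ON_ERROR, NULL) returns 10 under the
 * default decimal bias, whereas with HEX_BIAS set the same string is
 * treated as hexadecimal and yields 0x10 (16).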
*/ ulong stol(char *s, int flags, int *errptr) { if ((flags & HEX_BIAS) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); else { if (decimal(s, 0)) return(dtol(s, flags, errptr)); else if (hexadecimal(s, 0)) return(htol(s, flags, errptr)); } if (!(flags & QUIET)) error(INFO, "not a valid number: %s\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } ulonglong stoll(char *s, int flags, int *errptr) { if ((flags & HEX_BIAS) && hexadecimal(s, 0)) return(htoll(s, flags, errptr)); else { if (decimal(s, 0)) return(dtoll(s, flags, errptr)); else if (hexadecimal(s, 0)) return(htoll(s, flags, errptr)); } if (!(flags & QUIET)) error(INFO, "not a valid number: %s\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Append a two-character string to a number to make 1, 2, 3 and 4 into * 1st, 2nd, 3rd, 4th, and so on... */ char * ordinal(ulong val, char *buf) { char *p1; sprintf(buf, "%ld", val); p1 = &buf[strlen(buf)-1]; switch (*p1) { case '1': strcat(buf, "st"); break; case '2': strcat(buf, "nd"); break; case '3': strcat(buf, "rd"); break; default: strcat(buf, "th"); break; } return buf; } /* * Convert a string into: * * 1. an evaluated expression if it's enclosed within parentheses. * 2. to a decimal value if the string is all decimal characters. * 3. to a hexadecimal value if the string is all hexadecimal characters. * 4. to a symbol value if the string is a known symbol. * * If HEX_BIAS is set, pass the value on to htol(). */ ulong convert(char *s, int flags, int *errptr, ulong numflag) { struct syment *sp; if ((numflag & NUM_EXPR) && can_eval(s)) return(eval(s, flags, errptr)); if ((flags & HEX_BIAS) && (numflag & NUM_HEX) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); else { if ((numflag & NUM_DEC) && decimal(s, 0)) return(dtol(s, flags, errptr)); if ((numflag & NUM_HEX) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); } if ((sp = symbol_search(s))) return(sp->value); error(INFO, "cannot convert \"%s\"\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Convert a string to a hexadecimal long value. */ ulong htol(char *s, int flags, int *errptr) { long i, j; ulong n; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto htol_error; } if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > MAX_HEXADDR_STRLEN) { if (!(flags & QUIET)) error(INFO, "input string too large: \"%s\" (%d vs %d)\n", s, strlen(s), MAX_HEXADDR_STRLEN); goto htol_error; } for (n = i = 0; s[i] != 0; i++) { switch (s[i]) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': j = (s[i] - 'a') + 10; break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': j = (s[i] - 'A') + 10; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': j = s[i] - '0'; break; case 'x': case 'X': case 'h': continue; default: if (!(flags & QUIET)) error(INFO, "invalid input: \"%s\"\n", s); goto htol_error; } n = (16 * n) + j; } return(n); htol_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return BADADDR; } /* * Convert a string to a hexadecimal unsigned long long value. 
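 * As with htol() above, a leading "0x"/"0X" prefix is skipped and any
 * embedded 'x', 'X' or 'h' characters are ignored, so "0xffff", "ffff"
 * and "ffffh" all convert to the same value.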
*/ ulonglong htoll(char *s, int flags, int *errptr) { long i, j; ulonglong n; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto htoll_error; } if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > LONG_LONG_PRLEN) { if (!(flags & QUIET)) error(INFO, "input string too large: \"%s\" (%d vs %d)\n", s, strlen(s), LONG_LONG_PRLEN); goto htoll_error; } for (n = i = 0; s[i] != 0; i++) { switch (s[i]) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': j = (s[i] - 'a') + 10; break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': j = (s[i] - 'A') + 10; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': j = s[i] - '0'; break; case 'x': case 'X': case 'h': continue; default: if (!(flags & QUIET)) error(INFO, "invalid input: \"%s\"\n", s); goto htoll_error; } n = (16 * n) + j; } return(n); htoll_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Convert a string to a decimal long value. */ ulong dtol(char *s, int flags, int *errptr) { ulong retval; char *p, *orig; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtol_error; } if (strlen(s) == 0) goto dtol_error; p = orig = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0') { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", orig, s[j]); goto dtol_error; } else if (sscanf(s, "%lu", &retval) != 1) { if (!(flags & QUIET)) error(INFO, "invalid expression\n"); goto dtol_error; } return(retval); dtol_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Convert a string to a decimal long value. */ ulonglong dtoll(char *s, int flags, int *errptr) { ulonglong retval; char *p, *orig; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtoll_error; } if (strlen(s) == 0) goto dtoll_error; p = orig = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0') { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", orig, s[j]); goto dtoll_error; } else if (sscanf(s, "%llu", &retval) != 1) { if (!(flags & QUIET)) error(INFO, "invalid expression\n"); goto dtoll_error; } return (retval); dtoll_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return ((ulonglong)(-1)); } /* * Convert a string to a decimal integer value. */ unsigned int dtoi(char *s, int flags, int *errptr) { unsigned int retval; char *p; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtoi_error; } p = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0' || (sscanf(s, "%d", (int *)&retval) != 1)) { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", s, s[j]); goto dtoi_error; } return(retval); dtoi_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return((unsigned int)(-1)); } /* * Determine whether a string contains only decimal characters. 
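 * Embedded spaces are tolerated, but at least one digit must be present.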
* If count is non-zero, limit the search to count characters. */ int decimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; *p; p++) { switch(*p) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': digits++; case ' ': break; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Extract a hexadecimal number from a string. If first_instance is FALSE, * and two possibilities are found, a fatal error results. */ int extract_hex(char *s, ulong *result, char stripchar, ulong first_instance) { int i, found; char *arglist[MAXARGS]; int argc; ulong value; char *buf; buf = GETBUF(strlen(s)); strcpy(buf, s); argc = parse_line(buf, arglist); for (i = found = value = 0; i < argc; i++) { if (stripchar) strip_ending_char(arglist[i], stripchar); if (hexadecimal(arglist[i], 0)) { if (found) { FREEBUF(buf); error(FATAL, "two hexadecimal args in: \"%s\"\n", strip_linefeeds(s)); } value = htol(arglist[i], FAULT_ON_ERROR, NULL); found = TRUE; if (first_instance) break; } } FREEBUF(buf); if (found) { *result = value; return TRUE; } return FALSE; } /* * Determine whether a string contains only ASCII characters. */ int ascii_string(char *s) { char *p; for (p = &s[0]; *p; p++) { if (!ascii(*p)) return FALSE; } return TRUE; } /* * Check whether a string contains only printable ASCII characters. */ int printable_string(char *s) { char *p; for (p = &s[0]; *p; p++) { if (!isprint(*p)) return FALSE; } return TRUE; } /* * Determine whether a string contains only hexadecimal characters. * If count is non-zero, limit the search to count characters. */ int hexadecimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': digits++; case 'x': case 'X': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Determine whether a string contains only hexadecimal characters. * and cannot be construed as a decimal number. * If count is non-zero, limit the search to count characters. */ int hexadecimal_only(char *s, int count) { char *p; int cnt, only; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; only = 0; for (p = &s[0]; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'x': case 'X': only++; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return only; } /* * Clean a command argument that has an obvious but ignorable error. * The first one is an attached comma to a number, that usually is the * result of a cut-and-paste of an address from a structure display. * The second on is an attached colon to a number, usually from a * cut-and-paste of a memory dump. * Add more when they become annoynance. * * It presumes args[optind] is the argument being tinkered with, and * always returns TRUE for convenience of use. 
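 * For example, a cut-and-pasted address argument such as
 * "ffff8800334c0000," is quietly reduced to "ffff8800334c0000".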
 */
int
clean_arg(void)
{
	char buf[BUFSIZE];

	if (LASTCHAR(args[optind]) == ',' ||
	    LASTCHAR(args[optind]) == ':') {
		strcpy(buf, args[optind]);
		LASTCHAR(buf) = NULLCHAR;
		if (IS_A_NUMBER(buf))
			LASTCHAR(args[optind]) = NULLCHAR;
	}

	return TRUE;
}

/*
 * Translate a hexadecimal string into its ASCII components.
 */
void
cmd_ascii(void)
{
	int i;
	ulonglong value;
	char *s;
	int c, prlen, bytes;

	optind = 1;
	if (!args[optind]) {
		fprintf(fp, "\n");
		fprintf(fp, " 0 1 2 3 4 5 6 7\n");
		fprintf(fp, " +-------------------------------\n");
		fprintf(fp, " 0 | NUL DLE SP 0 @ P ` p\n");
		fprintf(fp, " 1 | SOH DC1 ! 1 A Q a q\n");
		fprintf(fp, " 2 | STX DC2 %c 2 B R b r\n", 0x22);
		fprintf(fp, " 3 | ETX DC3 # 3 C S c s\n");
		fprintf(fp, " 4 | EOT DC4 $ 4 D T d t\n");
		fprintf(fp, " 5 | ENQ NAK %c 5 E U e u\n", 0x25);
		fprintf(fp, " 6 | ACK SYN & 6 F V f v\n");
		fprintf(fp, " 7 | BEL ETB ' 7 G W g w\n");
		fprintf(fp, " 8 | BS CAN ( 8 H X h x\n");
		fprintf(fp, " 9 | HT EM ) 9 I Y i y\n");
		fprintf(fp, " A | LF SUB * : J Z j z\n");
		fprintf(fp, " B | VT ESC + ; K [ k {\n");
		fprintf(fp, " C | FF FS , < L %c l |\n", 0x5c);
		fprintf(fp, " D | CR GS - = M ] m }\n");
		fprintf(fp, " E | SO RS . > N ^ n ~\n");
		fprintf(fp, " F | SI US / ? O _ o DEL\n");
		fprintf(fp, "\n");
		return;
	}

	while (args[optind]) {
		s = args[optind];
		if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
			s += 2;
		if (strlen(s) > LONG_PRLEN) {
			prlen = LONG_LONG_PRLEN;
			bytes = sizeof(long long);
		} else {
			prlen = LONG_PRLEN;
			bytes = sizeof(long);
		}
		value = htoll(s, FAULT_ON_ERROR, NULL);
		fprintf(fp, "%.*llx: ", prlen, value);
		for (i = 0; i < bytes; i++) {
			c = (value >> (8*i)) & 0xff;
			if ((c >= 0x20) && (c < 0x7f)) {
				fprintf(fp, "%c", (char)c);
				continue;
			}
			if (c > 0x7f) {
				fprintf(fp, "<%02x>", c);
				continue;
			}
			switch (c)
			{
			case 0x0: fprintf(fp, "<NUL>"); break;
			case 0x1: fprintf(fp, "<SOH>"); break;
			case 0x2: fprintf(fp, "<STX>"); break;
			case 0x3: fprintf(fp, "<ETX>"); break;
			case 0x4: fprintf(fp, "<EOT>"); break;
			case 0x5: fprintf(fp, "<ENQ>"); break;
			case 0x6: fprintf(fp, "<ACK>"); break;
			case 0x7: fprintf(fp, "<BEL>"); break;
			case 0x8: fprintf(fp, "<BS>"); break;
			case 0x9: fprintf(fp, "<HT>"); break;
			case 0xa: fprintf(fp, "<LF>"); break;
			case 0xb: fprintf(fp, "<VT>"); break;
			case 0xc: fprintf(fp, "<FF>"); break;
			case 0xd: fprintf(fp, "<CR>"); break;
			case 0xe: fprintf(fp, "<SO>"); break;
			case 0xf: fprintf(fp, "<SI>"); break;
			case 0x10: fprintf(fp, "<DLE>"); break;
			case 0x11: fprintf(fp, "<DC1>"); break;
			case 0x12: fprintf(fp, "<DC2>"); break;
			case 0x13: fprintf(fp, "<DC3>"); break;
			case 0x14: fprintf(fp, "<DC4>"); break;
			case 0x15: fprintf(fp, "<NAK>"); break;
			case 0x16: fprintf(fp, "<SYN>"); break;
			case 0x17: fprintf(fp, "<ETB>"); break;
			case 0x18: fprintf(fp, "<CAN>"); break;
			case 0x19: fprintf(fp, "<EM>"); break;
			case 0x1a: fprintf(fp, "<SUB>"); break;
			case 0x1b: fprintf(fp, "<ESC>"); break;
			case 0x1c: fprintf(fp, "<FS>"); break;
			case 0x1d: fprintf(fp, "<GS>"); break;
			case 0x1e: fprintf(fp, "<RS>"); break;
			case 0x1f: fprintf(fp, "<US>"); break;
			case 0x7f: fprintf(fp, "<DEL>"); break;
			}
		}
		fprintf(fp, "\n");
		optind++;
	}
}

/*
 * Counts number of leading whitespace characters in a string.
 */
int
count_leading_spaces(char *s)
{
	return (strspn(s, " \t"));
}

/*
 * Prints the requested number of copies of the given character.
 */
void
pad_line(FILE *filep, int cnt, char c)
{
	int i;

	for (i = 0; i < cnt; i++)
		fputc(c, filep);
}

/*
 * Returns appropriate number of inter-field spaces in a usable string.
 * MINSPACE is defined as -100, but implies the minimum space between two
 * fields. Currently this can be either one or two spaces, depending upon
 * the architecture.
Since the mininum space must be at least 1, MINSPACE, * MINSPACE-1 and MINSPACE+1 are all valid, special numbers. Otherwise * the space count must be greater than or equal to 0. * * If the cnt request is greater than SPACES, a dynamic buffer is * allocated, and normal buffer garbage collection will return it * back to the pool. */ char * space(int cnt) { #define SPACES 40 static char spacebuf[SPACES+1] = { 0 }; int i; char *bigspace; if (cnt > SPACES) { bigspace = GETBUF(cnt); for (i = 0; i < cnt; i++) bigspace[i] = ' '; bigspace[i] = NULLCHAR; return bigspace; } if (!strlen(spacebuf)) { for (i = 0; i < SPACES; i++) spacebuf[i] = ' '; spacebuf[i] = NULLCHAR; } if (cnt < (MINSPACE-1)) error(FATAL, "illegal spacing request: %d\n", cnt); if ((cnt > MINSPACE+1) && (cnt < 0)) error(FATAL, "illegal spacing request\n"); switch (cnt) { case (MINSPACE-1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES]); /* NULL */ else return (&spacebuf[SPACES-1]); /* 1 space */ case MINSPACE: if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-1]); /* 1 space */ else return (&spacebuf[SPACES-2]); /* 2 spaces */ case (MINSPACE+1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-2]); /* 2 spaces */ else return (&spacebuf[SPACES-3]); /* 3 spaces */ default: return (&spacebuf[SPACES-cnt]); /* as requested */ } } /* * Determine whether substring s1, with length len, and contained within * string s, is surrounded by characters. If len is 0, calculate * it. */ int bracketed(char *s, char *s1, int len) { char *s2; if (!len) { if (!(s2 = strstr(s1, ">"))) return FALSE; len = s2-s1; } if (((s1-s) < 1) || (*(s1-1) != '<') || ((s1+len) >= &s[strlen(s)]) || (*(s1+len) != '>')) return FALSE; return TRUE; } /* * Counts the number of a specified character in a string. */ int count_chars(char *s, char c) { char *p; int count; if (!s) return 0; count = 0; for (p = s; *p; p++) { if (*p == c) count++; } return count; } /* * Counts the number of a specified characters in a buffer. */ long count_buffer_chars(char *bufptr, char c, long len) { long i, cnt; for (i = cnt = 0; i < len; i++, bufptr++) { if (*bufptr == c) cnt++; } return cnt; } /* * Concatenates the tokens in the global args[] array into one string, * separating each token with one space. If the no_options flag is set, * don't include any args beginning with a dash character. */ char * concat_args(char *buf, int arg, int no_options) { int i; BZERO(buf, BUFSIZE); for (i = arg; i < argcnt; i++) { if (no_options && STRNEQ(args[i], "-")) continue; strcat(buf, args[i]); strcat(buf, " "); } return(strip_ending_whitespace(buf)); } /* * Shifts the contents of a string to the left by cnt characters, * disposing the leftmost characters. */ char * shift_string_left(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s, s+cnt, (origlen-cnt)); *(s+(origlen-cnt)) = NULLCHAR; return(s); } /* * Shifts the contents of a string to the right by cnt characters, * inserting space characters. (caller confirms space is available) */ char * shift_string_right(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s+cnt, s, origlen); s[origlen+cnt] = NULLCHAR; return(memset(s, ' ', cnt)); } /* * Create a string in a buffer of a given size, centering, or justifying * left or right as requested. If the opt argument is used, then the string * is created with its string/integer value. If opt is NULL, then the * string is already in contained in string s (not justified). 
Note that * flag LONGLONG_HEX implies that opt is a ulonglong pointer to the * actual value. */ char * mkstring(char *s, int size, ulong flags, const char *opt) { int len; int extra; int left; int right; switch (flags & (LONG_DEC|SLONG_DEC|LONG_HEX|INT_HEX|INT_DEC|LONGLONG_HEX|ZERO_FILL)) { case LONG_DEC: sprintf(s, "%lu", (ulong)opt); break; case SLONG_DEC: sprintf(s, "%ld", (ulong)opt); break; case LONG_HEX: sprintf(s, "%lx", (ulong)opt); break; case (LONG_HEX|ZERO_FILL): if (VADDR_PRLEN == 8) sprintf(s, "%08lx", (ulong)opt); else if (VADDR_PRLEN == 16) sprintf(s, "%016lx", (ulong)opt); break; case INT_DEC: sprintf(s, "%u", (uint)((ulong)opt)); break; case INT_HEX: sprintf(s, "%x", (uint)((ulong)opt)); break; case LONGLONG_HEX: sprintf(s, "%llx", *((ulonglong *)opt)); break; default: if (opt) strcpy(s, opt); break; } /* * At this point, string s has the string to be justified, * and has room to work with. The relevant flags from this * point on are of CENTER, LJUST and RJUST. If the length * of string s is already larger than the requested size, * just return it as is. */ len = strlen(s); if (size <= len) return(s); extra = size - len; if (flags & CENTER) { /* * If absolute centering is not possible, justify the * string as requested -- or to the left if no justify * argument was passed in. */ if (extra % 2) { switch (flags & (LJUST|RJUST)) { default: case LJUST: right = (extra/2) + 1; left = extra/2; break; case RJUST: right = extra/2; left = (extra/2) + 1; break; } } else left = right = extra/2; shift_string_right(s, left); len = strlen(s); memset(s + len, ' ', right); s[len + right] = NULLCHAR; return(s); } if (flags & LJUST) { len = strlen(s); memset(s + len, ' ', extra); s[len + extra] = NULLCHAR; } else if (flags & RJUST) shift_string_right(s, extra); return(s); } /* * Prints the requested number of BACKSPACE characters. */ void backspace(int cnt) { int i; for (i = 0; i < cnt; i++) fprintf(fp, "\b"); } /* * Set/display process context or internal variables. Processes are set * by their task or PID number, or to the panic context with the -p flag. * Internal variables may be viewed or changed, depending whether an argument * follows the variable name. If no arguments are entered, the current * process context is dumped. The current set of variables and their * acceptable settings are: * * debug "on", "off", or any number. "on" sets it to a value of 1. * hash "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * scroll "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * silent "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * refresh "on", "off", or any number. Non-zero numbers are converted * to "on", zero is converted to "off". * sym regular filename * console device filename * radix 10 or 16 * core (no arg) drop core when error() is called. * vi (no arg) set editing mode to vi (from .rc file only). * emacs (no arg) set editing mode to emacs (from .rc file only). * namelist kernel name (from .rc file only). * dumpfile dumpfile name (from .rc file only). * * gdb variable settings not changeable by gdb's "set" command: * * print_max value (default is 200). 
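 *
 * Typical invocations look like this (illustrative examples only, not an
 * exhaustive list):
 *
 *   crash> set debug 1
 *   crash> set radix 16
 *   crash> set scroll off
 *   crash> set 1234 (make the task with PID 1234 the current context)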
*/ void cmd_set(void) { int i, c; ulong value; int cpu, runtime, from_rc_file; char buf[BUFSIZE]; char *extra_message; struct task_context *tc; struct syment *sp; #define defer() do { } while (0) #define already_done() do { } while (0) #define ignore() do { } while (0) extra_message = NULL; runtime = pc->flags & RUNTIME ? TRUE : FALSE; from_rc_file = pc->curcmd_flags & FROM_RCFILE ? TRUE : FALSE; while ((c = getopt(argcnt, args, "pvc:a:")) != EOF) { switch(c) { case 'c': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) { error(INFO, "not allowed on a live system\n"); argerrs++; break; } cpu = dtoi(optarg, FAULT_ON_ERROR, NULL); set_cpu(cpu); return; case 'p': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) { set_context(tt->this_task, NO_PID); show_context(CURRENT_CONTEXT()); return; } if (!tt->panic_task) { error(INFO, "no panic task found!\n"); return; } set_context(tt->panic_task, NO_PID); show_context(CURRENT_CONTEXT()); return; case 'v': if (!runtime) return; show_options(); return; case 'a': if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE)) option_not_supported(c); if (!runtime) return; if (ACTIVE()) error(FATAL, "-a option not allowed on live systems\n"); switch (str_to_context(optarg, &value, &tc)) { case STR_PID: if ((i = TASKS_PER_PID(value)) > 1) error(FATAL, "pid %d has %d tasks: " "use a task address\n", value, i); break; case STR_TASK: break; case STR_INVALID: error(FATAL, "invalid task or pid value: %s\n", optarg); } cpu = tc->processor; tt->active_set[cpu] = tc->task; if (tt->panic_threads[cpu]) tt->panic_threads[cpu] = tc->task; fprintf(fp, "\"%s\" task %lx has been marked as the active task on cpu %d\n", tc->comm, tc->task, cpu); return; default: argerrs++; break; } } if (argerrs) { if (runtime) cmd_usage(pc->curcmd, SYNOPSIS); return; } if (!args[optind]) { if (XEN_HYPER_MODE()) error(INFO, "requires an option with the Xen hypervisor\n"); else if (pc->flags & MINIMAL_MODE) show_options(); else if (runtime) show_context(CURRENT_CONTEXT()); return; } while (args[optind]) { if (STREQ(args[optind], "debug")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) pc->debug = 1; else if (STREQ(args[optind], "off")) pc->debug = 0; else if (IS_A_NUMBER(args[optind])) pc->debug = stol(args[optind], FAULT_ON_ERROR, NULL); else goto invalid_set_command; } if (runtime) fprintf(fp, "debug: %ld\n", pc->debug); set_lkcd_debug(pc->debug); set_vas_debug(pc->debug); return; } else if (STREQ(args[optind], "hash")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) pc->flags |= HASH; else if (STREQ(args[optind], "off")) pc->flags &= ~HASH; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= HASH; else pc->flags &= ~HASH; } else goto invalid_set_command; } if (runtime) fprintf(fp, "hash: %s\n", pc->flags & HASH ? 
"on" : "off"); return; } else if (STREQ(args[optind], "unwind")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) { if ((kt->flags & DWARF_UNWIND_CAPABLE) || !runtime) { kt->flags |= DWARF_UNWIND; kt->flags &= ~NO_DWARF_UNWIND; } } else if (STREQ(args[optind], "off")) { kt->flags &= ~DWARF_UNWIND; if (!runtime) kt->flags |= NO_DWARF_UNWIND; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { if ((kt->flags & DWARF_UNWIND_CAPABLE) || !runtime) { kt->flags |= DWARF_UNWIND; kt->flags &= ~NO_DWARF_UNWIND; } } else { kt->flags &= ~DWARF_UNWIND; if (!runtime) kt->flags |= NO_DWARF_UNWIND; } } else goto invalid_set_command; } if (runtime) fprintf(fp, "unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off"); return; } else if (STREQ(args[optind], "refresh")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) tt->flags |= TASK_REFRESH; else if (STREQ(args[optind], "off")) { tt->flags &= ~TASK_REFRESH; if (!runtime) tt->flags |= TASK_REFRESH_OFF; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) tt->flags |= TASK_REFRESH; else { tt->flags &= ~TASK_REFRESH; if (!runtime) tt->flags |= TASK_REFRESH_OFF; } } else goto invalid_set_command; } if (runtime) fprintf(fp, "refresh: %s\n", tt->flags & TASK_REFRESH ? "on" : "off"); return; } else if (STREQ(args[optind], "gdb")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (STREQ(args[optind], "on")) { if (pc->flags & MINIMAL_MODE) goto invalid_set_command; else pc->flags2 |= GDB_CMD_MODE; } else if (STREQ(args[optind], "off")) pc->flags2 &= ~GDB_CMD_MODE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { if (pc->flags & MINIMAL_MODE) goto invalid_set_command; else pc->flags2 |= GDB_CMD_MODE; } else pc->flags2 &= ~GDB_CMD_MODE; } else goto invalid_set_command; set_command_prompt(pc->flags2 & GDB_CMD_MODE ? "gdb> " : NULL); } if (runtime) fprintf(fp, "gdb: %s\n", pc->flags2 & GDB_CMD_MODE ? "on" : "off"); return; } else if (STREQ(args[optind], "scroll")) { if (args[optind+1] && pc->scroll_command) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "on")) pc->flags |= SCROLL; else if (STREQ(args[optind], "off")) pc->flags &= ~SCROLL; else if (STREQ(args[optind], "more")) pc->scroll_command = SCROLL_MORE; else if (STREQ(args[optind], "less")) pc->scroll_command = SCROLL_LESS; else if (STREQ(args[optind], "CRASHPAGER")) { if (CRASHPAGER_valid()) pc->scroll_command = SCROLL_CRASHPAGER; } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= SCROLL; else pc->flags &= ~SCROLL; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "scroll: %s ", pc->flags & SCROLL ? 
"on" : "off"); switch (pc->scroll_command) { case SCROLL_LESS: fprintf(fp, "(/usr/bin/less)\n"); break; case SCROLL_MORE: fprintf(fp, "(/bin/more)\n"); break; case SCROLL_NONE: fprintf(fp, "(none)\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); break; } } return; } else if (STREQ(args[optind], "silent")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) { pc->flags |= SILENT; pc->flags &= ~SCROLL; } else if (STREQ(args[optind], "off")) pc->flags &= ~SILENT; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { pc->flags |= SILENT; pc->flags &= ~SCROLL; } else pc->flags &= ~SILENT; } else goto invalid_set_command; if (!(pc->flags & SILENT)) fprintf(fp, "silent: off\n"); } else if (runtime && !(pc->flags & SILENT)) fprintf(fp, "silent: off\n"); return; } else if (STREQ(args[optind], "console")) { int assignment; if (args[optind+1]) { create_console_device(args[optind+1]); optind++; assignment = optind; } else assignment = 0; if (runtime) { fprintf(fp, "console: "); if (pc->console) fprintf(fp, "%s\n", pc->console); else { if (assignment) fprintf(fp, "assignment to %s failed\n", args[assignment]); else fprintf(fp, "not set\n"); } } return; } else if (STREQ(args[optind], "core")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) pc->flags |= DROP_CORE; else if (STREQ(args[optind], "off")) pc->flags &= ~DROP_CORE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags |= DROP_CORE; else pc->flags &= ~DROP_CORE; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "core: %s on error message)\n", pc->flags & DROP_CORE ? "on (drop core" : "off (do NOT drop core"); } return; } else if (STREQ(args[optind], "radix")) { if (args[optind+1]) { optind++; if (!runtime) defer(); else if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (STREQ(args[optind], "10") || STRNEQ(args[optind], "dec") || STRNEQ(args[optind], "ten")) pc->output_radix = 10; else if (STREQ(args[optind], "16") || STRNEQ(args[optind], "hex") || STRNEQ(args[optind], "six")) pc->output_radix = 16; else goto invalid_set_command; } if (runtime) { sprintf(buf, "set output-radix %d", pc->output_radix); gdb_pass_through(buf, NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: %d (%s)\n", pc->output_radix, pc->output_radix == 10 ? 
"decimal" : "hex"); } return; } else if (STREQ(args[optind], "hex")) { if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (runtime) { pc->output_radix = 16; gdb_pass_through("set output-radix 16", NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: 16 (hex)\n"); } return; } else if (STREQ(args[optind], "dec")) { if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE)) ignore(); else if (runtime) { pc->output_radix = 10; gdb_pass_through("set output-radix 10", NULL, GNU_FROM_TTY_OFF); fprintf(fp, "output radix: 10 (decimal)\n"); } return; } else if (STREQ(args[optind], "edit")) { if (args[optind+1]) { if (runtime && !from_rc_file) error(FATAL, "cannot change editing mode during runtime\n"); optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "vi")) pc->editing_mode = "vi"; else if (STREQ(args[optind], "emacs")) pc->editing_mode = "emacs"; else goto invalid_set_command; } if (runtime) fprintf(fp, "edit: %s\n", pc->editing_mode); return; } else if (STREQ(args[optind], "vi")) { if (runtime) { if (!from_rc_file) error(FATAL, "cannot change editing mode during runtime\n"); fprintf(fp, "edit: %s\n", pc->editing_mode); } else pc->editing_mode = "vi"; return; } else if (STREQ(args[optind], "emacs")) { if (runtime) { if (!from_rc_file) error(FATAL, "cannot change %s editing mode during runtime\n", pc->editing_mode); fprintf(fp, "edit: %s\n", pc->editing_mode); } else pc->editing_mode = "emacs"; return; } else if (STREQ(args[optind], "print_max")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (decimal(args[optind], 0)) *gdb_print_max = atoi(args[optind]); else if (hexadecimal(args[optind], 0)) *gdb_print_max = (unsigned int) htol(args[optind], FAULT_ON_ERROR, NULL); else goto invalid_set_command; } if (runtime) fprintf(fp, "print_max: %d\n", *gdb_print_max); return; } else if (STREQ(args[optind], "scope")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (can_eval(args[optind])) value = eval(args[optind], FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) value = htol(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) value = sp->value; else goto invalid_set_command; if (runtime) { if (gdb_set_crash_scope(value, args[optind])) pc->scope = value; else return; } } if (runtime) { fprintf(fp, "scope: %lx ", pc->scope); if (pc->scope) fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0)); else fprintf(fp, "(not set)\n"); } return; } else if (STREQ(args[optind], "null-stop")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (STREQ(args[optind], "on")) *gdb_stop_print_at_null = 1; else if (STREQ(args[optind], "off")) *gdb_stop_print_at_null = 0; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) *gdb_stop_print_at_null = 1; else *gdb_stop_print_at_null = 0; } else goto invalid_set_command; } if (runtime) fprintf(fp, "null-stop: %s\n", *gdb_stop_print_at_null ? "on" : "off"); return; } else if (STREQ(args[optind], "print_array")) { optind++; if (args[optind]) { if (!runtime) defer(); else if (STREQ(args[optind], "on")) *gdb_prettyprint_arrays = 1; else if (STREQ(args[optind], "off")) *gdb_prettyprint_arrays = 0; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) *gdb_prettyprint_arrays = 1; else *gdb_prettyprint_arrays = 0; } else goto invalid_set_command; } if (runtime) fprintf(fp, "print_array: %s\n", *gdb_prettyprint_arrays ? 
"on" : "off"); return; } else if (STREQ(args[optind], "namelist")) { optind++; if (!runtime && args[optind]) { if (!is_elf_file(args[optind])) error(FATAL, "%s: not a kernel namelist (from .%src file)\n", args[optind], pc->program_name); if ((pc->namelist = (char *) malloc(strlen(args[optind])+1)) == NULL) { error(INFO, "cannot malloc memory for namelist: %s: %s\n", args[optind], strerror(errno)); } else strcpy(pc->namelist, args[optind]); } if (runtime) fprintf(fp, "namelist: %s\n", pc->namelist); return; } else if (STREQ(args[optind], "free")) { if (!runtime) defer(); else fprintf(fp, "%d pages freed\n", dumpfile_memory(DUMPFILE_FREE_MEM)); return; } else if (STREQ(args[optind], "data_debug")) { pc->flags |= DATADEBUG; return; } else if (STREQ(args[optind], "zero_excluded")) { if (args[optind+1]) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "on")) { *diskdump_flags |= ZERO_EXCLUDED; sadump_set_zero_excluded(); } else if (STREQ(args[optind], "off")) { *diskdump_flags &= ~ZERO_EXCLUDED; sadump_unset_zero_excluded(); } else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) { *diskdump_flags |= ZERO_EXCLUDED; sadump_set_zero_excluded(); } else { *diskdump_flags &= ~ZERO_EXCLUDED; sadump_unset_zero_excluded(); } } else goto invalid_set_command; } if (runtime) fprintf(fp, "zero_excluded: %s\n", (*diskdump_flags & ZERO_EXCLUDED) || sadump_is_zero_excluded() ? "on" : "off"); return; } else if (STREQ(args[optind], "offline")) { if (args[optind+1]) { optind++; if (from_rc_file) already_done(); else if (STREQ(args[optind], "show")) pc->flags2 &= ~OFFLINE_HIDE; else if(STREQ(args[optind], "hide")) pc->flags2 |= OFFLINE_HIDE; else goto invalid_set_command; } if (runtime) fprintf(fp, " offline: %s\n", pc->flags2 & OFFLINE_HIDE ? "hide" : "show"); return; } else if (STREQ(args[optind], "redzone")) { if (args[optind+1]) { optind++; if (STREQ(args[optind], "on")) pc->flags2 |= REDZONE; else if (STREQ(args[optind], "off")) pc->flags2 &= ~REDZONE; else if (IS_A_NUMBER(args[optind])) { value = stol(args[optind], FAULT_ON_ERROR, NULL); if (value) pc->flags2 |= REDZONE; else pc->flags2 &= ~REDZONE; } else goto invalid_set_command; } if (runtime) { fprintf(fp, "redzone: %s\n", pc->flags2 & REDZONE ? "on" : "off"); } return; } else if (STREQ(args[optind], "error")) { if (args[optind+1]) { optind++; if (!set_error(args[optind])) return; } if (runtime) { fprintf(fp, "error: %s\n", pc->error_path); } return; } else if (XEN_HYPER_MODE()) { error(FATAL, "invalid argument for the Xen hypervisor\n"); } else if (pc->flags & MINIMAL_MODE) { error(FATAL, "invalid argument in minimal mode\n"); } else if (runtime) { ulong pid, task; switch (str_to_context(args[optind], &value, &tc)) { case STR_PID: pid = value; task = NO_TASK; if (set_context(task, pid)) show_context(CURRENT_CONTEXT()); break; case STR_TASK: task = value; pid = NO_PID; if (set_context(task, pid)) show_context(CURRENT_CONTEXT()); break; case STR_INVALID: error(INFO, "invalid task or pid value: %s\n", args[optind]); break; } } else console("set: ignoring \"%s\"\n", args[optind]); optind++; } return; invalid_set_command: sprintf(buf, "invalid command"); if (!runtime) sprintf(&buf[strlen(buf)], " in .%src file", pc->program_name); strcat(buf, ": "); for (i = 0; i < argcnt; i++) sprintf(&buf[strlen(buf)], "%s ", args[i]); strcat(buf, "\n"); if (extra_message) strcat(buf, extra_message); error(runtime ? 
FATAL : INFO, buf); #undef defer #undef already_done #undef ignore } /* * Display the set of settable internal variables. */ static void show_options(void) { char buf[BUFSIZE]; fprintf(fp, " scroll: %s ", pc->flags & SCROLL ? "on" : "off"); switch (pc->scroll_command) { case SCROLL_LESS: fprintf(fp, "(/usr/bin/less)\n"); break; case SCROLL_MORE: fprintf(fp, "(/bin/more)\n"); break; case SCROLL_NONE: fprintf(fp, "(none)\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER")); break; } fprintf(fp, " radix: %d (%s)\n", pc->output_radix, pc->output_radix == 10 ? "decimal" : pc->output_radix == 16 ? "hexadecimal" : "unknown"); fprintf(fp, " refresh: %s\n", tt->flags & TASK_REFRESH ? "on" : "off"); fprintf(fp, " print_max: %d\n", *gdb_print_max); fprintf(fp, " print_array: %s\n", *gdb_prettyprint_arrays ? "on" : "off"); fprintf(fp, " console: %s\n", pc->console ? pc->console : "(not assigned)"); fprintf(fp, " debug: %ld\n", pc->debug); fprintf(fp, " core: %s\n", pc->flags & DROP_CORE ? "on" : "off"); fprintf(fp, " hash: %s\n", pc->flags & HASH ? "on" : "off"); fprintf(fp, " silent: %s\n", pc->flags & SILENT ? "on" : "off"); fprintf(fp, " edit: %s\n", pc->editing_mode); fprintf(fp, " namelist: %s\n", pc->namelist); fprintf(fp, " dumpfile: %s\n", pc->dumpfile); fprintf(fp, " unwind: %s\n", kt->flags & DWARF_UNWIND ? "on" : "off"); fprintf(fp, " zero_excluded: %s\n", (*diskdump_flags & ZERO_EXCLUDED) || sadump_is_zero_excluded() ? "on" : "off"); fprintf(fp, " null-stop: %s\n", *gdb_stop_print_at_null ? "on" : "off"); fprintf(fp, " gdb: %s\n", pc->flags2 & GDB_CMD_MODE ? "on" : "off"); fprintf(fp, " scope: %lx ", pc->scope); if (pc->scope) fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0)); else fprintf(fp, "(not set)\n"); fprintf(fp, " offline: %s\n", pc->flags2 & OFFLINE_HIDE ? "hide" : "show"); fprintf(fp, " redzone: %s\n", pc->flags2 & REDZONE ? "on" : "off"); fprintf(fp, " error: %s\n", pc->error_path); } /* * Evaluate an expression, which can consist of a single symbol, single value, * or an expression consisting of two values and an operator. If the * expression contains redirection characters, the whole expression must * be enclosed with parentheses. The result is printed in decimal, hex, * octal and binary. Input number values can only be hex or decimal, with * a bias towards decimal (use 0x when necessary). */ void cmd_eval(void) { int flags; int bitflag, longlongflag, longlongflagforce; struct number_option nopt; char buf1[BUFSIZE]; /* * getopt() is not used to avoid confusion with minus sign. */ optind = 1; bitflag = 0; longlongflag = longlongflagforce = 0; BZERO(&nopt, sizeof(struct number_option)); if (STREQ(args[optind], "-lb") || STREQ(args[optind], "-bl")) { longlongflagforce++; bitflag++; optind++; } else if (STREQ(args[optind], "-l")) { longlongflagforce++; optind++; if (STREQ(args[optind], "-b") && args[optind+1]) { optind++; bitflag++; } } else if (STREQ(args[optind], "-b")) { if (STREQ(args[optind+1], "-l")) { if (args[optind+2]) { bitflag++; longlongflagforce++; optind += 2; } else cmd_usage(pc->curcmd, SYNOPSIS); } else if (args[optind+1]) { bitflag++; optind++; } } if (!args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); longlongflag = BITS32() ? TRUE : FALSE; flags = longlongflag ? 
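		/* a 32-bit session evaluates in long long so that 64-bit
		   results are not truncated */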
(LONG_LONG|RETURN_ON_ERROR) : FAULT_ON_ERROR; if(!BITS32()) longlongflagforce = 0; BZERO(buf1, BUFSIZE); buf1[0] = '('; while (args[optind]) { if (*args[optind] == '(') { if (eval_common(args[optind], flags, NULL, &nopt)) print_number(&nopt, bitflag, longlongflagforce); else error(FATAL, "invalid expression: %s\n", args[optind]); return; } else { strcat(buf1, args[optind]); strcat(buf1, " "); } optind++; } clean_line(buf1); strcat(buf1, ")"); if (eval_common(buf1, flags, NULL, &nopt)) print_number(&nopt, bitflag, longlongflagforce); else error(FATAL, "invalid expression: %s\n", buf1); } /* * Pre-check a string for eval-worthiness. This allows callers to avoid * having to encompass a non-whitespace expression with parentheses. * Note that the data being evaluated is not error-checked here, but * rather that it exists in the proper format. */ int can_eval(char *s) { char *op; char *element1, *element2; char work[BUFSIZE]; /* * If we've got a () pair containing any sort of stuff in between, * then presume it's eval-able. It might contain crap, but it * should be sent to eval() regardless. */ if ((FIRSTCHAR(s) == '(') && (count_chars(s, '(') == 1) && (count_chars(s, ')') == 1) && (strlen(s) > 2) && (LASTCHAR(s) == ')')) return TRUE; /* * If the string contains any of the operators except the shifters, * and has any kind of data on either side, it's also eval-able. */ strcpy(work, s); if (!(op = strpbrk(work, "><+-&|*/%^"))) return FALSE; element1 = &work[0]; *op = NULLCHAR; element2 = op+1; if (!strlen(element1) || !strlen(element2)) return FALSE; return TRUE; } /* * Evaluate an expression involving two values and an operator. */ #define OP_ADD (1) #define OP_SUB (2) #define OP_AND (3) #define OP_OR (4) #define OP_MUL (5) #define OP_DIV (6) #define OP_MOD (7) #define OP_SL (8) #define OP_SR (9) #define OP_EXOR (10) #define OP_POWER (11) ulong eval(char *s, int flags, int *errptr) { struct number_option nopt; if (eval_common(s, flags, errptr, &nopt)) { return(nopt.num); } else { switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: error(FATAL, "invalid expression: %s\n", s); case RETURN_ON_ERROR: error(INFO, "invalid expression: %s\n", s); if (errptr) *errptr = TRUE; break; } return UNUSED; } } ulonglong evall(char *s, int flags, int *errptr) { struct number_option nopt; if (BITS32()) flags |= LONG_LONG; if (eval_common(s, flags, errptr, &nopt)) { return(nopt.ll_num); } else { switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: error(FATAL, "invalid expression: %s\n", s); case RETURN_ON_ERROR: error(INFO, "invalid expression: %s\n", s); if (errptr) *errptr = TRUE; break; } return UNUSED; } } int eval_common(char *s, int flags, int *errptr, struct number_option *np) { char *p1, *p2; char *op, opcode; ulong value1; ulong value2; ulonglong ll_value1; ulonglong ll_value2; char work[BUFSIZE]; char *element1; char *element2; struct syment *sp; opcode = 0; value1 = value2 = 0; ll_value1 = ll_value2 = 0; if (strstr(s, "(") || strstr(s, ")")) { p1 = s; if (*p1 != '(') goto malformed; if (LASTCHAR(s) != ')') goto malformed; p2 = &LASTCHAR(s); if (strstr(s, ")") != p2) goto malformed; strcpy(work, p1+1); LASTCHAR(work) = NULLCHAR; if (strstr(work, "(") || strstr(work, ")")) goto malformed; } else strcpy(work, s); if (work[0] == '-') { shift_string_right(work, 1); work[0] = '0'; } if (!(op = strpbrk(work, "#><+-&|*/%^"))) { if (calculate(work, &value1, &ll_value1, flags & (HEX_BIAS|LONG_LONG))) { if (flags & LONG_LONG) { np->ll_num = ll_value1; if (BITS32() && 
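				/* flag values that overflow a 32-bit ulong */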
(ll_value1 > 0xffffffff)) np->retflags |= LONG_LONG; return TRUE; } else { np->num = value1; return TRUE; } } goto malformed; } switch (*op) { case '+': opcode = OP_ADD; break; case '-': opcode = OP_SUB; break; case '&': opcode = OP_AND; break; case '|': opcode = OP_OR; break; case '*': opcode = OP_MUL; break; case '%': opcode = OP_MOD; break; case '/': opcode = OP_DIV; break; case '<': if (*(op+1) != '<') goto malformed; opcode = OP_SL; break; case '>': if (*(op+1) != '>') goto malformed; opcode = OP_SR; break; case '^': opcode = OP_EXOR; break; case '#': opcode = OP_POWER; break; } element1 = &work[0]; *op = NULLCHAR; if ((opcode == OP_SL) || (opcode == OP_SR)) { *(op+1) = NULLCHAR; element2 = op+2; } else element2 = op+1; if (strlen(clean_line(element1)) == 0) goto malformed; if (strlen(clean_line(element2)) == 0) goto malformed; if ((sp = symbol_search(element1))) value1 = ll_value1 = sp->value; else { if (!calculate(element1, &value1, &ll_value1, flags & (HEX_BIAS|LONG_LONG))) goto malformed; if (BITS32() && (ll_value1 > 0xffffffff)) np->retflags |= LONG_LONG; } if ((sp = symbol_search(element2))) value2 = ll_value2 = sp->value; else if (!calculate(element2, &value2, &ll_value2, flags & (HEX_BIAS|LONG_LONG))) goto malformed; if (flags & LONG_LONG) { if (BITS32() && (ll_value2 > 0xffffffff)) np->retflags |= LONG_LONG; switch (opcode) { case OP_ADD: np->ll_num = (ll_value1 + ll_value2); break; case OP_SUB: np->ll_num = (ll_value1 - ll_value2); break; case OP_AND: np->ll_num = (ll_value1 & ll_value2); break; case OP_OR: np->ll_num = (ll_value1 | ll_value2); break; case OP_MUL: np->ll_num = (ll_value1 * ll_value2); break; case OP_DIV: np->ll_num = (ll_value1 / ll_value2); break; case OP_MOD: np->ll_num = (ll_value1 % ll_value2); break; case OP_SL: np->ll_num = (ll_value1 << ll_value2); break; case OP_SR: np->ll_num = (ll_value1 >> ll_value2); break; case OP_EXOR: np->ll_num = (ll_value1 ^ ll_value2); break; case OP_POWER: np->ll_num = ll_power(ll_value1, ll_value2); break; } } else { switch (opcode) { case OP_ADD: np->num = (value1 + value2); break; case OP_SUB: np->num = (value1 - value2); break; case OP_AND: np->num = (value1 & value2); break; case OP_OR: np->num = (value1 | value2); break; case OP_MUL: np->num = (value1 * value2); break; case OP_DIV: np->num = (value1 / value2); break; case OP_MOD: np->num = (value1 % value2); break; case OP_SL: np->num = (value1 << value2); break; case OP_SR: np->num = (value1 >> value2); break; case OP_EXOR: np->num = (value1 ^ value2); break; case OP_POWER: np->num = power(value1, value2); break; } } return TRUE; malformed: return FALSE; } /* * Take string containing a number, and possibly a multiplier, and calculate * its real value. The allowable multipliers are k, K, m, M, g and G, for * kilobytes, megabytes and gigabytes. */ int calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags) { ulong factor, bias; int errflag; int ones_complement; ulong localval; ulonglong ll_localval; struct syment *sp; bias = flags & HEX_BIAS; if (*s == '~') { ones_complement = TRUE; s++; } else ones_complement = FALSE; if ((sp = symbol_search(s))) { if (flags & LONG_LONG) { *llvalue = (ulonglong)sp->value; if (ones_complement) *llvalue = ~(*llvalue); } else *value = ones_complement ? 
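			/* a leading '~' complements the symbol value as well */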
~(sp->value) : sp->value; return TRUE; } factor = 1; errflag = 0; switch (LASTCHAR(s)) { case 'k': case 'K': LASTCHAR(s) = NULLCHAR; if (IS_A_NUMBER(s)) factor = 1024; else return FALSE; break; case 'm': case 'M': LASTCHAR(s) = NULLCHAR; if (IS_A_NUMBER(s)) factor = (1024*1024); else return FALSE; break; case 'g': case 'G': LASTCHAR(s) = NULLCHAR; if (IS_A_NUMBER(s)) factor = (1024*1024*1024); else return FALSE; break; default: if (!IS_A_NUMBER(s)) return FALSE; break; } if (flags & LONG_LONG) { ll_localval = stoll(s, RETURN_ON_ERROR|bias, &errflag); if (errflag) return FALSE; if (ones_complement) *llvalue = ~(ll_localval * factor); else *llvalue = ll_localval * factor; } else { localval = stol(s, RETURN_ON_ERROR|bias, &errflag); if (errflag) return FALSE; if (ones_complement) *value = ~(localval * factor); else *value = localval * factor; } return TRUE; } /* * Print a 32-bit or 64-bit number in hexadecimal, decimal, octal and binary, * also showing the bits set if appropriate. * */ static void print_number(struct number_option *np, int bitflag, int longlongflagforce) { int i; ulong hibit; ulonglong ll_hibit; int ccnt; ulong mask; ulonglong ll_mask; char *hdr = " bits set: "; char buf[BUFSIZE]; int hdrlen; int longlongformat; longlongformat = longlongflagforce; if (!longlongflagforce) { if (BITS32()) { if (np->retflags & LONG_LONG) longlongformat = TRUE; if (np->ll_num > 0xffffffff) longlongformat = TRUE; else np->num = (ulong)np->ll_num; } } if (longlongformat) { ll_hibit = (ulonglong)(1) << ((sizeof(long long)*8)-1); fprintf(fp, "hexadecimal: %llx ", np->ll_num); if (np->ll_num >= KILOBYTES(1)) { if ((np->ll_num % GIGABYTES(1)) == 0) fprintf(fp, "(%lldGB)", np->ll_num / GIGABYTES(1)); else if ((np->ll_num % MEGABYTES(1)) == 0) fprintf(fp, "(%lldMB)", np->ll_num / MEGABYTES(1)); else if ((np->ll_num % KILOBYTES(1)) == 0) fprintf(fp, "(%lldKB)", np->ll_num / KILOBYTES(1)); } fprintf(fp, "\n"); fprintf(fp, " decimal: %llu ", np->ll_num); if ((long long)np->ll_num < 0) fprintf(fp, "(%lld)\n", (long long)np->ll_num); else fprintf(fp, "\n"); fprintf(fp, " octal: %llo\n", np->ll_num); fprintf(fp, " binary: "); for(i = 0, ll_mask = np->ll_num; i < (sizeof(long long)*8); i++, ll_mask <<= 1) if (ll_mask & ll_hibit) fprintf(fp, "1"); else fprintf(fp, "0"); fprintf(fp,"\n"); } else { hibit = (ulong)(1) << ((sizeof(long)*8)-1); fprintf(fp, "hexadecimal: %lx ", np->num); if (np->num >= KILOBYTES(1)) { if ((np->num % GIGABYTES(1)) == 0) fprintf(fp, "(%ldGB)", np->num / GIGABYTES(1)); else if ((np->num % MEGABYTES(1)) == 0) fprintf(fp, "(%ldMB)", np->num / MEGABYTES(1)); else if ((np->num % KILOBYTES(1)) == 0) fprintf(fp, "(%ldKB)", np->num / KILOBYTES(1)); } fprintf(fp, "\n"); fprintf(fp, " decimal: %lu ", np->num); if ((long)np->num < 0) fprintf(fp, "(%ld)\n", (long)np->num); else fprintf(fp, "\n"); fprintf(fp, " octal: %lo\n", np->num); fprintf(fp, " binary: "); for(i = 0, mask = np->num; i < (sizeof(long)*8); i++, mask <<= 1) if (mask & hibit) fprintf(fp, "1"); else fprintf(fp, "0"); fprintf(fp,"\n"); } if (!bitflag) return; hdrlen = strlen(hdr); ccnt = hdrlen; fprintf(fp, "%s", hdr); if (longlongformat) { for (i = 63; i >= 0; i--) { ll_mask = (ulonglong)(1) << i; if (np->ll_num & ll_mask) { sprintf(buf, "%d ", i); fprintf(fp, "%s", buf); ccnt += strlen(buf); if (ccnt >= 77) { fprintf(fp, "\n"); INDENT(strlen(hdr)); ccnt = hdrlen; } } } } else { for (i = BITS()-1; i >= 0; i--) { mask = (ulong)(1) << i; if (np->num & mask) { sprintf(buf, "%d ", i); fprintf(fp, "%s", buf); ccnt += strlen(buf); if 
(ccnt >= 77) { fprintf(fp, "\n"); INDENT(strlen(hdr)); ccnt = hdrlen; } } } } fprintf(fp, "\n"); } /* * Display the contents of a linked list. Minimum requirements are a starting * address, typically of a structure which contains the "next" list entry at * some offset into the structure. The default offset is zero bytes, and need * not be entered if that's the case. Otherwise a number argument that's not * a kernel * virtual address will be understood to be the offset. * Alternatively the offset may be entered in "struct.member" format. Each * item in the list is dumped, and the list will be considered terminated upon * encountering a "next" value that is: * * a NULL pointer. * a pointer to the starting address. * a pointer to the entry pointed to by the starting address. * a pointer to the structure itself. * a pointer to the value specified with the "-e ending_addr" option. * * If the structures are linked using list_head structures, the -h or -H * options must be used. In that case, the "start" address is: * a pointer to the structure that contains the list_head structure (-h), * or a pointer to a LIST_HEAD() structure (-H). * * Given that the contents of the structures containing the next pointers * often contain useful data, the "-s structname" also prints each structure * in the list. * * By default, the list members are hashed to guard against duplicate entries * causing the list to wrap back upon itself. * * WARNING: There's an inordinate amount of work parsing arguments below * in order to maintain backwards compatibility re: not having to use -o, * which gets sticky with zero-based kernel virtual address space. */ void cmd_list(void) { int c; struct list_data list_data, *ld; struct datatype_member struct_member, *sm; struct syment *sp; ulong value, struct_list_offset; sm = &struct_member; ld = &list_data; BZERO(ld, sizeof(struct list_data)); struct_list_offset = 0; while ((c = getopt(argcnt, args, "BHhrs:S:e:o:xdl:")) != EOF) { switch(c) { case 'B': ld->flags |= LIST_BRENT_ALGO; break; case 'H': ld->flags |= LIST_HEAD_FORMAT; ld->flags |= LIST_HEAD_POINTER; break; case 'h': ld->flags |= LIST_HEAD_FORMAT; break; case 'r': ld->flags |= LIST_HEAD_REVERSE; break; case 's': case 'S': if (ld->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); ld->flags |= (c == 's') ? 
LIST_PARSE_MEMBER : LIST_READ_MEMBER; if (count_bits_long(ld->flags & (LIST_PARSE_MEMBER|LIST_READ_MEMBER)) > 1) error(FATAL, "-S and -s options are mutually exclusive\n"); break; case 'l': if (IS_A_NUMBER(optarg)) struct_list_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) struct_list_offset = sm->member_offset; else error(FATAL, "invalid -l option: %s\n", optarg); break; case 'o': if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %d (0x%lx) already entered\n", ld->member_offset, ld->member_offset); else if (IS_A_NUMBER(optarg)) ld->member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) ld->member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); ld->flags |= LIST_OFFSET_ENTERED; break; case 'e': ld->end = htol(optarg, FAULT_ON_ERROR, NULL); break; case 'x': if (ld->flags & LIST_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_16; break; case 'd': if (ld->flags & LIST_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind] && args[optind+1] && args[optind+2]) { error(INFO, "too many arguments\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->structname_args) { ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args); retrieve_list((ulong *)ld->structname, ld->structname_args); hq_close(); ld->struct_list_offset = struct_list_offset; } else if (struct_list_offset) { error(INFO, "-l option can only be used with -s or -S option\n"); cmd_usage(pc->curcmd, SYNOPSIS); } while (args[optind]) { if (strstr(args[optind], ".") && arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) { if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %ld (0x%lx) already entered\n", ld->member_offset, ld->member_offset); ld->member_offset = sm->member_offset; ld->flags |= LIST_OFFSET_ENTERED; } else { /* * Do an inordinate amount of work to avoid -o... * * OK, if it's a symbol, then it has to be a start. */ if ((sp = symbol_search(args[optind]))) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = sp->value; ld->flags |= LIST_START_ENTERED; goto next_arg; } /* * If it's not a symbol nor a number, bail out if it * cannot be evaluated as a start address. */ if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } error(FATAL, "invalid argument: %s\n", args[optind]); } /* * If the start is known, it's got to be an offset. */ if (ld->flags & LIST_START_ENTERED) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; break; } /* * If the offset is known, or there's no subsequent * argument, then it's got to be a start. */ if ((ld->flags & LIST_OFFSET_ENTERED) || !args[optind+1]) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (!IS_KVADDR(value)) error(FATAL, "invalid kernel virtual address: %s\n", args[optind]); ld->start = value; ld->flags |= LIST_START_ENTERED; break; } /* * Neither start nor offset has been entered, and * it's a number. Look ahead to the next argument. 
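 * (Illustrative: in "list 16 modules", the look-ahead resolves "modules"
 * as a symbol, so the leading 16 is taken as the member offset.)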
* If it's a symbol, then this must be an offset. */ if ((sp = symbol_search(args[optind+1]))) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; goto next_arg; } else if ((!IS_A_NUMBER(args[optind+1]) && !can_eval(args[optind+1])) && !strstr(args[optind+1], ".")) error(FATAL, "symbol not found: %s\n", args[optind+1]); /* * Crunch time. We've got two numbers. If they're * both ambiguous we must have zero-based kernel * virtual address space. */ if (COMMON_VADDR_SPACE() && AMBIGUOUS_NUMBER(args[optind]) && AMBIGUOUS_NUMBER(args[optind+1])) { error(INFO, "ambiguous arguments: \"%s\" and \"%s\": -o is required\n", args[optind], args[optind+1]); cmd_usage(pc->curcmd, SYNOPSIS); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; } next_arg: optind++; } if (!(ld->flags & LIST_START_ENTERED)) { error(INFO, "starting address required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((ld->flags & LIST_OFFSET_ENTERED) && ld->struct_list_offset) { error(INFO, "-l and -o are mutually exclusive\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->flags & LIST_HEAD_FORMAT) { ld->list_head_offset = ld->member_offset; if (ld->flags & LIST_HEAD_REVERSE) ld->member_offset = sizeof(void *); else ld->member_offset = 0; if (ld->flags & LIST_HEAD_POINTER) { if (!ld->end) ld->end = ld->start; readmem(ld->start + ld->member_offset, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (ld->start == ld->end) { fprintf(fp, "(empty)\n"); return; } } else ld->start += ld->list_head_offset; } ld->flags &= ~(LIST_OFFSET_ENTERED|LIST_START_ENTERED); ld->flags |= VERBOSE; if (ld->flags & LIST_BRENT_ALGO) c = do_list_no_hash(ld); else { hq_open(); c = do_list(ld); hq_close(); } if (ld->structname_args) FREEBUF(ld->structname); } void dump_struct_members_fast(struct req_entry *e, int radix, ulong p) { unsigned int i; char b[BUFSIZE]; if (!(e && IS_KVADDR(p))) return; if (!radix) radix = *gdb_output_radix; for (i = 0; i < e->count; i++) { if (0 < e->width[i] && (e->width[i] <= 8 || e->is_str[i])) { print_value(e, i, p, e->is_ptr[i] ?
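			/* pointer members are always shown in hex */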
				16 : radix);
		} else if (e->width[i] == 0 || e->width[i] > 8) {
			snprintf(b, BUFSIZE, "%s.%s", e->name, e->member[i]);
			dump_struct_member(b, p, radix);
		}
	}
}

static struct req_entry *
fill_member_offsets(char *arg)
{
	int j;
	char *p, m;
	struct req_entry *e;
	char buf[BUFSIZE];

	if (!(arg && *arg))
		return NULL;

	j = count_chars(arg, ',') + 1;
	e = (struct req_entry *)GETBUF(sizeof(*e));

	e->arg = GETBUF(strlen(arg) + 1);
	strcpy(e->arg, arg);

	m = ((p = strchr(e->arg, '.')) != NULL);
	if (!p++)
		p = e->arg + strlen(e->arg) + 1;

	e->name = GETBUF(p - e->arg);
	strncpy(e->name, e->arg, p - e->arg - 1);

	if (!m)
		return e;

	e->count = count_chars(p, ',') + 1;
	e->width = (ulong *)GETBUF(e->count * sizeof(ulong));
	e->is_ptr = (int *)GETBUF(e->count * sizeof(int));
	e->is_str = (int *)GETBUF(e->count * sizeof(int));
	e->member = (char **)GETBUF(e->count * sizeof(char *));
	e->offset = (ulong *)GETBUF(e->count * sizeof(ulong));

	replace_string(p, ",", ' ');
	parse_line(p, e->member);

	for (j = 0; j < e->count; j++) {
		e->offset[j] = MEMBER_OFFSET(e->name, e->member[j]);
		if (e->offset[j] == INVALID_OFFSET)
			e->offset[j] = ANON_MEMBER_OFFSET(e->name, e->member[j]);
		if (e->offset[j] == INVALID_OFFSET)
			error(FATAL, "Can't get offset of '%s.%s'\n",
				e->name, e->member[j]);

		e->is_ptr[j] = MEMBER_TYPE(e->name, e->member[j]) == TYPE_CODE_PTR;
		e->is_str[j] = is_string(e->name, e->member[j]);

		/* Dirty hack for obtaining size of particular field */
		snprintf(buf, BUFSIZE, "%s + 1", e->member[j]);
		e->width[j] = ANON_MEMBER_OFFSET(e->name, buf) - e->offset[j];
	}

	return e;
}

static void
print_value(struct req_entry *e, unsigned int i, ulong addr, unsigned int radix)
{
	union { uint64_t v64; uint32_t v32; uint16_t v16; uint8_t v8; } v;
	char buf[BUFSIZE];
	struct syment *sym;

	addr += e->offset[i];

	/* Read up to 8 bytes, counters, pointers, etc. */
	if (e->width[i] <= 8 && !readmem(addr, KVADDR, &v, e->width[i],
	    "structure value", RETURN_ON_ERROR | QUIET)) {
		error(INFO, "cannot access member: %s at %lx\n",
			e->member[i], addr);
		return;
	}
	snprintf(buf, BUFSIZE, " %%s = %s%%%s%s",
		(radix == 16 ? "0x" : ""),
		(e->width[i] == 8 ? "l" : ""),
		(radix == 16 ? "x" : "u" ));
	switch (e->width[i]) {
	case 1: fprintf(fp, buf, e->member[i], v.v8); break;
	case 2: fprintf(fp, buf, e->member[i], v.v16); break;
	case 4: fprintf(fp, buf, e->member[i], v.v32); break;
	case 8: fprintf(fp, buf, e->member[i], v.v64); break;
	}

	if (e->is_str[i]) {
		if (e->is_ptr[i]) {
			read_string(v.v64, buf, BUFSIZE);
			fprintf(fp, " \"%s\"", buf);
		} else {
			read_string(addr, buf, e->width[i]);
			fprintf(fp, " %s = \"%s\"", e->member[i], buf);
		}
	} else if ((sym = value_search(v.v64, 0)) && is_symbol_text(sym))
		fprintf(fp, " <%s>", sym->name);

	fprintf(fp, "\n");
}

/*
 * Does the work for cmd_list() and any other function that requires the
 * contents of a linked list. See cmd_list description above for details.
 */
int
do_list(struct list_data *ld)
{
	ulong next, last, first, offset;
	ulong searchfor, readflag;
	int i, count, others, close_hq_on_return;
	unsigned int radix;
	struct req_entry **e = NULL;

	if (CRASHDEBUG(1)) {
		others = 0;
		console(" flags: %lx (", ld->flags);
		if (ld->flags & VERBOSE)
			console("%sVERBOSE", others++ ? "|" : "");
		if (ld->flags & LIST_OFFSET_ENTERED)
			console("%sLIST_OFFSET_ENTERED", others++ ? "|" : "");
		if (ld->flags & LIST_START_ENTERED)
			console("%sLIST_START_ENTERED", others++ ? "|" : "");
		if (ld->flags & LIST_HEAD_FORMAT)
			console("%sLIST_HEAD_FORMAT", others++ ? "|" : "");
		if (ld->flags & LIST_HEAD_POINTER)
			console("%sLIST_HEAD_POINTER", others++ ? "|" : "");
		if (ld->flags & RETURN_ON_DUPLICATE)
			console("%sRETURN_ON_DUPLICATE", others++ ? "|" : "");
		if (ld->flags & RETURN_ON_LIST_ERROR)
			console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : "");
		if (ld->flags & LIST_STRUCT_RADIX_10)
			console("%sLIST_STRUCT_RADIX_10", others++ ? "|" : "");
		if (ld->flags & LIST_STRUCT_RADIX_16)
			console("%sLIST_STRUCT_RADIX_16", others++ ? "|" : "");
		if (ld->flags & LIST_ALLOCATE)
			console("%sLIST_ALLOCATE", others++ ? "|" : "");
		if (ld->flags & LIST_CALLBACK)
			console("%sLIST_CALLBACK", others++ ? "|" : "");
		if (ld->flags & CALLBACK_RETURN)
			console("%sCALLBACK_RETURN", others++ ? "|" : "");
		console(")\n");
		console(" start: %lx\n", ld->start);
		console(" member_offset: %ld\n", ld->member_offset);
		console(" list_head_offset: %ld\n", ld->list_head_offset);
		console(" end: %lx\n", ld->end);
		console(" searchfor: %lx\n", ld->searchfor);
		console(" structname_args: %lx\n", ld->structname_args);
		if (!ld->structname_args)
			console(" structname: (unused)\n");
		for (i = 0; i < ld->structname_args; i++)
			console(" structname[%d]: %s\n", i, ld->structname[i]);
		console(" header: %s\n", ld->header);
		console(" list_ptr: %lx\n", (ulong)ld->list_ptr);
		console(" callback_func: %lx\n", (ulong)ld->callback_func);
		console(" callback_data: %lx\n", (ulong)ld->callback_data);
		console("struct_list_offset: %lx\n", ld->struct_list_offset);
	}

	count = 0;
	searchfor = ld->searchfor;
	ld->searchfor = 0;
	if (ld->flags & LIST_STRUCT_RADIX_10)
		radix = 10;
	else if (ld->flags & LIST_STRUCT_RADIX_16)
		radix = 16;
	else
		radix = 0;
	next = ld->start;

	close_hq_on_return = FALSE;
	if (ld->flags & LIST_ALLOCATE) {
		if (!hq_is_open()) {
			hq_open();
			close_hq_on_return = TRUE;
		} else if (hq_is_inuse()) {
			error(ld->flags & RETURN_ON_LIST_ERROR ? INFO : FATAL,
				"\ndo_list: hash queue is in use?\n");
			return -1;
		}
	}

	readflag = ld->flags & RETURN_ON_LIST_ERROR ?
(RETURN_ON_ERROR|QUIET) : FAULT_ON_ERROR; if (!readmem(next + ld->member_offset, KVADDR, &first, sizeof(void *), "first list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (ld->header) fprintf(fp, "%s", ld->header); offset = ld->list_head_offset + ld->struct_list_offset; if (ld->structname && (ld->flags & LIST_READ_MEMBER)) { e = (struct req_entry **)GETBUF(sizeof(*e) * ld->structname_args); for (i = 0; i < ld->structname_args; i++) e[i] = fill_member_offsets(ld->structname[i]); } while (1) { if (ld->flags & VERBOSE) { fprintf(fp, "%lx\n", next - ld->list_head_offset); if (ld->structname) { for (i = 0; i < ld->structname_args; i++) { switch (count_chars(ld->structname[i], '.')) { case 0: dump_struct(ld->structname[i], next - offset, radix); break; default: if (ld->flags & LIST_PARSE_MEMBER) dump_struct_members(ld, i, next); else if (ld->flags & LIST_READ_MEMBER) dump_struct_members_fast(e[i], radix, next - offset); break; } } } } if (next && !hq_enter(next - ld->list_head_offset)) { if (ld->flags & (RETURN_ON_DUPLICATE|RETURN_ON_LIST_ERROR)) { error(INFO, "\nduplicate list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } error(FATAL, "\nduplicate list entry: %lx\n", next); } if ((searchfor == next) || (searchfor == (next - ld->list_head_offset))) ld->searchfor = searchfor; count++; last = next; if ((ld->flags & LIST_CALLBACK) && ld->callback_func((void *)(next - ld->list_head_offset), ld->callback_data) && (ld->flags & CALLBACK_RETURN)) break; if (!readmem(next + ld->member_offset, KVADDR, &next, sizeof(void *), "list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (next == 0) { if (ld->flags & LIST_HEAD_FORMAT) { error(INFO, "\ninvalid list entry: 0\n"); if (close_hq_on_return) hq_close(); return -1; } if (CRASHDEBUG(1)) console("do_list end: next:%lx\n", next); break; } if (next == ld->end) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == end:%lx\n", next, ld->end); break; } if (next == ld->start) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == start:%lx\n", next, ld->start); break; } if (next == last) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == last:%lx\n", next, last); break; } if ((next == first) && (count != 1)) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == first:%lx (count %d)\n", next, last, count); break; } } if (CRASHDEBUG(1)) console("do_list count: %d\n", count); if (ld->flags & LIST_ALLOCATE) { ld->list_ptr = (ulong *)GETBUF(count * sizeof(void *)); count = retrieve_list(ld->list_ptr, count); if (close_hq_on_return) hq_close(); } return count; } static void do_list_debug_entry(struct list_data *ld) { int i, others; if (CRASHDEBUG(1)) { others = 0; console(" flags: %lx (", ld->flags); if (ld->flags & VERBOSE) console("%sVERBOSE", others++ ? "|" : ""); if (ld->flags & LIST_OFFSET_ENTERED) console("%sLIST_OFFSET_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_START_ENTERED) console("%sLIST_START_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_FORMAT) console("%sLIST_HEAD_FORMAT", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_POINTER) console("%sLIST_HEAD_POINTER", others++ ? "|" : ""); if (ld->flags & RETURN_ON_DUPLICATE) console("%sRETURN_ON_DUPLICATE", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? 
"|" : ""); if (ld->flags & LIST_STRUCT_RADIX_10) console("%sLIST_STRUCT_RADIX_10", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_16) console("%sLIST_STRUCT_RADIX_16", others++ ? "|" : ""); if (ld->flags & LIST_ALLOCATE) console("%sLIST_ALLOCATE", others++ ? "|" : ""); if (ld->flags & LIST_CALLBACK) console("%sLIST_CALLBACK", others++ ? "|" : ""); if (ld->flags & CALLBACK_RETURN) console("%sCALLBACK_RETURN", others++ ? "|" : ""); console(")\n"); console(" start: %lx\n", ld->start); console(" member_offset: %ld\n", ld->member_offset); console(" list_head_offset: %ld\n", ld->list_head_offset); console(" end: %lx\n", ld->end); console(" searchfor: %lx\n", ld->searchfor); console(" structname_args: %lx\n", ld->structname_args); if (!ld->structname_args) console(" structname: (unused)\n"); for (i = 0; i < ld->structname_args; i++) console(" structname[%d]: %s\n", i, ld->structname[i]); console(" header: %s\n", ld->header); console(" list_ptr: %lx\n", (ulong)ld->list_ptr); console(" callback_func: %lx\n", (ulong)ld->callback_func); console(" callback_data: %lx\n", (ulong)ld->callback_data); console("struct_list_offset: %lx\n", ld->struct_list_offset); } } static void do_list_output_struct(struct list_data *ld, ulong next, ulong offset, unsigned int radix, struct req_entry **e) { int i; for (i = 0; i < ld->structname_args; i++) { switch (count_chars(ld->structname[i], '.')) { case 0: dump_struct(ld->structname[i], next - offset, radix); break; default: if (ld->flags & LIST_PARSE_MEMBER) dump_struct_members(ld, i, next); else if (ld->flags & LIST_READ_MEMBER) dump_struct_members_fast(e[i], radix, next - offset); break; } } } static int do_list_no_hash_readmem(struct list_data *ld, ulong *next_ptr, ulong readflag) { if (!readmem(*next_ptr + ld->member_offset, KVADDR, next_ptr, sizeof(void *), "list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", *next_ptr); return -1; } return 0; } static ulong brent_x; /* tortoise */ static ulong brent_y; /* hare */ static ulong brent_r; /* power */ static ulong brent_lambda; /* loop length */ static ulong brent_mu; /* distance to start of loop */ static ulong brent_loop_detect; static ulong brent_loop_exit; /* * 'ptr': representative of x or y; modified on return */ static int brent_f(ulong *ptr, struct list_data *ld, ulong readflag) { return do_list_no_hash_readmem(ld, ptr, readflag); } /* * Similar to do_list() but without the hash_table or LIST_ALLOCATE. * Useful for the 'list' command and other callers needing faster list * enumeration. */ int do_list_no_hash(struct list_data *ld) { ulong next, last, first, offset; ulong searchfor, readflag; int i, count, ret; unsigned int radix; struct req_entry **e = NULL; do_list_debug_entry(ld); count = 0; searchfor = ld->searchfor; ld->searchfor = 0; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; next = ld->start; readflag = ld->flags & RETURN_ON_LIST_ERROR ? 
(RETURN_ON_ERROR|QUIET) : FAULT_ON_ERROR; if (!readmem(next + ld->member_offset, KVADDR, &first, sizeof(void *), "first list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); return -1; } if (ld->header) fprintf(fp, "%s", ld->header); offset = ld->list_head_offset + ld->struct_list_offset; if (ld->structname && (ld->flags & LIST_READ_MEMBER)) { e = (struct req_entry **)GETBUF(sizeof(*e) * ld->structname_args); for (i = 0; i < ld->structname_args; i++) e[i] = fill_member_offsets(ld->structname[i]); } brent_loop_detect = brent_loop_exit = 0; brent_lambda = 0; brent_r = 2; brent_x = brent_y = next; ret = brent_f(&brent_y, ld, readflag); if (ret == -1) return -1; while (1) { if (!brent_loop_detect && ld->flags & VERBOSE) { fprintf(fp, "%lx\n", next - ld->list_head_offset); if (ld->structname) { do_list_output_struct(ld, next, offset, radix, e); } } if (next && brent_loop_exit) { if (ld->flags & (RETURN_ON_DUPLICATE|RETURN_ON_LIST_ERROR)) { error(INFO, "\nduplicate list entry: %lx\n", brent_x); return -1; } error(FATAL, "\nduplicate list entry: %lx\n", brent_x); } if ((searchfor == next) || (searchfor == (next - ld->list_head_offset))) ld->searchfor = searchfor; count++; last = next; if ((ld->flags & LIST_CALLBACK) && ld->callback_func((void *)(next - ld->list_head_offset), ld->callback_data) && (ld->flags & CALLBACK_RETURN)) break; ret = do_list_no_hash_readmem(ld, &next, readflag); if (ret == -1) return -1; if (!brent_loop_detect) { if (count > 1 && brent_x == brent_y) { brent_loop_detect = 1; error(INFO, "loop detected, loop length: %ld\n", brent_lambda); /* reset x and y to start; advance y loop length */ brent_mu = 0; brent_x = brent_y = ld->start; while (brent_lambda--) { ret = brent_f(&brent_y, ld, readflag); if (ret == -1) return -1; } } else { if (brent_r == brent_lambda) { brent_x = brent_y; brent_r *= 2; brent_lambda = 0; } brent_y = next; brent_lambda++; } } else { if (!brent_loop_exit && brent_x == brent_y) { brent_loop_exit = 1; error(INFO, "length from start to loop: %lx", brent_mu); } else { ret = brent_f(&brent_x, ld, readflag); if (ret == -1) return -1; ret = brent_f(&brent_y, ld, readflag); if (ret == -1) return -1; brent_mu++; } } if (next == 0) { if (ld->flags & LIST_HEAD_FORMAT) { error(INFO, "\ninvalid list entry: 0\n"); return -1; } if (CRASHDEBUG(1)) console("do_list end: next:%lx\n", next); break; } if (next == ld->end) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == end:%lx\n", next, ld->end); break; } if (next == ld->start) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == start:%lx\n", next, ld->start); break; } if (next == last) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == last:%lx\n", next, last); break; } if ((next == first) && (count != 1)) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == first:%lx (count %d)\n", next, last, count); break; } } if (CRASHDEBUG(1)) console("do_list count: %d\n", count); return count; } /* * Issue a dump_struct_member() call for one or more structure * members. 
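 *  (Illustrative member list: "task_struct.pid,comm" dumps both members
 *  for each entry on the list.)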
 *  Multiple members are passed in a comma-separated list using the format:
 *
 *      struct.member1,member2,member3
 */
void
dump_struct_members(struct list_data *ld, int idx, ulong next)
{
	int i, argc;
	char *p1, *p2;
	char *structname, *members;
	char *arglist[MAXARGS];
	unsigned int radix;

	if (ld->flags & LIST_STRUCT_RADIX_10)
		radix = 10;
	else if (ld->flags & LIST_STRUCT_RADIX_16)
		radix = 16;
	else
		radix = 0;

	structname = GETBUF(strlen(ld->structname[idx])+1);
	members = GETBUF(strlen(ld->structname[idx])+1);

	strcpy(structname, ld->structname[idx]);
	p1 = strstr(structname, ".") + 1;

	p2 = strstr(ld->structname[idx], ".") + 1;
	strcpy(members, p2);
	replace_string(members, ",", ' ');
	argc = parse_line(members, arglist);

	for (i = 0; i < argc; i++) {
		*p1 = NULLCHAR;
		strcat(structname, arglist[i]);
		dump_struct_member(structname,
			next - ld->list_head_offset - ld->struct_list_offset,
			radix);
	}

	FREEBUF(structname);
	FREEBUF(members);
}

#define RADIXTREE_REQUEST (0x1)
#define RBTREE_REQUEST (0x2)
#define XARRAY_REQUEST (0x4)

void
cmd_tree()
{
	int c, type_flag, others;
	long root_offset;
	struct tree_data tree_data, *td;
	struct datatype_member struct_member, *sm;
	struct syment *sp;
	ulong value;

	type_flag = 0;
	root_offset = 0;
	sm = &struct_member;
	td = &tree_data;
	BZERO(td, sizeof(struct tree_data));

	while ((c = getopt(argcnt, args, "xdt:r:o:s:S:plN")) != EOF) {
		switch (c) {
		case 't':
			if (type_flag & (RADIXTREE_REQUEST|RBTREE_REQUEST|XARRAY_REQUEST)) {
				error(INFO,
					"multiple tree types may not be entered\n");
				cmd_usage(pc->curcmd, SYNOPSIS);
			}
			if (STRNEQ(optarg, "ra")) {
				if (MEMBER_EXISTS("radix_tree_root", "xa_head"))
					type_flag = XARRAY_REQUEST;
				else
					type_flag = RADIXTREE_REQUEST;
			} else if (STRNEQ(optarg, "rb"))
				type_flag = RBTREE_REQUEST;
			else if (STRNEQ(optarg, "x"))
				type_flag = XARRAY_REQUEST;
			else {
				error(INFO, "invalid tree type: %s\n", optarg);
				cmd_usage(pc->curcmd, SYNOPSIS);
			}
			break;
		case 'l':
			td->flags |= TREE_LINEAR_ORDER;
			break;
		case 'r':
			if (td->flags & TREE_ROOT_OFFSET_ENTERED)
				error(FATAL,
				    "root offset value %ld (0x%lx) already entered\n",
					root_offset, root_offset);
			else if (IS_A_NUMBER(optarg))
				root_offset = stol(optarg, FAULT_ON_ERROR, NULL);
			else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1)
				root_offset = sm->member_offset;
			else
				error(FATAL, "invalid -r argument: %s\n", optarg);
			td->flags |= TREE_ROOT_OFFSET_ENTERED;
			break;
		case 'o':
			if (td->flags & TREE_NODE_OFFSET_ENTERED)
				error(FATAL,
				    "node offset value %ld (0x%lx) already entered\n",
					td->node_member_offset, td->node_member_offset);
			else if (IS_A_NUMBER(optarg))
				td->node_member_offset = stol(optarg, FAULT_ON_ERROR, NULL);
			else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1)
				td->node_member_offset = sm->member_offset;
			else
				error(FATAL, "invalid -o argument: %s\n", optarg);
			td->flags |= TREE_NODE_OFFSET_ENTERED;
			break;
		case 's':
		case 'S':
			if (td->structname_args++ == 0)
				hq_open();
			hq_enter((ulong)optarg);
			td->flags |= (c == 's') ?
TREE_PARSE_MEMBER : TREE_READ_MEMBER; if (count_bits_long(td->flags & (TREE_PARSE_MEMBER|TREE_READ_MEMBER)) > 1) error(FATAL, "-S and -s options are mutually exclusive\n"); break; case 'p': td->flags |= TREE_POSITION_DISPLAY; break; case 'N': td->flags |= TREE_NODE_POINTER; break; case 'x': if (td->flags & TREE_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_16; break; case 'd': if (td->flags & TREE_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((type_flag & (XARRAY_REQUEST|RADIXTREE_REQUEST)) && (td->flags & TREE_LINEAR_ORDER)) error(FATAL, "-l option is not applicable to %s\n", type_flag & RADIXTREE_REQUEST ? "radix trees" : "Xarrays"); if ((type_flag & (XARRAY_REQUEST|RADIXTREE_REQUEST)) && (td->flags & TREE_NODE_OFFSET_ENTERED)) error(FATAL, "-o option is not applicable to %s\n", type_flag & RADIXTREE_REQUEST ? "radix trees" : "Xarrays"); if ((td->flags & TREE_ROOT_OFFSET_ENTERED) && (td->flags & TREE_NODE_POINTER)) error(FATAL, "-r and -N options are mutually exclusive\n"); if (!args[optind]) { error(INFO, "a starting address is required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((sp = symbol_search(args[optind]))) { td->start = sp->value; goto next_arg; } if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); next_arg: if (args[optind+1]) { error(INFO, "too many arguments entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (td->structname_args) { td->structname = (char **)GETBUF(sizeof(char *) * td->structname_args); retrieve_list((ulong *)td->structname, td->structname_args); hq_close(); } if (!(td->flags & TREE_NODE_POINTER)) td->start = td->start + root_offset; if (CRASHDEBUG(1)) { others = 0; fprintf(fp, " flags: %lx (", td->flags); if (td->flags & TREE_ROOT_OFFSET_ENTERED) fprintf(fp, "%sTREE_ROOT_OFFSET_ENTERED", others++ ? "|" : ""); if (td->flags & TREE_NODE_OFFSET_ENTERED) fprintf(fp, "%sTREE_NODE_OFFSET_ENTERED", others++ ? "|" : ""); if (td->flags & TREE_NODE_POINTER) fprintf(fp, "%sTREE_NODE_POINTER", others++ ? "|" : ""); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, "%sTREE_POSITION_DISPLAY", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_RADIX_10) fprintf(fp, "%sTREE_STRUCT_RADIX_10", others++ ? "|" : ""); if (td->flags & TREE_STRUCT_RADIX_16) fprintf(fp, "%sTREE_STRUCT_RADIX_16", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " type: "); if (type_flag & RADIXTREE_REQUEST) fprintf(fp, "radix\n"); else if (type_flag & XARRAY_REQUEST) fprintf(fp, "xarray\n"); else fprintf(fp, "red-black%s", type_flag & RBTREE_REQUEST ? "\n" : " (default)\n"); fprintf(fp, " node pointer: %s\n", td->flags & TREE_NODE_POINTER ? 
"yes" : "no"); fprintf(fp, " start: %lx\n", td->start); fprintf(fp, "node_member_offset: %ld\n", td->node_member_offset); fprintf(fp, " structname_args: %d\n", td->structname_args); fprintf(fp, " count: %d\n", td->count); } td->flags &= ~TREE_NODE_OFFSET_ENTERED; td->flags |= VERBOSE; hq_open(); if (type_flag & RADIXTREE_REQUEST) do_rdtree(td); else if (type_flag & XARRAY_REQUEST) do_xatree(td); else do_rbtree(td); hq_close(); if (td->structname_args) FREEBUF(td->structname); } static ulong RADIX_TREE_MAP_SHIFT = UNINITIALIZED; static ulong RADIX_TREE_MAP_SIZE = UNINITIALIZED; static ulong RADIX_TREE_MAP_MASK = UNINITIALIZED; #define RADIX_TREE_ENTRY_MASK 3UL #define RADIX_TREE_INTERNAL_NODE 1UL static void do_radix_tree_iter(ulong node, uint height, char *path, ulong index, struct radix_tree_ops *ops) { uint off; if (!hq_enter(node)) error(FATAL, "\nduplicate tree node: %lx\n", node); for (off = 0; off < RADIX_TREE_MAP_SIZE; off++) { ulong slot; ulong shift = (height - 1) * RADIX_TREE_MAP_SHIFT; readmem(node + OFFSET(radix_tree_node_slots) + sizeof(void *) * off, KVADDR, &slot, sizeof(void *), "radix_tree_node.slot[off]", FAULT_ON_ERROR); if (!slot) continue; if (slot & RADIX_TREE_INTERNAL_NODE) slot &= ~RADIX_TREE_INTERNAL_NODE; if (height == 1) ops->entry(node, slot, path, index | off, ops->private); else { ulong child_index = index | (off << shift); char child_path[BUFSIZE]; sprintf(child_path, "%s/%d", path, off); do_radix_tree_iter(slot, height - 1, child_path, child_index, ops); } } } int do_radix_tree_traverse(ulong ptr, int is_root, struct radix_tree_ops *ops) { static ulong max_height = UNINITIALIZED; ulong node_p; long nlen; uint height, is_internal; unsigned char shift; char path[BUFSIZE]; if (!VALID_STRUCT(radix_tree_root) || !VALID_STRUCT(radix_tree_node) || ((!VALID_MEMBER(radix_tree_root_height) || !VALID_MEMBER(radix_tree_root_rnode) || !VALID_MEMBER(radix_tree_node_slots) || !ARRAY_LENGTH(height_to_maxindex)) && (!VALID_MEMBER(radix_tree_root_rnode) || !VALID_MEMBER(radix_tree_node_shift) || !VALID_MEMBER(radix_tree_node_slots) || !ARRAY_LENGTH(height_to_maxnodes)))) error(FATAL, "radix trees do not exist or have changed " "their format\n"); if (RADIX_TREE_MAP_SHIFT == UNINITIALIZED) { if (!(nlen = MEMBER_SIZE("radix_tree_node", "slots"))) error(FATAL, "cannot determine length of " "radix_tree_node.slots[] array\n"); nlen /= sizeof(void *); RADIX_TREE_MAP_SHIFT = ffsl(nlen) - 1; RADIX_TREE_MAP_SIZE = (1UL << RADIX_TREE_MAP_SHIFT); RADIX_TREE_MAP_MASK = (RADIX_TREE_MAP_SIZE-1); if (ARRAY_LENGTH(height_to_maxindex)) max_height = ARRAY_LENGTH(height_to_maxindex); else max_height = ARRAY_LENGTH(height_to_maxnodes); } height = 0; if (!is_root) { node_p = ptr; if (node_p & RADIX_TREE_INTERNAL_NODE) node_p &= ~RADIX_TREE_INTERNAL_NODE; if (VALID_MEMBER(radix_tree_node_height)) { readmem(node_p + OFFSET(radix_tree_node_height), KVADDR, &height, sizeof(uint), "radix_tree_node height", FAULT_ON_ERROR); } else if (VALID_MEMBER(radix_tree_node_shift)) { readmem(node_p + OFFSET(radix_tree_node_shift), KVADDR, &shift, sizeof(shift), "radix_tree_node shift", FAULT_ON_ERROR); height = (shift / RADIX_TREE_MAP_SHIFT) + 1; } else error(FATAL, "-N option is not supported or applicable" " for radix trees on this architecture or kernel\n"); if (height > max_height) goto error_height; } else { if (VALID_MEMBER(radix_tree_root_height)) { readmem(ptr + OFFSET(radix_tree_root_height), KVADDR, &height, sizeof(uint), "radix_tree_root height", FAULT_ON_ERROR); } readmem(ptr + 
OFFSET(radix_tree_root_rnode), KVADDR, &node_p, sizeof(void *), "radix_tree_root rnode", FAULT_ON_ERROR); is_internal = (node_p & RADIX_TREE_INTERNAL_NODE); if (node_p & RADIX_TREE_INTERNAL_NODE) node_p &= ~RADIX_TREE_INTERNAL_NODE; if (is_internal && VALID_MEMBER(radix_tree_node_shift)) { readmem(node_p + OFFSET(radix_tree_node_shift), KVADDR, &shift, sizeof(shift), "radix_tree_node shift", FAULT_ON_ERROR); height = (shift / RADIX_TREE_MAP_SHIFT) + 1; } if (height > max_height) { node_p = ptr; goto error_height; } } if (CRASHDEBUG(1)) { fprintf(fp, "radix_tree_node.slots[%ld]\n", RADIX_TREE_MAP_SIZE); fprintf(fp, "max_height %ld: ", max_height); fprintf(fp, "\n"); fprintf(fp, "pointer at %lx (is_root? %s):\n", node_p, is_root ? "yes" : "no"); if (is_root) dump_struct("radix_tree_root", ptr, RADIX(ops->radix)); else dump_struct("radix_tree_node", node_p, RADIX(ops->radix)); } if (height == 0) { strcpy(path, "direct"); ops->entry(node_p, node_p, path, 0, ops->private); } else { strcpy(path, "root"); do_radix_tree_iter(node_p, height, path, 0, ops); } return 0; error_height: fprintf(fp, "radix_tree_node at %lx\n", node_p); dump_struct("radix_tree_node", node_p, RADIX(ops->radix)); error(FATAL, "height %d is greater than " "maximum radix tree height index %ld\n", height, max_height); return -1; } static ulong XA_CHUNK_SHIFT = UNINITIALIZED; static ulong XA_CHUNK_SIZE = UNINITIALIZED; static ulong XA_CHUNK_MASK = UNINITIALIZED; static void do_xarray_iter(ulong node, uint height, char *path, ulong index, struct xarray_ops *ops) { uint off; if (!hq_enter(node)) error(FATAL, "\nduplicate tree node: %lx\n", node); for (off = 0; off < XA_CHUNK_SIZE; off++) { ulong slot; ulong shift = (height - 1) * XA_CHUNK_SHIFT; readmem(node + OFFSET(xa_node_slots) + sizeof(void *) * off, KVADDR, &slot, sizeof(void *), "xa_node.slots[off]", FAULT_ON_ERROR); if (!slot) continue; if ((slot & XARRAY_TAG_MASK) == XARRAY_TAG_INTERNAL) slot &= ~XARRAY_TAG_INTERNAL; if (height == 1) ops->entry(node, slot, path, index | off, ops->private); else { ulong child_index = index | (off << shift); char child_path[BUFSIZE]; sprintf(child_path, "%s/%d", path, off); do_xarray_iter(slot, height - 1, child_path, child_index, ops); } } } int do_xarray_traverse(ulong ptr, int is_root, struct xarray_ops *ops) { ulong node_p; long nlen; uint height, is_internal; unsigned char shift; char path[BUFSIZE]; if (!VALID_STRUCT(xarray) || !VALID_STRUCT(xa_node) || !VALID_MEMBER(xarray_xa_head) || !VALID_MEMBER(xa_node_slots) || !VALID_MEMBER(xa_node_shift)) error(FATAL, "xarray facility does not exist or has changed its format\n"); if (XA_CHUNK_SHIFT == UNINITIALIZED) { if ((nlen = MEMBER_SIZE("xa_node", "slots")) <= 0) error(FATAL, "cannot determine length of xa_node.slots[] array\n"); nlen /= sizeof(void *); XA_CHUNK_SHIFT = ffsl(nlen) - 1; XA_CHUNK_SIZE = (1UL << XA_CHUNK_SHIFT); XA_CHUNK_MASK = (XA_CHUNK_SIZE-1); } height = 0; if (!is_root) { node_p = ptr; if ((node_p & XARRAY_TAG_MASK) == XARRAY_TAG_INTERNAL) node_p &= ~XARRAY_TAG_MASK; if (VALID_MEMBER(xa_node_shift)) { readmem(node_p + OFFSET(xa_node_shift), KVADDR, &shift, sizeof(shift), "xa_node shift", FAULT_ON_ERROR); height = (shift / XA_CHUNK_SHIFT) + 1; } else error(FATAL, "-N option is not supported or applicable" " for xarrays on this architecture or kernel\n"); } else { readmem(ptr + OFFSET(xarray_xa_head), KVADDR, &node_p, sizeof(void *), "xarray xa_head", FAULT_ON_ERROR); is_internal = ((node_p & XARRAY_TAG_MASK) == XARRAY_TAG_INTERNAL); if (node_p & XARRAY_TAG_MASK) node_p &= 
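		/*
		 * (Illustrative arithmetic: with 64-slot xa_nodes,
		 * XA_CHUNK_SHIFT is ffsl(64) - 1 = 6, so a node shift of
		 * 12 yields height (12 / 6) + 1 = 3 levels below the node.)
		 */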
~XARRAY_TAG_MASK; if (is_internal && VALID_MEMBER(xa_node_shift)) { readmem(node_p + OFFSET(xa_node_shift), KVADDR, &shift, sizeof(shift), "xa_node shift", FAULT_ON_ERROR); height = (shift / XA_CHUNK_SHIFT) + 1; } } if (CRASHDEBUG(1)) { fprintf(fp, "xa_node.slots[%ld]\n", XA_CHUNK_SIZE); fprintf(fp, "pointer at %lx (is_root? %s):\n", node_p, is_root ? "yes" : "no"); if (is_root) dump_struct("xarray", ptr, RADIX(ops->radix)); else dump_struct("xa_node", node_p, RADIX(ops->radix)); } if (height == 0) { strcpy(path, "direct"); ops->entry(node_p, node_p, path, 0, ops->private); } else { strcpy(path, "root"); do_xarray_iter(node_p, height, path, 0, ops); } return 0; } static void do_rdtree_entry(ulong node, ulong slot, const char *path, ulong index, void *private) { struct tree_data *td = private; static struct req_entry **e = NULL; uint print_radix; int i; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } td->count++; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", slot); if (td->flags & TREE_POSITION_DISPLAY) { fprintf(fp, " index: %ld position: %s/%ld\n", index, path, index & RADIX_TREE_MAP_MASK); } if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch (count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], slot, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, slot); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, slot); break; } } } } int do_rdtree(struct tree_data *td) { struct radix_tree_ops ops = { .entry = do_rdtree_entry, .private = td, }; int is_root = !(td->flags & TREE_NODE_POINTER); if (td->flags & TREE_STRUCT_RADIX_10) ops.radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) ops.radix = 16; else ops.radix = 0; do_radix_tree_traverse(td->start, is_root, &ops); return 0; } static void do_xarray_entry(ulong node, ulong slot, const char *path, ulong index, void *private) { struct tree_data *td = private; static struct req_entry **e = NULL; uint print_radix; int i; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } td->count++; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", slot); if (td->flags & TREE_POSITION_DISPLAY) { fprintf(fp, " index: %ld position: %s/%ld\n", index, path, index & XA_CHUNK_MASK); } if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch (count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], slot, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, slot); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, slot); break; } } } } int do_xatree(struct tree_data *td) { struct xarray_ops ops = { .entry = do_xarray_entry, .private = td, }; int 
is_root = !(td->flags & TREE_NODE_POINTER); if (td->flags & TREE_STRUCT_RADIX_10) ops.radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) ops.radix = 16; else ops.radix = 0; do_xarray_traverse(td->start, is_root, &ops); return 0; } int do_rbtree(struct tree_data *td) { ulong start; char pos[BUFSIZE]; if (!VALID_MEMBER(rb_root_rb_node) || !VALID_MEMBER(rb_node_rb_left) || !VALID_MEMBER(rb_node_rb_right)) error(FATAL, "red-black trees do not exist or have changed " "their format\n"); sprintf(pos, "root"); if (td->flags & TREE_NODE_POINTER) start = td->start; else readmem(td->start + OFFSET(rb_root_rb_node), KVADDR, &start, sizeof(void *), "rb_root rb_node", FAULT_ON_ERROR); rbtree_iteration(start, td, pos); return td->count; } void rbtree_iteration(ulong node_p, struct tree_data *td, char *pos) { int i; uint print_radix; ulong struct_p, new_p, test_p; char new_pos[BUFSIZE]; static struct req_entry **e; if (!node_p) return; if (!td->count && td->structname_args) { /* * Retrieve all members' info only once (count == 0) * After last iteration all memory will be freed up */ e = (struct req_entry **)GETBUF(sizeof(*e) * td->structname_args); for (i = 0; i < td->structname_args; i++) e[i] = fill_member_offsets(td->structname[i]); } if (hq_enter(node_p)) td->count++; else error(FATAL, "\nduplicate tree entry: %lx\n", node_p); if ((td->flags & TREE_LINEAR_ORDER) && readmem(node_p+OFFSET(rb_node_rb_left), KVADDR, &new_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR) && new_p) { if (readmem(new_p+OFFSET(rb_node_rb_left), KVADDR, &test_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR|QUIET)) { sprintf(new_pos, "%s/l", pos); rbtree_iteration(new_p, td, new_pos); } else error(INFO, "rb_node: %lx: corrupted rb_left pointer: %lx\n", node_p, new_p); } struct_p = node_p - td->node_member_offset; if (td->flags & VERBOSE) fprintf(fp, "%lx\n", struct_p); if (td->flags & TREE_POSITION_DISPLAY) fprintf(fp, " position: %s\n", pos); if (td->structname) { if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else if (td->flags & TREE_STRUCT_RADIX_16) print_radix = 16; else print_radix = 0; for (i = 0; i < td->structname_args; i++) { switch(count_chars(td->structname[i], '.')) { case 0: dump_struct(td->structname[i], struct_p, print_radix); break; default: if (td->flags & TREE_PARSE_MEMBER) dump_struct_members_for_tree(td, i, struct_p); else if (td->flags & TREE_READ_MEMBER) dump_struct_members_fast(e[i], print_radix, struct_p); break; } } } if (!(td->flags & TREE_LINEAR_ORDER) && readmem(node_p+OFFSET(rb_node_rb_left), KVADDR, &new_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR) && new_p) { if (readmem(new_p+OFFSET(rb_node_rb_left), KVADDR, &test_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR|QUIET)) { sprintf(new_pos, "%s/l", pos); rbtree_iteration(new_p, td, new_pos); } else error(INFO, "rb_node: %lx: corrupted rb_left pointer: %lx\n", node_p, new_p); } if (readmem(node_p+OFFSET(rb_node_rb_right), KVADDR, &new_p, sizeof(void *), "rb_node rb_right", RETURN_ON_ERROR) && new_p) { if (readmem(new_p+OFFSET(rb_node_rb_left), KVADDR, &test_p, sizeof(void *), "rb_node rb_left", RETURN_ON_ERROR|QUIET)) { sprintf(new_pos, "%s/r", pos); rbtree_iteration(new_p, td, new_pos); } else error(INFO, "rb_node: %lx: corrupted rb_right pointer: %lx\n", node_p, new_p); } } void dump_struct_members_for_tree(struct tree_data *td, int idx, ulong struct_p) { int i, argc; uint print_radix; char *p1; char *structname, *members; char *arglist[MAXARGS]; if (td->flags & TREE_STRUCT_RADIX_10) print_radix = 10; else 
	if (td->flags & TREE_STRUCT_RADIX_16)
		print_radix = 16;
	else
		print_radix = 0;

	structname = GETBUF(strlen(td->structname[idx])+1);
	members = GETBUF(strlen(td->structname[idx])+1);

	strcpy(structname, td->structname[idx]);
	p1 = strstr(structname, ".") + 1;
	strcpy(members, p1);
	replace_string(members, ",", ' ');
	argc = parse_line(members, arglist);

	for (i = 0; i < argc; i++) {
		*p1 = NULLCHAR;
		strcat(structname, arglist[i]);
		dump_struct_member(structname, struct_p, print_radix);
	}

	FREEBUF(structname);
	FREEBUF(members);
}

/*
 * The next set of functions are a general purpose hashing facility,
 * used to detect duplicate entries while gathering or walking lists
 * of values.
 */

#define HQ_ENTRY_CHUNK (1024)
#define HQ_SHIFT (machdep->pageshift)
#define HQ_INDEX(X) (((X) >> HQ_SHIFT) % pc->nr_hash_queues)

struct hq_entry {
	int next;
	int order;
	ulong value;
};

struct hq_head {
	int next;
	int qcnt;
};

struct hash_table {
	ulong flags;
	struct hq_head *queue_heads;
	struct hq_entry *memptr;
	long count;
	long index;
	int reallocs;
} hash_table = { 0 };

/*
 * For starters, allocate a hash table containing HQ_ENTRY_CHUNK entries.
 * If necessary during runtime, it will be increased in size.
 */
void
hq_init(void)
{
	struct hash_table *ht;

	ht = &hash_table;

	if (pc->nr_hash_queues == 0)
		pc->nr_hash_queues = NR_HASH_QUEUES_DEFAULT;

	if ((ht->queue_heads = (struct hq_head *)malloc(pc->nr_hash_queues *
	    sizeof(struct hq_head))) == NULL) {
		error(INFO, "cannot malloc memory for hash queue heads: %s\n",
			strerror(errno));
		ht->flags = HASH_QUEUE_NONE;
		pc->flags &= ~HASH;
		return;
	}

	if ((ht->memptr = (struct hq_entry *)malloc(HQ_ENTRY_CHUNK *
	    sizeof(struct hq_entry))) == NULL) {
		error(INFO, "cannot malloc memory for hash queues: %s\n",
			strerror(errno));
		ht->flags = HASH_QUEUE_NONE;
		pc->flags &= ~HASH;
		return;
	}
	BZERO(ht->memptr, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));

	ht->count = HQ_ENTRY_CHUNK;
	ht->index = 0;
}

/*
 * Get a free hash queue entry. If there's no more available, realloc()
 * a new chunk of memory with another HQ_ENTRY_CHUNK entries stuck on the end.
 */
static long
alloc_hq_entry(void)
{
	struct hash_table *ht;
	struct hq_entry *new, *end_of_old;

	ht = &hash_table;

	if (++ht->index == ht->count) {
		if (!(new = (void *)realloc((void *)ht->memptr,
		    (ht->count+HQ_ENTRY_CHUNK) * sizeof(struct hq_entry)))) {
			error(INFO,
			    "cannot realloc memory for hash queues: %s\n",
				strerror(errno));
			ht->flags |= HASH_QUEUE_FULL;
			return(-1);
		}
		ht->reallocs++;

		ht->memptr = new;
		end_of_old = ht->memptr + ht->count;
		BZERO(end_of_old, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));
		ht->count += HQ_ENTRY_CHUNK;
	}

	return(ht->index);
}

/*
 * Restore the hash queue to its state before the duplicate entry
 * was attempted.
 */
static void
dealloc_hq_entry(struct hq_entry *entry)
{
	struct hash_table *ht;
	long hqi;

	ht = &hash_table;
	hqi = HQ_INDEX(entry->value);

	ht->index--;

	BZERO(entry, sizeof(struct hq_entry));

	ht->queue_heads[hqi].qcnt--;
}

/*
 * Initialize the hash table for a hashing session.
 */
int
hq_open(void)
{
	struct hash_table *ht;

	if (!(pc->flags & HASH))
		return FALSE;

	ht = &hash_table;
	if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_OPEN))
		return FALSE;

	ht->flags &= ~(HASH_QUEUE_FULL|HASH_QUEUE_CLOSED);
	BZERO(ht->queue_heads, sizeof(struct hq_head) * pc->nr_hash_queues);
	BZERO(ht->memptr, ht->count * sizeof(struct hq_entry));
	ht->index = 0;

	ht->flags |= HASH_QUEUE_OPEN;

	return TRUE;
}

int
hq_is_open(void)
{
	struct hash_table *ht;

	ht = &hash_table;
	return (ht->flags & HASH_QUEUE_OPEN ? TRUE : FALSE);
}

int
hq_is_inuse(void)
{
	struct hash_table *ht;

	if (!hq_is_open())
		return FALSE;

	ht = &hash_table;
	return (ht->index ? TRUE : FALSE);
}

/*
 * Close the hash table, returning the number of items hashed in this session.
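 *
 * (Illustrative session: hq_open(); then hq_enter(value) for each
 * candidate, where a FALSE return flags a duplicate; optionally
 * retrieve_list() to collect the unique values; hq_close() then
 * returns the count hashed during the session.)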
*/ int hq_close(void) { struct hash_table *ht; ht = &hash_table; ht->flags &= ~(HASH_QUEUE_OPEN); ht->flags |= HASH_QUEUE_CLOSED; if (!(pc->flags & HASH)) return(0); if (ht->flags & HASH_QUEUE_NONE) return(0); ht->flags &= ~HASH_QUEUE_FULL; return(ht->index); } char *corrupt_hq = "corrupt hash queue entry: value: %lx next: %d order: %d\n"; /* * For a given value, allocate a hash queue entry and hash it into the * open hash table. If a duplicate entry is found, return FALSE; for all * other possibilities return TRUE. Note that it's up to the user to deal * with failure. */ int hq_enter(ulong value) { struct hash_table *ht; struct hq_entry *entry; struct hq_entry *list_entry; long hqi; long index; if (!(pc->flags & HASH)) return TRUE; ht = &hash_table; if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_FULL)) return TRUE; if (!(ht->flags & HASH_QUEUE_OPEN)) return TRUE; if ((index = alloc_hq_entry()) < 0) return TRUE; entry = ht->memptr + index; if (entry->next || entry->value || entry->order) { error(INFO, corrupt_hq, entry->value, entry->next, entry->order); ht->flags |= HASH_QUEUE_NONE; return TRUE; } entry->next = 0; entry->value = value; entry->order = index; hqi = HQ_INDEX(value); if (ht->queue_heads[hqi].next == 0) { ht->queue_heads[hqi].next = index; ht->queue_heads[hqi].qcnt = 1; return TRUE; } else ht->queue_heads[hqi].qcnt++; list_entry = ht->memptr + ht->queue_heads[hqi].next; while (TRUE) { if (list_entry->value == entry->value) { dealloc_hq_entry(entry); return FALSE; } if (list_entry->next >= ht->count) { error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return TRUE; } if (list_entry->next == 0) break; list_entry = ht->memptr + list_entry->next; } list_entry->next = index; return TRUE; } /* * "hash -d" output */ void dump_hash_table(int verbose) { int i; struct hash_table *ht; struct hq_entry *list_entry; long elements; long queues_in_use; int others; uint minq, maxq; ht = &hash_table; others = 0; fprintf(fp, " flags: %lx (", ht->flags); if (ht->flags & HASH_QUEUE_NONE) fprintf(fp, "%sHASH_QUEUE_NONE", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_OPEN) fprintf(fp, "%sHASH_QUEUE_OPEN", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_CLOSED) fprintf(fp, "%sHASH_QUEUE_CLOSED", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_FULL) fprintf(fp, "%sHASH_QUEUE_FULL", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " queue_heads[%ld]: %lx\n", pc->nr_hash_queues, (ulong)ht->queue_heads); fprintf(fp, " memptr: %lx\n", (ulong)ht->memptr); fprintf(fp, " count: %ld ", ht->count); if (ht->reallocs) fprintf(fp, " (%d reallocs)", ht->reallocs); fprintf(fp, "\n"); fprintf(fp, " index: %ld\n", ht->index); queues_in_use = 0; minq = ~(0); maxq = 0; for (i = 0; i < pc->nr_hash_queues; i++) { if (ht->queue_heads[i].next == 0) { minq = 0; continue; } if (ht->queue_heads[i].qcnt < minq) minq = ht->queue_heads[i].qcnt; if (ht->queue_heads[i].qcnt > maxq) maxq = ht->queue_heads[i].qcnt; queues_in_use++; } elements = 0; list_entry = ht->memptr; for (i = 0; i < ht->count; i++, list_entry++) { if (!list_entry->order) { if (list_entry->value || list_entry->next) goto corrupt_list_entry; continue; } if (list_entry->next >= ht->count) goto corrupt_list_entry; ++elements; } if (elements != ht->index) fprintf(fp, " elements found: %ld (expected %ld)\n", elements, ht->index); fprintf(fp, " queues in use: %ld of %ld\n", queues_in_use, pc->nr_hash_queues); fprintf(fp, " queue length range: %d to %d\n", minq, maxq); if (verbose) { if (!elements) { fprintf(fp, " entries: (none)\n"); return; } fprintf(fp, " entries: "); list_entry = ht->memptr; for (i = 0; i < ht->count; i++, list_entry++) { if (list_entry->order) fprintf(fp, "%s%lx (%d)\n", list_entry->order == 1 ? "" : " ", list_entry->value, list_entry->order); } } return; corrupt_list_entry: error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; } /* * Retrieve the count of, and optionally stuff a pre-allocated array with, * the current hash table entries. The entries will be sorted according * to the order in which they were entered, so from this point on, no * further hq_enter() operations on this list will be allowed. However, * multiple calls to retrieve_list are allowed because the second and * subsequent ones will go directly to where the non-zero (valid) entries * start in the potentially very large list_entry memory chunk. */ int retrieve_list(ulong array[], int count) { int i; struct hash_table *ht; struct hq_entry *list_entry; int elements; if (!(pc->flags & HASH)) error(FATAL, "cannot perform this command with hash turned off\n"); ht = &hash_table; list_entry = ht->memptr; for (i = elements = 0; i < ht->count; i++, list_entry++) { if (!list_entry->order) { if (list_entry->value || list_entry->next) goto corrupt_list_entry; continue; } if (list_entry->next >= ht->count) goto corrupt_list_entry; if (array) array[elements] = list_entry->value; if (++elements == count) break; } return elements; corrupt_list_entry: error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return(-1); } /* * For a given value, check to see if a hash queue entry exists. If an * entry is found, return TRUE; for all other possibilities return FALSE. 
 */
int
hq_entry_exists(ulong value)
{
	struct hash_table *ht;
	struct hq_entry *list_entry;
	long hqi;

	if (!(pc->flags & HASH))
		return FALSE;

	ht = &hash_table;
	if (ht->flags & (HASH_QUEUE_NONE))
		return FALSE;

	if (!(ht->flags & HASH_QUEUE_OPEN))
		return FALSE;

	hqi = HQ_INDEX(value);
	list_entry = ht->memptr + ht->queue_heads[hqi].next;

	while (TRUE) {
		if (list_entry->value == value)
			return TRUE;

		if (list_entry->next >= ht->count) {
			error(INFO, corrupt_hq,
				list_entry->value,
				list_entry->next,
				list_entry->order);
			ht->flags |= HASH_QUEUE_NONE;
			return FALSE;
		}

		if (list_entry->next == 0)
			break;

		list_entry = ht->memptr + list_entry->next;
	}

	return FALSE;
}

/*
 * K&R power function for integers
 */
long
power(long base, int exp)
{
	int i;
	long p;

	p = 1;
	for (i = 1; i <= exp; i++)
		p = p * base;

	return p;
}

long long
ll_power(long long base, long long exp)
{
	long long i;
	long long p;

	p = 1;
	for (i = 1; i <= exp; i++)
		p = p * base;

	return p;
}

/*
 * Internal buffer allocation scheme to avoid inline malloc() calls and
 * resultant memory leaks due to aborted commands. These buffers are
 * for TEMPORARY use on a per-command basis. They are allocated by calls
 * to GETBUF(size). They can be explicitly freed by FREEBUF(address), but
 * they are all freed by free_all_bufs(), which is called in a number of
 * places, most notably at the end of each command.
 */

#define NUMBER_1K_BUFS (10)
#define NUMBER_2K_BUFS (10)
#define NUMBER_4K_BUFS (5)
#define NUMBER_8K_BUFS (5)
#define NUMBER_32K_BUFS (1)

#define SHARED_1K_BUF_FULL (0x003ff)
#define SHARED_2K_BUF_FULL (0x003ff)
#define SHARED_4K_BUF_FULL (0x0001f)
#define SHARED_8K_BUF_FULL (0x0001f)
#define SHARED_32K_BUF_FULL (0x00001)

#define SHARED_1K_BUF_AVAIL(X) \
	(NUMBER_1K_BUFS && !(((X) & SHARED_1K_BUF_FULL) == SHARED_1K_BUF_FULL))
#define SHARED_2K_BUF_AVAIL(X) \
	(NUMBER_2K_BUFS && !(((X) & SHARED_2K_BUF_FULL) == SHARED_2K_BUF_FULL))
#define SHARED_4K_BUF_AVAIL(X) \
	(NUMBER_4K_BUFS && !(((X) & SHARED_4K_BUF_FULL) == SHARED_4K_BUF_FULL))
#define SHARED_8K_BUF_AVAIL(X) \
	(NUMBER_8K_BUFS && !(((X) & SHARED_8K_BUF_FULL) == SHARED_8K_BUF_FULL))
#define SHARED_32K_BUF_AVAIL(X) \
	(NUMBER_32K_BUFS && !(((X) & SHARED_32K_BUF_FULL) == SHARED_32K_BUF_FULL))

#define B1K (0)
#define B2K (1)
#define B4K (2)
#define B8K (3)
#define B32K (4)

#define SHARED_BUF_SIZES (B32K+1)
#define MAX_MALLOC_BUFS (2000)
#define MAX_CACHE_SIZE (KILOBYTES(32))

struct shared_bufs {
	char buf_1K[NUMBER_1K_BUFS][1024];
	char buf_2K[NUMBER_2K_BUFS][2048];
	char buf_4K[NUMBER_4K_BUFS][4096];
	char buf_8K[NUMBER_8K_BUFS][8192];
	char buf_32K[NUMBER_32K_BUFS][32768];
	long buf_1K_used;
	long buf_2K_used;
	long buf_4K_used;
	long buf_8K_used;
	long buf_32K_used;
	long buf_1K_maxuse;
	long buf_2K_maxuse;
	long buf_4K_maxuse;
	long buf_8K_maxuse;
	long buf_32K_maxuse;
	long buf_1K_ovf;
	long buf_2K_ovf;
	long buf_4K_ovf;
	long buf_8K_ovf;
	long buf_32K_ovf;
	int buf_inuse[SHARED_BUF_SIZES];
	char *malloc_bp[MAX_MALLOC_BUFS];
	long smallest;
	long largest;
	long embedded;
	long max_embedded;
	long mallocs;
	long frees;
	double total;
	ulong reqs;
} shared_bufs;

void
buf_init(void)
{
	struct shared_bufs *bp;

	bp = &shared_bufs;
	BZERO(bp, sizeof(struct shared_bufs));

	bp->smallest = 0x7fffffff;
	bp->total = 0.0;
}

/*
 * Free up all buffers used by the last command.
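 *
 * (Typical per-command pattern, illustrative:
 *
 *     char *buf = GETBUF(BUFSIZE);
 *     ...
 *     FREEBUF(buf);
 *
 * anything an aborted command left allocated is reclaimed here.)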
*/ void free_all_bufs(void) { int i; struct shared_bufs *bp; bp = &shared_bufs; bp->embedded = 0; for (i = 0; i < SHARED_BUF_SIZES; i++) bp->buf_inuse[i] = 0; for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i]) { free(bp->malloc_bp[i]); bp->malloc_bp[i] = NULL; bp->frees++; } } if (bp->mallocs != bp->frees) error(WARNING, "malloc/free mismatch (%ld/%ld)\n", bp->mallocs, bp->frees); } /* * Free a specific buffer that may have been returned by malloc(). * If the address is one of the static buffers, look for it and * clear its inuse bit. */ void freebuf(char *addr) { int i; struct shared_bufs *bp; bp = &shared_bufs; bp->embedded--; if (CRASHDEBUG(8)) { INDENT(bp->embedded*2); fprintf(fp, "FREEBUF(%ld)\n", bp->embedded); } for (i = 0; i < NUMBER_1K_BUFS; i++) { if (addr == (char *)&bp->buf_1K[i]) { bp->buf_inuse[B1K] &= ~(1 << i); return; } } for (i = 0; i < NUMBER_2K_BUFS; i++) { if (addr == (char *)&bp->buf_2K[i]) { bp->buf_inuse[B2K] &= ~(1 << i); return; } } for (i = 0; i < NUMBER_4K_BUFS; i++) { if (addr == (char *)&bp->buf_4K[i]) { bp->buf_inuse[B4K] &= ~(1 << i); return; } } for (i = 0; i < NUMBER_8K_BUFS; i++) { if (addr == (char *)&bp->buf_8K[i]) { bp->buf_inuse[B8K] &= ~(1 << i); return; } } for (i = 0; i < NUMBER_32K_BUFS; i++) { if (addr == (char *)&bp->buf_32K[i]) { bp->buf_inuse[B32K] &= ~(1 << i); return; } } for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i] == addr) { free(bp->malloc_bp[i]); bp->malloc_bp[i] = NULL; bp->frees++; return; } } error(FATAL, "freeing an unknown buffer -- shared buffer inconsistency!\n"); } /* DEBUG */ void dump_embedded(char *s) { struct shared_bufs *bp; char *p1; p1 = s ? s : ""; bp = &shared_bufs; console("%s: embedded: %ld mallocs: %ld frees: %ld\n", p1, bp->embedded, bp->mallocs, bp->frees); } /* DEBUG */ long get_embedded(void) { struct shared_bufs *bp; bp = &shared_bufs; return(bp->embedded); } /* * "help -b" output */ void dump_shared_bufs(void) { int i; struct shared_bufs *bp; bp = &shared_bufs; fprintf(fp, " buf_1K_used: %ld\n", bp->buf_1K_used); fprintf(fp, " buf_2K_used: %ld\n", bp->buf_2K_used); fprintf(fp, " buf_4K_used: %ld\n", bp->buf_4K_used); fprintf(fp, " buf_8K_used: %ld\n", bp->buf_8K_used); fprintf(fp, " buf_32K_used: %ld\n", bp->buf_32K_used); fprintf(fp, " buf_1K_ovf: %ld\n", bp->buf_1K_ovf); fprintf(fp, " buf_2K_ovf: %ld\n", bp->buf_2K_ovf); fprintf(fp, " buf_4K_ovf: %ld\n", bp->buf_4K_ovf); fprintf(fp, " buf_8K_ovf: %ld\n", bp->buf_8K_ovf); fprintf(fp, " buf_32K_ovf: %ld\n", bp->buf_32K_ovf); fprintf(fp, " buf_1K_maxuse: %2ld of %d\n", bp->buf_1K_maxuse, NUMBER_1K_BUFS); fprintf(fp, " buf_2K_maxuse: %2ld of %d\n", bp->buf_2K_maxuse, NUMBER_2K_BUFS); fprintf(fp, " buf_4K_maxuse: %2ld of %d\n", bp->buf_4K_maxuse, NUMBER_4K_BUFS); fprintf(fp, " buf_8K_maxuse: %2ld of %d\n", bp->buf_8K_maxuse, NUMBER_8K_BUFS); fprintf(fp, "buf_32K_maxuse: %2ld of %d\n", bp->buf_32K_maxuse, NUMBER_32K_BUFS); fprintf(fp, " buf_inuse[%d]: ", SHARED_BUF_SIZES); for (i = 0; i < SHARED_BUF_SIZES; i++) fprintf(fp, "[%lx]", (ulong)bp->buf_inuse[i]); fprintf(fp, "\n"); for (i = 0; i < MAX_MALLOC_BUFS; i++) if (bp->malloc_bp[i]) fprintf(fp, " malloc_bp[%d]: %lx\n", i, (ulong)bp->malloc_bp[i]); if (bp->smallest == 0x7fffffff) fprintf(fp, " smallest: 0\n"); else fprintf(fp, " smallest: %ld\n", bp->smallest); fprintf(fp, " largest: %ld\n", bp->largest); fprintf(fp, " embedded: %ld\n", bp->embedded); fprintf(fp, " max_embedded: %ld\n", bp->max_embedded); fprintf(fp, " mallocs: %ld\n", bp->mallocs); fprintf(fp, " frees: %ld\n", 
bp->frees); fprintf(fp, " reqs/total: %ld/%.0f\n", bp->reqs, bp->total); fprintf(fp, " average size: %.0f\n", bp->total/bp->reqs); } /* * Try to get one of the static buffers first. If not available, fall * through and get it from malloc(), keeping track of the returned address. */ #define SHARED_BUFSIZE(size) \ ((size <= 1024) ? 1024 >> 7 : \ ((size <= 2048) ? 2048 >> 7 : \ ((size <= 4096) ? 4096 >> 7 : \ ((size <= 8192) ? 8192 >> 7 : \ ((size <= 32768) ? 32768 >> 7 : -1))))) char * getbuf(long reqsize) { int i; int index; int bdx; int mask; struct shared_bufs *bp; char *bufp; if (!reqsize) { ulong retaddr = (ulong)__builtin_return_address(0); error(FATAL, "zero-size memory allocation! (called from %lx)\n", retaddr); } bp = &shared_bufs; index = SHARED_BUFSIZE(reqsize); if (CRASHDEBUG(7) && (reqsize > MAX_CACHE_SIZE)) error(NOTE, "GETBUF request > MAX_CACHE_SIZE: %ld\n", reqsize); if (CRASHDEBUG(8)) { INDENT(bp->embedded*2); fprintf(fp, "GETBUF(%ld -> %ld)\n", reqsize, bp->embedded); } bp->embedded++; if (bp->embedded > bp->max_embedded) bp->max_embedded = bp->embedded; if (reqsize < bp->smallest) bp->smallest = reqsize; if (reqsize > bp->largest) bp->largest = reqsize; bp->total += reqsize; bp->reqs++; switch (index) { case -1: break; case 8: if (SHARED_1K_BUF_AVAIL(bp->buf_inuse[B1K])) { mask = ~(bp->buf_inuse[B1K]); bdx = ffs(mask) - 1; bufp = bp->buf_1K[bdx]; bp->buf_1K_used++; bp->buf_inuse[B1K] |= (1 << bdx); bp->buf_1K_maxuse = MAX(bp->buf_1K_maxuse, count_bits_int(bp->buf_inuse[B1K])); BZERO(bufp, 1024); return(bufp); } bp->buf_1K_ovf++; /* FALLTHROUGH */ case 16: if (SHARED_2K_BUF_AVAIL(bp->buf_inuse[B2K])) { mask = ~(bp->buf_inuse[B2K]); bdx = ffs(mask) - 1; bufp = bp->buf_2K[bdx]; bp->buf_2K_used++; bp->buf_inuse[B2K] |= (1 << bdx); bp->buf_2K_maxuse = MAX(bp->buf_2K_maxuse, count_bits_int(bp->buf_inuse[B2K])); BZERO(bufp, 2048); return(bufp); } bp->buf_2K_ovf++; /* FALLTHROUGH */ case 32: if (SHARED_4K_BUF_AVAIL(bp->buf_inuse[B4K])) { mask = ~(bp->buf_inuse[B4K]); bdx = ffs(mask) - 1; bufp = bp->buf_4K[bdx]; bp->buf_4K_used++; bp->buf_inuse[B4K] |= (1 << bdx); bp->buf_4K_maxuse = MAX(bp->buf_4K_maxuse, count_bits_int(bp->buf_inuse[B4K])); BZERO(bufp, 4096); return(bufp); } bp->buf_4K_ovf++; /* FALLTHROUGH */ case 64: if (SHARED_8K_BUF_AVAIL(bp->buf_inuse[B8K])) { mask = ~(bp->buf_inuse[B8K]); bdx = ffs(mask) - 1; bufp = bp->buf_8K[bdx]; bp->buf_8K_used++; bp->buf_inuse[B8K] |= (1 << bdx); bp->buf_8K_maxuse = MAX(bp->buf_8K_maxuse, count_bits_int(bp->buf_inuse[B8K])); BZERO(bufp, 8192); return(bufp); } bp->buf_8K_ovf++; /* FALLTHROUGH */ case 256: if (SHARED_32K_BUF_AVAIL(bp->buf_inuse[B32K])) { mask = ~(bp->buf_inuse[B32K]); bdx = ffs(mask) - 1; bufp = bp->buf_32K[bdx]; bp->buf_32K_used++; bp->buf_inuse[B32K] |= (1 << bdx); bp->buf_32K_maxuse = MAX(bp->buf_32K_maxuse, count_bits_int(bp->buf_inuse[B32K])); BZERO(bufp, 32768); return(bufp); } bp->buf_32K_ovf++; break; } for (i = 0; i < MAX_MALLOC_BUFS; i++) { if (bp->malloc_bp[i]) continue; if ((bp->malloc_bp[i] = (char *)calloc(reqsize, 1))) { bp->mallocs++; return(bp->malloc_bp[i]); } break; } dump_shared_bufs(); return ((char *)(long) error(FATAL, "cannot allocate any more memory!\n")); } /* * Change the size of the previously-allocated memory block * pointed to by oldbuf to newsize bytes. Copy the minimum * of oldsize and newsize bytes from the oldbuf to the newbuf, * and return the address of the new buffer, which will have * a different address than oldbuf.
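 *
 * A minimal sketch of the intended call pattern (hypothetical sizes,
 * error handling elided):
 *
 *	char *buf = GETBUF(512);
 *	...
 *	buf = resizebuf(buf, 512, 1024);
 *
 * The first 512 bytes are copied into the new 1024-byte buffer, and the
 * old buffer is handed back to the shared pool via FREEBUF().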
*/ char * resizebuf(char *oldbuf, long oldsize, long newsize) { char *newbuf; newbuf = GETBUF(newsize); BCOPY(oldbuf, newbuf, MIN(oldsize, newsize)); FREEBUF(oldbuf); return newbuf; } /* * Duplicate a string into a buffer allocated with GETBUF(). */ char * strdupbuf(char *oldstring) { char *newstring; newstring = GETBUF(strlen(oldstring)+1); strcpy(newstring, oldstring); return newstring; } /* * Return the number of bits set in an int or long. */ int count_bits_int(int val) { int i, cnt; int total; cnt = sizeof(int) * 8; for (i = total = 0; i < cnt; i++) { if (val & 1) total++; val >>= 1; } return total; } int count_bits_long(ulong val) { int i, cnt; int total; cnt = sizeof(long) * 8; for (i = total = 0; i < cnt; i++) { if (val & 1) total++; val >>= 1; } return total; } int highest_bit_long(ulong val) { int i, cnt; int total; int highest; highest = -1; cnt = sizeof(long) * 8; for (i = total = 0; i < cnt; i++) { if (val & 1) highest = i; val >>= 1; } return highest; } int lowest_bit_long(ulong val) { int i, cnt; int lowest; lowest = -1; cnt = sizeof(long) * 8; for (i = 0; i < cnt; i++) { if (val & 1) { lowest = i; break; } val >>= 1; } return lowest; } /* * Debug routine to stop whatever's going on in its tracks. */ void drop_core(char *s) { volatile int *nullptr; int i ATTRIBUTE_UNUSED; if (s && ascii_string(s)) fprintf(stderr, "%s", s); kill((pid_t)pc->program_pid, 3); nullptr = NULL; while (TRUE) i = *nullptr; } /* * For debug output to a device other than the current terminal. * pc->console must have been preset by: * * 1. by an .rc file setting: "set console /dev/whatever" * 2. by a runtime command: "set console /dev/whatever" * 3. during program invocation: "-c /dev/whatever" * * The first time it's called, the device will be opened. */ int console(char *fmt, ...) { char output[BUFSIZE*2]; va_list ap; if (!pc->console || !strlen(pc->console) || (pc->flags & NO_CONSOLE) || (pc->confd == -1)) return 0; if (!fmt || !strlen(fmt)) return 0; va_start(ap, fmt); (void)vsnprintf(output, BUFSIZE*2, fmt, ap); va_end(ap); if (pc->confd == -2) { if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) { error(INFO, "console device %s: %s\n", pc->console, strerror(errno), 0, 0); return 0; } } return(write(pc->confd, output, strlen(output))); } /* * Allocate space to store the designated console device name. * If a console device pre-exists, free its name space and close the device. */ void create_console_device(char *dev) { if (pc->console) { if (pc->confd != -1) close(pc->confd); free(pc->console); } pc->confd = -2; if ((pc->console = (char *)malloc(strlen(dev)+1)) == NULL) fprintf(stderr, "console name malloc: %s\n", strerror(errno)); else { strcpy(pc->console, dev); if (console("debug console [%ld]: %s\n", pc->program_pid, (ulong)pc->console) < 0) { close(pc->confd); free(pc->console); pc->console = NULL; pc->confd = -1; if (!(pc->flags & RUNTIME)) error(INFO, "cannot set console to %s\n", dev); } } } /* * Disable console output without closing the device. * Typically used with CONSOLE_OFF() macro. */ int console_off(void) { int orig_no_console; orig_no_console = pc->flags & NO_CONSOLE; pc->flags |= NO_CONSOLE; return orig_no_console; } /* * Re-enable console output. Typically used with CONSOLE_ON() macro. */ int console_on(int orig_no_console) { if (!orig_no_console) pc->flags &= ~NO_CONSOLE; return(pc->flags & NO_CONSOLE); } /* * Print a string to the console device with no formatting, useful for * sending strings containing % signs. 
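 *
 * For example (hypothetical call), a message containing a literal
 * percent sign is written byte-for-byte, whereas the printf-style
 * console() above would misinterpret it as a conversion specifier:
 *
 *	console_verbatim("resync 100% complete\n");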
*/ int console_verbatim(char *s) { char *p; int cnt; if (!pc->console || !strlen(pc->console) || (pc->flags & NO_CONSOLE) || (pc->confd == -1)) return 0; if (!s || !strlen(s)) return 0; if (pc->confd == -2) { if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) { fprintf(stderr, "%s: %s\n", pc->console, strerror(errno)); return 0; } } for (cnt = 0, p = s; *p; p++) { if (write(pc->confd, p, 1) != 1) break; cnt++; } return cnt; } /* * Set up a signal handler. */ void sigsetup(int sig, void *handler, struct sigaction *act, struct sigaction *oldact) { BZERO(act, sizeof(struct sigaction)); act->sa_handler = handler; act->sa_flags = SA_NOMASK; sigaction(sig, act, oldact); } /* * Convert a jiffies-based time value into a string showing the * number of days, hours:minutes:seconds. */ #define SEC_MINUTES (60) #define SEC_HOURS (60 * SEC_MINUTES) #define SEC_DAYS (24 * SEC_HOURS) char * convert_time(ulonglong count, char *buf) { ulonglong total, days, hours, minutes, seconds; if (CRASHDEBUG(2)) error(INFO, "convert_time: %lld (%llx)\n", count, count); if (!machdep->hz) { sprintf(buf, "(cannot calculate: unknown HZ value)"); return buf; } total = (count)/(ulonglong)machdep->hz; days = total / SEC_DAYS; total %= SEC_DAYS; hours = total / SEC_HOURS; total %= SEC_HOURS; minutes = total / SEC_MINUTES; seconds = total % SEC_MINUTES; buf[0] = NULLCHAR; if (days) sprintf(buf, "%llu days, ", days); sprintf(&buf[strlen(buf)], "%02llu:%02llu:%02llu", hours, minutes, seconds); return buf; } /* * Stall for a number of microseconds. */ void stall(ulong microseconds) { struct timeval delay; delay.tv_sec = 0; delay.tv_usec = (__time_t)microseconds; (void) select(0, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, &delay); } /* * Fill a buffer with a page count translated to a GB/MB/KB value. */ char * pages_to_size(ulong pages, char *buf) { double total; char *p1, *p2; if (pages == 0) { sprintf(buf, "0"); return buf; } total = (double)pages * (double)PAGESIZE(); if (total >= GIGABYTES(1)) sprintf(buf, "%.1f GB", total/(double)GIGABYTES(1)); else if (total >= MEGABYTES(1)) sprintf(buf, "%.1f MB", total/(double)MEGABYTES(1)); else sprintf(buf, "%ld KB", (ulong)(total/(double)KILOBYTES(1))); if ((p1 = strstr(buf, ".0 "))) { p2 = p1 + 3; *p1++ = ' '; strcpy(p1, p2); } return buf; } /* * If the list_head.next value points to itself, it's an empty list. */ int empty_list(ulong list_head_addr) { ulong next; if (!readmem(list_head_addr, KVADDR, &next, sizeof(void *), "list_head next contents", RETURN_ON_ERROR)) return TRUE; return (next == list_head_addr); } int machine_type(char *type) { return STREQ(MACHINE_TYPE, type); } int machine_type_mismatch(char *file, char *e_machine, char *alt, ulong query) { if (machine_type(e_machine) || machine_type(alt)) return FALSE; if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */ return TRUE; error(WARNING, "machine type mismatch:\n"); fprintf(fp, " crash utility: %s\n", MACHINE_TYPE); fprintf(fp, " %s: %s%s%s\n\n", file, e_machine, alt ? " or " : "", alt ?
alt : ""); return TRUE; } void command_not_supported() { error(FATAL, "command not supported or applicable on this architecture or kernel\n"); } void option_not_supported(int c) { error(FATAL, "-%c option not supported or applicable on this architecture or kernel\n", (char)c); } static int please_wait_len = 0; void please_wait(char *s) { int fd; char buf[BUFSIZE]; if ((pc->flags & SILENT) || !DUMPFILE() || (pc->flags & RUNTIME)) return; if (!(pc->flags & TTY) && KVMDUMP_DUMPFILE()) { if (!isatty(fileno(stdin)) || ((fd = open("/dev/tty", O_RDONLY)) < 0)) return; close(fd); } pc->flags |= PLEASE_WAIT; please_wait_len = sprintf(buf, "\rplease wait... (%s)", s); fprintf(fp, "%s", buf); fflush(fp); } void please_wait_done(void) { if (!(pc->flags & PLEASE_WAIT)) return; pc->flags &= ~PLEASE_WAIT; fprintf(fp, "\r"); pad_line(fp, please_wait_len, ' '); fprintf(fp, "\r"); fflush(fp); } /* * Compare two pathnames. */ int pathcmp(char *p1, char *p2) { char c1, c2; do { if ((c1 = *p1++) == '/') while (*p1 == '/') { p1++; } if ((c2 = *p2++) == '/') while (*p2 == '/') { p2++; } if (c1 == '\0') return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2; } while (c1 == c2); return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2; } #include <endian.h> /* * Check the byte-order of an ELF file vs. the host byte order. */ int endian_mismatch(char *file, char dumpfile_endian, ulong query) { char *endian; switch (dumpfile_endian) { case ELFDATA2LSB: if (__BYTE_ORDER == __LITTLE_ENDIAN) return FALSE; endian = "little-endian"; break; case ELFDATA2MSB: if (__BYTE_ORDER == __BIG_ENDIAN) return FALSE; endian = "big-endian"; break; default: endian = "unknown"; break; } if (query == KDUMP_LOCAL) /* already printed by NETDUMP_LOCAL */ return TRUE; error(WARNING, "endian mismatch:\n"); fprintf(fp, " crash utility: %s\n", (__BYTE_ORDER == __LITTLE_ENDIAN) ? "little-endian" : "big-endian"); fprintf(fp, " %s: %s\n\n", file, endian); return TRUE; } uint16_t swap16(uint16_t val, int swap) { if (swap) return (((val & 0x00ff) << 8) | ((val & 0xff00) >> 8)); else return val; } uint32_t swap32(uint32_t val, int swap) { if (swap) return (((val & 0x000000ffU) << 24) | ((val & 0x0000ff00U) << 8) | ((val & 0x00ff0000U) >> 8) | ((val & 0xff000000U) >> 24)); else return val; } uint64_t swap64(uint64_t val, int swap) { if (swap) return (((val & 0x00000000000000ffULL) << 56) | ((val & 0x000000000000ff00ULL) << 40) | ((val & 0x0000000000ff0000ULL) << 24) | ((val & 0x00000000ff000000ULL) << 8) | ((val & 0x000000ff00000000ULL) >> 8) | ((val & 0x0000ff0000000000ULL) >> 24) | ((val & 0x00ff000000000000ULL) >> 40) | ((val & 0xff00000000000000ULL) >> 56)); else return val; } /* * Get a sufficiently large buffer for cpumask. * You should call FREEBUF() on the result when you no longer need it.
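 *
 * A minimal usage sketch combining this with make_cpumask() below
 * (hypothetical caller; note that the cpu specification must be a
 * writable string, since make_cpumask() tokenizes it in place):
 *
 *	char spec[] = "0,2-3";
 *	ulong *mask = get_cpumask_buf();
 *	make_cpumask(spec, mask, FAULT_ON_ERROR, NULL);
 *	... test cpus with NUM_IN_BITMAP(mask, cpu) ...
 *	FREEBUF(mask);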
*/ ulong * get_cpumask_buf(void) { int cpulen; if ((cpulen = STRUCT_SIZE("cpumask_t")) < 0) cpulen = DIV_ROUND_UP(kt->cpus, BITS_PER_LONG) * sizeof(ulong); return (ulong *)GETBUF(cpulen); } int make_cpumask(char *s, ulong *mask, int flags, int *errptr) { char *p, *q, *orig; int start, end; int i; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "make_cpumask: received NULL string\n"); orig = NULL; goto make_cpumask_error; } orig = strdup(s); p = strtok(s, ","); while (p) { s = strtok(NULL, ""); if (STREQ(p, "a") || STREQ(p, "all")) { start = 0; end = kt->cpus - 1; } else { start = end = -1; q = strtok(p, "-"); start = dtoi(q, flags, errptr); if ((q = strtok(NULL, "-"))) end = dtoi(q, flags, errptr); if (end == -1) end = start; } if ((start < 0) || (start >= kt->cpus) || (end < 0) || (end >= kt->cpus)) { error(INFO, "invalid cpu specification: %s\n", orig); goto make_cpumask_error; } for (i = start; i <= end; i++) SET_BIT(mask, i); p = strtok(s, ","); } free(orig); return TRUE; make_cpumask_error: free(orig); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Copy a string into a sized buffer. If necessary, truncate * the resultant string in the sized buffer so that it will * always be NULL-terminated. */ size_t strlcpy(char *dest, char *src, size_t size) { size_t ret = strlen(src); if (size) { size_t len = (ret >= size) ? size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } return ret; } struct rb_node * rb_first(struct rb_root *root) { struct rb_root rloc; struct rb_node *n; struct rb_node nloc; readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), "rb_root", FAULT_ON_ERROR); n = rloc.rb_node; if (!n) return NULL; while (rb_left(n, &nloc)) n = nloc.rb_left; return n; } struct rb_node * rb_parent(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return (struct rb_node *)(nloc->rb_parent_color & ~3); } struct rb_node * rb_right(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_right; } struct rb_node * rb_left(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_left; } struct rb_node * rb_next(struct rb_node *node) { struct rb_node nloc; struct rb_node *parent; /* node is destroyed */ if (!accessible((ulong)node)) return NULL; parent = rb_parent(node, &nloc); if (parent == node) return NULL; if (nloc.rb_right) { /* rb_right is destroyed */ if (!accessible((ulong)nloc.rb_right)) return NULL; node = nloc.rb_right; while (rb_left(node, &nloc)) { /* rb_left is destroyed */ if (!accessible((ulong)nloc.rb_left)) return NULL; node = nloc.rb_left; } return node; } while ((parent = rb_parent(node, &nloc))) { /* parent is destroyed */ if (!accessible((ulong)parent)) return NULL; if (node != rb_right(parent, &nloc)) break; node = parent; } return parent; } struct rb_node * rb_last(struct rb_root *root) { struct rb_node *node; struct rb_node nloc; /* meet destroyed data */ if (!accessible((ulong)(root + OFFSET(rb_root_rb_node)))) return NULL; readmem((ulong)(root + OFFSET(rb_root_rb_node)), KVADDR, &node, sizeof(node), "rb_root node", FAULT_ON_ERROR); while (1) { if (!node) break; /* meet destroyed data */ if (!accessible((ulong)node)) return NULL; readmem((ulong)node, KVADDR, &nloc, sizeof(struct 
rb_node), "rb_node last", FAULT_ON_ERROR); /* meet the last one */ if (!nloc.rb_right) break; /* meet destroyed data */ if (!accessible((ulong)nloc.rb_right)) break; node = nloc.rb_right; } return node; } crash-7.2.8/lkcd_fix_mem.h0000664000000000000000000002576313614623427014147 0ustar rootroot/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* OBSOLETE */ #ifdef IA64 #define UTSNAME_ENTRY_SZ 65 /* necessary header definitions in all cases */ #define DUMP_KIOBUF_NUMBER 0xdeadbeef /* special number for kiobuf maps */ /* size of a dump header page */ #define DUMP_PAGE_SZ (64 * 1024) /* size of dump page buffer */ /* header definitions for s390 dump */ #define DUMP_MAGIC_S390 0xa8190173618f23fdULL /* s390 magic number */ #define S390_DUMP_HEADER_SIZE 4096 /* standard header definitions */ #define DUMP_MAGIC_NUMBER 0xa8190173618f23edULL /* dump magic number */ #define DUMP_MAGIC_LIVE 0xa8190173618f23cdULL /* live magic number */ #define DUMP_VERSION_NUMBER 0x5 /* dump version number */ #define DUMP_PANIC_LEN 0x100 /* dump panic string length */ /* dump levels - type specific stuff added later -- add as necessary */ #define DUMP_LEVEL_NONE 0x0 /* no dumping at all -- just bail */ #define DUMP_LEVEL_HEADER 0x1 /* kernel dump header only */ #define DUMP_LEVEL_KERN 0x2 /* dump header and kernel pages */ #define DUMP_LEVEL_USED 0x4 /* dump header, kernel/user pages */ #define DUMP_LEVEL_ALL 0x8 /* dump header, all memory pages */ /* dump compression options -- add as necessary */ #define DUMP_COMPRESS_NONE 0x0 /* don't compress this dump */ #define DUMP_COMPRESS_RLE 0x1 /* use RLE compression */ #define DUMP_COMPRESS_GZIP 0x2 /* use GZIP compression */ /* dump flags - any dump-type specific flags -- add as necessary */ #define DUMP_FLAGS_NONE 0x0 /* no flags are set for this dump */ #define DUMP_FLAGS_NONDISRUPT 0x1 /* try to keep running after dump */ /* dump header flags -- add as necessary */ #define DUMP_DH_FLAGS_NONE 0x0 /* no flags set (error condition!)
*/ #define DUMP_DH_RAW 0x1 /* raw page (no compression) */ #define DUMP_DH_COMPRESSED 0x2 /* page is compressed */ #define DUMP_DH_END 0x4 /* end marker on a full dump */ /* names for various dump tunables (they are now all read-only) */ #define DUMP_ROOT_NAME "sys/dump" #define DUMP_DEVICE_NAME "dump_device" #define DUMP_COMPRESS_NAME "dump_compress" #define DUMP_LEVEL_NAME "dump_level" #define DUMP_FLAGS_NAME "dump_flags" /* page size for gzip compression -- buffered beyond PAGE_SIZE slightly */ #define DUMP_DPC_PAGE_SIZE (PAGE_SIZE + 512) /* dump ioctl() control options */ #define DIOSDUMPDEV 1 /* set the dump device */ #define DIOGDUMPDEV 2 /* get the dump device */ #define DIOSDUMPLEVEL 3 /* set the dump level */ #define DIOGDUMPLEVEL 4 /* get the dump level */ #define DIOSDUMPFLAGS 5 /* set the dump flag parameters */ #define DIOGDUMPFLAGS 6 /* get the dump flag parameters */ #define DIOSDUMPCOMPRESS 7 /* set the dump compress level */ #define DIOGDUMPCOMPRESS 8 /* get the dump compress level */ /* the major number used for the dumping device */ #ifndef DUMP_MAJOR #define DUMP_MAJOR 227 #endif /* * Structure: dump_header_t * Function: This is the header dumped at the top of every valid crash * dump. */ typedef struct _dump_header_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dh_magic_number; /* the version number of this dump */ uint32_t dh_version; /* the size of this header (in case we can't read it) */ uint32_t dh_header_size; /* the level of this dump (just a header?) */ uint32_t dh_dump_level; /* the size of a Linux memory page (4K, 8K, 16K, etc.) */ uint32_t dh_page_size; /* the size of all physical memory */ uint64_t dh_memory_size; /* the start of physical memory */ uint64_t dh_memory_start; /* the end of physical memory */ uint64_t dh_memory_end; /* the number of pages in this dump specifically */ uint32_t dh_num_pages; /* the panic string, if available */ char dh_panic_string[DUMP_PANIC_LEN]; /* timeval depends on architecture, two long values */ struct { uint64_t tv_sec; uint64_t tv_usec; } dh_time; /* the time of the system crash */ /* the NEW utsname (uname) information -- in character form */ /* we do this so we don't have to include utsname.h */ /* plus it helps us be more architecture independent */ /* now maybe one day soon they'll make the [65] a #define! */ char dh_utsname_sysname[65]; char dh_utsname_nodename[65]; char dh_utsname_release[65]; char dh_utsname_version[65]; char dh_utsname_machine[65]; char dh_utsname_domainname[65]; /* the address of current task (OLD = task_struct *, NEW = void *) */ uint64_t dh_current_task; /* what type of compression we're using in this dump (if any) */ uint32_t dh_dump_compress; /* any additional flags */ uint32_t dh_dump_flags; /* the dump device */ uint32_t dh_dump_device; } __attribute__((packed)) dump_header_t; /* * Structure: dump_page_t * Function: To act as the header associated to each physical page of * memory saved in the system crash dump. This allows for * easy reassembly of each crash dump page. The address bits * are split to make things easier for 64-bit/32-bit system * conversions.
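 *
 * A dump reader therefore walks the file as a sequence of
 * (dump_page_t, page data) pairs; a minimal sketch (hypothetical
 * reader, error handling elided):
 *
 *	dump_page_t dp;
 *	while (read(fd, &dp, sizeof(dp)) == sizeof(dp)) {
 *		if (dp.dp_flags & DUMP_DH_END)
 *			break;
 *		lseek(fd, dp.dp_size, SEEK_CUR);
 *	}
 *
 * where dp_size is the (possibly compressed) length of the page data
 * that immediately follows each header.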
*/ typedef struct _dump_page_s { /* the address of this dump page */ uint64_t dp_address; /* the size of this dump page */ uint32_t dp_size; /* flags (currently DUMP_COMPRESSED, DUMP_RAW or DUMP_END) */ uint32_t dp_flags; } __attribute__((packed)) dump_page_t; /* * This structure contains information needed for the lkcdutils * package (particularly lcrash) to determine what information is * associated to this kernel, specifically. */ typedef struct lkcdinfo_s { int arch; int ptrsz; int byte_order; int linux_release; int page_shift; int page_size; uint64_t page_mask; uint64_t page_offset; int stack_offset; } lkcdinfo_t; #define DUMP_ASM_MAGIC_NUMBER 0xdeaddeadULL /* magic number */ struct pt_regs { /* The following registers are saved by SAVE_MIN: */ unsigned long b6; /* scratch */ unsigned long b7; /* scratch */ unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */ unsigned long ar_ssd; /* reserved for future use (scratch) */ unsigned long r8; /* scratch (return value register 0) */ unsigned long r9; /* scratch (return value register 1) */ unsigned long r10; /* scratch (return value register 2) */ unsigned long r11; /* scratch (return value register 3) */ unsigned long cr_ipsr; /* interrupted task's psr */ unsigned long cr_iip; /* interrupted task's instruction pointer */ unsigned long cr_ifs; /* interrupted task's function state */ unsigned long ar_unat; /* interrupted task's NaT register (preserved) */ unsigned long ar_pfs; /* prev function state */ unsigned long ar_rsc; /* RSE configuration */ /* The following two are valid only if cr_ipsr.cpl > 0: */ unsigned long ar_rnat; /* RSE NaT */ unsigned long ar_bspstore; /* RSE bspstore */ unsigned long pr; /* 64 predicate registers (1 bit each) */ unsigned long b0; /* return pointer (bp) */ unsigned long loadrs; /* size of dirty partition << 16 */ unsigned long r1; /* the gp pointer */ unsigned long r12; /* interrupted task's memory stack pointer */ unsigned long r13; /* thread pointer */ unsigned long ar_fpsr; /* floating point status (preserved) */ unsigned long r15; /* scratch */ /* The remaining registers are NOT saved for system calls. */ unsigned long r14; /* scratch */ unsigned long r2; /* scratch */ unsigned long r3; /* scratch */ /* The following registers are saved by SAVE_REST: */ unsigned long r16; /* scratch */ unsigned long r17; /* scratch */ unsigned long r18; /* scratch */ unsigned long r19; /* scratch */ unsigned long r20; /* scratch */ unsigned long r21; /* scratch */ unsigned long r22; /* scratch */ unsigned long r23; /* scratch */ unsigned long r24; /* scratch */ unsigned long r25; /* scratch */ unsigned long r26; /* scratch */ unsigned long r27; /* scratch */ unsigned long r28; /* scratch */ unsigned long r29; /* scratch */ unsigned long r30; /* scratch */ unsigned long r31; /* scratch */ unsigned long ar_ccv; /* compare/exchange value (scratch) */ /* * Floating point registers that the kernel considers scratch: */ struct ia64_fpreg f6; /* scratch */ struct ia64_fpreg f7; /* scratch */ struct ia64_fpreg f8; /* scratch */ struct ia64_fpreg f9; /* scratch */ struct ia64_fpreg f10; /* scratch */ struct ia64_fpreg f11; /* scratch */ }; /* * Structure: dump_header_asm_t * Function: This is the header for architecture-specific stuff. It * follows right after the dump header. 
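 *
 * A consumer would sanity-check it against the asm magic before
 * trusting any of the register contents; a minimal sketch
 * (hypothetical reader, error handling elided):
 *
 *	dump_header_asm_t dha;
 *	if (read(fd, &dha, sizeof(dha)) != sizeof(dha) ||
 *	    dha.dha_magic_number != DUMP_ASM_MAGIC_NUMBER)
 *		return -1;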
* */ typedef struct _dump_header_asm_s { /* the dump magic number -- unique to verify dump is valid */ uint64_t dha_magic_number; /* the version number of this dump */ uint32_t dha_version; /* the size of this header (in case we can't read it) */ uint32_t dha_header_size; /* pointer to pt_regs */ // struct pt_regs *dha_pt_regs; // version 4 changed this uint64_t dha_pt_regs; /* the dump registers */ struct pt_regs dha_regs; /* the rnat register saved after flushrs */ uint64_t dha_rnat; /* the pfs register saved after flushrs */ uint64_t dha_pfs; /* the bspstore register saved after flushrs */ uint64_t dha_bspstore; /* smp specific */ uint32_t dha_smp_num_cpus; uint32_t dha_dumping_cpu; // v4 changed this struct pt_regs dha_smp_regs[NR_CPUS]; uint64_t dha_smp_current_task[NR_CPUS]; // v4 changed this uint64_t dha_stack[NR_CPUS]; // v4 changed this uint64_t dha_switch_stack[NR_CPUS]; // v4 changed this } __attribute__((packed)) dump_header_asm_t; #endif // IA64 crash-7.2.8/alpha.c0000775000000000000000000022547413614623427012612 0ustar rootroot/* alpha.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2010-2013 David Anderson * Copyright (C) 2002-2006, 2010-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #ifdef ALPHA #include "defs.h" static void alpha_back_trace(struct gnu_request *, struct bt_info *); static int alpha_trace_status(struct gnu_request *, struct bt_info *); static void alpha_exception_frame(ulong, ulong, struct gnu_request *, struct bt_info *); static void alpha_frame_offset(struct gnu_request *, ulong); static int alpha_backtrace_resync(struct gnu_request *, ulong, struct bt_info *); static void alpha_print_stack_entry(struct gnu_request *, ulong, char *, ulong, struct bt_info *); static int alpha_resync_speculate(struct gnu_request *, ulong,struct bt_info *); static int alpha_dis_filter(ulong, char *, unsigned int); static void dis_address_translation(ulong, char *, unsigned int); static void alpha_cmd_mach(void); static int alpha_get_smp_cpus(void); static void alpha_display_machine_stats(void); static void alpha_dump_line_number(char *, ulong); static void display_hwrpb(unsigned int); static void alpha_post_init(void); static struct line_number_hook alpha_line_number_hooks[]; #define ALPHA_CONTINUE_TRACE (1) #define ALPHA_END_OF_TRACE (2) #define ALPHA_EXCEPTION_FRAME (3) #define ALPHA_SYSCALL_FRAME (4) #define ALPHA_MM_FAULT (5) #define ALPHA_INTERRUPT_PENDING (6) #define ALPHA_RESCHEDULE (7) #define ALPHA_DOWN_FAILED (8) #define ALPHA_RET_FROM_SMP_FORK (9) #define ALPHA_SIGNAL_RETURN (10) #define ALPHA_STRACE (11) static int alpha_eframe_search(struct bt_info *); static int alpha_uvtop(struct task_context *, ulong, physaddr_t *, int); static int alpha_kvtop(struct task_context *, ulong, physaddr_t *, int); static void alpha_back_trace_cmd(struct bt_info *); static ulong alpha_get_task_pgd(ulong task); static ulong alpha_processor_speed(void); static void alpha_dump_irq(int); static void alpha_get_stack_frame(struct 
bt_info *, ulong *, ulong *); static void get_alpha_frame(struct bt_info *, ulong *, ulong *); static int verify_user_eframe(struct bt_info *, ulong, ulong); static int alpha_translate_pte(ulong, void *, ulonglong); static uint64_t alpha_memory_size(void); static ulong alpha_vmalloc_start(void); static int alpha_is_task_addr(ulong); static int alpha_verify_symbol(const char *, ulong, char); struct percpu_data { ulong halt_PC; ulong halt_ra; ulong halt_pv; }; #define GET_HALT_PC 0x1 #define GET_HALT_RA 0x2 #define GET_HALT_PV 0x3 static ulong get_percpu_data(int, ulong, struct percpu_data *); /* * Do all necessary machine-specific setup here. This is called three times, * before symbol table initialization, and before and after GDB has been * initialized. */ void alpha_init(int when) { int tmp; switch (when) { case PRE_SYMTAB: machdep->verify_symbol = alpha_verify_symbol; if (pc->flags & KERNEL_DEBUG_QUERY) return; machdep->pagesize = memory_page_size(); machdep->pageshift = ffs(machdep->pagesize) - 1; machdep->pageoffset = machdep->pagesize - 1; machdep->pagemask = ~(machdep->pageoffset); machdep->stacksize = machdep->pagesize * 2; if ((machdep->pgd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pgd space."); if ((machdep->pmd = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc pmd space."); if ((machdep->ptbl = (char *)malloc(PAGESIZE())) == NULL) error(FATAL, "cannot malloc ptbl space."); machdep->last_pgd_read = 0; machdep->last_pmd_read = 0; machdep->last_ptbl_read = 0; machdep->verify_paddr = generic_verify_paddr; machdep->ptrs_per_pgd = PTRS_PER_PGD; break; case PRE_GDB: switch (symbol_value("_stext") & KSEG_BASE) { case KSEG_BASE: machdep->kvbase = KSEG_BASE; break; case KSEG_BASE_48_BIT: machdep->kvbase = KSEG_BASE_48_BIT; break; default: error(FATAL, "cannot determine KSEG base from _stext: %lx\n", symbol_value("_stext")); } machdep->identity_map_base = machdep->kvbase; machdep->is_kvaddr = generic_is_kvaddr; machdep->is_uvaddr = generic_is_uvaddr; machdep->eframe_search = alpha_eframe_search; machdep->back_trace = alpha_back_trace_cmd; machdep->processor_speed = alpha_processor_speed; machdep->uvtop = alpha_uvtop; machdep->kvtop = alpha_kvtop; machdep->get_task_pgd = alpha_get_task_pgd; if (symbol_exists("irq_desc")) machdep->dump_irq = generic_dump_irq; else machdep->dump_irq = alpha_dump_irq; machdep->get_stack_frame = alpha_get_stack_frame; machdep->get_stackbase = generic_get_stackbase; machdep->get_stacktop = generic_get_stacktop; machdep->translate_pte = alpha_translate_pte; machdep->memory_size = alpha_memory_size; machdep->vmalloc_start = alpha_vmalloc_start; machdep->is_task_addr = alpha_is_task_addr; if (symbol_exists("console_crash")) { get_symbol_data("console_crash", sizeof(int), &tmp); if (tmp) machdep->flags |= HWRESET; } machdep->dis_filter = alpha_dis_filter; machdep->cmd_mach = alpha_cmd_mach; machdep->get_smp_cpus = alpha_get_smp_cpus; machdep->line_number_hooks = alpha_line_number_hooks; machdep->value_to_symbol = generic_machdep_value_to_symbol; machdep->init_kernel_pgd = NULL; break; case POST_GDB: MEMBER_OFFSET_INIT(thread_struct_ptbr, "thread_struct", "ptbr"); MEMBER_OFFSET_INIT(hwrpb_struct_cycle_freq, "hwrpb_struct", "cycle_freq"); MEMBER_OFFSET_INIT(hwrpb_struct_processor_offset, "hwrpb_struct", "processor_offset"); MEMBER_OFFSET_INIT(hwrpb_struct_processor_size, "hwrpb_struct", "processor_size"); MEMBER_OFFSET_INIT(percpu_struct_halt_PC, "percpu_struct", "halt_PC"); MEMBER_OFFSET_INIT(percpu_struct_halt_ra, 
"percpu_struct", "halt_ra"); MEMBER_OFFSET_INIT(percpu_struct_halt_pv, "percpu_struct", "halt_pv"); MEMBER_OFFSET_INIT(switch_stack_r26, "switch_stack", "r26"); if (symbol_exists("irq_action")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_action, "irq_action", NULL, 0); else if (symbol_exists("irq_desc")) ARRAY_LENGTH_INIT(machdep->nr_irqs, irq_desc, "irq_desc", NULL, 0); else machdep->nr_irqs = 0; if (!machdep->hz) machdep->hz = HZ; break; case POST_INIT: alpha_post_init(); break; } } /* * Unroll a kernel stack. */ static void alpha_back_trace_cmd(struct bt_info *bt) { char buf[BUFSIZE]; struct gnu_request *req; bt->flags |= BT_EXCEPTION_FRAME; if (CRASHDEBUG(1) || bt->debug) fprintf(fp, " => PC: %lx (%s) FP: %lx \n", bt->instptr, value_to_symstr(bt->instptr, buf, 0), bt->stkptr ); req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->command = GNU_STACK_TRACE; req->flags = GNU_RETURN_ON_ERROR; req->buf = GETBUF(BUFSIZE); req->debug = bt->debug; req->task = bt->task; req->pc = bt->instptr; req->sp = bt->stkptr; if (bt->flags & BT_USE_GDB) { strcpy(req->buf, "backtrace"); gdb_interface(req); } else alpha_back_trace(req, bt); FREEBUF(req->buf); FREEBUF(req); } /* * Unroll the kernel stack. */ #define ALPHA_BACKTRACE_SPECULATE(X) \ { \ speculate_location = X; \ \ if (bt->flags & BT_SPECULATE) \ return; \ \ BZERO(btloc, sizeof(struct bt_info)); \ btloc->task = req->task; \ btloc->tc = bt->tc; \ btloc->stackbase = bt->stackbase; \ btloc->stacktop = bt->stacktop; \ btloc->flags = BT_TEXT_SYMBOLS_NOPRINT; \ hook.eip = 0; \ hook.esp = req->lastsp ? req->lastsp + sizeof(long) : 0; \ btloc->hp = &hook; \ \ back_trace(btloc); \ \ if (hook.esp && hook.eip) { \ req->hookp = &hook; \ if (alpha_resync_speculate(req, bt->flags, bt)) { \ req->pc = hook.eip; \ req->sp = hook.esp; \ continue; \ } \ goto show_remaining_text; \ } \ goto show_remaining_text; \ } static void alpha_back_trace(struct gnu_request *req, struct bt_info *bt) { char buf[BUFSIZE]; int frame; int done; int status; struct stack_hook hook; int eframe_same_pc_ra_function; int speculate_location; struct bt_info bt_info, *btloc; frame = 0; req->curframe = 0; btloc = &bt_info; if (!IS_KVADDR(req->pc)) { if (BT_REFERENCE_CHECK(bt)) return; if ((machdep->flags & HWRESET) && is_task_active(req->task)) { fprintf(fp, "(hardware reset while in user space)\n"); return; } fprintf(fp, "invalid pc: %lx\n", req->pc); alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME, req, bt); return; } for (done = FALSE; !done && (frame < 100); frame++) { speculate_location = 0; if ((req->name = closest_symbol(req->pc)) == NULL) { req->ra = req->pc = 0; if (alpha_backtrace_resync(req, bt->flags | BT_FROM_CALLFRAME, bt)) continue; if (BT_REFERENCE_FOUND(bt)) return; ALPHA_BACKTRACE_SPECULATE(1); } if (!INSTACK(req->sp, bt)) break; if (!is_kernel_text(req->pc)) ALPHA_BACKTRACE_SPECULATE(2); alpha_print_stack_entry(req, req->pc, req->name, bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; switch (status = alpha_trace_status(req, bt)) { case ALPHA_CONTINUE_TRACE: alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_END_OF_TRACE: done = TRUE; break; case ALPHA_STRACE: alpha_exception_frame(req->sp, BT_USER_EFRAME|BT_STRACE, req, bt); done = TRUE; break; case ALPHA_RET_FROM_SMP_FORK: alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME|BT_RET_FROM_SMP_FORK, req, bt); done = TRUE; 
break; case ALPHA_DOWN_FAILED: frame++; alpha_print_stack_entry(req, req->pc, closest_symbol(req->pc), bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_RESCHEDULE: alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME|BT_RESCHEDULE, req, bt); done = TRUE; break; case ALPHA_MM_FAULT: alpha_exception_frame(req->sp, bt->flags, req, bt); if (!IS_KVADDR(req->pc)) { done = TRUE; break; } alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } frame++; alpha_print_stack_entry(req, req->pc, closest_symbol(req->pc), bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; if (!IS_KVADDR(req->pc)) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_SYSCALL_FRAME: req->sp = verify_user_eframe(bt, req->task, req->sp) ? req->sp : USER_EFRAME_ADDR(req->task); alpha_exception_frame(req->sp, bt->flags, req, bt); if (!IS_KVADDR(req->pc)) { done = TRUE; break; } alpha_frame_offset(req, 0); if (!req->value) { done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_SIGNAL_RETURN: alpha_exception_frame(USER_EFRAME_ADDR(req->task), bt->flags, req, bt); done = TRUE; break; case ALPHA_EXCEPTION_FRAME: alpha_frame_offset(req, 0); if (!req->value) { fprintf(fp, "ALPHA EXCEPTION FRAME w/no frame offset for %lx (%s)\n", req->pc, value_to_symstr(req->pc, buf, 0)); done = TRUE; break; } alpha_exception_frame(req->sp + req->value, bt->flags, req, bt); if (!IS_KVADDR(req->pc)) { done = TRUE; break; } alpha_frame_offset(req, 0); if (!req->value) { fprintf(fp, "ALPHA EXCEPTION FRAME w/no frame offset for %lx (%s)\n", req->pc, value_to_symstr(req->pc, buf, 0)); done = TRUE; break; } eframe_same_pc_ra_function = SAME_FUNCTION(req->pc, req->ra); frame++; alpha_print_stack_entry(req, req->pc, closest_symbol(req->pc), bt->flags | BT_SAVE_LASTSP, bt); if (BT_REFERENCE_FOUND(bt)) return; if (!IS_KVADDR(req->pc)) { done = TRUE; break; } if (STREQ(closest_symbol(req->pc), "ret_from_reschedule")) { alpha_exception_frame( USER_EFRAME_ADDR(req->task), BT_USER_EFRAME|BT_RESCHEDULE, req, bt); done = TRUE; break; } req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); if (!is_kernel_text(req->pc)) { if (alpha_backtrace_resync(req, bt->flags | BT_FROM_EXCEPTION, bt)) break; if (BT_REFERENCE_FOUND(bt)) return; ALPHA_BACKTRACE_SPECULATE(3); } if (!eframe_same_pc_ra_function && (req->pc != req->ra)) { req->pc = req->ra; break; } req->prevsp = req->sp; req->sp += req->value; break; case ALPHA_INTERRUPT_PENDING: alpha_frame_offset(req, 0); if (!req->value) { req->prevpc = req->pc; req->pc = req->addr; req->prevsp = req->sp; req->sp = req->frame; } else { req->prevpc = req->pc; req->pc = GET_STACK_ULONG(req->sp); req->prevsp = req->sp; req->sp += req->value; } break; } } return; show_remaining_text: if (BT_REFERENCE_CHECK(bt)) return; BZERO(btloc, sizeof(struct bt_info)); btloc->task = req->task; btloc->tc = bt->tc; btloc->stackbase = bt->stackbase; btloc->stacktop = bt->stacktop; btloc->flags = BT_TEXT_SYMBOLS_NOPRINT; hook.esp = req->lastsp + sizeof(long); btloc->hp = &hook; back_trace(btloc); if (hook.eip) { fprintf(fp, "NOTE: cannot resolve trace from this point -- remaining text symbols on stack:\n"); 
btloc->flags = BT_TEXT_SYMBOLS_PRINT|BT_ERROR_MASK; hook.esp = req->lastsp + sizeof(long); back_trace(btloc); } else fprintf(fp, "NOTE: cannot resolve trace from this point -- no remaining text symbols\n"); if (CRASHDEBUG(1)) fprintf(fp, "speculate_location: %d\n", speculate_location); alpha_exception_frame(USER_EFRAME_ADDR(req->task), BT_USER_EFRAME, req, bt); } /* * print one entry of a stack trace */ static void alpha_print_stack_entry(struct gnu_request *req, ulong callpc, char *name, ulong flags, struct bt_info *bt) { struct load_module *lm; if (BT_REFERENCE_CHECK(bt)) { switch (bt->ref->cmdflags & (BT_REF_SYMBOL|BT_REF_HEXVAL)) { case BT_REF_SYMBOL: if (STREQ(name, bt->ref->str) || (STREQ(name, "strace") && STREQ(bt->ref->str, "entSys"))) { bt->ref->cmdflags |= BT_REF_FOUND; } break; case BT_REF_HEXVAL: if (bt->ref->hexval == callpc) bt->ref->cmdflags |= BT_REF_FOUND; break; } } else { fprintf(fp, "%s#%d [%lx] %s at %lx", req->curframe < 10 ? " " : "", req->curframe, req->sp, STREQ(name, "strace") ? "strace (via entSys)" : name, callpc); if (module_symbol(callpc, NULL, &lm, NULL, 0)) fprintf(fp, " [%s]", lm->mod_name); fprintf(fp, "\n"); } if (!(flags & BT_SPECULATE)) req->curframe++; if (flags & BT_SAVE_LASTSP) req->lastsp = req->sp; if (BT_REFERENCE_CHECK(bt)) return; if (flags & BT_LINE_NUMBERS) alpha_dump_line_number(name, callpc); } static const char *hook_files[] = { "arch/alpha/kernel/entry.S", "arch/alpha/kernel/head.S", "init/main.c", "arch/alpha/kernel/smp.c", }; #define ENTRY_S ((char **)&hook_files[0]) #define HEAD_S ((char **)&hook_files[1]) #define MAIN_C ((char **)&hook_files[2]) #define SMP_C ((char **)&hook_files[3]) static struct line_number_hook alpha_line_number_hooks[] = { {"entInt", ENTRY_S}, {"entMM", ENTRY_S}, {"entArith", ENTRY_S}, {"entIF", ENTRY_S}, {"entDbg", ENTRY_S}, {"kernel_clone", ENTRY_S}, {"kernel_thread", ENTRY_S}, {"__kernel_execve", ENTRY_S}, {"do_switch_stack", ENTRY_S}, {"undo_switch_stack", ENTRY_S}, {"entUna", ENTRY_S}, {"entUnaUser", ENTRY_S}, {"sys_fork", ENTRY_S}, {"sys_clone", ENTRY_S}, {"sys_vfork", ENTRY_S}, {"alpha_switch_to", ENTRY_S}, {"entSys", ENTRY_S}, {"ret_from_sys_call", ENTRY_S}, {"ret_from_reschedule", ENTRY_S}, {"restore_all", ENTRY_S}, {"strace", ENTRY_S}, {"strace_success", ENTRY_S}, {"strace_error", ENTRY_S}, {"syscall_error", ENTRY_S}, {"ret_success", ENTRY_S}, {"signal_return", ENTRY_S}, {"ret_from_fork", ENTRY_S}, {"reschedule", ENTRY_S}, {"sys_sigreturn", ENTRY_S}, {"sys_rt_sigreturn", ENTRY_S}, {"sys_sigsuspend", ENTRY_S}, {"sys_rt_sigsuspend", ENTRY_S}, {"ret_from_smpfork", ENTRY_S}, {"_stext", HEAD_S}, {"__start", HEAD_S}, {"__smp_callin", HEAD_S}, {"cserve_ena", HEAD_S}, {"cserve_dis", HEAD_S}, {"halt", HEAD_S}, {"start_kernel", MAIN_C}, {"smp_callin", SMP_C}, {NULL, NULL} /* list must be NULL-terminated */ }; static void alpha_dump_line_number(char *name, ulong callpc) { char buf[BUFSIZE], *p; int retries; retries = 0; try_closest: get_line_number(callpc, buf, FALSE); if (strlen(buf)) { if (retries) { p = strstr(buf, ": "); if (p) *p = NULLCHAR; } fprintf(fp, " %s\n", buf); } else { if (retries) fprintf(fp, GDB_PATCHED() ? "" : " (cannot determine file and line number)\n"); else { retries++; callpc = closest_symbol_value(callpc); goto try_closest; } } } /* * Look for the frame size storage at the beginning of a function. * If it's not obvious, try gdb. 
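 *
 * The scan below masks each instruction with 0xffe01fff and compares
 * the result against 0x43c0153e, i.e. "subq sp,<frame>,sp" with the
 * 8-bit literal field wildcarded; the frame size is then bits 13-20
 * of the matching instruction. Worked example using the first
 * encoding quoted below:
 *
 *	ival = 0x43ca153e			subq sp,0x50,sp
 *	ival & 0xffe01fff == 0x43c0153e		match
 *	(ival & 0x001fe000) >> 13 == 0x50	frame size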
* * For future reference, here's where the numbers come from: * * 0xfffffc00003217e8 : subq sp,0x50,sp * fffffc00003217e8: 43ca153e * 010000 11110 01010000 1 0101001 11110 * * 0xfffffc0000321668 : subq sp,0x60,sp * fffffc0000321668: 43cc153e * 010000 11110 01100000 1 0101001 11110 * * 0xfffffc000035d028 : subq sp,0x70,sp * fffffc000035d028: 43ce153e * 010000 11110 01110000 1 0101001 11110 * * 0100 0011 110x xxxx xxx1 0101 0011 1110 * 1111 1111 111x xxxx xxx1 1111 1111 1111 * 0000 0000 0001 1111 1110 0000 0000 0000 * f f e 0 1 f f f instruction mask * 0 0 1 f e 0 0 0 offset * * stq ra,0(sp) * fffffc000035d034: b75e0000 */ static void alpha_frame_offset(struct gnu_request *req, ulong alt_pc) { uint *ip, ival; ulong value; req->value = value = 0; if (alt_pc && !is_kernel_text(alt_pc)) error(FATAL, "trying to get frame offset of non-text address: %lx\n", alt_pc); else if (!alt_pc && !is_kernel_text(req->pc)) error(FATAL, "trying to get frame offset of non-text address: %lx\n", req->pc); ip = alt_pc ? (int *)closest_symbol_value(alt_pc) : (int *)closest_symbol_value(req->pc); if (!ip) goto use_gdb; ival = 0; /* * Don't go any farther than "stq ra,0(sp)" (0xb75e0000) */ while (ival != 0xb75e0000) { if (!text_value_cache((ulong)ip, 0, &ival)) { readmem((ulong)ip, KVADDR, &ival, sizeof(uint), "uncached text value", FAULT_ON_ERROR); text_value_cache((ulong)ip, ival, NULL); } if ((ival & 0xffe01fff) == 0x43c0153e) { value = (ival & 0x1fe000) >> 13; break; } ip++; } if (value) { req->value = value; return; } use_gdb: #ifndef GDB_5_3 { static int gdb_frame_offset_warnings = 10; if (gdb_frame_offset_warnings-- > 0) error(WARNING, "GNU_ALPHA_FRAME_OFFSET functionality not ported to gdb\n"); } #endif req->command = GNU_ALPHA_FRAME_OFFSET; if (alt_pc) { ulong pc_save; pc_save = req->pc; req->pc = alt_pc; gdb_interface(req); req->pc = pc_save; } else gdb_interface(req); } /* * Look for key routines that either mean the trace has ended or has * bumped into an exception frame. 
*/ int alpha_trace_status(struct gnu_request *req, struct bt_info *bt) { ulong value; char *func; ulong frame; req->addr = 0; func = req->name; frame = req->sp; if (STREQ(func, "start_kernel") || STREQ(func, "smp_callin") || STREQ(func, "kernel_thread") || STREQ(func, "__kernel_thread")) return ALPHA_END_OF_TRACE; if (STREQ(func, "ret_from_smp_fork") || STREQ(func, "ret_from_smpfork")) return ALPHA_RET_FROM_SMP_FORK; if (STREQ(func, "entSys")) return ALPHA_SYSCALL_FRAME; if (STREQ(func, "entMM")) { req->sp += 56; /* see entMM in entry.S */ return ALPHA_MM_FAULT; } if (STREQ(func, "do_entInt")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "do_entArith")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "do_entIF")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "do_entDbg")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "handle_bottom_half")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "handle_softirq")) return ALPHA_EXCEPTION_FRAME; if (STREQ(func, "reschedule")) return ALPHA_RESCHEDULE; if (STREQ(func, "ret_from_reschedule")) return ALPHA_RESCHEDULE; if (STREQ(func, "signal_return")) return ALPHA_SIGNAL_RETURN; if (STREQ(func, "strace")) return ALPHA_STRACE; if (STREQ(func, "__down_failed") || STREQ(func, "__down_failed_interruptible")) { readmem(req->sp + 144, KVADDR, &req->pc, sizeof(ulong), "__down_failed r26", FAULT_ON_ERROR); req->sp += 160; return ALPHA_DOWN_FAILED; } value = GET_STACK_ULONG(frame); if (STREQ(closest_symbol(value), "do_entInt") || STREQ(closest_symbol(value), "do_entArith") || STREQ(closest_symbol(value), "do_entIF") || STREQ(closest_symbol(value), "do_entDbg")) { req->addr = value; req->frame = 0; while (INSTACK(frame, bt)) { frame += sizeof(ulong); value = GET_STACK_ULONG(frame); if (STREQ(closest_symbol(value), "ret_from_sys_call")) { alpha_frame_offset(req, req->addr); /* req->frame = frame + req->value; XXX */ break; } } return ALPHA_INTERRUPT_PENDING; } return ALPHA_CONTINUE_TRACE; } /* * Redo the gdb pt_regs structure output. 
*/ enum regnames { _r0_, _r1_, _r2_, _r3_, _r4_, _r5_, _r6_, _r7_, _r8_, _r19_, _r20_, _r21_, _r22_, _r23_, _r24_, _r25_, _r26_, _r27_, _r28_, _hae_, _trap_a0_, _trap_a1_, _trap_a2_, _ps_, _pc_, _gp_, _r16_, _r17_, _r18_, NUMREGS}; struct alpha_eframe { char regs[30][30]; ulong value[29]; }; static void alpha_exception_frame(ulong addr, ulong flags, struct gnu_request *req, struct bt_info *bt) { int i, j; char buf[BUFSIZE]; ulong value; physaddr_t paddr; struct alpha_eframe eframe; if (CRASHDEBUG(4)) fprintf(fp, "alpha_exception_frame: %lx\n", addr); if (flags & BT_SPECULATE) { req->pc = 0; fprintf(fp, "ALPHA EXCEPTION FRAME\n"); return; } BZERO(&eframe, sizeof(struct alpha_eframe)); open_tmpfile(); dump_struct("pt_regs", addr, RADIX(16)); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { strip_comma(clean_line(buf)); if (!strstr(buf, "0x")) continue; extract_hex(buf, &value, NULLCHAR, TRUE); if (CRASHDEBUG(4)) fprintf(pc->saved_fp, "<%s> %lx\n", buf, value); if (STRNEQ(buf, "r0 = ")) { sprintf(eframe.regs[_r0_], " V0/R0: %016lx", value); eframe.value[_r0_] = value; } if (STRNEQ(buf, "r1 = ")) { sprintf(eframe.regs[_r1_], " T0/R1: %016lx", value); eframe.value[_r1_] = value; } if (STRNEQ(buf, "r2 = ")) { sprintf(eframe.regs[_r2_], " T1/R2: %016lx", value); eframe.value[_r2_] = value; } if (STRNEQ(buf, "r3 = ")) { sprintf(eframe.regs[_r3_], " T2/R3: %016lx", value); eframe.value[_r3_] = value; } if (STRNEQ(buf, "r4 = ")) { sprintf(eframe.regs[_r4_], " T3/R4: %016lx", value); eframe.value[_r4_] = value; } if (STRNEQ(buf, "r5 = ")) { sprintf(eframe.regs[_r5_], " T4/R5: %016lx", value); eframe.value[_r5_] = value; } if (STRNEQ(buf, "r6 = ")) { sprintf(eframe.regs[_r6_], " T5/R6: %016lx", value); eframe.value[_r6_] = value; } if (STRNEQ(buf, "r7 = ")) { sprintf(eframe.regs[_r7_], " T6/R7: %016lx", value); eframe.value[_r7_] = value; } if (STRNEQ(buf, "r8 = ")) { sprintf(eframe.regs[_r8_], " T7/R8: %016lx", value); eframe.value[_r8_] = value; } if (STRNEQ(buf, "r19 = ")) { sprintf(eframe.regs[_r19_], " A3/R19: %016lx", value); eframe.value[_r19_] = value; } if (STRNEQ(buf, "r20 = ")) { sprintf(eframe.regs[_r20_], " A4/R20: %016lx", value); eframe.value[_r20_] = value; } if (STRNEQ(buf, "r21 = ")) { sprintf(eframe.regs[_r21_], " A5/R21: %016lx", value); eframe.value[_r21_] = value; } if (STRNEQ(buf, "r22 = ")) { sprintf(eframe.regs[_r22_], " T8/R22: %016lx", value); eframe.value[_r22_] = value; } if (STRNEQ(buf, "r23 = ")) { sprintf(eframe.regs[_r23_], " T9/R23: %016lx", value); eframe.value[_r23_] = value; } if (STRNEQ(buf, "r24 = ")) { sprintf(eframe.regs[_r24_], "T10/R24: %016lx", value); eframe.value[_r24_] = value; } if (STRNEQ(buf, "r25 = ")) { sprintf(eframe.regs[_r25_], "T11/R25: %016lx", value); eframe.value[_r25_] = value; } if (STRNEQ(buf, "r26 = ")) { sprintf(eframe.regs[_r26_], " RA/R26: %016lx", value); eframe.value[_r26_] = value; } if (STRNEQ(buf, "r27 = ")) { sprintf(eframe.regs[_r27_], "T12/R27: %016lx", value); eframe.value[_r27_] = value; } if (STRNEQ(buf, "r28 = ")) { sprintf(eframe.regs[_r28_], " AT/R28: %016lx", value); eframe.value[_r28_] = value; } if (STRNEQ(buf, "hae = ")) { sprintf(eframe.regs[_hae_], " HAE: %016lx", value); eframe.value[_hae_] = value; } if (STRNEQ(buf, "trap_a0 = ")) { sprintf(eframe.regs[_trap_a0_], "TRAP_A0: %016lx", value); eframe.value[_trap_a0_] = value; } if (STRNEQ(buf, "trap_a1 = ")) { sprintf(eframe.regs[_trap_a1_], "TRAP_A1: %016lx", value); eframe.value[_trap_a1_] = value; } if (STRNEQ(buf, "trap_a2 = ")) { 
sprintf(eframe.regs[_trap_a2_], "TRAP_A2: %016lx", value); eframe.value[_trap_a2_] = value; } if (STRNEQ(buf, "ps = ")) { sprintf(eframe.regs[_ps_], " PS: %016lx", value); eframe.value[_ps_] = value; } if (STRNEQ(buf, "pc = ")) { sprintf(eframe.regs[_pc_], " PC: %016lx", value); eframe.value[_pc_] = value; } if (STRNEQ(buf, "gp = ")) { sprintf(eframe.regs[_gp_], " GP/R29: %016lx", value); eframe.value[_gp_] = value; } if (STRNEQ(buf, "r16 = ")) { sprintf(eframe.regs[_r16_], " A0/R16: %016lx", value); eframe.value[_r16_] = value; } if (STRNEQ(buf, "r17 = ")) { sprintf(eframe.regs[_r17_], " A1/R17: %016lx", value); eframe.value[_r17_] = value; } if (STRNEQ(buf, "r18 =")) { sprintf(eframe.regs[_r18_], " A2/R18: %016lx", value); eframe.value[_r18_] = value; } } close_tmpfile(); if ((flags & BT_EXCEPTION_FRAME) && !BT_REFERENCE_CHECK(bt)) { dump_eframe: fprintf(fp, " EFRAME: %lx ", addr); fprintf(fp, "%s\n", eframe.regs[_r24_]); for (i = 0; i < (((NUMREGS+1)/2)-1); i++) { fprintf(fp, "%s ", eframe.regs[i]); pad_line(fp, 21 - strlen(eframe.regs[i]), ' '); j = i+((NUMREGS+1)/2); fprintf(fp, "%s", eframe.regs[j]); if (((j == _pc_) || (j == _r26_)) && is_kernel_text(eframe.value[j])) fprintf(fp, " <%s>", value_to_symstr(eframe.value[j], buf, 0)); fprintf(fp, "\n"); } } req->ra = eframe.value[_r26_]; req->pc = eframe.value[_pc_]; req->sp = addr + (29 * sizeof(ulong)); if (flags & BT_USER_EFRAME) { flags &= ~BT_USER_EFRAME; if (!BT_REFERENCE_CHECK(bt) && (eframe.value[_ps_] == 8) && (((uvtop(task_to_context(req->task), req->pc, &paddr, 0) || (volatile ulong)paddr) && (uvtop(task_to_context(req->task), req->ra, &paddr, 0) || (volatile ulong)paddr)) || (IS_ZOMBIE(req->task) || IS_EXITING(req->task)))) { if (!(flags & (BT_RESCHEDULE|BT_RET_FROM_SMP_FORK|BT_STRACE))) fprintf(fp, "NOTE: kernel-entry exception frame:\n"); goto dump_eframe; } } } /* * Look for likely exception frames in a stack. 
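 *
 * A condensed sketch of the kernel-mode test applied below: the saved
 * PS must have no bits set above the low 3-bit mode field, and the
 * saved PC and RA must be kernel (or module) text:
 *
 *	if (!(pt->reg_value[_ps_] & ~0x7UL) &&
 *	    is_kernel_text(pt->reg_value[_pc_]) &&
 *	    is_kernel_text(pt->reg_value[_r26_]))
 *		... treat as a kernel-mode exception frame ...
 *
 * User-mode frames are instead recognized by PS == 8 with the PC and
 * RA falling within the task's user address space.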
*/ struct alpha_pt_regs { ulong reg_value[NUMREGS]; }; static int alpha_eframe_search(struct bt_info *bt) { ulong *first, *last; ulong eframe; struct alpha_pt_regs *pt; struct gnu_request *req; /* needed for alpha_exception_frame */ ulong *stack; int cnt; stack = (ulong *)bt->stackbuf; req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request)); req->task = bt->task; first = stack + (roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)); last = stack + (((bt->stacktop - bt->stackbase) - SIZE(pt_regs)) / sizeof(ulong)); for (cnt = 0; first <= last; first++) { pt = (struct alpha_pt_regs *)first; /* check for kernel exception frame */ if (!(pt->reg_value[_ps_] & 0xfffffffffffffff8) && (is_kernel_text(pt->reg_value[_pc_]) || IS_MODULE_VADDR(pt->reg_value[_pc_])) && (is_kernel_text(pt->reg_value[_r26_]) || IS_MODULE_VADDR(pt->reg_value[_r26_])) && IS_KVADDR(pt->reg_value[_gp_])) { cnt++; if (bt->flags & BT_EFRAME_COUNT) continue; fprintf(fp, "\nKERNEL-MODE EXCEPTION FRAME:\n"); eframe = bt->task + ((ulong)first - (ulong)stack); alpha_exception_frame(eframe, BT_EXCEPTION_FRAME, req, bt); continue; } /* check for user exception frame */ if ((pt->reg_value[_ps_] == 0x8) && ((IN_TASK_VMA(bt->task, pt->reg_value[_pc_]) && IN_TASK_VMA(bt->task, pt->reg_value[_r26_]) && IS_UVADDR(pt->reg_value[_gp_], bt->tc)) || ((first == last) && (IS_ZOMBIE(bt->task) || IS_EXITING(bt->task))))) { cnt++; if (bt->flags & BT_EFRAME_COUNT) continue; fprintf(fp, "\nUSER-MODE EXCEPTION FRAME:\n"); eframe = bt->task + ((ulong)first - (ulong)stack); alpha_exception_frame(eframe, BT_EXCEPTION_FRAME, req, bt); } } FREEBUF(req); return cnt; } /* * Before dumping a nonsensical exception frame, give it a quick test. */ static int verify_user_eframe(struct bt_info *bt, ulong task, ulong sp) { struct alpha_pt_regs ptbuf, *pt; readmem(sp, KVADDR, &ptbuf, sizeof(struct alpha_pt_regs), "pt_regs", FAULT_ON_ERROR); pt = &ptbuf; if ((pt->reg_value[_ps_] == 0x8) && ((IN_TASK_VMA(task, pt->reg_value[_pc_]) && IN_TASK_VMA(task, pt->reg_value[_r26_]) && IS_UVADDR(pt->reg_value[_gp_], bt->tc)) || ((pt == (struct alpha_pt_regs *)USER_EFRAME_ADDR(task)) && (IS_ZOMBIE(task) || IS_EXITING(task))))) { return TRUE; } return FALSE; } /* * Try to resync the stack location when there is no valid stack frame, * typically just above an exception frame. Use the req->ra value from the * exception frame as the new starting req->pc. Then walk up the stack until * a text routine that calls the newly-assigned pc is found -- that stack * location then becomes the new req->sp. * * If we're not coming from an exception frame, req->ra and req->pc will be * purposely zeroed out. In that case, use the prevsp value to find the * first pc that called the last frame's pc. * * Add any other repeatable "special-case" frames to the beginning of this * routine (ex. debug_spin_lock). Last ditch -- at the end of this routine, * speculate what might have happened (possibly in the background) -- and * if it looks good, run with it.
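 *
 * The heart of the resync is a forward stack walk that disassembles
 * each text value it finds, looking for a jsr/bsr to the target
 * symbol; a condensed sketch of the loop below, where calls_target()
 * is a hypothetical stand-in for the inline gdb disassembly check:
 *
 *	for (stkp++; INSTACK(stkp, bt); stkp++) {
 *		value = GET_STACK_ULONG(stkp);
 *		if (!is_kernel_text(value))
 *			continue;
 *		if (calls_target(value - sizeof(uint), name)) {
 *			req->pc = newpc;
 *			req->sp = (ulong)stkp;
 *			return TRUE;
 *		}
 *	}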
*/ static int alpha_backtrace_resync(struct gnu_request *req, ulong flags, struct bt_info *bt) { char addr[BUFSIZE]; char buf[BUFSIZE]; char lookfor1[BUFSIZE]; char lookfor2[BUFSIZE]; ulong newpc; ulong *stkp; ulong *stkp_newpc, *stkp_next; ulong value; int found; char *name; int exception; if (CRASHDEBUG(1)) fprintf(fp, "RESYNC1: [%lx-%d] ra: %lx pc: %lx sp: %lx\n", flags, req->curframe, req->ra, req->pc, req->sp); if (!req->ra && !req->pc) { req->ra = req->prevpc; exception = FALSE; } else exception = TRUE; if (!IS_KVADDR(req->ra)) return FALSE; name = closest_symbol(req->ra); sprintf(lookfor1, "<%s>", name); sprintf(lookfor2, "<%s+", name); if (CRASHDEBUG(1)) fprintf(fp, "RESYNC2: exception: %s lookfor: %s or %s\n", exception ? "TRUE" : "FALSE", lookfor1, lookfor2); /* * This is common when a non-panicking active CPU is spinning * in debug_spin_lock(). The next pc is offset by 0x30 from * the top of the exception frame, and the next sp is equal * to the frame offset of debug_spin_lock(). I can't explain it... */ if ((flags & BT_FROM_EXCEPTION) && STREQ(name, "debug_spin_lock")) { alpha_print_stack_entry(req, req->ra, closest_symbol(req->ra), flags, bt); if (BT_REFERENCE_FOUND(bt)) return FALSE; alpha_frame_offset(req, req->ra); stkp = (ulong *)(req->sp + 0x30); value = GET_STACK_ULONG(stkp); if (!is_kernel_text(value)) { req->sp = req->prevsp; return FALSE; } req->pc = value; req->sp += req->value; return TRUE; } /* * If the ra is a system call, then all we should have to do is * find the next reference to entSys on the stack, and set the * sp to that value. */ if (is_system_call(name, 0)) { /* stkp = (ulong *)req->sp; */ stkp = (ulong *)req->prevsp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (IS_KVADDR(value) && is_kernel_text(value)) { if (STREQ(closest_symbol(value), "entSys")) { req->pc = value; req->sp = USER_EFRAME_ADDR(req->task); return TRUE; } } } } /* * Just find the next location containing text. (?) */ if (STREQ(name, "do_coredump")) { stkp = (ulong *)(req->sp + sizeof(long)); for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (IS_KVADDR(value) && is_kernel_text(value)) { req->pc = req->ra; req->sp = (ulong)stkp; return TRUE; } } } if (flags & BT_SPECULATE) return FALSE; if (CRASHDEBUG(1)) { fprintf(fp, "RESYNC3: prevsp: %lx ra: %lx name: %s\n", req->prevsp, req->ra, name); fprintf(fp, "RESYNC3: prevpc: %lx\n", req->prevpc); } stkp_newpc = stkp_next = 0; newpc = 0; found = FALSE; if (exception) { newpc = req->ra; stkp = (ulong *)req->sp; } else stkp = (ulong *)req->prevsp; if (CRASHDEBUG(1)) fprintf(fp, "RESYNC4: stkp: %lx newpc: %lx\n", (ulong)stkp, newpc); for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); /* * First find the new pc on the stack. 
*/ if (!found) { if (!exception && is_kernel_text(value)) { found = TRUE; } else if (value == newpc) { found = TRUE; stkp_newpc = stkp; continue; } } if (!IS_KVADDR(value)) continue; if (is_kernel_text(value)) { if (!stkp_next) stkp_next = stkp; if (CRASHDEBUG(2)) { fprintf(fp, "RESYNC6: disassemble %lx (%s)\n", value - sizeof(uint), value_to_symstr(value - sizeof(uint), buf, 0)); } req->command = GNU_DISASSEMBLE; req->addr = value - sizeof(uint); sprintf(addr, "0x%lx", req->addr); open_tmpfile(); req->fp = pc->tmpfile; gdb_interface(req); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { clean_line(buf); if (STRNEQ(buf, "Dump of") || STRNEQ(buf, "End of")) continue; if (STRNEQ(buf, addr)) { if (LASTCHAR(buf) == ':') { fgets(buf, BUFSIZE, pc->tmpfile); clean_line(buf); } if (CRASHDEBUG(2) && (strstr(buf, "jsr") || strstr(buf, "bsr"))) fprintf(pc->saved_fp, "%s\n", buf); if ((strstr(buf, "jsr") || strstr(buf, "bsr")) && (strstr(buf, lookfor1) || strstr(buf, lookfor2))) { if (exception) { req->pc = newpc; req->sp = (ulong)stkp; } else req->pc = req->addr; close_tmpfile(); return TRUE; } } } close_tmpfile(); } } if (CRASHDEBUG(1)) { fprintf(fp, "RESYNC9: [%d] name: %s pc: %lx ra: %lx\n", req->curframe, name, req->pc, req->ra); fprintf(fp, "RESYNC9: sp: %lx lastsp: %lx\n", req->sp, req->lastsp); fprintf(fp, "RESYNC9: prevpc: %lx prevsp: %lx\n", req->prevpc, req->prevsp); } /* * At this point, all we can do is speculate based upon * past experiences... */ return (alpha_resync_speculate(req, flags, bt)); } /* * Try one level of speculation. If it works, fine -- if not, give up. */ static int alpha_resync_speculate(struct gnu_request *req, ulong flags, struct bt_info *bt) { ulong *stkp; ulong value; ulong found_sp, found_ra; struct stack_hook hook; struct bt_info bt_info, *btloc; char buf[BUFSIZE]; int kernel_thread; int looks_good; if (flags & BT_SPECULATE) /* already been here on this trace... */ return FALSE; if (pc->tmpfile) return FALSE; found_ra = found_sp = 0; kernel_thread = is_kernel_thread(req->task); /* * Add "known" possibilities here. 
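 *
 * Each case below encodes one empirically-observed stack layout:
 * BT_FROM_EXCEPTION covers returns through read_lock, do_select or
 * schedule; BT_FROM_CALLFRAME covers a sys_read frame whose entSys
 * return address sits 32 bytes below prevsp; and the default case
 * handles the filemap_nopage/do_no_page pairing.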
*/ switch (flags & (BT_FROM_EXCEPTION|BT_FROM_CALLFRAME)) { case BT_FROM_EXCEPTION: if (STREQ(closest_symbol(req->prevpc), "read_lock") || STREQ(closest_symbol(req->ra), "do_select") || STREQ(closest_symbol(req->ra), "schedule")) { stkp = (ulong *)req->sp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (found_ra) { if (is_kernel_text_offset(value)) { found_sp = (ulong)stkp; break; } continue; } if (value == req->ra) found_ra = value; } } break; case BT_FROM_CALLFRAME: if (STREQ(closest_symbol(req->ra), "sys_read")) { value = GET_STACK_ULONG(req->prevsp - 32); if (STREQ(closest_symbol(value), "entSys")) { found_ra = value; found_sp = req->prevsp - 32; } } else if (STREQ(closest_symbol(req->ra), "exit_autofs4_fs")) { stkp = (ulong *)req->sp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (found_ra && (value != found_ra)) { if (is_kernel_text_offset(value)) { found_sp = (ulong)stkp; break; } continue; } if (is_kernel_text_offset(value)) found_ra = value; } } break; default: if (req->hookp && STREQ(closest_symbol(req->prevpc), "filemap_nopage") && !STREQ(closest_symbol(req->hookp->eip), "do_no_page")) { found_ra = found_sp = 0; stkp = (ulong *)req->prevsp; for (stkp++; INSTACK(stkp, bt); stkp++) { value = GET_STACK_ULONG(stkp); if (found_ra && (value != found_ra)) { if (is_kernel_text_offset(value)) { found_sp = (ulong)stkp; break; } continue; } if (is_kernel_text_offset(value) && STREQ(closest_symbol(value), "do_no_page")) found_ra = value; } if (found_ra && found_sp) { req->hookp->eip = found_ra; req->hookp->esp = found_sp; return TRUE; } } if (req->hookp) { found_ra = req->hookp->eip; found_sp = req->hookp->esp; } break; } if (found_ra && found_sp) { looks_good = FALSE; hook.esp = found_sp; hook.eip = found_ra; if (CRASHDEBUG(1)) fprintf(pc->saved_fp, "----- RESYNC SPECULATE START -----\n"); open_tmpfile(); btloc = &bt_info; BZERO(btloc, sizeof(struct bt_info)); btloc->task = req->task; btloc->tc = bt->tc; btloc->stackbase = bt->stackbase; btloc->stacktop = bt->stacktop; btloc->flags = BT_SPECULATE; btloc->hp = &hook; back_trace(btloc); rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1)) fprintf(pc->saved_fp, "%s", buf); if (strstr(buf, "NOTE: cannot resolve")) { looks_good = FALSE; break; } if (strstr(buf, "ALPHA EXCEPTION FRAME")) { looks_good = TRUE; break; } if (kernel_thread) { if (strstr(buf, " kernel_thread ") || strstr(buf, " __kernel_thread ") || strstr(buf, " start_kernel ") || strstr(buf, " smp_callin ")) { looks_good = TRUE; break; } } } close_tmpfile(); if (CRASHDEBUG(1)) fprintf(pc->saved_fp, "----- RESYNC SPECULATE DONE ------\n"); if (looks_good) { req->pc = found_ra; req->sp = found_sp; return TRUE; } } return FALSE; } /* * Translates a user virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. * * This routine can also take mapped kernel virtual addresses if the -u flag * was passed to cmd_vtop(). If so, it makes the translation using the * kernel-memory PGD entry instead of swapper_pg_dir. 
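 *
 * A sketch of the three-level walk below, assuming the usual Linux/alpha
 * geometry (PAGESHIFT() == 13, PTRS_PER_PAGE == 1024, PMD_SHIFT == 23,
 * PGDIR_SHIFT == 33):
 *
 *      pgd index:  (vaddr >> 33) & 1023
 *      pmd index:  (vaddr >> 23) & 1023
 *      pte index:  BTOP(vaddr) & 1023     (i.e. vaddr >> 13, masked)
 *
 * The pfn is kept in the upper 32 bits of each entry, so each
 * ((entry & _PFN_MASK) >> (32 - PAGESHIFT())) expression simply extracts
 * the pfn and shifts it up into a byte address.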
*/ static int alpha_uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose) { ulong mm; ulong *pgd; ulong *page_dir; ulong *page_middle; ulong *page_table; ulong pgd_pte; ulong pmd_pte; ulong pte; if (!tc) error(FATAL, "current context invalid\n"); *paddr = 0; if (is_kernel_thread(tc->task) && IS_KVADDR(vaddr)) { pgd = (ulong *)machdep->get_task_pgd(tc->task); } else { if (!tc->mm_struct) pgd = (ulong *)machdep->get_task_pgd(tc->task); else { if ((mm = task_mm(tc->task, TRUE))) pgd = ULONG_PTR(tt->mm_struct + OFFSET(mm_struct_pgd)); else readmem(tc->mm_struct + OFFSET(mm_struct_pgd), KVADDR, &pgd, sizeof(long), "mm_struct pgd", FAULT_ON_ERROR); } } if (verbose) fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd); page_dir = pgd + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE()); pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir)); if (verbose) fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte); if (!(pgd_pte & _PAGE_VALID)) goto no_upage; page_middle = (ulong *) (PTOV((pgd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + ((vaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1)); FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE()); pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle)); if (verbose) fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte); if (!(pmd_pte & _PAGE_VALID)) goto no_upage; page_table = (ulong *) (PTOV((pmd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) + (BTOP(vaddr) & (PTRS_PER_PAGE - 1)); FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE()); pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table)); if (verbose) fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte); if (!(pte & (_PAGE_VALID))) { *paddr = pte; if (pte && verbose) { fprintf(fp, "\n"); alpha_translate_pte(pte, 0, 0); } goto no_upage; } *paddr = ((pte & _PFN_MASK) >> (32-PAGESHIFT())) + PAGEOFFSET(vaddr); if (verbose) { fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr)); alpha_translate_pte(pte, 0, 0); } return TRUE; no_upage: return FALSE; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
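 *
 * Note that non-vmalloc kernel addresses live in the direct-mapped KSEG
 * region and are converted arithmetically with VTOP(), never touching
 * the page tables; only IS_VMALLOC_ADDR() addresses take the table walk
 * below, which mirrors alpha_uvtop() but always starts at
 * vt->kernel_pgd[0].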
 */
static int
alpha_kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
        ulong *pgd;
        ulong *page_dir;
        ulong *page_middle;
        ulong *page_table;
        ulong pgd_pte;
        ulong pmd_pte;
        ulong pte;

        if (!IS_KVADDR(kvaddr))
                return FALSE;

        if (!vt->vmalloc_start) {   /* presume KSEG this early */
                *paddr = VTOP(kvaddr);
                return TRUE;
        }

        if (!IS_VMALLOC_ADDR(kvaddr)) {
                *paddr = VTOP(kvaddr);
                return TRUE;
        }

        pgd = (ulong *)vt->kernel_pgd[0];

        if (verbose)
                fprintf(fp, "PAGE DIRECTORY: %lx\n", (ulong)pgd);

        page_dir = pgd + ((kvaddr >> PGDIR_SHIFT) & (PTRS_PER_PAGE - 1));

        FILL_PGD(PAGEBASE(pgd), KVADDR, PAGESIZE());
        pgd_pte = ULONG(machdep->pgd + PAGEOFFSET(page_dir));

        if (verbose)
                fprintf(fp, " PGD: %lx => %lx\n", (ulong)page_dir, pgd_pte);

        if (!(pgd_pte & _PAGE_VALID))
                goto no_kpage;

        page_middle = (ulong *)
            (PTOV((pgd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) +
            ((kvaddr >> PMD_SHIFT) & (PTRS_PER_PAGE - 1));

        FILL_PMD(PAGEBASE(page_middle), KVADDR, PAGESIZE());
        pmd_pte = ULONG(machdep->pmd + PAGEOFFSET(page_middle));

        if (verbose)
                fprintf(fp, " PMD: %lx => %lx\n", (ulong)page_middle, pmd_pte);

        if (!(pmd_pte & _PAGE_VALID))
                goto no_kpage;

        page_table = (ulong *)
            (PTOV((pmd_pte & _PFN_MASK) >> (32-PAGESHIFT()))) +
            (BTOP(kvaddr) & (PTRS_PER_PAGE - 1));

        FILL_PTBL(PAGEBASE(page_table), KVADDR, PAGESIZE());
        pte = ULONG(machdep->ptbl + PAGEOFFSET(page_table));

        if (verbose)
                fprintf(fp, " PTE: %lx => %lx\n", (ulong)page_table, pte);

        if (!(pte & (_PAGE_VALID))) {
                if (pte && verbose) {
                        fprintf(fp, "\n");
                        alpha_translate_pte(pte, 0, 0);
                }
                goto no_kpage;
        }

        *paddr = ((pte & _PFN_MASK) >> (32-PAGESHIFT())) + PAGEOFFSET(kvaddr);

        if (verbose) {
                fprintf(fp, " PAGE: %lx\n\n", PAGEBASE(*paddr));
                alpha_translate_pte(pte, 0, 0);
        }

        return TRUE;

no_kpage:
        return FALSE;
}

/*
 * Get the relevant page directory pointer from a task structure.
 */
static ulong
alpha_get_task_pgd(ulong task)
{
        long offset;
        ulong ptbr;

        offset = OFFSET_OPTION(task_struct_thread, task_struct_tss);
        offset += OFFSET(thread_struct_ptbr);

        readmem(task + offset, KVADDR, &ptbr,
            sizeof(ulong), "task thread ptbr", FAULT_ON_ERROR);

        return(PTOV(PTOB(ptbr)));
}

/*
 * Calculate and return the speed of the processor.
 */
static ulong
alpha_processor_speed(void)
{
        ulong hwrpb;
        long offset;
        long cycle_freq;
        ulong mhz;

        if (machdep->mhz)
                return machdep->mhz;

        mhz = 0;

        get_symbol_data("hwrpb", sizeof(void *), &hwrpb);
        offset = OFFSET(hwrpb_struct_cycle_freq);

        if (!hwrpb || (offset == -1) ||
            !readmem(hwrpb+offset, KVADDR, &cycle_freq,
            sizeof(ulong), "hwrpb cycle_freq", RETURN_ON_ERROR))
                return (machdep->mhz = mhz);

        mhz = cycle_freq/1000000;

        return (machdep->mhz = mhz);
}

void
alpha_dump_machdep_table(ulong arg)
{
        int others;

        others = 0;
        fprintf(fp, " flags: %lx (", machdep->flags);
        if (machdep->flags & HWRESET)
                fprintf(fp, "%sHWRESET", others++ ? "|" : "");
        fprintf(fp, ")\n");
        fprintf(fp, " kvbase: %lx\n", machdep->kvbase);
        fprintf(fp, " identity_map_base: %lx\n", machdep->identity_map_base);
        fprintf(fp, " pagesize: %d\n", machdep->pagesize);
        fprintf(fp, " pageshift: %d\n", machdep->pageshift);
        fprintf(fp, " pagemask: %llx\n", machdep->pagemask);
        fprintf(fp, " pageoffset: %lx\n", machdep->pageoffset);
        fprintf(fp, " stacksize: %ld\n", machdep->stacksize);
        fprintf(fp, " hz: %d\n", machdep->hz);
        fprintf(fp, " mhz: %ld\n", machdep->mhz);
        fprintf(fp, " memsize: %ld (0x%lx)\n",
                machdep->memsize, machdep->memsize);
        fprintf(fp, " bits: %d\n", machdep->bits);
        fprintf(fp, " nr_irqs: %d\n", machdep->nr_irqs);
        fprintf(fp, " eframe_search: alpha_eframe_search()\n");
        fprintf(fp, " back_trace: alpha_back_trace_cmd()\n");
        fprintf(fp, " processor_speed: alpha_processor_speed()\n");
        fprintf(fp, " uvtop: alpha_uvtop()\n");
        fprintf(fp, " kvtop: alpha_kvtop()\n");
        fprintf(fp, " get_task_pgd: alpha_get_task_pgd()\n");
        if (machdep->dump_irq == generic_dump_irq)
                fprintf(fp, " dump_irq: generic_dump_irq()\n");
        else
                fprintf(fp, " dump_irq: alpha_dump_irq()\n");
        fprintf(fp, " get_stack_frame: alpha_get_stack_frame()\n");
        fprintf(fp, " get_stackbase: generic_get_stackbase()\n");
        fprintf(fp, " get_stacktop: generic_get_stacktop()\n");
        fprintf(fp, " translate_pte: alpha_translate_pte()\n");
        fprintf(fp, " memory_size: alpha_get_memory_size()\n");
        fprintf(fp, " vmalloc_start: alpha_get_vmalloc_start()\n");
        fprintf(fp, " is_task_addr: alpha_is_task_addr()\n");
        fprintf(fp, " verify_symbol: alpha_verify_symbol()\n");
        fprintf(fp, " dis_filter: alpha_dis_filter()\n");
        fprintf(fp, " cmd_mach: alpha_cmd_mach()\n");
        fprintf(fp, " get_smp_cpus: alpha_get_smp_cpus()\n");
        fprintf(fp, " is_kvaddr: generic_is_kvaddr()\n");
        fprintf(fp, " is_uvaddr: generic_is_uvaddr()\n");
        fprintf(fp, " verify_paddr: generic_verify_paddr()\n");
        fprintf(fp, " init_kernel_pgd: NULL\n");
        fprintf(fp, " value_to_symbol: generic_machdep_value_to_symbol()\n");
        fprintf(fp, " line_number_hooks: alpha_line_number_hooks\n");
        fprintf(fp, " last_pgd_read: %lx\n", machdep->last_pgd_read);
        fprintf(fp, " last_pmd_read: %lx\n", machdep->last_pmd_read);
        fprintf(fp, " last_ptbl_read: %lx\n", machdep->last_ptbl_read);
        fprintf(fp, " pgd: %lx\n", (ulong)machdep->pgd);
        fprintf(fp, " pmd: %lx\n", (ulong)machdep->pmd);
        fprintf(fp, " ptbl: %lx\n", (ulong)machdep->ptbl);
        fprintf(fp, " ptrs_per_pgd: %d\n", machdep->ptrs_per_pgd);
        fprintf(fp, " machspec: %lx\n", (ulong)machdep->machspec);
}

/*
 * Fix up jsr's to show the right target.
 *
 * If a value is passed with no buf, then cmd_dis is fishing for whether
 * the GP can be calculated from the first couple of instructions of the
 * target routine:
 *
 *      0xfffffc0000349fa0 :   ldah    gp,35(t12)
 *      0xfffffc0000349fa4 :   lda     gp,6216(gp)
 *
 * If a buf pointer is passed, then check whether the t12 register
 * is being set up as an offset from gp, then calculate the target address:
 *
 *      0xfffffc000042c364 :   ldq     t12,-29336(gp)
 *      0xfffffc000042c368 :
 *         jsr     ra,(t12),0xfffffc0000429dc0
 *
 * If the next instruction is a jsr ra,(t12), then correct the bracketed
 * target address translation.
 *
 * As a worked example of the gp calculation, using the displacements shown
 * above: "ldah gp,35(t12)" carries a high displacement of 35 and
 * "lda gp,6216(gp)" a low displacement of 6216, so
 *
 *      gp = entry-point + (65536 * 35) + 6216
 *
 * which is exactly the (65536 * id->mem_disp[0]) + id->mem_disp[1]
 * arithmetic performed below.
* */ #define LDAH_GP_T12 (0x27bb0000) #define LDA_GP_GP (0x23bd0000) #define LDQ_T12_GP (0xa77d0000) #define JSR_RA_T12 (0x6b5b0000) #define OPCODE_OPERAND_MASK (0xffff0000) #define OPCODE_MEM_DISP_MASK (0x0000ffff) static struct instruction_data { uint inst[2]; short mem_disp[2]; ulong gp; ulong target; char *curfunc; } instruction_data = { {0} }; static int alpha_dis_filter(ulong vaddr, char *buf, unsigned int output_radix) { struct syment *sp; struct instruction_data *id; char buf2[BUFSIZE], *p1; id = &instruction_data; if (!buf) { BZERO(id, sizeof(struct instruction_data)); if (!(sp = value_search(vaddr, NULL))) return FALSE; readmem(sp->value, KVADDR, &id->inst[0], sizeof(uint) * 2, "two instructions", FAULT_ON_ERROR); if (((id->inst[0] & OPCODE_OPERAND_MASK) == LDAH_GP_T12) && ((id->inst[1] & OPCODE_OPERAND_MASK) == LDA_GP_GP)) { id->mem_disp[0] = (short)(id->inst[0] & OPCODE_MEM_DISP_MASK); id->mem_disp[1] = (short)(id->inst[1] & OPCODE_MEM_DISP_MASK); id->gp = sp->value + (65536*id->mem_disp[0]) + id->mem_disp[1]; id->curfunc = sp->name; if (CRASHDEBUG(1)) console("%s: ldah(%d) and lda(%d) gp: %lx\n", id->curfunc, id->mem_disp[0], id->mem_disp[1], id->gp); return TRUE; } /* send all lines through the generic */ return TRUE; /* dis_address_translation() filter */ } dis_address_translation(vaddr, buf, output_radix); if (!id->gp || !(sp = value_search(vaddr, NULL)) || !STREQ(id->curfunc, sp->name)) { BZERO(id, sizeof(struct instruction_data)); return FALSE; } readmem(vaddr, KVADDR, &id->inst[0], sizeof(uint), "one instruction", FAULT_ON_ERROR); if ((id->inst[0] & OPCODE_OPERAND_MASK) == JSR_RA_T12) { if (!id->target || !strstr(buf, "jsr\tra,(t12)") || !strstr(buf, "<")) return FALSE; p1 = strstr(strstr(buf, "jsr"), "0x"); sprintf(p1, "0x%lx <%s>%s", id->target, value_to_symstr(id->target, buf2, output_radix), CRASHDEBUG(1) ? " [PATCHED]\n" : "\n"); return TRUE; } if ((id->inst[0] & OPCODE_OPERAND_MASK) == LDQ_T12_GP) { id->mem_disp[0] = (short)(id->inst[0] & OPCODE_MEM_DISP_MASK); readmem(id->gp + id->mem_disp[0], KVADDR, &id->target, sizeof(ulong), "jsr target", FAULT_ON_ERROR); } else id->target = 0; return TRUE; } /* * For some reason gdb can go off into the weeds translating text addresses, * so this routine both fixes the references as well as imposing the current * output radix on the translations. */ static void dis_address_translation(ulong vaddr, char *inbuf, unsigned int output_radix) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; char *colon, *p1; int argc; char *argv[MAXARGS]; ulong value; console("IN: %s", inbuf); colon = strstr(inbuf, ":"); if (colon) { sprintf(buf1, "0x%lx <%s>", vaddr, value_to_symstr(vaddr, buf2, output_radix)); sprintf(buf2, "%s%s", buf1, colon); strcpy(inbuf, buf2); } strcpy(buf1, inbuf); argc = parse_line(buf1, argv); if ((FIRSTCHAR(argv[argc-1]) == '<') && (LASTCHAR(argv[argc-1]) == '>')) { p1 = rindex(inbuf, '<'); while ((p1 > inbuf) && (*p1 != ',')) p1--; if (!STRNEQ(p1, ",0x")) return; p1++; if (!extract_hex(p1, &value, NULLCHAR, TRUE)) return; sprintf(buf1, "0x%lx <%s>\n", value, value_to_symstr(value, buf2, output_radix)); sprintf(p1, "%s", buf1); } console(" %s", inbuf); } /* * If we're generically-inclined, call generic_dump_irq(). Otherwise * dump the IRQ table the old-fashioned way. 
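 *
 * The old-fashioned path walks the irq_action[irq] chain by dumping each
 * irqaction struct into a scratch tmpfile with dump_struct(), scraping
 * the handler/flags/mask/name/dev_id/next fields back out of the text,
 * and looping whenever a non-NULL next pointer is found.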
*/ static void alpha_dump_irq(int irq) { ulong action; ulong value; char *arglist[MAXARGS]; int argc, others; char buf[BUFSIZE]; if (symbol_exists("irq_desc")) { machdep->dump_irq = generic_dump_irq; return(generic_dump_irq(irq)); } action = symbol_value("irq_action") + (sizeof(void *) * irq); readmem(action, KVADDR, &action, sizeof(void *), "irq_action pointer", FAULT_ON_ERROR); if (!action) { fprintf(fp, " IRQ: %d\n", irq); fprintf(fp, "handler:\n"); fprintf(fp, " flags: \n"); fprintf(fp, " mask: \n"); fprintf(fp, " name: \n"); fprintf(fp, " dev_id: \n"); fprintf(fp, " next: \n\n"); return; } fprintf(fp, " IRQ: %d\n", irq); open_tmpfile(); do_linked_action: dump_struct("irqaction", action, RADIX(16)); action = 0; rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { strip_comma(buf); argc = parse_line(buf, arglist); if (STREQ(arglist[0], "struct") || STREQ(buf, "};")) continue; if (STREQ(arglist[0], "handler")) { fprintf(pc->saved_fp, "handler: %s ", strip_hex(arglist[2])); if (argc == 4) fprintf(pc->saved_fp, "%s", arglist[3]); fprintf(pc->saved_fp, "\n"); } if (STREQ(arglist[0], "flags")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " flags: %lx ", value); if (value) { others = 0; fprintf(pc->saved_fp, "("); if (value & SA_INTERRUPT) fprintf(pc->saved_fp, "%sSA_INTERRUPT", others++ ? "|" : ""); if (value & SA_PROBE) fprintf(pc->saved_fp, "%sSA_PROBE", others++ ? "|" : ""); if (value & SA_SAMPLE_RANDOM) fprintf(pc->saved_fp, "%sSA_SAMPLE_RANDOM", others++ ? "|" : ""); if (value & SA_SHIRQ) fprintf(pc->saved_fp, "%sSA_SHIRQ", others++ ? "|" : ""); fprintf(pc->saved_fp, ")"); if (value & ~ACTION_FLAGS) { fprintf(pc->saved_fp, " (bits %lx not translated)", value & ~ACTION_FLAGS); } } fprintf(pc->saved_fp, "\n"); } if (STREQ(arglist[0], "mask")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " mask: %lx\n", value); } if (STREQ(arglist[0], "name")) { fprintf(pc->saved_fp, " name: %s ", strip_hex(arglist[2])); if (argc == 4) fprintf(pc->saved_fp, "\"%s\"", arglist[3]); fprintf(pc->saved_fp, "\n"); } if (STREQ(arglist[0], "dev_id")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " dev_id: %lx\n", value); } if (STREQ(arglist[0], "next")) { value = htol(strip_comma(arglist[2]), FAULT_ON_ERROR, NULL); fprintf(pc->saved_fp, " next: %s\n", strip_hex(arglist[2])); if (value) action = value; } } close_tmpfile(); fprintf(fp, "\n"); if (action) goto do_linked_action; } /* * Get a stack frame combination of pc and ra from the most relevent spot. */ static void alpha_get_stack_frame(struct bt_info *bt, ulong *pcp, ulong *spp) { struct syment *sp; ulong ksp; ulong ip; if (pcp) { if (DUMPFILE() && is_panic_thread(bt->task)) { sp = next_symbol("crash_save_current_state", NULL); if (HWRESET_TASK(bt->task)) ip = get_percpu_data(0, GET_HALT_PC, 0); else if (sp) ip = sp->value - 4; else ip = symbol_value("crash_save_current_state") + 16; } else get_alpha_frame(bt, &ip, NULL); *pcp = ip; } if (spp) { ip = 0; if (!get_panic_ksp(bt, &ksp)) get_alpha_frame(bt, HWRESET_TASK(bt->task) ? &ip : NULL, &ksp); if (!INSTACK(ksp, bt)) error(FATAL, "cannot determine starting stack address\n", bt->task); *spp = ksp; if (ip) *pcp = ip; } } /* * Do the work formerly done by alpha_get_sp() and alpha_get_pc(). 
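 *
 * The scan below treats the stack as an array of longs and looks for an
 * adjacent pair in which *spp is a plausible saved stack pointer
 * (INSTACK()) and *(spp+1) is a kernel text address; that pair becomes
 * the (sp, pc) starting point for the backtrace.  HARDWARE RESET tasks
 * are special-cased by first matching the halt_ra, and failing that the
 * halt_pv, values taken from the per-cpu hwrpb data.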
*/ static void get_alpha_frame(struct bt_info *bt, ulong *getpc, ulong *getsp) { int i; ulong ip; ulong r26; ulong ksp, sp; ulong *spp; ulong percpu_ra; ulong percpu_pv; struct percpu_data percpu_data; char buf[BUFSIZE]; ulong task; ulong *stack; task = bt->task; stack = (ulong *)bt->stackbuf; if (tt->flags & THREAD_INFO) { /* pcb.ksp is 1st word in thread_info */ readmem(bt->tc->thread_info, KVADDR, &ksp, sizeof(ulong), "thread_info pcb ksp", FAULT_ON_ERROR); sp = ksp; } else if (VALID_MEMBER(task_struct_tss_ksp)) ksp = sp = stack[OFFSET(task_struct_tss_ksp)/sizeof(long)]; else ksp = sp = stack[OFFSET(task_struct_thread_ksp)/sizeof(long)]; ip = 0; percpu_ra = percpu_pv = 0; spp = &stack[(sp - task)/sizeof(long)]; if (DUMPFILE() && getsp) { if (HWRESET_TASK(task)) { if (INSTACK(sp, bt)) { *getsp = sp; return; } else { get_percpu_data(0, 0, &percpu_data); percpu_ra = percpu_data.halt_ra; percpu_pv = percpu_data.halt_pv; spp = &stack[roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)]; } } if (!percpu_ra && (STREQ(closest_symbol(*spp), "panic") || STREQ(closest_symbol(*spp), "handle_ipi"))) { *getsp = sp; return; } } percpu_retry: if (CRASHDEBUG(1) && percpu_ra) { fprintf(fp, "get_alpha_frame: look for %lx (%s)\n", percpu_ra, value_to_symstr(percpu_ra, buf, 0)); } for (i = 0, spp++; spp < &stack[LONGS_PER_STACK]; spp++,i++) { if (CRASHDEBUG(1) && (percpu_ra || percpu_pv) && is_kernel_text(*spp)) { fprintf(fp, "%lx: %lx (%s)\n", ((ulong)spp - (ulong)stack) + task, *spp, value_to_symstr(*spp, buf, 0)); } if (percpu_ra) { if (*spp == percpu_ra) { *getsp = ((ulong)spp - (ulong)stack) + task; return; } continue; } else if (percpu_pv) { if (*spp == percpu_pv) { *getsp = ((ulong)spp - (ulong)stack) + task; if (getpc) *getpc = percpu_pv; return; } continue; } if (!INSTACK(*spp, bt)) continue; if (is_kernel_text(*(spp+1))) { sp = *spp; ip = *(spp+1); break; } } if (percpu_ra) { percpu_ra = 0; error(INFO, "cannot find return address (percpu_ra) in HARDWARE RESET stack\n"); error(INFO, "looking for procedure address (percpu_pv) in HARDWARE RESET stack\n"); if (CRASHDEBUG(1)) { fprintf(fp, "get_alpha_frame: look for %lx (%s)\n", percpu_pv, value_to_symstr(percpu_pv, buf, 0)); } spp = &stack[roundup(SIZE(task_struct), sizeof(ulong)) / sizeof(ulong)]; goto percpu_retry; } if (percpu_pv) { error(INFO, "cannot find procedure address (percpu_pv) in HARDWARE RESET stack\n"); } /* * Check for a forked task that has not yet run in user space. */ if (!ip) { if (INSTACK(ksp + OFFSET(switch_stack_r26), bt)) { readmem(ksp + OFFSET(switch_stack_r26), KVADDR, &r26, sizeof(ulong), "ret_from_smp_fork check", FAULT_ON_ERROR); if (STREQ(closest_symbol(r26), "ret_from_smp_fork") || STREQ(closest_symbol(r26), "ret_from_smpfork")) { ip = r26; sp = ksp; } } } if (getsp) *getsp = sp; if (getpc) *getpc = ip; } /* * Fill the percpu_data structure with information from the * hwrpb/percpu_data structures for a given CPU. If requested, * return one of the specified entries. 
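 *
 * Both call styles appear in this file:
 *
 *      pc = get_percpu_data(0, GET_HALT_PC, 0);    selector form
 *      get_percpu_data(0, 0, &percpu_data);        fill-in form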
*/ static ulong get_percpu_data(int cpu, ulong flag, struct percpu_data *pd) { ulong hwrpb, halt_ra, halt_PC, halt_pv; unsigned long processor_offset, processor_size; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); readmem(hwrpb+OFFSET(hwrpb_struct_processor_offset), KVADDR, &processor_offset, sizeof(ulong), "hwrpb processor_offset", FAULT_ON_ERROR); readmem(hwrpb+OFFSET(hwrpb_struct_processor_size), KVADDR, &processor_size, sizeof(ulong), "hwrpb processor_size", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_PC), KVADDR, &halt_PC, sizeof(ulong), "percpu halt_PC", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_ra), KVADDR, &halt_ra, sizeof(ulong), "percpu halt_ra", FAULT_ON_ERROR); readmem(hwrpb + processor_offset + (cpu * processor_size) + OFFSET(percpu_struct_halt_pv), KVADDR, &halt_pv, sizeof(ulong), "percpu halt_pv", FAULT_ON_ERROR); if (pd) { pd->halt_PC = halt_PC; pd->halt_ra = halt_ra; pd->halt_pv = halt_pv; } switch (flag) { case GET_HALT_PC: return halt_PC; case GET_HALT_RA: return halt_ra; case GET_HALT_PV: return halt_pv; default: return 0; } } /* * Translate a PTE, returning TRUE if the page is _PAGE_VALID or _PAGE_PRESENT, * whichever is appropriate for the machine type. If a physaddr pointer is * passed in, don't print anything. */ static int alpha_translate_pte(ulong pte, void *physaddr, ulonglong unused) { int c, len1, len2, len3, others, page_present; char buf[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char ptebuf[BUFSIZE]; char physbuf[BUFSIZE]; char *arglist[MAXARGS]; physaddr_t paddr; paddr = PTOB(pte >> 32); page_present = (pte & _PAGE_VALID); if (physaddr) { *((ulong *)physaddr) = paddr; return page_present; } sprintf(ptebuf, "%lx", pte); len1 = MAX(strlen(ptebuf), strlen("PTE")); fprintf(fp, "%s ", mkstring(buf, len1, CENTER|LJUST, "PTE")); if (!page_present && pte) { swap_location(pte, buf); if ((c = parse_line(buf, arglist)) != 3) error(FATAL, "cannot determine swap location\n"); len2 = MAX(strlen(arglist[0]), strlen("SWAP")); len3 = MAX(strlen(arglist[2]), strlen("OFFSET")); fprintf(fp, "%s %s\n", mkstring(buf2, len2, CENTER|LJUST, "SWAP"), mkstring(buf3, len3, CENTER|LJUST, "OFFSET")); strcpy(buf2, arglist[0]); strcpy(buf3, arglist[2]); fprintf(fp, "%s %s %s\n", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(buf2, len2, CENTER|RJUST, NULL), mkstring(buf3, len3, CENTER|RJUST, NULL)); return page_present; } sprintf(physbuf, "%llx", paddr); len2 = MAX(strlen(physbuf), strlen("PHYSICAL")); fprintf(fp, "%s ", mkstring(buf, len2, CENTER|LJUST, "PHYSICAL")); fprintf(fp, "FLAGS\n"); fprintf(fp, "%s %s ", mkstring(ptebuf, len1, CENTER|RJUST, NULL), mkstring(physbuf, len2, CENTER|RJUST, NULL)); fprintf(fp, "("); others = 0; if (pte) { if (pte & _PAGE_VALID) fprintf(fp, "%sVALID", others++ ? "|" : ""); if (pte & _PAGE_FOR) fprintf(fp, "%sFOR", others++ ? "|" : ""); if (pte & _PAGE_FOW) fprintf(fp, "%sFOW", others++ ? "|" : ""); if (pte & _PAGE_FOE) fprintf(fp, "%sFOE", others++ ? "|" : ""); if (pte & _PAGE_ASM) fprintf(fp, "%sASM", others++ ? "|" : ""); if (pte & _PAGE_KRE) fprintf(fp, "%sKRE", others++ ? "|" : ""); if (pte & _PAGE_URE) fprintf(fp, "%sURE", others++ ? "|" : ""); if (pte & _PAGE_KWE) fprintf(fp, "%sKWE", others++ ? "|" : ""); if (pte & _PAGE_UWE) fprintf(fp, "%sUWE", others++ ? "|" : ""); if (pte & _PAGE_DIRTY) fprintf(fp, "%sDIRTY", others++ ? "|" : ""); if (pte & _PAGE_ACCESSED) fprintf(fp, "%sACCESSED", others++ ? 
"|" : ""); } else { fprintf(fp, "no mapping"); } fprintf(fp, ")\n"); return page_present; } /* * This is currently not machine-dependent, but eventually I'd prefer to use * the HWPCB for the real physical memory size. */ static uint64_t alpha_memory_size(void) { return (generic_memory_size()); } /* * Determine where vmalloc'd memory starts. */ static ulong alpha_vmalloc_start(void) { return VMALLOC_START; } /* * ALPHA tasks are all stacksize-aligned. */ static int alpha_is_task_addr(ulong task) { return (IS_KVADDR(task) && (ALIGNED_STACK_OFFSET(task) == 0)); } /* * Keep or reject a symbol from the kernel namelist. */ int alpha_verify_symbol(const char *name, ulong value, char type) { if (CRASHDEBUG(8) && name && strlen(name)) fprintf(fp, "%016lx %s\n", value, name); return (name && strlen(name) && (value > MIN_SYMBOL_VALUE)); } /* * Override smp_num_cpus if possible and necessary. */ int alpha_get_smp_cpus(void) { int cpus; if ((cpus = get_cpus_online())) return cpus; else return kt->cpus; } /* * Machine dependent command. */ void alpha_cmd_mach(void) { int c, cflag; unsigned int radix; cflag = radix = 0; while ((c = getopt(argcnt, args, "cxd")) != EOF) { switch(c) { case 'c': cflag++; break; case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break; case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (cflag) display_hwrpb(radix); else alpha_display_machine_stats(); } /* * "mach" command output. */ static void alpha_display_machine_stats(void) { struct new_utsname *uts; char buf[BUFSIZE]; ulong mhz; uts = &kt->utsname; fprintf(fp, " MACHINE TYPE: %s\n", uts->machine); fprintf(fp, " MEMORY SIZE: %s\n", get_memory_size(buf)); fprintf(fp, " CPUS: %d\n", kt->cpus); fprintf(fp, " PROCESSOR SPEED: "); if ((mhz = machdep->processor_speed())) fprintf(fp, "%ld Mhz\n", mhz); else fprintf(fp, "(unknown)\n"); fprintf(fp, " HZ: %d\n", machdep->hz); fprintf(fp, " PAGE SIZE: %d\n", PAGESIZE()); fprintf(fp, " L1 CACHE SIZE: %d\n", l1_cache_size()); fprintf(fp, "KERNEL VIRTUAL BASE: %lx\n", machdep->kvbase); fprintf(fp, "KERNEL VMALLOC BASE: %lx\n", vt->vmalloc_start); fprintf(fp, " KERNEL STACK SIZE: %ld\n", STACKSIZE()); } /* * Display the hwrpb_struct and each percpu_struct. */ static void display_hwrpb(unsigned int radix) { int cpu; ulong hwrpb, percpu; ulong processor_offset, processor_size; get_symbol_data("hwrpb", sizeof(void *), &hwrpb); readmem(hwrpb+OFFSET(hwrpb_struct_processor_offset), KVADDR, &processor_offset, sizeof(ulong), "hwrpb processor_offset", FAULT_ON_ERROR); readmem(hwrpb+OFFSET(hwrpb_struct_processor_size), KVADDR, &processor_size, sizeof(ulong), "hwrpb processor_size", FAULT_ON_ERROR); fprintf(fp, "HWRPB:\n"); dump_struct("hwrpb_struct", hwrpb, radix); for (cpu = 0; cpu < kt->cpus; cpu++) { fprintf(fp, "\nCPU %d:\n", cpu); percpu = hwrpb + processor_offset + (processor_size * cpu); dump_struct("percpu_struct", percpu, radix); } } /* * Perform any leftover pre-prompt machine-specific initialization tasks here. 
*/ static void alpha_post_init(void) { modify_signame(7, "SIGEMT", NULL); modify_signame(10, "SIGBUS", NULL); modify_signame(12, "SIGSYS", NULL); modify_signame(16, "SIGURG", NULL); modify_signame(17, "SIGSTOP", NULL); modify_signame(18, "SIGTSTP", NULL); modify_signame(19, "SIGCONT", NULL); modify_signame(20, "SIGCHLD", NULL); modify_signame(23, "SIGIO", "SIGPOLL"); modify_signame(29, "SIGINFO", "SIGPWR"); modify_signame(30, "SIGUSR1", NULL); modify_signame(31, "SIGUSR2", NULL); } #endif /* ALPHA */ crash-7.2.8/ramdump.c0000664000000000000000000002152713614623427013160 0ustar rootroot/* * ramdump.c - core analysis suite * * Copyright (c) 2014 Broadcom Corporation * Oza Pawandeep * Vikram Prakash * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Author: Oza Pawandeep */ #define _LARGEFILE64_SOURCE 1 /* stat64() */ #include "defs.h" #include struct ramdump_def { char *path; int rfd; ulonglong start_paddr; ulonglong end_paddr; }; static struct ramdump_def *ramdump; static int nodes; static char *user_elf = NULL; static char elf_default[] = "/var/tmp/ramdump_elf_XXXXXX"; static void alloc_elf_header(Elf64_Ehdr *ehdr, ushort e_machine) { memcpy(ehdr->e_ident, ELFMAG, SELFMAG); ehdr->e_ident[EI_CLASS] = ELFCLASS64; ehdr->e_ident[EI_DATA] = ELFDATA2LSB; ehdr->e_ident[EI_VERSION] = EV_CURRENT; ehdr->e_ident[EI_OSABI] = ELFOSABI_LINUX; ehdr->e_ident[EI_ABIVERSION] = 0; memset(ehdr->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); ehdr->e_type = ET_CORE; ehdr->e_machine = e_machine; ehdr->e_version = EV_CURRENT; ehdr->e_entry = 0; ehdr->e_phoff = sizeof(Elf64_Ehdr); ehdr->e_shoff = 0; ehdr->e_flags = 0; ehdr->e_ehsize = sizeof(Elf64_Ehdr); ehdr->e_phentsize = sizeof(Elf64_Phdr); ehdr->e_phnum = 1 + nodes; ehdr->e_shentsize = 0; ehdr->e_shnum = 0; ehdr->e_shstrndx = 0; } static void alloc_program_headers(Elf64_Phdr *phdr) { unsigned int i; for (i = 0; i < nodes; i++) { phdr[i].p_type = PT_LOAD; phdr[i].p_filesz = ramdump[i].end_paddr + 1 - ramdump[i].start_paddr; phdr[i].p_memsz = phdr[i].p_filesz; phdr[i].p_vaddr = 0; phdr[i].p_paddr = ramdump[i].start_paddr; phdr[i].p_flags = PF_R | PF_W | PF_X; phdr[i].p_align = 0; } } static char *write_elf(Elf64_Phdr *load, Elf64_Ehdr *e_head, size_t data_offset) { #define CPY_BUF_SZ 4096 int fd1, fd2, i, err = 1; char *buf; char *out_elf; size_t offset; ssize_t rd, len; buf = (char *)malloc(CPY_BUF_SZ); offset = data_offset; if (user_elf) { fd2 = open(user_elf, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR); if (fd2 < 0) { error(INFO, "%s open error, %s\n", user_elf, strerror(errno)); goto end1; } out_elf = user_elf; } else { fd2 = mkstemp(elf_default); if (fd2 < 0) { error(INFO, "%s open error, %s\n", elf_default, strerror(errno)); goto end1; } out_elf = elf_default; pc->flags2 |= RAMDUMP; } if (user_elf) { sprintf(buf, "creating ELF dumpfile: %s", out_elf); please_wait(buf); } else if (CRASHDEBUG(1)) fprintf(fp, "creating temporary ELF header: %s\n\n", elf_default); while (offset > 0) { len = write(fd2, e_head + (data_offset - offset), offset); if (len < 0) { error(INFO, "ramdump write error, %s\n", strerror(errno)); goto end; } offset -= len; 
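                /*
                 * Short writes are retried with the remaining byte count.
                 * Caveat: e_head is an Elf64_Ehdr pointer, so the
                 * (data_offset - offset) adjustment above is scaled by
                 * sizeof(Elf64_Ehdr); the first write() normally sends the
                 * whole block in one pass, but a byte-accurate resume after
                 * a short write would require casting e_head to (char *).
                 */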
} if (user_elf) { for (i = 0; i < nodes; i++) { offset = load[i].p_offset; fd1 = open(ramdump[i].path, O_RDONLY, S_IRUSR); if (fd1 < 0) { error(INFO, "%s open error, %s\n", ramdump[i].path, strerror(errno)); goto end; } lseek(fd2, (off_t)offset, SEEK_SET); while ((rd = read(fd1, buf, CPY_BUF_SZ)) > 0) { if (write(fd2, buf, rd) != rd) { error(INFO, "%s write error, %s\n", ramdump[i].path, strerror(errno)); close(fd1); goto end; } } close(fd1); } please_wait_done(); } err = 0; end: close(fd2); end1: free(buf); return err ? NULL : out_elf; } static void alloc_notes(Elf64_Phdr *notes) { /* Nothing filled in as of now */ notes->p_type = PT_NOTE; notes->p_offset = 0; notes->p_vaddr = 0; notes->p_paddr = 0; notes->p_filesz = 0; notes->p_memsz = 0; notes->p_flags = 0; notes->p_align = 0; } char *ramdump_to_elf(void) { int i; char *ptr, *e_file = NULL; ushort e_machine = 0; size_t offset, data_offset; size_t l_offset; Elf64_Phdr *notes, *load; Elf64_Ehdr *e_head; if (machine_type("ARM")) e_machine = EM_ARM; else if (machine_type("ARM64")) e_machine = EM_AARCH64; else if (machine_type("MIPS")) e_machine = EM_MIPS; else if (machine_type("X86_64")) e_machine = EM_X86_64; else error(FATAL, "ramdump: unsupported machine type: %s\n", MACHINE_TYPE); e_head = (Elf64_Ehdr *)malloc(sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) + (nodes * sizeof(Elf64_Phdr)) + (CPY_BUF_SZ * 2)); ptr = (char *)e_head; offset = 0; alloc_elf_header(e_head, e_machine); ptr += sizeof(Elf64_Ehdr); offset += sizeof(Elf64_Ehdr); notes = (Elf64_Phdr *)ptr; alloc_notes(notes); offset += sizeof(Elf64_Phdr); ptr += sizeof(Elf64_Phdr); load = (Elf64_Phdr *)ptr; alloc_program_headers(load); offset += sizeof(Elf64_Phdr) * nodes; ptr += sizeof(Elf64_Phdr) * nodes; /* Empty note */ notes->p_offset = offset; l_offset = offset; data_offset = offset; for (i = 0; i < nodes; i++) { load[i].p_offset = l_offset; l_offset += load[i].p_filesz; } e_file = write_elf(load, e_head, data_offset); free(e_head); return e_file; } #define PREFIX(ptr, pat) \ (strncmp((ptr), (pat), sizeof(pat)-1) ? 
0 : \ ((ptr) += sizeof(pat)-1, 1)) int is_ramdump(char *p) { char *x = NULL, *y = NULL, *pat; size_t len; char *pattern; struct stat64 st; int is_live; int err = 0; is_live = PREFIX(p, "live:"); if (nodes || !strchr(p, '@')) return 0; len = strlen(p); pattern = (char *)malloc(len + 1); strlcpy(pattern, p, len + 1); pat = pattern; while ((pat = strtok_r(pat, ",", &x))) { if ((pat = strtok_r(pat, "@", &y))) { nodes++; ramdump = realloc(ramdump, sizeof(struct ramdump_def) * nodes); if (!ramdump) error(FATAL, "realloc failure\n"); ramdump[nodes - 1].path = pat; pat = strtok_r(NULL, "@", &y); ramdump[nodes - 1].start_paddr = htoll(pat, RETURN_ON_ERROR, &err); if (err == TRUE) error(FATAL, "Invalid ramdump address\n"); if ((ramdump[nodes - 1].rfd = open(ramdump[nodes - 1].path, O_RDONLY)) < 0) error(FATAL, "ramdump %s open failed:%s\n", ramdump[nodes - 1].path, strerror(errno)); if (fstat64(ramdump[nodes - 1].rfd, &st) < 0) error(FATAL, "ramdump stat failed\n"); ramdump[nodes - 1].end_paddr = ramdump[nodes - 1].start_paddr + st.st_size - 1; } pat = NULL; } if (nodes && is_live) { pc->flags |= LIVE_SYSTEM; pc->dumpfile = ramdump[0].path; pc->live_memsrc = pc->dumpfile; } return nodes; } void ramdump_elf_output_file(char *opt) { user_elf = opt; } void ramdump_cleanup(void) { if (!user_elf) unlink(elf_default); } int read_ramdump(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { off_t offset; int i, found; struct ramdump_def *r = &ramdump[0]; offset = 0; for (i = found = 0; i < nodes; i++) { r = &ramdump[i]; if ((paddr >= r->start_paddr) && (paddr <= r->end_paddr)) { offset = (off_t)paddr - (off_t)r->start_paddr; found++; break; } } if (!found) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: READ_ERROR: " "offset not found for paddr: %llx\n", (ulonglong)paddr); return READ_ERROR; } if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: addr: %lx paddr: %llx cnt: %d offset: %llx\n", addr, (ulonglong)paddr, cnt, (ulonglong)offset); if (lseek(r->rfd, offset, SEEK_SET) == -1) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: SEEK_ERROR: " "offset: %llx\n", (ulonglong)offset); return SEEK_ERROR; } if (read(r->rfd, bufptr, cnt) != cnt) { if (CRASHDEBUG(8)) fprintf(fp, "read_ramdump: READ_ERROR: " "offset: %llx\n", (ulonglong)offset); return READ_ERROR; } return cnt; } void show_ramdump_files(void) { int i; fprintf(fp, "%s [temporary ELF header]\n", elf_default); for (i = 0; i < nodes; i++) { fprintf(fp, "%s %s", i ? "\n" : "", ramdump[i].path); } } void dump_ramdump_data() { int i; if (!user_elf && !is_ramdump_image()) return; fprintf(fp, "\nramdump data:\n"); fprintf(fp, " user_elf: %s\n", user_elf ? user_elf : "(unused)"); fprintf(fp, " elf_default: %s\n", user_elf ? "(unused)" : elf_default); fprintf(fp, " nodes: %d\n", nodes); for (i = 0; i < nodes; i++) { fprintf(fp, " ramdump[%d]:\n", i); fprintf(fp, " path: %s\n", ramdump[i].path); fprintf(fp, " rfd: %d\n", ramdump[i].rfd); fprintf(fp, " start_paddr: %llx\n", (ulonglong)ramdump[i].start_paddr); fprintf(fp, " end_paddr: %llx\n", (ulonglong)ramdump[i].end_paddr); } fprintf(fp, "\n"); } int is_ramdump_image(void) { return (pc->flags2 & RAMDUMP ? TRUE : FALSE); } crash-7.2.8/help.c0000775000000000000000000150015213614623427012443 0ustar rootroot/* help.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2020 David Anderson * Copyright (C) 2002-2020 Red Hat, Inc. All rights reserved. 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "defs.h"

static void reshuffle_cmdlist(void);
static int sort_command_name(const void *, const void *);
static void display_commands(void);
static void display_copying_info(void);
static void display_warranty_info(void);
static void display_output_info(void);
static void display_input_info(void);
static void display_README(void);
static char *gnu_public_license[];
static char *gnu_public_license_v3[];
static char *version_info[];
static char *output_info[];
static char *input_info[];
static char *README[];
static void dump_registers(void);

#define GPLv2 2
#define GPLv3 3
#if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1)
static int GPL_version = GPLv2;
#else
static int GPL_version = GPLv3;
#endif

static char *program_usage_info[] = {
"",
"USAGE:",
"",
" crash [OPTION]... NAMELIST MEMORY-IMAGE[@ADDRESS] (dumpfile form)",
" crash [OPTION]... [NAMELIST] (live system form)",
"",
"OPTIONS:",
"",
" NAMELIST",
" This is a pathname to an uncompressed kernel image (a vmlinux",
" file), or a Xen hypervisor image (a xen-syms file) which has",
" been compiled with the \"-g\" option. If using the dumpfile form,",
" a vmlinux file may be compressed in either gzip or bzip2 formats.",
"",
" MEMORY-IMAGE",
" A kernel core dump file created by the netdump, diskdump, LKCD,",
" kdump, xendump or kvmdump facilities.",
"",
" If a MEMORY-IMAGE argument is not entered, the session will be",
" invoked on the live system, which typically requires root privileges",
" because of the device file used to access system RAM. By default, ",
" /dev/crash will be used if it exists. If it does not exist, then ",
" /dev/mem will be used; but if the kernel has been configured with ",
" CONFIG_STRICT_DEVMEM, then /proc/kcore will be used. It is permissible",
" to explicitly enter /dev/crash, /dev/mem or /proc/kcore.",
"",
" An @ADDRESS value must be appended to the MEMORY-IMAGE if the dumpfile",
" is a raw RAM dumpfile that has no header information describing the file",
" contents. Multiple MEMORY-IMAGE@ADDRESS ordered pairs may be entered,",
" with each dumpfile containing a contiguous block of RAM, where the ADDRESS",
" value is the physical start address of the block expressed in hexadecimal.",
" The physical address value(s) will be used to create a temporary ELF header",
" in /var/tmp, which will only exist during the crash session. If a raw RAM",
" dumpfile represents a live memory source, such as that specified by the QEMU",
" mem-path argument of a memory-backend-file object, then \"live:\" must be",
" prepended to the MEMORY-IMAGE name.",
"",
" mapfile",
" If the NAMELIST file is not the same kernel that is running",
" (live system form), or the kernel that was running when the system",
" crashed (dumpfile form), then the System.map file of the original ",
" kernel should be entered on the command line.",
"",
" -h [option]",
" --help [option]",
" Without an option argument, display a crash usage help message.",
" If the option argument is a crash command name, the help page",
" for that command is displayed. 
If it is the string \"input\", a", " page describing the various crash command line input options is", " displayed. If it is the string \"output\", a page describing command", " line output options is displayed. If it is the string \"all\", then", " all of the possible help messages are displayed. After the help", " message is displayed, crash exits.", "", " -s ", " Silently proceed directly to the \"crash>\" prompt without displaying", " any version, GPL, or crash initialization data during startup, and by", " default, runtime command output is not passed to any scrolling command.", "", " -i file", " Execute the command(s) contained in \"file\" prior to displaying ", " the \"crash>\" prompt for interactive user input.", "", " -d num ", " Set the internal debug level. The higher the number, the more", " debugging data will be printed when crash initializes and runs.", "", " -S ", " Use /boot/System.map as the mapfile.", "", " -e vi | emacs", " Set the readline(3) command line editing mode to \"vi\" or \"emacs\". ", " The default editing mode is \"vi\".", "", " -f ", " Force the usage of a compressed vmlinux file if its original", " name does not start with \"vmlinux\".", "", " -k ", " Indicate that the NAMELIST file is an LKCD \"Kerntypes\" debuginfo file.", "", " -g [namelist]", " Determine if a vmlinux or xen-syms namelist file contains debugging data.", "", " -t ", " Display the system-crash timestamp and exit.", "", " -L ", " Attempt to lock all of its virtual address space into memory by", " calling mlockall(MCL_CURRENT|MCL_FUTURE) during initialization.", " If the system call fails, an error message will be displayed,", " but the session continues.", "", " -c tty-device", " Open the tty-device as the console used for debug messages.", "", " -p page-size", " If a processor's page size cannot be determined by the dumpfile, ", " and the processor default cannot be used, use page-size.", "", " -o filename", " Only used with the MEMORY-IMAGE@ADDRESS format for raw RAM dumpfiles,", " specifies a filename of a new ELF vmcore that will be created and used", " as the dumpfile. It will be saved to allow future use as a standalone", " vmcore, replacing the original raw RAM dumpfile.", "", " -m option=value", " --machdep option=value", " Pass an option and value pair to machine-dependent code. 
These", " architecture-specific option/pairs should only be required in", " very rare circumstances:", "", " X86_64:", " phys_base=", " irq_eframe_link=", " irq_stack_gap=", " max_physmem_bits=", " kernel_image_size=", " vm=orig (pre-2.6.11 virtual memory address ranges)", " vm=2.6.11 (2.6.11 and later virtual memory address ranges)", " vm=xen (Xen kernel virtual memory address ranges)", " vm=xen-rhel4 (RHEL4 Xen kernel virtual address ranges)", " vm=5level (5-level page tables)", " page_offset=", " PPC64:", " vm=orig", " vm=2.6.14 (4-level page tables)", " IA64:", " phys_start=", " init_stack_size=", " vm=4l (4-level page tables)", " ARM:", " phys_base=", " ARM64:", " phys_offset=", " kimage_voffset=", " max_physmem_bits=", " X86:", " page_offset=", "", " -x ", " Automatically load extension modules from a particular directory.", " The directory is determined by the following order of precedence:", "", " (1) the directory specified in the CRASH_EXTENSIONS shell ", " environment variable", " (2) /usr/lib64/crash/extensions (64-bit architectures)", " (3) /usr/lib/crash/extensions (32-bit architectures)", " (4) the ./extensions subdirectory of the current directory", "", " --active", " Track only the active task on each cpu.", "", " --buildinfo", " Display the crash binary's build date, the user ID of the builder,", " the hostname of the machine where the build was done, the target", " architecture, the version number, and the compiler version.", "", " --memory_module modname", " Use the modname as an alternative kernel module to the crash.ko", " module that creates the /dev/crash device.", "", " --memory_device device", " Use device as an alternative device to the /dev/crash, /dev/mem", " or /proc/kcore devices.", "", " --log dumpfile", " Dump the contents of the kernel log buffer. A kernel namelist", " argument is not necessary, but the dumpfile must contain the", " VMCOREINFO data taken from the original /proc/vmcore ELF header.", "", " --no_kallsyms", " Do not use kallsyms-generated symbol information contained within", " kernel module object files.", "", " --no_modules", " Do not access or display any kernel module related information.", "", " --no_ikconfig", " Do not attempt to read configuration data that was built into", " kernels configured with CONFIG_IKCONFIG.", "", " --no_data_debug", " Do not verify the validity of all structure member offsets and", " structure sizes that it uses.", "", " --no_kmem_cache", " Do not initialize the kernel's slab cache infrastructure, and", " commands that use kmem_cache-related data will not work.", "", " --no_elf_notes", " Do not use the registers from the ELF NT_PRSTATUS notes saved", " in a compressed kdump header for backtraces.", "", " --kmem_cache_delay", " Delay the initialization of the kernel's slab cache infrastructure", " until it is required by a run-time command.", "", " --readnow", " Pass this flag to the embedded gdb module, which will override", " the two-stage strategy that it uses for reading symbol tables", " from the NAMELIST. 
If module symbol tables are loaded during", " runtime with the \"mod\" command, the same override will occur.", "", " --smp ", " Specify that the system being analyzed is an SMP kernel.", "", " -v", " --version", " Display the version of the crash utility, the version of the", " embedded gdb module, GPL information, and copyright notices.", "", " --cpus number", " Specify the number of cpus in the SMP system being analyzed.", "", " --osrelease dumpfile", " Display the OSRELEASE vmcoreinfo string from a kdump dumpfile", " header.", "", " --hyper", " Force the session to be that of a Xen hypervisor.", "", " --p2m_mfn pfn", " When a Xen Hypervisor or its dom0 kernel crashes, the dumpfile", " is typically analyzed with either the Xen hypervisor or the dom0", " kernel. It is also possible to analyze any of the guest domU", " kernels if the pfn_to_mfn_list_list pfn value of the guest kernel", " is passed on the command line along with its NAMELIST and the ", " dumpfile.", "", " --xen_phys_start physical-address", " Supply the base physical address of the Xen hypervisor's text", " and static data for older xendump dumpfiles that did not pass", " that information in the dumpfile header.", "", " --zero_excluded", " If the makedumpfile(8) facility has filtered a compressed kdump", " dumpfile to exclude various types of non-essential pages, or has", " marked a compressed or ELF kdump dumpfile as incomplete due to", " an ENOSPC or other error during its creation, any attempt to", " read missing pages will fail. With this flag, reads from any", " of those pages will return zero-filled memory.", "", " --no_panic", " Do not attempt to find the task that was running when the kernel", " crashed. Set the initial context to that of the \"swapper\" task", " on cpu 0.", "", " --more ", " Use /bin/more as the command output scroller, overriding the", " default of /usr/bin/less and any settings in either ./.crashrc", " or $HOME/.crashrc.", "", " --less ", " Use /usr/bin/less as the command output scroller, overriding any", " settings in either ./.crashrc or $HOME/.crashrc.", "", " --CRASHPAGER", " Use the output paging command defined in the CRASHPAGER shell", " environment variable, overriding any settings in either ./.crashrc ", " or $HOME/.crashrc.", "", " --no_scroll", " Do not pass run-time command output to any scrolling command.", "", " --no_strip", " Do not strip cloned kernel text symbol names.", "", " --no_crashrc", " Do not execute the commands in either $HOME/.crashrc or ./.crashrc.", "", " --mod directory", " When loading the debuginfo data of kernel modules with the \"mod -S\"", " command, search for their object files in directory instead of in ", " the standard location.", "", " --src directory", " Search for the kernel source code in directory instead of in the", " standard location that is compiled into the debuginfo data.", "", " --reloc size", " When analyzing live x86 kernels configured with a CONFIG_PHYSICAL_START ", " value that is larger than its CONFIG_PHYSICAL_ALIGN value, then it will", " be necessary to enter a relocation size equal to the difference between", " the two values.", "", " --hash count", " Set the number of internal hash queue heads used for list gathering", " and verification. The default count is 32768.", "", " --kaslr offset | auto", " If x86, x86_64 or s390x kernel was configured with CONFIG_RANDOMIZE_BASE,", " the offset value is equal to the difference between the symbol values ", " compiled into the vmlinux file and their relocated KASLR value. 
If", " set to auto, the KASLR offset value will be automatically calculated.", "", " --minimal", " Bring up a session that is restricted to the log, dis, rd, sym,", " eval, set and exit commands. This option may provide a way to", " extract some minimal/quick information from a corrupted or truncated", " dumpfile, or in situations where one of the several kernel subsystem ", " initialization routines would abort the crash session.", "", " --kvmhost [32|64]", " When examining an x86 KVM guest dumpfile, this option specifies", " that the KVM host that created the dumpfile was an x86 (32-bit)", " or an x86_64 (64-bit) machine, overriding the automatically", " determined value.", "", " --kvmio ", " override the automatically-calculated KVM guest I/O hole size.", "", " --offline [show|hide]", " Show or hide command output that is associated with offline cpus,", " overriding any settings in either ./.crashrc or $HOME/.crashrc.", "", "FILES:", "", " .crashrc", " Initialization commands. The file can be located in the user's", " HOME directory and/or the current directory. Commands found in", " the .crashrc file in the HOME directory are executed before", " those in the current directory's .crashrc file.", "", "ENVIRONMENT VARIABLES:", "", " EDITOR ", " Command input is read using readline(3). If EDITOR is set to", " emacs or vi then suitable keybindings are used. If EDITOR is", " not set, then vi is used. This can be overridden by \"set vi\" or", " \"set emacs\" commands located in a .crashrc file, or by entering", " \"-e emacs\" on the crash command line.", "", " CRASHPAGER", " If CRASHPAGER is set, its value is used as the name of the program", " to which command output will be sent. If not, then command output", " output is sent to \"/usr/bin/less -E -X\" by default.", "", " CRASH_MODULE_PATH", " Specifies an alternative directory tree to search for kernel", " module object files.", "", " CRASH_EXTENSIONS", " Specifies a directory containing extension modules that will be", " loaded automatically if the -x command line option is used.", "", NULL }; void program_usage(int form) { if (form == SHORT_FORM) { fprintf(fp, "\nUsage:\n\n"); fprintf(fp, "%s\n%s\n", program_usage_info[3], program_usage_info[4]); fprintf(fp, "\nEnter \"%s -h\" for details.\n", pc->program_name); clean_exit(1); } else { FILE *scroll; char *scroll_command; char **p; if ((scroll_command = setup_scroll_command()) && (scroll = popen(scroll_command, "w"))) fp = scroll; else scroll = NULL; for (p = program_usage_info; *p; p++) { fprintf(fp, *p, pc->program_name); fprintf(fp, "\n"); } fflush(fp); if (scroll) pclose(scroll); clean_exit(0); } } /* * Get an updated count of commands for subsequent help menu display, * reshuffling the deck if this is the first time or if something's changed. 
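 *
 * Both the built-in command table and every registered extension's
 * command table contribute to the count; HIDDEN_COMMAND entries (and an
 * extension's CLEANUP entries) are excluded so that they never appear
 * in the help menu.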
*/ void help_init(void) { struct command_table_entry *cp; struct extension_table *ext; for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->ncmds++; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->ncmds++; } } if (!pc->cmdlist) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **) malloc(sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot malloc command list space\n"); } else if (pc->ncmds > pc->cmdlistsz) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **)realloc(pc->cmdlist, sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot realloc command list space\n"); } reshuffle_cmdlist(); } /* * If the command list is modified during runtime, re-shuffle the list * for proper help menu display. */ static void reshuffle_cmdlist(void) { int i, cnt; struct command_table_entry *cp; struct extension_table *ext; for (i = 0; i < pc->cmdlistsz; i++) pc->cmdlist[i] = NULL; for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->cmdlist[cnt++] = cp->name; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->cmdlist[cnt++] = cp->name; } } if (cnt > pc->cmdlistsz) error(FATAL, "help table malfunction!\n"); qsort((void *)pc->cmdlist, (size_t)cnt, sizeof(char *), sort_command_name); } /* * The help list is in alphabetical order, with exception of the "q" command, * which has historically always been the last command in the list. */ static int sort_command_name(const void *name1, const void *name2) { char **s1, **s2; s1 = (char **)name1; s2 = (char **)name2; if (STREQ(*s1, "q")) return 1; return strcmp(*s1, *s2); } /* * Get help for a command, to dump an internal table, or the GNU public * license copying/warranty information. 
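 *
 * Typical invocations (see the -z case below for the full option list):
 *
 *      help bt      display the help page for the "bt" command
 *      help -k      dump the kernel_table
 *      help -o      dump the offset_table and size_table
 *      help -r      dump the registers from the dumpfile header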
*/ void cmd_help(void) { int c; int oflag; oflag = 0; while ((c = getopt(argcnt, args, "efNDdmM:ngcaBbHhkKsvVoptTzLxOr")) != EOF) { switch(c) { case 'e': dump_extension_table(VERBOSE); return; case 'f': dump_filesys_table(VERBOSE); return; case 'n': case 'D': dumpfile_memory(DUMPFILE_MEM_DUMP); return; case 'x': dump_text_value_cache(VERBOSE); return; case 'd': dump_dev_table(); return; case 'M': dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL)); return; case 'm': dump_machdep_table(0); return; case 'g': dump_gdb_data(); return; case 'N': dump_net_table(); return; case 'a': dump_alias_data(); return; case 'b': dump_shared_bufs(); return; case 'B': dump_build_data(); return; case 'c': dump_numargs_cache(); return; case 'H': dump_hash_table(VERBOSE); return; case 'h': dump_hash_table(!VERBOSE); return; case 'k': dump_kernel_table(!VERBOSE); return; case 'K': dump_kernel_table(VERBOSE); return; case 's': dump_symbol_table(); return; case 'V': dump_vm_table(VERBOSE); return; case 'v': dump_vm_table(!VERBOSE); return; case 'O': dump_offset_table(NULL, TRUE); return; case 'o': oflag = TRUE; break; case 'T': dump_task_table(VERBOSE); return; case 't': dump_task_table(!VERBOSE); return; case 'p': dump_program_context(); return; case 'z': fprintf(fp, "help options:\n"); fprintf(fp, " -a - alias data\n"); fprintf(fp, " -b - shared buffer data\n"); fprintf(fp, " -B - build data\n"); fprintf(fp, " -c - numargs cache\n"); fprintf(fp, " -d - device table\n"); fprintf(fp, " -D - dumpfile contents/statistics\n"); fprintf(fp, " -e - extension table data\n"); fprintf(fp, " -f - filesys table\n"); fprintf(fp, " -g - gdb data\n"); fprintf(fp, " -h - hash_table data\n"); fprintf(fp, " -H - hash_table data (verbose)\n"); fprintf(fp, " -k - kernel_table\n"); fprintf(fp, " -K - kernel_table (verbose)\n"); fprintf(fp, " -L - LKCD page cache environment\n"); fprintf(fp, " -M machine specific\n"); fprintf(fp, " -m - machdep_table\n"); fprintf(fp, " -N - net_table\n"); fprintf(fp, " -n - dumpfile contents/statistics\n"); fprintf(fp, " -o - offset_table and size_table\n"); fprintf(fp, " -p - program_context\n"); fprintf(fp, " -r - dump registers from dumpfile header\n"); fprintf(fp, " -s - symbol table data\n"); fprintf(fp, " -t - task_table\n"); fprintf(fp, " -T - task_table plus context_array\n"); fprintf(fp, " -v - vm_table\n"); fprintf(fp, " -V - vm_table (verbose)\n"); fprintf(fp, " -x - text cache\n"); fprintf(fp, " -z - help options\n"); return; case 'L': dumpfile_memory(DUMPFILE_ENVIRONMENT); return; case 'r': dump_registers(); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, COMPLETE_HELP); if (!args[optind]) { if (oflag) dump_offset_table(NULL, FALSE); else display_help_screen(""); return; } do { if (oflag) dump_offset_table(args[optind], FALSE); else cmd_usage(args[optind], COMPLETE_HELP|MUST_HELP); optind++; } while (args[optind]); } static void dump_registers(void) { if (pc->flags2 & QEMU_MEM_DUMP_ELF) { dump_registers_for_qemu_mem_dump(); return; } else if (DISKDUMP_DUMPFILE()) { dump_registers_for_compressed_kdump(); return; } else if (NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) { dump_registers_for_elf_dumpfiles(); return; } else if (VMSS_DUMPFILE()) { dump_registers_for_vmss_dump(); return; } error(FATAL, "-r option not supported on %s\n", ACTIVE() ? "a live system" : "this dumpfile type"); } /* * Format and display the help menu. 
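 *
 * The layout is column-major: with rows = (ncmds + HELP_COLUMNS - 1) /
 * HELP_COLUMNS, printed row i consists of cmdlist[i], cmdlist[i + rows],
 * cmdlist[i + 2*rows], and so on.  For example, 12 commands in a
 * hypothetical 5-column layout yield rows = 3, so the first printed row
 * shows entries 0, 3, 6 and 9.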
/*
 *  Format and display the help menu.
 */
void
display_help_screen(char *indent)
{
	int i, j, rows;
	char **namep;

	help_init();

	fprintf(fp, "\n%s", indent);

	rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS;

	for (i = 0; i < rows; i++) {
		namep = &pc->cmdlist[i];
		for (j = 0; j < HELP_COLUMNS; j++) {
			fprintf(fp, "%-15s", *namep);
			namep += rows;
			if ((namep - pc->cmdlist) >= pc->ncmds)
				break;
		}
		fprintf(fp, "\n%s", indent);
	}

	fprintf(fp, "\n%s%s version: %-6s gdb version: %s\n", indent,
		pc->program_name, pc->program_version, pc->gdb_version);
	fprintf(fp,
	    "%sFor help on any command above, enter \"help <command>\".\n",
		indent);
	fprintf(fp, "%sFor help on input options, enter \"help input\".\n",
		indent);
	fprintf(fp, "%sFor help on output options, enter \"help output\".\n",
		indent);

#ifdef NO_LONGER_TRUE
	fprintf(fp, "%sFor the most recent version: "
		"http://www.missioncriticallinux.com/download\n\n", indent);
#else
	fprintf(fp, "\n");
#endif
}

/*
 *  Used for generating HTML pages: dump the commands in the order they
 *  would be seen on the help menu, i.e., from left-to-right, row-by-row.
 *  Line ends are signaled with a "BREAK" string.
 */
static void
display_commands(void)
{
	int i, j, rows;
	char **namep;

	help_init();
	rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS;

	for (i = 0; i < rows; i++) {
		namep = &pc->cmdlist[i];
		for (j = 0; j < HELP_COLUMNS; j++) {
			fprintf(fp, "%s\n", *namep);
			namep += rows;
			if ((namep - pc->cmdlist) >= pc->ncmds) {
				fprintf(fp, "BREAK\n");
				break;
			}
		}
	}
}

/*
 *  Help data for a command must be formatted using the following template:

"command-name",
"command description line",
"argument-usage line",
"description...",
"description...",
"description...",
NULL,

 *  The first line is concatenated with the second line, and will follow the
 *  help command's "NAME" header.
 *  The first and third lines will also be concatenated, and will follow the
 *  help command's "SYNOPSIS" header.  If the command has no arguments, enter
 *  a string consisting of a space, i.e., " ".
 *  The fourth and subsequent lines will follow the help command's
 *  "DESCRIPTION" header.
 *
 *  The program name can be referenced by using the %%s format.  The final
 *  entry in each command's help data string list must be a NULL.
 */
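/*
 * [Editorial sketch, not part of crash:] a minimal help array following
 * the template above.  The "hello" command, its text, and the
 * HELP_TEMPLATE_DEMO guard are hypothetical.
 */
#ifdef HELP_TEMPLATE_DEMO
char *help_hello[] = {
"hello",				/* command-name */
"print a greeting",			/* NAME: appended to command-name */
"[name]",				/* SYNOPSIS: argument-usage line */
"  This hypothetical command prints a greeting, optionally addressed",
"  to the given name.",			/* DESCRIPTION lines follow */
"\nEXAMPLES",
"    %s> hello",
"    hello, world",
NULL,					/* mandatory terminator */
};
#endif /* HELP_TEMPLATE_DEMO */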
char *help_foreach[] = {
"foreach",
"display command data for multiple tasks in the system",
"[[pid | taskp | name | state | [kernel | user | gleader | active]] ...]\n"
"                 command [flag] [argument]",
"  This command allows for an examination of various kernel data associated",
"  with any, or all, tasks in the system, without having to set the context",
"  to each targeted task.\n",
"     pid  perform the command(s) on this PID.",
"   taskp  perform the command(s) on task referenced by this hexadecimal",
"          task_struct pointer.",
"    name  perform the command(s) on all tasks with this name.  If the",
"          task name can be confused with a foreach command name, then",
"          precede the name string with a \"\\\".  If the name string is",
"          enclosed within \"'\" characters, then the encompassed string",
"          must be a POSIX extended regular expression that will be used",
"          to match task names.",
"    user  perform the command(s) on all user (non-kernel) threads.",
" gleader  perform the command(s) on all user (non-kernel) thread group leaders.",
"  kernel  perform the command(s) on all kernel threads.",
"  active  perform the command(s) on the active thread on each CPU.",
"   state  perform the command(s) on all tasks in the specified state, which",
"          may be one of: RU, IN, UN, ST, ZO, TR, SW, DE, WA, PA, ID or NE.\n",
"  If none of the task-identifying arguments above are entered, the command",
"  will be performed on all tasks.\n",
" command  select one or more of the following commands to be run on the",
"          tasks selected, or on all tasks:\n",
"      bt  run the \"bt\" command  (optional flags: -r -t -l -e -R -f -F",
"          -o -s -x -d)",
"      vm  run the \"vm\" command  (optional flags: -p -v -m -R -d -x)",
"    task  run the \"task\" command  (optional flags: -R -d -x)",
"   files  run the \"files\" command  (optional flags: -c -R)",
"     net  run the \"net\" command  (optional flags: -s -S -R -d -x)",
"     set  run the \"set\" command",
"      ps  run the \"ps\" command  (optional flags: -G -s -p -c -t -l -a",
"          -g -r -y)",
"     sig  run the \"sig\" command  (optional flag: -g)",
"    vtop  run the \"vtop\" command  (optional flags: -c -u -k)\n",
"    flag  Pass this optional flag to the command selected.",
"argument  Pass this argument to the command selected.",
" ",
"  A header containing the PID, task address, CPU and command name will be",
"  prepended before the command output for each selected task.  Consult the",
"  help page of each of the command types above for details.",
"\nEXAMPLES",
"  Display the stack traces for all tasks:\n",
"    %s> foreach bt",
"    PID: 4752   TASK: c7680000   CPU: 1   COMMAND: \"xterm\"",
"     #0 [c7681edc] schedule at c01135f6",
"        (void)",
"     #1 [c7681f34] schedule_timeout at c01131ff",
"        (24)",
"     #2 [c7681f64] do_select at c0132838",
"        (5, c7681fa4, c7681fa0)",
"     #3 [c7681fbc] sys_select at c0132dad",
"        (5, 8070300, 8070380, 0, 0)",
"     #4 [bffffb0c] system_call at c0109944",
"        EAX: 0000008e  EBX: 00000005  ECX: 08070300  EDX: 08070380",
"        DS:  002b      ESI: 00000000  ES:  002b      EDI: 00000000",
"        SS:  002b      ESP: bffffadc  EBP: bffffb0c",
"        CS:  0023      EIP: 402259ee  ERR: 0000008e  EFLAGS: 00000246",
"    ",
"    PID: 557    TASK: c5600000   CPU: 0   COMMAND: \"nfsd\"",
"     #0 [c5601f38] schedule at c01135f6",
"        (void)",
"     #1 [c5601f90] schedule_timeout at c01131ff",
"        (c5600000)",
"     #2 [c5601fb8] svc_recv at c805363a",
"        (c0096f40, c5602800, 7fffffff, 100, c65c9f1c)",
"     #3 [c5601fec] (nfsd module) at c806e303",
"        (c5602800, c5602800, c0096f40, 6c6e0002, 50)",
"     #4 [c65c9f24] kernel_thread at c010834f",
"        (0, 0, ext2_file_inode_operations)",
"    ",
"    PID: 824    TASK: c7c84000   CPU: 0   COMMAND: \"mingetty\"",
"    ...\n",
"  Display the task_struct structure for each \"bash\" command:\n",
"    %s> foreach bash task",
"    ...\n",
"  Display the open files for all tasks:\n",
"    %s> foreach files",
"    ...\n",
"  Display the state of tasks whose name contains a match to \"event.*\":\n",
"    %s> foreach 'event.*' task -R state",
"    PID: 99     TASK: ffff8804750d5500  CPU: 0   COMMAND: \"events/0\"",
"      state = 1,",
"    ",
"    PID: 100    TASK: ffff8804750d4ac0  CPU: 1   COMMAND: \"events/1\"",
"      state = 1,",
"    ",
"    PID: 101    TASK: ffff8804750d4080  CPU: 2   COMMAND: \"events/2\"",
"      state = 1,",
"    ...\n",
"  Display the stack traces for all blocked (TASK_UNINTERRUPTIBLE) tasks:\n",
"    %s> foreach UN bt",
"    PID: 428    TASK: ffff880036b6c560  CPU: 1   COMMAND: \"jbd2/dm-1-8\"",
"     #0 [ffff880035779a70] __schedule at ffffffff815df272",
"     #1 [ffff880035779b08] schedule at ffffffff815dfacf",
"     #2 [ffff880035779b18] io_schedule at ffffffff815dfb7f",
"     #3 [ffff880035779b38] sleep_on_page at ffffffff81119a4e",
"     #4 [ffff880035779b48] __wait_on_bit at ffffffff815e039f",
"     #5 [ffff880035779b98] wait_on_page_bit at ffffffff81119bb8",
"     #6 [ffff880035779be8] filemap_fdatawait_range at ffffffff81119ccc",
"     #7 [ffff880035779cd8] filemap_fdatawait at ffffffff81119d8b",
"     #8 [ffff880035779ce8] jbd2_journal_commit_transaction at ffffffff8123a99c",
"     #9 [ffff880035779e58] kjournald2 at ffffffff8123ee7b",
"    #10 [ffff880035779ee8] kthread at ffffffff8108fb9c",
"    #11 [ffff880035779f48] kernel_thread_helper at ffffffff815ebaf4",
"    ...\n",
NULL
};
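/*
 * [Editorial sketch, not part of crash:] how a template-style array such
 * as help_foreach above maps onto the NAME/SYNOPSIS/DESCRIPTION sections
 * described in the template comment earlier.  The exact separators and
 * spacing are illustrative only, and crash additionally substitutes the
 * program name for %s; the demo names and the HELP_LAYOUT_DEMO guard are
 * hypothetical.
 */
#ifdef HELP_LAYOUT_DEMO
#include <stdio.h>

static char *layout_demo_help[] = {
"demo",
"show how help text is laid out",
"[arg]",
"  The fourth and subsequent lines form the DESCRIPTION section.",
NULL,
};

static void
show_help_layout(char **help_data)
{
	int i;

	/* NAME: the first line concatenated with the second */
	printf("NAME\n  %s - %s\n\n", help_data[0], help_data[1]);

	/* SYNOPSIS: the first line concatenated with the third */
	printf("SYNOPSIS\n  %s %s\n\n", help_data[0], help_data[2]);

	/* DESCRIPTION: the fourth and subsequent lines, up to the NULL */
	printf("DESCRIPTION\n");
	for (i = 3; help_data[i]; i++)
		printf("%s\n", help_data[i]);
}

int
main(void)
{
	show_help_layout(layout_demo_help);
	return 0;
}
#endif /* HELP_LAYOUT_DEMO */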
char *help_ascii[] = {
"ascii",
"translate a hexadecimal string to ASCII",
"value ...",
"  Translates 32-bit or 64-bit hexadecimal values to ASCII.  If no argument",
"  is entered, an ASCII chart is displayed.",
"\nEXAMPLES",
"  Translate the hexadecimal value of 0x62696c2f7273752f to ASCII:",
"\n    %s> ascii 62696c2f7273752f",
"    62696c2f7273752f: /usr/lib",
"\n  Display an ASCII chart:",
"\n    %s> ascii",
"    ",
"          0    1   2   3   4   5   6   7",
"        +-------------------------------",
"      0 | NUL DLE  SP  0   @   P   `   p",
"      1 | SOH DC1  !   1   A   Q   a   q",
"      2 | STX DC2  \"   2   B   R   b   r",
"      3 | ETX DC3  #   3   C   S   c   s",
"      4 | EOT DC4  $   4   D   T   d   t",
"      5 | ENQ NAK  \%   5   E   U   e   u",
"      6 | ACK SYN  &   6   F   V   f   v",
"      7 | BEL ETB  '   7   G   W   g   w",
"      8 | BS  CAN  (   8   H   X   h   x",
"      9 | HT  EM   )   9   I   Y   i   y",
"      A | LF  SUB  *   :   J   Z   j   z",
"      B | VT  ESC  +   ;   K   [   k   {",
"      C | FF  FS   ,   <   L   \\   l   |",
"      D | CR  GS   -   =   M   ]   m   }",
"      E | SO  RS   .   >   N   ^   n   ~",
"      F | SI  US   /   ?   O   _   o  DEL",
NULL
};

char *help_quit[] = {
"quit",
"exit this session",
" ",
"  Bail out of the current %s session.",
"\nNOTE",
"  This command is equivalent to the \"exit\" command.",
NULL
};

char *help_exit[] = {
"exit",
"exit this session",
" ",
"  Bail out of the current %s session.",
"\nNOTE",
"  This command is equivalent to the \"q\" command.",
NULL
};

char *help_help[] = {
"help",
"get help",
"[command | all] [-