crash-7.2.1/0000755000000000000000000000000013240637645011331 5ustar rootrootcrash-7.2.1/main.c0000775000000000000000000017030513240637645012434 0ustar rootroot/* main.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2017 David Anderson * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" #include "xen_hyper_defs.h" #include #include #include static void setup_environment(int, char **); static int is_external_command(void); static int is_builtin_command(void); static int is_input_file(void); static void check_xen_hyper(void); static void show_untrusted_files(void); static void get_osrelease(char *); static void get_log(char *); static char *no_vmcoreinfo(const char *); static struct option long_options[] = { {"memory_module", required_argument, 0, 0}, {"memory_device", required_argument, 0, 0}, {"no_kallsyms", 0, 0, 0}, {"no_modules", 0, 0, 0}, {"help", optional_argument, 0, 'h'}, {"no_data_debug", 0, 0, 0}, {"no_crashrc", 0, 0, 0}, {"no_kmem_cache", 0, 0, 0}, {"kmem_cache_delay", 0, 0, 0}, {"readnow", 0, 0, 0}, {"smp", 0, 0, 0}, {"machdep", required_argument, 0, 0}, {"version", 0, 0, 0}, {"buildinfo", 0, 0, 0}, {"cpus", required_argument, 0, 0}, {"no_ikconfig", 0, 0, 0}, {"hyper", 0, 0, 0}, {"p2m_mfn", required_argument, 0, 0}, {"xen_phys_start", required_argument, 0, 0}, {"zero_excluded", 0, 0, 0}, {"no_panic", 0, 0, 0}, {"more", 0, 0, 0}, {"less", 0, 0, 0}, {"CRASHPAGER", 0, 0, 0}, 
{"no_scroll", 0, 0, 0},
        {"reloc", required_argument, 0, 0},
        {"kaslr", required_argument, 0, 0},
        {"active", 0, 0, 0},
        {"minimal", 0, 0, 0},
        {"mod", required_argument, 0, 0},
        {"kvmhost", required_argument, 0, 0},
        {"kvmio", required_argument, 0, 0},
        {"no_elf_notes", 0, 0, 0},
        {"osrelease", required_argument, 0, 0},
        {"log", required_argument, 0, 0},
        {"hex", 0, 0, 0},
        {"dec", 0, 0, 0},
        {"no_strip", 0, 0, 0},
        {"hash", required_argument, 0, 0},
        {"offline", required_argument, 0, 0},
        {"src", required_argument, 0, 0},
        {0, 0, 0, 0}
};

/*
 *  Entry point.  Parses the command-line options and the kernel/dumpfile
 *  arguments (accepted in either order), initializes the core subsystems,
 *  and then hands control to the embedded gdb, which calls back into
 *  main_loop() below once it has initialized itself.
 */
int
main(int argc, char **argv)
{
        int i, c, option_index;
        char *tmpname;

        setup_environment(argc, argv);

        /*
         *  Get and verify command line options.
         */
        opterr = 0;
        optind = 0;
        while ((c = getopt_long(argc, argv, "Lkgh::e:i:sSvc:d:tfp:m:xo:",
                long_options, &option_index)) != -1) {
                switch (c)
                {
                case 0:
                        /* long-only options: dispatch on the option name */
                        if (STREQ(long_options[option_index].name,
                            "memory_module"))
                                pc->memory_module = optarg;

                        else if (STREQ(long_options[option_index].name,
                            "memory_device"))
                                pc->memory_device = optarg;

                        else if (STREQ(long_options[option_index].name,
                            "no_kallsyms"))
                                kt->flags |= NO_KALLSYMS;

                        else if (STREQ(long_options[option_index].name,
                            "no_modules"))
                                kt->flags |= NO_MODULE_ACCESS;

                        else if (STREQ(long_options[option_index].name,
                            "no_ikconfig"))
                                kt->flags |= NO_IKCONFIG;

                        else if (STREQ(long_options[option_index].name,
                            "no_data_debug"))
                                pc->flags &= ~DATADEBUG;

                        else if (STREQ(long_options[option_index].name,
                            "no_kmem_cache"))
                                vt->flags |= KMEM_CACHE_UNAVAIL;

                        else if (STREQ(long_options[option_index].name,
                            "kmem_cache_delay"))
                                vt->flags |= KMEM_CACHE_DELAY;

                        else if (STREQ(long_options[option_index].name,
                            "readnow"))
                                pc->flags |= READNOW;

                        else if (STREQ(long_options[option_index].name,
                            "smp"))
                                kt->flags |= SMP;

                        else if (STREQ(long_options[option_index].name,
                            "machdep")) {
                                /* stash into first free machdep arg slot */
                                for (i = 0; i < MAX_MACHDEP_ARGS; i++) {
                                        if (machdep->cmdline_args[i])
                                                continue;
                                        machdep->cmdline_args[i] = optarg;
                                        break;
                                }
                                if (i == MAX_MACHDEP_ARGS)
                                        error(INFO,
                                            "option ignored: %s\n", optarg);

                        } else if (STREQ(long_options[option_index].name,
                            "version")) {
                                pc->flags |= VERSION_QUERY;
                                display_version();
                                display_gdb_banner();
                                clean_exit(0);

                        } else if (STREQ(long_options[option_index].name,
                            "buildinfo")) {
                                dump_build_data();
                                clean_exit(0);
                        }

                        else if (STREQ(long_options[option_index].name,
                            "cpus"))
                                kt->cpus_override = optarg;

                        else if (STREQ(long_options[option_index].name,
                            "hyper"))
                                pc->flags |= XEN_HYPER;

                        else if (STREQ(long_options[option_index].name,
                            "p2m_mfn"))
                                xen_kdump_p2m_mfn(optarg);

                        else if (STREQ(long_options[option_index].name,
                            "xen_phys_start"))
                                set_xen_phys_start(optarg);

                        else if (STREQ(long_options[option_index].name,
                            "zero_excluded"))
                                *diskdump_flags |= ZERO_EXCLUDED;

                        else if (STREQ(long_options[option_index].name,
                            "no_elf_notes")) {
                                if (machine_type("X86") ||
                                    machine_type("X86_64"))
                                        *diskdump_flags |= NO_ELF_NOTES;
                                else
                                        error(INFO,
                                            "--no_elf_notes is only applicable to "
                                            "the X86 and X86_64 architectures.\n");
                        }

                        else if (STREQ(long_options[option_index].name,
                            "no_panic"))
                                tt->flags |= PANIC_TASK_NOT_FOUND;

                        else if (STREQ(long_options[option_index].name,
                            "no_strip"))
                                st->flags |= NO_STRIP;

                        else if (STREQ(long_options[option_index].name,
                            "more")) {
                                if ((pc->scroll_command != SCROLL_NONE) &&
                                    file_exists("/bin/more", NULL))
                                        pc->scroll_command = SCROLL_MORE;
                        }

                        else if (STREQ(long_options[option_index].name,
                            "less")) {
                                if ((pc->scroll_command != SCROLL_NONE) &&
                                    file_exists("/usr/bin/less", NULL))
                                        pc->scroll_command = SCROLL_LESS;
                        }

                        else if (STREQ(long_options[option_index].name,
                            "CRASHPAGER")) {
                                if ((pc->scroll_command != SCROLL_NONE) &&
                                    CRASHPAGER_valid())
                                        pc->scroll_command = SCROLL_CRASHPAGER;
                        }

                        else if (STREQ(long_options[option_index].name,
                            "no_scroll"))
                                pc->flags &= ~SCROLL;

                        else if (STREQ(long_options[option_index].name,
                            "no_crashrc"))
                                pc->flags |= NOCRASHRC;

                        else if (STREQ(long_options[option_index].name,
                            "active"))
                                tt->flags |= ACTIVE_ONLY;

                        else if (STREQ(long_options[option_index].name,
                            "mod"))
                                kt->module_tree = optarg;

                        else if (STREQ(long_options[option_index].name,
                            "hash")) {
                                if (!calculate(optarg, &pc->nr_hash_queues,
                                    NULL, 0)) {
                                        error(INFO,
                                            "invalid --hash argument: %s\n",
                                            optarg);
                                }
                        }

                        else if (STREQ(long_options[option_index].name,
                            "kaslr")) {
                                if (!machine_type("X86_64") &&
                                    !machine_type("ARM64") &&
                                    !machine_type("X86"))
                                        error(INFO, "--kaslr not valid "
                                            "with this machine type.\n");
                                else if (STREQ(optarg, "auto"))
                                        kt->flags2 |= (RELOC_AUTO|KASLR);
                                else {
                                        if (!calculate(optarg, &kt->relocate,
                                            NULL, 0)) {
                                                error(INFO,
                                                    "invalid --kaslr argument: %s\n",
                                                    optarg);
                                                program_usage(SHORT_FORM);
                                        }
                                        /*
                                         *  The offset is stored negated;
                                         *  relocation code adds it back.
                                         */
                                        kt->relocate *= -1;
                                        kt->flags |= RELOC_SET;
                                        kt->flags2 |= KASLR;
                                }
                        }

                        else if (STREQ(long_options[option_index].name,
                            "reloc")) {
                                if (!calculate(optarg, &kt->relocate,
                                    NULL, 0)) {
                                        error(INFO,
                                            "invalid --reloc argument: %s\n",
                                            optarg);
                                        program_usage(SHORT_FORM);
                                }
                                kt->flags |= RELOC_SET;
                        }

                        else if (STREQ(long_options[option_index].name,
                            "minimal"))
                                pc->flags |= MINIMAL_MODE;

                        else if (STREQ(long_options[option_index].name,
                            "kvmhost"))
                                set_kvmhost_type(optarg);

                        else if (STREQ(long_options[option_index].name,
                            "kvmio"))
                                set_kvm_iohole(optarg);

                        else if (STREQ(long_options[option_index].name,
                            "osrelease")) {
                                pc->flags2 |= GET_OSRELEASE;
                                get_osrelease(optarg);
                        }

                        else if (STREQ(long_options[option_index].name,
                            "log")) {
                                pc->flags2 |= GET_LOG;
                                get_log(optarg);
                        }

                        else if (STREQ(long_options[option_index].name,
                            "hex")) {
                                pc->flags2 |= RADIX_OVERRIDE;
                                pc->output_radix = 16;
                        }

                        else if (STREQ(long_options[option_index].name,
                            "dec")) {
                                pc->flags2 |= RADIX_OVERRIDE;
                                pc->output_radix = 10;
                        }

                        else if (STREQ(long_options[option_index].name,
                            "offline")) {
                                if (STREQ(optarg, "show"))
                                        pc->flags2 &= ~OFFLINE_HIDE;
                                else if (STREQ(optarg, "hide"))
                                        pc->flags2 |= OFFLINE_HIDE;
                                else {
                                        error(INFO,
                                            "invalid --offline argument: %s\n",
                                            optarg);
                                        program_usage(SHORT_FORM);
                                }
                        }

                        else if (STREQ(long_options[option_index].name,
                            "src"))
                                kt->source_tree = optarg;

                        else {
                                error(INFO,
                                    "internal error: option %s unhandled\n",
                                    long_options[option_index].name);
                                program_usage(SHORT_FORM);
                        }
                        break;

                case 'f':
                        st->flags |= FORCE_DEBUGINFO;
                        break;

                case 'g':
                        pc->flags |= KERNEL_DEBUG_QUERY;
                        break;

                case 'h':
                        /*
                         *  note: long_getopt's handling of optional arguments
                         *  is weak.  To it, an optional argument must be part
                         *  of the same argument as the flag itself (eg.
                         *  --help=commands or -hcommands).  We want to accept
                         *  "--help commands" or "-h commands".  So we must do
                         *  that part ourselves.
                         */
                        if (optarg != NULL)
                                cmd_usage(optarg,
                                    COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP);
                        else if (argv[optind] != NULL &&
                            argv[optind][0] != '-')
                                cmd_usage(argv[optind++],
                                    COMPLETE_HELP|PIPE_TO_SCROLL|MUST_HELP);
                        else
                                program_usage(LONG_FORM);
                        clean_exit(0);

                case 'k':
                        pc->flags |= KERNTYPES;
                        break;

                case 'e':
                        if (STREQ(optarg, "vi"))
                                pc->editing_mode = "vi";
                        else if (STREQ(optarg, "emacs"))
                                pc->editing_mode = "emacs";
                        else
                                fprintf(fp, "invalid edit mode: %s\n", optarg);
                        break;

                case 't':
                        kt->flags2 |= GET_TIMESTAMP;
                        break;

                case 'i':
                        pc->input_file = optarg;
                        pc->flags |= CMDLINE_IFILE;
                        break;

                case 'v':
                        pc->flags |= VERSION_QUERY;
                        display_version();
                        display_gdb_banner();
                        clean_exit(0);

                case 's':
                        pc->flags |= SILENT;
                        pc->flags &= ~SCROLL;
                        // pc->scroll_command = SCROLL_NONE;  (why?)
                        break;

                case 'L':
                        /* lock all current and future pages in memory */
                        if (mlockall(MCL_CURRENT|MCL_FUTURE) == -1)
                                perror("mlockall");
                        break;

                case 'S':
                        if (is_system_map("/boot/System.map")) {
                                pc->system_map = "/boot/System.map";
                                pc->flags |= (SYSMAP|SYSMAP_ARG);
                        }
                        break;

                case 'c':
                        create_console_device(optarg);
                        break;

                case 'd':
                        pc->debug = atol(optarg);
                        set_lkcd_debug(pc->debug);
                        set_vas_debug(pc->debug);
                        break;

                case 'p':
                        force_page_size(optarg);
                        break;

                case 'm':
                        /* same slot-fill scheme as --machdep above */
                        for (i = 0; i < MAX_MACHDEP_ARGS; i++) {
                                if (machdep->cmdline_args[i])
                                        continue;
                                machdep->cmdline_args[i] = optarg;
                                break;
                        }
                        if (i == MAX_MACHDEP_ARGS)
                                error(INFO, "option ignored: %s\n", optarg);
                        break;

                case 'x':
                        pc->flags |= PRELOAD_EXTENSIONS;
                        break;

                case 'o':
                        ramdump_elf_output_file(optarg);
                        break;

                default:
                        error(INFO, "invalid option: %s\n",
                            argv[optind-1]);
                        program_usage(SHORT_FORM);
                }
        }
        opterr = 1;

        display_version();

        /*
         *  Take the kernel and dumpfile arguments in either order.
         */
        while (argv[optind]) {

                if (is_ramdump(argv[optind])) {
                        if (pc->flags & MEMORY_SOURCES) {
                                error(INFO, "too many dumpfile arguments\n");
                                program_usage(SHORT_FORM);
                        }
                        if (ACTIVE()) {
                                pc->flags |= LIVE_RAMDUMP;
                                pc->readmem = read_ramdump;
                                pc->writemem = NULL;
                                optind++;
                                continue;
                        }
                        pc->dumpfile = ramdump_to_elf();
                        if (is_kdump(pc->dumpfile, KDUMP_LOCAL)) {
                                pc->flags |= KDUMP;
                                if (is_ramdump_image())
                                        pc->readmem = read_ramdump;
                                else
                                        pc->readmem = read_kdump;
                                pc->writemem = NULL;
                        } else {
                                error(INFO, "malformed ELF file: %s\n",
                                        pc->dumpfile);
                                program_usage(SHORT_FORM);
                        }

                        optind++;
                        continue;
                }

                if (is_remote_daemon(argv[optind])) {
                        if (pc->flags & DUMPFILE_TYPES) {
                                error(INFO,
                                    "too many dumpfile/memory arguments\n");
                                program_usage(SHORT_FORM);
                        }
                        pc->flags2 |= REMOTE_DAEMON;
                        optind++;
                        continue;
                }

                if (STREQ(argv[optind], "/dev/crash")) {
                        pc->memory_device = argv[optind];
                        optind++;
                        continue;
                }

                if (!file_exists(argv[optind], NULL)) {
                        error(INFO, "%s: %s\n", argv[optind],
                                strerror(ENOENT));
                        program_usage(SHORT_FORM);
                } else if (is_directory(argv[optind])) {
                        error(INFO, "%s: not a supported file format\n",
                                argv[optind]);
                        program_usage(SHORT_FORM);
                } else if (!is_readable(argv[optind]))
                        program_usage(SHORT_FORM);

                if (is_kernel(argv[optind])) {
                        if (pc->namelist || pc->server_namelist) {
                                if (!select_namelist(argv[optind])) {
                                        error(INFO,
                                            "too many namelist arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                        } else
                                pc->namelist = argv[optind];

                } else if (is_compressed_kernel(argv[optind], &tmpname)) {
                        if (pc->namelist) {
                                if (!select_namelist(tmpname)) {
                                        error(INFO,
                                            "too many namelist arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                if (pc->namelist_debug == tmpname) {
                                        pc->namelist_debug_orig = argv[optind];
                                } else {
                                        pc->namelist_debug_orig =
                                                pc->namelist_orig;
                                        pc->namelist_orig = argv[optind];
                                }
                        } else {
                                pc->namelist = tmpname;
                                pc->namelist_orig = argv[optind];
                        }
                        pc->cleanup = NULL;

                } else if (!(pc->flags & KERNEL_DEBUG_QUERY)) {

                        if (is_flattened_format(argv[optind]))
                                pc->flags2 |= FLAT;

                        if (STREQ(argv[optind], "/dev/mem")) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= DEVMEM;
                                pc->dumpfile = NULL;
                                pc->readmem = read_dev_mem;
                                pc->writemem = write_dev_mem;
                                pc->live_memsrc = argv[optind];

                        } else if (is_proc_kcore(argv[optind], KCORE_LOCAL)) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= PROC_KCORE;
                                pc->dumpfile = NULL;
                                pc->readmem = read_proc_kcore;
                                pc->writemem = write_proc_kcore;
                                pc->live_memsrc = argv[optind];

                        } else if (is_netdump(argv[optind], NETDUMP_LOCAL)) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= NETDUMP;
                                pc->dumpfile = argv[optind];

                                if (is_sadump_xen()) {
                                        pc->readmem = read_kdump;
                                        pc->writemem = write_kdump;
                                } else {
                                        pc->readmem = read_netdump;
                                        pc->writemem = write_netdump;
                                }

                        } else if (is_kdump(argv[optind], KDUMP_LOCAL)) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= KDUMP;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_kdump;
                                pc->writemem = write_kdump;

                        } else if (is_kvmdump(argv[optind])) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= KVMDUMP;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_kvmdump;
                                pc->writemem = write_kvmdump;

                        } else if (is_kvmdump_mapfile(argv[optind])) {
                                if (pc->kvmdump_mapfile) {
                                        error(INFO,
                                            "too many KVM map file arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->kvmdump_mapfile = argv[optind];

                        } else if (is_xendump(argv[optind])) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= XENDUMP;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_xendump;
                                pc->writemem = write_xendump;

                        } else if (is_system_map(argv[optind])) {
                                pc->system_map = argv[optind];
                                pc->flags |= (SYSMAP|SYSMAP_ARG);

                        } else if (is_diskdump(argv[optind])) {
                                /* split diskdumps may span multiple files */
                                if ((pc->flags & MEMORY_SOURCES) &&
                                    (!dumpfile_is_split())) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= DISKDUMP;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_diskdump;
                                pc->writemem = write_diskdump;

                        } else if (is_lkcd_compressed_dump(argv[optind])) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= LKCD;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_lkcd_dumpfile;
                                pc->writemem = write_lkcd_dumpfile;

                        } else if (is_mclx_compressed_dump(argv[optind])) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= MCLXCD;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_mclx_dumpfile;
                                pc->writemem = write_mclx_dumpfile;

                        } else if (is_s390_dump(argv[optind])) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= S390D;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_s390_dumpfile;
                                pc->writemem = write_s390_dumpfile;

                        } else if (is_sadump(argv[optind])) {
                                /* sadump disksets may span multiple files */
                                if ((pc->flags & MEMORY_SOURCES) &&
                                    !sadump_is_diskset()) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= SADUMP;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_sadump;
                                pc->writemem = write_sadump;

                        } else if (is_vmware_vmss(argv[optind])) {
                                if (pc->flags & MEMORY_SOURCES) {
                                        error(INFO,
                                            "too many dumpfile arguments\n");
                                        program_usage(SHORT_FORM);
                                }
                                pc->flags |= VMWARE_VMSS;
                                pc->dumpfile = argv[optind];
                                pc->readmem = read_vmware_vmss;
                                pc->writemem = write_vmware_vmss;

                        } else {
                                error(INFO,
                                    "%s: not a supported file format\n",
                                        argv[optind]);
                                program_usage(SHORT_FORM);
                        }
                }
                optind++;
        }

        check_xen_hyper();

        if (setjmp(pc->main_loop_env))
                clean_exit(1);

        /*
         *  Initialize various subsystems.
         */
        fd_init();
        buf_init();
        cmdline_init();
        mem_init();
        hq_init();
        machdep_init(PRE_SYMTAB);
        symtab_init();
        paravirt_init();
        machdep_init(PRE_GDB);
        datatype_init();

        /*
         *  gdb_main_loop() modifies "command_loop_hook" to point to the
         *  main_loop() function below, and then calls gdb's main() function.
         *  After gdb initializes itself, it calls back to main_loop().
         */
        gdb_main_loop(argc, argv);

        clean_exit(0);
        exit(0);
}

/*
 *  This routine is called from above, but also will be re-entered
 *  as part of gdb's SIGINT handling.  Since GDB_INIT and RUNTIME
 *  will be set on re-entrancy, the initialization routines won't
 *  be called.  This can be avoided by always making gdb ignore SIGINT.
 */
void
main_loop(void)
{
        /* warn up front about dumpfiles known to be damaged or partial */
        if (pc->flags2 & ERASEINFO_DATA)
                error(WARNING, "\n%s:\n "
                    "Kernel data has been erased from this dumpfile. This may "
                    "cause\n the crash session to fail entirely, may "
                    "cause commands to fail,\n or may result in "
                    "unpredictable\n runtime behavior.\n",
                        pc->dumpfile);

        if (pc->flags2 & INCOMPLETE_DUMP) {
                error(WARNING, "\n%s:\n "
                    "This dumpfile is incomplete. This may cause the crash session"
                    "\n to fail entirely, may cause commands to fail, or may"
                    " result in\n unpredictable runtime behavior.\n",
                        pc->dumpfile);

                if (!(*diskdump_flags & ZERO_EXCLUDED))
                        fprintf(fp,
                            " NOTE: This dumpfile may be analyzed with the --zero_excluded command\n"
                            " line option, in which case any read requests from missing pages\n"
                            " will return zero-filled memory.\n");
        }

        if (pc->flags2 & EXCLUDED_VMEMMAP) {
                error(WARNING, "\n%s:\n "
                    "This dumpfile is incomplete because the page structures associated\n"
                    " with excluded pages may also be excluded. This may cause the crash\n"
                    " session to fail entirely, may cause commands to fail (most notably\n"
                    " the \"kmem\" command), or may result in unpredictable runtime behavior.\n",
                        pc->dumpfile);
        }

        /* first entry only: bring up gdb and the per-mode subsystems */
        if (!(pc->flags & GDB_INIT)) {
                gdb_session_init();
                machdep_init(POST_RELOC);
                show_untrusted_files();
                kdump_backup_region_init();
                if (XEN_HYPER_MODE()) {
#ifdef XEN_HYPERVISOR_ARCH
                        machdep_init(POST_GDB);
                        xen_hyper_init();
                        machdep_init(POST_INIT);
#else
                        error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
                } else if (!(pc->flags & MINIMAL_MODE)) {
                        read_in_kernel_config(IKCFG_INIT);
                        kernel_init();
                        machdep_init(POST_GDB);
                        vm_init();
                        machdep_init(POST_VM);
                        module_init();
                        help_init();
                        task_init();
                        vfs_init();
                        net_init();
                        dev_init();
                        machdep_init(POST_INIT);
                }
        } else
                SIGACTION(SIGINT, restart, &pc->sigaction, NULL);

        /*
         *  Display system statistics and current context.
         */
        if (!(pc->flags & SILENT) && !(pc->flags & RUNTIME)) {
                if (XEN_HYPER_MODE()) {
#ifdef XEN_HYPERVISOR_ARCH
                        xen_hyper_display_sys_stats();
                        xen_hyper_show_vcpu_context(XEN_HYPER_VCPU_LAST_CONTEXT());
                        fprintf(fp, "\n");
#else
                        error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
                } else if (!(pc->flags & MINIMAL_MODE)) {
                        display_sys_stats();
                        show_context(CURRENT_CONTEXT());
                        fprintf(fp, "\n");
                }
        }

        if (pc->flags & MINIMAL_MODE)
                error(NOTE,
                    "minimal mode commands: log, dis, rd, sym, eval, set, extend and exit\n\n");

        pc->flags |= RUNTIME;

        if (pc->flags & PRELOAD_EXTENSIONS)
                preload_extensions();

        /*
         *  Return here if a non-recoverable error occurs
         *  during command execution.
         */
        if (setjmp(pc->main_loop_env)) {
                ;
        }

        /*
         *  process_command_line() reads, parses and stores input command lines
         *  in the global args[] array.  exec_command() figures out what to
         *  do with the parsed line.
         */
        while (TRUE) {
                process_command_line();
                exec_command();
        }
}

/*
 *  Most of the time args[0] simply contains the name string of a command
 *  found in the global command_table[].  Special consideration is done for
 *  dealing with input files, "known" external commands, and built-in commands.
 *  If none of the above apply, the args[0] string is checked against the
 *  known list of structure, union and typedef names, and if found, passed
 *  on to cmd_struct(), cmd_union() or cmd_whatis().
 */
void
exec_command(void)
{
        struct command_table_entry *ct;
        struct args_input_file args_ifile;

        /*
         *  A leading backslash escapes command-line modification:
         *  strip it and mark the command line NO_MODIFY.
         */
        if (args[0] && (args[0][0] == '\\') && args[0][1]) {
                shift_string_left(args[0], 1);
                shift_string_left(pc->orig_line, 1);
                pc->curcmd_flags |= NO_MODIFY;
        }

reattempt:
        if (!args[0])
                return;

        optind = argerrs = 0;

        if ((ct = get_command_table_entry(args[0]))) {
                if (ct->flags & REFRESH_TASK_TABLE) {
                        if (XEN_HYPER_MODE()) {
#ifdef XEN_HYPERVISOR_ARCH
                                xen_hyper_refresh_domain_context_space();
                                xen_hyper_refresh_vcpu_context_space();
#else
                                error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
                        } else if (!(pc->flags & MINIMAL_MODE)) {
                                tt->refresh_task_table();
                                sort_context_array();
                                sort_tgid_array();
                        }
                }

                /* track previous/current command names for "again" logic */
                if (!STREQ(pc->curcmd, pc->program_name))
                        pc->lastcmd = pc->curcmd;
                pc->curcmd = ct->name;
                pc->cmdgencur++;

                if (is_args_input_file(ct, &args_ifile))
                        exec_args_input_file(ct, &args_ifile);
                else
                        (*ct->func)();

                pc->lastcmd = pc->curcmd;
                pc->curcmd = pc->program_name;
                return;
        }

        /* not a table command: try the special-case handlers in turn */
        if (is_input_file())
                return;

        if (is_external_command())
                return;

        if (is_builtin_command())
                return;

        if (is_datatype_command())
                goto reattempt;

        /* comment lines are silently ignored */
        if (STRNEQ(args[0], "#") || STRNEQ(args[0], "//"))
                return;

        if (!(pc->flags & MINIMAL_MODE) &&
            is_gdb_command(TRUE, FAULT_ON_ERROR))
                goto reattempt;

        if (REMOTE() && remote_execute())
                return;

        pc->curcmd = pc->program_name;

        if (pc->flags & MINIMAL_MODE)
                error(INFO,
                    "%s: command not available in minimal mode\n"
                    "NOTE: minimal mode commands: log, dis, rd, sym, eval, set, extend and exit\n",
                    args[0]);
        else
                error(INFO, "command not found: %s\n", args[0]);

        if (pc->curcmd_flags & REPEAT)
                pc->curcmd_flags &= ~REPEAT;
}

/*
 *  Find the command_table structure associated with a command name,
 *  searching the built-in table first, then any loaded extensions.
 *  In minimal mode, only commands flagged MINIMAL are returned.
 */
struct command_table_entry *
get_command_table_entry(char *name)
{
        int i;
        struct command_table_entry *cp;
        struct extension_table *ext;

        if (pc->flags2 & GDB_CMD_MODE) {
                /*
                 *  In gdb mode everything goes to gdb unless prefixed
                 *  with the "crash" directive, which is shifted away here.
                 */
                if (STREQ(name, "crash")) {
                        if (argcnt == 1)
                                error(FATAL,
                                    "a crash command must follow "
                                    "the \"crash\" directive\n");
                        for (i = 1; i <= argcnt; i++)
                                args[i-1] = args[i];
                        argcnt--;
                        name = args[0];
                } else
                        name = "gdb";
        }

        for (cp = pc->cmd_table; cp->name; cp++) {
                if (STREQ(cp->name, name)) {
                        if (!(pc->flags & MINIMAL_MODE) ||
                            (cp->flags & MINIMAL))
                                return cp;
                        else
                                return NULL;
                }
        }

        for (ext = extension_table; ext; ext = ext->next) {
                for (cp = ext->command_table; cp->name; cp++) {
                        if (STREQ(cp->name, name)) {
                                if (!(pc->flags & MINIMAL_MODE) ||
                                    (cp->flags & MINIMAL))
                                        return cp;
                                else
                                        return NULL;
                        }
                }
        }

        return NULL;
}

/*
 *  A lone "<" redirects the remainder of the line from an input file.
 */
static int
is_input_file(void)
{
        if (STREQ(args[0], "<")) {
                exec_input_file();
                return TRUE;
        }

        return FALSE;
}

/*
 *  Handle the undocumented built-in commands "test" and "save".
 */
static int
is_builtin_command(void)
{
        int i;
        struct remote_file remote_file, *rfp;

        /*
         *  cmd_test() is used strictly for debugging -- but not advertised
         *  in the help menu.
         */
        if (STREQ(args[0], "test")) {
                pc->curcmd = "test";
                cmd_test();
                return TRUE;
        }

        if (STREQ(args[0], "save")) {
                pc->curcmd = "save";
                rfp = &remote_file;
                BZERO(rfp, sizeof(struct remote_file));
                rfp->flags |= REMOTE_VERBOSE;
                for (i = 1; i < argcnt; i++) {
                        rfp->filename = args[i];
                        get_remote_file(rfp);
                }
                return TRUE;
        }

        return FALSE;
}

/*
 *  Pure laziness -- to avoid having to type the exclamation point at the
 *  beginning of the line.
*/ static int is_external_command(void) { int i; char *cmd; char command[BUFSIZE]; cmd = args[0]; if (STREQ(cmd, "vi") || STREQ(cmd, "pwd") || STREQ(cmd, "grep") || STREQ(cmd, "cat") || STREQ(cmd, "more") || STREQ(cmd, "less") || STREQ(cmd, "echo") || STREQ(cmd, "ls")) { sprintf(command, "%s", cmd); for (i = 1; i < argcnt; i++) { strcat(command, " "); if (strstr(args[i], " ")) { strcat(command, "\""); strcat(command, args[i]); strcat(command, "\""); } else strcat(command, args[i]); } if (system(command) == -1) perror(command); return TRUE; } return FALSE; } void cmd_quit(void) { if (REMOTE()) remote_exit(); clean_exit(0); } void cmd_mach(void) { machdep->cmd_mach(); } static void setup_environment(int argc, char **argv) { int i; char *p1; char buf[BUFSIZE]; char homerc[BUFSIZE]; char localrc[BUFSIZE]; FILE *afp; char *program; program = argv[0]; /* * Program output typically goes via "fprintf(fp, ...)", but the * contents of fp are modified on the fly to handle redirection * to pipes or output files. */ fp = stdout; /* * Start populating the program_context structure. It's used so * frequently that "pc" has been declared globally to point to the * "program_context" structure. 
*/ pc->program_name = (char *)basename(program); pc->program_path = program; pc->program_version = build_version; pc->program_pid = (ulong)getpid(); pc->curcmd = pc->program_name; pc->flags = (HASH|SCROLL); pc->flags |= DATADEBUG; /* default until unnecessary */ pc->flags2 |= REDZONE; pc->confd = -2; pc->machine_type = MACHINE_TYPE; pc->readmem = read_dev_mem; /* defaults until argv[] is parsed */ pc->writemem = write_dev_mem; pc->read_vmcoreinfo = no_vmcoreinfo; pc->memory_module = NULL; pc->memory_device = MEMORY_DRIVER_DEVICE; machdep->bits = sizeof(long) * 8; machdep->verify_paddr = generic_verify_paddr; machdep->get_kvaddr_ranges = generic_get_kvaddr_ranges; pc->redhat_debug_loc = DEFAULT_REDHAT_DEBUG_LOCATION; pc->cmdgencur = 0; pc->cmd_table = linux_command_table; kt->BUG_bytes = -1; kt->flags |= PRE_KERNEL_INIT; /* * Set up to perform a clean_exit() upon parent death. */ SIGACTION(SIGUSR2, restart, &pc->sigaction, NULL); prctl(PR_SET_PDEATHSIG, SIGUSR2); /* * Get gdb version before initializing it since this might be one * of the short-hand commands that need it without running gdb. */ get_gdb_version(); /* * Set up the default scrolling behavior for terminal output. 
*/ if (isatty(fileno(stdout))) { if (CRASHPAGER_valid()) { pc->flags |= SCROLL; pc->scroll_command = SCROLL_CRASHPAGER; } else if (file_exists("/usr/bin/less", NULL)) { pc->flags |= SCROLL; pc->scroll_command = SCROLL_LESS; } else if (file_exists("/bin/more", NULL)) { pc->flags |= SCROLL; pc->scroll_command = SCROLL_MORE; } else { pc->scroll_command = SCROLL_NONE; pc->flags &= ~SCROLL; } } /* * Setup the readline command line editing mode based upon the * following order: * * (1) EDITOR environment variable * (2) overridden by any .crashrc entry: "set vi" or "set emacs" * (3) RL_VI_MODE if not set anywhere else */ pc->flags |= READLINE; pc->editing_mode = "no_mode"; if ((p1 = getenv("EDITOR"))) { if (strstr(p1, "vi")) pc->editing_mode = "vi"; if (strstr(p1, "emacs")) pc->editing_mode = "emacs"; } /* * Resolve $HOME .rc file first, then the one in the local directory. * Note that only "set" and "alias" commands are done at this time. */ for (i = 1; i < argc; i++) if (STREQ(argv[i], "--no_crashrc")) pc->flags |= NOCRASHRC; alias_init(NULL); if ((p1 = getenv("HOME"))) { if ((pc->home = (char *)malloc(strlen(p1)+1)) == NULL) { error(INFO, "home directory malloc: %s\n", strerror(errno)); pc->home = "(unknown)"; } else strcpy(pc->home, p1); sprintf(homerc, "%s/.%src", pc->home, pc->program_name); if (!(pc->flags & NOCRASHRC) && file_exists(homerc, NULL)) { if ((afp = fopen(homerc, "r")) == NULL) error(INFO, "cannot open %s: %s\n", homerc, strerror(errno)); else if (untrusted_file(afp, homerc)) fclose(afp); else { while (fgets(buf, BUFSIZE, afp)) resolve_rc_cmd(buf, ALIAS_RCHOME); fclose(afp); } } } sprintf(localrc, ".%src", pc->program_name); if (!same_file(homerc, localrc) && !(pc->flags & NOCRASHRC) && file_exists(localrc, NULL)) { if ((afp = fopen(localrc, "r")) == NULL) error(INFO, "cannot open %s: %s\n", localrc, strerror(errno)); else if (untrusted_file(afp, localrc)) fclose(afp); else { while (fgets(buf, BUFSIZE, afp)) resolve_rc_cmd(buf, ALIAS_RCLOCAL); 
fclose(afp); } } if (STREQ(pc->editing_mode, "no_mode")) pc->editing_mode = "vi"; machdep_init(SETUP_ENV); } /* * "help -p" output */ void dump_program_context(void) { int i; int others = 0; char *p1; char buf[BUFSIZE]; char buf2[BUFSIZE]; fprintf(fp, " program_name: %s\n", pc->program_name); fprintf(fp, " program_path: %s\n", pc->program_path); fprintf(fp, " program_version: %s\n", pc->program_version); fprintf(fp, " gdb_version: %s\n", pc->gdb_version); fprintf(fp, " program_pid: %ld\n", pc->program_pid); fprintf(fp, " prompt: \"%s\"\n", pc->prompt); fprintf(fp, " flags: %llx ", pc->flags); if (pc->flags) sprintf(buf, "("); if (pc->flags & RUNTIME) sprintf(&buf[strlen(buf)], "%sRUNTIME", others++ ? "|" : ""); if (pc->flags & LIVE_SYSTEM) sprintf(&buf[strlen(buf)], "%sLIVE_SYSTEM", others++ ? "|" : ""); if (pc->flags & TTY) sprintf(&buf[strlen(buf)], "%sTTY", others++ ? "|" : ""); if (pc->flags & IN_FOREACH) sprintf(&buf[strlen(buf)], "%sIN_FOREACH", others++ ? "|" : ""); if (pc->flags & MFD_RDWR) sprintf(&buf[strlen(buf)], "%sMFD_RDWR", others++ ? "|" : ""); if (pc->flags & KVMDUMP) sprintf(&buf[strlen(buf)], "%sKVMDUMP", others++ ? "|" : ""); if (pc->flags & SILENT) sprintf(&buf[strlen(buf)], "%sSILENT", others++ ? "|" : ""); if (pc->flags & HASH) sprintf(&buf[strlen(buf)], "%sHASH", others++ ? "|" : ""); if (pc->flags & SCROLL) sprintf(&buf[strlen(buf)], "%sSCROLL", others++ ? "|" : ""); if (pc->flags & NO_CONSOLE) sprintf(&buf[strlen(buf)], "%sNO_CONSOLE", others++ ? "|" : ""); if (pc->flags & MCLXCD) sprintf(&buf[strlen(buf)], "%sMCLXCD", others++ ? "|" : ""); if (pc->flags & RUNTIME_IFILE) sprintf(&buf[strlen(buf)], "%sRUNTIME_IFILE", others++ ? "|" : ""); if (pc->flags & CMDLINE_IFILE) sprintf(&buf[strlen(buf)], "%sCMDLINE_IFILE", others++ ? "|" : ""); if (pc->flags & DROP_CORE) sprintf(&buf[strlen(buf)], "%sDROP_CORE", others++ ? "|" : ""); if (pc->flags & LKCD) sprintf(&buf[strlen(buf)], "%sLKCD", others++ ? 
"|" : ""); if (pc->flags & GDB_INIT) sprintf(&buf[strlen(buf)], "%sGDB_INIT", others++ ? "|" : ""); if (pc->flags & IN_GDB) sprintf(&buf[strlen(buf)], "%sIN_GDB", others++ ? "|" : ""); if (pc->flags & RCHOME_IFILE) sprintf(&buf[strlen(buf)], "%sRCHOME_IFILE", others++ ? "|" : ""); if (pc->flags & RCLOCAL_IFILE) sprintf(&buf[strlen(buf)], "%sRCLOCAL_IFILE", others++ ? "|" : ""); if (pc->flags & READLINE) sprintf(&buf[strlen(buf)], "%sREADLINE", others++ ? "|" : ""); if (pc->flags & _SIGINT_) sprintf(&buf[strlen(buf)], "%s_SIGINT_", others++ ? "|" : ""); if (pc->flags & IN_RESTART) sprintf(&buf[strlen(buf)], "%sIN_RESTART", others++ ? "|" : ""); if (pc->flags & KERNEL_DEBUG_QUERY) sprintf(&buf[strlen(buf)], "%sKERNEL_DEBUG_QUERY", others++ ? "|" : ""); if (pc->flags & DEVMEM) sprintf(&buf[strlen(buf)], "%sDEVMEM", others++ ? "|" : ""); if (pc->flags & MEMMOD) sprintf(&buf[strlen(buf)], "%sMEMMOD", others++ ? "|" : ""); if (pc->flags & MODPRELOAD) sprintf(&buf[strlen(buf)], "%sMODPRELOAD", others++ ? "|" : ""); if (pc->flags & REM_LIVE_SYSTEM) sprintf(&buf[strlen(buf)], "%sREM_LIVE_SYSTEM", others++ ? "|" : ""); if (pc->flags & NAMELIST_LOCAL) sprintf(&buf[strlen(buf)], "%sNAMELIST_LOCAL", others++ ? "|" : ""); if (pc->flags & DUMPFILE_SAVED) sprintf(&buf[strlen(buf)], "%sDUMPFILE_SAVED", others++ ? "|" : ""); if (pc->flags & NAMELIST_SAVED) sprintf(&buf[strlen(buf)], "%sNAMELIST_SAVED", others++ ? "|" : ""); if (pc->flags & UNLINK_NAMELIST) sprintf(&buf[strlen(buf)], "%sUNLINK_NAMELIST", others++ ? "|" : ""); if (pc->flags & NAMELIST_UNLINKED) sprintf(&buf[strlen(buf)], "%sNAMELIST_UNLINKED", others++ ? "|" : ""); if (pc->flags & REM_MCLXCD) sprintf(&buf[strlen(buf)], "%sREM_MCLXCD", others++ ? "|" : ""); if (pc->flags & REM_LKCD) sprintf(&buf[strlen(buf)], "%sREM_LKCD", others++ ? "|" : ""); if (pc->flags & NAMELIST_NO_GZIP) sprintf(&buf[strlen(buf)], "%sNAMELIST_NO_GZIP", others++ ? 
"|" : ""); if (pc->flags & UNLINK_MODULES) sprintf(&buf[strlen(buf)], "%sUNLINK_MODULES", others++ ? "|" : ""); if (pc->flags & S390D) sprintf(&buf[strlen(buf)], "%sS390D", others++ ? "|" : ""); if (pc->flags & REM_S390D) sprintf(&buf[strlen(buf)], "%sREM_S390D", others++ ? "|" : ""); if (pc->flags & NETDUMP) sprintf(&buf[strlen(buf)], "%sNETDUMP", others++ ? "|" : ""); if (pc->flags & XENDUMP) sprintf(&buf[strlen(buf)], "%sXENDUMP", others++ ? "|" : ""); if (pc->flags & KDUMP) sprintf(&buf[strlen(buf)], "%sKDUMP", others++ ? "|" : ""); if (pc->flags & SADUMP) sprintf(&buf[strlen(buf)], "%sSADUMP", others++ ? "|" : ""); if (pc->flags & SYSRQ) sprintf(&buf[strlen(buf)], "%sSYSRQ", others++ ? "|" : ""); if (pc->flags & REM_NETDUMP) sprintf(&buf[strlen(buf)], "%sREM_NETDUMP", others++ ? "|" : ""); if (pc->flags & DISKDUMP) sprintf(&buf[strlen(buf)], "%sDISKDUMP", others++ ? "|" : ""); if (pc->flags & SYSMAP) sprintf(&buf[strlen(buf)], "%sSYSMAP", others++ ? "|" : ""); if (pc->flags & SYSMAP_ARG) sprintf(&buf[strlen(buf)], "%sSYSMAP_ARG", others++ ? "|" : ""); if (pc->flags & DATADEBUG) sprintf(&buf[strlen(buf)], "%sDATADEBUG", others++ ? "|" : ""); if (pc->flags & FINDKERNEL) sprintf(&buf[strlen(buf)], "%sFINDKERNEL", others++ ? "|" : ""); if (pc->flags & VERSION_QUERY) sprintf(&buf[strlen(buf)], "%sVERSION_QUERY", others++ ? "|" : ""); if (pc->flags & READNOW) sprintf(&buf[strlen(buf)], "%sREADNOW", others++ ? "|" : ""); if (pc->flags & NOCRASHRC) sprintf(&buf[strlen(buf)], "%sNOCRASHRC", others++ ? "|" : ""); if (pc->flags & INIT_IFILE) sprintf(&buf[strlen(buf)], "%sINIT_IFILE", others++ ? "|" : ""); if (pc->flags & XEN_HYPER) sprintf(&buf[strlen(buf)], "%sXEN_HYPER", others++ ? "|" : ""); if (pc->flags & XEN_CORE) sprintf(&buf[strlen(buf)], "%sXEN_CORE", others++ ? "|" : ""); if (pc->flags & PLEASE_WAIT) sprintf(&buf[strlen(buf)], "%sPLEASE_WAIT", others++ ? "|" : ""); if (pc->flags & IFILE_ERROR) sprintf(&buf[strlen(buf)], "%sIFILE_ERROR", others++ ? 
"|" : ""); if (pc->flags & MINIMAL_MODE) sprintf(&buf[strlen(buf)], "%sMINIMAL_MODE", others++ ? "|" : ""); if (pc->flags & CRASHBUILTIN) sprintf(&buf[strlen(buf)], "%sCRASHBUILTIN", others++ ? "|" : ""); if (pc->flags & PRELOAD_EXTENSIONS) sprintf(&buf[strlen(buf)], "%sPRELOAD_EXTENSIONS", others++ ? "|" : ""); if (pc->flags & PROC_KCORE) sprintf(&buf[strlen(buf)], "%sPROC_KCORE", others++ ? "|" : ""); if (pc->flags) strcat(buf, ")"); if (strlen(buf)) { if (strlen(buf) > 46) { sprintf(buf2, "\n%s\n", mkstring(buf, 80, CENTER|LJUST, NULL)); if (strlen(buf2) <= 82) fprintf(fp, "%s", buf2); else { for (i = strlen(buf2)-1; i; i--) { if ((buf2[i] == '|') && (i < 80)) break; } strcpy(buf, buf2); buf[i+1] = NULLCHAR; fprintf(fp, "%s\n %s", buf, &buf2[i+1]); } } else fprintf(fp, "%s\n", buf); } others = 0; fprintf(fp, " flags2: %llx (", pc->flags2); if (pc->flags2 & FLAT) fprintf(fp, "%sFLAT", others++ ? "|" : ""); if (pc->flags2 & ELF_NOTES) fprintf(fp, "%sELF_NOTES", others++ ? "|" : ""); if (pc->flags2 & GET_OSRELEASE) fprintf(fp, "%sGET_OSRELEASE", others++ ? "|" : ""); if (pc->flags2 & REMOTE_DAEMON) fprintf(fp, "%sREMOTE_DAEMON", others++ ? "|" : ""); if (pc->flags2 & LIVE_DUMP) fprintf(fp, "%sLIVE_DUMP", others++ ? "|" : ""); if (pc->flags2 & RADIX_OVERRIDE) fprintf(fp, "%sRADIX_OVERRIDE", others++ ? "|" : ""); if (pc->flags2 & QEMU_MEM_DUMP_ELF) fprintf(fp, "%sQEMU_MEM_DUMP_ELF", others++ ? "|" : ""); if (pc->flags2 & QEMU_MEM_DUMP_COMPRESSED) fprintf(fp, "%sQEMU_MEM_DUMP_COMPRESSED", others++ ? "|" : ""); if (pc->flags2 & GET_LOG) fprintf(fp, "%sGET_LOG", others++ ? "|" : ""); if (pc->flags2 & VMCOREINFO) fprintf(fp, "%sVMCOREINFO", others++ ? "|" : ""); if (pc->flags2 & ALLOW_FP) fprintf(fp, "%sALLOW_FP", others++ ? "|" : ""); if (pc->flags2 & RAMDUMP) fprintf(fp, "%sRAMDUMP", others++ ? "|" : ""); if (pc->flags2 & OFFLINE_HIDE) fprintf(fp, "%sOFFLINE_HIDE", others++ ? "|" : ""); if (pc->flags2 & INCOMPLETE_DUMP) fprintf(fp, "%sINCOMPLETE_DUMP", others++ ? 
"|" : ""); if (pc->flags2 & SNAP) fprintf(fp, "%sSNAP", others++ ? "|" : ""); if (pc->flags2 & EXCLUDED_VMEMMAP) fprintf(fp, "%sEXCLUDED_VMEMMAP", others++ ? "|" : ""); if (pc->flags2 & MEMSRC_LOCAL) fprintf(fp, "%sMEMSRC_LOCAL", others++ ? "|" : ""); if (pc->flags2 & REDZONE) fprintf(fp, "%sREDZONE", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " namelist: %s\n", pc->namelist); fprintf(fp, " dumpfile: %s\n", pc->dumpfile); fprintf(fp, " live_memsrc: %s\n", pc->live_memsrc); fprintf(fp, " system_map: %s\n", pc->system_map); fprintf(fp, " namelist_debug: %s\n", pc->namelist_debug); fprintf(fp, " debuginfo_file: %s\n", pc->debuginfo_file); fprintf(fp, " namelist_orig: %s\n", pc->namelist_orig); fprintf(fp, "namelist_dbg_orig: %s\n", pc->namelist_debug_orig); fprintf(fp, " kvmdump_mapfile: %s\n", pc->kvmdump_mapfile); fprintf(fp, " memory_module: %s\n", pc->memory_module); fprintf(fp, " memory_device: %s\n", pc->memory_device); fprintf(fp, " machine_type: %s\n", pc->machine_type); fprintf(fp, " editing_mode: %s\n", pc->editing_mode); fprintf(fp, " nfd: %d\n", pc->nfd); fprintf(fp, " mfd: %d\n", pc->mfd); fprintf(fp, " kfd: %d\n", pc->kfd); fprintf(fp, " dfd: %d\n", pc->dfd); fprintf(fp, " confd: %d\n", pc->confd); fprintf(fp, " home: %s\n", pc->home); fprintf(fp, " command_line: "); if (STRNEQ(pc->command_line, args[0])) fprintf(fp, "%s\n", concat_args(buf, 0, FALSE)); else fprintf(fp, "%s\n", pc->command_line); fprintf(fp, " orig_line: %s\n", pc->orig_line); fprintf(fp, " eoc_index: %d\n", pc->eoc_index); fprintf(fp, " readline: %lx\n", (ulong)pc->readline); fprintf(fp, " my_tty: %s\n", pc->my_tty); fprintf(fp, " debug: %ld\n", pc->debug); fprintf(fp, " debug_save: %ld\n", pc->debug_save); fprintf(fp, " console: %s\n", pc->console); fprintf(fp, " redhat_debug_loc: %s\n", pc->redhat_debug_loc); fprintf(fp, " pipefd[2]: %d,%d\n", pc->pipefd[0], pc->pipefd[1]); fprintf(fp, " nullfp: %lx\n", (ulong)pc->nullfp); fprintf(fp, " stdpipe: %lx\n", (ulong)pc->stdpipe); 
fprintf(fp, " pipe: %lx\n", (ulong)pc->pipe); fprintf(fp, " ifile: %lx\n", (ulong)pc->ifile); fprintf(fp, " ofile: %lx\n", (ulong)pc->ofile); fprintf(fp, " ifile_pipe: %lx\n", (ulong)pc->ifile_pipe); fprintf(fp, " ifile_ofile: %lx\n", (ulong)pc->ifile_ofile); fprintf(fp, " args_ifile: %lx\n", (ulong)pc->args_ifile); fprintf(fp, " input_file: %s\n", pc->input_file); fprintf(fp, "ifile_in_progress: %lx (", pc->ifile_in_progress); others = 0; if (pc->ifile_in_progress & RCHOME_IFILE) fprintf(fp, "%sRCHOME_IFILE", others++ ? "|" : ""); if (pc->ifile_in_progress & RCLOCAL_IFILE) fprintf(fp, "%sRCLOCAL_IFILE", others++ ? "|" : ""); if (pc->ifile_in_progress & CMDLINE_IFILE) fprintf(fp, "%sCMDLINE_IFILE", others++ ? "|" : ""); if (pc->ifile_in_progress & RUNTIME_IFILE) fprintf(fp, "%sRUNTIME_IFILE", others++ ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " ifile_offset: %lld\n", (ulonglong)pc->ifile_offset); fprintf(fp, "runtime_ifile_cmd: %s\n", pc->runtime_ifile_cmd ? pc->runtime_ifile_cmd : "(unused)"); fprintf(fp, " scroll_command: "); switch (pc->scroll_command) { case SCROLL_NONE: fprintf(fp, "SCROLL_NONE\n"); break; case SCROLL_LESS: fprintf(fp, "SCROLL_LESS\n"); break; case SCROLL_MORE: fprintf(fp, "SCROLL_MORE\n"); break; case SCROLL_CRASHPAGER: fprintf(fp, "SCROLL_CRASHPAGER (%s)\n", getenv("CRASHPAGER")); break; } buf[0] = NULLCHAR; fprintf(fp, " redirect: %lx ", pc->redirect); if (pc->redirect) sprintf(buf, "("); others = 0; if (pc->redirect & FROM_COMMAND_LINE) sprintf(&buf[strlen(buf)], "%sFROM_COMMAND_LINE", others++ ? "|" : ""); if (pc->redirect & FROM_INPUT_FILE) sprintf(&buf[strlen(buf)], "%sFROM_INPUT_FILE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_NOT_DONE) sprintf(&buf[strlen(buf)], "%sREDIRECT_NOT_DONE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_TO_PIPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_TO_PIPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_TO_STDPIPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_TO_STDPIPE", others++ ? 
"|" : ""); if (pc->redirect & REDIRECT_TO_FILE) sprintf(&buf[strlen(buf)], "%sREDIRECT_TO_FILE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_FAILURE) sprintf(&buf[strlen(buf)], "%sREDIRECT_FAILURE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_SHELL_ESCAPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_SHELL_ESCAPE", others++ ? "|" : ""); if (pc->redirect & REDIRECT_SHELL_COMMAND) sprintf(&buf[strlen(buf)], "%sREDIRECT_SHELL_COMMAND", others++ ? "|" : ""); if (pc->redirect & REDIRECT_PID_KNOWN) sprintf(&buf[strlen(buf)], "%sREDIRECT_PID_KNOWN", others++ ? "|" : ""); if (pc->redirect & REDIRECT_MULTI_PIPE) sprintf(&buf[strlen(buf)], "%sREDIRECT_MULTI_PIPE", others++ ? "|" : ""); if (pc->redirect) strcat(buf, ")"); if (strlen(buf)) { if (strlen(buf) > 54) fprintf(fp, "\n%s\n", mkstring(buf, 80, CENTER|LJUST, NULL)); else fprintf(fp, "%s\n", buf); } if (!pc->redirect) fprintf(fp, "\n"); fprintf(fp, " stdpipe_pid: %d\n", pc->stdpipe_pid); fprintf(fp, " pipe_pid: %d\n", pc->pipe_pid); fprintf(fp, " pipe_shell_pid: %d\n", pc->pipe_shell_pid); fprintf(fp, " pipe_command: %s\n", pc->pipe_command); if (pc->symfile && pc->symfile2) { fprintf(fp, " symfile: %lx (%ld)\n", (ulong)pc->symfile, (ulong)ftell(pc->symfile)); fprintf(fp, " symfile2: %lx (%ld)\n", (ulong)pc->symfile2, (ulong)ftell(pc->symfile2)); } else { fprintf(fp, " symfile: %lx \n", (ulong)pc->symfile); fprintf(fp, " symfile2: %lx \n", (ulong)pc->symfile2); } fprintf(fp, " tmpfile: %lx\n", (ulong)pc->tmpfile); fprintf(fp, " saved_fp: %lx\n", (ulong)pc->saved_fp); fprintf(fp, " tmp_fp: %lx\n", (ulong)pc->tmp_fp); fprintf(fp, " tmpfile2: %lx\n", (ulong)pc->tmpfile2); fprintf(fp, " cmd_table: %s\n", XEN_HYPER_MODE() ? 
"xen_hyper_command_table" : "linux_command_table"); fprintf(fp, " curcmd: %s\n", pc->curcmd); fprintf(fp, " lastcmd: %s\n", pc->lastcmd); fprintf(fp, " cur_gdb_cmd: %d %s\n", pc->cur_gdb_cmd, gdb_command_string(pc->cur_gdb_cmd, buf, FALSE)); fprintf(fp, " last_gdb_cmd: %d %s\n", pc->last_gdb_cmd, gdb_command_string(pc->last_gdb_cmd, buf, FALSE)); fprintf(fp, " cur_req: %lx\n", (ulong)pc->cur_req); fprintf(fp, " cmdgencur: %ld\n", pc->cmdgencur); fprintf(fp, " curcmd_flags: %lx (", pc->curcmd_flags); others = 0; if (pc->curcmd_flags & XEN_MACHINE_ADDR) fprintf(fp, "%sXEN_MACHINE_ADDR", others ? "|" : ""); if (pc->curcmd_flags & REPEAT) fprintf(fp, "%sREPEAT", others ? "|" : ""); if (pc->curcmd_flags & IDLE_TASK_SHOWN) fprintf(fp, "%sIDLE_TASK_SHOWN", others ? "|" : ""); if (pc->curcmd_flags & TASK_SPECIFIED) fprintf(fp, "%sTASK_SPECIFIED", others ? "|" : ""); if (pc->curcmd_flags & MEMTYPE_UVADDR) fprintf(fp, "%sMEMTYPE_UVADDR", others ? "|" : ""); if (pc->curcmd_flags & MEMTYPE_FILEADDR) fprintf(fp, "%sMEMTYPE_FILEADDR", others ? "|" : ""); if (pc->curcmd_flags & HEADER_PRINTED) fprintf(fp, "%sHEADER_PRINTED", others ? "|" : ""); if (pc->curcmd_flags & BAD_INSTRUCTION) fprintf(fp, "%sBAD_INSTRUCTION", others ? "|" : ""); if (pc->curcmd_flags & UD2A_INSTRUCTION) fprintf(fp, "%sUD2A_INSTRUCTION", others ? "|" : ""); if (pc->curcmd_flags & IRQ_IN_USE) fprintf(fp, "%sIRQ_IN_USE", others ? "|" : ""); if (pc->curcmd_flags & IGNORE_ERRORS) fprintf(fp, "%sIGNORE_ERRORS", others ? "|" : ""); if (pc->curcmd_flags & FROM_RCFILE) fprintf(fp, "%sFROM_RCFILE", others ? "|" : ""); if (pc->curcmd_flags & MEMTYPE_KVADDR) fprintf(fp, "%sMEMTYPE_KVADDR", others ? "|" : ""); if (pc->curcmd_flags & NO_MODIFY) fprintf(fp, "%sNO_MODIFY", others ? "|" : ""); if (pc->curcmd_flags & MOD_SECTIONS) fprintf(fp, "%sMOD_SECTIONS", others ? "|" : ""); if (pc->curcmd_flags & MOD_READNOW) fprintf(fp, "%sMOD_READNOW", others ? 
"|" : ""); if (pc->curcmd_flags & MM_STRUCT_FORCE) fprintf(fp, "%sMM_STRUCT_FORCE", others ? "|" : ""); if (pc->curcmd_flags & CPUMASK) fprintf(fp, "%sCPUMASK", others ? "|" : ""); if (pc->curcmd_flags & PARTIAL_READ_OK) fprintf(fp, "%sPARTIAL_READ_OK", others ? "|" : ""); fprintf(fp, ")\n"); fprintf(fp, " curcmd_private: %llx\n", pc->curcmd_private); fprintf(fp, " cmd_cleanup: %lx\n", (ulong)pc->cmd_cleanup); fprintf(fp, " cmd_cleanup_arg: %lx\n", (ulong)pc->cmd_cleanup_arg); fprintf(fp, " sigint_cnt: %d\n", pc->sigint_cnt); fprintf(fp, " sigaction: %lx\n", (ulong)&pc->sigaction); fprintf(fp, " gdb_sigaction: %lx\n", (ulong)&pc->gdb_sigaction); fprintf(fp, " main_loop_env: %lx\n", (ulong)&pc->main_loop_env); fprintf(fp, " foreach_loop_env: %lx\n", (ulong)&pc->foreach_loop_env); fprintf(fp, "gdb_interface_env: %lx\n", (ulong)&pc->gdb_interface_env); fprintf(fp, " termios_orig: %lx\n", (ulong)&pc->termios_orig); fprintf(fp, " termios_raw: %lx\n", (ulong)&pc->termios_raw); fprintf(fp, " ncmds: %d\n", pc->ncmds); fprintf(fp, " cmdlist: %lx\n", (ulong)pc->cmdlist); fprintf(fp, " cmdlistsz: %d\n", pc->cmdlistsz); fprintf(fp, " output_radix: %d (%s)\n", pc->output_radix, pc->output_radix == 16 ? "hex" : ((pc->output_radix == 10) ? 
"decimal" : "???")); fprintf(fp, " server: %s\n", pc->server); fprintf(fp, " server_pid: %ld\n", pc->server_pid); fprintf(fp, " port: %d\n", pc->port); fprintf(fp, " sockfd: %d\n", pc->sockfd); fprintf(fp, " server_memsrc: %s\n", pc->server_memsrc); fprintf(fp, " server_namelist: %s\n", pc->server_namelist); fprintf(fp, " rmfd: %d\n", pc->rmfd); fprintf(fp, " rkfd: %d\n", pc->rkfd); fprintf(fp, " rcvbufsize: %ld\n", pc->rcvbufsize); fprintf(fp, " readmem: "); if ((p1 = readmem_function_name())) fprintf(fp, "%s()\n", p1); else fprintf(fp, "%lx\n", (ulong)pc->readmem); fprintf(fp, " writemem: "); if ((p1 = writemem_function_name())) fprintf(fp, "%s()\n", p1); else fprintf(fp, "%lx\n", (ulong)pc->writemem); fprintf(fp, " dumpfile memory: %d\n", dumpfile_memory(DUMPFILE_MEM_USED)); fprintf(fp, " curext: %lx\n", (ulong)pc->curext); fprintf(fp, " sbrk: %lx\n", (ulong)pc->sbrk); fprintf(fp, " cleanup: %s\n", pc->cleanup); fprintf(fp, " scope: %lx %s\n", pc->scope, pc->scope ? "" : "(not set)"); fprintf(fp, " nr_hash_queues: %ld\n", pc->nr_hash_queues); fprintf(fp, " read_vmcoreinfo: %lx\n", (ulong)pc->read_vmcoreinfo); } char * readmem_function_name(void) { if (pc->readmem == read_dev_mem) return("read_dev_mem"); else if (pc->readmem == read_mclx_dumpfile) return("read_mclx_dumpfile"); else if (pc->readmem == read_lkcd_dumpfile) return("read_lkcd_dumpfile"); else if (pc->readmem == read_daemon) return("read_daemon"); else if (pc->readmem == read_netdump) return("read_netdump"); else if (pc->readmem == read_xendump) return("read_xendump"); else if (pc->readmem == read_kdump) return("read_kdump"); else if (pc->readmem == read_memory_device) return("read_memory_device"); else if (pc->readmem == read_xendump_hyper) return("read_xendump_hyper"); else if (pc->readmem == read_diskdump) return("read_diskdump"); else if (pc->readmem == read_proc_kcore) return("read_proc_kcore"); else if (pc->readmem == read_sadump) return("read_sadump"); else if (pc->readmem == read_s390_dumpfile) 
return("read_s390_dumpfile"); else if (pc->readmem == read_ramdump) return("read_ramdump"); else if (pc->readmem == read_vmware_vmss) return("read_vmware_vmss"); else return NULL; } char * writemem_function_name(void) { if (pc->writemem == write_dev_mem) return("write_dev_mem"); else if (pc->writemem == write_mclx_dumpfile) return("write_mclx_dumpfile"); else if (pc->writemem == write_lkcd_dumpfile) return("write_lkcd_dumpfile"); else if (pc->writemem == write_daemon) return("write_daemon"); else if (pc->writemem == write_netdump) return("write_netdump"); else if (pc->writemem == write_xendump) return("write_xendump"); else if (pc->writemem == write_kdump) return("write_kdump"); else if (pc->writemem == write_memory_device) return("write_memory_device"); // else if (pc->writemem == write_xendump_hyper) // return("write_xendump_hyper"); else if (pc->writemem == write_diskdump) return("write_diskdump"); else if (pc->writemem == write_proc_kcore) return("write_proc_kcore"); else if (pc->writemem == write_sadump) return("write_sadump"); else if (pc->writemem == write_s390_dumpfile) return("write_s390_dumpfile"); else if (pc->writemem == write_vmware_vmss) return("write_vmware_vmss"); else return NULL; } /* * "help -B" output */ void dump_build_data(void) { fprintf(fp, " build_command: %s\n", build_command); fprintf(fp, " build_data: %s\n", build_data); fprintf(fp, " build_target: %s\n", build_target); fprintf(fp, " build_version: %s\n", build_version); fprintf(fp, "compiler version: %s\n", compiler_version); } /* * Perform any cleanup activity here. 
 */
int
clean_exit(int status)
{
	/* Unload the live-memory access driver if we installed one. */
	if (pc->flags & MEMMOD)
		cleanup_memory_driver();

	/*
	 * Remove working copies of the namelist files; *_orig being set
	 * indicates pc->namelist/namelist_debug are temporaries --
	 * presumably uncompressed copies (TODO: confirm against setup code).
	 */
	if ((pc->namelist_orig) && file_exists(pc->namelist, NULL))
		unlink(pc->namelist);
	if ((pc->namelist_debug_orig) && file_exists(pc->namelist_debug, NULL))
		unlink(pc->namelist_debug);
	if (pc->cleanup && file_exists(pc->cleanup, NULL))
		unlink(pc->cleanup);

	ramdump_cleanup();
	exit(status);
}

/*
 * Check whether this session is for xen hypervisor analysis.
 */
static void
check_xen_hyper(void)
{
	if (!pc->namelist)
		return;

	/* A namelist whose basename is "xen-syms" turns on XEN_HYPER mode. */
	if (!XEN_HYPER_MODE()) {
		if (STRNEQ(basename(pc->namelist), "xen-syms"))
			pc->flags |= XEN_HYPER;
		else
			return;
	}

#ifdef XEN_HYPERVISOR_ARCH
	/* Swap in the hypervisor command table and xendump reader. */
	pc->cmd_table = xen_hyper_command_table;
	if (pc->flags & XENDUMP)
		pc->readmem = read_xendump_hyper;
#else
	error(FATAL, XEN_HYPERVISOR_NOT_SUPPORTED);
#endif
}

/*
 * Reject untrusted .crashrc, $HOME/.crashrc,
 * .gdbinit, and $HOME/.gdbinit files.
 */
static char *untrusted_file_list[4] = { 0 };

/*
 * Return TRUE if the file should not be trusted, remembering its name
 * (up to four) for the later show_untrusted_files() warning.  A file is
 * trusted only when fstat() succeeds, it is owned by the current user,
 * and it is not world-writable.
 */
int
untrusted_file(FILE *filep, char *filename)
{
	struct stat sbuf;
	int i;

	if (filep && (fstat(fileno(filep), &sbuf) == 0) &&
	    (sbuf.st_uid == getuid()) && !(sbuf.st_mode & S_IWOTH))
		return FALSE;

	/* Record the name in the first free slot. */
	for (i = 0; i < 4; i++) {
		if (!untrusted_file_list[i]) {
			untrusted_file_list[i] = strdup(filename);
			break;
		}
	}

	return TRUE;
}

/* Warn once per recorded untrusted file, releasing each saved name. */
static void
show_untrusted_files(void)
{
	int i, cnt;

	for (i = cnt = 0; i < 4; i++) {
		if (untrusted_file_list[i]) {
			error(WARNING, "not using untrusted file: \"%s\"\n",
				untrusted_file_list[i]);
			/* NOTE(review): slot is freed but not reset to NULL;
			   fine if this runs once -- verify it is not reentered. */
			free(untrusted_file_list[i]);
			cnt++;
		}
	}
	if (cnt)
		fprintf(fp, "\n");
}

/*
 * If GET_OSRELEASE is still set, the OS release has been
 * found and displayed.
 */
static void
get_osrelease(char *dumpfile)
{
	int retval = 1;

	/*
	 * Probe the dumpfile format; per the comment above, GET_OSRELEASE
	 * remaining set after the probe means the release was found and
	 * displayed, so exit with status 0.
	 */
	if (is_flattened_format(dumpfile)) {
		if (pc->flags2 & GET_OSRELEASE)
			retval = 0;
	} else if (is_diskdump(dumpfile)) {
		if (pc->flags2 & GET_OSRELEASE)
			retval = 0;
	} else if (is_kdump(dumpfile, KDUMP_LOCAL)) {
		if (pc->flags2 & GET_OSRELEASE)
			retval = 0;
	}

	if (retval)
		fprintf(fp, "unknown\n");

	clean_exit(retval);
}

/*
 * Handle "--log": probe the dumpfile format; GET_LOG remaining set
 * indicates the log was extracted, otherwise report the absence of
 * VMCOREINFO data.  Exits in either case.
 */
static void
get_log(char *dumpfile)
{
	int retval = 1;

	/* Flattened (makedumpfile -F) images are handled via the FLAT flag. */
	if (is_flattened_format(dumpfile))
		pc->flags2 |= FLAT;

	if (is_diskdump(dumpfile)) {
		if (pc->flags2 & GET_LOG)
			retval = 0;
	} else if (is_kdump(dumpfile, KDUMP_LOCAL)) {
		if (pc->flags2 & GET_LOG)
			retval = 0;
	}

	if (retval)
		fprintf(fp, "%s: no VMCOREINFO data\n", dumpfile);

	clean_exit(retval);
}

/* Stub callback that always returns NULL (i.e. no vmcoreinfo data). */
static char *
no_vmcoreinfo(const char *unused)
{
	return NULL;
}
crash-7.2.1/tools.c0000775000000000000000000045632213240637645012656 0ustar rootroot/* tools.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2017 David Anderson
 * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include "defs.h"
#include /* NOTE(review): header name lost in archive extraction -- restore (likely <ctype.h>) before building */

static void print_number(struct number_option *, int, int);
static long alloc_hq_entry(void);
struct hq_entry;
static void dealloc_hq_entry(struct hq_entry *);
static void show_options(void);
static void dump_struct_members(struct list_data *, int, ulong);
static void rbtree_iteration(ulong, struct tree_data *, char *);
static void dump_struct_members_for_tree(struct tree_data *, int, ulong);

/*
 * Per-member description set: parallel arrays describing each requested
 * structure member; filled by fill_member_offsets() and consumed by
 * dump_struct_members_fast()/print_value() (see declarations below).
 */
struct req_entry {
	char *arg, *name, **member;
	int *is_str, *is_ptr;
	ulong *width, *offset;
	int count;
};

static void print_value(struct req_entry *, unsigned int, ulong, unsigned int);
static struct req_entry *fill_member_offsets(char *);
static void dump_struct_members_fast(struct req_entry *, int, ulong);

/*
 * General purpose error reporting routine.  Type INFO prints the message
 * and returns.  Type FATAL aborts the command in progress, and longjmps
 * back to the appropriate recovery location.  If a FATAL occurs during
 * program initialization, exit() is called.
 *
 * The idea is to get the message out so that it is seen by the user
 * regardless of how the command output may be piped or redirected.
 * Besides stderr, check whether the output is going to a file or pipe, and
 * if so, intermingle the error message there as well.
 */
int
__error(int type, char *fmt, ...)
{
	int end_of_line, new_line;
	char buf[BUFSIZE];
	char *spacebuf;
	void *retaddr[NUMBER_STACKFRAMES] = { 0 };
	va_list ap;

	/* NOTE(review): despite the comment above, output goes to
	   stdout/stdpipe/fp below, not stderr. */

	if (CRASHDEBUG(1) || (pc->flags & DROP_CORE)) {
		SAVE_RETURN_ADDRESS(retaddr);
		console("error() trace: %lx => %lx => %lx => %lx\n",
			retaddr[3], retaddr[2], retaddr[1], retaddr[0]);
	}

	/* Format the caller's message into buf. */
	va_start(ap, fmt);
	(void)vsnprintf(buf, BUFSIZE, fmt, ap);
	va_end(ap);

	/* A NULL format with a fatal type is a silent exit request. */
	if (!fmt && FATAL_ERROR(type)) {
		fprintf(stdout, "\n");
		clean_exit(1);
	}

	end_of_line = FATAL_ERROR(type) && !(pc->flags & RUNTIME);

	/* A leading '\n' in the message requests a blank line first. */
	if ((new_line = (buf[0] == '\n')))
		shift_string_left(buf, 1);
	else if (pc->flags & PLEASE_WAIT)
		new_line = TRUE;

	/* CONT messages are aligned under the current command name. */
	if (type == CONT)
		spacebuf = space(strlen(pc->curcmd));
	else
		spacebuf = NULL;

	if (pc->stdpipe) {
		fprintf(pc->stdpipe, "%s%s%s %s%s",
			new_line ? "\n" : "",
			type == CONT ? spacebuf : pc->curcmd,
			type == CONT ? " " : ":",
			type == WARNING ? "WARNING: " :
			type == NOTE ? "NOTE: " : "",
			buf);
		fflush(pc->stdpipe);
	} else {
		fprintf(stdout, "%s%s%s %s%s",
			new_line || end_of_line ? "\n" : "",
			type == WARNING ? "WARNING" :
			type == NOTE ? "NOTE" :
			type == CONT ? spacebuf : pc->curcmd,
			type == CONT ? " " : ":",
			buf,
			end_of_line ? "\n" : "");
		fflush(stdout);
	}

	/* Mirror the message into redirected/piped output as well. */
	if ((fp != stdout) && (fp != pc->stdpipe) && (fp != pc->tmpfile)) {
		fprintf(fp, "%s%s%s %s",
			new_line ? "\n" : "",
			type == WARNING ? "WARNING" :
			type == NOTE ? "NOTE" :
			type == CONT ? spacebuf : pc->curcmd,
			type == CONT ? " " : ":",
			buf);
		fflush(fp);
	}

	/* Debug aid: deliberately segfault so a core file is produced. */
	if ((pc->flags & DROP_CORE) && (type != NOTE)) {
		dump_trace(retaddr);
		SIGACTION(SIGSEGV, SIG_DFL, &pc->sigaction, NULL);
		drop_core("DROP_CORE flag set: forcing a segmentation fault\n");
	}

	switch (type)
	{
	case FATAL:
		if (pc->flags & IN_FOREACH)
			RESUME_FOREACH();
		/* FALLTHROUGH */

	case FATAL_RESTART:
		if (pc->flags & RUNTIME)
			RESTART();	/* longjmp back to the main loop */
		else {
			if (REMOTE())
				remote_exit();
			clean_exit(1);
		}

	default:
	case INFO:
	case NOTE:
	case WARNING:
		return FALSE;
	}
}

/*
 * Parse a line into tokens, populate the passed-in argv[] array, and return
 * the count of arguments found.
This function modifies the passed-string
 * by inserting a NULL character at the end of each token.  Expressions
 * encompassed by parentheses, and strings encompassed by apostrophes, are
 * collected into single tokens.
 */
int
parse_line(char *str, char *argv[])
{
	int i, j, k;
	int string;
	int expression;

	/* Start with a fully-NULL argv[] vector. */
	for (i = 0; i < MAXARGS; i++)
		argv[i] = NULL;

	clean_line(str);

	if (str == NULL || strlen(str) == 0)
		return(0);

	i = j = k = 0;		/* i: scan index, j: argv count, k: token start */
	string = expression = FALSE;

	/*
	 * Special handling for when the first character is a '"'.
	 */
	if (str[0] == '"') {
next:
		/* Scan to the closing quote (or end of string). */
		do {
			i++;
		} while ((str[i] != NULLCHAR) && (str[i] != '"'));

		switch (str[i])
		{
		case NULLCHAR:
			/* Unterminated quote: the remainder is one argument. */
			argv[j] = &str[k];
			return j+1;
		case '"':
			argv[j++] = &str[k+1];
			str[i++] = NULLCHAR;
			/* Back-to-back quoted strings restart the scan. */
			if (str[i] == '"') {
				k = i;
				goto next;
			}
			break;
		}
	}
	else
		argv[j++] = str;

	while (TRUE) {
		if (j == MAXARGS)
			error(FATAL, "too many arguments in string!\n");

		/* Advance to the end of the current token. */
		while (str[i] != ' ' && str[i] != '\t' && str[i] != NULLCHAR) {
			i++;
		}

		switch (str[i])
		{
		case ' ':
		case '\t':
			/* Terminate this token and skip run of separators. */
			str[i++] = NULLCHAR;

			while (str[i] == ' ' || str[i] == '\t') {
				i++;
			}

			/* An opening quote starts a single multi-word token. */
			if (str[i] == '"') {
				str[i] = ' ';
				string = TRUE;
				i++;
			}

			/* An opening paren starts a single expression token. */
			if (!string && str[i] == '(') {
				expression = TRUE;
			}

			if (str[i] != NULLCHAR && str[i] != '\n') {
				argv[j++] = &str[i];

				if (string) {
					string = FALSE;
					while (str[i] != '"' && str[i] != NULLCHAR)
						i++;
					if (str[i] == '"')
						str[i] = ' ';
				}
				if (expression) {
					expression = FALSE;
					while (str[i] != ')' && str[i] != NULLCHAR)
						i++;
				}
				break;
			}
			/* else fall through */
		case '\n':
			str[i] = NULLCHAR;
			/* keep falling... */
		case NULLCHAR:
			argv[j] = NULLCHAR;
			return(j);
		}
	}
}

/*
 * Defuse controversy re: extensions to ctype.h
 */

/* TRUE if c is a space or tab. */
int
whitespace(int c)
{
	return ((c == ' ') ||(c == '\t'));
}

/* TRUE if c is within the 7-bit ASCII range. */
int
ascii(int c)
{
	return ((c >= 0) && ( c <= 0x7f));
}

/*
 * Strip line-ending whitespace and linefeeds.
 */
char *
strip_line_end(char *line)
{
	strip_linefeeds(line);
	strip_ending_whitespace(line);
	return(line);
}

/*
 * Strip line-beginning and line-ending whitespace and linefeeds.
*/ char * clean_line(char *line) { strip_beginning_whitespace(line); strip_linefeeds(line); strip_ending_whitespace(line); return(line); } /* * Strip line-ending linefeeds in a string. */ char * strip_linefeeds(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == '\n') { *p = NULLCHAR; if (--p < line) break; } return(line); } /* * Strip a specified line-ending character in a string. */ char * strip_ending_char(char *line, char c) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); if (*p == c) *p = NULLCHAR; return(line); } /* * Strip a specified line-beginning character in a string. */ char * strip_beginning_char(char *line, char c) { if (line == NULL || strlen(line) == 0) return(line); if (FIRSTCHAR(line) == c) shift_string_left(line, 1); return(line); } /* * Strip line-ending whitespace. */ char * strip_ending_whitespace(char *line) { char *p; if (line == NULL || strlen(line) == 0) return(line); p = &LASTCHAR(line); while (*p == ' ' || *p == '\t') { *p = NULLCHAR; if (p == line) break; p--; } return(line); } /* * Strip line-beginning whitespace. */ char * strip_beginning_whitespace(char *line) { char buf[BUFSIZE]; char *p; if (line == NULL || strlen(line) == 0) return(line); strcpy(buf, line); p = &buf[0]; while (*p == ' ' || *p == '\t') p++; strcpy(line, p); return(line); } /* * End line at first comma found. */ char * strip_comma(char *line) { char *p; if ((p = strstr(line, ","))) *p = NULLCHAR; return(line); } /* * Strip the 0x from the beginning of a hexadecimal value string. */ char * strip_hex(char *line) { if (STRNEQ(line, "0x")) shift_string_left(line, 2); return(line); } /* * Turn a string into upper-case. */ char * upper_case(const char *s, char *buf) { const char *p1; char *p2; p1 = s; p2 = buf; while (*p1) { *p2 = toupper(*p1); p1++, p2++; } *p2 = NULLCHAR; return(buf); } /* * Return pointer to first non-space/tab in a string. 
*/ char * first_nonspace(char *s) { return(s + strspn(s, " \t")); } /* * Return pointer to first space/tab in a string. If none are found, * return a pointer to the string terminating NULL. */ char * first_space(char *s) { return(s + strcspn(s, " \t")); } /* * Replace the first space/tab found in a string with a NULL character. */ char * null_first_space(char *s) { char *p1; p1 = first_space(s); if (*p1) *p1 = NULLCHAR; return s; } /* * Replace any instances of the characters in string c that are found in * string s with the character passed in r. */ char * replace_string(char *s, char *c, char r) { int i, j; for (i = 0; s[i]; i++) { for (j = 0; c[j]; j++) { if (s[i] == c[j]) s[i] = r; } } return s; } void string_insert(char *insert, char *where) { char *p; p = GETBUF(strlen(insert) + strlen(where) + 1); sprintf(p, "%s%s", insert, where); strcpy(where, p); FREEBUF(p); } /* * Find the rightmost instance of a substring in a string. */ char * strstr_rightmost(char *s, char *lookfor) { char *next, *last, *p; for (p = s, last = NULL; *p; p++) { if (!(next = strstr(p, lookfor))) break; last = p = next; } return last; } /* * Prints a string verbatim, allowing strings with % signs to be displayed * without printf conversions. */ void print_verbatim(FILE *filep, char *line) { int i; for (i = 0; i < strlen(line); i++) { fputc(line[i], filep); fflush(filep); } } char * fixup_percent(char *s) { char *p1; if ((p1 = strstr(s, "%")) == NULL) return s; s[strlen(s)+1] = NULLCHAR; memmove(p1+1, p1, strlen(p1)); *p1 = '%'; return s; } /* * Convert an indeterminate number string to either a hexadecimal or decimal * long value. Translate with a bias towards decimal unless HEX_BIAS is set. 
*/ ulong stol(char *s, int flags, int *errptr) { if ((flags & HEX_BIAS) && hexadecimal(s, 0)) return(htol(s, flags, errptr)); else { if (decimal(s, 0)) return(dtol(s, flags, errptr)); else if (hexadecimal(s, 0)) return(htol(s, flags, errptr)); } if (!(flags & QUIET)) error(INFO, "not a valid number: %s\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } ulonglong stoll(char *s, int flags, int *errptr) { if ((flags & HEX_BIAS) && hexadecimal(s, 0)) return(htoll(s, flags, errptr)); else { if (decimal(s, 0)) return(dtoll(s, flags, errptr)); else if (hexadecimal(s, 0)) return(htoll(s, flags, errptr)); } if (!(flags & QUIET)) error(INFO, "not a valid number: %s\n", s); switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return UNUSED; } /* * Append a two-character string to a number to make 1, 2, 3 and 4 into * 1st, 2nd, 3rd, 4th, and so on... */ char * ordinal(ulong val, char *buf) { char *p1; sprintf(buf, "%ld", val); p1 = &buf[strlen(buf)-1]; switch (*p1) { case '1': strcat(buf, "st"); break; case '2': strcat(buf, "nd"); break; case '3': strcat(buf, "rd"); break; default: strcat(buf, "th"); break; } return buf; } /* * Convert a string into: * * 1. an evaluated expression if it's enclosed within parentheses. * 2. to a decimal value if the string is all decimal characters. * 3. to a hexadecimal value if the string is all hexadecimal characters. * 4. to a symbol value if the string is a known symbol. * * If HEX_BIAS is set, pass the value on to htol(). 
ulong
convert(char *s, int flags, int *errptr, ulong numflag)
{
	struct syment *sp;

	/* Parenthesized input goes to the expression evaluator. */
	if ((numflag & NUM_EXPR) && can_eval(s))
		return(eval(s, flags, errptr));

	if ((flags & HEX_BIAS) && (numflag & NUM_HEX) && hexadecimal(s, 0))
		return(htol(s, flags, errptr));
	else {
		if ((numflag & NUM_DEC) && decimal(s, 0))
			return(dtol(s, flags, errptr));
		if ((numflag & NUM_HEX) && hexadecimal(s, 0))
			return(htol(s, flags, errptr));
	}

	/* Last resort: a symbol-table lookup. */
	if ((sp = symbol_search(s)))
		return(sp->value);

	error(INFO, "cannot convert \"%s\"\n", s);

	/* Error disposition is selected by the caller's flags. */
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Convert a string to a hexadecimal long value.
 */
ulong
htol(char *s, int flags, int *errptr)
{
	long i, j;
	ulong n;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto htol_error;
	}

	if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
		s += 2;

	if (strlen(s) > MAX_HEXADDR_STRLEN) {
		if (!(flags & QUIET))
			error(INFO,
			    "input string too large: \"%s\" (%d vs %d)\n",
				s, strlen(s), MAX_HEXADDR_STRLEN);
		goto htol_error;
	}

	for (n = i = 0; s[i] != 0; i++) {
		switch (s[i])
		{
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
			j = (s[i] - 'a') + 10;
			break;
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
			j = (s[i] - 'A') + 10;
			break;
		case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9': case '0':
			j = s[i] - '0';
			break;
		case 'x': case 'X': case 'h':
			/* Embedded radix markers are simply skipped. */
			continue;
		default:
			if (!(flags & QUIET))
				error(INFO, "invalid input: \"%s\"\n", s);
			goto htol_error;
		}
		n = (16 * n) + j;
	}

	return(n);

htol_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return BADADDR;
}

/*
 * Convert a string to a hexadecimal unsigned long long value.
 */
ulonglong
htoll(char *s, int flags, int *errptr)
{
	long i, j;
	ulonglong n;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto htoll_error;
	}

	if (STRNEQ(s, "0x") || STRNEQ(s, "0X"))
		s += 2;

	if (strlen(s) > LONG_LONG_PRLEN) {
		if (!(flags & QUIET))
			error(INFO,
			    "input string too large: \"%s\" (%d vs %d)\n",
				s, strlen(s), LONG_LONG_PRLEN);
		goto htoll_error;
	}

	for (n = i = 0; s[i] != 0; i++) {
		switch (s[i])
		{
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
			j = (s[i] - 'a') + 10;
			break;
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
			j = (s[i] - 'A') + 10;
			break;
		case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9': case '0':
			j = s[i] - '0';
			break;
		case 'x': case 'X': case 'h':
			/* Embedded radix markers are simply skipped. */
			continue;
		default:
			if (!(flags & QUIET))
				error(INFO, "invalid input: \"%s\"\n", s);
			goto htoll_error;
		}
		n = (16 * n) + j;
	}

	return(n);

htoll_error:
	/* NOTE(review): on error this returns UNUSED, whereas the ulong
	   variant htol() returns BADADDR -- confirm callers expect this. */
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Convert a string to a decimal long value.
 */
ulong
dtol(char *s, int flags, int *errptr)
{
	ulong retval;
	char *p, *orig;
	int j;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "received NULL string\n");
		goto dtol_error;
	}

	if (strlen(s) == 0)
		goto dtol_error;

	/* Skip leading spaces (orig keeps the unskipped string for messages). */
	p = orig = &s[0];
	while (*p++ == ' ')
		s++;

	/* Every remaining character must be a decimal digit. */
	for (j = 0; s[j] != '\0'; j++)
		if ((s[j] < '0' || s[j] > '9'))
			break ;

	if (s[j] != '\0') {
		if (!(flags & QUIET))
			error(INFO, "%s: \"%c\" is not a digit 0 - 9\n",
				orig, s[j]);
		goto dtol_error;
	} else if (sscanf(s, "%lu", &retval) != 1) {
		if (!(flags & QUIET))
			error(INFO, "invalid expression\n");
		goto dtol_error;
	}

	return(retval);

dtol_error:
	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Convert a string to a decimal long value.
 */
*/ ulonglong dtoll(char *s, int flags, int *errptr) { ulonglong retval; char *p, *orig; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtoll_error; } if (strlen(s) == 0) goto dtoll_error; p = orig = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0') { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", orig, s[j]); goto dtoll_error; } else if (sscanf(s, "%llu", &retval) != 1) { if (!(flags & QUIET)) error(INFO, "invalid expression\n"); goto dtoll_error; } return (retval); dtoll_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return ((ulonglong)(-1)); } /* * Convert a string to a decimal integer value. */ unsigned int dtoi(char *s, int flags, int *errptr) { unsigned int retval; char *p; int j; if (s == NULL) { if (!(flags & QUIET)) error(INFO, "received NULL string\n"); goto dtoi_error; } p = &s[0]; while (*p++ == ' ') s++; for (j = 0; s[j] != '\0'; j++) if ((s[j] < '0' || s[j] > '9')) break ; if (s[j] != '\0' || (sscanf(s, "%d", (int *)&retval) != 1)) { if (!(flags & QUIET)) error(INFO, "%s: \"%c\" is not a digit 0 - 9\n", s, s[j]); goto dtoi_error; } return(retval); dtoi_error: switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR)) { case FAULT_ON_ERROR: RESTART(); case RETURN_ON_ERROR: if (errptr) *errptr = TRUE; break; } return((unsigned int)(-1)); } /* * Determine whether a string contains only decimal characters. * If count is non-zero, limit the search to count characters. 
*/ int decimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; *p; p++) { switch(*p) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': digits++; case ' ': break; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Extract a hexadecimal number from a string. If first_instance is FALSE, * and two possibilities are found, a fatal error results. */ int extract_hex(char *s, ulong *result, char stripchar, ulong first_instance) { int i, found; char *arglist[MAXARGS]; int argc; ulong value; char *buf; buf = GETBUF(strlen(s)); strcpy(buf, s); argc = parse_line(buf, arglist); for (i = found = value = 0; i < argc; i++) { if (stripchar) strip_ending_char(arglist[i], stripchar); if (hexadecimal(arglist[i], 0)) { if (found) { FREEBUF(buf); error(FATAL, "two hexadecimal args in: \"%s\"\n", strip_linefeeds(s)); } value = htol(arglist[i], FAULT_ON_ERROR, NULL); found = TRUE; if (first_instance) break; } } FREEBUF(buf); if (found) { *result = value; return TRUE; } return FALSE; } /* * Determine whether a string contains only ASCII characters. */ int ascii_string(char *s) { char *p; for (p = &s[0]; *p; p++) { if (!ascii(*p)) return FALSE; } return TRUE; } /* * Check whether a string contains only printable ASCII characters. */ int printable_string(char *s) { char *p; for (p = &s[0]; *p; p++) { if (!isprint(*p)) return FALSE; } return TRUE; } /* * Determine whether a string contains only hexadecimal characters. * If count is non-zero, limit the search to count characters. 
*/ int hexadecimal(char *s, int count) { char *p; int cnt, digits; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; for (p = &s[0], digits = 0; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': digits++; case 'x': case 'X': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return (digits ? TRUE : FALSE); } /* * Determine whether a string contains only hexadecimal characters. * and cannot be construed as a decimal number. * If count is non-zero, limit the search to count characters. */ int hexadecimal_only(char *s, int count) { char *p; int cnt, only; if (!count) { strip_line_end(s); cnt = 0; } else cnt = count; only = 0; for (p = &s[0]; *p; p++) { switch(*p) { case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'x': case 'X': only++; break; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '0': break; case ' ': if (*(p+1) == NULLCHAR) break; else return FALSE; default: return FALSE; } if (count && (--cnt == 0)) break; } return only; } /* * Clean a command argument that has an obvious but ignorable error. * The first one is an attached comma to a number, that usually is the * result of a cut-and-paste of an address from a structure display. * The second on is an attached colon to a number, usually from a * cut-and-paste of a memory dump. * Add more when they become annoynance. * * It presumes args[optind] is the argument being tinkered with, and * always returns TRUE for convenience of use. 
*/ int clean_arg(void) { char buf[BUFSIZE]; if (LASTCHAR(args[optind]) == ',' || LASTCHAR(args[optind]) == ':') { strcpy(buf, args[optind]); LASTCHAR(buf) = NULLCHAR; if (IS_A_NUMBER(buf)) LASTCHAR(args[optind]) = NULLCHAR; } return TRUE; } /* * Translate a hexadecimal string into its ASCII components. */ void cmd_ascii(void) { int i; ulonglong value; char *s; int c, prlen, bytes; optind = 1; if (!args[optind]) { fprintf(fp, "\n"); fprintf(fp, " 0 1 2 3 4 5 6 7\n"); fprintf(fp, " +-------------------------------\n"); fprintf(fp, " 0 | NUL DLE SP 0 @ P ' p\n"); fprintf(fp, " 1 | SOH DC1 ! 1 A Q a q\n"); fprintf(fp, " 2 | STX DC2 %c 2 B R b r\n", 0x22); fprintf(fp, " 3 | ETX DC3 # 3 C S c s\n"); fprintf(fp, " 4 | EOT DC4 $ 4 D T d t\n"); fprintf(fp, " 5 | ENQ NAK %c 5 E U e u\n", 0x25); fprintf(fp, " 6 | ACK SYN & 6 F V f v\n"); fprintf(fp, " 7 | BEL ETB ` 7 G W g w\n"); fprintf(fp, " 8 | BS CAN ( 8 H X h x\n"); fprintf(fp, " 9 | HT EM ) 9 I Y i y\n"); fprintf(fp, " A | LF SUB * : J Z j z\n"); fprintf(fp, " B | VT ESC + ; K [ k {\n"); fprintf(fp, " C | FF FS , < L %c l |\n", 0x5c); fprintf(fp, " D | CR GS _ = M ] m }\n"); fprintf(fp, " E | SO RS . > N ^ n ~\n"); fprintf(fp, " F | SI US / ? 
O - o DEL\n"); fprintf(fp, "\n"); return; } while (args[optind]) { s = args[optind]; if (STRNEQ(s, "0x") || STRNEQ(s, "0X")) s += 2; if (strlen(s) > LONG_PRLEN) { prlen = LONG_LONG_PRLEN; bytes = sizeof(long long); } else { prlen = LONG_PRLEN; bytes = sizeof(long); } value = htoll(s, FAULT_ON_ERROR, NULL); fprintf(fp, "%.*llx: ", prlen, value); for (i = 0; i < bytes; i++) { c = (value >> (8*i)) & 0xff; if ((c >= 0x20) && (c < 0x7f)) { fprintf(fp, "%c", (char)c); continue; } if (c > 0x7f) { fprintf(fp, "<%02x>", c); continue; } switch (c) { case 0x0: fprintf(fp, ""); break; case 0x1: fprintf(fp, ""); break; case 0x2: fprintf(fp, ""); break; case 0x3: fprintf(fp, ""); break; case 0x4: fprintf(fp, ""); break; case 0x5: fprintf(fp, ""); break; case 0x6: fprintf(fp, ""); break; case 0x7: fprintf(fp, ""); break; case 0x8: fprintf(fp, ""); break; case 0x9: fprintf(fp, ""); break; case 0xa: fprintf(fp, ""); break; case 0xb: fprintf(fp, ""); break; case 0xc: fprintf(fp, ""); break; case 0xd: fprintf(fp, ""); break; case 0xe: fprintf(fp, ""); break; case 0xf: fprintf(fp, ""); break; case 0x10: fprintf(fp, ""); break; case 0x11: fprintf(fp, ""); break; case 0x12: fprintf(fp, ""); break; case 0x13: fprintf(fp, ""); break; case 0x14: fprintf(fp, ""); break; case 0x15: fprintf(fp, ""); break; case 0x16: fprintf(fp, ""); break; case 0x17: fprintf(fp, ""); break; case 0x18: fprintf(fp, ""); break; case 0x19: fprintf(fp, ""); break; case 0x1a: fprintf(fp, ""); break; case 0x1b: fprintf(fp, ""); break; case 0x1c: fprintf(fp, ""); break; case 0x1d: fprintf(fp, ""); break; case 0x1e: fprintf(fp, ""); break; case 0x1f: fprintf(fp, ""); break; case 0x7f: fprintf(fp, ""); break; } } fprintf(fp, "\n"); optind++; } } /* * Counts number of leading whitespace characters in a string. */ int count_leading_spaces(char *s) { return (strspn(s, " \t")); } /* * Prints the requested number of spaces. 
*/ void pad_line(FILE *filep, int cnt, char c) { int i; for (i = 0; i < cnt; i++) fputc(c, filep); } /* * Returns appropriate number of inter-field spaces in a usable string. * MINSPACE is defined as -100, but implies the minimum space between two * fields. Currently this can be either one or two spaces, depending upon * the architecture. Since the mininum space must be at least 1, MINSPACE, * MINSPACE-1 and MINSPACE+1 are all valid, special numbers. Otherwise * the space count must be greater than or equal to 0. * * If the cnt request is greater than SPACES, a dynamic buffer is * allocated, and normal buffer garbage collection will return it * back to the pool. */ char * space(int cnt) { #define SPACES 40 static char spacebuf[SPACES+1] = { 0 }; int i; char *bigspace; if (cnt > SPACES) { bigspace = GETBUF(cnt); for (i = 0; i < cnt; i++) bigspace[i] = ' '; bigspace[i] = NULLCHAR; return bigspace; } if (!strlen(spacebuf)) { for (i = 0; i < SPACES; i++) spacebuf[i] = ' '; spacebuf[i] = NULLCHAR; } if (cnt < (MINSPACE-1)) error(FATAL, "illegal spacing request: %d\n", cnt); if ((cnt > MINSPACE+1) && (cnt < 0)) error(FATAL, "illegal spacing request\n"); switch (cnt) { case (MINSPACE-1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES]); /* NULL */ else return (&spacebuf[SPACES-1]); /* 1 space */ case MINSPACE: if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-1]); /* 1 space */ else return (&spacebuf[SPACES-2]); /* 2 spaces */ case (MINSPACE+1): if (VADDR_PRLEN > 8) return (&spacebuf[SPACES-2]); /* 2 spaces */ else return (&spacebuf[SPACES-3]); /* 3 spaces */ default: return (&spacebuf[SPACES-cnt]); /* as requested */ } } /* * Determine whether substring s1, with length len, and contained within * string s, is surrounded by characters. If len is 0, calculate * it. 
*/ int bracketed(char *s, char *s1, int len) { char *s2; if (!len) { if (!(s2 = strstr(s1, ">"))) return FALSE; len = s2-s1; } if (((s1-s) < 1) || (*(s1-1) != '<') || ((s1+len) >= &s[strlen(s)]) || (*(s1+len) != '>')) return FALSE; return TRUE; } /* * Counts the number of a specified character in a string. */ int count_chars(char *s, char c) { char *p; int count; if (!s) return 0; count = 0; for (p = s; *p; p++) { if (*p == c) count++; } return count; } /* * Counts the number of a specified characters in a buffer. */ long count_buffer_chars(char *bufptr, char c, long len) { long i, cnt; for (i = cnt = 0; i < len; i++, bufptr++) { if (*bufptr == c) cnt++; } return cnt; } /* * Concatenates the tokens in the global args[] array into one string, * separating each token with one space. If the no_options flag is set, * don't include any args beginning with a dash character. */ char * concat_args(char *buf, int arg, int no_options) { int i; BZERO(buf, BUFSIZE); for (i = arg; i < argcnt; i++) { if (no_options && STRNEQ(args[i], "-")) continue; strcat(buf, args[i]); strcat(buf, " "); } return(strip_ending_whitespace(buf)); } /* * Shifts the contents of a string to the left by cnt characters, * disposing the leftmost characters. */ char * shift_string_left(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s, s+cnt, (origlen-cnt)); *(s+(origlen-cnt)) = NULLCHAR; return(s); } /* * Shifts the contents of a string to the right by cnt characters, * inserting space characters. (caller confirms space is available) */ char * shift_string_right(char *s, int cnt) { int origlen; if (!cnt) return(s); origlen = strlen(s); memmove(s+cnt, s, origlen); s[origlen+cnt] = NULLCHAR; return(memset(s, ' ', cnt)); } /* * Create a string in a buffer of a given size, centering, or justifying * left or right as requested. If the opt argument is used, then the string * is created with its string/integer value. 
 *  If opt is NULL, then the
 *  string is already contained in string s (not justified).  Note that
 *  flag LONGLONG_HEX implies that opt is a ulonglong pointer to the
 *  actual value.
 */
char *
mkstring(char *s, int size, ulong flags, const char *opt)
{
	int len;
	int extra;
	int left;
	int right;

	/*
	 *  First render the value into s.  For the numeric flags, opt
	 *  carries the value itself (cast through ulong), except for
	 *  LONGLONG_HEX where opt points at a ulonglong.
	 */
	switch (flags & (LONG_DEC|LONG_HEX|INT_HEX|INT_DEC|LONGLONG_HEX|ZERO_FILL))
	{
	case LONG_DEC:
		sprintf(s, "%lu", (ulong)opt);
		break;
	case LONG_HEX:
		sprintf(s, "%lx", (ulong)opt);
		break;
	case (LONG_HEX|ZERO_FILL):
		if (VADDR_PRLEN == 8)
			sprintf(s, "%08lx", (ulong)opt);
		else if (VADDR_PRLEN == 16)
			sprintf(s, "%016lx", (ulong)opt);
		break;
	case INT_DEC:
		sprintf(s, "%u", (uint)((ulong)opt));
		break;
	case INT_HEX:
		sprintf(s, "%x", (uint)((ulong)opt));
		break;
	case LONGLONG_HEX:
		sprintf(s, "%llx", *((ulonglong *)opt));
		break;
	default:
		if (opt)
			strcpy(s, opt);
		break;
	}

	/*
	 *  At this point, string s has the string to be justified,
	 *  and has room to work with.  The relevant flags from this
	 *  point on are of CENTER, LJUST and RJUST.  If the length
	 *  of string s is already larger than the requested size,
	 *  just return it as is.
	 */
	len = strlen(s);
	if (size <= len)
		return(s);

	extra = size - len;

	if (flags & CENTER) {
		/*
		 *  If absolute centering is not possible, justify the
		 *  string as requested -- or to the left if no justify
		 *  argument was passed in.
		 */
		if (extra % 2) {
			switch (flags & (LJUST|RJUST))
			{
			default:
			case LJUST:
				right = (extra/2) + 1;
				left = extra/2;
				break;
			case RJUST:
				right = extra/2;
				left = (extra/2) + 1;
				break;
			}
		} else
			left = right = extra/2;

		shift_string_right(s, left);
		len = strlen(s);
		memset(s + len, ' ', right);
		s[len + right] = NULLCHAR;

		return(s);
	}

	if (flags & LJUST) {
		/* pad on the right with spaces */
		len = strlen(s);
		memset(s + len, ' ', extra);
		s[len + extra] = NULLCHAR;
	} else if (flags & RJUST)
		shift_string_right(s, extra);

	return(s);
}

/*
 *  Prints the requested number of BACKSPACE characters.
 */
void
backspace(int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		fprintf(fp, "\b");
}

/*
 *  Set/display process context or internal variables.  Processes are set
 *  by their task or PID number, or to the panic context with the -p flag.
 *  Internal variables may be viewed or changed, depending whether an argument
 *  follows the variable name.  If no arguments are entered, the current
 *  process context is dumped.  The current set of variables and their
 *  acceptable settings are:
 *
 *        debug  "on", "off", or any number.  "on" sets it to a value of 1.
 *         hash  "on", "off", or any number.  Non-zero numbers are converted
 *               to "on", zero is converted to "off".
 *       scroll  "on", "off", or any number.  Non-zero numbers are converted
 *               to "on", zero is converted to "off".
 *       silent  "on", "off", or any number.  Non-zero numbers are converted
 *               to "on", zero is converted to "off".
 *      refresh  "on", "off", or any number.  Non-zero numbers are converted
 *               to "on", zero is converted to "off".
 *          sym  regular filename
 *      console  device filename
 *        radix  10 or 16
 *         core  (no arg) drop core when error() is called.
 *           vi  (no arg) set editing mode to vi (from .rc file only).
 *        emacs  (no arg) set editing mode to emacs (from .rc file only).
 *     namelist  kernel name (from .rc file only).
 *     dumpfile  dumpfile name (from .rc file only).
 *
 *  gdb variable settings not changeable by gdb's "set" command:
 *
 *    print_max  value (default is 200).
 */
void
cmd_set(void)
{
	int i, c;
	ulong value;
	int cpu, runtime, from_rc_file;
	char buf[BUFSIZE];
	char *extra_message;
	struct task_context *tc;
	struct syment *sp;

	/*
	 *  Markers documenting why a branch deliberately does nothing:
	 *  defer()        -- not at runtime yet; handled later
	 *  already_done() -- rc-file pass already applied the setting
	 *  ignore()       -- overridden by a command-line option
	 */
#define defer() do { } while (0)
#define already_done() do { } while (0)
#define ignore() do { } while (0)

	extra_message = NULL;
	runtime = pc->flags & RUNTIME ? TRUE : FALSE;
	from_rc_file = pc->curcmd_flags & FROM_RCFILE ? TRUE : FALSE;

	while ((c = getopt(argcnt, args, "pvc:a:")) != EOF) {
		switch(c)
		{
		case 'c':	/* set context to a cpu's active task */
			if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE))
				option_not_supported(c);
			if (!runtime)
				return;
			if (ACTIVE()) {
				error(INFO, "not allowed on a live system\n");
				argerrs++;
				break;
			}
			cpu = dtoi(optarg, FAULT_ON_ERROR, NULL);
			set_cpu(cpu);
			return;

		case 'p':	/* set context to the panic (or current) task */
			if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE))
				option_not_supported(c);
			if (!runtime)
				return;
			if (ACTIVE()) {
				set_context(tt->this_task, NO_PID);
				show_context(CURRENT_CONTEXT());
				return;
			}
			if (!tt->panic_task) {
				error(INFO, "no panic task found!\n");
				return;
			}
			set_context(tt->panic_task, NO_PID);
			show_context(CURRENT_CONTEXT());
			return;

		case 'v':	/* display all settable variables */
			if (!runtime)
				return;
			show_options();
			return;

		case 'a':	/* mark a task as active on its cpu */
			if (XEN_HYPER_MODE() || (pc->flags & MINIMAL_MODE))
				option_not_supported(c);
			if (!runtime)
				return;
			if (ACTIVE())
				error(FATAL,
				    "-a option not allowed on live systems\n");
			switch (str_to_context(optarg, &value, &tc))
			{
			case STR_PID:
				if ((i = TASKS_PER_PID(value)) > 1)
					error(FATAL, "pid %d has %d tasks: "
						"use a task address\n",
						value, i);
				break;
			case STR_TASK:
				break;
			case STR_INVALID:
				error(FATAL, "invalid task or pid value: %s\n",
					optarg);
			}
			cpu = tc->processor;
			tt->active_set[cpu] = tc->task;
			if (tt->panic_threads[cpu])
				tt->panic_threads[cpu] = tc->task;
			fprintf(fp,
			    "\"%s\" task %lx has been marked as the active task on cpu %d\n",
				tc->comm, tc->task, cpu);
			return;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs) {
		if (runtime)
			cmd_usage(pc->curcmd, SYNOPSIS);
		return;
	}

	/* No arguments at all: show the current context (mode permitting). */
	if (!args[optind]) {
		if (XEN_HYPER_MODE())
			error(INFO,
			    "requires an option with the Xen hypervisor\n");
		else if (pc->flags & MINIMAL_MODE)
			show_options();
		else if (runtime)
			show_context(CURRENT_CONTEXT());
		return;
	}

	/*
	 *  Keyword dispatch: each branch optionally consumes a value
	 *  argument, applies it, then displays the current setting when
	 *  running at runtime.
	 */
	while (args[optind]) {
		if (STREQ(args[optind], "debug")) {
			if (args[optind+1]) {
				optind++;
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on"))
					pc->debug = 1;
				else if (STREQ(args[optind], "off"))
					pc->debug = 0;
				else if (IS_A_NUMBER(args[optind]))
					pc->debug = stol(args[optind],
						FAULT_ON_ERROR, NULL);
				else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "debug: %ld\n", pc->debug);

			set_lkcd_debug(pc->debug);
			set_vas_debug(pc->debug);

			return;

		} else if (STREQ(args[optind], "hash")) {
			if (args[optind+1]) {
				optind++;
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on"))
					pc->flags |= HASH;
				else if (STREQ(args[optind], "off"))
					pc->flags &= ~HASH;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						pc->flags |= HASH;
					else
						pc->flags &= ~HASH;
				} else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "hash: %s\n",
					pc->flags & HASH ? "on" : "off");
			return;

		} else if (STREQ(args[optind], "unwind")) {
			if (args[optind+1]) {
				optind++;
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on")) {
					/* only honored if DWARF-capable */
					if ((kt->flags & DWARF_UNWIND_CAPABLE) ||
					    !runtime) {
						kt->flags |= DWARF_UNWIND;
						kt->flags &= ~NO_DWARF_UNWIND;
					}
				} else if (STREQ(args[optind], "off")) {
					kt->flags &= ~DWARF_UNWIND;
					if (!runtime)
						kt->flags |= NO_DWARF_UNWIND;
				} else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value) {
						if ((kt->flags & DWARF_UNWIND_CAPABLE) ||
						    !runtime) {
							kt->flags |= DWARF_UNWIND;
							kt->flags &= ~NO_DWARF_UNWIND;
						}
					} else {
						kt->flags &= ~DWARF_UNWIND;
						if (!runtime)
							kt->flags |= NO_DWARF_UNWIND;
					}
				} else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "unwind: %s\n",
					kt->flags & DWARF_UNWIND ? "on" : "off");
			return;

		} else if (STREQ(args[optind], "refresh")) {
			if (args[optind+1]) {
				optind++;
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on"))
					tt->flags |= TASK_REFRESH;
				else if (STREQ(args[optind], "off")) {
					tt->flags &= ~TASK_REFRESH;
					if (!runtime)
						tt->flags |= TASK_REFRESH_OFF;
				} else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						tt->flags |= TASK_REFRESH;
					else {
						tt->flags &= ~TASK_REFRESH;
						if (!runtime)
							tt->flags |= TASK_REFRESH_OFF;
					}
				} else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "refresh: %s\n",
					tt->flags & TASK_REFRESH ? "on" : "off");
			return;

		} else if (STREQ(args[optind], "gdb")) {
			if (args[optind+1]) {
				optind++;
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on")) {
					/* gdb mode is unavailable in minimal mode */
					if (pc->flags & MINIMAL_MODE)
						goto invalid_set_command;
					else
						pc->flags2 |= GDB_CMD_MODE;
				} else if (STREQ(args[optind], "off"))
					pc->flags2 &= ~GDB_CMD_MODE;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value) {
						if (pc->flags & MINIMAL_MODE)
							goto invalid_set_command;
						else
							pc->flags2 |= GDB_CMD_MODE;
					} else
						pc->flags2 &= ~GDB_CMD_MODE;
				} else
					goto invalid_set_command;

				set_command_prompt(pc->flags2 & GDB_CMD_MODE ?
					"gdb> " : NULL);
			}
			if (runtime)
				fprintf(fp, "gdb: %s\n",
					pc->flags2 & GDB_CMD_MODE ? "on" : "off");
			return;

		} else if (STREQ(args[optind], "scroll")) {
			if (args[optind+1] && pc->scroll_command) {
				optind++;
				if (from_rc_file)
					already_done();
				else if (STREQ(args[optind], "on"))
					pc->flags |= SCROLL;
				else if (STREQ(args[optind], "off"))
					pc->flags &= ~SCROLL;
				else if (STREQ(args[optind], "more"))
					pc->scroll_command = SCROLL_MORE;
				else if (STREQ(args[optind], "less"))
					pc->scroll_command = SCROLL_LESS;
				else if (STREQ(args[optind], "CRASHPAGER")) {
					if (CRASHPAGER_valid())
						pc->scroll_command = SCROLL_CRASHPAGER;
				} else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						pc->flags |= SCROLL;
					else
						pc->flags &= ~SCROLL;
				} else
					goto invalid_set_command;
			}
			if (runtime) {
				fprintf(fp, "scroll: %s ",
					pc->flags & SCROLL ? "on" : "off");
				switch (pc->scroll_command)
				{
				case SCROLL_LESS:
					fprintf(fp, "(/usr/bin/less)\n");
					break;
				case SCROLL_MORE:
					fprintf(fp, "(/bin/more)\n");
					break;
				case SCROLL_NONE:
					fprintf(fp, "(none)\n");
					break;
				case SCROLL_CRASHPAGER:
					fprintf(fp, "(CRASHPAGER: %s)\n",
						getenv("CRASHPAGER"));
					break;
				}
			}
			return;

		} else if (STREQ(args[optind], "silent")) {
			if (args[optind+1]) {
				optind++;
				/* silent implies no scrolling */
				if (STREQ(args[optind], "on")) {
					pc->flags |= SILENT;
					pc->flags &= ~SCROLL;
				} else if (STREQ(args[optind], "off"))
					pc->flags &= ~SILENT;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value) {
						pc->flags |= SILENT;
						pc->flags &= ~SCROLL;
					} else
						pc->flags &= ~SILENT;
				} else
					goto invalid_set_command;

				/* only report when silent is off */
				if (!(pc->flags & SILENT))
					fprintf(fp, "silent: off\n");
			} else if (runtime && !(pc->flags & SILENT))
				fprintf(fp, "silent: off\n");
			return;

		} else if (STREQ(args[optind], "console")) {
			int assignment;

			if (args[optind+1]) {
				create_console_device(args[optind+1]);
				optind++;
				assignment = optind;
			} else
				assignment = 0;

			if (runtime) {
				fprintf(fp, "console: ");
				if (pc->console)
					fprintf(fp, "%s\n", pc->console);
				else {
					if (assignment)
						fprintf(fp,
						    "assignment to %s failed\n",
							args[assignment]);
					else
						fprintf(fp, "not set\n");
				}
			}
			return;

		} else if (STREQ(args[optind], "core")) {
			if (args[optind+1]) {
				optind++;
				if (STREQ(args[optind], "on"))
					pc->flags |= DROP_CORE;
				else if (STREQ(args[optind], "off"))
					pc->flags &= ~DROP_CORE;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						pc->flags |= DROP_CORE;
					else
						pc->flags &= ~DROP_CORE;
				} else
					goto invalid_set_command;
			}
			if (runtime) {
				fprintf(fp, "core: %s on error message)\n",
					pc->flags & DROP_CORE ? "on (drop core" :
					"off (do NOT drop core");
			}
			return;

		} else if (STREQ(args[optind], "radix")) {
			if (args[optind+1]) {
				optind++;
				if (!runtime)
					defer();
				else if (from_rc_file &&
				    (pc->flags2 & RADIX_OVERRIDE))
					ignore();
				else if (STREQ(args[optind], "10") ||
				    STRNEQ(args[optind], "dec") ||
				    STRNEQ(args[optind], "ten"))
					pc->output_radix = 10;
				else if (STREQ(args[optind], "16") ||
				    STRNEQ(args[optind], "hex") ||
				    STRNEQ(args[optind], "six"))
					pc->output_radix = 16;
				else
					goto invalid_set_command;
			}
			if (runtime) {
				/* keep gdb's output radix in sync */
				sprintf(buf, "set output-radix %d",
					pc->output_radix);
				gdb_pass_through(buf, NULL, GNU_FROM_TTY_OFF);
				fprintf(fp, "output radix: %d (%s)\n",
					pc->output_radix,
					pc->output_radix == 10 ?
					"decimal" : "hex");
			}
			return;

		} else if (STREQ(args[optind], "hex")) {
			if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE))
				ignore();
			else if (runtime) {
				pc->output_radix = 16;
				gdb_pass_through("set output-radix 16",
					NULL, GNU_FROM_TTY_OFF);
				fprintf(fp, "output radix: 16 (hex)\n");
			}
			return;

		} else if (STREQ(args[optind], "dec")) {
			if (from_rc_file && (pc->flags2 & RADIX_OVERRIDE))
				ignore();
			else if (runtime) {
				pc->output_radix = 10;
				gdb_pass_through("set output-radix 10",
					NULL, GNU_FROM_TTY_OFF);
				fprintf(fp, "output radix: 10 (decimal)\n");
			}
			return;

		} else if (STREQ(args[optind], "edit")) {
			if (args[optind+1]) {
				if (runtime && !from_rc_file)
					error(FATAL,
					    "cannot change editing mode during runtime\n");
				optind++;
				if (from_rc_file)
					already_done();
				else if (STREQ(args[optind], "vi"))
					pc->editing_mode = "vi";
				else if (STREQ(args[optind], "emacs"))
					pc->editing_mode = "emacs";
				else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "edit: %s\n", pc->editing_mode);
			return;

		} else if (STREQ(args[optind], "vi")) {
			if (runtime) {
				if (!from_rc_file)
					error(FATAL,
					    "cannot change editing mode during runtime\n");
				fprintf(fp, "edit: %s\n", pc->editing_mode);
			} else
				pc->editing_mode = "vi";
			return;

		} else if (STREQ(args[optind], "emacs")) {
			if (runtime) {
				if (!from_rc_file)
					error(FATAL,
					    "cannot change %s editing mode during runtime\n",
						pc->editing_mode);
				fprintf(fp, "edit: %s\n", pc->editing_mode);
			} else
				pc->editing_mode = "emacs";
			return;

		} else if (STREQ(args[optind], "print_max")) {
			optind++;
			if (args[optind]) {
				if (!runtime)
					defer();
				else if (decimal(args[optind], 0))
					*gdb_print_max = atoi(args[optind]);
				else if (hexadecimal(args[optind], 0))
					*gdb_print_max = (unsigned int)
						htol(args[optind],
						FAULT_ON_ERROR, NULL);
				else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "print_max: %d\n", *gdb_print_max);
			return;

		} else if (STREQ(args[optind], "scope")) {
			optind++;
			if (args[optind]) {
				if (!runtime)
					defer();
				else if (can_eval(args[optind]))
					value = eval(args[optind],
						FAULT_ON_ERROR, NULL);
				else if (hexadecimal(args[optind], 0))
					value = htol(args[optind],
						FAULT_ON_ERROR, NULL);
				else if ((sp = symbol_search(args[optind])))
					value = sp->value;
				else
					goto invalid_set_command;

				if (runtime) {
					if (gdb_set_crash_scope(value, args[optind]))
						pc->scope = value;
					else
						return;
				}
			}
			if (runtime) {
				fprintf(fp, "scope: %lx ", pc->scope);
				if (pc->scope)
					fprintf(fp, "(%s)\n",
						value_to_symstr(pc->scope, buf, 0));
				else
					fprintf(fp, "(not set)\n");
			}
			return;

		} else if (STREQ(args[optind], "null-stop")) {
			optind++;
			if (args[optind]) {
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on"))
					*gdb_stop_print_at_null = 1;
				else if (STREQ(args[optind], "off"))
					*gdb_stop_print_at_null = 0;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						*gdb_stop_print_at_null = 1;
					else
						*gdb_stop_print_at_null = 0;
				} else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "null-stop: %s\n",
					*gdb_stop_print_at_null ? "on" : "off");
			return;

		} else if (STREQ(args[optind], "print_array")) {
			optind++;
			if (args[optind]) {
				if (!runtime)
					defer();
				else if (STREQ(args[optind], "on"))
					*gdb_prettyprint_arrays = 1;
				else if (STREQ(args[optind], "off"))
					*gdb_prettyprint_arrays = 0;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						*gdb_prettyprint_arrays = 1;
					else
						*gdb_prettyprint_arrays = 0;
				} else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "print_array: %s\n",
					*gdb_prettyprint_arrays ? "on" : "off");
			return;

		} else if (STREQ(args[optind], "namelist")) {
			optind++;
			/* only settable from the .rc file, before runtime */
			if (!runtime && args[optind]) {
				if (!is_elf_file(args[optind]))
					error(FATAL,
					    "%s: not a kernel namelist (from .%src file)\n",
						args[optind], pc->program_name);
				if ((pc->namelist = (char *)
				    malloc(strlen(args[optind])+1)) == NULL) {
					error(INFO,
					    "cannot malloc memory for namelist: %s: %s\n",
						args[optind], strerror(errno));
				} else
					strcpy(pc->namelist, args[optind]);
			}
			if (runtime)
				fprintf(fp, "namelist: %s\n", pc->namelist);
			return;

		} else if (STREQ(args[optind], "free")) {
			if (!runtime)
				defer();
			else
				fprintf(fp, "%d pages freed\n",
					dumpfile_memory(DUMPFILE_FREE_MEM));
			return;

		} else if (STREQ(args[optind], "data_debug")) {
			pc->flags |= DATADEBUG;
			return;

		} else if (STREQ(args[optind], "zero_excluded")) {
			if (args[optind+1]) {
				optind++;
				if (from_rc_file)
					already_done();
				else if (STREQ(args[optind], "on")) {
					*diskdump_flags |= ZERO_EXCLUDED;
					sadump_set_zero_excluded();
				} else if (STREQ(args[optind], "off")) {
					*diskdump_flags &= ~ZERO_EXCLUDED;
					sadump_unset_zero_excluded();
				} else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value) {
						*diskdump_flags |= ZERO_EXCLUDED;
						sadump_set_zero_excluded();
					} else {
						*diskdump_flags &= ~ZERO_EXCLUDED;
						sadump_unset_zero_excluded();
					}
				} else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, "zero_excluded: %s\n",
					(*diskdump_flags & ZERO_EXCLUDED) ||
					sadump_is_zero_excluded() ?
					"on" : "off");
			return;

		} else if (STREQ(args[optind], "offline")) {
			if (args[optind+1]) {
				optind++;
				if (from_rc_file)
					already_done();
				else if (STREQ(args[optind], "show"))
					pc->flags2 &= ~OFFLINE_HIDE;
				else if (STREQ(args[optind], "hide"))
					pc->flags2 |= OFFLINE_HIDE;
				else
					goto invalid_set_command;
			}
			if (runtime)
				fprintf(fp, " offline: %s\n",
					pc->flags2 & OFFLINE_HIDE ? "hide" : "show");
			return;

		} else if (STREQ(args[optind], "redzone")) {
			if (args[optind+1]) {
				optind++;
				if (STREQ(args[optind], "on"))
					pc->flags2 |= REDZONE;
				else if (STREQ(args[optind], "off"))
					pc->flags2 &= ~REDZONE;
				else if (IS_A_NUMBER(args[optind])) {
					value = stol(args[optind],
						FAULT_ON_ERROR, NULL);
					if (value)
						pc->flags2 |= REDZONE;
					else
						pc->flags2 &= ~REDZONE;
				} else
					goto invalid_set_command;
			}
			if (runtime) {
				fprintf(fp, "redzone: %s\n",
					pc->flags2 & REDZONE ? "on" : "off");
			}
			return;

		} else if (XEN_HYPER_MODE()) {
			error(FATAL,
			    "invalid argument for the Xen hypervisor\n");
		} else if (pc->flags & MINIMAL_MODE) {
			error(FATAL, "invalid argument in minimal mode\n");
		} else if (runtime) {
			/* not a keyword: treat it as a task or pid context */
			ulong pid, task;

			switch (str_to_context(args[optind], &value, &tc))
			{
			case STR_PID:
				pid = value;
				task = NO_TASK;
				if (set_context(task, pid))
					show_context(CURRENT_CONTEXT());
				break;
			case STR_TASK:
				task = value;
				pid = NO_PID;
				if (set_context(task, pid))
					show_context(CURRENT_CONTEXT());
				break;
			case STR_INVALID:
				error(INFO, "invalid task or pid value: %s\n",
					args[optind]);
				break;
			}
		} else
			console("set: ignoring \"%s\"\n", args[optind]);

		optind++;
	}

	return;

invalid_set_command:

	/* Build the complete offending command line for the message. */
	sprintf(buf, "invalid command");
	if (!runtime)
		sprintf(&buf[strlen(buf)], " in .%src file", pc->program_name);
	strcat(buf, ": ");
	for (i = 0; i < argcnt; i++)
		sprintf(&buf[strlen(buf)], "%s ", args[i]);
	strcat(buf, "\n");
	if (extra_message)
		strcat(buf, extra_message);
	error(runtime ? FATAL : INFO, buf);

#undef defer
#undef already_done
#undef ignore
}

/*
 *  Display the set of settable internal variables.
 */
static void
show_options(void)
{
	char buf[BUFSIZE];

	fprintf(fp, " scroll: %s ", pc->flags & SCROLL ? "on" : "off");
	switch (pc->scroll_command)
	{
	case SCROLL_LESS:
		fprintf(fp, "(/usr/bin/less)\n");
		break;
	case SCROLL_MORE:
		fprintf(fp, "(/bin/more)\n");
		break;
	case SCROLL_NONE:
		fprintf(fp, "(none)\n");
		break;
	case SCROLL_CRASHPAGER:
		fprintf(fp, "(CRASHPAGER: %s)\n", getenv("CRASHPAGER"));
		break;
	}
	fprintf(fp, " radix: %d (%s)\n", pc->output_radix,
		pc->output_radix == 10 ? "decimal" :
		pc->output_radix == 16 ? "hexadecimal" : "unknown");
	fprintf(fp, " refresh: %s\n",
		tt->flags & TASK_REFRESH ? "on" : "off");
	fprintf(fp, " print_max: %d\n", *gdb_print_max);
	fprintf(fp, " print_array: %s\n",
		*gdb_prettyprint_arrays ? "on" : "off");
	fprintf(fp, " console: %s\n",
		pc->console ? pc->console : "(not assigned)");
	fprintf(fp, " debug: %ld\n", pc->debug);
	fprintf(fp, " core: %s\n", pc->flags & DROP_CORE ? "on" : "off");
	fprintf(fp, " hash: %s\n", pc->flags & HASH ? "on" : "off");
	fprintf(fp, " silent: %s\n", pc->flags & SILENT ? "on" : "off");
	fprintf(fp, " edit: %s\n", pc->editing_mode);
	fprintf(fp, " namelist: %s\n", pc->namelist);
	fprintf(fp, " dumpfile: %s\n", pc->dumpfile);
	fprintf(fp, " unwind: %s\n",
		kt->flags & DWARF_UNWIND ? "on" : "off");
	fprintf(fp, " zero_excluded: %s\n",
		(*diskdump_flags & ZERO_EXCLUDED) ||
		sadump_is_zero_excluded() ? "on" : "off");
	fprintf(fp, " null-stop: %s\n",
		*gdb_stop_print_at_null ? "on" : "off");
	fprintf(fp, " gdb: %s\n",
		pc->flags2 & GDB_CMD_MODE ? "on" : "off");
	fprintf(fp, " scope: %lx ", pc->scope);
	if (pc->scope)
		fprintf(fp, "(%s)\n", value_to_symstr(pc->scope, buf, 0));
	else
		fprintf(fp, "(not set)\n");
	fprintf(fp, " offline: %s\n",
		pc->flags2 & OFFLINE_HIDE ? "hide" : "show");
	fprintf(fp, " redzone: %s\n", pc->flags2 & REDZONE ? "on" : "off");
}

/*
 *  Evaluate an expression, which can consist of a single symbol, single value,
 *  or an expression consisting of two values and an operator.  If the
 *  expression contains redirection characters, the whole expression must
 *  be enclosed with parentheses.
 * The result is printed in decimal, hex,
 * octal and binary.  Input number values can only be hex or decimal, with
 * a bias towards decimal (use 0x when necessary).
 */
void
cmd_eval(void)
{
	int flags;
	int bitflag, longlongflag, longlongflagforce;
	struct number_option nopt;
	char buf1[BUFSIZE];

	/*
	 * getopt() is not used to avoid confusion with minus sign.
	 * Recognized leading switches: -l forces "long long" output,
	 * -b additionally displays the bit numbers that are set.
	 */
	optind = 1;
	bitflag = 0;
	longlongflag = longlongflagforce = 0;
	BZERO(&nopt, sizeof(struct number_option));

	if (STREQ(args[optind], "-lb") || STREQ(args[optind], "-bl")) {
		/* combined form: both -l and -b */
		longlongflagforce++;
		bitflag++;
		optind++;
	} else if (STREQ(args[optind], "-l")) {
		longlongflagforce++;
		optind++;
		/* -l may be followed by a separate -b plus an expression */
		if (STREQ(args[optind], "-b") && args[optind+1]) {
			optind++;
			bitflag++;
		}
	} else if (STREQ(args[optind], "-b")) {
		if (STREQ(args[optind+1], "-l")) {
			if (args[optind+2]) {
				bitflag++;
				longlongflagforce++;
				optind += 2;
			} else
				cmd_usage(pc->curcmd, SYNOPSIS);
		} else if (args[optind+1]) {
			bitflag++;
			optind++;
		}
	}

	/* an expression argument is required */
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	/*
	 * On 32-bit machines, evaluate in 64 bits (falling back as
	 * needed); longlongflagforce is only meaningful on 32-bits.
	 */
	longlongflag = BITS32() ? TRUE : FALSE;
	flags = longlongflag ? (LONG_LONG|RETURN_ON_ERROR) : FAULT_ON_ERROR;
	if(!BITS32())
		longlongflagforce = 0;

	BZERO(buf1, BUFSIZE);
	buf1[0] = '(';

	/*
	 * A parenthesized argument is evaluated as-is; otherwise all
	 * remaining arguments are concatenated into a single
	 * parenthesized expression.
	 */
	while (args[optind]) {
		if (*args[optind] == '(') {
			if (eval_common(args[optind], flags, NULL, &nopt))
				print_number(&nopt, bitflag,
					longlongflagforce);
			else
				error(FATAL, "invalid expression: %s\n",
					args[optind]);
			return;
		} else {
			strcat(buf1, args[optind]);
			strcat(buf1, " ");
		}
		optind++;
	}

	clean_line(buf1);
	strcat(buf1, ")");

	if (eval_common(buf1, flags, NULL, &nopt))
		print_number(&nopt, bitflag, longlongflagforce);
	else
		error(FATAL, "invalid expression: %s\n", buf1);
}

/*
 * Pre-check a string for eval-worthiness.  This allows callers to avoid
 * having to encompass a non-whitespace expression with parentheses.
 * Note that the data being evaluated is not error-checked here, but
 * rather that it exists in the proper format.
 */
int
can_eval(char *s)
{
	char *op;
	char *element1, *element2;
	char work[BUFSIZE];

	/*
	 * If we've got a () pair containing any sort of stuff in between,
	 * then presume it's eval-able.  It might contain crap, but it
	 * should be sent to eval() regardless.
	 */
	if ((FIRSTCHAR(s) == '(') &&
	    (count_chars(s, '(') == 1) &&
	    (count_chars(s, ')') == 1) &&
	    (strlen(s) > 2) &&
	    (LASTCHAR(s) == ')'))
		return TRUE;

	/*
	 * If the string contains any of the operators except the shifters,
	 * and has any kind of data on either side, it's also eval-able.
	 */
	strcpy(work, s);
	if (!(op = strpbrk(work, "><+-&|*/%^")))
		return FALSE;

	/* split at the operator; both halves must be non-empty */
	element1 = &work[0];
	*op = NULLCHAR;
	element2 = op+1;

	if (!strlen(element1) || !strlen(element2))
		return FALSE;

	return TRUE;
}

/*
 * Evaluate an expression involving two values and an operator.
 */
#define OP_ADD (1)
#define OP_SUB (2)
#define OP_AND (3)
#define OP_OR (4)
#define OP_MUL (5)
#define OP_DIV (6)
#define OP_MOD (7)
#define OP_SL (8)
#define OP_SR (9)
#define OP_EXOR (10)
#define OP_POWER (11)

/*
 * Evaluate an expression, returning its value as an ulong.  On failure,
 * either a FATAL error is raised or *errptr is set, depending upon
 * FAULT_ON_ERROR/RETURN_ON_ERROR in the flags.
 */
ulong
eval(char *s, int flags, int *errptr)
{
	struct number_option nopt;

	if (eval_common(s, flags, errptr, &nopt)) {
		return(nopt.num);
	} else {
		switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
		{
		case FAULT_ON_ERROR:
			/* error(FATAL, ...) does not return */
			error(FATAL, "invalid expression: %s\n", s);
		case RETURN_ON_ERROR:
			error(INFO, "invalid expression: %s\n", s);
			if (errptr)
				*errptr = TRUE;
			break;
		}
		return UNUSED;
	}
}

/*
 * Same as eval(), but returning a 64-bit ulonglong; on 32-bit machines
 * the LONG_LONG flag is forced on.
 */
ulonglong
evall(char *s, int flags, int *errptr)
{
	struct number_option nopt;

	if (BITS32())
		flags |= LONG_LONG;

	if (eval_common(s, flags, errptr, &nopt)) {
		return(nopt.ll_num);
	} else {
		switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
		{
		case FAULT_ON_ERROR:
			error(FATAL, "invalid expression: %s\n", s);
		case RETURN_ON_ERROR:
			error(INFO, "invalid expression: %s\n", s);
			if (errptr)
				*errptr = TRUE;
			break;
		}
		return UNUSED;
	}
}

/*
 * Common worker for eval() and evall(): parses an optionally
 * parenthesized "value [op value]" expression into the passed-in
 * number_option.  Returns TRUE on success, FALSE if malformed.
 */
int
eval_common(char *s, int flags, int *errptr, struct number_option *np)
{
	char *p1, *p2;
	char *op, opcode;
	ulong value1;
	ulong value2;
	ulonglong ll_value1;
	ulonglong
ll_value2; char work[BUFSIZE]; char *element1; char *element2; struct syment *sp; opcode = 0; value1 = value2 = 0; ll_value1 = ll_value2 = 0; if (strstr(s, "(") || strstr(s, ")")) { p1 = s; if (*p1 != '(') goto malformed; if (LASTCHAR(s) != ')') goto malformed; p2 = &LASTCHAR(s); if (strstr(s, ")") != p2) goto malformed; strcpy(work, p1+1); LASTCHAR(work) = NULLCHAR; if (strstr(work, "(") || strstr(work, ")")) goto malformed; } else strcpy(work, s); if (work[0] == '-') { shift_string_right(work, 1); work[0] = '0'; } if (!(op = strpbrk(work, "#><+-&|*/%^"))) { if (calculate(work, &value1, &ll_value1, flags & (HEX_BIAS|LONG_LONG))) { if (flags & LONG_LONG) { np->ll_num = ll_value1; if (BITS32() && (ll_value1 > 0xffffffff)) np->retflags |= LONG_LONG; return TRUE; } else { np->num = value1; return TRUE; } } goto malformed; } switch (*op) { case '+': opcode = OP_ADD; break; case '-': opcode = OP_SUB; break; case '&': opcode = OP_AND; break; case '|': opcode = OP_OR; break; case '*': opcode = OP_MUL; break; case '%': opcode = OP_MOD; break; case '/': opcode = OP_DIV; break; case '<': if (*(op+1) != '<') goto malformed; opcode = OP_SL; break; case '>': if (*(op+1) != '>') goto malformed; opcode = OP_SR; break; case '^': opcode = OP_EXOR; break; case '#': opcode = OP_POWER; break; } element1 = &work[0]; *op = NULLCHAR; if ((opcode == OP_SL) || (opcode == OP_SR)) { *(op+1) = NULLCHAR; element2 = op+2; } else element2 = op+1; if (strlen(clean_line(element1)) == 0) goto malformed; if (strlen(clean_line(element2)) == 0) goto malformed; if ((sp = symbol_search(element1))) value1 = ll_value1 = sp->value; else { if (!calculate(element1, &value1, &ll_value1, flags & (HEX_BIAS|LONG_LONG))) goto malformed; if (BITS32() && (ll_value1 > 0xffffffff)) np->retflags |= LONG_LONG; } if ((sp = symbol_search(element2))) value2 = ll_value2 = sp->value; else if (!calculate(element2, &value2, &ll_value2, flags & (HEX_BIAS|LONG_LONG))) goto malformed; if (flags & LONG_LONG) { if (BITS32() && 
(ll_value2 > 0xffffffff)) np->retflags |= LONG_LONG; switch (opcode) { case OP_ADD: np->ll_num = (ll_value1 + ll_value2); break; case OP_SUB: np->ll_num = (ll_value1 - ll_value2); break; case OP_AND: np->ll_num = (ll_value1 & ll_value2); break; case OP_OR: np->ll_num = (ll_value1 | ll_value2); break; case OP_MUL: np->ll_num = (ll_value1 * ll_value2); break; case OP_DIV: np->ll_num = (ll_value1 / ll_value2); break; case OP_MOD: np->ll_num = (ll_value1 % ll_value2); break; case OP_SL: np->ll_num = (ll_value1 << ll_value2); break; case OP_SR: np->ll_num = (ll_value1 >> ll_value2); break; case OP_EXOR: np->ll_num = (ll_value1 ^ ll_value2); break; case OP_POWER: np->ll_num = ll_power(ll_value1, ll_value2); break; } } else { switch (opcode) { case OP_ADD: np->num = (value1 + value2); break; case OP_SUB: np->num = (value1 - value2); break; case OP_AND: np->num = (value1 & value2); break; case OP_OR: np->num = (value1 | value2); break; case OP_MUL: np->num = (value1 * value2); break; case OP_DIV: np->num = (value1 / value2); break; case OP_MOD: np->num = (value1 % value2); break; case OP_SL: np->num = (value1 << value2); break; case OP_SR: np->num = (value1 >> value2); break; case OP_EXOR: np->num = (value1 ^ value2); break; case OP_POWER: np->num = power(value1, value2); break; } } return TRUE; malformed: return FALSE; } /* * Take string containing a number, and possibly a multiplier, and calculate * its real value. The allowable multipliers are k, K, m, M, g and G, for * kilobytes, megabytes and gigabytes. */ int calculate(char *s, ulong *value, ulonglong *llvalue, ulong flags) { ulong factor, bias; int errflag; int ones_complement; ulong localval; ulonglong ll_localval; struct syment *sp; bias = flags & HEX_BIAS; if (*s == '~') { ones_complement = TRUE; s++; } else ones_complement = FALSE; if ((sp = symbol_search(s))) { if (flags & LONG_LONG) { *llvalue = (ulonglong)sp->value; if (ones_complement) *llvalue = ~(*llvalue); } else *value = ones_complement ? 
~(sp->value) : sp->value; return TRUE; } factor = 1; errflag = 0; switch (LASTCHAR(s)) { case 'k': case 'K': LASTCHAR(s) = NULLCHAR; if (IS_A_NUMBER(s)) factor = 1024; else return FALSE; break; case 'm': case 'M': LASTCHAR(s) = NULLCHAR; if (IS_A_NUMBER(s)) factor = (1024*1024); else return FALSE; break; case 'g': case 'G': LASTCHAR(s) = NULLCHAR; if (IS_A_NUMBER(s)) factor = (1024*1024*1024); else return FALSE; break; default: if (!IS_A_NUMBER(s)) return FALSE; break; } if (flags & LONG_LONG) { ll_localval = stoll(s, RETURN_ON_ERROR|bias, &errflag); if (errflag) return FALSE; if (ones_complement) *llvalue = ~(ll_localval * factor); else *llvalue = ll_localval * factor; } else { localval = stol(s, RETURN_ON_ERROR|bias, &errflag); if (errflag) return FALSE; if (ones_complement) *value = ~(localval * factor); else *value = localval * factor; } return TRUE; } /* * Print a 32-bit or 64-bit number in hexadecimal, decimal, octal and binary, * also showing the bits set if appropriate. * */ static void print_number(struct number_option *np, int bitflag, int longlongflagforce) { int i; ulong hibit; ulonglong ll_hibit; int ccnt; ulong mask; ulonglong ll_mask; char *hdr = " bits set: "; char buf[BUFSIZE]; int hdrlen; int longlongformat; longlongformat = longlongflagforce; if (!longlongflagforce) { if (BITS32()) { if (np->retflags & LONG_LONG) longlongformat = TRUE; if (np->ll_num > 0xffffffff) longlongformat = TRUE; else np->num = (ulong)np->ll_num; } } if (longlongformat) { ll_hibit = (ulonglong)(1) << ((sizeof(long long)*8)-1); fprintf(fp, "hexadecimal: %llx ", np->ll_num); if (np->ll_num >= KILOBYTES(1)) { if ((np->ll_num % GIGABYTES(1)) == 0) fprintf(fp, "(%lldGB)", np->ll_num / GIGABYTES(1)); else if ((np->ll_num % MEGABYTES(1)) == 0) fprintf(fp, "(%lldMB)", np->ll_num / MEGABYTES(1)); else if ((np->ll_num % KILOBYTES(1)) == 0) fprintf(fp, "(%lldKB)", np->ll_num / KILOBYTES(1)); } fprintf(fp, "\n"); fprintf(fp, " decimal: %llu ", np->ll_num); if ((long long)np->ll_num < 
0) fprintf(fp, "(%lld)\n", (long long)np->ll_num); else fprintf(fp, "\n"); fprintf(fp, " octal: %llo\n", np->ll_num); fprintf(fp, " binary: "); for(i = 0, ll_mask = np->ll_num; i < (sizeof(long long)*8); i++, ll_mask <<= 1) if (ll_mask & ll_hibit) fprintf(fp, "1"); else fprintf(fp, "0"); fprintf(fp,"\n"); } else { hibit = (ulong)(1) << ((sizeof(long)*8)-1); fprintf(fp, "hexadecimal: %lx ", np->num); if (np->num >= KILOBYTES(1)) { if ((np->num % GIGABYTES(1)) == 0) fprintf(fp, "(%ldGB)", np->num / GIGABYTES(1)); else if ((np->num % MEGABYTES(1)) == 0) fprintf(fp, "(%ldMB)", np->num / MEGABYTES(1)); else if ((np->num % KILOBYTES(1)) == 0) fprintf(fp, "(%ldKB)", np->num / KILOBYTES(1)); } fprintf(fp, "\n"); fprintf(fp, " decimal: %lu ", np->num); if ((long)np->num < 0) fprintf(fp, "(%ld)\n", (long)np->num); else fprintf(fp, "\n"); fprintf(fp, " octal: %lo\n", np->num); fprintf(fp, " binary: "); for(i = 0, mask = np->num; i < (sizeof(long)*8); i++, mask <<= 1) if (mask & hibit) fprintf(fp, "1"); else fprintf(fp, "0"); fprintf(fp,"\n"); } if (!bitflag) return; hdrlen = strlen(hdr); ccnt = hdrlen; fprintf(fp, "%s", hdr); if (longlongformat) { for (i = 63; i >= 0; i--) { ll_mask = (ulonglong)(1) << i; if (np->ll_num & ll_mask) { sprintf(buf, "%d ", i); fprintf(fp, "%s", buf); ccnt += strlen(buf); if (ccnt >= 77) { fprintf(fp, "\n"); INDENT(strlen(hdr)); ccnt = hdrlen; } } } } else { for (i = BITS()-1; i >= 0; i--) { mask = (ulong)(1) << i; if (np->num & mask) { sprintf(buf, "%d ", i); fprintf(fp, "%s", buf); ccnt += strlen(buf); if (ccnt >= 77) { fprintf(fp, "\n"); INDENT(strlen(hdr)); ccnt = hdrlen; } } } } fprintf(fp, "\n"); } /* * Display the contents of a linked list. Minimum requirements are a starting * address, typically of a structure which contains the "next" list entry at * some offset into the structure. The default offset is zero bytes, and need * not be entered if that's the case. 
Otherwise a number argument that's not * a kernel * virtual address will be understood to be the offset. * Alternatively the offset may be entered in "struct.member" format. Each * item in the list is dumped, and the list will be considered terminated upon * encountering a "next" value that is: * * a NULL pointer. * a pointer to the starting address. * a pointer to the entry pointed to by the starting address. * a pointer to the structure itself. * a pointer to the value specified with the "-e ending_addr" option. * * If the structures are linked using list_head structures, the -h or -H * options must be used. In that case, the "start" address is: * a pointer to the structure that contains the list_head structure (-h), * or a pointer to a LIST_HEAD() structure (-H). * * Given that the contents of the structures containing the next pointers * often contain useful data, the "-s structname" also prints each structure * in the list. * * By default, the list members are hashed to guard against duplicate entries * causing the list to wrap back upon itself. * * WARNING: There's an inordinate amount of work parsing arguments below * in order to maintain backwards compatibility re: not having to use -o, * which gets sticky with zero-based kernel virtual address space. */ void cmd_list(void) { int c; struct list_data list_data, *ld; struct datatype_member struct_member, *sm; struct syment *sp; ulong value, struct_list_offset; sm = &struct_member; ld = &list_data; BZERO(ld, sizeof(struct list_data)); struct_list_offset = 0; while ((c = getopt(argcnt, args, "Hhrs:S:e:o:xdl:")) != EOF) { switch(c) { case 'H': ld->flags |= LIST_HEAD_FORMAT; ld->flags |= LIST_HEAD_POINTER; break; case 'h': ld->flags |= LIST_HEAD_FORMAT; break; case 'r': ld->flags |= LIST_HEAD_REVERSE; break; case 's': case 'S': if (ld->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); ld->flags |= (c == 's') ? 
LIST_PARSE_MEMBER : LIST_READ_MEMBER; if (count_bits_long(ld->flags & (LIST_PARSE_MEMBER|LIST_READ_MEMBER)) > 1) error(FATAL, "-S and -s options are mutually exclusive\n"); break; case 'l': if (IS_A_NUMBER(optarg)) struct_list_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) struct_list_offset = sm->member_offset; else error(FATAL, "invalid -l option: %s\n", optarg); break; case 'o': if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %d (0x%lx) already entered\n", ld->member_offset, ld->member_offset); else if (IS_A_NUMBER(optarg)) ld->member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) ld->member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); ld->flags |= LIST_OFFSET_ENTERED; break; case 'e': ld->end = htol(optarg, FAULT_ON_ERROR, NULL); break; case 'x': if (ld->flags & LIST_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_16; break; case 'd': if (ld->flags & LIST_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); ld->flags |= LIST_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if (args[optind] && args[optind+1] && args[optind+2]) { error(INFO, "too many arguments\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->structname_args) { ld->structname = (char **)GETBUF(sizeof(char *) * ld->structname_args); retrieve_list((ulong *)ld->structname, ld->structname_args); hq_close(); ld->struct_list_offset = struct_list_offset; } else if (struct_list_offset) { error(INFO, "-l option can only be used with -s or -S option\n"); cmd_usage(pc->curcmd, SYNOPSIS); } while (args[optind]) { if (strstr(args[optind], ".") && arg_to_datatype(args[optind], sm, RETURN_ON_ERROR) > 1) { if (ld->flags & LIST_OFFSET_ENTERED) error(FATAL, "offset value %ld (0x%lx) already entered\n", ld->member_offset, 
ld->member_offset); ld->member_offset = sm->member_offset; ld->flags |= LIST_OFFSET_ENTERED; } else { /* * Do an inordinate amount of work to avoid -o... * * OK, if it's a symbol, then it has to be a start. */ if ((sp = symbol_search(args[optind]))) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = sp->value; ld->flags |= LIST_START_ENTERED; goto next_arg; } /* * If it's not a symbol nor a number, bail out if it * cannot be evaluated as a start address. */ if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { if (ld->flags & LIST_START_ENTERED) error(FATAL, "list start already entered\n"); ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } error(FATAL, "invalid argument: %s\n", args[optind]); } /* * If the start is known, it's got to be an offset. */ if (ld->flags & LIST_START_ENTERED) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; break; } /* * If the offset is known, or there's no subsequent * argument, then it's got to be a start. */ if ((ld->flags & LIST_OFFSET_ENTERED) || !args[optind+1]) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (!IS_KVADDR(value)) error(FATAL, "invalid kernel virtual address: %s\n", args[optind]); ld->start = value; ld->flags |= LIST_START_ENTERED; break; } /* * Neither start nor offset has been entered, and * it's a number. Look ahead to the next argument. * If it's a symbol, then this must be an offset. */ if ((sp = symbol_search(args[optind+1]))) { value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; goto next_arg; } else if ((!IS_A_NUMBER(args[optind+1]) && !can_eval(args[optind+1])) && !strstr(args[optind+1], ".")) error(FATAL, "symbol not found: %s\n", args[optind+1]); /* * Crunch time. We've got two numbers. 
If they're * both ambigous we must have zero-based kernel * virtual address space. */ if (COMMON_VADDR_SPACE() && AMBIGUOUS_NUMBER(args[optind]) && AMBIGUOUS_NUMBER(args[optind+1])) { error(INFO, "ambiguous arguments: \"%s\" and \"%s\": -o is required\n", args[optind], args[optind+1]); cmd_usage(pc->curcmd, SYNOPSIS); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { ld->start = value; ld->flags |= LIST_START_ENTERED; goto next_arg; } } value = stol(args[optind], FAULT_ON_ERROR, NULL); ld->member_offset = value; ld->flags |= LIST_OFFSET_ENTERED; } next_arg: optind++; } if (!(ld->flags & LIST_START_ENTERED)) { error(INFO, "starting address required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((ld->flags & LIST_OFFSET_ENTERED) && ld->struct_list_offset) { error(INFO, "-l and -o are mutually exclusive\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (ld->flags & LIST_HEAD_FORMAT) { ld->list_head_offset = ld->member_offset; if (ld->flags & LIST_HEAD_REVERSE) ld->member_offset = sizeof(void *); else ld->member_offset = 0; if (ld->flags & LIST_HEAD_POINTER) { if (!ld->end) ld->end = ld->start; readmem(ld->start + ld->member_offset, KVADDR, &ld->start, sizeof(void *), "LIST_HEAD contents", FAULT_ON_ERROR); if (ld->start == ld->end) { fprintf(fp, "(empty)\n"); return; } } else ld->start += ld->list_head_offset; } ld->flags &= ~(LIST_OFFSET_ENTERED|LIST_START_ENTERED); ld->flags |= VERBOSE; hq_open(); c = do_list(ld); hq_close(); if (ld->structname_args) FREEBUF(ld->structname); } void dump_struct_members_fast(struct req_entry *e, int radix, ulong p) { unsigned int i; char b[BUFSIZE]; if (!(e && IS_KVADDR(p))) return; if (!radix) radix = *gdb_output_radix; for (i = 0; i < e->count; i++) { if (0 < e->width[i] && (e->width[i] <= 8 || e->is_str[i])) { print_value(e, i, p, e->is_ptr[i] ? 
16 : radix); } else if (e->width[i] == 0 || e->width[i] > 8) { snprintf(b, BUFSIZE, "%s.%s", e->name, e->member[i]); dump_struct_member(b, p, radix); } } } static struct req_entry * fill_member_offsets(char *arg) { int j; char *p, m; struct req_entry *e; char buf[BUFSIZE]; if (!(arg && *arg)) return NULL; j = count_chars(arg, ',') + 1; e = (struct req_entry *)GETBUF(sizeof(*e)); e->arg = GETBUF(strlen(arg + 1)); strcpy(e->arg, arg); m = ((p = strchr(e->arg, '.')) != NULL); if (!p++) p = e->arg + strlen(e->arg) + 1; e->name = GETBUF(p - e->arg); strncpy(e->name, e->arg, p - e->arg - 1); if (!m) return e; e->count = count_chars(p, ',') + 1; e->width = (ulong *)GETBUF(e->count * sizeof(ulong)); e->is_ptr = (int *)GETBUF(e->count * sizeof(int)); e->is_str = (int *)GETBUF(e->count * sizeof(int)); e->member = (char **)GETBUF(e->count * sizeof(char *)); e->offset = (ulong *)GETBUF(e->count * sizeof(ulong)); replace_string(p, ",", ' '); parse_line(p, e->member); for (j = 0; j < e->count; j++) { e->offset[j] = MEMBER_OFFSET(e->name, e->member[j]); if (e->offset[j] == INVALID_OFFSET) e->offset[j] = ANON_MEMBER_OFFSET(e->name, e->member[j]); if (e->offset[j] == INVALID_OFFSET) error(FATAL, "Can't get offset of '%s.%s'\n", e->name, e->member[j]); e->is_ptr[j] = MEMBER_TYPE(e->name, e->member[j]) == TYPE_CODE_PTR; e->is_str[j] = is_string(e->name, e->member[j]); /* Dirty hack for obtaining size of particular field */ snprintf(buf, BUFSIZE, "%s + 1", e->member[j]); e->width[j] = ANON_MEMBER_OFFSET(e->name, buf) - e->offset[j]; } return e; } static void print_value(struct req_entry *e, unsigned int i, ulong addr, unsigned int radix) { union { uint64_t v64; uint32_t v32; uint16_t v16; uint8_t v8; } v; char buf[BUFSIZE]; struct syment *sym; addr += e->offset[i]; /* Read up to 8 bytes, counters, pointers, etc. 
*/ if (e->width[i] <= 8 && !readmem(addr, KVADDR, &v, e->width[i], "structure value", RETURN_ON_ERROR | QUIET)) { error(INFO, "cannot access member: %s at %lx\n", e->member[i], addr); return; } snprintf(buf, BUFSIZE, " %%s = %s%%%s%s", (radix == 16 ? "0x" : ""), (e->width[i] == 8 ? "l" : ""), (radix == 16 ? "x" : "u" ) ); switch (e->width[i]) { case 1: fprintf(fp, buf, e->member[i], v.v8); break; case 2: fprintf(fp, buf, e->member[i], v.v16); break; case 4: fprintf(fp, buf, e->member[i], v.v32); break; case 8: fprintf(fp, buf, e->member[i], v.v64); break; } if (e->is_str[i]) { if (e->is_ptr[i]) { read_string(v.v64, buf, BUFSIZE); fprintf(fp, " \"%s\"", buf); } else { read_string(addr, buf, e->width[i]); fprintf(fp, " %s = \"%s\"", e->member[i], buf); } } else if ((sym = value_search(v.v64, 0)) && is_symbol_text(sym)) fprintf(fp, " <%s>", sym->name); fprintf(fp, "\n"); } /* * Does the work for cmd_list() and any other function that requires the * contents of a linked list. See cmd_list description above for details. */ int do_list(struct list_data *ld) { ulong next, last, first, offset; ulong searchfor, readflag; int i, count, others, close_hq_on_return; unsigned int radix; struct req_entry **e = NULL; if (CRASHDEBUG(1)) { others = 0; console(" flags: %lx (", ld->flags); if (ld->flags & VERBOSE) console("%sVERBOSE", others++ ? "|" : ""); if (ld->flags & LIST_OFFSET_ENTERED) console("%sLIST_OFFSET_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_START_ENTERED) console("%sLIST_START_ENTERED", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_FORMAT) console("%sLIST_HEAD_FORMAT", others++ ? "|" : ""); if (ld->flags & LIST_HEAD_POINTER) console("%sLIST_HEAD_POINTER", others++ ? "|" : ""); if (ld->flags & RETURN_ON_DUPLICATE) console("%sRETURN_ON_DUPLICATE", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? "|" : ""); if (ld->flags & RETURN_ON_LIST_ERROR) console("%sRETURN_ON_LIST_ERROR", others++ ? 
"|" : ""); if (ld->flags & LIST_STRUCT_RADIX_10) console("%sLIST_STRUCT_RADIX_10", others++ ? "|" : ""); if (ld->flags & LIST_STRUCT_RADIX_16) console("%sLIST_STRUCT_RADIX_16", others++ ? "|" : ""); if (ld->flags & LIST_ALLOCATE) console("%sLIST_ALLOCATE", others++ ? "|" : ""); if (ld->flags & LIST_CALLBACK) console("%sLIST_CALLBACK", others++ ? "|" : ""); if (ld->flags & CALLBACK_RETURN) console("%sCALLBACK_RETURN", others++ ? "|" : ""); console(")\n"); console(" start: %lx\n", ld->start); console(" member_offset: %ld\n", ld->member_offset); console(" list_head_offset: %ld\n", ld->list_head_offset); console(" end: %lx\n", ld->end); console(" searchfor: %lx\n", ld->searchfor); console(" structname_args: %lx\n", ld->structname_args); if (!ld->structname_args) console(" structname: (unused)\n"); for (i = 0; i < ld->structname_args; i++) console(" structname[%d]: %s\n", i, ld->structname[i]); console(" header: %s\n", ld->header); console(" list_ptr: %lx\n", (ulong)ld->list_ptr); console(" callback_func: %lx\n", (ulong)ld->callback_func); console(" callback_data: %lx\n", (ulong)ld->callback_data); console("struct_list_offset: %lx\n", ld->struct_list_offset); } count = 0; searchfor = ld->searchfor; ld->searchfor = 0; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; next = ld->start; close_hq_on_return = FALSE; if (ld->flags & LIST_ALLOCATE) { if (!hq_is_open()) { hq_open(); close_hq_on_return = TRUE; } else if (hq_is_inuse()) { error(ld->flags & RETURN_ON_LIST_ERROR ? INFO : FATAL, "\ndo_list: hash queue is in use?\n"); return -1; } } readflag = ld->flags & RETURN_ON_LIST_ERROR ? 
(RETURN_ON_ERROR|QUIET) : FAULT_ON_ERROR; if (!readmem(next + ld->member_offset, KVADDR, &first, sizeof(void *), "first list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (ld->header) fprintf(fp, "%s", ld->header); offset = ld->list_head_offset + ld->struct_list_offset; if (ld->structname && (ld->flags & LIST_READ_MEMBER)) { e = (struct req_entry **)GETBUF(sizeof(*e) * ld->structname_args); for (i = 0; i < ld->structname_args; i++) e[i] = fill_member_offsets(ld->structname[i]); } while (1) { if (ld->flags & VERBOSE) { fprintf(fp, "%lx\n", next - ld->list_head_offset); if (ld->structname) { for (i = 0; i < ld->structname_args; i++) { switch (count_chars(ld->structname[i], '.')) { case 0: dump_struct(ld->structname[i], next - offset, radix); break; default: if (ld->flags & LIST_PARSE_MEMBER) dump_struct_members(ld, i, next); else if (ld->flags & LIST_READ_MEMBER) dump_struct_members_fast(e[i], radix, next - offset); break; } } } } if (next && !hq_enter(next - ld->list_head_offset)) { if (ld->flags & (RETURN_ON_DUPLICATE|RETURN_ON_LIST_ERROR)) { error(INFO, "\nduplicate list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } error(FATAL, "\nduplicate list entry: %lx\n", next); } if ((searchfor == next) || (searchfor == (next - ld->list_head_offset))) ld->searchfor = searchfor; count++; last = next; if ((ld->flags & LIST_CALLBACK) && ld->callback_func((void *)(next - ld->list_head_offset), ld->callback_data) && (ld->flags & CALLBACK_RETURN)) break; if (!readmem(next + ld->member_offset, KVADDR, &next, sizeof(void *), "list entry", readflag)) { error(INFO, "\ninvalid list entry: %lx\n", next); if (close_hq_on_return) hq_close(); return -1; } if (next == 0) { if (ld->flags & LIST_HEAD_FORMAT) { error(INFO, "\ninvalid list entry: 0\n"); if (close_hq_on_return) hq_close(); return -1; } if (CRASHDEBUG(1)) console("do_list end: next:%lx\n", next); break; } if (next == ld->end) { 
if (CRASHDEBUG(1)) console("do_list end: next:%lx == end:%lx\n", next, ld->end); break; } if (next == ld->start) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == start:%lx\n", next, ld->start); break; } if (next == last) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == last:%lx\n", next, last); break; } if ((next == first) && (count != 1)) { if (CRASHDEBUG(1)) console("do_list end: next:%lx == first:%lx (count %d)\n", next, last, count); break; } } if (CRASHDEBUG(1)) console("do_list count: %d\n", count); if (ld->flags & LIST_ALLOCATE) { ld->list_ptr = (ulong *)GETBUF(count * sizeof(void *)); count = retrieve_list(ld->list_ptr, count); if (close_hq_on_return) hq_close(); } return count; } /* * Issue a dump_struct_member() call for one or more structure * members. Multiple members are passed in a comma-separated * list using the the format: * * struct.member1,member2,member3 */ void dump_struct_members(struct list_data *ld, int idx, ulong next) { int i, argc; char *p1, *p2; char *structname, *members; char *arglist[MAXARGS]; unsigned int radix; if (ld->flags & LIST_STRUCT_RADIX_10) radix = 10; else if (ld->flags & LIST_STRUCT_RADIX_16) radix = 16; else radix = 0; structname = GETBUF(strlen(ld->structname[idx])+1); members = GETBUF(strlen(ld->structname[idx])+1); strcpy(structname, ld->structname[idx]); p1 = strstr(structname, ".") + 1; p2 = strstr(ld->structname[idx], ".") + 1; strcpy(members, p2); replace_string(members, ",", ' '); argc = parse_line(members, arglist); for (i = 0; i < argc; i++) { *p1 = NULLCHAR; strcat(structname, arglist[i]); dump_struct_member(structname, next - ld->list_head_offset - ld->struct_list_offset, radix); } FREEBUF(structname); FREEBUF(members); } #define RADIXTREE_REQUEST (0x1) #define RBTREE_REQUEST (0x2) void cmd_tree() { int c, type_flag, others; long root_offset; struct tree_data tree_data, *td; struct datatype_member struct_member, *sm; struct syment *sp; ulong value; type_flag = 0; root_offset = 0; sm = 
&struct_member; td = &tree_data; BZERO(td, sizeof(struct tree_data)); while ((c = getopt(argcnt, args, "xdt:r:o:s:S:pN")) != EOF) { switch (c) { case 't': if (type_flag & (RADIXTREE_REQUEST|RBTREE_REQUEST)) { error(INFO, "multiple tree types may not be entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (STRNEQ(optarg, "ra")) type_flag = RADIXTREE_REQUEST; else if (STRNEQ(optarg, "rb")) type_flag = RBTREE_REQUEST; else { error(INFO, "invalid tree type: %s\n", optarg); cmd_usage(pc->curcmd, SYNOPSIS); } break; case 'r': if (td->flags & TREE_ROOT_OFFSET_ENTERED) error(FATAL, "root offset value %d (0x%lx) already entered\n", root_offset, root_offset); else if (IS_A_NUMBER(optarg)) root_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) root_offset = sm->member_offset; else error(FATAL, "invalid -r argument: %s\n", optarg); td->flags |= TREE_ROOT_OFFSET_ENTERED; break; case 'o': if (td->flags & TREE_NODE_OFFSET_ENTERED) error(FATAL, "node offset value %d (0x%lx) already entered\n", td->node_member_offset, td->node_member_offset); else if (IS_A_NUMBER(optarg)) td->node_member_offset = stol(optarg, FAULT_ON_ERROR, NULL); else if (arg_to_datatype(optarg, sm, RETURN_ON_ERROR) > 1) td->node_member_offset = sm->member_offset; else error(FATAL, "invalid -o argument: %s\n", optarg); td->flags |= TREE_NODE_OFFSET_ENTERED; break; case 's': case 'S': if (td->structname_args++ == 0) hq_open(); hq_enter((ulong)optarg); td->flags |= (c == 's') ? 
TREE_PARSE_MEMBER : TREE_READ_MEMBER; if (count_bits_long(td->flags & (TREE_PARSE_MEMBER|TREE_READ_MEMBER)) > 1) error(FATAL, "-S and -s options are mutually exclusive\n"); break; case 'p': td->flags |= TREE_POSITION_DISPLAY; break; case 'N': td->flags |= TREE_NODE_POINTER; break; case 'x': if (td->flags & TREE_STRUCT_RADIX_10) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_16; break; case 'd': if (td->flags & TREE_STRUCT_RADIX_16) error(FATAL, "-d and -x are mutually exclusive\n"); td->flags |= TREE_STRUCT_RADIX_10; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((type_flag & RADIXTREE_REQUEST) && (td->flags & TREE_NODE_OFFSET_ENTERED)) error(FATAL, "-o option is not applicable to radix trees\n"); if ((td->flags & TREE_ROOT_OFFSET_ENTERED) && (td->flags & TREE_NODE_POINTER)) error(INFO, "-r and -N options are mutually exclusive\n"); if (!args[optind]) { error(INFO, "a starting address is required\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if ((sp = symbol_search(args[optind]))) { td->start = sp->value; goto next_arg; } if (!IS_A_NUMBER(args[optind])) { if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); } if (hexadecimal_only(args[optind], 0)) { value = htol(args[optind], FAULT_ON_ERROR, NULL); if (IS_KVADDR(value)) { td->start = value; goto next_arg; } } error(FATAL, "invalid start argument: %s\n", args[optind]); next_arg: if (args[optind+1]) { error(INFO, "too many arguments entered\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (td->structname_args) { td->structname = (char **)GETBUF(sizeof(char *) * td->structname_args); retrieve_list((ulong *)td->structname, td->structname_args); hq_close(); } if (!(td->flags & TREE_NODE_POINTER)) td->start = td->start + root_offset; if (CRASHDEBUG(1)) { others = 0; fprintf(fp, " flags: %lx (", td->flags); if 
(td->flags & TREE_ROOT_OFFSET_ENTERED)
			fprintf(fp, "%sTREE_ROOT_OFFSET_ENTERED",
				others++ ? "|" : "");
		if (td->flags & TREE_NODE_OFFSET_ENTERED)
			fprintf(fp, "%sTREE_NODE_OFFSET_ENTERED",
				others++ ? "|" : "");
		if (td->flags & TREE_NODE_POINTER)
			fprintf(fp, "%sTREE_NODE_POINTER",
				others++ ? "|" : "");
		if (td->flags & TREE_POSITION_DISPLAY)
			fprintf(fp, "%sTREE_POSITION_DISPLAY",
				others++ ? "|" : "");
		if (td->flags & TREE_STRUCT_RADIX_10)
			fprintf(fp, "%sTREE_STRUCT_RADIX_10",
				others++ ? "|" : "");
		if (td->flags & TREE_STRUCT_RADIX_16)
			fprintf(fp, "%sTREE_STRUCT_RADIX_16",
				others++ ? "|" : "");
		fprintf(fp, ")\n");
		fprintf(fp, " type: %s\n",
			type_flag & RADIXTREE_REQUEST ? "radix" : "red-black");
		fprintf(fp, " node pointer: %s\n",
			td->flags & TREE_NODE_POINTER ? "yes" : "no");
		fprintf(fp, " start: %lx\n", td->start);
		fprintf(fp, "node_member_offset: %ld\n", td->node_member_offset);
		fprintf(fp, " structname_args: %d\n", td->structname_args);
		fprintf(fp, " count: %d\n", td->count);
	}

	td->flags &= ~TREE_NODE_OFFSET_ENTERED;
	td->flags |= VERBOSE;

	/* Open a hash session for duplicate-node detection during the walk. */
	hq_open();
	if (type_flag & RADIXTREE_REQUEST)
		do_rdtree(td);
	else
		do_rbtree(td);
	hq_close();

	if (td->structname_args)
		FREEBUF(td->structname);
}

/*
 * Radix-tree geometry, computed once per session by do_radix_tree_traverse()
 * from the length of radix_tree_node.slots[] in the target kernel.
 */
static ulong RADIX_TREE_MAP_SHIFT = UNINITIALIZED;
static ulong RADIX_TREE_MAP_SIZE = UNINITIALIZED;
static ulong RADIX_TREE_MAP_MASK = UNINITIALIZED;

#define RADIX_TREE_ENTRY_MASK 3UL
#define RADIX_TREE_INTERNAL_NODE 1UL

/*
 * Recursively walk one radix_tree_node, invoking ops->entry() for each
 * populated leaf slot.  Duplicate node addresses abort the walk (cyclic
 * or corrupt tree).
 */
static void
do_radix_tree_iter(ulong node, uint height, char *path,
		   ulong index, struct radix_tree_ops *ops)
{
	uint off;

	if (!hq_enter(node))
		error(FATAL, "\nduplicate tree node: %lx\n", node);

	for (off = 0; off < RADIX_TREE_MAP_SIZE; off++) {
		ulong slot;
		ulong shift = (height - 1) * RADIX_TREE_MAP_SHIFT;

		readmem(node + OFFSET(radix_tree_node_slots) +
			sizeof(void *) * off, KVADDR, &slot, sizeof(void *),
			"radix_tree_node.slot[off]", FAULT_ON_ERROR);
		if (!slot)
			continue;

		/* Strip the internal-node tag bit before following. */
		if (slot & RADIX_TREE_INTERNAL_NODE)
			slot &= ~RADIX_TREE_INTERNAL_NODE;

		if (height == 1)
			ops->entry(node, slot, path, index | off, ops->private);
		else {
			ulong child_index = index | (off << shift);
			char child_path[BUFSIZE];
			sprintf(child_path, "%s/%d", path, off);
			do_radix_tree_iter(slot, height - 1,
				child_path, child_index, ops);
		}
	}
}

/*
 * Walk a radix tree rooted at "ptr", which is either a radix_tree_root
 * (is_root != 0) or a bare radix_tree_node pointer (-N option).  Supports
 * both the older height/height_to_maxindex layout and the newer
 * shift/height_to_maxnodes layout.  Returns 0, or -1 via error path.
 */
int
do_radix_tree_traverse(ulong ptr, int is_root, struct radix_tree_ops *ops)
{
	static ulong max_height = UNINITIALIZED;
	ulong node_p;
	long nlen;
	uint height, is_internal;
	unsigned char shift;
	char path[BUFSIZE];

	if (!VALID_STRUCT(radix_tree_root) || !VALID_STRUCT(radix_tree_node) ||
	    ((!VALID_MEMBER(radix_tree_root_height) ||
	      !VALID_MEMBER(radix_tree_root_rnode) ||
	      !VALID_MEMBER(radix_tree_node_slots) ||
	      !ARRAY_LENGTH(height_to_maxindex)) &&
	     (!VALID_MEMBER(radix_tree_root_rnode) ||
	      !VALID_MEMBER(radix_tree_node_shift) ||
	      !VALID_MEMBER(radix_tree_node_slots) ||
	      !ARRAY_LENGTH(height_to_maxnodes))))
		error(FATAL, "radix trees do not exist or have changed "
			"their format\n");

	/* Derive fan-out from the slots[] array length, once. */
	if (RADIX_TREE_MAP_SHIFT == UNINITIALIZED) {
		if (!(nlen = MEMBER_SIZE("radix_tree_node", "slots")))
			error(FATAL, "cannot determine length of "
				"radix_tree_node.slots[] array\n");
		nlen /= sizeof(void *);
		RADIX_TREE_MAP_SHIFT = ffsl(nlen) - 1;
		RADIX_TREE_MAP_SIZE = (1UL << RADIX_TREE_MAP_SHIFT);
		RADIX_TREE_MAP_MASK = (RADIX_TREE_MAP_SIZE-1);

		if (ARRAY_LENGTH(height_to_maxindex))
			max_height = ARRAY_LENGTH(height_to_maxindex);
		else
			max_height = ARRAY_LENGTH(height_to_maxnodes);
	}

	height = 0;
	if (!is_root) {
		node_p = ptr;

		if (node_p & RADIX_TREE_INTERNAL_NODE)
			node_p &= ~RADIX_TREE_INTERNAL_NODE;

		if (VALID_MEMBER(radix_tree_node_height)) {
			readmem(node_p + OFFSET(radix_tree_node_height),
				KVADDR, &height, sizeof(uint),
				"radix_tree_node height", FAULT_ON_ERROR);
		} else if (VALID_MEMBER(radix_tree_node_shift)) {
			readmem(node_p + OFFSET(radix_tree_node_shift),
				KVADDR, &shift, sizeof(shift),
				"radix_tree_node shift", FAULT_ON_ERROR);
			height = (shift / RADIX_TREE_MAP_SHIFT) + 1;
		} else
			error(FATAL,
			    "-N option is not supported or applicable"
			    " for radix trees on this architecture or kernel\n");
		if (height > max_height)
			goto error_height;
	} else {
		if (VALID_MEMBER(radix_tree_root_height)) {
			readmem(ptr + OFFSET(radix_tree_root_height), KVADDR,
				&height, sizeof(uint), "radix_tree_root height",
				FAULT_ON_ERROR);
		}

		readmem(ptr + OFFSET(radix_tree_root_rnode), KVADDR, &node_p,
			sizeof(void *), "radix_tree_root rnode",
			FAULT_ON_ERROR);
		is_internal = (node_p & RADIX_TREE_INTERNAL_NODE);
		if (node_p & RADIX_TREE_INTERNAL_NODE)
			node_p &= ~RADIX_TREE_INTERNAL_NODE;

		if (is_internal && VALID_MEMBER(radix_tree_node_shift)) {
			readmem(node_p + OFFSET(radix_tree_node_shift), KVADDR,
				&shift, sizeof(shift), "radix_tree_node shift",
				FAULT_ON_ERROR);
			height = (shift / RADIX_TREE_MAP_SHIFT) + 1;
		}

		if (height > max_height) {
			node_p = ptr;
			goto error_height;
		}
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "radix_tree_node.slots[%ld]\n",
			RADIX_TREE_MAP_SIZE);
		fprintf(fp, "max_height %ld: ", max_height);
		fprintf(fp, "\n");
		fprintf(fp, "pointer at %lx (is_root? %s):\n",
			node_p, is_root ? "yes" : "no");
		if (is_root)
			dump_struct("radix_tree_root", ptr, RADIX(ops->radix));
		else
			dump_struct("radix_tree_node", node_p,
				RADIX(ops->radix));
	}

	if (height == 0) {
		/* Single direct entry hangs off the root. */
		strcpy(path, "direct");
		ops->entry(node_p, node_p, path, 0, ops->private);
	} else {
		strcpy(path, "root");
		do_radix_tree_iter(node_p, height, path, 0, ops);
	}

	return 0;

error_height:
	fprintf(fp, "radix_tree_node at %lx\n", node_p);
	dump_struct("radix_tree_node", node_p, RADIX(ops->radix));
	error(FATAL, "height %d is greater than "
		"maximum radix tree height index %ld\n",
		height, max_height);
	return -1;
}

/*
 * radix_tree_ops entry callback for the "tree -t radix" command: prints
 * each slot value and, if requested, its position and structure members.
 */
static void
do_rdtree_entry(ulong node, ulong slot, const char *path,
		ulong index, void *private)
{
	struct tree_data *td = private;
	static struct req_entry **e = NULL;
	uint print_radix;
	int i;

	if (!td->count && td->structname_args) {
		/*
		 * Retrieve all members' info only once (count == 0)
		 * After last iteration all memory will be freed up
		 */
		e = (struct req_entry **)GETBUF(sizeof(*e) *
			td->structname_args);
		for (i = 0; i < td->structname_args; i++)
			e[i] = fill_member_offsets(td->structname[i]);
	}

	td->count++;

	if (td->flags & VERBOSE)
		fprintf(fp, "%lx\n", slot);

	if (td->flags & TREE_POSITION_DISPLAY) {
		fprintf(fp, " position: %s/%ld\n",
			path, index & RADIX_TREE_MAP_MASK);
	}

	if (td->structname) {
		if (td->flags & TREE_STRUCT_RADIX_10)
			print_radix = 10;
		else if (td->flags & TREE_STRUCT_RADIX_16)
			print_radix = 16;
		else
			print_radix = 0;

		for (i = 0; i < td->structname_args; i++) {
			switch (count_chars(td->structname[i], '.')) {
			case 0:
				dump_struct(td->structname[i], slot,
					print_radix);
				break;
			default:
				/* "struct.member" form: dump members only. */
				if (td->flags & TREE_PARSE_MEMBER)
					dump_struct_members_for_tree(td, i,
						slot);
				else if (td->flags & TREE_READ_MEMBER)
					dump_struct_members_fast(e[i],
						print_radix, slot);
				break;
			}
		}
	}
}

/*
 * Entry point for radix-tree dumping: wires tree_data into a
 * radix_tree_ops and runs the generic traversal.
 */
int
do_rdtree(struct tree_data *td)
{
	struct radix_tree_ops ops = {
		.entry = do_rdtree_entry,
		.private = td,
	};
	int is_root = !(td->flags & TREE_NODE_POINTER);

	if (td->flags & TREE_STRUCT_RADIX_10)
		ops.radix = 10;
	else if (td->flags & TREE_STRUCT_RADIX_16)
		ops.radix = 16;
	else
		ops.radix = 0;

	do_radix_tree_traverse(td->start, is_root, &ops);

	return 0;
}

/*
 * Entry point for red-black tree dumping; returns the number of nodes
 * visited.
 */
int
do_rbtree(struct tree_data *td)
{
	ulong start;
	char pos[BUFSIZE];

	if (!VALID_MEMBER(rb_root_rb_node) || !VALID_MEMBER(rb_node_rb_left) ||
	    !VALID_MEMBER(rb_node_rb_right))
		error(FATAL, "red-black trees do not exist or have changed "
			"their format\n");

	sprintf(pos, "root");

	if (td->flags & TREE_NODE_POINTER)
		start = td->start;
	else
		readmem(td->start + OFFSET(rb_root_rb_node), KVADDR, &start,
			sizeof(void *), "rb_root rb_node", FAULT_ON_ERROR);

	rbtree_iteration(start, td, pos);

	return td->count;
}

/*
 * Recursive pre-order walk of an rb_node subtree.  "pos" accumulates the
 * root/l/r path string for TREE_POSITION_DISPLAY output.
 */
void
rbtree_iteration(ulong node_p, struct tree_data *td, char *pos)
{
	int i;
	uint print_radix;
	ulong struct_p, left_p, right_p;
	char left_pos[BUFSIZE], right_pos[BUFSIZE];
	static struct req_entry **e;

	if (!node_p)
		return;

	if (!td->count && td->structname_args) {
		/*
		 * Retrieve all members' info only once (count == 0)
		 * After last iteration all memory will be freed up
		 */
		e = (struct req_entry **)GETBUF(sizeof(*e) *
			td->structname_args);
		for (i = 0; i < td->structname_args; i++)
			e[i] = fill_member_offsets(td->structname[i]);
	}

	/* Duplicate node address implies a cycle: abort. */
	if (hq_enter(node_p))
		td->count++;
	else
		error(FATAL, "\nduplicate tree entry: %lx\n", node_p);

	/* Back up from the embedded rb_node to the containing structure. */
	struct_p = node_p - td->node_member_offset;

	if (td->flags & VERBOSE)
		fprintf(fp, "%lx\n", struct_p);

	if (td->flags & TREE_POSITION_DISPLAY)
		fprintf(fp, " position: %s\n", pos);

	if (td->structname) {
		if (td->flags & TREE_STRUCT_RADIX_10)
			print_radix = 10;
		else if (td->flags & TREE_STRUCT_RADIX_16)
			print_radix = 16;
		else
			print_radix = 0;

		for (i = 0; i < td->structname_args; i++) {
			switch(count_chars(td->structname[i], '.')) {
			case 0:
				dump_struct(td->structname[i], struct_p,
					print_radix);
				break;
			default:
				if (td->flags & TREE_PARSE_MEMBER)
					dump_struct_members_for_tree(td, i,
						struct_p);
				else if (td->flags & TREE_READ_MEMBER)
					dump_struct_members_fast(e[i],
						print_radix, struct_p);
				break;
			}
		}
	}

	readmem(node_p+OFFSET(rb_node_rb_left), KVADDR, &left_p,
		sizeof(void *), "rb_node rb_left", FAULT_ON_ERROR);
	readmem(node_p+OFFSET(rb_node_rb_right), KVADDR, &right_p,
		sizeof(void *), "rb_node rb_right", FAULT_ON_ERROR);

	sprintf(left_pos, "%s/l", pos);
	sprintf(right_pos, "%s/r", pos);

	rbtree_iteration(left_p, td, left_pos);
	rbtree_iteration(right_p, td, right_pos);
}

/*
 * Dump only the requested "struct.member1,member2" members of the
 * structure at struct_p.
 */
void
dump_struct_members_for_tree(struct tree_data *td, int idx, ulong struct_p)
{
	int i, argc;
	uint print_radix;
	char *p1;
	char *structname, *members;
	char *arglist[MAXARGS];

	if (td->flags & TREE_STRUCT_RADIX_10)
		print_radix = 10;
	else if (td->flags & TREE_STRUCT_RADIX_16)
		print_radix = 16;
	else
		print_radix = 0;

	structname = GETBUF(strlen(td->structname[idx])+1);
	members = GETBUF(strlen(td->structname[idx])+1);

	strcpy(structname, td->structname[idx]);
	p1 = strstr(structname, ".") + 1;

	strcpy(members, p1);
	replace_string(members, ",", ' ');
	argc = parse_line(members, arglist);

	/*
	 * NOTE(review): the text between "for (i = 0; i" and "pageshift)"
	 * below appears to have been lost when this copy of the file was
	 * extracted (angle-bracketed text stripped) — the member-dumping
	 * loop body, the function epilogue, and the HQ_SHIFT definition
	 * are missing.  Verify against the upstream crash tools.c.
	 */
	for (i = 0; i pageshift)
#define HQ_INDEX(X) (((X) >> HQ_SHIFT) % pc->nr_hash_queues)

struct
hq_entry {
	int next;	/* index of next entry on this queue; 0 = end */
	int order;	/* insertion order (1-based); 0 = entry unused */
	ulong value;	/* the hashed value itself */
};

struct hq_head {
	int next;	/* index of first entry on this queue; 0 = empty */
	int qcnt;	/* number of entries on this queue */
};

struct hash_table {
	ulong flags;
	struct hq_head *queue_heads;	/* one bucket per hash queue */
	struct hq_entry *memptr;	/* flat pool of entries */
	long count;			/* entries allocated in memptr */
	long index;			/* highest entry index in use */
	int reallocs;			/* times the pool was grown */
} hash_table = { 0 };

/*
 * For starters, allocate a hash table containing HQ_ENTRY_CHUNK entries.
 * If necessary during runtime, it will be increased in size.
 */
void
hq_init(void)
{
	struct hash_table *ht;

	ht = &hash_table;

	if (pc->nr_hash_queues == 0)
		pc->nr_hash_queues = NR_HASH_QUEUES_DEFAULT;

	if ((ht->queue_heads = (struct hq_head *)malloc(pc->nr_hash_queues *
	    sizeof(struct hq_head))) == NULL) {
		/* Hashing becomes unavailable, but the session continues. */
		error(INFO, "cannot malloc memory for hash queue heads: %s\n",
			strerror(errno));
		ht->flags = HASH_QUEUE_NONE;
		pc->flags &= ~HASH;
		return;
	}

	if ((ht->memptr = (struct hq_entry *)malloc(HQ_ENTRY_CHUNK *
	    sizeof(struct hq_entry))) == NULL) {
		error(INFO, "cannot malloc memory for hash queues: %s\n",
			strerror(errno));
		ht->flags = HASH_QUEUE_NONE;
		pc->flags &= ~HASH;
		return;
	}
	BZERO(ht->memptr, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));

	ht->count = HQ_ENTRY_CHUNK;
	ht->index = 0;
}

/*
 * Get a free hash queue entry. If there's no more available, realloc()
 * a new chunk of memory with another HQ_ENTRY_CHUNK entries stuck on the end.
 */
static long
alloc_hq_entry(void)
{
	struct hash_table *ht;
	struct hq_entry *new, *end_of_old;

	ht = &hash_table;

	/* Pre-increment: entry 0 is never handed out (0 means "unused"). */
	if (++ht->index == ht->count) {
		if (!(new = (void *)realloc((void *)ht->memptr,
		    (ht->count+HQ_ENTRY_CHUNK) * sizeof(struct hq_entry)))) {
			error(INFO,
			    "cannot realloc memory for hash queues: %s\n",
				strerror(errno));
			ht->flags |= HASH_QUEUE_FULL;
			return(-1);
		}
		ht->reallocs++;

		ht->memptr = new;
		end_of_old = ht->memptr + ht->count;
		BZERO(end_of_old, HQ_ENTRY_CHUNK * sizeof(struct hq_entry));
		ht->count += HQ_ENTRY_CHUNK;
	}

	return(ht->index);
}

/*
 * Restore the hash queue to its state before the duplicate entry
 * was attempted.
 */
static void
dealloc_hq_entry(struct hq_entry *entry)
{
	struct hash_table *ht;
	long hqi;

	ht = &hash_table;
	hqi = HQ_INDEX(entry->value);

	ht->index--;

	BZERO(entry, sizeof(struct hq_entry));

	ht->queue_heads[hqi].qcnt--;
}

/*
 * Initialize the hash table for a hashing session.
 */
int
hq_open(void)
{
	struct hash_table *ht;

	if (!(pc->flags & HASH))
		return FALSE;

	ht = &hash_table;
	/* Refuse to open when unusable or already open (nesting). */
	if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_OPEN))
		return FALSE;

	ht->flags &= ~(HASH_QUEUE_FULL|HASH_QUEUE_CLOSED);
	BZERO(ht->queue_heads, sizeof(struct hq_head) * pc->nr_hash_queues);
	BZERO(ht->memptr, ht->count * sizeof(struct hq_entry));
	ht->index = 0;

	ht->flags |= HASH_QUEUE_OPEN;

	return TRUE;
}

/*
 * Report whether a hashing session is currently open.
 */
int
hq_is_open(void)
{
	struct hash_table *ht;

	ht = &hash_table;
	return (ht->flags & HASH_QUEUE_OPEN ? TRUE : FALSE);
}

/*
 * Report whether the open session has entered any values.
 */
int
hq_is_inuse(void)
{
	struct hash_table *ht;

	if (!hq_is_open())
		return FALSE;

	ht = &hash_table;
	return (ht->index ? TRUE : FALSE);
}

/*
 * Close the hash table, returning the number of items hashed in this session.
 */
int
hq_close(void)
{
	struct hash_table *ht;

	ht = &hash_table;

	ht->flags &= ~(HASH_QUEUE_OPEN);
	ht->flags |= HASH_QUEUE_CLOSED;

	if (!(pc->flags & HASH))
		return(0);

	if (ht->flags & HASH_QUEUE_NONE)
		return(0);

	ht->flags &= ~HASH_QUEUE_FULL;

	return(ht->index);
}

char *corrupt_hq = "corrupt hash queue entry: value: %lx next: %d order: %d\n";

/*
 * For a given value, allocate a hash queue entry and hash it into the
 * open hash table. If a duplicate entry is found, return FALSE; for all
 * other possibilities return TRUE. Note that it's up to the user to deal
*/ int hq_enter(ulong value) { struct hash_table *ht; struct hq_entry *entry; struct hq_entry *list_entry; long hqi; long index; if (!(pc->flags & HASH)) return TRUE; ht = &hash_table; if (ht->flags & (HASH_QUEUE_NONE|HASH_QUEUE_FULL)) return TRUE; if (!(ht->flags & HASH_QUEUE_OPEN)) return TRUE; if ((index = alloc_hq_entry()) < 0) return TRUE; entry = ht->memptr + index; if (entry->next || entry->value || entry->order) { error(INFO, corrupt_hq, entry->value, entry->next, entry->order); ht->flags |= HASH_QUEUE_NONE; return TRUE; } entry->next = 0; entry->value = value; entry->order = index; hqi = HQ_INDEX(value); if (ht->queue_heads[hqi].next == 0) { ht->queue_heads[hqi].next = index; ht->queue_heads[hqi].qcnt = 1; return TRUE; } else ht->queue_heads[hqi].qcnt++; list_entry = ht->memptr + ht->queue_heads[hqi].next; while (TRUE) { if (list_entry->value == entry->value) { dealloc_hq_entry(entry); return FALSE; } if (list_entry->next >= ht->count) { error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return TRUE; } if (list_entry->next == 0) break; list_entry = ht->memptr + list_entry->next; } list_entry->next = index; return TRUE; } /* * "hash -d" output */ void dump_hash_table(int verbose) { int i; struct hash_table *ht; struct hq_entry *list_entry; long elements; long queues_in_use; int others; uint minq, maxq; ht = &hash_table; others = 0; fprintf(fp, " flags: %lx (", ht->flags); if (ht->flags & HASH_QUEUE_NONE) fprintf(fp, "%sHASH_QUEUE_NONE", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_OPEN) fprintf(fp, "%sHASH_QUEUE_OPEN", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_CLOSED) fprintf(fp, "%sHASH_QUEUE_CLOSED", others++ ? "|" : ""); if (ht->flags & HASH_QUEUE_FULL) fprintf(fp, "%sHASH_QUEUE_FULL", others++ ? 
"|" : ""); fprintf(fp, ")\n"); fprintf(fp, " queue_heads[%ld]: %lx\n", pc->nr_hash_queues, (ulong)ht->queue_heads); fprintf(fp, " memptr: %lx\n", (ulong)ht->memptr); fprintf(fp, " count: %ld ", ht->count); if (ht->reallocs) fprintf(fp, " (%d reallocs)", ht->reallocs); fprintf(fp, "\n"); fprintf(fp, " index: %ld\n", ht->index); queues_in_use = 0; minq = ~(0); maxq = 0; for (i = 0; i < pc->nr_hash_queues; i++) { if (ht->queue_heads[i].next == 0) { minq = 0; continue; } if (ht->queue_heads[i].qcnt < minq) minq = ht->queue_heads[i].qcnt; if (ht->queue_heads[i].qcnt > maxq) maxq = ht->queue_heads[i].qcnt; queues_in_use++; } elements = 0; list_entry = ht->memptr; for (i = 0; i < ht->count; i++, list_entry++) { if (!list_entry->order) { if (list_entry->value || list_entry->next) goto corrupt_list_entry; continue; } if (list_entry->next >= ht->count) goto corrupt_list_entry; ++elements; } if (elements != ht->index) fprintf(fp, " elements found: %ld (expected %ld)\n", elements, ht->index); fprintf(fp, " queues in use: %ld of %ld\n", queues_in_use, pc->nr_hash_queues); fprintf(fp, " queue length range: %d to %d\n", minq, maxq); if (verbose) { if (!elements) { fprintf(fp, " entries: (none)\n"); return; } fprintf(fp, " entries: "); list_entry = ht->memptr; for (i = 0; i < ht->count; i++, list_entry++) { if (list_entry->order) fprintf(fp, "%s%lx (%d)\n", list_entry->order == 1 ? "" : " ", list_entry->value, list_entry->order); } } return; corrupt_list_entry: error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; } /* * Retrieve the count of, and optionally stuff a pre-allocated array with, * the current hash table entries. The entries will be sorted according * to the order in which they were entered, so from this point on, no * further hq_enter() operations on this list will be allowed. 
However, * multiple calls to retrieve_list are allowed because the second and * subsequent ones will go directly to where the non-zero (valid) entries * start in the potentially very large list_entry memory chunk. */ int retrieve_list(ulong array[], int count) { int i; struct hash_table *ht; struct hq_entry *list_entry; int elements; if (!(pc->flags & HASH)) error(FATAL, "cannot perform this command with hash turned off\n"); ht = &hash_table; list_entry = ht->memptr; for (i = elements = 0; i < ht->count; i++, list_entry++) { if (!list_entry->order) { if (list_entry->value || list_entry->next) goto corrupt_list_entry; continue; } if (list_entry->next >= ht->count) goto corrupt_list_entry; if (array) array[elements] = list_entry->value; if (++elements == count) break; } return elements; corrupt_list_entry: error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return(-1); } /* * For a given value, check to see if a hash queue entry exists. If an * entry is found, return TRUE; for all other possibilities return FALSE. 
*/ int hq_entry_exists(ulong value) { struct hash_table *ht; struct hq_entry *list_entry; long hqi; if (!(pc->flags & HASH)) return FALSE; ht = &hash_table; if (ht->flags & (HASH_QUEUE_NONE)) return FALSE; if (!(ht->flags & HASH_QUEUE_OPEN)) return FALSE; hqi = HQ_INDEX(value); list_entry = ht->memptr + ht->queue_heads[hqi].next; while (TRUE) { if (list_entry->value == value) return TRUE; if (list_entry->next >= ht->count) { error(INFO, corrupt_hq, list_entry->value, list_entry->next, list_entry->order); ht->flags |= HASH_QUEUE_NONE; return FALSE; } if (list_entry->next == 0) break; list_entry = ht->memptr + list_entry->next; } return FALSE; } /* * K&R power function for integers */ long power(long base, int exp) { int i; long p; p = 1; for (i = 1; i <= exp; i++) p = p * base; return p; } long long ll_power(long long base, long long exp) { long long i; long long p; p = 1; for (i = 1; i <= exp; i++) p = p * base; return p; } /* * Internal buffer allocation scheme to avoid inline malloc() calls and * resultant memory leaks due to aborted commands. These buffers are * for TEMPORARY use on a per-command basis. They are allocated by calls * to GETBUF(size). 
 They can be explicitly freed by FREEBUF(address), but
 * they are all freed by free_all_bufs(), which is called in a number of
 * places.
 */

#define NUMBER_1K_BUFS (10)
#define NUMBER_2K_BUFS (10)
#define NUMBER_4K_BUFS (5)
#define NUMBER_8K_BUFS (5)
#define NUMBER_32K_BUFS (1)

/* Bitmask values meaning "all buffers of this size are in use". */
#define SHARED_1K_BUF_FULL (0x003ff)
#define SHARED_2K_BUF_FULL (0x003ff)
#define SHARED_4K_BUF_FULL (0x0001f)
#define SHARED_8K_BUF_FULL (0x0001f)
#define SHARED_32K_BUF_FULL (0x00001)

#define SHARED_1K_BUF_AVAIL(X) \
	(NUMBER_1K_BUFS && !(((X) & SHARED_1K_BUF_FULL) == SHARED_1K_BUF_FULL))
#define SHARED_2K_BUF_AVAIL(X) \
	(NUMBER_2K_BUFS && !(((X) & SHARED_2K_BUF_FULL) == SHARED_2K_BUF_FULL))
#define SHARED_4K_BUF_AVAIL(X) \
	(NUMBER_4K_BUFS && !(((X) & SHARED_4K_BUF_FULL) == SHARED_4K_BUF_FULL))
#define SHARED_8K_BUF_AVAIL(X) \
	(NUMBER_8K_BUFS && !(((X) & SHARED_8K_BUF_FULL) == SHARED_8K_BUF_FULL))
#define SHARED_32K_BUF_AVAIL(X) \
	(NUMBER_32K_BUFS && !(((X) & SHARED_32K_BUF_FULL) == SHARED_32K_BUF_FULL))

/* Indices into buf_inuse[] per size class. */
#define B1K (0)
#define B2K (1)
#define B4K (2)
#define B8K (3)
#define B32K (4)

#define SHARED_BUF_SIZES (B32K+1)
#define MAX_MALLOC_BUFS (2000)
#define MAX_CACHE_SIZE (KILOBYTES(32))

struct shared_bufs {
	/* The static buffer pools themselves. */
	char buf_1K[NUMBER_1K_BUFS][1024];
	char buf_2K[NUMBER_2K_BUFS][2048];
	char buf_4K[NUMBER_4K_BUFS][4096];
	char buf_8K[NUMBER_8K_BUFS][8192];
	char buf_32K[NUMBER_32K_BUFS][32768];
	/* Cumulative use counts per pool. */
	long buf_1K_used;
	long buf_2K_used;
	long buf_4K_used;
	long buf_8K_used;
	long buf_32K_used;
	/* High-water marks of simultaneous use per pool. */
	long buf_1K_maxuse;
	long buf_2K_maxuse;
	long buf_4K_maxuse;
	long buf_8K_maxuse;
	long buf_32K_maxuse;
	/* Times a pool was full and the request overflowed upward. */
	long buf_1K_ovf;
	long buf_2K_ovf;
	long buf_4K_ovf;
	long buf_8K_ovf;
	long buf_32K_ovf;
	int buf_inuse[SHARED_BUF_SIZES];	/* per-pool in-use bitmasks */
	char *malloc_bp[MAX_MALLOC_BUFS];	/* fallback malloc'd buffers */
	long smallest;		/* smallest request seen */
	long largest;		/* largest request seen */
	long embedded;		/* current GETBUF nesting depth */
	long max_embedded;	/* deepest nesting seen */
	long mallocs;		/* fallback allocations made */
	long frees;		/* fallback allocations freed */
	double total;		/* sum of all requested sizes */
	ulong reqs;		/* total number of requests */
} shared_bufs;

/*
 * One-time zeroing of the shared-buffer bookkeeping.
 */
void
buf_init(void)
{
	struct shared_bufs *bp;

	bp = &shared_bufs;
	BZERO(bp, sizeof(struct shared_bufs));
	bp->smallest = 0x7fffffff;
	bp->total = 0.0;
}

/*
 * Free up all buffers used by the last command.
 */
void
free_all_bufs(void)
{
	int i;
	struct shared_bufs *bp;

	bp = &shared_bufs;
	bp->embedded = 0;

	/* Static buffers are "freed" by clearing their in-use bits. */
	for (i = 0; i < SHARED_BUF_SIZES; i++)
		bp->buf_inuse[i] = 0;

	for (i = 0; i < MAX_MALLOC_BUFS; i++) {
		if (bp->malloc_bp[i]) {
			free(bp->malloc_bp[i]);
			bp->malloc_bp[i] = NULL;
			bp->frees++;
		}
	}

	if (bp->mallocs != bp->frees)
		error(WARNING, "malloc/free mismatch (%ld/%ld)\n",
			bp->mallocs, bp->frees);
}

/*
 * Free a specific buffer that may have been returned by malloc().
 * If the address is one of the static buffers, look for it and
 * clear its inuse bit.
 */
void
freebuf(char *addr)
{
	int i;
	struct shared_bufs *bp;

	bp = &shared_bufs;
	bp->embedded--;

	if (CRASHDEBUG(8)) {
		INDENT(bp->embedded*2);
		fprintf(fp, "FREEBUF(%ld)\n", bp->embedded);
	}

	for (i = 0; i < NUMBER_1K_BUFS; i++) {
		if (addr == (char *)&bp->buf_1K[i]) {
			bp->buf_inuse[B1K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_2K_BUFS; i++) {
		if (addr == (char *)&bp->buf_2K[i]) {
			bp->buf_inuse[B2K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_4K_BUFS; i++) {
		if (addr == (char *)&bp->buf_4K[i]) {
			bp->buf_inuse[B4K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_8K_BUFS; i++) {
		if (addr == (char *)&bp->buf_8K[i]) {
			bp->buf_inuse[B8K] &= ~(1 << i);
			return;
		}
	}

	for (i = 0; i < NUMBER_32K_BUFS; i++) {
		if (addr == (char *)&bp->buf_32K[i]) {
			bp->buf_inuse[B32K] &= ~(1 << i);
			return;
		}
	}

	/* Not a static buffer: must be one of the malloc'd fallbacks. */
	for (i = 0; i < MAX_MALLOC_BUFS; i++) {
		if (bp->malloc_bp[i] == addr) {
			free(bp->malloc_bp[i]);
			bp->malloc_bp[i] = NULL;
			bp->frees++;
			return;
		}
	}

	error(FATAL,
	    "freeing an unknown buffer -- shared buffer inconsistency!\n");
}

/* DEBUG */
void
dump_embedded(char *s)
{
	struct shared_bufs *bp;
	char *p1;

	p1 = s ? s : "";
	bp = &shared_bufs;

	console("%s: embedded: %ld mallocs: %ld frees: %ld\n",
		p1, bp->embedded, bp->mallocs, bp->frees);
}

/* DEBUG */
long
get_embedded(void)
{
	struct shared_bufs *bp;

	bp = &shared_bufs;
	return(bp->embedded);
}

/*
 * "help -b" output
 */
void
dump_shared_bufs(void)
{
	int i;
	struct shared_bufs *bp;

	bp = &shared_bufs;

	fprintf(fp, " buf_1K_used: %ld\n", bp->buf_1K_used);
	fprintf(fp, " buf_2K_used: %ld\n", bp->buf_2K_used);
	fprintf(fp, " buf_4K_used: %ld\n", bp->buf_4K_used);
	fprintf(fp, " buf_8K_used: %ld\n", bp->buf_8K_used);
	fprintf(fp, " buf_32K_used: %ld\n", bp->buf_32K_used);

	fprintf(fp, " buf_1K_ovf: %ld\n", bp->buf_1K_ovf);
	fprintf(fp, " buf_2K_ovf: %ld\n", bp->buf_2K_ovf);
	fprintf(fp, " buf_4K_ovf: %ld\n", bp->buf_4K_ovf);
	fprintf(fp, " buf_8K_ovf: %ld\n", bp->buf_8K_ovf);
	fprintf(fp, " buf_32K_ovf: %ld\n", bp->buf_32K_ovf);

	fprintf(fp, " buf_1K_maxuse: %2ld of %d\n",
		bp->buf_1K_maxuse, NUMBER_1K_BUFS);
	fprintf(fp, " buf_2K_maxuse: %2ld of %d\n",
		bp->buf_2K_maxuse, NUMBER_2K_BUFS);
	fprintf(fp, " buf_4K_maxuse: %2ld of %d\n",
		bp->buf_4K_maxuse, NUMBER_4K_BUFS);
	fprintf(fp, " buf_8K_maxuse: %2ld of %d\n",
		bp->buf_8K_maxuse, NUMBER_8K_BUFS);
	fprintf(fp, "buf_32K_maxuse: %2ld of %d\n",
		bp->buf_32K_maxuse, NUMBER_32K_BUFS);

	fprintf(fp, " buf_inuse[%d]: ", SHARED_BUF_SIZES);
	for (i = 0; i < SHARED_BUF_SIZES; i++)
		fprintf(fp, "[%lx]", (ulong)bp->buf_inuse[i]);
	fprintf(fp, "\n");

	for (i = 0; i < MAX_MALLOC_BUFS; i++)
		if (bp->malloc_bp[i])
			fprintf(fp, " malloc_bp[%d]: %lx\n",
				i, (ulong)bp->malloc_bp[i]);

	if (bp->smallest == 0x7fffffff)
		fprintf(fp, " smallest: 0\n");
	else
		fprintf(fp, " smallest: %ld\n", bp->smallest);
	fprintf(fp, " largest: %ld\n", bp->largest);

	fprintf(fp, " embedded: %ld\n", bp->embedded);
	fprintf(fp, " max_embedded: %ld\n", bp->max_embedded);
	fprintf(fp, " mallocs: %ld\n", bp->mallocs);
	fprintf(fp, " frees: %ld\n", bp->frees);
	fprintf(fp, " reqs/total: %ld/%.0f\n", bp->reqs, bp->total);
	fprintf(fp, " average size: %.0f\n",
bp->total/bp->reqs);
}

/*
 * Try to get one of the static buffers first. If not available, fall
 * through and get it from malloc(), keeping trace of the returned address.
 */

/* Map a request size to a size-class selector (1024>>7 == 8, etc.). */
#define SHARED_BUFSIZE(size) \
	((size <= 1024) ? 1024 >> 7 : \
	((size <= 2048) ? 2048 >> 7 : \
	((size <= 4096) ? 4096 >> 7 : \
	((size <= 8192) ? 8192 >> 7 : \
	((size <= 32768) ? 32768 >> 7 : -1)))))

char *
getbuf(long reqsize)
{
	int i;
	int index;
	int bdx;
	int mask;
	struct shared_bufs *bp;
	char *bufp;

	if (!reqsize) {
		ulong retaddr = (ulong)__builtin_return_address(0);
		error(FATAL,
		    "zero-size memory allocation! (called from %lx)\n",
			retaddr);
	}

	bp = &shared_bufs;

	index = SHARED_BUFSIZE(reqsize);

	if (CRASHDEBUG(7) && (reqsize > MAX_CACHE_SIZE))
		error(NOTE, "GETBUF request > MAX_CACHE_SIZE: %ld\n", reqsize);

	if (CRASHDEBUG(8)) {
		INDENT(bp->embedded*2);
		fprintf(fp, "GETBUF(%ld -> %ld)\n", reqsize, bp->embedded);
	}

	bp->embedded++;
	if (bp->embedded > bp->max_embedded)
		bp->max_embedded = bp->embedded;

	if (reqsize < bp->smallest)
		bp->smallest = reqsize;
	if (reqsize > bp->largest)
		bp->largest = reqsize;

	bp->total += reqsize;
	bp->reqs++;

	/*
	 * Each case deliberately falls through to the next larger size
	 * class when its own pool is exhausted.
	 */
	switch (index)
	{
	case -1:
		break;

	case 8:
		if (SHARED_1K_BUF_AVAIL(bp->buf_inuse[B1K])) {
			mask = ~(bp->buf_inuse[B1K]);
			bdx = ffs(mask) - 1;	/* first free slot */
			bufp = bp->buf_1K[bdx];
			bp->buf_1K_used++;
			bp->buf_inuse[B1K] |= (1 << bdx);
			bp->buf_1K_maxuse = MAX(bp->buf_1K_maxuse,
				count_bits_int(bp->buf_inuse[B1K]));
			BZERO(bufp, 1024);
			return(bufp);
		}
		bp->buf_1K_ovf++;
		/* FALLTHROUGH */

	case 16:
		if (SHARED_2K_BUF_AVAIL(bp->buf_inuse[B2K])) {
			mask = ~(bp->buf_inuse[B2K]);
			bdx = ffs(mask) - 1;
			bufp = bp->buf_2K[bdx];
			bp->buf_2K_used++;
			bp->buf_inuse[B2K] |= (1 << bdx);
			bp->buf_2K_maxuse = MAX(bp->buf_2K_maxuse,
				count_bits_int(bp->buf_inuse[B2K]));
			BZERO(bufp, 2048);
			return(bufp);
		}
		bp->buf_2K_ovf++;
		/* FALLTHROUGH */

	case 32:
		if (SHARED_4K_BUF_AVAIL(bp->buf_inuse[B4K])) {
			mask = ~(bp->buf_inuse[B4K]);
			bdx = ffs(mask) - 1;
			bufp = bp->buf_4K[bdx];
			bp->buf_4K_used++;
			bp->buf_inuse[B4K] |= (1 << bdx);
			bp->buf_4K_maxuse = MAX(bp->buf_4K_maxuse,
				count_bits_int(bp->buf_inuse[B4K]));
			BZERO(bufp, 4096);
			return(bufp);
		}
		bp->buf_4K_ovf++;
		/* FALLTHROUGH */

	case 64:
		if (SHARED_8K_BUF_AVAIL(bp->buf_inuse[B8K])) {
			mask = ~(bp->buf_inuse[B8K]);
			bdx = ffs(mask) - 1;
			bufp = bp->buf_8K[bdx];
			bp->buf_8K_used++;
			bp->buf_inuse[B8K] |= (1 << bdx);
			bp->buf_8K_maxuse = MAX(bp->buf_8K_maxuse,
				count_bits_int(bp->buf_inuse[B8K]));
			BZERO(bufp, 8192);
			return(bufp);
		}
		bp->buf_8K_ovf++;
		/* FALLTHROUGH */

	case 256:
		if (SHARED_32K_BUF_AVAIL(bp->buf_inuse[B32K])) {
			mask = ~(bp->buf_inuse[B32K]);
			bdx = ffs(mask) - 1;
			bufp = bp->buf_32K[bdx];
			bp->buf_32K_used++;
			bp->buf_inuse[B32K] |= (1 << bdx);
			bp->buf_32K_maxuse = MAX(bp->buf_32K_maxuse,
				count_bits_int(bp->buf_inuse[B32K]));
			BZERO(bufp, 32768);
			return(bufp);
		}
		bp->buf_32K_ovf++;
		break;
	}

	/* No static buffer available: fall back to calloc(). */
	for (i = 0; i < MAX_MALLOC_BUFS; i++) {
		if (bp->malloc_bp[i])
			continue;
		if ((bp->malloc_bp[i] = (char *)calloc(reqsize, 1))) {
			bp->mallocs++;
			return(bp->malloc_bp[i]);
		}
		break;
	}

	dump_shared_bufs();

	return ((char *)(long)
		error(FATAL, "cannot allocate any more memory!\n"));
}

/*
 * Change the size of the previously-allocated memory block
 * pointed to by oldbuf to newsize bytes. Copy the minimum
 * of oldsize and newsize bytes from the oldbuf to the newbuf,
 * and return the address of the new buffer, which will have
 * a different address than oldbuf.
 */
char *
resizebuf(char *oldbuf, long oldsize, long newsize)
{
	char *newbuf;

	newbuf = GETBUF(newsize);
	BCOPY(oldbuf, newbuf, MIN(oldsize, newsize));
	FREEBUF(oldbuf);

	return newbuf;
}

/*
 * Duplicate a string into a buffer allocated with GETBUF().
 */
char *
strdupbuf(char *oldstring)
{
	char *newstring;

	newstring = GETBUF(strlen(oldstring)+1);
	strcpy(newstring, oldstring);

	return newstring;
}

/*
 * Return the number of bits set in an int or long.
 */
int
count_bits_int(int val)
{
	int i, cnt;
	int total;

	cnt = sizeof(int) * 8;

	for (i = total = 0; i < cnt; i++) {
		if (val & 1)
			total++;
		val >>= 1;
	}

	return total;
}

int
count_bits_long(ulong val)
{
	int i, cnt;
	int total;

	cnt = sizeof(long) * 8;

	for (i = total = 0; i < cnt; i++) {
		if (val & 1)
			total++;
		val >>= 1;
	}

	return total;
}

/*
 * Return the position of the highest set bit, or -1 if none are set.
 */
int
highest_bit_long(ulong val)
{
	int i, cnt;
	int total;
	int highest;

	highest = -1;
	cnt = sizeof(long) * 8;

	for (i = total = 0; i < cnt; i++) {
		if (val & 1)
			highest = i;
		val >>= 1;
	}

	return highest;
}

/*
 * Return the position of the lowest set bit, or -1 if none are set.
 */
int
lowest_bit_long(ulong val)
{
	int i, cnt;
	int lowest;

	lowest = -1;
	cnt = sizeof(long) * 8;

	for (i = 0; i < cnt; i++) {
		if (val & 1) {
			lowest = i;
			break;
		}
		val >>= 1;
	}

	return lowest;
}

/*
 * Debug routine to stop whatever's going on in its tracks.
 */
void
drop_core(char *s)
{
	volatile int *nullptr;
	int i ATTRIBUTE_UNUSED;

	if (s && ascii_string(s))
		fprintf(stderr, "%s", s);

	kill((pid_t)pc->program_pid, 3);

	/* Deliberate NULL dereference to force a core dump. */
	nullptr = NULL;
	while (TRUE)
		i = *nullptr;
}

/*
 * For debug output to a device other than the current terminal.
 * pc->console must have been preset by:
 *
 * 1. by an .rc file setting: "set console /dev/whatever"
 * 2. by a runtime command: "set console /dev/whatever"
 * 3. during program invocation: "-c /dev/whatever"
 *
 * The first time it's called, the device will be opened.
 */
int
console(char *fmt, ...)
{
	char output[BUFSIZE*2];
	va_list ap;

	if (!pc->console || !strlen(pc->console) ||
	    (pc->flags & NO_CONSOLE) || (pc->confd == -1))
		return 0;

	if (!fmt || !strlen(fmt))
		return 0;

	va_start(ap, fmt);
	(void)vsnprintf(output, BUFSIZE*2, fmt, ap);
	va_end(ap);

	/* confd == -2 means "configured but not yet opened". */
	if (pc->confd == -2) {
		if ((pc->confd = open(pc->console,
		    O_WRONLY|O_NDELAY)) < 0) {
			error(INFO, "console device %s: %s\n",
				pc->console, strerror(errno), 0, 0);
			return 0;
		}
	}

	return(write(pc->confd, output, strlen(output)));
}

/*
 * Allocate space to store the designated console device name.
 * If a console device pre-exists, free its name space and close the device.
*/ void create_console_device(char *dev) { if (pc->console) { if (pc->confd != -1) close(pc->confd); free(pc->console); } pc->confd = -2; if ((pc->console = (char *)malloc(strlen(dev)+1)) == NULL) fprintf(stderr, "console name malloc: %s\n", strerror(errno)); else { strcpy(pc->console, dev); if (console("debug console [%ld]: %s\n", pc->program_pid, (ulong)pc->console) < 0) { close(pc->confd); free(pc->console); pc->console = NULL; pc->confd = -1; if (!(pc->flags & RUNTIME)) error(INFO, "cannot set console to %s\n", dev); } } } /* * Disable console output without closing the device. * Typically used with CONSOLE_OFF() macro. */ int console_off(void) { int orig_no_console; orig_no_console = pc->flags & NO_CONSOLE; pc->flags |= NO_CONSOLE; return orig_no_console; } /* * Re-enable console output. Typically used with CONSOLE_ON() macro. */ int console_on(int orig_no_console) { if (!orig_no_console) pc->flags &= ~NO_CONSOLE; return(pc->flags & NO_CONSOLE); } /* * Print a string to the console device with no formatting, useful for * sending strings containing % signs. */ int console_verbatim(char *s) { char *p; int cnt; if (!pc->console || !strlen(pc->console) || (pc->flags & NO_CONSOLE) || (pc->confd == -1)) return 0; if (!s || !strlen(s)) return 0; if (pc->confd == -2) { if ((pc->confd = open(pc->console, O_WRONLY|O_NDELAY)) < 0) { fprintf(stderr, "%s: %s\n", pc->console, strerror(errno)); return 0; } } for (cnt = 0, p = s; *p; p++) { if (write(pc->confd, p, 1) != 1) break; cnt++; } return cnt; } /* * Set up a signal handler. */ void sigsetup(int sig, void *handler, struct sigaction *act,struct sigaction *oldact) { BZERO(act, sizeof(struct sigaction)); act->sa_handler = handler; act->sa_flags = SA_NOMASK; sigaction(sig, act, oldact); } /* * Convert a jiffies-based time value into a string showing the * the number of days, hours:minutes:seconds. 
*/

#define SEC_MINUTES  (60)
#define SEC_HOURS    (60 * SEC_MINUTES)
#define SEC_DAYS     (24 * SEC_HOURS)

/*
 * Convert a jiffies count into "[D days, ]HH:MM:SS" in the caller's
 * buffer, dividing by the kernel's HZ value.  Returns buf.
 */
char *
convert_time(ulonglong count, char *buf)
{
	ulonglong total, days, hours, minutes, seconds;

	if (CRASHDEBUG(2))
		error(INFO, "convert_time: %lld (%llx)\n", count, count);

	/* without HZ the jiffies count cannot be scaled to seconds */
	if (!machdep->hz) {
		sprintf(buf, "(cannot calculate: unknown HZ value)");
		return buf;
	}

	total = (count)/(ulonglong)machdep->hz;

	days = total / SEC_DAYS;
	total %= SEC_DAYS;
	hours = total / SEC_HOURS;
	total %= SEC_HOURS;
	minutes = total / SEC_MINUTES;
	seconds = total % SEC_MINUTES;

	buf[0] = NULLCHAR;
	if (days)
		sprintf(buf, "%llu days, ", days);
	sprintf(&buf[strlen(buf)], "%02llu:%02llu:%02llu",
		hours, minutes, seconds);

	return buf;
}

/*
 * Stall for a number of microseconds by select()ing on no descriptors.
 */
void
stall(ulong microseconds)
{
	struct timeval delay;

	delay.tv_sec = 0;
	/* NOTE(review): tv_usec is __suseconds_t; __time_t cast kept as-is */
	delay.tv_usec = (__time_t)microseconds;

	(void) select(0, (fd_set *) 0, (fd_set *) 0, (fd_set *) 0, &delay);
}

/*
 * Fill a buffer with a page count translated to a GB/MB/KB value.
 * A trailing ".0" fraction is squeezed out (e.g. "4.0 GB" -> "4 GB").
 */
char *
pages_to_size(ulong pages, char *buf)
{
	double total;
	char *p1, *p2;

	if (pages == 0) {
		sprintf(buf, "0");
		return buf;
	}

	total = (double)pages * (double)PAGESIZE();

	if (total >= GIGABYTES(1))
		sprintf(buf, "%.1f GB", total/(double)GIGABYTES(1));
	else if (total >= MEGABYTES(1))
		sprintf(buf, "%.1f MB", total/(double)MEGABYTES(1));
	else
		sprintf(buf, "%ld KB", (ulong)(total/(double)KILOBYTES(1)));

	/* strip a ".0" fractional part, shifting the suffix left */
	if ((p1 = strstr(buf, ".0 "))) {
		p2 = p1 + 3;
		*p1++ = ' ';
		strcpy(p1, p2);
	}

	return buf;
}

/*
 * If the list_head.next value points to itself, it's an empty list.
*/

/*
 * Return TRUE if the list_head at the given kernel address is empty
 * (next points back to the head).  An unreadable head also reports
 * TRUE, i.e. "nothing to walk".
 */
int
empty_list(ulong list_head_addr)
{
	ulong next;

	if (!readmem(list_head_addr, KVADDR, &next, sizeof(void *),
	    "list_head next contents", RETURN_ON_ERROR))
		return TRUE;

	return (next == list_head_addr);
}

/*
 * Return TRUE if the passed-in string matches this binary's
 * MACHINE_TYPE.
 */
int
machine_type(char *type)
{
	return STREQ(MACHINE_TYPE, type);
}

/*
 * Warn (once) when a dumpfile's machine type does not match the
 * crash binary; returns TRUE on mismatch, FALSE when either the
 * primary or alternate type matches.
 */
int
machine_type_mismatch(char *file, char *e_machine, char *alt, ulong query)
{
	if (machine_type(e_machine) || machine_type(alt))
		return FALSE;

	if (query == KDUMP_LOCAL)  /* already printed by NETDUMP_LOCAL */
		return TRUE;

	error(WARNING, "machine type mismatch:\n");

	fprintf(fp, " crash utility: %s\n", MACHINE_TYPE);
	fprintf(fp, " %s: %s%s%s\n\n", file, e_machine,
		alt ? " or " : "", alt ? alt : "");

	return TRUE;
}

/* NOTE(review): empty parameter list -- "(void)" would be stricter C */
void
command_not_supported()
{
	error(FATAL,
	    "command not supported or applicable on this architecture or kernel\n");
}

void
option_not_supported(int c)
{
	error(FATAL,
	    "-%c option not supported or applicable on this architecture or kernel\n",
		(char)c);
}

/* length of the last "please wait" banner, for please_wait_done() erase */
static int please_wait_len = 0;

/*
 * Display a transient "please wait... (reason)" banner while reading
 * a dumpfile; suppressed when silent, live, or already at runtime.
 */
void
please_wait(char *s)
{
	int fd;
	char buf[BUFSIZE];

	if ((pc->flags & SILENT) || !DUMPFILE() ||
	    (pc->flags & RUNTIME))
		return;

	/* for KVM dumps without a tty flag, verify a terminal exists */
	if (!(pc->flags & TTY) && KVMDUMP_DUMPFILE()) {
		if (!isatty(fileno(stdin)) ||
		    ((fd = open("/dev/tty", O_RDONLY)) < 0))
			return;
		close(fd);
	}

	pc->flags |= PLEASE_WAIT;
	please_wait_len = sprintf(buf, "\rplease wait... (%s)", s);
	fprintf(fp, "%s", buf);
	fflush(fp);
}

/*
 * Erase the banner written by please_wait() by overwriting it with
 * spaces and returning the cursor to column 0.
 */
void
please_wait_done(void)
{
	if (!(pc->flags & PLEASE_WAIT))
		return;

	pc->flags &= ~PLEASE_WAIT;
	fprintf(fp, "\r");
	pad_line(fp, please_wait_len, ' ');
	fprintf(fp, "\r");
	fflush(fp);
}

/*
 * Compare two pathnames, treating any run of slashes as a single
 * slash and ignoring a trailing slash; returns 0 when equivalent,
 * otherwise the difference of the first differing characters.
 */
int
pathcmp(char *p1, char *p2)
{
	char c1, c2;

	do {
		if ((c1 = *p1++) == '/')
			while (*p1 == '/') { p1++; }
		if ((c2 = *p2++) == '/')
			while (*p2 == '/') { p2++; }
		if (c1 == '\0')
			return ((c2 == '/') && (*p2 == '\0')) ? 0 : c1 - c2;
	} while (c1 == c2);

	return ((c2 == '\0') && (c1 == '/') && (*p1 == '\0')) ? 0 : c1 - c2;
}

/* NOTE(review): header name lost in extraction -- presumably <endian.h>
 * given the __BYTE_ORDER usage below; confirm against pristine source */
#include

/*
 * Check the byte-order of an ELF file vs. the host byte order.
*/

/*
 * Check the byte order recorded in a dumpfile's ELF header against the
 * byte order of the host.  Returns FALSE when they agree; otherwise a
 * warning is printed (unless the caller already did) and TRUE returned.
 */
int
endian_mismatch(char *file, char dumpfile_endian, ulong query)
{
	char *endian;

	if (dumpfile_endian == ELFDATA2LSB) {
		if (__BYTE_ORDER == __LITTLE_ENDIAN)
			return FALSE;
		endian = "little-endian";
	} else if (dumpfile_endian == ELFDATA2MSB) {
		if (__BYTE_ORDER == __BIG_ENDIAN)
			return FALSE;
		endian = "big-endian";
	} else
		endian = "unknown";

	if (query == KDUMP_LOCAL)	/* already printed by NETDUMP_LOCAL */
		return TRUE;

	error(WARNING, "endian mismatch:\n");

	fprintf(fp, " crash utility: %s\n",
		(__BYTE_ORDER == __LITTLE_ENDIAN) ?
		"little-endian" : "big-endian");
	fprintf(fp, " %s: %s\n\n", file, endian);

	return TRUE;
}

/*
 * Conditionally byte-swap a 16-bit value: a pass-through when the
 * swap argument is zero.
 */
uint16_t
swap16(uint16_t val, int swap)
{
	if (!swap)
		return val;

	return (uint16_t)(((val & 0x00ff) << 8) | ((val & 0xff00) >> 8));
}

/*
 * Conditionally byte-swap a 32-bit value.
 */
uint32_t
swap32(uint32_t val, int swap)
{
	uint32_t res;

	if (!swap)
		return val;

	res  = (val << 24) & 0xff000000U;
	res |= (val <<  8) & 0x00ff0000U;
	res |= (val >>  8) & 0x0000ff00U;
	res |= (val >> 24) & 0x000000ffU;

	return res;
}

/*
 * Conditionally byte-swap a 64-bit value, reversing it one byte
 * at a time.
 */
uint64_t
swap64(uint64_t val, int swap)
{
	uint64_t res;
	int byte;

	if (!swap)
		return val;

	res = 0;
	for (byte = 0; byte < 8; byte++) {
		res = (res << 8) | (val & 0xffULL);
		val >>= 8;
	}

	return res;
}

/*
 * Get a sufficiently large buffer for cpumask.
 * You should call FREEBUF() on the result when you no longer need it.
*/

/*
 * Get a sufficiently large buffer for a cpumask: the kernel's
 * cpumask_t size when known, otherwise enough ulongs to cover all
 * online cpus.  Caller frees with FREEBUF().
 */
ulong *
get_cpumask_buf(void)
{
	int cpulen;
	if ((cpulen = STRUCT_SIZE("cpumask_t")) < 0)
		cpulen = DIV_ROUND_UP(kt->cpus, BITS_PER_LONG) * sizeof(ulong);
	return (ulong *)GETBUF(cpulen);
}

/*
 * Parse a cpu-list specification ("1,3-5", "a"/"all") and set the
 * corresponding bits in the caller's mask.  Returns TRUE on success;
 * on error, behavior follows flags (RESTART on FAULT_ON_ERROR, or
 * *errptr set on RETURN_ON_ERROR).
 *
 * NOTE(review): the error path returns the UNUSED sentinel rather
 * than FALSE, and the strdup() result is not NULL-checked -- both
 * kept as-is.  strtok() consumes the caller's string s in place.
 */
int
make_cpumask(char *s, ulong *mask, int flags, int *errptr)
{
	char *p, *q, *orig;
	int start, end;
	int i;

	if (s == NULL) {
		if (!(flags & QUIET))
			error(INFO, "make_cpumask: received NULL string\n");
		orig = NULL;
		goto make_cpumask_error;
	}

	/* keep a pristine copy of the spec for error messages */
	orig = strdup(s);

	p = strtok(s, ",");
	while (p) {
		/* remember the remainder before tokenizing p further */
		s = strtok(NULL, "");

		if (STREQ(p, "a") || STREQ(p, "all")) {
			start = 0;
			end = kt->cpus - 1;
		} else {
			start = end = -1;

			q = strtok(p, "-");
			start = dtoi(q, flags, errptr);
			if ((q = strtok(NULL, "-")))
				end = dtoi(q, flags, errptr);

			/* single cpu, not a range */
			if (end == -1)
				end = start;
		}
		if ((start < 0) || (start >= kt->cpus) ||
		    (end < 0) || (end >= kt->cpus)) {
			error(INFO, "invalid cpu specification: %s\n", orig);
			goto make_cpumask_error;
		}

		for (i = start; i <= end; i++)
			SET_BIT(mask, i);

		p = strtok(s, ",");
	}

	free(orig);
	return TRUE;

make_cpumask_error:
	free(orig);

	switch (flags & (FAULT_ON_ERROR|RETURN_ON_ERROR))
	{
	case FAULT_ON_ERROR:
		RESTART();

	case RETURN_ON_ERROR:
		if (errptr)
			*errptr = TRUE;
		break;
	}

	return UNUSED;
}

/*
 * Copy a string into a sized buffer.  If necessary, truncate
 * the resultant string in the sized buffer so that it will
 * always be NULL-terminated.
 * NOTE(review): shadows the BSD/libc strlcpy on platforms that
 * provide one, but keeps the same contract (returns strlen(src)).
 */
size_t
strlcpy(char *dest, char *src, size_t size)
{
	size_t ret = strlen(src);

	if (size) {
		size_t len = (ret >= size) ?
size - 1 : ret; memcpy(dest, src, len); dest[len] = '\0'; } return ret; } struct rb_node * rb_first(struct rb_root *root) { struct rb_root rloc; struct rb_node *n; struct rb_node nloc; readmem((ulong)root, KVADDR, &rloc, sizeof(struct rb_root), "rb_root", FAULT_ON_ERROR); n = rloc.rb_node; if (!n) return NULL; while (rb_left(n, &nloc)) n = nloc.rb_left; return n; } struct rb_node * rb_parent(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return (struct rb_node *)(nloc->rb_parent_color & ~3); } struct rb_node * rb_right(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_right; } struct rb_node * rb_left(struct rb_node *node, struct rb_node *nloc) { readmem((ulong)node, KVADDR, nloc, sizeof(struct rb_node), "rb_node", FAULT_ON_ERROR); return nloc->rb_left; } struct rb_node * rb_next(struct rb_node *node) { struct rb_node nloc; struct rb_node *parent; /* node is destroyed */ if (!accessible((ulong)node)) return NULL; parent = rb_parent(node, &nloc); if (parent == node) return NULL; if (nloc.rb_right) { /* rb_right is destroyed */ if (!accessible((ulong)nloc.rb_right)) return NULL; node = nloc.rb_right; while (rb_left(node, &nloc)) { /* rb_left is destroyed */ if (!accessible((ulong)nloc.rb_left)) return NULL; node = nloc.rb_left; } return node; } while ((parent = rb_parent(node, &nloc))) { /* parent is destroyed */ if (!accessible((ulong)parent)) return NULL; if (node != rb_right(parent, &nloc)) break; node = parent; } return parent; } struct rb_node * rb_last(struct rb_root *root) { struct rb_node *node; struct rb_node nloc; /* meet destroyed data */ if (!accessible((ulong)(root + OFFSET(rb_root_rb_node)))) return NULL; readmem((ulong)(root + OFFSET(rb_root_rb_node)), KVADDR, &node, sizeof(node), "rb_root node", FAULT_ON_ERROR); while (1) { if (!node) break; /* meet destroyed data */ if 
(!accessible((ulong)node)) return NULL; readmem((ulong)node, KVADDR, &nloc, sizeof(struct rb_node), "rb_node last", FAULT_ON_ERROR); /* meet the last one */ if (!nloc.rb_right) break; /* meet destroyed data */ if (!!accessible((ulong)nloc.rb_right)) break; node = nloc.rb_right; } return node; } crash-7.2.1/global_data.c0000775000000000000000000001304013240637645013731 0ustar rootroot/* global_data.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2006, 2010, 2012-2013 David Anderson * Copyright (C) 2002-2006, 2010, 2012-2013 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include "defs.h" /* * Data output FILE pointer. The contents of fp are changed on the fly * depending upon whether the output is going to stdout, redirected to a * user-designated pipe or file, or to the "standard" scrolling pipe. * Regardless of where it ends up, fprintf(fp, ...) is used throughout * instead of printf(). */ FILE *fp; /* * The state of the program is kept in the program_context structure. * Given that it's consulted so often, "pc" is globally available to * quickly access the structure contents. */ struct program_context program_context = { 0 }; struct program_context *pc = &program_context; /* * The same thing goes for accesses to the frequently-accessed task_table, * kernel_table, vm_table, symbol_table_data and machdep_table, making the * "tt", "kt", "vt", "st" and "machdep" pointers globally available. 
*/
struct task_table task_table = { 0 };
struct task_table *tt = &task_table;

struct kernel_table kernel_table = { 0 };
struct kernel_table *kt = &kernel_table;

struct vm_table vm_table = { 0 };
struct vm_table *vt = &vm_table;

struct symbol_table_data symbol_table_data = { 0 };
struct symbol_table_data *st = &symbol_table_data;

struct machdep_table machdep_table = { 0 };
struct machdep_table *machdep = &machdep_table;

/*
 * Command functions are entered with the args[] array and argcnt value
 * pre-set for issuance to getopt().
 */
char *args[MAXARGS];      /* argument array */
int argcnt;               /* argument count */
int argerrs;              /* argument error counter */

/*
 * To add a new command, declare it in defs.h and enter it in this table.
 * Each entry: name, handler, help text, and flags (e.g. MINIMAL for
 * minimal-mode availability, REFRESH_TASK_TABLE to resync tasks first,
 * HIDDEN_COMMAND to omit from help).
 */
struct command_table_entry linux_command_table[] = {
	{"*", cmd_pointer, help_pointer, 0},
	{"alias", cmd_alias, help_alias, 0},
	{"ascii", cmd_ascii, help_ascii, 0},
	{"bt", cmd_bt, help_bt, REFRESH_TASK_TABLE},
	{"btop", cmd_btop, help_btop, 0},
	{"dev", cmd_dev, help_dev, 0},
	{"dis", cmd_dis, help_dis, MINIMAL},
	{"eval", cmd_eval, help_eval, MINIMAL},
	{"exit", cmd_quit, help_exit, MINIMAL},
	{"extend", cmd_extend, help_extend, MINIMAL},
	{"files", cmd_files, help_files, REFRESH_TASK_TABLE},
	{"foreach", cmd_foreach, help_foreach, REFRESH_TASK_TABLE},
	{"fuser", cmd_fuser, help_fuser, REFRESH_TASK_TABLE},
	{"gdb", cmd_gdb, help_gdb, REFRESH_TASK_TABLE},
	{"help", cmd_help, help_help, MINIMAL},
	{"ipcs", cmd_ipcs, help_ipcs, REFRESH_TASK_TABLE},
	{"irq", cmd_irq, help_irq, 0},
	{"kmem", cmd_kmem, help_kmem, 0},
	{"list", cmd_list, help__list, REFRESH_TASK_TABLE},
	{"log", cmd_log, help_log, MINIMAL},
	{"mach", cmd_mach, help_mach, 0},
	{"map", cmd_map, help_map, HIDDEN_COMMAND},
	{"mod", cmd_mod, help_mod, 0},
	{"mount", cmd_mount, help_mount, 0},
	{"net", cmd_net, help_net, REFRESH_TASK_TABLE},
	{"p", cmd_p, help_p, 0},
	{"ps", cmd_ps, help_ps, REFRESH_TASK_TABLE},
	{"pte", cmd_pte, help_pte, 0},
	{"ptob", cmd_ptob, help_ptob, 0},
	{"ptov", cmd_ptov, help_ptov, 0},
	{"q",
cmd_quit, help_quit, MINIMAL},
	{"tree", cmd_tree, help_tree, REFRESH_TASK_TABLE},
	{"rd", cmd_rd, help_rd, MINIMAL},
	{"repeat", cmd_repeat, help_repeat, 0},
	{"runq", cmd_runq, help_runq, REFRESH_TASK_TABLE},
	{"search", cmd_search, help_search, 0},
	{"set", cmd_set, help_set, REFRESH_TASK_TABLE | MINIMAL},
	{"sig", cmd_sig, help_sig, REFRESH_TASK_TABLE},
	{"struct", cmd_struct, help_struct, 0},
	{"swap", cmd_swap, help_swap, 0},
	{"sym", cmd_sym, help_sym, MINIMAL},
	{"sys", cmd_sys, help_sys, REFRESH_TASK_TABLE},
	{"task", cmd_task, help_task, REFRESH_TASK_TABLE},
	{"test", cmd_test, NULL, HIDDEN_COMMAND},
	{"timer", cmd_timer, help_timer, 0},
	{"union", cmd_union, help_union, 0},
	{"vm", cmd_vm, help_vm, REFRESH_TASK_TABLE},
	{"vtop", cmd_vtop, help_vtop, REFRESH_TASK_TABLE},
	{"waitq", cmd_waitq, help_waitq, REFRESH_TASK_TABLE},
	{"whatis", cmd_whatis, help_whatis, 0},
	{"wr", cmd_wr, help_wr, 0},
/* s390dbf is only meaningful on the s390/s390x architectures */
#if defined(S390) || defined(S390X)
	{"s390dbf", cmd_s390dbf, help_s390dbf, 0},
#endif
	{(char *)NULL}   /* table terminator */
};

/* head of the runtime "extend" command's shared-object list */
struct extension_table *extension_table = NULL;

/*
 * The offset_table and size_table structure contents are referenced
 * through several OFFSET- and SIZE-related macros.  The array_table
 * is a shortcut used by get_array_length().
 */
struct offset_table offset_table = { 0 };
struct size_table size_table = { 0 };
struct array_table array_table = { 0 };
crash-7.2.1/memory.c0000775000000000000000000202352313240637645013011 0ustar  rootroot/* memory.c - core analysis suite
 *
 * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc.
 * Copyright (C) 2002-2017 David Anderson
 * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2002 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include "defs.h"
/* NOTE(review): the three header names below were lost in extraction
 * (angle-bracket contents stripped); restore from pristine source */
#include
#include
#include

struct meminfo {           /* general purpose memory information structure */
	ulong cache;       /* used by the various memory searching/dumping */
	ulong slab;        /* routines. Only one of these is used per cmd */
	ulong c_flags;     /* so stuff whatever's helpful in here... */
	ulong c_offset;
	ulong c_num;
	ulong s_mem;
	void *s_freep;
	ulong *s_index;
	ulong s_inuse;
	ulong cpucached_cache;
	ulong cpucached_slab;
	ulong inuse;
	ulong order;
	ulong slabsize;
	ulong num_slabs;
	ulong objects;
	ulonglong spec_addr;
	ulong flags;
	ulong size;
	ulong objsize;
	int memtype;
	int free;
	int slab_offset;
	char *reqname;
	char *curname;
	ulong *addrlist;
	int *kmem_bufctl;
	ulong *cpudata[NR_CPUS];
	ulong *shared_array_cache;
	int current_cache_index;
	ulong found;
	ulong retval;
	struct struct_member_data *page_member_cache;
	ulong nr_members;
	char *ignore;
	int errors;
	int calls;
	int cpu;
	int cache_count;
	ulong get_shared;
	ulong get_totalram;
	ulong get_buffers;
	ulong get_slabs;
	char *slab_buf;
	char *cache_buf;
	ulong *cache_list;
	struct vmlist {          /* embedded vmalloc-region list */
		ulong addr;
		ulong size;
	} *vmlist;
	ulong container;
	int *freelist;
	int freelist_index_size;
	ulong random;            /* SLAB_FREELIST_HARDENED xor cookie */
};

/*
 * Search modes
 */
#define SEARCH_ULONG    (0)
#define SEARCH_UINT     (1)
#define SEARCH_USHORT   (2)
#define SEARCH_CHARS    (3)
#define SEARCH_DEFAULT  (SEARCH_ULONG)

/* search mode information */
struct searchinfo {
	int mode;                 /* one of the SEARCH_* modes above */
	int vcnt;                 /* number of search values */
	int val;
	int context;              /* surrounding values to display on a hit */
	int memtype;
	int do_task_header;
	int tasks_found;
	struct task_context *task_context;
	ulong vaddr_start;
	ulong vaddr_end;
	ulonglong paddr_start;
	ulonglong paddr_end;
	union {
		/* default ulong search */
		struct {
			ulong value[MAXARGS];
			char *opt_string[MAXARGS];
			ulong mask;
		} s_ulong;

		/* uint search */
		struct {
			uint value[MAXARGS];
			char
*opt_string[MAXARGS];
			uint mask;
		} s_uint;

		/* ushort search */
		struct {
			ushort value[MAXARGS];
			char *opt_string[MAXARGS];
			ushort mask;
		} s_ushort;

		/* string (chars) search */
		struct {
			char *value[MAXARGS];
			int len[MAXARGS];
			int started_flag;  /* string search needs history */
		} s_chars;
	} s_parms;
	char buf[BUFSIZE];
};

/* string formatting and page-member helpers */
static char *memtype_string(int, int);
static char *error_handle_string(ulong);
static void collect_page_member_data(char *, struct meminfo *);

/* carries a raw value plus its extracted bitfield for "kmem -m" output */
struct integer_data {
	ulong value;
	ulong bitfield_value;
	struct struct_member_data *pmd;
};
static int get_bitfield_data(struct integer_data *);
static int show_page_member_data(char *, ulong, struct meminfo *, char *);

/* mem_map / page-flag dumping */
static void dump_mem_map(struct meminfo *);
static void dump_mem_map_SPARSEMEM(struct meminfo *);
static void fill_mem_map_cache(ulong, ulong, char *);
static void page_flags_init(void);
static int page_flags_init_from_pageflag_names(void);
static int page_flags_init_from_pageflags_enum(void);
static int translate_page_flags(char *, ulong);

/* free-page accounting across buddy-allocator layouts */
static void dump_free_pages(struct meminfo *);
static int dump_zone_page_usage(void);
static void dump_multidimensional_free_pages(struct meminfo *);
static void dump_free_pages_zones_v1(struct meminfo *);
static void dump_free_pages_zones_v2(struct meminfo *);
struct free_page_callback_data;
static int dump_zone_free_area(ulong, int, ulong, struct free_page_callback_data *);
static void dump_page_hash_table(struct meminfo *);

/* kmem slab-cache analysis */
static void kmem_search(struct meminfo *);
static void kmem_cache_init(void);
static void kmem_cache_init_slub(void);
static ulong max_cpudata_limit(ulong, ulong *);
static int kmem_cache_downsize(void);
static int ignore_cache(struct meminfo *, char *);
static char *is_kmem_cache_addr(ulong, char *);
static char *is_kmem_cache_addr_common(ulong, char *);
static void kmem_cache_list(void);
static void dump_kmem_cache(struct meminfo *);
static void dump_kmem_cache_percpu_v1(struct meminfo *);
static void dump_kmem_cache_percpu_v2(struct
meminfo *);
static void dump_kmem_cache_slub(struct meminfo *);
static void dump_kmem_cache_info_v2(struct meminfo *);
static void kmem_cache_list_common(void);
static ulong get_cpu_slab_ptr(struct meminfo *, int, ulong *);
/* SLUB order/objects are packed into one "oo" word */
static unsigned int oo_order(ulong);
static unsigned int oo_objects(ulong);
static char *vaddr_to_kmem_cache(ulong, char *, int);
static char *is_slab_overload_page(ulong, ulong *, char *);
static ulong vaddr_to_slab(ulong);

/* slab-chain walkers, one per historical SLAB implementation */
static void do_slab_chain(int, struct meminfo *);
static void do_slab_chain_percpu_v1(long, struct meminfo *);
static void do_slab_chain_percpu_v2(long, struct meminfo *);
static void do_slab_chain_percpu_v2_nodes(long, struct meminfo *);
static void do_slab_chain_slab_overload_page(long, struct meminfo *);
static int slab_freelist_index_size(void);
static int do_slab_slub(struct meminfo *, int);
static void do_kmem_cache_slub(struct meminfo *);

/* per-cache data caching between invocations */
static void save_slab_data(struct meminfo *);
static int slab_data_saved(struct meminfo *);
static void dump_saved_slab_data(void);

/* individual slab dumping and validation */
static void dump_slab(struct meminfo *);
static void dump_slab_percpu_v1(struct meminfo *);
static void dump_slab_percpu_v2(struct meminfo *);
static void dump_slab_overload_page(struct meminfo *);
static int verify_slab_v1(struct meminfo *, ulong, int);
static int verify_slab_v2(struct meminfo *, ulong, int);
static int verify_slab_overload_page(struct meminfo *, ulong, int);

/* free-object list gathering */
static void gather_slab_free_list(struct meminfo *);
static void gather_slab_free_list_percpu(struct meminfo *);
static void gather_slab_free_list_slab_overload_page(struct meminfo *);
static void gather_cpudata_list_v1(struct meminfo *);
static void gather_cpudata_list_v2(struct meminfo *);
static void gather_cpudata_list_v2_nodes(struct meminfo *, int);
static int check_cpudata_list(struct meminfo *, ulong);
static int check_shared_list(struct meminfo *, ulong);
static void gather_slab_cached_count(struct meminfo *);
static void dump_slab_objects(struct meminfo *);
static void
dump_slab_objects_percpu(struct meminfo *);

/* vmalloc region and page-list dumping */
static void dump_vmlist(struct meminfo *);
static void dump_vmap_area(struct meminfo *);
static int dump_page_lists(struct meminfo *);
static void dump_kmeminfo(void);
static int page_to_phys(ulong, physaddr_t *);
static void display_memory(ulonglong, long, ulong, int, void *);

/* "search" command engine: per-width scanners for virtual and
 * physical address spaces */
static char *show_opt_string(struct searchinfo *);
static void display_with_pre_and_post(void *, ulonglong, struct searchinfo *);
static ulong search_ulong(ulong *, ulong, int, struct searchinfo *);
static ulong search_uint(ulong *, ulong, int, struct searchinfo *);
static ulong search_ushort(ulong *, ulong, int, struct searchinfo *);
static ulong search_chars(ulong *, ulong, int, struct searchinfo *);
static ulonglong search_ulong_p(ulong *, ulonglong, int, struct searchinfo *);
static ulonglong search_uint_p(ulong *, ulonglong, int, struct searchinfo *);
static ulonglong search_ushort_p(ulong *, ulonglong, int, struct searchinfo *);
static ulonglong search_chars_p(ulong *, ulonglong, int, struct searchinfo *);
static void search_virtual(struct searchinfo *);
static void search_physical(struct searchinfo *);
static int next_upage(struct task_context *, ulong, ulong *);
static int next_kpage(ulong, ulong *);
static int next_physpage(ulonglong, ulonglong *);
static int next_vmlist_vaddr(ulong, ulong *);
static int next_module_vaddr(ulong, ulong *);
static int next_identity_mapping(ulong, ulong *);
static int vm_area_page_dump(ulong, ulong, ulong, ulong, ulong, struct reference *);
static void rss_page_types_init(void);

/* swap device bookkeeping */
static int dump_swap_info(ulong, ulong *, ulong *);
static int get_hugetlb_total_pages(ulong *, ulong *);
static void swap_info_init(void);
static char *get_swapdev(ulong, char *);
static void fill_swap_info(ulong);
static char *vma_file_offset(ulong, ulong, char *);
static ssize_t read_dev_kmem(ulong, char *, long);
static void dump_memory_nodes(int);
static void dump_zone_stats(void);
#define MEMORY_NODES_DUMP (0)
#define
MEMORY_NODES_INITIALIZE (1)
static void node_table_init(void);
static int compare_node_data(const void *, const void *);
static void do_vm_flags(ulonglong);
static ulonglong get_vm_flags(char *);
static void PG_reserved_flag_init(void);
static void PG_slab_flag_init(void);
static ulong nr_blockdev_pages(void);

/* SPARSEMEM section handling -- external linkage, used elsewhere */
void sparse_mem_init(void);
void dump_mem_sections(void);
void list_mem_sections(void);
ulong sparse_decode_mem_map(ulong, ulong);
char *read_mem_section(ulong);
ulong nr_to_section(ulong);
int valid_section(ulong);
int section_has_mem_map(ulong);
ulong section_mem_map_addr(ulong);
ulong valid_section_nr(ulong);
ulong pfn_to_map(ulong);

/* NUMA node iteration and vm statistics */
static int get_nodes_online(void);
static int next_online_node(int);
static ulong next_online_pgdat(int);
static int vm_stat_init(void);
static int vm_event_state_init(void);
static int dump_vm_stat(char *, long *, ulong);
static int dump_vm_event_state(void);
static int dump_page_states(void);
static int generic_read_dumpfile(ulonglong, void *, long, char *, ulong);
static int generic_write_dumpfile(ulonglong, void *, long, char *, ulong);
static int page_to_nid(ulong);

/* SLUB internals */
static int get_kmem_cache_list(ulong **);
static int get_kmem_cache_slub_data(long, struct meminfo *);
static ulong compound_head(ulong);
static long count_partial(ulong, struct meminfo *, ulong *);
static short count_cpu_partial(struct meminfo *, int);
static ulong get_freepointer(struct meminfo *, void *);
static int count_free_objects(struct meminfo *, ulong);
char *is_slab_page(struct meminfo *, char *);
static void do_cpu_partial_slub(struct meminfo *, int);
static void do_node_lists_slub(struct meminfo *, ulong, int);

/* live-system and miscellaneous helpers */
static int devmem_is_restricted(void);
static int switch_to_proc_kcore(void);
static int verify_pfn(ulong);
static void dump_per_cpu_offsets(void);
static void dump_page_flags(ulonglong);
static ulong kmem_cache_nodelists(ulong);
static void dump_hstates(void);
static ulong freelist_ptr(struct meminfo *, ulong, ulong);

/*
 * Memory display
modes specific to this file. */ #define DISPLAY_8 (0x2) #define DISPLAY_16 (0x4) #define DISPLAY_32 (0x8) #define DISPLAY_64 (0x10) #define SHOW_OFFSET (0x20) #define SYMBOLIC (0x40) #define HEXADECIMAL (0x80) #define DECIMAL (0x100) #define UDECIMAL (0x200) #define ASCII_ENDLINE (0x400) #define NO_ASCII (0x800) #define SLAB_CACHE (0x1000) #define DISPLAY_ASCII (0x2000) #define NET_ENDIAN (0x4000) #define DISPLAY_RAW (0x8000) #define NO_ERROR (0x10000) #define SLAB_CACHE2 (0x20000) #define DISPLAY_TYPES (DISPLAY_RAW|DISPLAY_ASCII|DISPLAY_8|\ DISPLAY_16|DISPLAY_32|DISPLAY_64) #define ASCII_UNLIMITED ((ulong)(-1) >> 1) static ulong DISPLAY_DEFAULT; /* * Verify that the sizeof the primitive types are reasonable. */ void mem_init(void) { if (sizeof(char) != SIZEOF_8BIT) error(FATAL, "unsupported sizeof(char): %d\n", sizeof(char)); if (sizeof(short) != SIZEOF_16BIT) error(FATAL, "unsupported sizeof(short): %d\n", sizeof(short)); if ((sizeof(int) != SIZEOF_32BIT) && (sizeof(int) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(int): %d\n", sizeof(int)); if ((sizeof(long) != SIZEOF_32BIT) && (sizeof(long) != SIZEOF_64BIT)) error(FATAL, "unsupported sizeof(long): %d\n", sizeof(long)); if (sizeof(void *) != sizeof(long)) error(FATAL, "pointer size: %d is not sizeof(long): %d\n", sizeof(void *), sizeof(long)); DISPLAY_DEFAULT = (sizeof(long) == 8) ? DISPLAY_64 : DISPLAY_32; } /* * Stash a few popular offsets and some basic kernel virtual memory * items used by routines in this file. 
*/ void vm_init(void) { char buf[BUFSIZE]; int i, len, dimension, nr_node_ids; struct syment *sp_array[2]; ulong value1, value2; char *kmem_cache_node_struct, *nodelists_field; MEMBER_OFFSET_INIT(task_struct_mm, "task_struct", "mm"); MEMBER_OFFSET_INIT(mm_struct_mmap, "mm_struct", "mmap"); MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "rss"); if (!VALID_MEMBER(mm_struct_rss)) MEMBER_OFFSET_INIT(mm_struct_rss, "mm_struct", "_rss"); MEMBER_OFFSET_INIT(mm_struct_anon_rss, "mm_struct", "_anon_rss"); MEMBER_OFFSET_INIT(mm_struct_file_rss, "mm_struct", "_file_rss"); if (!VALID_MEMBER(mm_struct_anon_rss)) { MEMBER_OFFSET_INIT(mm_struct_rss_stat, "mm_struct", "rss_stat"); MEMBER_OFFSET_INIT(mm_rss_stat_count, "mm_rss_stat", "count"); } MEMBER_OFFSET_INIT(mm_struct_total_vm, "mm_struct", "total_vm"); MEMBER_OFFSET_INIT(mm_struct_start_code, "mm_struct", "start_code"); MEMBER_OFFSET_INIT(mm_struct_mm_count, "mm_struct", "mm_count"); MEMBER_OFFSET_INIT(vm_area_struct_vm_mm, "vm_area_struct", "vm_mm"); MEMBER_OFFSET_INIT(vm_area_struct_vm_next, "vm_area_struct", "vm_next"); MEMBER_OFFSET_INIT(vm_area_struct_vm_end, "vm_area_struct", "vm_end"); MEMBER_OFFSET_INIT(vm_area_struct_vm_start, "vm_area_struct", "vm_start"); MEMBER_OFFSET_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags"); MEMBER_OFFSET_INIT(vm_area_struct_vm_file, "vm_area_struct", "vm_file"); MEMBER_OFFSET_INIT(vm_area_struct_vm_offset, "vm_area_struct", "vm_offset"); MEMBER_OFFSET_INIT(vm_area_struct_vm_pgoff, "vm_area_struct", "vm_pgoff"); MEMBER_SIZE_INIT(vm_area_struct_vm_flags, "vm_area_struct", "vm_flags"); MEMBER_OFFSET_INIT(vm_struct_addr, "vm_struct", "addr"); MEMBER_OFFSET_INIT(vm_struct_size, "vm_struct", "size"); MEMBER_OFFSET_INIT(vm_struct_next, "vm_struct", "next"); MEMBER_OFFSET_INIT(vmap_area_va_start, "vmap_area", "va_start"); MEMBER_OFFSET_INIT(vmap_area_va_end, "vmap_area", "va_end"); MEMBER_OFFSET_INIT(vmap_area_list, "vmap_area", 
"list"); MEMBER_OFFSET_INIT(vmap_area_flags, "vmap_area", "flags"); MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "vm"); if (INVALID_MEMBER(vmap_area_vm)) MEMBER_OFFSET_INIT(vmap_area_vm, "vmap_area", "private"); STRUCT_SIZE_INIT(vmap_area, "vmap_area"); if (VALID_MEMBER(vmap_area_va_start) && VALID_MEMBER(vmap_area_va_end) && VALID_MEMBER(vmap_area_flags) && VALID_MEMBER(vmap_area_list) && VALID_MEMBER(vmap_area_vm) && kernel_symbol_exists("vmap_area_list")) vt->flags |= USE_VMAP_AREA; if (kernel_symbol_exists("hstates")) { STRUCT_SIZE_INIT(hstate, "hstate"); MEMBER_OFFSET_INIT(hstate_order, "hstate", "order"); MEMBER_OFFSET_INIT(hstate_nr_huge_pages, "hstate", "nr_huge_pages"); MEMBER_OFFSET_INIT(hstate_free_huge_pages, "hstate", "free_huge_pages"); MEMBER_OFFSET_INIT(hstate_name, "hstate", "name"); } MEMBER_OFFSET_INIT(page_next, "page", "next"); if (VALID_MEMBER(page_next)) MEMBER_OFFSET_INIT(page_prev, "page", "prev"); if (INVALID_MEMBER(page_next)) ANON_MEMBER_OFFSET_INIT(page_next, "page", "next"); MEMBER_OFFSET_INIT(page_list, "page", "list"); if (VALID_MEMBER(page_list)) { ASSIGN_OFFSET(page_list_next) = OFFSET(page_list) + OFFSET(list_head_next); ASSIGN_OFFSET(page_list_prev) = OFFSET(page_list) + OFFSET(list_head_prev); } MEMBER_OFFSET_INIT(page_next_hash, "page", "next_hash"); MEMBER_OFFSET_INIT(page_inode, "page", "inode"); MEMBER_OFFSET_INIT(page_offset, "page", "offset"); MEMBER_OFFSET_INIT(page_count, "page", "count"); if (INVALID_MEMBER(page_count)) { MEMBER_OFFSET_INIT(page_count, "page", "_count"); if (INVALID_MEMBER(page_count)) ANON_MEMBER_OFFSET_INIT(page_count, "page", "_count"); if (INVALID_MEMBER(page_count)) MEMBER_OFFSET_INIT(page_count, "page", "_refcount"); if (INVALID_MEMBER(page_count)) ANON_MEMBER_OFFSET_INIT(page_count, "page", "_refcount"); } MEMBER_OFFSET_INIT(page_flags, "page", "flags"); MEMBER_SIZE_INIT(page_flags, "page", "flags"); MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); if (INVALID_MEMBER(page_mapping)) 
ANON_MEMBER_OFFSET_INIT(page_mapping, "page", "mapping"); if (INVALID_MEMBER(page_mapping) && (THIS_KERNEL_VERSION < LINUX(2,6,17)) && MEMBER_EXISTS("page", "_mapcount")) ASSIGN_OFFSET(page_mapping) = MEMBER_OFFSET("page", "_mapcount") + STRUCT_SIZE("atomic_t") + sizeof(ulong); MEMBER_OFFSET_INIT(page_index, "page", "index"); if (INVALID_MEMBER(page_index)) ANON_MEMBER_OFFSET_INIT(page_index, "page", "index"); MEMBER_OFFSET_INIT(page_buffers, "page", "buffers"); MEMBER_OFFSET_INIT(page_lru, "page", "lru"); if (INVALID_MEMBER(page_lru)) ANON_MEMBER_OFFSET_INIT(page_lru, "page", "lru"); MEMBER_OFFSET_INIT(page_pte, "page", "pte"); MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head"); if (INVALID_MEMBER(page_compound_head)) ANON_MEMBER_OFFSET_INIT(page_compound_head, "page", "compound_head"); MEMBER_OFFSET_INIT(mm_struct_pgd, "mm_struct", "pgd"); MEMBER_OFFSET_INIT(swap_info_struct_swap_file, "swap_info_struct", "swap_file"); MEMBER_OFFSET_INIT(swap_info_struct_swap_vfsmnt, "swap_info_struct", "swap_vfsmnt"); MEMBER_OFFSET_INIT(swap_info_struct_flags, "swap_info_struct", "flags"); MEMBER_OFFSET_INIT(swap_info_struct_swap_map, "swap_info_struct", "swap_map"); MEMBER_OFFSET_INIT(swap_info_struct_swap_device, "swap_info_struct", "swap_device"); MEMBER_OFFSET_INIT(swap_info_struct_prio, "swap_info_struct", "prio"); MEMBER_OFFSET_INIT(swap_info_struct_max, "swap_info_struct", "max"); MEMBER_OFFSET_INIT(swap_info_struct_pages, "swap_info_struct", "pages"); MEMBER_OFFSET_INIT(swap_info_struct_inuse_pages, "swap_info_struct", "inuse_pages"); MEMBER_OFFSET_INIT(swap_info_struct_old_block_size, "swap_info_struct", "old_block_size"); MEMBER_OFFSET_INIT(block_device_bd_inode, "block_device", "bd_inode"); MEMBER_OFFSET_INIT(block_device_bd_list, "block_device", "bd_list"); MEMBER_OFFSET_INIT(block_device_bd_disk, "block_device", "bd_disk"); MEMBER_OFFSET_INIT(inode_i_mapping, "inode", "i_mapping"); MEMBER_OFFSET_INIT(address_space_page_tree, "address_space", 
"page_tree"); MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "nrpages"); if (INVALID_MEMBER(address_space_nrpages)) MEMBER_OFFSET_INIT(address_space_nrpages, "address_space", "__nrpages"); MEMBER_OFFSET_INIT(gendisk_major, "gendisk", "major"); MEMBER_OFFSET_INIT(gendisk_fops, "gendisk", "fops"); MEMBER_OFFSET_INIT(gendisk_disk_name, "gendisk", "disk_name"); STRUCT_SIZE_INIT(block_device, "block_device"); STRUCT_SIZE_INIT(address_space, "address_space"); STRUCT_SIZE_INIT(gendisk, "gendisk"); STRUCT_SIZE_INIT(blk_major_name, "blk_major_name"); if (VALID_STRUCT(blk_major_name)) { MEMBER_OFFSET_INIT(blk_major_name_next, "blk_major_name", "next"); MEMBER_OFFSET_INIT(blk_major_name_name, "blk_major_name", "name"); MEMBER_OFFSET_INIT(blk_major_name_major, "blk_major_name", "major"); } STRUCT_SIZE_INIT(kmem_slab_s, "kmem_slab_s"); STRUCT_SIZE_INIT(slab_s, "slab_s"); STRUCT_SIZE_INIT(slab, "slab"); STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache_s"); STRUCT_SIZE_INIT(pgd_t, "pgd_t"); /* * slab: overload struct slab over struct page * https://lkml.org/lkml/2013/10/16/155 */ if (MEMBER_EXISTS("kmem_cache", "freelist_cache")) { vt->flags |= SLAB_OVERLOAD_PAGE; ANON_MEMBER_OFFSET_INIT(page_s_mem, "page", "s_mem"); ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); ANON_MEMBER_OFFSET_INIT(page_active, "page", "active"); } if (!VALID_STRUCT(kmem_slab_s) && VALID_STRUCT(slab_s)) { vt->flags |= PERCPU_KMALLOC_V1; MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs, "kmem_cache_s", "slabs"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_full, "kmem_cache_s", "slabs_full"); 
MEMBER_OFFSET_INIT(kmem_cache_s_slabs_partial, "kmem_cache_s", "slabs_partial"); MEMBER_OFFSET_INIT(kmem_cache_s_slabs_free, "kmem_cache_s", "slabs_free"); MEMBER_OFFSET_INIT(kmem_cache_s_cpudata, "kmem_cache_s", "cpudata"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.cpudata", NULL, 0); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off"); MEMBER_OFFSET_INIT(slab_s_list, "slab_s", "list"); MEMBER_OFFSET_INIT(slab_s_s_mem, "slab_s", "s_mem"); MEMBER_OFFSET_INIT(slab_s_inuse, "slab_s", "inuse"); MEMBER_OFFSET_INIT(slab_s_free, "slab_s", "free"); MEMBER_OFFSET_INIT(cpucache_s_avail, "cpucache_s", "avail"); MEMBER_OFFSET_INIT(cpucache_s_limit, "cpucache_s", "limit"); STRUCT_SIZE_INIT(cpucache_s, "cpucache_s"); } else if (!VALID_STRUCT(kmem_slab_s) && !VALID_STRUCT(slab_s) && (VALID_STRUCT(slab) || (vt->flags & SLAB_OVERLOAD_PAGE))) { vt->flags |= PERCPU_KMALLOC_V2; if (VALID_STRUCT(kmem_cache_s)) { MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache_s", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache_s", "next"); MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache_s", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache_s", "colour_off"); MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache_s", "objsize"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache_s", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache_s", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache_s", "lists"); MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache_s", "array"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache_s.array", NULL, 0); } else { STRUCT_SIZE_INIT(kmem_cache_s, "kmem_cache"); MEMBER_OFFSET_INIT(kmem_cache_s_num, "kmem_cache", "num"); MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "next"); if (INVALID_MEMBER(kmem_cache_s_next)) { /* * slab/slub unification starting in Linux 3.6. 
*/ MEMBER_OFFSET_INIT(kmem_cache_s_next, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); } MEMBER_OFFSET_INIT(kmem_cache_s_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_s_colour_off, "kmem_cache", "colour_off"); if (MEMBER_EXISTS("kmem_cache", "objsize")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "objsize"); else if (MEMBER_EXISTS("kmem_cache", "buffer_size")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "buffer_size"); else if (MEMBER_EXISTS("kmem_cache", "size")) MEMBER_OFFSET_INIT(kmem_cache_s_objsize, "kmem_cache", "size"); MEMBER_OFFSET_INIT(kmem_cache_s_flags, "kmem_cache", "flags"); MEMBER_OFFSET_INIT(kmem_cache_s_gfporder, "kmem_cache", "gfporder"); MEMBER_OFFSET_INIT(kmem_cache_cpu_cache, "kmem_cache", "cpu_cache"); if (MEMBER_EXISTS("kmem_cache", "lists")) MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", "lists"); else if (MEMBER_EXISTS("kmem_cache", "nodelists") || MEMBER_EXISTS("kmem_cache", "node")) { nodelists_field = MEMBER_EXISTS("kmem_cache", "node") ? 
"node" : "nodelists"; vt->flags |= PERCPU_KMALLOC_V2_NODES; MEMBER_OFFSET_INIT(kmem_cache_s_lists, "kmem_cache", nodelists_field); if (MEMBER_TYPE("kmem_cache", nodelists_field) == TYPE_CODE_PTR) { /* * nodelists now a pointer to an outside array */ vt->flags |= NODELISTS_IS_PTR; if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; } else if (VALID_MEMBER(kmem_cache_cpu_cache)) { /* * commit bf0dea23a9c094ae869a88bb694fbe966671bf6d * mm/slab: use percpu allocator for cpu cache */ vt->flags |= SLAB_CPU_CACHE; MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; } else { /* * This should never happen with kmem_cache.node, * only with kmem_cache.nodelists */ ARRAY_LENGTH_INIT(vt->kmem_cache_len_nodes, NULL, "kmem_cache.nodelists", NULL, 0); } } MEMBER_OFFSET_INIT(kmem_cache_s_array, "kmem_cache", "array"); ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.array", NULL, 0); } if (VALID_STRUCT(slab)) { MEMBER_OFFSET_INIT(slab_list, "slab", "list"); MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); MEMBER_OFFSET_INIT(slab_free, "slab", "free"); /* * slab members were moved to an anonymous union in 2.6.39. 
*/ if (INVALID_MEMBER(slab_list)) ANON_MEMBER_OFFSET_INIT(slab_list, "slab", "list"); if (INVALID_MEMBER(slab_s_mem)) ANON_MEMBER_OFFSET_INIT(slab_s_mem, "slab", "s_mem"); if (INVALID_MEMBER(slab_inuse)) ANON_MEMBER_OFFSET_INIT(slab_inuse, "slab", "inuse"); if (INVALID_MEMBER(slab_free)) ANON_MEMBER_OFFSET_INIT(slab_free, "slab", "free"); } MEMBER_OFFSET_INIT(array_cache_avail, "array_cache", "avail"); MEMBER_OFFSET_INIT(array_cache_limit, "array_cache", "limit"); STRUCT_SIZE_INIT(array_cache, "array_cache"); /* * kmem_list3 renamed to kmem_cache_node in kernel 3.11-rc1 */ kmem_cache_node_struct = STRUCT_EXISTS("kmem_cache_node") ? "kmem_cache_node" : "kmem_list3"; MEMBER_OFFSET_INIT(kmem_list3_slabs_partial, kmem_cache_node_struct, "slabs_partial"); MEMBER_OFFSET_INIT(kmem_list3_slabs_full, kmem_cache_node_struct, "slabs_full"); MEMBER_OFFSET_INIT(kmem_list3_slabs_free, kmem_cache_node_struct, "slabs_free"); MEMBER_OFFSET_INIT(kmem_list3_free_objects, kmem_cache_node_struct, "free_objects"); MEMBER_OFFSET_INIT(kmem_list3_shared, kmem_cache_node_struct, "shared"); /* * Common to slab/slub */ MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); if (INVALID_MEMBER(page_slab_page)) ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); if (INVALID_MEMBER(page_first_page)) ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); } else if (MEMBER_EXISTS("kmem_cache", "cpu_slab") && STRUCT_EXISTS("kmem_cache_node")) { vt->flags |= KMALLOC_SLUB; STRUCT_SIZE_INIT(kmem_cache, "kmem_cache"); MEMBER_OFFSET_INIT(kmem_cache_size, "kmem_cache", "size"); MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "objsize"); if (INVALID_MEMBER(kmem_cache_objsize)) MEMBER_OFFSET_INIT(kmem_cache_objsize, "kmem_cache", "object_size"); MEMBER_OFFSET_INIT(kmem_cache_offset, 
"kmem_cache", "offset"); MEMBER_OFFSET_INIT(kmem_cache_order, "kmem_cache", "order"); MEMBER_OFFSET_INIT(kmem_cache_local_node, "kmem_cache", "local_node"); MEMBER_OFFSET_INIT(kmem_cache_objects, "kmem_cache", "objects"); MEMBER_OFFSET_INIT(kmem_cache_inuse, "kmem_cache", "inuse"); MEMBER_OFFSET_INIT(kmem_cache_align, "kmem_cache", "align"); MEMBER_OFFSET_INIT(kmem_cache_node, "kmem_cache", "node"); MEMBER_OFFSET_INIT(kmem_cache_cpu_slab, "kmem_cache", "cpu_slab"); MEMBER_OFFSET_INIT(kmem_cache_list, "kmem_cache", "list"); MEMBER_OFFSET_INIT(kmem_cache_red_left_pad, "kmem_cache", "red_left_pad"); MEMBER_OFFSET_INIT(kmem_cache_name, "kmem_cache", "name"); MEMBER_OFFSET_INIT(kmem_cache_flags, "kmem_cache", "flags"); MEMBER_OFFSET_INIT(kmem_cache_random, "kmem_cache", "random"); MEMBER_OFFSET_INIT(kmem_cache_cpu_freelist, "kmem_cache_cpu", "freelist"); MEMBER_OFFSET_INIT(kmem_cache_cpu_page, "kmem_cache_cpu", "page"); MEMBER_OFFSET_INIT(kmem_cache_cpu_node, "kmem_cache_cpu", "node"); MEMBER_OFFSET_INIT(kmem_cache_cpu_partial, "kmem_cache_cpu", "partial"); MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); if (INVALID_MEMBER(page_inuse)) ANON_MEMBER_OFFSET_INIT(page_inuse, "page", "inuse"); MEMBER_OFFSET_INIT(page_offset, "page", "offset"); if (INVALID_MEMBER(page_offset)) ANON_MEMBER_OFFSET_INIT(page_offset, "page", "offset"); MEMBER_OFFSET_INIT(page_slab, "page", "slab"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab"); if (INVALID_MEMBER(page_slab)) { MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); if (INVALID_MEMBER(page_slab)) ANON_MEMBER_OFFSET_INIT(page_slab, "page", "slab_cache"); } MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); if (INVALID_MEMBER(page_slab_page)) ANON_MEMBER_OFFSET_INIT(page_slab_page, "page", "slab_page"); MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); if (INVALID_MEMBER(page_first_page)) ANON_MEMBER_OFFSET_INIT(page_first_page, "page", "first_page"); MEMBER_OFFSET_INIT(page_freelist, 
"page", "freelist"); if (INVALID_MEMBER(page_freelist)) ANON_MEMBER_OFFSET_INIT(page_freelist, "page", "freelist"); if (INVALID_MEMBER(kmem_cache_objects)) { MEMBER_OFFSET_INIT(kmem_cache_oo, "kmem_cache", "oo"); /* NOTE: returns offset of containing bitfield */ ANON_MEMBER_OFFSET_INIT(page_objects, "page", "objects"); } if (VALID_MEMBER(kmem_cache_node)) { ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.node", NULL, 0); vt->flags |= CONFIG_NUMA; } ARRAY_LENGTH_INIT(len, NULL, "kmem_cache.cpu_slab", NULL, 0); STRUCT_SIZE_INIT(kmem_cache_node, "kmem_cache_node"); STRUCT_SIZE_INIT(kmem_cache_cpu, "kmem_cache_cpu"); MEMBER_OFFSET_INIT(kmem_cache_node_nr_partial, "kmem_cache_node", "nr_partial"); MEMBER_OFFSET_INIT(kmem_cache_node_nr_slabs, "kmem_cache_node", "nr_slabs"); MEMBER_OFFSET_INIT(kmem_cache_node_total_objects, "kmem_cache_node", "total_objects"); MEMBER_OFFSET_INIT(kmem_cache_node_partial, "kmem_cache_node", "partial"); MEMBER_OFFSET_INIT(kmem_cache_node_full, "kmem_cache_node", "full"); } else { MEMBER_OFFSET_INIT(kmem_cache_s_c_nextp, "kmem_cache_s", "c_nextp"); MEMBER_OFFSET_INIT(kmem_cache_s_c_name, "kmem_cache_s", "c_name"); MEMBER_OFFSET_INIT(kmem_cache_s_c_num, "kmem_cache_s", "c_num"); MEMBER_OFFSET_INIT(kmem_cache_s_c_org_size, "kmem_cache_s", "c_org_size"); MEMBER_OFFSET_INIT(kmem_cache_s_c_flags, "kmem_cache_s", "c_flags"); MEMBER_OFFSET_INIT(kmem_cache_s_c_offset, "kmem_cache_s", "c_offset"); MEMBER_OFFSET_INIT(kmem_cache_s_c_firstp, "kmem_cache_s", "c_firstp"); MEMBER_OFFSET_INIT(kmem_cache_s_c_gfporder, "kmem_cache_s", "c_gfporder"); MEMBER_OFFSET_INIT(kmem_cache_s_c_magic, "kmem_cache_s", "c_magic"); MEMBER_OFFSET_INIT(kmem_cache_s_c_align, "kmem_cache_s", "c_align"); MEMBER_OFFSET_INIT(kmem_slab_s_s_nextp, "kmem_slab_s", "s_nextp"); MEMBER_OFFSET_INIT(kmem_slab_s_s_freep, "kmem_slab_s", "s_freep"); MEMBER_OFFSET_INIT(kmem_slab_s_s_inuse, "kmem_slab_s", "s_inuse"); MEMBER_OFFSET_INIT(kmem_slab_s_s_mem, "kmem_slab_s", "s_mem"); 
MEMBER_OFFSET_INIT(kmem_slab_s_s_index, "kmem_slab_s", "s_index"); MEMBER_OFFSET_INIT(kmem_slab_s_s_offset, "kmem_slab_s", "s_offset"); MEMBER_OFFSET_INIT(kmem_slab_s_s_magic, "kmem_slab_s", "s_magic"); } if (!kt->kernel_NR_CPUS) { if (enumerator_value("WORK_CPU_UNBOUND", (long *)&value1)) kt->kernel_NR_CPUS = (int)value1; else if ((i = get_array_length("__per_cpu_offset", NULL, 0))) kt->kernel_NR_CPUS = i; else if (ARRAY_LENGTH(kmem_cache_s_cpudata)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_cpudata); else if (ARRAY_LENGTH(kmem_cache_s_array)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_s_array); else if (ARRAY_LENGTH(kmem_cache_cpu_slab)) kt->kernel_NR_CPUS = ARRAY_LENGTH(kmem_cache_cpu_slab); } if (CRASHDEBUG(1)) fprintf(fp, "kernel NR_CPUS: %d %s\n", kt->kernel_NR_CPUS, kt->kernel_NR_CPUS ? "" : "(unknown)"); if (kt->kernel_NR_CPUS > NR_CPUS) { error(WARNING, "kernel-configured NR_CPUS (%d) greater than compiled-in NR_CPUS (%d)\n", kt->kernel_NR_CPUS, NR_CPUS); error(FATAL, "recompile crash with larger NR_CPUS\n"); } if (machdep->init_kernel_pgd) machdep->init_kernel_pgd(); else if (symbol_exists("swapper_pg_dir")) { value1 = symbol_value("swapper_pg_dir"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value1; } else if (symbol_exists("cpu_pgd")) { len = get_array_length("cpu_pgd", &dimension, 0); if ((len == NR_CPUS) && (dimension == machdep->ptrs_per_pgd)) { value1 = symbol_value("cpu_pgd"); for (i = 0; i < NR_CPUS; i++) { value2 = i * (SIZE(pgd_t) * machdep->ptrs_per_pgd); vt->kernel_pgd[i] = value1 + value2; } error(WARNING, "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n", dimension, len); } else { error(WARNING, "unrecognized dimensions: cpu_pgd[%d][%d]\n", dimension, len); value1 = symbol_value("cpu_pgd"); for (i = 0; i < NR_CPUS; i++) vt->kernel_pgd[i] = value1; error(WARNING, "no swapper_pg_dir: using first entry of cpu_pgd[%d][%d]\n\n", dimension, len); } } else error(FATAL, "no swapper_pg_dir or cpu_pgd symbols exist?\n"); 
get_symbol_data("high_memory", sizeof(ulong), &vt->high_memory); if (kernel_symbol_exists("mem_section")) vt->flags |= SPARSEMEM; else if (kernel_symbol_exists("mem_map")) { get_symbol_data("mem_map", sizeof(char *), &vt->mem_map); vt->flags |= FLATMEM; } else vt->flags |= DISCONTIGMEM; sparse_mem_init(); vt->vmalloc_start = machdep->vmalloc_start(); if (IS_VMALLOC_ADDR(vt->mem_map)) vt->flags |= V_MEM_MAP; vt->total_pages = BTOP(VTOP(vt->high_memory)); switch (get_syment_array("totalram_pages", sp_array, 2)) { case 1: get_symbol_data("totalram_pages", sizeof(ulong), &vt->totalram_pages); break; case 2: if (!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalram_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalram_pages #2", RETURN_ON_ERROR))) break; vt->totalram_pages = MAX(value1, value2); break; } if (symbol_exists("totalhigh_pages")) { switch (get_syment_array("totalhigh_pages", sp_array, 2)) { case 1: get_symbol_data("totalhigh_pages", sizeof(ulong), &vt->totalhigh_pages); break; case 2: if (!(readmem(sp_array[0]->value, KVADDR, &value1, sizeof(ulong), "totalhigh_pages #1", RETURN_ON_ERROR))) break; if (!(readmem(sp_array[1]->value, KVADDR, &value2, sizeof(ulong), "totalhigh_pages #2", RETURN_ON_ERROR))) break; vt->totalhigh_pages = MAX(value1, value2); break; } vt->total_pages += vt->totalhigh_pages; } if (symbol_exists("num_physpages")) get_symbol_data("num_physpages", sizeof(ulong), &vt->num_physpages); if (kernel_symbol_exists("mem_map")) get_symbol_data("max_mapnr", sizeof(ulong), &vt->max_mapnr); if (kernel_symbol_exists("nr_swapfiles")) get_symbol_data("nr_swapfiles", sizeof(unsigned int), &vt->nr_swapfiles); STRUCT_SIZE_INIT(page, "page"); STRUCT_SIZE_INIT(free_area, "free_area"); STRUCT_SIZE_INIT(free_area_struct, "free_area_struct"); STRUCT_SIZE_INIT(zone, "zone"); STRUCT_SIZE_INIT(zone_struct, "zone_struct"); STRUCT_SIZE_INIT(kmem_bufctl_t, "kmem_bufctl_t"); 
STRUCT_SIZE_INIT(swap_info_struct, "swap_info_struct"); STRUCT_SIZE_INIT(mm_struct, "mm_struct"); STRUCT_SIZE_INIT(vm_area_struct, "vm_area_struct"); STRUCT_SIZE_INIT(pglist_data, "pglist_data"); if (VALID_STRUCT(pglist_data)) { vt->flags |= ZONES; if (symbol_exists("pgdat_list") && !IS_SPARSEMEM()) vt->flags |= NODES; /* * Determine the number of nodes the best way possible, * starting with a default of 1. */ vt->numnodes = 1; if (symbol_exists("numnodes")) get_symbol_data("numnodes", sizeof(int), &vt->numnodes); if (get_nodes_online()) vt->flags |= NODES_ONLINE; MEMBER_OFFSET_INIT(pglist_data_node_zones, "pglist_data", "node_zones"); MEMBER_OFFSET_INIT(pglist_data_node_mem_map, "pglist_data", "node_mem_map"); MEMBER_OFFSET_INIT(pglist_data_node_start_paddr, "pglist_data", "node_start_paddr"); MEMBER_OFFSET_INIT(pglist_data_node_start_mapnr, "pglist_data", "node_start_mapnr"); MEMBER_OFFSET_INIT(pglist_data_node_size, "pglist_data", "node_size"); MEMBER_OFFSET_INIT(pglist_data_node_id, "pglist_data", "node_id"); MEMBER_OFFSET_INIT(pglist_data_node_next, "pglist_data", "node_next"); MEMBER_OFFSET_INIT(pglist_data_bdata, "pglist_data", "bdata"); MEMBER_OFFSET_INIT(pglist_data_nr_zones, "pglist_data", "nr_zones"); MEMBER_OFFSET_INIT(pglist_data_node_start_pfn, "pglist_data", "node_start_pfn"); MEMBER_OFFSET_INIT(pglist_data_pgdat_next, "pglist_data", "pgdat_next"); MEMBER_OFFSET_INIT(pglist_data_node_present_pages, "pglist_data", "node_present_pages"); MEMBER_OFFSET_INIT(pglist_data_node_spanned_pages, "pglist_data", "node_spanned_pages"); ARRAY_LENGTH_INIT(vt->nr_zones, pglist_data_node_zones, "pglist_data.node_zones", NULL, SIZE_OPTION(zone_struct, zone)); vt->ZONE_HIGHMEM = vt->nr_zones - 1; if (VALID_STRUCT(zone_struct)) { MEMBER_OFFSET_INIT(zone_struct_free_pages, "zone_struct", "free_pages"); MEMBER_OFFSET_INIT(zone_struct_free_area, "zone_struct", "free_area"); MEMBER_OFFSET_INIT(zone_struct_zone_pgdat, "zone_struct", "zone_pgdat"); 
MEMBER_OFFSET_INIT(zone_struct_name, "zone_struct", "name"); MEMBER_OFFSET_INIT(zone_struct_size, "zone_struct", "size"); if (INVALID_MEMBER(zone_struct_size)) MEMBER_OFFSET_INIT(zone_struct_memsize, "zone_struct", "memsize"); MEMBER_OFFSET_INIT(zone_struct_zone_start_pfn, "zone_struct", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_struct_zone_start_paddr, "zone_struct", "zone_start_paddr"); MEMBER_OFFSET_INIT(zone_struct_zone_start_mapnr, "zone_struct", "zone_start_mapnr"); MEMBER_OFFSET_INIT(zone_struct_zone_mem_map, "zone_struct", "zone_mem_map"); MEMBER_OFFSET_INIT(zone_struct_inactive_clean_pages, "zone_struct", "inactive_clean_pages"); MEMBER_OFFSET_INIT(zone_struct_inactive_clean_list, "zone_struct", "inactive_clean_list"); ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_struct_free_area, "zone_struct.free_area", NULL, SIZE(free_area_struct)); MEMBER_OFFSET_INIT(zone_struct_inactive_dirty_pages, "zone_struct", "inactive_dirty_pages"); MEMBER_OFFSET_INIT(zone_struct_active_pages, "zone_struct", "active_pages"); MEMBER_OFFSET_INIT(zone_struct_pages_min, "zone_struct", "pages_min"); MEMBER_OFFSET_INIT(zone_struct_pages_low, "zone_struct", "pages_low"); MEMBER_OFFSET_INIT(zone_struct_pages_high, "zone_struct", "pages_high"); vt->dump_free_pages = dump_free_pages_zones_v1; } else if (VALID_STRUCT(zone)) { MEMBER_OFFSET_INIT(zone_vm_stat, "zone", "vm_stat"); MEMBER_OFFSET_INIT(zone_free_pages, "zone", "free_pages"); if (INVALID_MEMBER(zone_free_pages) && VALID_MEMBER(zone_vm_stat)) { long nr_free_pages = 0; if (!enumerator_value("NR_FREE_PAGES", &nr_free_pages)) error(WARNING, "cannot determine NR_FREE_PAGES enumerator\n"); ASSIGN_OFFSET(zone_free_pages) = OFFSET(zone_vm_stat) + (nr_free_pages * sizeof(long)); } MEMBER_OFFSET_INIT(zone_free_area, "zone", "free_area"); MEMBER_OFFSET_INIT(zone_zone_pgdat, "zone", "zone_pgdat"); MEMBER_OFFSET_INIT(zone_name, "zone", "name"); MEMBER_OFFSET_INIT(zone_zone_mem_map, "zone", "zone_mem_map"); 
MEMBER_OFFSET_INIT(zone_zone_start_pfn, "zone", "zone_start_pfn"); MEMBER_OFFSET_INIT(zone_spanned_pages, "zone", "spanned_pages"); MEMBER_OFFSET_INIT(zone_present_pages, "zone", "present_pages"); MEMBER_OFFSET_INIT(zone_pages_min, "zone", "pages_min"); MEMBER_OFFSET_INIT(zone_pages_low, "zone", "pages_low"); MEMBER_OFFSET_INIT(zone_pages_high, "zone", "pages_high"); MEMBER_OFFSET_INIT(zone_watermark, "zone", "watermark"); MEMBER_OFFSET_INIT(zone_nr_active, "zone", "nr_active"); MEMBER_OFFSET_INIT(zone_nr_inactive, "zone", "nr_inactive"); MEMBER_OFFSET_INIT(zone_all_unreclaimable, "zone", "all_unreclaimable"); MEMBER_OFFSET_INIT(zone_flags, "zone", "flags"); MEMBER_OFFSET_INIT(zone_pages_scanned, "zone", "pages_scanned"); ARRAY_LENGTH_INIT(vt->nr_free_areas, zone_free_area, "zone.free_area", NULL, SIZE(free_area)); vt->dump_free_pages = dump_free_pages_zones_v2; } } else vt->numnodes = 1; node_table_init(); sprintf(buf, "%llx", (ulonglong) MAX((uint64_t)vt->max_mapnr * PAGESIZE(), machdep->memory_size())); vt->paddr_prlen = strlen(buf); if (vt->flags & PERCPU_KMALLOC_V1) vt->dump_kmem_cache = dump_kmem_cache_percpu_v1; else if (vt->flags & PERCPU_KMALLOC_V2) vt->dump_kmem_cache = dump_kmem_cache_percpu_v2; else if (vt->flags & KMALLOC_SLUB) vt->dump_kmem_cache = dump_kmem_cache_slub; else vt->dump_kmem_cache = dump_kmem_cache; if (!(vt->flags & (NODES|ZONES))) { get_array_length("free_area", &dimension, 0); if (dimension) vt->dump_free_pages = dump_multidimensional_free_pages; else vt->dump_free_pages = dump_free_pages; } if (!(vt->vma_cache = (char *)malloc(SIZE(vm_area_struct)*VMA_CACHE))) error(FATAL, "cannot malloc vm_area_struct cache\n"); if (symbol_exists("page_hash_bits")) { unsigned int page_hash_bits; get_symbol_data("page_hash_bits", sizeof(unsigned int), &page_hash_bits); len = (1 << page_hash_bits); builtin_array_length("page_hash_table", len, NULL); get_symbol_data("page_hash_table", sizeof(void *), &vt->page_hash_table); vt->page_hash_table_len = 
len; STRUCT_SIZE_INIT(page_cache_bucket, "page_cache_bucket"); if (VALID_STRUCT(page_cache_bucket)) MEMBER_OFFSET_INIT(page_cache_bucket_chain, "page_cache_bucket", "chain"); } else if (symbol_exists("page_hash_table")) { vt->page_hash_table = symbol_value("page_hash_table"); vt->page_hash_table_len = 0; } else if (CRASHDEBUG(1)) error(NOTE, "page_hash_table does not exist in this kernel\n"); kmem_cache_init(); page_flags_init(); rss_page_types_init(); vt->flags |= VM_INIT; } /* * This command displays the contents of memory, with the output formatted * in several different manners. The starting address may be entered either * symbolically or by address. The default output size is the size of a long * data type, and the default output format is hexadecimal. When hexadecimal * output is used, the output will be accompanied by an ASCII translation. * These are the options: * * -p address argument is a physical address. * -u address argument is a user virtual address. * -d display output in signed decimal format (default is hexadecimal). * -D display output in unsigned decimal format (default is hexadecimal). * -s displays output symbolically when appropriate. * -8 display output in 8-bit values. * -16 display output in 16-bit values. * -32 display output in 32-bit values (default on 32-bit machines). * -64 display output in 64-bit values (default on 64-bit machines). * * The default number of items to display is 1, but a count argument, if any, * must follow the address. 
*/ void cmd_rd(void) { int c, memtype; ulong flag; long count; ulonglong addr, endaddr; ulong offset; struct syment *sp; FILE *tmpfp; char *outputfile; flag = HEXADECIMAL|DISPLAY_DEFAULT; endaddr = 0; offset = 0; memtype = KVADDR; tmpfp = NULL; outputfile = NULL; count = -1; while ((c = getopt(argcnt, args, "axme:r:pfudDusSNo:81:3:6:")) != EOF) { switch(c) { case 'a': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_ASCII; break; case '8': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_8; break; case '1': if (!STREQ(optarg, "6")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_16; } break; case '3': if (!STREQ(optarg, "2")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_32; } break; case '6': if (!STREQ(optarg, "4")) { error(INFO, "invalid option: %c%s\n", c, optarg); argerrs++; } else { flag &= ~DISPLAY_TYPES; flag |= DISPLAY_64; } break; case 'e': endaddr = htoll(optarg, FAULT_ON_ERROR, NULL); break; case 'r': flag &= ~DISPLAY_TYPES; flag |= DISPLAY_RAW; outputfile = optarg; if ((tmpfp = fopen(outputfile, "w")) == NULL) error(FATAL, "cannot open output file: %s\n", outputfile); set_tmpfile2(tmpfp); break; case 's': case 'S': if (flag & DISPLAY_DEFAULT) { flag |= SYMBOLIC; if (c == 'S') { if (flag & SLAB_CACHE) flag |= SLAB_CACHE2; else flag |= SLAB_CACHE; } } else { error(INFO, "-%c option" " is only allowed with %d-bit display\n", c, DISPLAY_DEFAULT == DISPLAY_64 ? 
64 : 32); argerrs++; } break; case 'o': offset = stol(optarg, FAULT_ON_ERROR, NULL); flag |= SHOW_OFFSET; break; case 'p': memtype &= ~(UVADDR|KVADDR|XENMACHADDR|FILEADDR); memtype = PHYSADDR; break; case 'u': memtype &= ~(KVADDR|PHYSADDR|XENMACHADDR|FILEADDR); memtype = UVADDR; break; case 'd': flag &= ~(HEXADECIMAL|DECIMAL); flag |= DECIMAL; break; case 'D': flag &= ~(HEXADECIMAL|UDECIMAL); flag |= UDECIMAL; break; case 'm': if (!(kt->flags & ARCH_XEN)) error(FATAL, "-m option only applies to xen architecture\n"); memtype &= ~(UVADDR|KVADDR|FILEADDR); memtype = XENMACHADDR; break; case 'f': if (!pc->dumpfile) error(FATAL, "-f option requires a dumpfile\n"); memtype &= ~(KVADDR|UVADDR|PHYSADDR|XENMACHADDR); memtype = FILEADDR; break; case 'x': flag |= NO_ASCII; break; case 'N': flag |= NET_ENDIAN; break; default: argerrs++; break; } } if (argerrs || !args[optind]) cmd_usage(pc->curcmd, SYNOPSIS); if (*args[optind] == '(') addr = evall(args[optind], FAULT_ON_ERROR, NULL); else if (hexadecimal(args[optind], 0)) addr = htoll(args[optind], FAULT_ON_ERROR, NULL); else if ((sp = symbol_search(args[optind]))) addr = (ulonglong)sp->value; else { fprintf(fp, "symbol not found: %s\n", args[optind]); fprintf(fp, "possible alternatives:\n"); if (!symbol_query(args[optind], " ", NULL)) fprintf(fp, " (none found)\n"); return; } if (flag & SHOW_OFFSET) addr += offset; if (args[++optind]) count = stol(args[optind], FAULT_ON_ERROR, NULL); if (count == -1) { if (endaddr) { long bcnt; if (endaddr <= addr) error(FATAL, "invalid ending address: %llx\n", endaddr); bcnt = endaddr - addr; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: count = bcnt/8; break; case DISPLAY_32: count = bcnt/4; break; case DISPLAY_16: count = bcnt/2; break; case DISPLAY_8: case DISPLAY_ASCII: case DISPLAY_RAW: count = bcnt; break; } if (bcnt == 0) count = 1; } else { if ((flag & DISPLAY_TYPES) == DISPLAY_RAW) error(FATAL, "-r option requires either a count" " argument or the -e option\n"); count = (flag & 
DISPLAY_ASCII) ? ASCII_UNLIMITED : 1; } } else if (endaddr) error(WARNING, "ending address ignored when count is specified\n"); if ((flag & HEXADECIMAL) && !(flag & SYMBOLIC) && !(flag & NO_ASCII) && !(flag & DISPLAY_ASCII)) flag |= ASCII_ENDLINE; if (memtype == KVADDR) { if (!COMMON_VADDR_SPACE() && !IS_KVADDR(addr)) memtype = UVADDR; } display_memory(addr, count, flag, memtype, outputfile); } /* * display_memory() does the work for cmd_rd(), but can (and is) called by * other routines that want to dump raw data. Based upon the flag, the * output format is tailored to fit in an 80-character line. Hexadecimal * output is accompanied by an end-of-line ASCII translation. */ #define MAX_HEXCHARS_PER_LINE (32) /* line locations where ASCII output starts */ #define ASCII_START_8 (51 + VADDR_PRLEN) #define ASCII_START_16 (43 + VADDR_PRLEN) #define ASCII_START_32 (39 + VADDR_PRLEN) #define ASCII_START_64 (37 + VADDR_PRLEN) #define ENTRIES_8 (16) /* number of entries per line per size */ #define ENTRIES_16 (8) #define ENTRIES_32 (4) #define ENTRIES_64 (2) struct memloc { /* common holder of read memory */ uint8_t u8; uint16_t u16; uint32_t u32; uint64_t u64; uint64_t limit64; }; static void display_memory(ulonglong addr, long count, ulong flag, int memtype, void *opt) { int i, a, j; size_t typesz, sz; long written; void *location; char readtype[20]; char *addrtype; struct memloc mem; int displayed, per_line; int hx, lost; char hexchars[MAX_HEXCHARS_PER_LINE+1]; char ch; int linelen; char buf[BUFSIZE]; char slab[BUFSIZE]; int ascii_start; ulong error_handle; char *hex_64_fmt = BITS32() ? "%.*llx " : "%.*lx "; char *dec_64_fmt = BITS32() ? "%12lld " : "%15ld "; char *dec_u64_fmt = BITS32() ? 
"%12llu " : "%20lu "; if (count <= 0) error(FATAL, "invalid count request: %ld\n", count); switch (memtype) { case KVADDR: addrtype = "KVADDR"; break; case UVADDR: addrtype = "UVADDR"; break; case PHYSADDR: addrtype = "PHYSADDR"; break; case XENMACHADDR: addrtype = "XENMACHADDR"; break; case FILEADDR: addrtype = "FILEADDR"; break; default: addrtype = NULL; break; } if (CRASHDEBUG(4)) fprintf(fp, "\n", addr, count, flag, addrtype); if (flag & DISPLAY_RAW) { for (written = 0; written < count; written += sz) { sz = BUFSIZE > (count - written) ? (size_t)(count - written) : (size_t)BUFSIZE; readmem(addr + written, memtype, buf, (long)sz, "raw dump to file", FAULT_ON_ERROR); if (fwrite(buf, 1, sz, pc->tmpfile2) != sz) error(FATAL, "cannot write to: %s\n", (char *)opt); } close_tmpfile2(); fprintf(fp, "%ld bytes copied from 0x%llx to %s\n", count, addr, (char *)opt); return; } BZERO(&mem, sizeof(struct memloc)); hx = lost = linelen = typesz = per_line = ascii_start = 0; location = NULL; switch (flag & (DISPLAY_TYPES)) { case DISPLAY_64: ascii_start = ASCII_START_64; typesz = SIZEOF_64BIT; location = &mem.u64; sprintf(readtype, "64-bit %s", addrtype); per_line = ENTRIES_64; if (machine_type("IA64")) mem.limit64 = kt->end; break; case DISPLAY_32: ascii_start = ASCII_START_32; typesz = SIZEOF_32BIT; location = &mem.u32; sprintf(readtype, "32-bit %s", addrtype); per_line = ENTRIES_32; break; case DISPLAY_16: ascii_start = ASCII_START_16; typesz = SIZEOF_16BIT; location = &mem.u16; sprintf(readtype, "16-bit %s", addrtype); per_line = ENTRIES_16; break; case DISPLAY_8: ascii_start = ASCII_START_8; typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "8-bit %s", addrtype); per_line = ENTRIES_8; break; case DISPLAY_ASCII: typesz = SIZEOF_8BIT; location = &mem.u8; sprintf(readtype, "ascii"); per_line = 60; displayed = 0; break; } if (flag & NO_ERROR) error_handle = RETURN_ON_ERROR|QUIET; else error_handle = FAULT_ON_ERROR; for (i = a = 0; i < count; i++) { if(!readmem(addr, 
memtype, location, typesz, readtype, error_handle)) { addr += typesz; lost += 1; continue; } if (!(flag & DISPLAY_ASCII) && (((i - lost) % per_line) == 0)) { if ((i - lost)) { if (flag & ASCII_ENDLINE) { fprintf(fp, " %s", hexchars); } fprintf(fp, "\n"); } fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); hx = 0; BZERO(hexchars, MAX_HEXCHARS_PER_LINE+1); linelen = VADDR_PRLEN + strlen(": "); } switch (flag & DISPLAY_TYPES) { case DISPLAY_64: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { if ((!mem.limit64 || (mem.u64 <= mem.limit64)) && in_ksymbol_range(mem.u64) && strlen(value_to_symstr(mem.u64, buf, 0))) { fprintf(fp, "%-16s ", buf); linelen += strlen(buf)+1; break; } if ((flag & SLAB_CACHE) && vaddr_to_kmem_cache(mem.u64, slab, !VERBOSE)) { if ((flag & SLAB_CACHE2) || CRASHDEBUG(1)) sprintf(buf, "[%llx:%s]", (ulonglong)mem.u64, slab); else sprintf(buf, "[%s]", slab); fprintf(fp, "%-16s ", buf); linelen += strlen(buf)+1; break; } } if (flag & HEXADECIMAL) { fprintf(fp, hex_64_fmt, LONG_LONG_PRLEN, mem.u64); linelen += (LONG_LONG_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, dec_64_fmt, mem.u64); else if (flag & UDECIMAL) fprintf(fp, dec_u64_fmt, mem.u64); break; case DISPLAY_32: if ((flag & (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) == (HEXADECIMAL|SYMBOLIC|DISPLAY_DEFAULT)) { if (in_ksymbol_range(mem.u32) && strlen(value_to_symstr(mem.u32, buf, 0))) { fprintf(fp, INT_PRLEN == 16 ? "%-16s " : "%-8s ", buf); linelen += strlen(buf)+1; break; } if ((flag & SLAB_CACHE) && vaddr_to_kmem_cache(mem.u32, slab, !VERBOSE)) { if ((flag & SLAB_CACHE2) || CRASHDEBUG(1)) sprintf(buf, "[%x:%s]", mem.u32, slab); else sprintf(buf, "[%s]", slab); fprintf(fp, INT_PRLEN == 16 ? 
"%-16s " : "%-8s ", buf); linelen += strlen(buf)+1; break; } } if (flag & NET_ENDIAN) mem.u32 = htonl(mem.u32); if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", INT_PRLEN, mem.u32 ); linelen += (INT_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%12d ", mem.u32 ); else if (flag & UDECIMAL) fprintf(fp, "%12u ", mem.u32 ); break; case DISPLAY_16: if (flag & NET_ENDIAN) mem.u16 = htons(mem.u16); if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", SHORT_PRLEN, mem.u16); linelen += (SHORT_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%5d ", mem.u16); else if (flag & UDECIMAL) fprintf(fp, "%5u ", mem.u16); break; case DISPLAY_8: if (flag & HEXADECIMAL) { fprintf(fp, "%.*x ", CHAR_PRLEN, mem.u8); linelen += (CHAR_PRLEN + 1); } else if (flag & DECIMAL) fprintf(fp, "%3d ", mem.u8); else if (flag & UDECIMAL) fprintf(fp, "%3u ", mem.u8); break; case DISPLAY_ASCII: if (isprint(mem.u8)) { if ((a % per_line) == 0) { if (displayed && i) fprintf(fp, "\n"); fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); } fprintf(fp, "%c", mem.u8); displayed++; a++; } else { if (count == ASCII_UNLIMITED) return; a = 0; } break; } if (flag & HEXADECIMAL) { char* ptr; switch (flag & DISPLAY_TYPES) { case DISPLAY_64: ptr = (char*)&mem.u64; for (j = 0; j < SIZEOF_64BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_32: ptr = (char*)&mem.u32; for (j = 0; j < (SIZEOF_32BIT); j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_16: ptr = (char*)&mem.u16; for (j = 0; j < SIZEOF_16BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; case DISPLAY_8: ptr = (char*)&mem.u8; for (j = 0; j < SIZEOF_8BIT; j++) { ch = ptr[j]; if ((ch >= 0x20) && (ch < 0x7f)) { hexchars[hx++] = ch; } else { hexchars[hx++] = '.'; } } break; } } addr += typesz; } if 
((flag & ASCII_ENDLINE) && hx) {
		pad_line(fp, ascii_start - linelen, ' ');
		fprintf(fp, " %s", hexchars);
	}

	/* DISPLAY_ASCII with an unlimited count returns early, so only
	 * emit the trailing newline when something was actually shown. */
	if (lost != count )
		fprintf(fp,"\n");
}

/*
 *  cmd_wr() is the sister routine of cmd_rd(), used to modify the contents
 *  of memory.  Like the "rd" command, the starting address may be entered
 *  either symbolically or by address.  The default modification size
 *  is the size of a long data type.  Write permission must exist on
 *  /dev/mem.  The flags are similar to those used by rd:
 *
 *      -p  address argument is a physical address.
 *      -u  address argument is a user virtual address (only if ambiguous).
 *      -k  address argument is a kernel virtual address (only if ambiguous).
 *      -8  write data in an 8-bit value.
 *     -16  write data in a 16-bit value.
 *     -32  write data in a 32-bit value (default on 32-bit machines).
 *     -64  write data in a 64-bit value (default on 64-bit machines).
 *
 *  Only one value of a given datasize may be modified.
 */
void
cmd_wr(void)
{
	int c;
	ulonglong value;
	int addr_entered, value_entered;
	int memtype;
	struct memloc mem;	/* union holding the 8/16/32/64-bit image */
	ulong addr;
	void *buf;
	long size;
	struct syment *sp;

	if (DUMPFILE())
		error(FATAL, "not allowed on dumpfiles\n");

	memtype = 0;
	buf = NULL;
	addr = 0;
	size = sizeof(void*);	/* default write size: one long */
	addr_entered = value_entered = FALSE;

	/*
	 * "-16", "-32" and "-64" are parsed as option letter '1'/'3'/'6'
	 * with a required argument that must be "6"/"2"/"4" respectively.
	 */
	while ((c = getopt(argcnt, args, "fukp81:3:6:")) != EOF) {
		switch(c)
		{
		case '8':
			size = 1;
			break;

		case '1':
			if (!STREQ(optarg, "6")) {
				error(INFO, "invalid option: %c%s\n", c, optarg);
				argerrs++;
			} else
				size = 2;
			break;

		case '3':
			if (!STREQ(optarg, "2")) {
				error(INFO, "invalid option: %c%s\n", c, optarg);
				argerrs++;
			} else
				size = 4;
			break;

		case '6':
			if (!STREQ(optarg, "4")) {
				error(INFO, "invalid option: %c%s\n", c, optarg);
				argerrs++;
			} else
				size = 8;
			break;

		case 'p':
			memtype &= ~(UVADDR|KVADDR|FILEADDR);
			memtype = PHYSADDR;
			break;

		case 'u':
			memtype &= ~(PHYSADDR|KVADDR|FILEADDR);
			memtype = UVADDR;
			break;

		case 'k':
			memtype &= ~(PHYSADDR|UVADDR|FILEADDR);
			memtype = KVADDR;
			break;

		case 'f':
			/*
			 *  Unsupported, but can be forcibly implemented
			 *  by removing the DUMPFILE() check above and
			 *  recompiling.
			 */
			if (!pc->dumpfile)
				error(FATAL, "-f option requires a dumpfile\n");
			memtype &= ~(PHYSADDR|UVADDR|KVADDR);
			memtype = FILEADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/*
	 * First non-option argument: target address, given as a
	 * parenthesized expression, a hex value, or a symbol name.
	 */
	if (args[optind]) {
		if (*args[optind] == '(')
			addr = evall(args[optind], FAULT_ON_ERROR, NULL);
		else if (hexadecimal(args[optind], 0))
			addr = htoll(args[optind], FAULT_ON_ERROR, NULL);
		else if ((sp = symbol_search(args[optind])))
			addr = sp->value;
		else {
			fprintf(fp, "symbol not found: %s\n", args[optind]);
			fprintf(fp, "possible alternatives:\n");
			if (!symbol_query(args[optind], " ", NULL))
				fprintf(fp, " (none found)\n");
			return;
		}

		addr_entered = TRUE;

		/* Second argument: the value, stored in the union member
		 * matching the selected write size. */
		if (args[++optind]) {
			value = stol(args[optind], FAULT_ON_ERROR, NULL);
			value_entered = TRUE;

			switch (size)
			{
			case 1:
				mem.u8 = (uint8_t)value;
				buf = (void *)&mem.u8;
				break;

			case 2:
				mem.u16 = (uint16_t)value;
				buf = (void *)&mem.u16;
				break;

			case 4:
				mem.u32 = (uint32_t)value;
				buf = (void *)&mem.u32;
				break;

			case 8:
				mem.u64 = (uint64_t)value;
				buf = (void *)&mem.u64;
				break;
			}
		}
	}

	if (!addr_entered || !value_entered)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* No -p/-u/-k/-f given: classify the address automatically. */
	if (!memtype)
		memtype = vaddr_type(addr, CURRENT_CONTEXT());

	switch (memtype)
	{
	case UVADDR:
		if (!IS_UVADDR(addr, CURRENT_CONTEXT())) {
			error(INFO, "invalid user virtual address: %llx\n", addr);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case KVADDR:
		if (!IS_KVADDR(addr)) {
			error(INFO, "invalid kernel virtual address: %llx\n", addr);
			cmd_usage(pc->curcmd, SYNOPSIS);
		}
		break;

	case PHYSADDR:
		break;

	case FILEADDR:
		break;

	case AMBIGUOUS:
		error(INFO, "ambiguous address: %llx (requires -p, -u or -k)\n", addr);
		cmd_usage(pc->curcmd, SYNOPSIS);
	}

	writemem(addr, memtype, buf, size, "write memory", FAULT_ON_ERROR);
}

/*
 *  Format one stack word for "bt -F" output: prefer a symbol name, then a
 *  kmem-cache slab tag, falling back to a plain zero-padded hex value.
 *  The width (8 vs. 16 columns) follows the pointer size.
 */
char *
format_stack_entry(struct bt_info *bt, char *retbuf, ulong value, ulong limit)
{
	char buf[BUFSIZE];
	char slab[BUFSIZE];

	if (BITS32()) {
		if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) {
			if ((!limit || (value <= limit)) &&
			    in_ksymbol_range(value) &&
			    strlen(value_to_symstr(value, buf, 0)))
				sprintf(retbuf, INT_PRLEN == 16 ?
				    "%-16s" : "%-8s", buf);
			else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) {
				/* BT_FULL_SYM_SLAB2 adds the raw address to
				 * the slab tag. */
				if ((bt->flags & BT_FULL_SYM_SLAB2) || CRASHDEBUG(1))
					sprintf(buf, "[%lx:%s]", value, slab);
				else
					sprintf(buf, "[%s]", slab);
				sprintf(retbuf, INT_PRLEN == 16 ?
				    "%-16s" : "%-8s", buf);
			} else
				sprintf(retbuf, "%08lx", value);
		} else
			sprintf(retbuf, "%08lx", value);
	} else {
		if ((bt->flags & BT_FULL_SYM_SLAB) && accessible(value)) {
			if ((!limit || (value <= limit)) &&
			    in_ksymbol_range(value) &&
			    strlen(value_to_symstr(value, buf, 0)))
				sprintf(retbuf, "%-16s", buf);
			else if (vaddr_to_kmem_cache(value, slab, !VERBOSE)) {
				if ((bt->flags & BT_FULL_SYM_SLAB2) || CRASHDEBUG(1))
					sprintf(buf, "[%lx:%s]", value, slab);
				else
					sprintf(buf, "[%s]", slab);
				sprintf(retbuf, "%-16s", buf);
			} else
				sprintf(retbuf, "%016lx", value);
		} else
			sprintf(retbuf, "%016lx", value);
	}

	return retbuf;
}

/*
 *  For processors with "traditional" kernel/user address space distinction.
 */
int
generic_is_kvaddr(ulong addr)
{
	return (addr >= (ulong)(machdep->kvbase));
}

/*
 *  NOTE: Perhaps even this generic version should tighten up requirements
 *        by calling uvtop()?
 */
int
generic_is_uvaddr(ulong addr, struct task_context *tc)
{
	return (addr < (ulong)(machdep->kvbase));
}

/*
 *  Raw dump of a task's stack, forcing symbolic output.
 */
void
raw_stack_dump(ulong stackbase, ulong size)
{
	display_memory(stackbase, size/sizeof(ulong),
	    HEXADECIMAL|DISPLAY_DEFAULT|SYMBOLIC, KVADDR, NULL);
}

/*
 *  Raw data dump, with the option of symbolic output.
*/ void raw_data_dump(ulong addr, long count, int symbolic) { long wordcnt; ulonglong address; int memtype; switch (sizeof(long)) { case SIZEOF_32BIT: wordcnt = count/SIZEOF_32BIT; if (count % SIZEOF_32BIT) wordcnt++; break; case SIZEOF_64BIT: wordcnt = count/SIZEOF_64BIT; if (count % SIZEOF_64BIT) wordcnt++; break; default: break; } if (pc->curcmd_flags & MEMTYPE_FILEADDR) { address = pc->curcmd_private; memtype = FILEADDR; } else if (pc->curcmd_flags & MEMTYPE_UVADDR) { address = (ulonglong)addr; memtype = UVADDR; } else { address = (ulonglong)addr; memtype = KVADDR; } display_memory(address, wordcnt, HEXADECIMAL|DISPLAY_DEFAULT|(symbolic ? SYMBOLIC : ASCII_ENDLINE), memtype, NULL); } /* * Quietly checks the accessibility of a memory location. */ int accessible(ulong kva) { ulong tmp; return(readmem(kva, KVADDR, &tmp, sizeof(ulong), "accessible check", RETURN_ON_ERROR|QUIET)); } /* * readmem() is by far *the* workhorse of this whole program. It reads * memory from /dev/kmem, /dev/mem the dumpfile or /proc/kcore, whichever * is appropriate: * * addr a user, kernel or physical memory address. * memtype addr type: UVADDR, KVADDR, PHYSADDR, XENMACHADDR or FILEADDR * buffer supplied buffer to read the data into. * size number of bytes to read. * type string describing the request -- helpful when the read fails. * error_handle what to do if the read fails: FAULT_ON_ERROR kills the command * immediately; RETURN_ON_ERROR returns FALSE; QUIET suppresses * the error message. 
*/ #define PRINT_ERROR_MESSAGE ((!(error_handle & QUIET) && !STREQ(pc->curcmd, "search")) || \ (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) || CRASHDEBUG(2)) #define INVALID_UVADDR "invalid user virtual address: %llx type: \"%s\"\n" #define INVALID_KVADDR "invalid kernel virtual address: %llx type: \"%s\"\n" #define SEEK_ERRMSG "seek error: %s address: %llx type: \"%s\"\n" #define READ_ERRMSG "read error: %s address: %llx type: \"%s\"\n" #define WRITE_ERRMSG "write error: %s address: %llx type: \"%s\"\n" #define PAGE_EXCLUDED_ERRMSG "page excluded: %s address: %llx type: \"%s\"\n" #define RETURN_ON_PARTIAL_READ() \ if ((error_handle & RETURN_PARTIAL) && (size < orig_size)) { \ if (CRASHDEBUG(1)) \ error(INFO, "RETURN_PARTIAL: \"%s\" read: %ld of %ld\n",\ type, orig_size - size, orig_size); \ return TRUE; \ } int readmem(ulonglong addr, int memtype, void *buffer, long size, char *type, ulong error_handle) { int fd; long cnt, orig_size; physaddr_t paddr; ulonglong pseudo; char *bufptr; if (CRASHDEBUG(4)) fprintf(fp, "\n", addr, memtype_string(memtype, 1), type, size, error_handle_string(error_handle), (ulong)buffer); bufptr = (char *)buffer; orig_size = size; if (size <= 0) { if (PRINT_ERROR_MESSAGE) error(INFO, "invalid size request: %ld type: \"%s\"\n", size, type); goto readmem_error; } fd = REMOTE_MEMSRC() ? pc->sockfd : (ACTIVE() ? pc->mfd : pc->dfd); /* * Screen out any error conditions. 
*/ switch (memtype) { case UVADDR: if (!CURRENT_CONTEXT()) { if (PRINT_ERROR_MESSAGE) error(INFO, "no current user process\n"); goto readmem_error; } if (!IS_UVADDR(addr, CURRENT_CONTEXT())) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto readmem_error; } break; case KVADDR: if (LKCD_DUMPFILE()) addr = fix_lkcd_address(addr); if (!IS_KVADDR(addr)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto readmem_error; } break; case PHYSADDR: case XENMACHADDR: break; case FILEADDR: return generic_read_dumpfile(addr, buffer, size, type, error_handle); } while (size > 0) { switch (memtype) { case UVADDR: if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_UVADDR, addr, type); goto readmem_error; } break; case KVADDR: if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) { if (PRINT_ERROR_MESSAGE) error(INFO, INVALID_KVADDR, addr, type); goto readmem_error; } break; case PHYSADDR: paddr = addr; break; case XENMACHADDR: pseudo = xen_m2p(addr); if (pseudo == XEN_MACHADDR_NOT_FOUND) { pc->curcmd_flags |= XEN_MACHINE_ADDR; paddr = addr; } else paddr = pseudo | PAGEOFFSET(addr); break; } /* * Compute bytes till end of page. */ cnt = PAGESIZE() - PAGEOFFSET(paddr); if (cnt > size) cnt = size; if (CRASHDEBUG(4)) fprintf(fp, "<%s: addr: %llx paddr: %llx cnt: %ld>\n", readmem_function_name(), addr, (unsigned long long)paddr, cnt); if (memtype == KVADDR) pc->curcmd_flags |= MEMTYPE_KVADDR; else pc->curcmd_flags &= ~MEMTYPE_KVADDR; switch (READMEM(fd, bufptr, cnt, (memtype == PHYSADDR) || (memtype == XENMACHADDR) ? 
0 : addr, paddr)) { case SEEK_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; case READ_ERROR: if (PRINT_ERROR_MESSAGE) error(INFO, READ_ERRMSG, memtype_string(memtype, 0), addr, type); if ((pc->flags & DEVMEM) && (kt->flags & PRE_KERNEL_INIT) && !(error_handle & NO_DEVMEM_SWITCH) && devmem_is_restricted() && switch_to_proc_kcore()) return(readmem(addr, memtype, bufptr, size, type, error_handle)); goto readmem_error; case PAGE_EXCLUDED: RETURN_ON_PARTIAL_READ(); if (PRINT_ERROR_MESSAGE) error(INFO, PAGE_EXCLUDED_ERRMSG, memtype_string(memtype, 0), addr, type); goto readmem_error; default: break; } addr += cnt; bufptr += cnt; size -= cnt; } return TRUE; readmem_error: switch (error_handle) { case (FAULT_ON_ERROR): case (QUIET|FAULT_ON_ERROR): if (pc->flags & IN_FOREACH) RESUME_FOREACH(); RESTART(); case (RETURN_ON_ERROR): case (RETURN_PARTIAL|RETURN_ON_ERROR): case (QUIET|RETURN_ON_ERROR): break; } return FALSE; } /* * Accept anything... */ int generic_verify_paddr(physaddr_t paddr) { return TRUE; } /* * Read from /dev/mem. */ int read_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { int readcnt; if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "verify_paddr(%lx) failed\n", paddr); return READ_ERROR; } /* * /dev/mem disallows anything >= __pa(high_memory) * * However it will allow 64-bit lseeks to anywhere, and when followed * by pulling a 32-bit address from the 64-bit file position, it * quietly returns faulty data from the (wrapped-around) address. 
*/ if (vt->high_memory && (paddr >= (physaddr_t)(VTOP(vt->high_memory)))) { readcnt = 0; errno = 0; goto try_dev_kmem; } if (lseek(fd, (off_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; next_read: errno = 0; readcnt = read(fd, bufptr, cnt); if ((readcnt != cnt) && CRASHDEBUG(4)) { if (errno) perror("/dev/mem"); error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n", paddr, cnt, readcnt, readcnt); } try_dev_kmem: /* * On 32-bit intel architectures high memory can can only be accessed * via vmalloc'd addresses. However, /dev/mem returns 0 bytes, and * non-reserved memory pages can't be mmap'd, so the only alternative * is to read it from /dev/kmem. */ if ((readcnt != cnt) && BITS32() && !readcnt && !errno && IS_VMALLOC_ADDR(addr)) readcnt = read_dev_kmem(addr, bufptr, cnt); /* * The 2.6 valid_phys_addr_range() can potentially shorten the * count of a legitimate read request. So far this has only been * seen on an ia64 where a kernel page straddles an EFI segment. */ if ((readcnt != cnt) && readcnt && (machdep->flags & DEVMEMRD) && !errno) { if (CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) error(INFO, "read(/dev/mem, %lx, %ld): %ld (%lx)\n", paddr, cnt, readcnt, readcnt); cnt -= readcnt; bufptr += readcnt; goto next_read; } if (readcnt != cnt) return READ_ERROR; return readcnt; } /* * Write to /dev/mem. */ int write_dev_mem(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr) { if (!machdep->verify_paddr(paddr)) { if (CRASHDEBUG(1)) error(INFO, "verify_paddr(%lx) failed\n", paddr); return WRITE_ERROR; } if (lseek(fd, (off_t)paddr, SEEK_SET) == -1) return SEEK_ERROR; if (write(fd, bufptr, cnt) != cnt) return WRITE_ERROR; return cnt; } /* * The first required reads of memory are done in kernel_init(), * so if there's a fatal read error of /dev/mem, display a warning * message if it appears that CONFIG_STRICT_DEVMEM is in effect. 
 *  On x86 and x86_64, only the first 256 pages of physical memory
 *  are accessible:
 *
 *    #ifdef CONFIG_STRICT_DEVMEM
 *    int devmem_is_allowed(unsigned long pagenr)
 *    {
 *            if (pagenr <= 256)
 *                    return 1;
 *            if (!page_is_ram(pagenr))
 *                    return 1;
 *            return 0;
 *    }
 *    #endif
 *
 *  It would probably suffice to simply check for the existence of
 *  devmem_is_allowed(), but on x86 and x86_64 verify pfn 256 reads OK,
 *  and 257 fails.
 *
 *  Update: a patch has been posted to LKML to fix the off-by-one error
 *  by changing "<= 256" to "< 256":
 *
 *    https://lkml.org/lkml/2012/8/28/357
 *
 *  The X86/X86_64 lower-boundary pfn check below has been changed
 *  (preemptively) from 256 to 255.
 *
 *  In any case, if that x86/x86_64 check fails to prove CONFIG_STRICT_DEVMEM
 *  is configured, then the function will check that "jiffies" can be read,
 *  as is done for the other architectures.
 */
static int
devmem_is_restricted(void)
{
	long tmp;
	int restricted;

	/*
	 *  Check for pre-CONFIG_STRICT_DEVMEM kernels.
	 */
	if (!kernel_symbol_exists("devmem_is_allowed")) {
		if (machine_type("ARM") ||
		    machine_type("ARM64") ||
		    machine_type("X86") ||
		    machine_type("X86_64") ||
		    machine_type("PPC") ||
		    machine_type("PPC64"))
			return FALSE;
	}

	restricted = FALSE;

	if (STREQ(pc->live_memsrc, "/dev/mem")) {
		/* Probe the documented boundary: pfn 255 should read OK
		 * while pfn 257 fails when STRICT_DEVMEM is configured. */
		if (machine_type("X86") || machine_type("X86_64")) {
			if (readmem(255*PAGESIZE(), PHYSADDR, &tmp,
			    sizeof(long), "devmem_is_allowed - pfn 255",
			    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH) &&
			    !(readmem(257*PAGESIZE(), PHYSADDR, &tmp,
			    sizeof(long), "devmem_is_allowed - pfn 257",
			    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH)))
				restricted = TRUE;
		}

		/* Architecture-independent fallback: "jiffies" is always
		 * present; an unreadable jiffies implies restriction. */
		if (kernel_symbol_exists("jiffies") &&
		    !readmem(symbol_value("jiffies"), KVADDR, &tmp,
		    sizeof(ulong), "devmem_is_allowed - jiffies",
		    QUIET|RETURN_ON_ERROR|NO_DEVMEM_SWITCH))
			restricted = TRUE;

		if (restricted)
			error(INFO,
			    "this kernel may be configured with CONFIG_STRICT_DEVMEM,"
			    " which\n renders /dev/mem unusable as a live memory "
			    "source.\n");
	}

	return restricted;
}

/*
 *  Fall back from a restricted /dev/mem to /proc/kcore as the live
 *  memory source, re-pointing pc->readmem/writemem accordingly.
 *  Returns TRUE only if /proc/kcore exists, opens, and initializes.
 */
static int
switch_to_proc_kcore(void)
{
	close(pc->mfd);

	if (file_exists("/proc/kcore", NULL))
		error(INFO, "trying /proc/kcore as an alternative to /dev/mem\n\n");
	else
		return FALSE;

	if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0) {
		error(INFO, "/proc/kcore: %s\n", strerror(errno));
		return FALSE;
	}

	if (!proc_kcore_init(fp)) {
		error(INFO, "/proc/kcore: initialization failed\n");
		return FALSE;
	}

	/* Swap the live memory source over to /proc/kcore. */
	pc->flags &= ~DEVMEM;
	pc->flags |= PROC_KCORE;
	pc->readmem = read_proc_kcore;
	pc->writemem = write_proc_kcore;
	pc->live_memsrc = "/proc/kcore";

	return TRUE;
}

/*
 *  Read from memory driver.
 */
int
read_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	/* Untranslatable xen machine addresses cannot be serviced here. */
	if (pc->curcmd_flags & XEN_MACHINE_ADDR)
		return READ_ERROR;

	if (!machdep->verify_paddr(paddr)) {
		if (CRASHDEBUG(1))
			error(INFO, "verify_paddr(%lx) failed\n", paddr);
		return READ_ERROR;
	}

	lseek(fd, (loff_t)paddr, SEEK_SET);

	if (read(fd, bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Write to memory driver.
 */
int
write_memory_device(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	/* The driver device node is created read-only unless the build
	 * enabled write mode. */
	if (!(MEMORY_DRIVER_DEVICE_MODE & S_IWUSR))
		return (error(FATAL, "cannot write to %s!\n", pc->live_memsrc));

	if (lseek(fd, (loff_t)paddr, SEEK_SET) == -1)
		return SEEK_ERROR;

	if (write(fd, bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 *  Read from an MCLX formatted dumpfile.
 */
int
read_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	if (vas_lseek((ulong)paddr, SEEK_SET))
		return SEEK_ERROR;

	if (vas_read((void *)bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Write to an MCLX formatted dumpfile.  This only modifies the buffered
 *  copy only; if it gets flushed, the modification is lost.
 */
int
write_mclx_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	if (vas_lseek((ulong)paddr, SEEK_SET))
		return SEEK_ERROR;

	if (vas_write((void *)bufptr, cnt) != cnt)
		return WRITE_ERROR;

	return cnt;
}

/*
 *  Read from an LKCD formatted dumpfile.
 */
int
read_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	set_lkcd_fp(fp);

	if (!lkcd_lseek(paddr))
		return SEEK_ERROR;

	if (lkcd_read((void *)bufptr, cnt) != cnt)
		return READ_ERROR;

	return cnt;
}

/*
 *  Write to an LKCD formatted dumpfile.  (dummy routine -- not allowed)
 */
int
write_lkcd_dumpfile(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	return (error(FATAL, "cannot write to an LKCD compressed dump!\n"));
}

/*
 *  Read from network daemon.
 */
int
read_daemon(int fd, void *bufptr, int cnt, ulong vaddr, physaddr_t paddr)
{
	if (remote_memory_read(pc->rmfd, bufptr, cnt, paddr, -1) == cnt)
		return cnt;

	if (!IS_VMALLOC_ADDR(vaddr) || DUMPFILE())
		return READ_ERROR;

	/*
	 *  On 32-bit architectures w/memory above ~936MB,
	 *  that memory can only be accessed via vmalloc'd
	 *  addresses.  However, /dev/mem returns 0 bytes,
	 *  and non-reserved memory pages can't be mmap'd, so
	 *  the only alternative is to read it from /dev/kmem.
	 */
	if (BITS32() && remote_memory_read(pc->rkfd, bufptr, cnt, vaddr, -1) == cnt)
		return cnt;

	return READ_ERROR;
}

/*
 *  Write to network daemon.
 */
int
write_daemon(int fd, void *bufptr, int cnt, ulong addr, physaddr_t paddr)
{
	return (error(FATAL, "writing to daemon not supported yet [TBD]\n"));
}

/*
 *  Turn the memtype bitmask into a string.
 *  NOTE: returns a pointer to a static buffer -- not reentrant.
 */
static char *
memtype_string(int memtype, int debug)
{
	static char membuf[40];

	switch (memtype)
	{
	case UVADDR:
		sprintf(membuf, debug ? "UVADDR" : "user virtual");
		break;
	case KVADDR:
		sprintf(membuf, debug ? "KVADDR" : "kernel virtual");
		break;
	case PHYSADDR:
		sprintf(membuf, debug ? "PHYSADDR" : "physical");
		break;
	case XENMACHADDR:
		sprintf(membuf, debug ? "XENMACHADDR" : "xen machine");
		break;
	case FILEADDR:
		sprintf(membuf, debug ? "FILEADDR" : "dumpfile");
		break;
	default:
		if (debug)
			sprintf(membuf, "0x%x (?)", memtype);
		else
			sprintf(membuf, "unknown");
		break;
	}

	return membuf;
}

/*
 *  Turn the error_handle bitmask into a string,
 *  Note: FAULT_ON_ERROR == 0
 *  NOTE: returns a pointer to a static buffer -- not reentrant.
 */
static char *
error_handle_string(ulong error_handle)
{
	static char ebuf[20];
	int others;

	sprintf(ebuf, "(");
	others = 0;

	if (error_handle & RETURN_ON_ERROR)
		sprintf(&ebuf[strlen(ebuf)], "%sROE", others++ ? "|" : "");
	if (error_handle & FAULT_ON_ERROR)
		sprintf(&ebuf[strlen(ebuf)], "%sFOE", others++ ? "|" : "");
	if (error_handle & QUIET)
		sprintf(&ebuf[strlen(ebuf)], "%sQ", others++ ? "|" : "");
	if (error_handle & HEX_BIAS)
		sprintf(&ebuf[strlen(ebuf)], "%sHB", others++ ? "|" : "");
	if (error_handle & RETURN_PARTIAL)
		sprintf(&ebuf[strlen(ebuf)], "%sRP", others++ ? "|" : "");
	if (error_handle & NO_DEVMEM_SWITCH)
		sprintf(&ebuf[strlen(ebuf)], "%sNDS", others++ ? "|" : "");

	strcat(ebuf, ")");

	return ebuf;
}

/*
 *  Sister routine to readmem(): write "size" bytes from "buffer" to
 *  "addr" of the given memtype, translating virtual addresses page by
 *  page and dispatching to the pc->writemem plug-in.
 */
int
writemem(ulonglong addr, int memtype, void *buffer, long size,
	char *type, ulong error_handle)
{
	int fd;
	long cnt;
	physaddr_t paddr;
	char *bufptr;

	if (CRASHDEBUG(1))
		fprintf(fp, "writemem: %llx, %s, \"%s\", %ld, %s %lx\n",
		    addr, memtype_string(memtype, 1), type, size,
		    error_handle_string(error_handle), (ulong)buffer);

	if (size < 0) {
		if (PRINT_ERROR_MESSAGE)
			error(INFO, "invalid size request: %ld\n", size);
		goto writemem_error;
	}

	bufptr = (char *)buffer;

	fd = ACTIVE() ? pc->mfd : pc->dfd;

	/*
	 *  Screen out any error conditions.
	 */
	switch (memtype)
	{
	case UVADDR:
		if (!CURRENT_CONTEXT()) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, "no current user process\n");
			goto writemem_error;
		}
		if (!IS_UVADDR(addr, CURRENT_CONTEXT())) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_UVADDR, addr, type);
			goto writemem_error;
		}
		break;

	case KVADDR:
		if (!IS_KVADDR(addr)) {
			if (PRINT_ERROR_MESSAGE)
				error(INFO, INVALID_KVADDR, addr, type);
			goto writemem_error;
		}
		break;

	case PHYSADDR:
		break;

	case FILEADDR:
		return generic_write_dumpfile(addr, buffer, size, type, error_handle);
	}

	while (size > 0) {
		/* Translate the current virtual address to physical. */
		switch (memtype)
		{
		case UVADDR:
			if (!uvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_UVADDR, addr, type);
				goto writemem_error;
			}
			break;

		case KVADDR:
			if (!kvtop(CURRENT_CONTEXT(), addr, &paddr, 0)) {
				if (PRINT_ERROR_MESSAGE)
					error(INFO, INVALID_KVADDR, addr, type);
				goto writemem_error;
			}
			break;

		case PHYSADDR:
			paddr = addr;
			break;
		}

		/*
		 *  Compute bytes till end of page.
		 */
		cnt = PAGESIZE() - PAGEOFFSET(paddr);
		if (cnt > size)
			cnt = size;

		switch (pc->writemem(fd, bufptr, cnt, addr, paddr))
		{
		case SEEK_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, SEEK_ERRMSG,
				    memtype_string(memtype, 0), addr, type);
			goto writemem_error;

		case WRITE_ERROR:
			if (PRINT_ERROR_MESSAGE)
				error(INFO, WRITE_ERRMSG,
				    memtype_string(memtype, 0), addr, type);
			goto writemem_error;

		default:
			break;
		}

		addr += cnt;
		bufptr += cnt;
		size -= cnt;
	}

	return TRUE;

writemem_error:
	switch (error_handle)
	{
	case (FAULT_ON_ERROR):
	case (QUIET|FAULT_ON_ERROR):
		RESTART();	/* longjmps back to the command loop */

	case (RETURN_ON_ERROR):
	case (QUIET|RETURN_ON_ERROR):
		break;
	}

	return FALSE;
}

/*
 *  When /dev/mem won't allow access, try /dev/kmem.
*/ static ssize_t read_dev_kmem(ulong vaddr, char *bufptr, long cnt) { ssize_t readcnt; if (pc->kfd < 0) { if ((pc->kfd = open("/dev/kmem", O_RDONLY)) < 0) return 0; } if (lseek(pc->kfd, vaddr, SEEK_SET) == -1) return 0; readcnt = read(pc->kfd, bufptr, cnt); if (readcnt != cnt) readcnt = 0; return readcnt; } /* * Generic dumpfile read/write functions to handle FILEADDR * memtype arguments to readmem() and writemem(). These are * not to be confused with pc->readmem/writemem plug-ins. */ static int generic_read_dumpfile(ulonglong addr, void *buffer, long size, char *type, ulong error_handle) { int fd; int retval; retval = TRUE; if (!pc->dumpfile) error(FATAL, "command requires a dumpfile\n"); if ((fd = open(pc->dumpfile, O_RDONLY)) < 0) error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno)); if (lseek(fd, addr, SEEK_SET) == -1) { if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } else if (read(fd, buffer, size) != size) { if (PRINT_ERROR_MESSAGE) error(INFO, READ_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } close(fd); return retval; } static int generic_write_dumpfile(ulonglong addr, void *buffer, long size, char *type, ulong error_handle) { int fd; int retval; retval = TRUE; if (!pc->dumpfile) error(FATAL, "command requires a dumpfile\n"); if ((fd = open(pc->dumpfile, O_WRONLY)) < 0) error(FATAL, "%s: %s\n", pc->dumpfile, strerror(errno)); if (lseek(fd, addr, SEEK_SET) == -1) { if (PRINT_ERROR_MESSAGE) error(INFO, SEEK_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } else if (write(fd, buffer, size) != size) { if (PRINT_ERROR_MESSAGE) error(INFO, WRITE_ERRMSG, memtype_string(FILEADDR, 0), addr, type); retval = FALSE; } close(fd); return retval; } /* * Translates a kernel virtual address to its physical address. cmd_vtop() * sets the verbose flag so that the pte translation gets displayed; all * other callers quietly accept the translation. 
int
kvtop(struct task_context *tc, ulong kvaddr, physaddr_t *paddr, int verbose)
{
	physaddr_t throwaway;
	struct task_context *ctx;

	/* Default to the current context and a scratch result slot when
	 * the caller passes NULL for either. */
	ctx = tc ? tc : CURRENT_CONTEXT();

	return (machdep->kvtop(ctx, kvaddr,
	    paddr ? paddr : &throwaway, verbose));
}

/*
 *  Translates a user virtual address to its physical address.  cmd_vtop()
 *  sets the verbose flag so that the pte translation gets displayed; all
 *  other callers quietly accept the translation.
 *
 *  This routine can also take mapped kernel virtual addresses if the -u flag
 *  was passed to cmd_vtop().  If so, it makes the translation using the
 *  kernel-memory PGD entry instead of swapper_pg_dir.
 */
int
uvtop(struct task_context *tc, ulong vaddr, physaddr_t *paddr, int verbose)
{
	/* Delegate straight to the architecture-specific translator. */
	return machdep->uvtop(tc, vaddr, paddr, verbose);
}

/*
 *  The vtop command does a verbose translation of a user or kernel virtual
 *  address into its physical address.  The pte translation is shown by
 *  passing the VERBOSE flag to kvtop() or uvtop().  If it's a user virtual
 *  address, the vm_area_struct data containing the page is displayed.
 *  Lastly, the mem_map[] page data containing the address is displayed.
 */
void
cmd_vtop(void)
{
	int c;
	ulong vaddr, context;
	int others;
	ulong vtop_flags, loop_vtop_flags;
	struct task_context *tc;

	vtop_flags = loop_vtop_flags = 0;
	tc = NULL;

	while ((c = getopt(argcnt, args, "ukc:")) != EOF) {
		switch(c)
		{
		case 'c':
			/* -c selects a task/pid context and forces use of
			 * that task's user PGD for translations. */
			switch (str_to_context(optarg, &context, &tc))
			{
			case STR_PID:
			case STR_TASK:
				vtop_flags |= USE_USER_PGD;
				break;

			case STR_INVALID:
				error(FATAL, "invalid task or pid value: %s\n",
				    optarg);
				break;
			}
			break;

		case 'u':
			vtop_flags |= UVADDR;
			break;

		case 'k':
			vtop_flags |= KVADDR;
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	if (!tc && !(tc = CURRENT_CONTEXT()))
		error(FATAL, "no current user process\n");

	if ((vtop_flags & (UVADDR|KVADDR)) == (UVADDR|KVADDR))
		error(FATAL, "-u and -k options are mutually exclusive\n");

	others = 0;
	while (args[optind]) {
		vaddr = htol(args[optind], FAULT_ON_ERROR, NULL);

		/* Without -u/-k, classify each address individually;
		 * loop_vtop_flags holds the per-address determination. */
		if (!(vtop_flags & (UVADDR|KVADDR))) {
			switch (vaddr_type(vaddr, tc))
			{
			case UVADDR:
				loop_vtop_flags = UVADDR;
				break;
			case KVADDR:
				loop_vtop_flags = KVADDR;
				break;
			case AMBIGUOUS:
				error(FATAL,
				    "ambiguous address: %lx (requires -u or -k)\n",
				    vaddr);
				break;
			}
		} else
			loop_vtop_flags = 0;

		if (others++)
			fprintf(fp, "\n");

		do_vtop(vaddr, tc, vtop_flags | loop_vtop_flags);

		/* Debug aid: cross-check against the remote daemon's
		 * own translation. */
		if (REMOTE() && CRASHDEBUG(1)) {
			ulong paddr = remote_vtop(tc->processor, vaddr);
			if (paddr)
				fprintf(fp, "rvtop(%lx)=%lx\n", vaddr, paddr);
		}

		optind++;
	}
}

/*
 *  Do the work for cmd_vtop(), or less likely, foreach().
 */
void
do_vtop(ulong vaddr, struct task_context *tc, ulong vtop_flags)
{
	physaddr_t paddr;
	ulong vma, page;
	int page_exists;
	struct meminfo meminfo;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int memtype = 0;

	/* Resolve the address class from the flags, or classify the
	 * address itself when neither UVADDR nor KVADDR was forced. */
	switch (vtop_flags & (UVADDR|KVADDR))
	{
	case UVADDR:
		memtype = UVADDR;
		break;

	case KVADDR:
		memtype = KVADDR;
		break;

	case (UVADDR|KVADDR):
		error(FATAL, "-u and -k options are mutually exclusive\n");
		break;

	default:
		switch (vaddr_type(vaddr, tc))
		{
		case UVADDR:
			memtype = UVADDR;
			break;
		case KVADDR:
			memtype = KVADDR;
			break;
		case AMBIGUOUS:
			error(FATAL,
			    "ambiguous address: %lx (requires -u or -k)\n",
			    vaddr);
			break;
		}
		break;
	}

	page_exists = paddr = 0;

	switch (memtype) {
	case UVADDR:
		fprintf(fp, "%s %s\n",
		    mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"),
		    mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

		if (!IN_TASK_VMA(tc->task, vaddr)) {
			fprintf(fp, "%s (not accessible)\n\n",
			    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
			    MKSTR(vaddr)));
			return;
		}
		/* First a quiet translation to decide what to print... */
		if (!uvtop(tc, vaddr, &paddr, 0)) {
			fprintf(fp, "%s %s\n\n",
			    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
			    MKSTR(vaddr)),
			    (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
			    "(page not available)" : "(not mapped)");
			page_exists = FALSE;
		} else {
			fprintf(fp, "%s %s\n\n",
			    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
			    MKSTR(vaddr)),
			    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
			    MKSTR(&paddr)));
			page_exists = TRUE;
		}
		/* ...then repeat VERBOSE to display the pte chain. */
		uvtop(tc, vaddr, &paddr, VERBOSE);
		fprintf(fp, "\n");
		vma = vm_area_dump(tc->task, UVADDR, vaddr, 0);
		/* For unmapped pages, show the swap or backing-file
		 * location instead. */
		if (!page_exists) {
			if (swap_location(paddr, buf1))
				fprintf(fp, "\nSWAP: %s\n", buf1);
			else if (vma_file_offset(vma, vaddr, buf1))
				fprintf(fp, "\nFILE: %s\n", buf1);
		}
		break;

	case KVADDR:
		fprintf(fp, "%s %s\n",
		    mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"),
		    mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));

		if (!IS_KVADDR(vaddr)) {
			fprintf(fp, "%-8lx (not a kernel virtual address)\n\n",
			    vaddr);
			return;
		}
		/* -c was given: translate through the task's user PGD
		 * rather than swapper_pg_dir. */
		if (vtop_flags & USE_USER_PGD) {
			if (!uvtop(tc, vaddr, &paddr, 0)) {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				    MKSTR(vaddr)),
				    (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
				    "(page not available)" : "(not mapped)");
				page_exists = FALSE;
			} else {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX,
				    MKSTR(vaddr)),
				    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
				    MKSTR(&paddr)));
				page_exists = TRUE;
			}
			uvtop(tc, vaddr, &paddr, VERBOSE);
		} else {
			if (!kvtop(tc, vaddr, &paddr, 0)) {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX,
				    MKSTR(vaddr)),
				    (XEN() && (paddr == PADDR_NOT_AVAILABLE)) ?
				    "(page not available)" : "(not mapped)");
				page_exists = FALSE;
			} else {
				fprintf(fp, "%s %s\n\n",
				    mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX,
				    MKSTR(vaddr)),
				    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
				    MKSTR(&paddr)));
				page_exists = TRUE;
			}
			kvtop(tc, vaddr, &paddr, VERBOSE);
		}
		break;
	}

	fprintf(fp, "\n");

	/* Finish with the mem_map[] page data for the physical page. */
	if (page_exists && phys_to_page(paddr, &page)) {
		if ((pc->flags & DEVMEM) && (paddr >= VTOP(vt->high_memory)))
			return;
		BZERO(&meminfo, sizeof(struct meminfo));
		meminfo.flags = ADDRESS_SPECIFIED;
		meminfo.spec_addr = paddr;
		meminfo.memtype = PHYSADDR;
		dump_mem_map(&meminfo);
	}
}

/*
 *  Runs PTOV() on the physical address argument or translates
 *  a per-cpu offset and cpu specifier.
 */
void
cmd_ptov(void)
{
	int c, len, unknown;
	ulong vaddr;
	physaddr_t paddr, paddr_test;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	int others;
	char *cpuspec;
	ulong *cpus;

	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs || !args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	others = 0;
	cpuspec = NULL;
	cpus = NULL;

	while (args[optind]) {
		/* "offset:cpuspec" selects per-cpu translation; a bare
		 * ":" means the current task's cpu. */
		cpuspec = strchr(args[optind], ':');
		if (cpuspec) {
			*cpuspec++ = NULLCHAR;
			cpus = get_cpumask_buf();
			if (STREQ(cpuspec, ""))
				SET_BIT(cpus, CURRENT_CONTEXT()->processor);
			else
				make_cpumask(cpuspec, cpus, FAULT_ON_ERROR, NULL);
		}

		paddr = htoll(args[optind], FAULT_ON_ERROR, NULL);

		if (cpuspec) {
			/* Size the CPU column from the widest "[n]" tag. */
			sprintf(buf1, "[%d]", kt->cpus-1);
			len = strlen(buf1) + 2;

			fprintf(fp, "%sPER-CPU OFFSET: %llx\n",
			    others++ ? "\n" : "", (ulonglong)paddr);
			fprintf(fp, " %s %s\n",
			    mkstring(buf1, len, LJUST, "CPU"),
			    mkstring(buf2, VADDR_PRLEN, LJUST, "VIRTUAL"));
			for (c = 0; c < kt->cpus; c++) {
				if (!NUM_IN_BITMAP(cpus, c))
					continue;
				vaddr = paddr + kt->__per_cpu_offset[c];
				sprintf(buf1, "[%d]", c);
				fprintf(fp, " %s%lx",
				    mkstring(buf2, len, LJUST, buf1), vaddr);
				if (hide_offline_cpu(c))
					fprintf(fp, " [OFFLINE]\n");
				else
					fprintf(fp, "\n");
			}
			FREEBUF(cpus);
		} else {
			vaddr = PTOV(paddr);

			/* On 32-bit systems, verify the unity mapping by
			 * translating back; mismatches print "unknown". */
			unknown = BITS32() &&
			    (!kvtop(0, vaddr, &paddr_test, 0) ||
			    (paddr_test != paddr));

			fprintf(fp, "%s%s %s\n", others++ ? "\n" : "",
			    mkstring(buf1, VADDR_PRLEN, LJUST, "VIRTUAL"),
			    mkstring(buf2, VADDR_PRLEN, LJUST, "PHYSICAL"));
			fprintf(fp, "%s %s\n",
			    unknown ?
			    mkstring(buf1, VADDR_PRLEN, LJUST, "unknown") :
			    mkstring(buf1, VADDR_PRLEN, LJUST|LONG_HEX,
			    MKSTR(vaddr)),
			    mkstring(buf2, VADDR_PRLEN, LJUST|LONGLONG_HEX,
			    MKSTR(&paddr)));
		}

		optind++;
	}
}

/*
 *  Runs PTOB() on the page frame number to get the page address.
 */
void
cmd_ptob(void)
{
	ulonglong value;

	optind = 1;
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		value = stoll(args[optind], FAULT_ON_ERROR, NULL);
		fprintf(fp, "%llx: %llx\n", value, PTOB(value));
		optind++;
	}
}

/*
 *  Runs BTOP() on the address to get the page frame number.
 */
void
cmd_btop(void)
{
	ulonglong value;

	optind = 1;
	if (!args[optind])
		cmd_usage(pc->curcmd, SYNOPSIS);

	while (args[optind]) {
		value = htoll(args[optind], FAULT_ON_ERROR, NULL);
		fprintf(fp, "%llx: %llx\n", value, BTOP(value));
		optind++;
	}
}

/*
 *  This command displays basic virtual memory information of a context,
 *  consisting of a pointer to its mm_struct, its RSS and total virtual
 *  memory size; and a list of pointers to each vm_area_struct, its starting
 *  and ending address, and vm_flags value.  The argument can be a task
 *  address or a PID number; if no args, the current context is used.
 */
void cmd_vm(void)
{ int c; ulong flag; ulong value; ulong single_vma; ulonglong llvalue; struct task_context *tc; struct reference reference, *ref; unsigned int radix; int subsequent;
flag = 0; single_vma = 0; radix = 0; ref = NULL; BZERO(&reference, sizeof(struct reference));
while ((c = getopt(argcnt, args, "f:pmvR:P:xdM:")) != EOF) {
switch(c) {
/* -M: force the use of a specific mm_struct address for this command */
case 'M': pc->curcmd_private = htoll(optarg, FAULT_ON_ERROR, NULL); pc->curcmd_flags |= MM_STRUCT_FORCE; break;
/* -f: just decode a vm_flags value and return */
case 'f': if (flag) argerrs++; else { llvalue = htoll(optarg, FAULT_ON_ERROR, NULL); do_vm_flags(llvalue); return; } break;
case 'p': if (flag) argerrs++; else flag |= PHYSADDR; break;
case 'm': if (flag) argerrs++; else flag |= PRINT_MM_STRUCT; break;
case 'v': if (flag) argerrs++; else flag |= PRINT_VMA_STRUCTS; break;
/* -R: reference search; implies PHYSADDR and excludes other modes */
case 'R': if (ref) { error(INFO, "only one -R option allowed\n"); argerrs++; } else if (flag && !(flag & PHYSADDR)) argerrs++; else { ref = &reference; ref->str = optarg; flag |= PHYSADDR; } break;
case 'P': if (flag) argerrs++; else { flag |= PRINT_SINGLE_VMA; single_vma = htol(optarg, FAULT_ON_ERROR, NULL); } break;
case 'x': if (radix == 10) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 16; break;
case 'd': if (radix == 16) error(FATAL, "-d and -x are mutually exclusive\n"); radix = 10; break;
default: argerrs++; break;
} }
if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS);
if (radix == 10) flag |= PRINT_RADIX_10; else if (radix == 16) flag |= PRINT_RADIX_16;
/* no task/pid arguments: dump the current context */
if (!args[optind]) { if (!ref) print_task_header(fp, CURRENT_CONTEXT(), 0); vm_area_dump(CURRENT_TASK(), flag, single_vma, ref); return; }
subsequent = 0;
while (args[optind]) {
switch (str_to_context(args[optind], &value, &tc)) {
/* a PID can map to multiple contexts; walk the tc_next chain */
case STR_PID: for (tc = pid_to_context(value); tc; tc = tc->tc_next) { if (!ref) print_task_header(fp, tc, subsequent++); vm_area_dump(tc->task, flag, single_vma, ref); } break;
case STR_TASK: if (!ref) print_task_header(fp, tc, subsequent++); vm_area_dump(tc->task, flag, single_vma, ref); break;
case STR_INVALID: error(INFO, "%sinvalid task or pid value: %s\n", subsequent++ ? "\n" : "", args[optind]); break;
}
optind++; } }
/*
 *  Translate a vm_flags value.
 *
 *  NOTE(review): several bit values below are deliberately duplicated --
 *  the kernel reused bits across versions (e.g. 0x200, 0x400, 0x100000);
 *  do_vm_flags() disambiguates them at display time.
 */
#define VM_READ		0x00000001ULL	/* currently active flags */
#define VM_WRITE	0x00000002ULL
#define VM_EXEC		0x00000004ULL
#define VM_SHARED	0x00000008ULL
#define VM_MAYREAD	0x00000010ULL	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020ULL
#define VM_MAYEXEC	0x00000040ULL
#define VM_MAYSHARE	0x00000080ULL
#define VM_GROWSDOWN	0x00000100ULL	/* general info on the segment */
#define VM_GROWSUP	0x00000200ULL
#define VM_NOHUGEPAGE	0x00000200ULL	/* MADV_NOHUGEPAGE marked this vma */
#define VM_SHM		0x00000400ULL	/* shared memory area, don't swap out */
#define VM_PFNMAP	0x00000400ULL
#define VM_DENYWRITE	0x00000800ULL	/* ETXTBSY on write attempts.. */
#define VM_EXECUTABLE	0x00001000ULL
#define VM_LOCKED	0x00002000ULL
#define VM_IO		0x00004000ULL	/* Memory mapped I/O or similar */
#define VM_SEQ_READ	0x00008000ULL	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000ULL	/* App will not benefit from clustered reads */
#define VM_DONTCOPY	0x00020000ULL	/* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000ULL	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000ULL	/* Don't unmap it from swap_out */
#define VM_BIGPAGE	0x00100000ULL	/* bigpage mappings, no pte's */
#define VM_BIGMAP	0x00200000ULL	/* user wants bigpage mapping */
#define VM_WRITECOMBINED	0x00100000ULL	/* Write-combined */
#define VM_NONCACHED	0x00200000ULL	/* Noncached access */
#define VM_HUGETLB	0x00400000ULL	/* Huge tlb Page*/
#define VM_ACCOUNT	0x00100000ULL	/* Memory is a vm accounted object */
#define VM_NONLINEAR	0x00800000ULL	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000ULL	/* T if mapped copy of data (nommu mmap) */
#define VM_HUGEPAGE	0x01000000ULL	/* MADV_HUGEPAGE marked this vma */
#define VM_INSERTPAGE	0x02000000ULL	/* The vma has had "vm_insert_page()"
done on it */
#define VM_ALWAYSDUMP	0x04000000ULL	/* Always include in core dumps */
#define VM_CAN_NONLINEAR	0x08000000ULL	/* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP	0x10000000ULL	/* Can contain "struct page" and pure PFN pages */
#define VM_SAO		0x20000000ULL	/* Strong Access Ordering (powerpc) */
#define VM_PFN_AT_MMAP	0x40000000ULL	/* PFNMAP vma that is fully mapped at mmap time */
#define VM_MERGEABLE	0x80000000ULL	/* KSM may merge identical pages */
/*
 *  Decode a vm_flags value into its symbolic names.  Because several bit
 *  values were reused across kernel versions, kernel symbols, structure
 *  members and THIS_KERNEL_VERSION are consulted to pick the right name.
 */
static void
do_vm_flags(ulonglong flags)
{ int others;
others = 0;
fprintf(fp, "%llx: (", flags);
if (flags & VM_READ) { fprintf(fp, "READ"); others++; }
if (flags & VM_WRITE) fprintf(fp, "%sWRITE", others++ ? "|" : "");
if (flags & VM_EXEC) fprintf(fp, "%sEXEC", others++ ? "|" : "");
if (flags & VM_SHARED) fprintf(fp, "%sSHARED", others++ ? "|" : "");
if (flags & VM_MAYREAD) fprintf(fp, "%sMAYREAD", others++ ? "|" : "");
if (flags & VM_MAYWRITE) fprintf(fp, "%sMAYWRITE", others++ ? "|" : "");
if (flags & VM_MAYEXEC) fprintf(fp, "%sMAYEXEC", others++ ? "|" : "");
if (flags & VM_MAYSHARE) fprintf(fp, "%sMAYSHARE", others++ ? "|" : "");
if (flags & VM_GROWSDOWN) fprintf(fp, "%sGROWSDOWN", others++ ? "|" : "");
/* bit 0x200: GROWSUP on kernels that have expand_upwards(), else NOHUGEPAGE */
if (kernel_symbol_exists("expand_upwards")) { if (flags & VM_GROWSUP) fprintf(fp, "%sGROWSUP", others++ ? "|" : ""); } else if (flags & VM_NOHUGEPAGE) fprintf(fp, "%sNOHUGEPAGE", others++ ? "|" : "");
/* bit 0x400: PFNMAP after 2.6.17, SHM before */
if (flags & VM_SHM) { if (THIS_KERNEL_VERSION > LINUX(2,6,17)) fprintf(fp, "%sPFNMAP", others++ ? "|" : ""); else fprintf(fp, "%sSHM", others++ ? "|" : ""); }
if (flags & VM_DENYWRITE) fprintf(fp, "%sDENYWRITE", others++ ? "|" : "");
if (flags & VM_EXECUTABLE) fprintf(fp, "%sEXECUTABLE", others++ ? "|" : "");
if (flags & VM_LOCKED) fprintf(fp, "%sLOCKED", others++ ? "|" : "");
if (flags & VM_IO) fprintf(fp, "%sIO", others++ ? "|" : "");
if (flags & VM_SEQ_READ) fprintf(fp, "%sSEQ_READ", others++ ? "|" : "");
if (flags & VM_RAND_READ) fprintf(fp, "%sRAND_READ", others++ ? "|" : "");
if (flags & VM_DONTCOPY) fprintf(fp, "%sDONTCOPY", others++ ? "|" : "");
if (flags & VM_DONTEXPAND) fprintf(fp, "%sDONTEXPAND", others++ ? "|" : "");
if (flags & VM_RESERVED) fprintf(fp, "%sRESERVED", others++ ? "|" : "");
/* bits 0x100000/0x200000: BIGPAGE/BIGMAP only on 2.4.9 bigpage kernels */
if (symbol_exists("nr_bigpages") && (THIS_KERNEL_VERSION == LINUX(2,4,9))) { if (flags & VM_BIGPAGE) fprintf(fp, "%sBIGPAGE", others++ ? "|" : ""); if (flags & VM_BIGMAP) fprintf(fp, "%sBIGMAP", others++ ? "|" : ""); } else { if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) && (flags & VM_WRITECOMBINED)) fprintf(fp, "%sWRITECOMBINED", others++ ? "|" : ""); if ((THIS_KERNEL_VERSION < LINUX(2,4,21)) && (flags & VM_NONCACHED)) fprintf(fp, "%sNONCACHED", others++ ? "|" : ""); if (flags & VM_HUGETLB) fprintf(fp, "%sHUGETLB", others++ ? "|" : ""); if (flags & VM_ACCOUNT) fprintf(fp, "%sACCOUNT", others++ ? "|" : ""); }
if (flags & VM_NONLINEAR) fprintf(fp, "%sNONLINEAR", others++ ? "|" : "");
/* bit 0x1000000: HUGEPAGE on THP-capable kernels, else nommu MAPPED_COPY */
if (flags & VM_HUGEPAGE) { if (MEMBER_EXISTS("mm_struct", "pmd_huge_pte")) fprintf(fp, "%sHUGEPAGE", others++ ? "|" : ""); else fprintf(fp, "%sMAPPED_COPY", others++ ? "|" : ""); }
if (flags & VM_INSERTPAGE) fprintf(fp, "%sINSERTPAGE", others++ ? "|" : "");
if (flags & VM_ALWAYSDUMP) fprintf(fp, "%sALWAYSDUMP", others++ ? "|" : "");
if (flags & VM_CAN_NONLINEAR) fprintf(fp, "%sCAN_NONLINEAR", others++ ? "|" : "");
if (flags & VM_MIXEDMAP) fprintf(fp, "%sMIXEDMAP", others++ ? "|" : "");
if (flags & VM_SAO) fprintf(fp, "%sSAO", others++ ? "|" : "");
if (flags & VM_PFN_AT_MMAP) fprintf(fp, "%sPFN_AT_MMAP", others++ ? "|" : "");
if (flags & VM_MERGEABLE) fprintf(fp, "%sMERGEABLE", others++ ? "|" : "");
fprintf(fp, ")\n"); }
/*
 *  Read whatever size vm_area_struct.vm_flags happens to be into a ulonglong.
*/ static ulonglong get_vm_flags(char *vma_buf) { ulonglong vm_flags = 0; if (SIZE(vm_area_struct_vm_flags) == sizeof(short)) vm_flags = USHORT(vma_buf + OFFSET(vm_area_struct_vm_flags)); else if (SIZE(vm_area_struct_vm_flags) == sizeof(long)) vm_flags = ULONG(vma_buf+ OFFSET(vm_area_struct_vm_flags)); else if (SIZE(vm_area_struct_vm_flags) == sizeof(long long)) vm_flags = ULONGLONG(vma_buf+ OFFSET(vm_area_struct_vm_flags)); else error(INFO, "questionable vm_area_struct.vm_flags size: %d\n", SIZE(vm_area_struct_vm_flags)); return vm_flags; } static void vm_cleanup(void *arg) { struct task_context *tc; pc->cmd_cleanup = NULL; pc->cmd_cleanup_arg = NULL; tc = (struct task_context *)arg; tc->mm_struct = 0; } static int is_valid_mm(ulong mm) { char kbuf[BUFSIZE]; char *p; int mm_count; if (!(p = vaddr_to_kmem_cache(mm, kbuf, VERBOSE))) goto bailout; if (!STRNEQ(p, "mm_struct")) goto bailout; readmem(mm + OFFSET(mm_struct_mm_count), KVADDR, &mm_count, sizeof(int), "mm_struct mm_count", FAULT_ON_ERROR); if (mm_count == 0) error(FATAL, "stale mm_struct address\n"); return mm_count; bailout: error(FATAL, "invalid mm_struct address\n"); return 0; } /* * vm_area_dump() primarily does the work for cmd_vm(), but is also called * from IN_TASK_VMA(), do_vtop(), and foreach(). 
 How it behaves depends
 *  upon the flag and ref arguments:
 *
 *   UVADDR               do_vtop() when dumping the VMA for a uvaddr
 *   UVADDR|VERIFY_ADDR   IN_TASK_VMA() macro checks if a uvaddr is in a VMA
 *   PHYSADDR             cmd_vm() or foreach(vm) for -p and -R options
 *   PRINT_MM_STRUCT      cmd_vm() or foreach(vm) for -m option
 *   PRINT_VMA_STRUCTS    cmd_vm() or foreach(vm) for -v option
 *   PRINT_INODES         open_files_dump() backdoors foreach(vm)
 *
 *   ref                  cmd_vm() or foreach(vm) for -R option that searches
 *                        for references -- and only then does a display
 */
/* Print the MM/PGD/RSS/TOTAL_VM header and values for the current tm.
 * NOTE: expects tm, buf4 and buf5 to exist in the expanding function. */
#define PRINT_VM_DATA()                                                  \
{                                                                        \
        fprintf(fp, "%s  %s  ",                                          \
            mkstring(buf4, VADDR_PRLEN, CENTER|LJUST, "MM"),             \
            mkstring(buf5, VADDR_PRLEN, CENTER|LJUST, "PGD"));           \
        fprintf(fp, "%s  %s\n",                                          \
            mkstring(buf4, 6, CENTER|LJUST, "RSS"),                      \
            mkstring(buf5, 8, CENTER|LJUST, "TOTAL_VM"));                \
                                                                         \
        fprintf(fp, "%s  %s  ",                                          \
            mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX,           \
                MKSTR(tm->mm_struct_addr)),                              \
            mkstring(buf5, VADDR_PRLEN, CENTER|LJUST|LONG_HEX,           \
                MKSTR(tm->pgd_addr)));                                   \
                                                                         \
        sprintf(buf4, "%ldk", (tm->rss * PAGESIZE())/1024);              \
        sprintf(buf5, "%ldk", (tm->total_vm * PAGESIZE())/1024);         \
        fprintf(fp, "%s  %s\n",                                          \
            mkstring(buf4, 6, CENTER|LJUST, NULL),                       \
            mkstring(buf5, 8, CENTER|LJUST, NULL));                      \
}
/* Print one VMA line; expects vma, vm_start, vm_end, vm_flags and buf1-4. */
#define PRINT_VMA_DATA()                                                       \
        fprintf(fp, "%s%s%s%s%s %6llx%s%s\n",                                  \
            mkstring(buf4, VADDR_PRLEN, CENTER|LJUST|LONG_HEX, MKSTR(vma)),    \
            space(MINSPACE),                                                   \
            mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)),     \
            space(MINSPACE),                                                   \
            mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)),       \
            vm_flags, space(MINSPACE), buf1);
#define FILENAME_COMPONENT(P,C)                                 \
        ((STREQ((P), "/") && STREQ((C), "/")) ||                \
        (!STREQ((C), "/") && strstr((P),(C))))
/* ref->cmdflags state bits used during a -R reference search */
#define VM_REF_SEARCH       (0x1)
#define VM_REF_DISPLAY      (0x2)
#define VM_REF_NUMBER       (0x4)
#define VM_REF_VMA          (0x8)
#define VM_REF_PAGE         (0x10)
#define VM_REF_HEADER       (0x20)
#define DO_REF_SEARCH(X)   ((X) && ((X)->cmdflags & VM_REF_SEARCH))
#define DO_REF_DISPLAY(X)  ((X) && ((X)->cmdflags & VM_REF_DISPLAY))
#define VM_REF_CHECK_HEXVAL(X,V) \
   (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->hexval == (V)))
#define VM_REF_CHECK_DECVAL(X,V) \
   (DO_REF_SEARCH(X) && ((X)->cmdflags & VM_REF_NUMBER) && ((X)->decval == (V)))
#define VM_REF_CHECK_STRING(X,S) \
   (DO_REF_SEARCH(X) && (string_exists(S)) && FILENAME_COMPONENT((S),(X)->str))
#define VM_REF_FOUND(X)    ((X) && ((X)->cmdflags & VM_REF_HEADER))
ulong
vm_area_dump(ulong task, ulong flag, ulong vaddr, struct reference *ref)
{ struct task_context *tc; ulong vma; ulong vm_start; ulong vm_end; ulong vm_next, vm_mm; char *dentry_buf, *vma_buf, *file_buf; ulonglong vm_flags; ulong vm_file, inode; ulong dentry, vfsmnt; ulong single_vma; unsigned int radix; int single_vma_found; int found; struct task_mem_usage task_mem_usage, *tm; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char buf5[BUFSIZE]; char vma_header[BUFSIZE];
tc = task_to_context(task); tm = &task_mem_usage; get_task_mem_usage(task, tm);
single_vma = 0; single_vma_found = FALSE;
/* for -P, the vaddr argument actually carries the vm_area_struct address */
if (flag & PRINT_SINGLE_VMA) { single_vma = vaddr; vaddr = 0; }
if (flag & PRINT_RADIX_10) radix = 10; else if (flag & PRINT_RADIX_16) radix = 16; else radix = 0;
/* a -R string that parses as a number is also matched against hex/dec values */
if (ref) { ref->cmdflags = VM_REF_SEARCH; if (IS_A_NUMBER(ref->str)) { ref->hexval = htol(ref->str, FAULT_ON_ERROR, NULL); if (decimal(ref->str, 0)) ref->decval = dtol(ref->str, FAULT_ON_ERROR, NULL); ref->cmdflags |= VM_REF_NUMBER; } }
if (VM_REF_CHECK_HEXVAL(ref, tm->mm_struct_addr) || VM_REF_CHECK_HEXVAL(ref, tm->pgd_addr)) { print_task_header(fp, tc, 0); PRINT_VM_DATA(); fprintf(fp, "\n"); return (ulong)NULL; }
if (!(flag & (UVADDR|PRINT_MM_STRUCT|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) && !DO_REF_SEARCH(ref)) PRINT_VM_DATA();
if (!tm->mm_struct_addr) {
/* kernel threads have no mm; honor a forced mm_struct from "vm -M" */
if (pc->curcmd_flags & MM_STRUCT_FORCE) { if (!is_valid_mm(pc->curcmd_private)) return (ulong)NULL; tc->mm_struct = tm->mm_struct_addr = pc->curcmd_private; /* * tc->mm_struct is changed, use vm_cleanup to * restore it. */ pc->cmd_cleanup_arg = (void *)tc; pc->cmd_cleanup = vm_cleanup; } else return (ulong)NULL; }
if (flag & PRINT_MM_STRUCT) { dump_struct("mm_struct", tm->mm_struct_addr, radix); return (ulong)NULL; }
readmem(tm->mm_struct_addr + OFFSET(mm_struct_mmap), KVADDR, &vma, sizeof(void *), "mm_struct mmap", FAULT_ON_ERROR);
sprintf(vma_header, "%s%s%s%s%s  FLAGS%sFILE\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "VMA"), space(MINSPACE), mkstring(buf2, UVADDR_PRLEN, CENTER|RJUST, "START"), space(MINSPACE), mkstring(buf3, UVADDR_PRLEN, CENTER|RJUST, "END"), space(MINSPACE));
if (!(flag & (PHYSADDR|VERIFY_ADDR|PRINT_VMA_STRUCTS|PRINT_SINGLE_VMA)) && !DO_REF_SEARCH(ref)) fprintf(fp, "%s", vma_header);
/* walk the linear vm_next chain of the task's vm_area_structs */
for (found = FALSE; vma; vma = vm_next) {
if ((flag & PHYSADDR) && !DO_REF_SEARCH(ref)) fprintf(fp, "%s", vma_header);
inode = 0; BZERO(buf1, BUFSIZE);
vma_buf = fill_vma_cache(vma);
vm_mm = ULONG(vma_buf + OFFSET(vm_area_struct_vm_mm)); vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end)); vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next)); vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); vm_flags = get_vm_flags(vma_buf); vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file));
if (flag & PRINT_SINGLE_VMA) { if (vma != single_vma) continue; fprintf(fp, "%s", vma_header); single_vma_found = TRUE; }
if (flag & PRINT_VMA_STRUCTS) { dump_struct("vm_area_struct", vma, radix); continue; }
/* resolve the mapped file's pathname (and optionally its inode) */
if (vm_file && !(flag & VERIFY_ADDR)) { file_buf = fill_file_cache(vm_file); dentry = ULONG(file_buf + OFFSET(file_f_dentry)); dentry_buf = NULL; if (dentry) { dentry_buf = fill_dentry_cache(dentry); if (VALID_MEMBER(file_f_vfsmnt)) { vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); get_pathname(dentry, buf1, BUFSIZE, 1, vfsmnt); } else { get_pathname(dentry, buf1, BUFSIZE, 1, 0); } } if ((flag & PRINT_INODES) && dentry) { inode = ULONG(dentry_buf + OFFSET(dentry_d_inode)); } }
if (!(flag & UVADDR) || ((flag & UVADDR) && ((vaddr >= vm_start) && (vaddr < vm_end)))) { found = TRUE;
if (flag & VERIFY_ADDR) return vma;
/* reference-search mode: print headers lazily, only on a match */
if (DO_REF_SEARCH(ref)) {
if (VM_REF_CHECK_HEXVAL(ref, vma) || VM_REF_CHECK_HEXVAL(ref, (ulong)vm_flags) || VM_REF_CHECK_STRING(ref, buf1)) { if (!(ref->cmdflags & VM_REF_HEADER)) { print_task_header(fp, tc, 0); PRINT_VM_DATA(); ref->cmdflags |= VM_REF_HEADER; } if (!(ref->cmdflags & VM_REF_VMA) || (ref->cmdflags & VM_REF_PAGE)) { fprintf(fp, "%s", vma_header); ref->cmdflags |= VM_REF_VMA; ref->cmdflags &= ~VM_REF_PAGE; ref->ref1 = vma; } PRINT_VMA_DATA(); }
/* first pass finds the match; second pass (VM_REF_DISPLAY) prints it */
if (vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref)) { if (!(ref->cmdflags & VM_REF_HEADER)) { print_task_header(fp, tc, 0); PRINT_VM_DATA(); ref->cmdflags |= VM_REF_HEADER; } if (!(ref->cmdflags & VM_REF_VMA) || (ref->ref1 != vma)) { fprintf(fp, "%s", vma_header); PRINT_VMA_DATA(); ref->cmdflags |= VM_REF_VMA; ref->ref1 = vma; } ref->cmdflags |= VM_REF_DISPLAY; vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref); ref->cmdflags &= ~VM_REF_DISPLAY; }
continue; }
if (inode) { fprintf(fp, "%lx%s%s%s%s%s%6llx%s%lx %s\n", vma, space(MINSPACE), mkstring(buf2, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_start)), space(MINSPACE), mkstring(buf3, UVADDR_PRLEN, RJUST|LONG_HEX, MKSTR(vm_end)), space(MINSPACE), vm_flags, space(MINSPACE), inode, buf1); } else { PRINT_VMA_DATA(); if (flag & (PHYSADDR|PRINT_SINGLE_VMA)) vm_area_page_dump(vma, task, vm_start, vm_end, vm_mm, ref); }
if (flag & UVADDR) return vma; } }
if (flag & VERIFY_ADDR) return (ulong)NULL;
if ((flag & PRINT_SINGLE_VMA) && !single_vma_found) fprintf(fp, "(not found)\n");
if ((flag & UVADDR) && !found) fprintf(fp, "(not found)\n");
if (VM_REF_FOUND(ref)) fprintf(fp, "\n");
return (ulong)NULL; }
/*
 *  Dump the page translations of a VMA, or in reference-search mode,
 *  find (first pass) and then display (second pass) a matching page.
 *  Returns TRUE when a search pass found a match.
 */
static int
vm_area_page_dump(ulong vma, ulong task, ulong start, ulong end, ulong mm, struct reference *ref)
{ physaddr_t paddr; ulong offs; char *p1, *p2; int display; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE];
if (mm == symbol_value("init_mm")) return FALSE;
if (!ref || DO_REF_DISPLAY(ref)) fprintf(fp, "%s  %s\n", mkstring(buf1, UVADDR_PRLEN, LJUST, "VIRTUAL"), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), LJUST, "PHYSICAL"));
/* display pass resumes at the page recorded by the search pass */
if (DO_REF_DISPLAY(ref)) { start = ref->ref2; }
while (start < end) {
display = DO_REF_SEARCH(ref) ? FALSE : TRUE;
if (VM_REF_CHECK_HEXVAL(ref, start)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } }
if (uvtop(task_to_context(task), start, &paddr, 0)) { sprintf(buf3, "%s  %s\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST|LONGLONG_HEX, MKSTR(&paddr))); if (VM_REF_CHECK_HEXVAL(ref, paddr)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } } else if (paddr && swap_location(paddr, buf1)) {
/* page is swapped out; paddr here holds the swap entry from uvtop() */
sprintf(buf3, "%s  SWAP: %s\n", mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)), buf1);
if (DO_REF_SEARCH(ref)) { if (VM_REF_CHECK_DECVAL(ref, THIS_KERNEL_VERSION >= LINUX(2,6,0) ?
__swp_offset(paddr) : SWP_OFFSET(paddr))) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } strcpy(buf4, buf3); p1 = strstr(buf4, "SWAP:") + strlen("SWAP: "); p2 = strstr(buf4, " OFFSET:"); *p2 = NULLCHAR; if (VM_REF_CHECK_STRING(ref, p1)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } } } else if (vma_file_offset(vma, start, buf1)) { sprintf(buf3, "%s FILE: %s\n", mkstring(buf2, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start)), buf1); if (DO_REF_SEARCH(ref)) { extract_hex(strstr(buf3, "OFFSET:") + strlen("OFFSET: "), &offs, 0, 0); if (VM_REF_CHECK_HEXVAL(ref, offs)) { if (DO_REF_DISPLAY(ref)) display = TRUE; else { ref->cmdflags |= VM_REF_PAGE; ref->ref2 = start; return TRUE; } } } } else { sprintf(buf3, "%s (not mapped)\n", mkstring(buf1, UVADDR_PRLEN, LJUST|LONG_HEX, MKSTR(start))); } if (display) fprintf(fp, "%s", buf3); start += PAGESIZE(); } return FALSE; } /* * Cache the passed-in vm_area_struct. */ char * fill_vma_cache(ulong vma) { int i; char *cache; vt->vma_cache_fills++; for (i = 0; i < VMA_CACHE; i++) { if (vt->cached_vma[i] == vma) { vt->cached_vma_hits[i]++; cache = vt->vma_cache + (SIZE(vm_area_struct)*i); return(cache); } } cache = vt->vma_cache + (SIZE(vm_area_struct)*vt->vma_cache_index); readmem(vma, KVADDR, cache, SIZE(vm_area_struct), "fill_vma_cache", FAULT_ON_ERROR); vt->cached_vma[vt->vma_cache_index] = vma; vt->vma_cache_index = (vt->vma_cache_index+1) % VMA_CACHE; return(cache); } /* * If active, clear the vm_area_struct references. */ void clear_vma_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < VMA_CACHE; i++) { vt->cached_vma[i] = 0; vt->cached_vma_hits[i] = 0; } vt->vma_cache_fills = 0; vt->vma_cache_index = 0; } /* * Check whether an address is a user stack address based * upon its vm_area_struct flags. 
*/ int in_user_stack(ulong task, ulong vaddr) { ulong vma; ulonglong vm_flags; char *vma_buf; if ((vma = vm_area_dump(task, UVADDR|VERIFY_ADDR, vaddr, 0))) { vma_buf = fill_vma_cache(vma); vm_flags = get_vm_flags(vma_buf); if (vm_flags & VM_GROWSDOWN) return TRUE; else if (kernel_symbol_exists("expand_upwards") && (vm_flags & VM_GROWSUP)) return TRUE; /* * per-thread stack */ if ((vm_flags & (VM_READ|VM_WRITE)) == (VM_READ|VM_WRITE)) return TRUE; } return FALSE; } /* * Set the const value of filepages and anonpages * according to MM_FILEPAGES and MM_ANONPAGES. */ static void rss_page_types_init(void) { long anonpages, filepages; if (VALID_MEMBER(mm_struct_rss)) return; if (VALID_MEMBER(mm_struct_rss_stat)) { if (!enumerator_value("MM_FILEPAGES", &filepages) || !enumerator_value("MM_ANONPAGES", &anonpages)) { filepages = 0; anonpages = 1; } tt->filepages = filepages; tt->anonpages = anonpages; } } static struct tgid_context * tgid_quick_search(ulong tgid) { struct tgid_context *last, *next; tt->tgid_searches++; if (!(last = tt->last_tgid)) return NULL; if (tgid == last->tgid) { tt->tgid_cache_hits++; return last; } next = last + 1; if ((next < (tt->tgid_array + RUNNING_TASKS())) && (tgid == next->tgid)) { tt->tgid_cache_hits++; return next; } return NULL; } static void collect_page_member_data(char *optlist, struct meminfo *mi) { int i; int members; char buf[BUFSIZE]; char *memberlist[MAXARGS]; struct struct_member_data *page_member_cache, *pmd; if ((count_chars(optlist, ',')+1) > MAXARGS) error(FATAL, "too many members in comma-separated list\n"); if ((LASTCHAR(optlist) == ',') || (LASTCHAR(optlist) == '.')) error(FATAL, "invalid format: %s\n", optlist); strcpy(buf, optlist); replace_string(optlist, ",", ' '); if (!(members = parse_line(optlist, memberlist))) error(FATAL, "invalid page struct member list format: %s\n", buf); page_member_cache = (struct struct_member_data *) GETBUF(sizeof(struct struct_member_data) * members); for (i = 0, pmd = page_member_cache; i 
< members; i++, pmd++) { pmd->structure = "page"; pmd->member = memberlist[i]; if (!fill_struct_member_data(pmd)) error(FATAL, "invalid %s struct member: %s\n", pmd->structure, pmd->member); if (CRASHDEBUG(1)) { fprintf(fp, " structure: %s\n", pmd->structure); fprintf(fp, " member: %s\n", pmd->member); fprintf(fp, " type: %ld\n", pmd->type); fprintf(fp, " unsigned_type: %ld\n", pmd->unsigned_type); fprintf(fp, " length: %ld\n", pmd->length); fprintf(fp, " offset: %ld\n", pmd->offset); fprintf(fp, " bitpos: %ld\n", pmd->bitpos); fprintf(fp, " bitsize: %ld%s", pmd->bitsize, members > 1 ? "\n\n" : "\n"); } } mi->nr_members = members; mi->page_member_cache = page_member_cache; } static int get_bitfield_data(struct integer_data *bd) { int pos, size; uint32_t tmpvalue32; uint64_t tmpvalue64; uint32_t mask32; uint64_t mask64; struct struct_member_data *pmd; pmd = bd->pmd; pos = bd->pmd->bitpos; size = bd->pmd->bitsize; if (pos == 0 && size == 0) { bd->bitfield_value = bd->value; return TRUE; } switch (__BYTE_ORDER) { case __LITTLE_ENDIAN: switch (pmd->length) { case 4: tmpvalue32 = (uint32_t)bd->value; tmpvalue32 >>= pos; mask32 = (1 << size) - 1; tmpvalue32 &= mask32; bd->bitfield_value = (ulong)tmpvalue32; break; case 8: tmpvalue64 = (uint64_t)bd->value; tmpvalue64 >>= pos; mask64 = (1UL << size) - 1; tmpvalue64 &= mask64; bd->bitfield_value = tmpvalue64; break; default: return FALSE; } break; case __BIG_ENDIAN: switch (pmd->length) { case 4: tmpvalue32 = (uint32_t)bd->value; tmpvalue32 <<= pos; tmpvalue32 >>= (32-size); mask32 = (1 << size) - 1; tmpvalue32 &= mask32; bd->bitfield_value = (ulong)tmpvalue32; break; case 8: tmpvalue64 = (uint64_t)bd->value; tmpvalue64 <<= pos; tmpvalue64 >>= (64-size); mask64 = (1UL << size) - 1; tmpvalue64 &= mask64; bd->bitfield_value = tmpvalue64; break; default: return FALSE; } break; } return TRUE; } static int show_page_member_data(char *pcache, ulong pp, struct meminfo *mi, char *outputbuffer) { int bufferindex, i, c, cnt, radix, 
struct_intbuf[10]; ulong longbuf, struct_longbuf[10]; unsigned char boolbuf; void *voidptr; ushort shortbuf; struct struct_member_data *pmd; struct integer_data integer_data; bufferindex = 0; pmd = mi->page_member_cache; bufferindex += sprintf(outputbuffer + bufferindex, "%lx ", pp); for (i = 0; i < mi->nr_members; pmd++, i++) { switch (pmd->type) { case TYPE_CODE_PTR: voidptr = VOID_PTR(pcache + pmd->offset); bufferindex += sprintf(outputbuffer + bufferindex, VADDR_PRLEN == 8 ? "%08lx " : "%016lx ", (ulong)voidptr); break; case TYPE_CODE_INT: switch (pmd->length) { case 1: integer_data.value = UCHAR(pcache + pmd->offset); break; case 2: integer_data.value = USHORT(pcache + pmd->offset); break; case 4: integer_data.value = UINT(pcache + pmd->offset); break; case 8: if (BITS32()) goto unsupported; integer_data.value = ULONG(pcache + pmd->offset); break; default: goto unsupported; } integer_data.pmd = pmd; if (get_bitfield_data(&integer_data)) longbuf = integer_data.bitfield_value; else goto unsupported; if (STREQ(pmd->member, "flags")) radix = 16; else if (STRNEQ(pmd->member, "_count") || STRNEQ(pmd->member, "_mapcount")) radix = 10; else radix = *gdb_output_radix; if (pmd->unsigned_type) { if (pmd->length == sizeof(ulonglong)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%lu " : "%016lx ", longbuf); else if (pmd->length == sizeof(int)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%08x ", (uint)longbuf); else if (pmd->length == sizeof(short)) { bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%04x ", (ushort)longbuf); } else if (pmd->length == sizeof(char)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%u " : "%02x ", (unsigned char)longbuf); } else { if (pmd->length == sizeof(ulonglong)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? 
"%ld " : "%016lx", longbuf); else if (pmd->length == sizeof(int)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%08x ", (int)longbuf); else if (pmd->length == sizeof(short)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%04x ", (short)longbuf); else if (pmd->length == sizeof(char)) bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? "%d " : "%02x ", (char)longbuf); } break; case TYPE_CODE_STRUCT: if (STRNEQ(pmd->member, "_count") || STRNEQ(pmd->member, "_mapcount")) { BCOPY(pcache+pmd->offset, (char *)&struct_intbuf[0], pmd->length); bufferindex += sprintf(outputbuffer + bufferindex, "%d ", struct_intbuf[0]); } else if ((pmd->length % sizeof(long)) == 0) { BCOPY(pcache+pmd->offset, (char *)&struct_longbuf[0], pmd->length); cnt = pmd->length / sizeof(long); for (c = 0; c < cnt; c++) { bufferindex += sprintf(outputbuffer + bufferindex, BITS32() ? "%08lx%s" : "%016lx%s", struct_longbuf[c], (c+1) < cnt ? "," : ""); } bufferindex += sprintf(outputbuffer + bufferindex, " "); } else if ((pmd->length % sizeof(int)) == 0) { BCOPY(pcache+pmd->offset, (char *)&struct_intbuf[0], pmd->length); cnt = pmd->length / sizeof(int); for (c = 0; c < cnt; c++) { bufferindex += sprintf(outputbuffer + bufferindex, "%08x%s", struct_intbuf[c], (c+1) < cnt ? "," : ""); } } else if (pmd->length == sizeof(short)) { BCOPY(pcache+pmd->offset, (char *)&shortbuf, pmd->length); bufferindex += sprintf(outputbuffer + bufferindex, "%04x ", shortbuf); } else goto unsupported; break; case TYPE_CODE_BOOL: radix = *gdb_output_radix; boolbuf = UCHAR(pcache + pmd->offset); if (boolbuf <= 1) bufferindex += sprintf(outputbuffer + bufferindex, "%s ", boolbuf ? "true" : "false"); else bufferindex += sprintf(outputbuffer + bufferindex, radix == 10 ? 
"%d" : "%x ", boolbuf); break; default: unsupported: error(FATAL, "unsupported page member reference: %s.%s\n", pmd->structure, pmd->member); break; } } return bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } /* * Fill in the task_mem_usage structure with the RSS, virtual memory size, * percent of physical memory being used, and the mm_struct address. */ void get_task_mem_usage(ulong task, struct task_mem_usage *tm) { struct task_context *tc; long rss = 0; BZERO(tm, sizeof(struct task_mem_usage)); if (IS_ZOMBIE(task) || IS_EXITING(task)) return; tc = task_to_context(task); if (!tc || !tc->mm_struct) /* probably a kernel thread */ return; tm->mm_struct_addr = tc->mm_struct; if (!task_mm(task, TRUE)) return; if (VALID_MEMBER(mm_struct_rss)) /* * mm_struct.rss or mm_struct._rss exist. */ tm->rss = ULONG(tt->mm_struct + OFFSET(mm_struct_rss)); else { /* * Latest kernels have mm_struct.mm_rss_stat[]. */ if (VALID_MEMBER(mm_struct_rss_stat)) { long anonpages, filepages; anonpages = tt->anonpages; filepages = tt->filepages; rss += LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) + OFFSET(mm_rss_stat_count) + (filepages * sizeof(long))); rss += LONG(tt->mm_struct + OFFSET(mm_struct_rss_stat) + OFFSET(mm_rss_stat_count) + (anonpages * sizeof(long))); } /* Check whether SPLIT_RSS_COUNTING is enabled */ if (VALID_MEMBER(task_struct_rss_stat)) { int sync_rss; struct tgid_context tgid, *tgid_array, *tg, *first, *last; tgid_array = tt->tgid_array; tgid.tgid = task_tgid(task); if (!(tg = tgid_quick_search(tgid.tgid))) tg = (struct tgid_context *)bsearch(&tgid, tgid_array, RUNNING_TASKS(), sizeof(struct tgid_context), sort_by_tgid); if (tg) { /* find the first element which has the same tgid */ first = tg; while ((first > tgid_array) && ((first - 1)->tgid == first->tgid)) first--; /* find the last element which have same tgid */ last = tg; while ((last < (tgid_array + (RUNNING_TASKS() - 1))) && (last->tgid == (last + 1)->tgid)) last++; while (first <= last) { /* count 0 -> 
filepages */ if (!readmem(first->task + OFFSET(task_struct_rss_stat) + OFFSET(task_rss_stat_count), KVADDR, &sync_rss, sizeof(int), "task_struct rss_stat MM_FILEPAGES", RETURN_ON_ERROR)) continue; rss += sync_rss; /* count 1 -> anonpages */ if (!readmem(first->task + OFFSET(task_struct_rss_stat) + OFFSET(task_rss_stat_count) + sizeof(int), KVADDR, &sync_rss, sizeof(int), "task_struct rss_stat MM_ANONPAGES", RETURN_ON_ERROR)) continue; rss += sync_rss; if (first == last) break; first++; } tt->last_tgid = last; } } /* * mm_struct._anon_rss and mm_struct._file_rss should exist. */ if (VALID_MEMBER(mm_struct_anon_rss)) rss += LONG(tt->mm_struct + OFFSET(mm_struct_anon_rss)); if (VALID_MEMBER(mm_struct_file_rss)) rss += LONG(tt->mm_struct + OFFSET(mm_struct_file_rss)); tm->rss = (unsigned long)rss; } tm->total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm)); tm->pgd_addr = ULONG(tt->mm_struct + OFFSET(mm_struct_pgd)); if (is_kernel_thread(task) && !tm->rss) return; tm->pct_physmem = ((double)(tm->rss*100)) / ((double)(MIN(vt->total_pages, vt->num_physpages ? vt->num_physpages : vt->total_pages))); } /* * cmd_kmem() is designed as a multi-purpose kernel memory investigator with * the flag argument sending it off in a multitude of areas. To date, the * following options are defined: * * -f displays the contents of the system free_area[] array headers; * also verifies that the page count equals nr_free_pages * -F same as -f, but also dumps all pages linked to that header. * -p displays basic information about each page in the system * mem_map[] array. * -s displays kmalloc() slab data. * -S same as -s, but displays all kmalloc() objects. * -v displays the vmlist entries. * -c displays the number of pages in the page_hash_table. * -C displays all entries in the page_hash_table. * -i displays informational data shown by /proc/meminfo. 
* -h hugepage information from hstates[] array * * -P forces address to be defined as a physical address * address when used with -f, the address can be either a page pointer * or a physical address; the free_area header containing the page * (if any) is displayed. * When used with -p, the address can be either a page pointer or a * physical address; its basic mem_map page information is displayed. * When used with -c, the page_hash_table entry containing the * page pointer is displayed. */ /* Note: VERBOSE is 0x1, ADDRESS_SPECIFIED is 0x2 */ #define GET_TOTALRAM_PAGES (ADDRESS_SPECIFIED << 1) #define GET_SHARED_PAGES (ADDRESS_SPECIFIED << 2) #define GET_FREE_PAGES (ADDRESS_SPECIFIED << 3) #define GET_FREE_HIGHMEM_PAGES (ADDRESS_SPECIFIED << 4) #define GET_ZONE_SIZES (ADDRESS_SPECIFIED << 5) #define GET_HIGHEST (ADDRESS_SPECIFIED << 6) #define GET_BUFFERS_PAGES (ADDRESS_SPECIFIED << 7) #define GET_SLAB_PAGES (ADDRESS_SPECIFIED << 8) #define GET_PHYS_TO_VMALLOC (ADDRESS_SPECIFIED << 9) #define GET_ACTIVE_LIST (ADDRESS_SPECIFIED << 10) #define GET_INACTIVE_LIST (ADDRESS_SPECIFIED << 11) #define GET_INACTIVE_CLEAN (ADDRESS_SPECIFIED << 12) /* obsolete */ #define GET_INACTIVE_DIRTY (ADDRESS_SPECIFIED << 13) /* obsolete */ #define SLAB_GET_COUNTS (ADDRESS_SPECIFIED << 14) #define SLAB_WALKTHROUGH (ADDRESS_SPECIFIED << 15) #define GET_VMLIST_COUNT (ADDRESS_SPECIFIED << 16) #define GET_VMLIST (ADDRESS_SPECIFIED << 17) #define SLAB_DATA_NOSAVE (ADDRESS_SPECIFIED << 18) #define GET_SLUB_SLABS (ADDRESS_SPECIFIED << 19) #define GET_SLUB_OBJECTS (ADDRESS_SPECIFIED << 20) #define VMLIST_VERIFY (ADDRESS_SPECIFIED << 21) #define SLAB_FIRST_NODE (ADDRESS_SPECIFIED << 22) #define CACHE_SET (ADDRESS_SPECIFIED << 23) #define SLAB_OVERLOAD_PAGE_PTR (ADDRESS_SPECIFIED << 24) #define SLAB_BITFIELD (ADDRESS_SPECIFIED << 25) #define SLAB_GATHER_FAILURE (ADDRESS_SPECIFIED << 26) #define GET_ALL \ (GET_SHARED_PAGES|GET_TOTALRAM_PAGES|GET_BUFFERS_PAGES|GET_SLAB_PAGES) void cmd_kmem(void) { 
int i; int c; int sflag, Sflag, pflag, fflag, Fflag, vflag, zflag, oflag, gflag; int nflag, cflag, Cflag, iflag, lflag, Lflag, Pflag, Vflag, hflag; struct meminfo meminfo; ulonglong value[MAXARGS]; char buf[BUFSIZE]; char *p1; int spec_addr, escape; spec_addr = 0; sflag = Sflag = pflag = fflag = Fflag = Pflag = zflag = oflag = 0; vflag = Cflag = cflag = iflag = nflag = lflag = Lflag = Vflag = 0; gflag = hflag = 0; escape = FALSE; BZERO(&meminfo, sizeof(struct meminfo)); BZERO(&value[0], sizeof(ulonglong)*MAXARGS); pc->curcmd_flags &= ~HEADER_PRINTED; while ((c = getopt(argcnt, args, "gI:sSFfm:pvczCinl:L:PVoh")) != EOF) { switch(c) { case 'V': Vflag = 1; break; case 'n': nflag = 1; break; case 'z': zflag = 1; break; case 'i': iflag = 1; break; case 'h': hflag = 1; break; case 'C': Cflag = 1, cflag = 0;; break; case 'c': cflag = 1, Cflag = 0; break; case 'v': vflag = 1; break; case 's': sflag = 1; Sflag = 0; break; case 'S': Sflag = 1; sflag = 0; break; case 'F': Fflag = 1; fflag = 0; break;; case 'f': fflag = 1; Fflag = 0; break;; case 'p': pflag = 1; break; case 'm': pflag = 1; collect_page_member_data(optarg, &meminfo); break; case 'I': meminfo.ignore = optarg; break; case 'l': if (STREQ(optarg, "a")) { meminfo.flags |= GET_ACTIVE_LIST; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "i")) { meminfo.flags |= GET_INACTIVE_LIST; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "ic")) { meminfo.flags |= GET_INACTIVE_CLEAN; lflag = 1; Lflag = 0; } else if (STREQ(optarg, "id")) { meminfo.flags |= GET_INACTIVE_DIRTY; lflag = 1; Lflag = 0; } else argerrs++; break; case 'L': if (STREQ(optarg, "a")) { meminfo.flags |= GET_ACTIVE_LIST; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "i")) { meminfo.flags |= GET_INACTIVE_LIST; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "ic")) { meminfo.flags |= GET_INACTIVE_CLEAN; Lflag = 1; lflag = 0; } else if (STREQ(optarg, "id")) { meminfo.flags |= GET_INACTIVE_DIRTY; Lflag = 1; lflag = 0; } else argerrs++; break; case 'P': Pflag = 1; 
break; case 'o': oflag = 1; break; case 'g': gflag = 1; break; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); if ((sflag + Sflag + pflag + fflag + Fflag + Vflag + oflag + vflag + Cflag + cflag + iflag + lflag + Lflag + gflag + hflag) > 1) { error(INFO, "only one flag allowed!\n"); cmd_usage(pc->curcmd, SYNOPSIS); } if (sflag || Sflag || !(vt->flags & KMEM_CACHE_INIT)) kmem_cache_init(); while (args[optind]) { if (hexadecimal(args[optind], 0)) { value[spec_addr++] = htoll(args[optind], FAULT_ON_ERROR, NULL); } else { if (meminfo.reqname) error(FATAL, "only one kmem_cache reference is allowed\n"); meminfo.reqname = args[optind]; if (args[optind][0] == '\\') { meminfo.reqname = &args[optind][1]; escape = TRUE; } else meminfo.reqname = args[optind]; if (!sflag && !Sflag) cmd_usage(pc->curcmd, SYNOPSIS); } optind++; } for (i = 0; i < spec_addr; i++) { if (Pflag) meminfo.memtype = PHYSADDR; else meminfo.memtype = IS_KVADDR(value[i]) ? KVADDR : PHYSADDR; if (fflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; if (meminfo.calls++) fprintf(fp, "\n"); vt->dump_free_pages(&meminfo); fflag++; } if (pflag) { meminfo.spec_addr = value[i]; meminfo.flags = ADDRESS_SPECIFIED; dump_mem_map(&meminfo); pflag++; } if (sflag || Sflag) { if (vt->flags & KMEM_CACHE_UNAVAIL) error(FATAL, "kmem cache slab subsystem not available\n"); meminfo.flags = Sflag ? VERBOSE : 0; if (meminfo.memtype == PHYSADDR) { if (value[i] < VTOP(vt->high_memory)) { value[i] = PTOV(value[i]); meminfo.memtype = KVADDR; } else error(WARNING, "cannot make virtual-to-physical translation: %llx\n", value[i]); } if ((p1 = is_kmem_cache_addr(value[i], buf))) { if (meminfo.reqname) error(FATAL, "only one kmem_cache reference is allowed\n"); meminfo.reqname = p1; meminfo.cache = value[i]; meminfo.flags |= CACHE_SET; if ((i+1) == spec_addr) { /* done? 
 */
					if (meminfo.calls++)
						fprintf(fp, "\n");
					vt->dump_kmem_cache(&meminfo);
				}
				meminfo.flags &= ~CACHE_SET;
			} else {
				/* address is not a kmem_cache pointer:
				 * search all caches for the object */
				meminfo.spec_addr = value[i];
				meminfo.flags = ADDRESS_SPECIFIED;
				if (Sflag && (vt->flags & KMALLOC_SLUB))
					meminfo.flags |= VERBOSE;
				if (meminfo.calls++)
					fprintf(fp, "\n");
				vt->dump_kmem_cache(&meminfo);
			}
			/* bump past 1 so the no-address paths below skip */
			if (sflag)
				sflag++;
			if (Sflag)
				Sflag++;
		}

		if (vflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			dump_vmlist(&meminfo);
			vflag++;
		}

		if (cflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			if (meminfo.calls++)
				fprintf(fp, "\n");
			dump_page_hash_table(&meminfo);
			cflag++;
		}

		if (lflag) {
			meminfo.spec_addr = value[i];
			meminfo.flags |= (ADDRESS_SPECIFIED|VERBOSE);
			if (meminfo.calls++)
				fprintf(fp, "\n");
			dump_page_lists(&meminfo);
			lflag++;
		}

		if (gflag) {
			if (i)
				fprintf(fp, "\n");
			dump_page_flags(value[i]);
			gflag++;
		}

		/*
		 * no value arguments allowed!
		 */
		if (zflag || nflag || iflag || Fflag || Cflag || Lflag ||
		    Vflag || oflag || hflag) {
			error(INFO,
			    "no address arguments allowed with this option\n");
			cmd_usage(pc->curcmd, SYNOPSIS);
		}

		/* bare address with no option: fall back to a general
		 * kmem_search() of the address */
		if (!(sflag + Sflag + pflag + fflag + vflag + cflag +
		    lflag + Lflag + gflag)) {
			meminfo.spec_addr = value[i];
			meminfo.flags = ADDRESS_SPECIFIED;
			if (meminfo.calls++)
				fprintf(fp, "\n");
			else
				kmem_cache_init();
			kmem_search(&meminfo);
		}

	}

	/*
	 * Flags still equal to 1 had no address argument: perform the
	 * whole-system variant of each request.
	 */
	if (iflag == 1)
		dump_kmeminfo();

	if (pflag == 1)
		dump_mem_map(&meminfo);

	if (fflag == 1)
		vt->dump_free_pages(&meminfo);

	if (Fflag == 1) {
		meminfo.flags = VERBOSE;
		vt->dump_free_pages(&meminfo);
	}

	if (hflag == 1)
		dump_hstates();

	if (sflag == 1) {
		/* unescaped "list" is the reserved cache-list request */
		if (!escape && STREQ(meminfo.reqname, "list"))
			kmem_cache_list();
		else if (vt->flags & KMEM_CACHE_UNAVAIL)
			error(FATAL,
			    "kmem cache slab subsystem not available\n");
		else
			vt->dump_kmem_cache(&meminfo);
	}

	if (Sflag == 1) {
		if (STREQ(meminfo.reqname, "list"))
			kmem_cache_list();
		else if (vt->flags & KMEM_CACHE_UNAVAIL)
			error(FATAL,
			    "kmem cache slab subsystem not available\n");
		else {
			meminfo.flags = VERBOSE;
			vt->dump_kmem_cache(&meminfo);
		}
	}

	if (vflag == 1)
		dump_vmlist(&meminfo);

	if (Cflag == 1) {
		meminfo.flags = VERBOSE;
		dump_page_hash_table(&meminfo);
	}

	if (cflag == 1)
		dump_page_hash_table(&meminfo);

	if (nflag == 1)
		dump_memory_nodes(MEMORY_NODES_DUMP);

	if (zflag == 1)
		dump_zone_stats();

	if (lflag == 1) {
		dump_page_lists(&meminfo);
	}

	if (Lflag == 1) {
		meminfo.flags |= VERBOSE;
		dump_page_lists(&meminfo);
	}

	if (Vflag == 1) {
		dump_vm_stat(NULL, NULL, 0);
		dump_page_states();
		dump_vm_event_state();
	}

	if (oflag == 1)
		dump_per_cpu_offsets();

	if (gflag == 1)
		dump_page_flags(0);

	/* no option and no address at all: show usage */
	if (!(sflag + Sflag + pflag + fflag + Fflag + vflag + Vflag + zflag +
	    oflag + cflag + Cflag + iflag + nflag + lflag + Lflag + gflag +
	    hflag + meminfo.calls))
		cmd_usage(pc->curcmd, SYNOPSIS);
}

/*
 * Determine the PG_reserved page-flag bit value, preferring the kernel's
 * own "PG_reserved" enumerator.  Failing that, probe the page struct of a
 * kernel text address and derive the bit from its flags word.
 */
static void
PG_reserved_flag_init(void)
{
	ulong pageptr;
	int count;
	ulong vaddr, flags;
	char *buf;

	/* best case: the debuginfo provides the enumerator (bit number) */
	if (enumerator_value("PG_reserved", (long *)&flags)) {
		vt->PG_reserved = 1 << flags;
		if (CRASHDEBUG(2))
			fprintf(fp,
			    "PG_reserved (enum): %lx\n", vt->PG_reserved);
		return;
	}

	/* otherwise probe a kernel-text page — presumably its page struct
	 * carries PG_reserved; verify against target kernel version */
	vaddr = kt->stext ? kt->stext : symbol_value("sys_read");

	if (!phys_to_page((physaddr_t)VTOP(vaddr), &pageptr))
		return;

	buf = (char *)GETBUF(SIZE(page));

	if (!readmem(pageptr, KVADDR, buf, SIZE(page),
	    "reserved page", RETURN_ON_ERROR|QUIET)) {
		FREEBUF(buf);
		return;
	}

	flags = ULONG(buf + OFFSET(page_flags));
	count = INT(buf + OFFSET(page_count));

	/* single bit set: take it verbatim; otherwise use lowest set bit */
	if (count_bits_long(flags) == 1)
		vt->PG_reserved = flags;
	else
		vt->PG_reserved = 1 << (ffsl(flags)-1);

	/* count of -1 indicates the kernel biases page counts by one */
	if (count == -1)
		vt->flags |= PGCNT_ADJ;

	if (CRASHDEBUG(2))
		fprintf(fp,
		    "PG_reserved: vaddr: %lx page: %lx flags: %lx => %lx\n",
		    vaddr, pageptr, flags, vt->PG_reserved);

	FREEBUF(buf);
}

/*
 * Determine the PG_slab bit and the compound-page head/tail linkage mask,
 * across the many kernel versions crash supports.
 */
static void
PG_slab_flag_init(void)
{
	int bit;
	ulong pageptr;
	ulong vaddr, flags, flags2;
	char buf[BUFSIZE]; /* safe for a page struct */

	/*
	 * Set the old defaults in case all else fails.
	 */
	/* the enumerator, when present, is authoritative */
	if (enumerator_value("PG_slab", (long *)&flags)) {
		vt->PG_slab = flags;
		if (CRASHDEBUG(2))
			fprintf(fp, "PG_slab (enum): %lx\n", vt->PG_slab);
	} else if (VALID_MEMBER(page_pte)) {
		/* note: the "else if" condition below is the logical
		 * complement of the preceding test, so it always holds */
		if (THIS_KERNEL_VERSION < LINUX(2,6,0))
			vt->PG_slab = 10;
		else if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
			vt->PG_slab = 7;
	} else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) {
		vt->PG_slab = 7;
	} else {
		/* last resort: read the page struct backing the
		 * vm_area_cachep slab and use its lowest set flag bit */
		if (try_get_symbol_data("vm_area_cachep", sizeof(void *), &vaddr) &&
		    phys_to_page((physaddr_t)VTOP(vaddr), &pageptr) &&
		    readmem(pageptr, KVADDR, buf, SIZE(page),
		    "vm_area_cachep page", RETURN_ON_ERROR|QUIET)) {
			flags = ULONG(buf + OFFSET(page_flags));
			if ((bit = ffsl(flags))) {
				vt->PG_slab = bit - 1;
				if (CRASHDEBUG(2))
					fprintf(fp,
					    "PG_slab bit: vaddr: %lx page: %lx flags: %lx => %ld\n",
					    vaddr, pageptr, flags, vt->PG_slab);
			}
		}
	}

	if (VALID_MEMBER(page_compound_head)) {
		/* modern kernels link tail pages via page.compound_head,
		 * so no head/tail flag mask is needed */
		if (CRASHDEBUG(2))
			fprintf(fp,
			    "PG_head_tail_mask: (UNUSED): page.compound_head exists!\n");
	} else if (vt->flags & KMALLOC_SLUB) {
		/*
		 * PG_slab and the following are hardwired for
		 * kernels prior to the pageflags enumerator.
		 */
#define PG_compound	14	/* Part of a compound page */
#define PG_reclaim	17	/* To be reclaimed asap */
		vt->PG_head_tail_mask = ((1L << PG_compound) | (1L << PG_reclaim));

		/* prefer enumerator values over the hardwired defaults */
		if (enumerator_value("PG_tail", (long *)&flags))
			vt->PG_head_tail_mask = (1L << flags);
		else if (enumerator_value("PG_compound", (long *)&flags) &&
		    enumerator_value("PG_reclaim", (long *)&flags2)) {
			vt->PG_head_tail_mask = ((1L << flags) | (1L << flags2));
			if (CRASHDEBUG(2))
				fprintf(fp, "PG_head_tail_mask: %lx\n",
				    vt->PG_head_tail_mask);
		} else if (vt->flags & PAGEFLAGS) {
			vt->PG_head_tail_mask = 0;
			error(WARNING,
			    "SLUB: cannot determine how compound pages are linked\n\n");
		}
	} else {
		/* SLAB variant of the same head/tail mask derivation */
		if (enumerator_value("PG_tail", (long *)&flags))
			vt->PG_head_tail_mask = (1L << flags);
		else if (enumerator_value("PG_compound", (long *)&flags) &&
		    enumerator_value("PG_reclaim", (long *)&flags2)) {
			vt->PG_head_tail_mask = ((1L << flags) | (1L << flags2));
			if (CRASHDEBUG(2))
				fprintf(fp,
				    "PG_head_tail_mask: %lx (PG_compound|PG_reclaim)\n",
				    vt->PG_head_tail_mask);
		} else if (vt->flags & PAGEFLAGS)
			error(WARNING,
			    "SLAB: cannot determine how compound pages are linked\n\n");
	}

	if (!vt->PG_slab)
		error(INFO, "cannot determine PG_slab bit value\n");
}

/*
 * dump_mem_map() displays basic data about each entry in the mem_map[]
 * array, or if an address is specified, just the mem_map[] entry for that
 * address.  Specified addresses can either be physical address or page
 * structure pointers.
*/ /* Page flag bit values */ #define v22_PG_locked 0 #define v22_PG_error 1 #define v22_PG_referenced 2 #define v22_PG_dirty 3 #define v22_PG_uptodate 4 #define v22_PG_free_after 5 #define v22_PG_decr_after 6 #define v22_PG_swap_unlock_after 7 #define v22_PG_DMA 8 #define v22_PG_Slab 9 #define v22_PG_swap_cache 10 #define v22_PG_skip 11 #define v22_PG_reserved 31 #define v24_PG_locked 0 #define v24_PG_error 1 #define v24_PG_referenced 2 #define v24_PG_uptodate 3 #define v24_PG_dirty 4 #define v24_PG_decr_after 5 #define v24_PG_active 6 #define v24_PG_inactive_dirty 7 #define v24_PG_slab 8 #define v24_PG_swap_cache 9 #define v24_PG_skip 10 #define v24_PG_inactive_clean 11 #define v24_PG_highmem 12 #define v24_PG_checked 13 /* kill me in 2.5.. */ #define v24_PG_bigpage 14 /* bits 21-30 unused */ #define v24_PG_arch_1 30 #define v24_PG_reserved 31 #define v26_PG_private 12 #define PGMM_CACHED (512) static void dump_mem_map_SPARSEMEM(struct meminfo *mi) { ulong i; long total_pages; int others, page_not_mapped, phys_not_mapped, page_mapping; ulong pp, ppend; physaddr_t phys, physend; ulong tmp, reserved, shared, slabs; ulong PG_reserved_flag; long buffers; ulong inode, offset, flags, mapping, index; uint count; int print_hdr, pg_spec, phys_spec, done; int v22; char hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char *page_cache; char *pcache; ulong section, section_nr, nr_mem_sections, section_size; long buffersize; char *outputbuffer; int bufferindex; buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); char style1[100]; char style2[100]; char style3[100]; char style4[100]; sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), 
mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = 
TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; nr_mem_sections = NR_MEM_SECTIONS(); bufferindex = 0; /* * Iterate over all possible sections */ for (section_nr = 0; section_nr < nr_mem_sections ; section_nr++) { if (CRASHDEBUG(2)) fprintf(fp, "section_nr = %ld\n", section_nr); /* * If we are looking up a specific address, jump directly * to the section with that page */ if (mi->flags & ADDRESS_SPECIFIED) { ulong pfn; physaddr_t tmp; if (pg_spec) { if (!page_to_phys(mi->spec_addr, &tmp)) return; pfn = tmp >> PAGESHIFT(); } else pfn = mi->spec_addr >> PAGESHIFT(); section_nr = pfn_to_section_nr(pfn); } if (!(section = valid_section_nr(section_nr))) { #ifdef NOTDEF break; /* On a real sparsemem system we need to check * every section as gaps may exist. But this * can be slow. If we know we don't have gaps * just stop validating sections when we * get to the end of the valid ones. * In the future find a way to short circuit * this loop. 
*/ #endif if (mi->flags & ADDRESS_SPECIFIED) break; continue; } if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } pp = section_mem_map_addr(section); pp = sparse_decode_mem_map(pp, section_nr); phys = (physaddr_t) section_nr * PAGES_PER_SECTION() * PAGESIZE(); section_size = PAGES_PER_SECTION(); for (i = 0; i < section_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if ((flags >> vt->PG_slab) & 1) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) 
PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> 
v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } static void dump_mem_map(struct meminfo *mi) { long i, n; long total_pages; int others, page_not_mapped, phys_not_mapped, page_mapping; ulong pp, ppend; physaddr_t phys, physend; ulong tmp, reserved, shared, slabs; ulong PG_reserved_flag; long buffers; ulong inode, offset, flags, mapping, index; ulong node_size; uint count; int print_hdr, pg_spec, phys_spec, done; int v22; struct node_table *nt; char hdr[BUFSIZE]; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; char *page_cache; char *pcache; long buffersize; 
char *outputbuffer; int bufferindex; char style1[100]; char style2[100]; char style3[100]; char style4[100]; if (IS_SPARSEMEM()) { dump_mem_map_SPARSEMEM(mi); return; } buffersize = 1024 * 1024; outputbuffer = GETBUF(buffersize + 512); sprintf((char *)&style1, "%%lx%s%%%dllx%s%%%dlx%s%%8lx %%2d%s", space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE), space(MINSPACE)); sprintf((char *)&style2, "%%-%dlx%s%%%dllx%s%s%s%s %2s ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, " "), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, " "), " "); sprintf((char *)&style3, "%%-%dlx%s%%%dllx%s%s%s%s %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "-------"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "-----")); sprintf((char *)&style4, "%%-%dlx%s%%%dllx%s%%%dlx%s%%8lx %%2d ", VADDR_PRLEN, space(MINSPACE), (int)MAX(PADDR_PRLEN, strlen("PHYSICAL")), space(MINSPACE), VADDR_PRLEN, space(MINSPACE)); v22 = VALID_MEMBER(page_inode); /* page.inode vs. 
page.mapping */ if (v22) { sprintf(hdr, "%s%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "INODE"), space(MINSPACE), mkstring(buf4, 8, CENTER|LJUST, "OFFSET"), space(MINSPACE-1)); } else if (mi->nr_members) { sprintf(hdr, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE")); for (i = 0; i < mi->nr_members; i++) sprintf(&hdr[strlen(hdr)], " %s", mi->page_member_cache[i].member); strcat(hdr, "\n"); } else { sprintf(hdr, "%s%s%s%s%s%s%sCNT FLAGS\n", mkstring(buf1, VADDR_PRLEN, CENTER, "PAGE"), space(MINSPACE), mkstring(buf2, MAX(PADDR_PRLEN, strlen("PHYSICAL")), RJUST, "PHYSICAL"), space(MINSPACE), mkstring(buf3, VADDR_PRLEN, CENTER|RJUST, "MAPPING"), space(MINSPACE), mkstring(buf4, 8, CENTER|RJUST, "INDEX")); } mapping = index = 0; reserved = shared = slabs = buffers = inode = offset = 0; pg_spec = phys_spec = print_hdr = FALSE; switch (mi->flags) { case ADDRESS_SPECIFIED: switch (mi->memtype) { case KVADDR: if (is_page_ptr(mi->spec_addr, NULL)) pg_spec = TRUE; else { if (kvtop(NULL, mi->spec_addr, &phys, 0)) { mi->spec_addr = phys; phys_spec = TRUE; } else return; } break; case PHYSADDR: phys_spec = TRUE; break; default: error(FATAL, "dump_mem_map: no memtype specified\n"); break; } print_hdr = TRUE; break; case GET_ALL: shared = 0; reserved = 0; buffers = 0; slabs = 0; break; case GET_SHARED_PAGES: shared = 0; break; case GET_TOTALRAM_PAGES: reserved = 0; break; case GET_BUFFERS_PAGES: buffers = 0; break; case GET_SLAB_PAGES: slabs = 0; break; default: print_hdr = TRUE; break; } page_cache = GETBUF(SIZE(page) * PGMM_CACHED); done = FALSE; total_pages = 0; bufferindex = 0; for (n = 0; n < vt->numnodes; n++) { if (print_hdr) { if (!(pc->curcmd_flags & HEADER_PRINTED)) fprintf(fp, "%s%s", n ? 
"\n" : "", hdr); print_hdr = FALSE; pc->curcmd_flags |= HEADER_PRINTED; } nt = &vt->node_table[n]; total_pages += nt->size; pp = nt->mem_map; phys = nt->start_paddr; if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1)) node_size = vt->max_mapnr; else node_size = nt->size; for (i = 0; i < node_size; i++, pp += SIZE(page), phys += PAGESIZE()) { if ((i % PGMM_CACHED) == 0) { ppend = pp + ((PGMM_CACHED-1) * SIZE(page)); physend = phys + ((PGMM_CACHED-1) * PAGESIZE()); if ((pg_spec && (mi->spec_addr > ppend)) || (phys_spec && (PHYSPAGEBASE(mi->spec_addr) > physend))) { i += (PGMM_CACHED-1); pp = ppend; phys = physend; continue; } fill_mem_map_cache(pp, ppend, page_cache); } pcache = page_cache + ((i%PGMM_CACHED) * SIZE(page)); if (received_SIGINT()) restart(0); if ((pg_spec && (pp == mi->spec_addr)) || (phys_spec && (phys == PHYSPAGEBASE(mi->spec_addr)))) done = TRUE; if (!done && (pg_spec || phys_spec)) continue; if (mi->nr_members) { bufferindex += show_page_member_data(pcache, pp, mi, outputbuffer+bufferindex); goto display_members; } flags = ULONG(pcache + OFFSET(page_flags)); if (SIZE(page_flags) == 4) flags &= 0xffffffff; count = UINT(pcache + OFFSET(page_count)); switch (mi->flags) { case GET_ALL: case GET_BUFFERS_PAGES: if (VALID_MEMBER(page_buffers)) { tmp = ULONG(pcache + OFFSET(page_buffers)); if (tmp) buffers++; } else if (THIS_KERNEL_VERSION >= LINUX(2,6,0)) { if ((flags >> v26_PG_private) & 1) buffers++; } else error(FATAL, "cannot determine whether pages have buffers\n"); if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SLAB_PAGES: if (v22) { if ((flags >> v22_PG_Slab) & 1) slabs++; } else if (vt->PG_slab) { if ((flags >> vt->PG_slab) & 1) slabs++; } else { if ((flags >> v24_PG_slab) & 1) slabs++; } if (mi->flags != GET_ALL) continue; /* FALLTHROUGH */ case GET_SHARED_PAGES: case GET_TOTALRAM_PAGES: if (vt->PG_reserved) PG_reserved_flag = vt->PG_reserved; else PG_reserved_flag = v22 ? 
1 << v22_PG_reserved : 1 << v24_PG_reserved; if (flags & PG_reserved_flag) { reserved++; } else { if ((int)count > (vt->flags & PGCNT_ADJ ? 0 : 1)) shared++; } continue; } page_mapping = VALID_MEMBER(page_mapping); if (v22) { inode = ULONG(pcache + OFFSET(page_inode)); offset = ULONG(pcache + OFFSET(page_offset)); } else if (page_mapping) { mapping = ULONG(pcache + OFFSET(page_mapping)); index = ULONG(pcache + OFFSET(page_index)); } page_not_mapped = phys_not_mapped = FALSE; if (v22) { bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style1, pp, phys, inode, offset, count); } else { if ((vt->flags & V_MEM_MAP)) { if (!machdep->verify_paddr(phys)) phys_not_mapped = TRUE; if (!kvtop(NULL, pp, NULL, 0)) page_not_mapped = TRUE; } if (page_not_mapped) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style2, pp, phys); else if (!page_mapping) bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style3, pp, phys, count); else bufferindex += sprintf(outputbuffer+bufferindex, (char *)&style4, pp, phys, mapping, index, count); } others = 0; #define sprintflag(X) sprintf(outputbuffer + bufferindex, X, others++ ? 
"," : "") if (v22) { if ((flags >> v22_PG_DMA) & 1) bufferindex += sprintflag("%sDMA"); if ((flags >> v22_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v22_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v22_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v22_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v22_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v22_PG_free_after) & 1) bufferindex += sprintflag("%sfree_after"); if ((flags >> v22_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v22_PG_swap_unlock_after) & 1) bufferindex += sprintflag("%sswap_unlock_after"); if ((flags >> v22_PG_Slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> v22_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v22_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v22_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } else if (THIS_KERNEL_VERSION > LINUX(2,4,9)) { if (vt->flags & PAGEFLAGS) bufferindex += translate_page_flags(outputbuffer+bufferindex, flags); else bufferindex += sprintf(outputbuffer+bufferindex, "%lx\n", flags); } else { if ((flags >> v24_PG_locked) & 1) bufferindex += sprintflag("%slocked"); if ((flags >> v24_PG_error) & 1) bufferindex += sprintflag("%serror"); if ((flags >> v24_PG_referenced) & 1) bufferindex += sprintflag("%sreferenced"); if ((flags >> v24_PG_uptodate) & 1) bufferindex += sprintflag("%suptodate"); if ((flags >> v24_PG_dirty) & 1) bufferindex += sprintflag("%sdirty"); if ((flags >> v24_PG_decr_after) & 1) bufferindex += sprintflag("%sdecr_after"); if ((flags >> v24_PG_active) & 1) bufferindex += sprintflag("%sactive"); if ((flags >> v24_PG_inactive_dirty) & 1) bufferindex += sprintflag("%sinactive_dirty"); if ((flags >> v24_PG_slab) & 1) bufferindex += sprintflag("%sslab"); if ((flags >> 
v24_PG_swap_cache) & 1) bufferindex += sprintflag("%sswap_cache"); if ((flags >> v24_PG_skip) & 1) bufferindex += sprintflag("%sskip"); if ((flags >> v24_PG_inactive_clean) & 1) bufferindex += sprintflag("%sinactive_clean"); if ((flags >> v24_PG_highmem) & 1) bufferindex += sprintflag("%shighmem"); if ((flags >> v24_PG_checked) & 1) bufferindex += sprintflag("%schecked"); if ((flags >> v24_PG_bigpage) & 1) bufferindex += sprintflag("%sbigpage"); if ((flags >> v24_PG_arch_1) & 1) bufferindex += sprintflag("%sarch_1"); if ((flags >> v24_PG_reserved) & 1) bufferindex += sprintflag("%sreserved"); if (phys_not_mapped) bufferindex += sprintflag("%s[NOT MAPPED]"); bufferindex += sprintf(outputbuffer+bufferindex, "\n"); } display_members: if (bufferindex > buffersize) { fprintf(fp, "%s", outputbuffer); bufferindex = 0; } if (done) break; } if (done) break; } if (bufferindex > 0) { fprintf(fp, "%s", outputbuffer); } switch (mi->flags) { case GET_TOTALRAM_PAGES: mi->retval = total_pages - reserved; break; case GET_SHARED_PAGES: mi->retval = shared; break; case GET_BUFFERS_PAGES: mi->retval = buffers; break; case GET_SLAB_PAGES: mi->retval = slabs; break; case GET_ALL: mi->get_totalram = total_pages - reserved; mi->get_shared = shared; mi->get_buffers = buffers; mi->get_slabs = slabs; break; case ADDRESS_SPECIFIED: mi->retval = done; break; } if (mi->nr_members) FREEBUF(mi->page_member_cache); FREEBUF(outputbuffer); FREEBUF(page_cache); } /* * Stash a chunk of PGMM_CACHED page structures, starting at addr, into the * passed-in buffer. The mem_map array is normally guaranteed to be * readable except in the case of virtual mem_map usage. When V_MEM_MAP * is in place, read all pages consumed by PGMM_CACHED page structures * that are currently mapped, leaving the unmapped ones just zeroed out. */ static void fill_mem_map_cache(ulong pp, ulong ppend, char *page_cache) { long size, cnt; ulong addr; char *bufptr; /* * Try to read it in one fell swoop. 
*/ if (readmem(pp, KVADDR, page_cache, SIZE(page) * PGMM_CACHED, "page struct cache", RETURN_ON_ERROR|QUIET)) return; /* * Break it into page-size-or-less requests, warning if it's * not a virtual mem_map. */ size = SIZE(page) * PGMM_CACHED; addr = pp; bufptr = page_cache; while (size > 0) { /* * Compute bytes till end of page. */ cnt = PAGESIZE() - PAGEOFFSET(addr); if (cnt > size) cnt = size; if (!readmem(addr, KVADDR, bufptr, size, "virtual page struct cache", RETURN_ON_ERROR|QUIET)) { BZERO(bufptr, size); if (!(vt->flags & V_MEM_MAP) && ((addr+size) < ppend)) error(WARNING, "mem_map[] from %lx to %lx not accessible\n", addr, addr+size); } addr += cnt; bufptr += cnt; size -= cnt; } } static void dump_hstates() { char *hstate; int i, len, order; long nr, free; ulong vaddr; char buf1[BUFSIZE]; char buf2[BUFSIZE]; if (!kernel_symbol_exists("hstates")) { error(INFO, "hstates[] array does not exist\n"); option_not_supported('h'); } if (INVALID_SIZE(hstate) || INVALID_MEMBER(hstate_order) || INVALID_MEMBER(hstate_name) || INVALID_MEMBER(hstate_nr_huge_pages) || INVALID_MEMBER(hstate_free_huge_pages)) { error(INFO, "hstate structure or members have changed\n"); option_not_supported('h'); } fprintf(fp, "%s", mkstring(buf1, VADDR_PRLEN, CENTER, "HSTATE")); fprintf(fp, " SIZE FREE TOTAL NAME\n"); len = get_array_length("hstates", NULL, 0); hstate = GETBUF(SIZE(hstate)); for (i = 0; i < len; i++) { vaddr = symbol_value("hstates") + (SIZE(hstate) * i); if (!readmem(vaddr, KVADDR, hstate, SIZE(hstate), "hstate", RETURN_ON_ERROR)) break; order = INT(hstate + OFFSET(hstate_order)); if (!order) continue; fprintf(fp, "%lx ", vaddr); pages_to_size(1 << order, buf1); shift_string_left(first_space(buf1), 1); fprintf(fp, "%s ", mkstring(buf2, 5, RJUST, buf1)); free = LONG(hstate + OFFSET(hstate_free_huge_pages)); sprintf(buf1, "%ld", free); fprintf(fp, "%s ", mkstring(buf2, 6, RJUST, buf1)); nr = LONG(hstate + OFFSET(hstate_nr_huge_pages)); sprintf(buf1, "%ld", nr); fprintf(fp, "%s 
", mkstring(buf2, 6, RJUST, buf1)); fprintf(fp, "%s\n", hstate + OFFSET(hstate_name)); } FREEBUF(hstate); } static void page_flags_init(void) { if (!page_flags_init_from_pageflag_names()) page_flags_init_from_pageflags_enum(); PG_reserved_flag_init(); PG_slab_flag_init(); } static int page_flags_init_from_pageflag_names(void) { int i, len; char *buffer, *nameptr; char namebuf[BUFSIZE]; ulong mask; void *name; MEMBER_OFFSET_INIT(trace_print_flags_mask, "trace_print_flags", "mask"); MEMBER_OFFSET_INIT(trace_print_flags_name, "trace_print_flags", "name"); STRUCT_SIZE_INIT(trace_print_flags, "trace_print_flags"); if (INVALID_SIZE(trace_print_flags) || INVALID_MEMBER(trace_print_flags_mask) || INVALID_MEMBER(trace_print_flags_name) || !kernel_symbol_exists("pageflag_names") || !(len = get_array_length("pageflag_names", NULL, 0))) return FALSE; buffer = GETBUF(SIZE(trace_print_flags) * len); if (!readmem(symbol_value("pageflag_names"), KVADDR, buffer, SIZE(trace_print_flags) * len, "pageflag_names array", RETURN_ON_ERROR)) { FREEBUF(buffer); return FALSE; } if (!(vt->pageflags_data = (struct pageflags_data *) malloc(sizeof(struct pageflags_data) * len))) { error(INFO, "cannot malloc pageflags_data cache\n"); FREEBUF(buffer); return FALSE; } if (CRASHDEBUG(1)) fprintf(fp, "pageflags from pageflag_names: \n"); for (i = 0; i < len; i++) { mask = ULONG(buffer + (SIZE(trace_print_flags)*i) + OFFSET(trace_print_flags_mask)); name = VOID_PTR(buffer + (SIZE(trace_print_flags)*i) + OFFSET(trace_print_flags_name)); if ((mask == -1UL) && !name) { /* Linux 3.5 and earlier */ len--; break; } if ((mask == 0UL) && !name) { /* Linux 4.6 and later */ len--; break; } if (!read_string((ulong)name, namebuf, BUFSIZE-1)) { error(INFO, "failed to read pageflag_names entry (i: %d name: \"%s\" mask: %ld)\n", i, name, mask); goto pageflags_fail; } if (!(nameptr = (char *)malloc(strlen(namebuf)+1))) { error(INFO, "cannot malloc pageflag_names space\n"); goto pageflags_fail; } strcpy(nameptr, 
namebuf); vt->pageflags_data[i].name = nameptr; vt->pageflags_data[i].mask = mask; if (CRASHDEBUG(1)) { fprintf(fp, " %08lx %s\n", vt->pageflags_data[i].mask, vt->pageflags_data[i].name); } } FREEBUF(buffer); vt->nr_pageflags = len; vt->flags |= PAGEFLAGS; return TRUE; pageflags_fail: FREEBUF(buffer); free(vt->pageflags_data); vt->pageflags_data = NULL; return FALSE; } static int page_flags_init_from_pageflags_enum(void) { int c; int p, len; char *nameptr; char buf[BUFSIZE]; char *arglist[MAXARGS]; if (!(vt->pageflags_data = (struct pageflags_data *) malloc(sizeof(struct pageflags_data) * 32))) { error(INFO, "cannot malloc pageflags_data cache\n"); return FALSE; } p = 0; pc->flags2 |= ALLOW_FP; open_tmpfile(); if (dump_enumerator_list("pageflags")) { rewind(pc->tmpfile); while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (!strstr(buf, " = ")) continue; c = parse_line(buf, arglist); if (strstr(arglist[0], "__NR_PAGEFLAGS")) { len = atoi(arglist[2]); if (!len || (len > 32)) goto enum_fail; vt->nr_pageflags = len; break; } if (!(nameptr = (char *)malloc(strlen(arglist[0])))) { error(INFO, "cannot malloc pageflags name space\n"); goto enum_fail; } strcpy(nameptr, arglist[0] + strlen("PG_")); vt->pageflags_data[p].name = nameptr; vt->pageflags_data[p].mask = 1 << atoi(arglist[2]); p++; } } else goto enum_fail; close_tmpfile(); pc->flags2 &= ~ALLOW_FP; if (CRASHDEBUG(1)) { fprintf(fp, "pageflags from enum: \n"); for (p = 0; p < vt->nr_pageflags; p++) fprintf(fp, " %08lx %s\n", vt->pageflags_data[p].mask, vt->pageflags_data[p].name); } vt->flags |= PAGEFLAGS; return TRUE; enum_fail: close_tmpfile(); pc->flags2 &= ~ALLOW_FP; for (c = 0; c < p; c++) free(vt->pageflags_data[c].name); free(vt->pageflags_data); vt->pageflags_data = NULL; vt->nr_pageflags = 0; return FALSE; } static int translate_page_flags(char *buffer, ulong flags) { char buf[BUFSIZE]; int i, others; sprintf(buf, "%lx", flags); if (flags) { for (i = others = 0; i < vt->nr_pageflags; i++) { if (flags & 
vt->pageflags_data[i].mask)
				/* append flag name: space before the first, commas after */
				sprintf(&buf[strlen(buf)],
					"%s%s",
					others++ ? "," : " ",
					vt->pageflags_data[i].name);
		}
	}

	strcat(buf, "\n");
	strcpy(buffer, buf);

	return(strlen(buf));
}

/*
 *  Display the mem_map data for a single page.
 */
int
dump_inode_page(ulong page)
{
	struct meminfo meminfo;

	/* set up a one-shot meminfo request for this page's kernel address */
	BZERO(&meminfo, sizeof(struct meminfo));
	meminfo.spec_addr = page;
	meminfo.memtype = KVADDR;
	meminfo.flags = ADDRESS_SPECIFIED;
	dump_mem_map(&meminfo);

	/* retval is set by dump_mem_map() when the address is found */
	return meminfo.retval;
}

/*
 *  dump_page_hash_table() displays the entries in each page_hash_table.
 */

/* number of hash-table entries read from the dumpfile per readmem() call */
#define PGHASH_CACHED (1024)

static void
dump_page_hash_table(struct meminfo *hi)
{
	int i;
	int len, entry_len;
	ulong page_hash_table, head;
	struct list_data list_data, *ld;
	struct gnu_request req;
	long total_cached;
	long page_cache_size;
	ulong this_addr, searchpage;
	int errflag, found, cnt, populated, verbose;
	uint ival;
	ulong buffer_pages;
	char buf[BUFSIZE];
	char hash_table[BUFSIZE];   /* NOTE(review): appears unused in this function */
	char *pcache, *pghash_cache;

	/*
	 *  Without a page_hash_table symbol, fall back to reporting the
	 *  page cache size from nr_pagecache (minus block-device pages),
	 *  if available; per-entry options are unsupported in that case.
	 */
	if (!vt->page_hash_table) {
		if (hi->flags & VERBOSE)
			option_not_supported('C');
		if (symbol_exists("nr_pagecache")) {
			buffer_pages = nr_blockdev_pages();
			get_symbol_data("nr_pagecache", sizeof(int), &ival);
			page_cache_size = (ulong)ival;
			page_cache_size -= buffer_pages;
			fprintf(fp, "page cache size: %ld\n", page_cache_size);
			if (hi->flags & ADDRESS_SPECIFIED)
				option_not_supported('c');
		} else
			option_not_supported('c');
		return;
	}

	ld = &list_data;

	/* searching for one page implies verbose list traversal */
	if (hi->spec_addr && (hi->flags & ADDRESS_SPECIFIED)) {
		verbose = TRUE;
		searchpage = hi->spec_addr;
	} else if (hi->flags & VERBOSE) {
		verbose = TRUE;
		searchpage = 0;
	} else {
		verbose = FALSE;
		searchpage = 0;
	}

	if (vt->page_hash_table_len == 0)
		error(FATAL, "cannot determine size of page_hash_table\n");

	page_hash_table = vt->page_hash_table;
	len = vt->page_hash_table_len;
	/* older kernels use a page_cache_bucket struct; newer, a bare pointer */
	entry_len = VALID_STRUCT(page_cache_bucket) ?
SIZE(page_cache_bucket) : sizeof(void *); populated = 0; if (CRASHDEBUG(1)) fprintf(fp, "page_hash_table length: %d\n", len); get_symbol_type("page_cache_size", NULL, &req); if (req.length == sizeof(int)) { get_symbol_data("page_cache_size", sizeof(int), &ival); page_cache_size = (long)ival; } else get_symbol_data("page_cache_size", sizeof(long), &page_cache_size); pghash_cache = GETBUF(sizeof(void *) * PGHASH_CACHED); if (searchpage) open_tmpfile(); hq_open(); for (i = total_cached = 0; i < len; i++, page_hash_table += entry_len) { if ((i % PGHASH_CACHED) == 0) { readmem(page_hash_table, KVADDR, pghash_cache, entry_len * PGHASH_CACHED, "page hash cache", FAULT_ON_ERROR); } pcache = pghash_cache + ((i%PGHASH_CACHED) * entry_len); if (VALID_STRUCT(page_cache_bucket)) pcache += OFFSET(page_cache_bucket_chain); head = ULONG(pcache); if (!head) continue; if (verbose) fprintf(fp, "page_hash_table[%d]\n", i); if (CRASHDEBUG(1)) populated++; BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = head; ld->searchfor = searchpage; ld->member_offset = OFFSET(page_next_hash); cnt = do_list(ld); total_cached += cnt; if (ld->searchfor) break; if (received_SIGINT()) restart(0); } hq_close(); fprintf(fp, "%spage_cache_size: %ld ", verbose ? "\n" : "", page_cache_size); if (page_cache_size != total_cached) fprintf(fp, "(found %ld)\n", total_cached); else fprintf(fp, "(verified)\n"); if (CRASHDEBUG(1)) fprintf(fp, "heads containing page(s): %d\n", populated); if (searchpage) { rewind(pc->tmpfile); found = FALSE; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "retval = TRUE; } } } /* * dump_free_pages() displays basic data about pages currently resident * in the free_area[] memory lists. If the flags contains the VERBOSE * bit, each page slab base address is dumped. If an address is specified * only the free_area[] data containing that page is displayed, along with * the page slab base address. 
Specified addresses can either be physical * address or page structure pointers. */ char *free_area_hdr1 = \ "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr2 = \ "AREA SIZE FREE_AREA_STRUCT\n"; static void dump_free_pages(struct meminfo *fi) { int i; int order; ulong free_area; char *free_area_buf; ulong *pp; int nr_mem_lists; struct list_data list_data, *ld; long cnt, total_free, chunk_size; int nr_free_pages; char buf[BUFSIZE]; char last_free[BUFSIZE]; char last_free_hdr[BUFSIZE]; int verbose, errflag, found; physaddr_t searchphys; ulong this_addr; physaddr_t this_phys; int do_search; ulong kfp, offset; int flen, dimension; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_free_pages called with (NODES|ZONES)\n"); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (nr_mem_lists == 0) error(FATAL, "cannot determine size/dimensions of free_area\n"); if (dimension) error(FATAL, "dump_free_pages called with multidimensional free area\n"); ld = &list_data; total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; free_area_buf = GETBUF(nr_mem_lists * SIZE(free_area_struct)); kfp = free_area = symbol_value("free_area"); flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); readmem(free_area, KVADDR, free_area_buf, SIZE(free_area_struct) * nr_mem_lists, "free_area_struct", FAULT_ON_ERROR); if (do_search) open_tmpfile(); if (!verbose) fprintf(fp, "%s", free_area_hdr1); hq_open(); for (i = 0; i < nr_mem_lists; i++) { pp = (ulong *)(free_area_buf + (SIZE(free_area_struct)*i)); chunk_size = power(2, i); if (verbose) fprintf(fp, "%s", free_area_hdr2); fprintf(fp, "%3d ", i); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? "\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } hq_close(); fprintf(fp, "\nnr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { order--; fprintf(fp, "%s", last_free_hdr); fprintf(fp, "%s", last_free); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? 
"in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages on kernels with a multi-dimensional free_area array. */ char *free_area_hdr5 = \ " AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n"; char *free_area_hdr6 = \ " AREA SIZE FREE_AREA_STRUCT\n"; static void dump_multidimensional_free_pages(struct meminfo *fi) { int i, j; struct list_data list_data, *ld; long cnt, total_free; ulong kfp, free_area; physaddr_t searchphys; int flen, errflag, verbose, nr_free_pages; int nr_mem_lists, dimension, order, do_search; ulong sum, found, offset; char *free_area_buf, *p; ulong *pp; long chunk_size; ulong this_addr; physaddr_t this_phys; char buf[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (vt->flags & (NODES|ZONES)) error(FATAL, "dump_multidimensional_free_pages called with (NODES|ZONES)\n"); ld = &list_data; if (SIZE(free_area_struct) % sizeof(ulong)) error(FATAL, "free_area_struct not long-word aligned?\n"); total_free = 0; searchphys = 0; chunk_size = 0; do_search = FALSE; get_symbol_data("nr_free_pages", sizeof(int), &nr_free_pages); switch (fi->flags) { case GET_FREE_HIGHMEM_PAGES: error(FATAL, "GET_FREE_HIGHMEM_PAGES invalid in this kernel\n"); case GET_FREE_PAGES: fi->retval = (ulong)nr_free_pages; return; case ADDRESS_SPECIFIED: switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_multidimensional_free_pages: no memtype specified\n"); } do_search = TRUE; break; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT")); nr_mem_lists = ARRAY_LENGTH(free_area); dimension = ARRAY_LENGTH(free_area_DIMENSION); if (!nr_mem_lists || !dimension) error(FATAL, "cannot determine free_area dimensions\n"); free_area_buf = GETBUF((nr_mem_lists * SIZE(free_area_struct)) * dimension); kfp = free_area = symbol_value("free_area"); readmem(free_area, KVADDR, free_area_buf, (SIZE(free_area_struct) * nr_mem_lists) * dimension, "free_area arrays", FAULT_ON_ERROR); if (do_search) open_tmpfile(); hq_open(); for (i = sum = found = 0; i < dimension; i++) { if (!verbose) fprintf(fp, "%s", free_area_hdr5); pp = (ulong *)(free_area_buf + ((SIZE(free_area_struct)*nr_mem_lists)*i)); for (j = 0; j < nr_mem_lists; j++) { if (verbose) fprintf(fp, "%s", free_area_hdr6); sprintf(buf, "[%d][%d]", i, j); fprintf(fp, "%7s ", buf); chunk_size = power(2, j); sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024); fprintf(fp, "%5s ", buf); fprintf(fp, "%s %s", mkstring(buf, flen, CENTER|LONG_HEX, MKSTR(kfp)), verbose ? 
"\n" : ""); if (is_page_ptr(*pp, NULL)) { BZERO(ld, sizeof(struct list_data)); ld->flags = verbose; ld->start = *pp; ld->end = free_area; cnt = do_list(ld); total_free += (cnt * chunk_size); } else cnt = 0; if (!verbose) fprintf(fp, "%6ld %6ld\n", cnt, cnt * chunk_size ); pp += (SIZE(free_area_struct)/sizeof(ulong)); free_area += SIZE(free_area_struct); kfp += SIZE(free_area_struct); } fprintf(fp, "\n"); } hq_close(); fprintf(fp, "nr_free_pages: %d ", nr_free_pages); if (total_free != nr_free_pages) fprintf(fp, "(found %ld)\n", total_free); else fprintf(fp, "(verified)\n"); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Dump free pages in newer kernels that have zones. This is a work in * progress, because although the framework for memory nodes has been laid * down, complete support has not been put in place. 
*/ static char *zone_hdr = "ZONE NAME SIZE FREE"; static void dump_free_pages_zones_v1(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t this_phys, searchphys; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v1 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v1: no memtype specified\n"); } do_search = TRUE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; chunk_size = 0; zone_size_offset = 0; if (VALID_MEMBER(zone_struct_size)) zone_size_offset = OFFSET(zone_struct_size); else if (VALID_MEMBER(zone_struct_memsize)) zone_size_offset = OFFSET(zone_struct_memsize); else error(FATAL, "zone_struct has neither size nor memsize field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone_struct); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone_struct); continue; } if ((i == 0) && (vt->flags & NODES)) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? 
"\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_struct_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones {mem}size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_struct_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); readmem(node_zones+OFFSET(zone_struct_zone_start_paddr), KVADDR, &zone_start_paddr, sizeof(ulong), "node_zones zone_start_paddr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_start_mapnr), KVADDR, &zone_start_mapnr, sizeof(ulong), "node_zones zone_start_mapnr", FAULT_ON_ERROR); readmem(node_zones+OFFSET(zone_struct_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) found += dump_zone_free_area(node_zones+ OFFSET(zone_struct_free_area), vt->nr_free_areas, verbose, NULL); node_zones += SIZE(zone_struct); } } hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = this_addr = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); 
strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); break; case PHYSADDR: fprintf(fp, "(%llx is %s", fi->spec_addr, PAGEOFFSET(fi->spec_addr) ? "in " : ""); break; } fprintf(fp, "%s of %ld pages) ", ordinal(offset+1, buf), power(2, order)); } fi->retval = TRUE; fprintf(fp, "\n"); } } /* * Callback function for free-list search for a specific page. 
*/ struct free_page_callback_data { ulong searchpage; long chunk_size; ulong page; int found; }; static int free_page_callback(void *page, void *arg) { struct free_page_callback_data *cbd = arg; ulong first_page, last_page; first_page = (ulong)page; last_page = first_page + (cbd->chunk_size * SIZE(page)); if ((cbd->searchpage >= first_page) && (cbd->searchpage <= last_page)) { cbd->page = (ulong)page; cbd->found = TRUE; return TRUE; } return FALSE; } /* * Same as dump_free_pages_zones_v1(), but updated for numerous 2.6 zone * and free_area related data structure changes. */ static void dump_free_pages_zones_v2(struct meminfo *fi) { int i, n; ulong node_zones; ulong size; long zone_size_offset; long chunk_size; int order, errflag, do_search; ulong offset, verbose, value, sum, found; ulong this_addr; physaddr_t phys, this_phys, searchphys, end_paddr; ulong searchpage; struct free_page_callback_data callback_data; ulong pp; ulong zone_mem_map; ulong zone_start_paddr; ulong zone_start_pfn; ulong zone_start_mapnr; struct node_table *nt; char buf[BUFSIZE], *p; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char last_node[BUFSIZE]; char last_zone[BUFSIZE]; char last_area[BUFSIZE]; char last_area_hdr[BUFSIZE]; if (!(vt->flags & (NODES|ZONES))) error(FATAL, "dump_free_pages_zones_v2 called without (NODES|ZONES)\n"); if (fi->flags & ADDRESS_SPECIFIED) { switch (fi->memtype) { case KVADDR: if (!page_to_phys(fi->spec_addr, &searchphys)) { if (!kvtop(NULL, fi->spec_addr, &searchphys, 0)) return; } break; case PHYSADDR: searchphys = fi->spec_addr; break; default: error(FATAL, "dump_free_pages_zones_v2: no memtype specified\n"); } if (!phys_to_page(searchphys, &searchpage)) { error(INFO, "cannot determine page for %lx\n", fi->spec_addr); return; } do_search = TRUE; callback_data.searchpage = searchpage; callback_data.found = FALSE; } else { searchphys = 0; do_search = FALSE; } verbose = (do_search || (fi->flags & VERBOSE)) ? 
TRUE : FALSE; zone_size_offset = 0; chunk_size = 0; this_addr = 0; if (VALID_MEMBER(zone_spanned_pages)) zone_size_offset = OFFSET(zone_spanned_pages); else error(FATAL, "zone struct has no spanned_pages field\n"); if (do_search) open_tmpfile(); hq_open(); for (n = sum = found = 0; n < vt->numnodes; n++) { nt = &vt->node_table[n]; node_zones = nt->pgdat + OFFSET(pglist_data_node_zones); for (i = 0; i < vt->nr_zones; i++) { if (fi->flags == GET_FREE_PAGES) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; node_zones += SIZE(zone); continue; } if (fi->flags == GET_FREE_HIGHMEM_PAGES) { readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1) && STREQ(buf, "HighMem")) vt->ZONE_HIGHMEM = i; if (i == vt->ZONE_HIGHMEM) { readmem(node_zones+ OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); sum += value; } node_zones += SIZE(zone); continue; } if (fi->flags == GET_ZONE_SIZES) { readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); sum += size; node_zones += SIZE(zone); continue; } if ((i == 0) && ((vt->flags & NODES) || (vt->numnodes > 1))) { if (n) { fprintf(fp, "\n"); pad_line(fp, VADDR_PRLEN > 8 ? 74 : 66, '-'); fprintf(fp, "\n"); } fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id); } fprintf(fp, "%s%s %s START_PADDR START_MAPNR\n", i > 0 ? 
"\n" : "", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%3d ", i); readmem(node_zones+OFFSET(zone_name), KVADDR, &value, sizeof(void *), "node_zones name", FAULT_ON_ERROR); if (read_string(value, buf, BUFSIZE-1)) fprintf(fp, "%-9s ", buf); else fprintf(fp, "(unknown) "); readmem(node_zones+zone_size_offset, KVADDR, &size, sizeof(ulong), "node_zones size", FAULT_ON_ERROR); fprintf(fp, "%6ld ", size); readmem(node_zones+OFFSET(zone_free_pages), KVADDR, &value, sizeof(ulong), "node_zones free_pages", FAULT_ON_ERROR); fprintf(fp, "%6ld ", value); if (VALID_MEMBER(zone_zone_mem_map)) { readmem(node_zones+OFFSET(zone_zone_mem_map), KVADDR, &zone_mem_map, sizeof(ulong), "node_zones zone_mem_map", FAULT_ON_ERROR); } readmem(node_zones+ OFFSET(zone_zone_start_pfn), KVADDR, &zone_start_pfn, sizeof(ulong), "node_zones zone_start_pfn", FAULT_ON_ERROR); zone_start_paddr = PTOB(zone_start_pfn); if (!VALID_MEMBER(zone_zone_mem_map)) { if (IS_SPARSEMEM() || IS_DISCONTIGMEM()) { zone_mem_map = 0; if (size) { phys = PTOB(zone_start_pfn); if (phys_to_page(phys, &pp)) zone_mem_map = pp; } } else if (vt->flags & FLATMEM) { zone_mem_map = 0; if (size) zone_mem_map = nt->mem_map + (zone_start_pfn * SIZE(page)); } else error(FATAL, "\ncannot determine zone mem_map: TBD\n"); } if (zone_mem_map) zone_start_mapnr = (zone_mem_map - nt->mem_map) / SIZE(page); else zone_start_mapnr = 0; fprintf(fp, "%s %s %s\n", mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX,MKSTR(zone_mem_map)), mkstring(buf2, strlen("START_PADDR"), CENTER|LONG_HEX|RJUST, MKSTR(zone_start_paddr)), mkstring(buf3, strlen("START_MAPNR"), CENTER|LONG_DEC|RJUST, MKSTR(zone_start_mapnr))); sum += value; if (value) { if (do_search) { end_paddr = nt->start_paddr + ((physaddr_t)nt->size * (physaddr_t)PAGESIZE()); if ((searchphys >= nt->start_paddr) && (searchphys < end_paddr)) found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, &callback_data); if 
(callback_data.found) goto done_search; } else found += dump_zone_free_area(node_zones+ OFFSET(zone_free_area), vt->nr_free_areas, verbose, NULL); } node_zones += SIZE(zone); } } done_search: hq_close(); if (fi->flags & (GET_FREE_PAGES|GET_ZONE_SIZES|GET_FREE_HIGHMEM_PAGES)) { fi->retval = sum; return; } fprintf(fp, "\nnr_free_pages: %ld ", sum); if (sum == found) fprintf(fp, "(verified)\n"); else fprintf(fp, "(found %ld)\n", found); if (!do_search) return; found = FALSE; rewind(pc->tmpfile); order = offset = 0; last_node[0] = NULLCHAR; last_zone[0] = NULLCHAR; last_area[0] = NULLCHAR; last_area_hdr[0] = NULLCHAR; while (fgets(buf, BUFSIZE, pc->tmpfile)) { if (CRASHDEBUG(1) && STRNEQ(buf, "tmpfile); strcpy(last_node, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "ZONE")) { p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_zone, strip_linefeeds(buf)); continue; } if (STRNEQ(buf, "AREA")) { strcpy(last_area_hdr, buf); p = fgets(buf, BUFSIZE, pc->tmpfile); strcpy(last_area, strip_linefeeds(buf)); p = strstr(buf, "k"); *p = NULLCHAR; while (*p != ' ') p--; chunk_size = atol(p+1) * 1024; if (chunk_size == PAGESIZE()) order = 0; else order++; continue; } if (CRASHDEBUG(0) && !hexadecimal(strip_linefeeds(buf), 0)) continue; errflag = 0; this_addr = htol(strip_linefeeds(buf), RETURN_ON_ERROR, &errflag); if (errflag) continue; if (!page_to_phys(this_addr, &this_phys)) continue; if ((searchphys >= this_phys) && (searchphys < (this_phys+chunk_size))) { if (searchphys > this_phys) offset = (searchphys - this_phys)/PAGESIZE(); found = TRUE; break; } } close_tmpfile(); if (found) { if (strlen(last_node)) fprintf(fp, "NODE\n%s\n", last_node); fprintf(fp, "%s %s START_PADDR START_MAPNR\n", zone_hdr, mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); fprintf(fp, "%s\n", last_zone); fprintf(fp, "%s", last_area_hdr); fprintf(fp, "%s\n", last_area); fprintf(fp, "%lx ", this_addr); if (order) { switch (fi->memtype) { case KVADDR: fprintf(fp, "(%lx is ", (ulong)fi->spec_addr); 
break;
	case PHYSADDR:
		/* Physical search address: note when it falls mid-page. */
		fprintf(fp, "(%llx is %s", fi->spec_addr,
			PAGEOFFSET(fi->spec_addr) ? "in " : "");
		break;
	}
	fprintf(fp, "%s of %ld pages)",
		ordinal(offset+1, buf), chunk_size/PAGESIZE());
	}

	fi->retval = TRUE;
	fprintf(fp, "\n");
	}
}

/* Column header used by dump_zone_page_usage() below. */
static char * page_usage_hdr = "ZONE NAME FREE ACTIVE INACTIVE_DIRTY INACTIVE_CLEAN MIN/LOW/HIGH";

/*
 * Display info about the non-free pages in each zone.
 *
 * Only applies to older kernels whose zone_struct carries the
 * inactive_dirty/inactive_clean/active page counters and the
 * pages_min/low/high watermarks; returns FALSE immediately when any
 * of those members is absent, TRUE after dumping one table per node.
 */
static int
dump_zone_page_usage(void)
{
	int i, n;
	ulong value, node_zones;
	struct node_table *nt;
	ulong inactive_dirty_pages, inactive_clean_pages, active_pages;
	ulong free_pages, pages_min, pages_low, pages_high;
	char namebuf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];

	/* All six zone_struct members must exist in this kernel. */
	if (!VALID_MEMBER(zone_struct_inactive_dirty_pages) ||
	    !VALID_MEMBER(zone_struct_inactive_clean_pages) ||
	    !VALID_MEMBER(zone_struct_active_pages) ||
	    !VALID_MEMBER(zone_struct_pages_min) ||
	    !VALID_MEMBER(zone_struct_pages_low) ||
	    !VALID_MEMBER(zone_struct_pages_high))
		return FALSE;

	fprintf(fp, "\n");

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		node_zones = nt->pgdat + OFFSET(pglist_data_node_zones);

		/* Print a NODE banner only on NUMA configurations. */
		if ((vt->numnodes > 1) && (vt->flags & NODES)) {
			fprintf(fp, "%sNODE\n %2d\n", n ? "\n" : "", nt->node_id);
		}
		fprintf(fp, "%s\n", page_usage_hdr);

		/* Walk the zone_struct array embedded in this node's pgdat. */
		for (i = 0; i < vt->nr_zones; i++) {
			readmem(node_zones+OFFSET(zone_struct_free_pages),
				KVADDR, &free_pages, sizeof(ulong),
				"node_zones free_pages", FAULT_ON_ERROR);
			readmem(node_zones+OFFSET(zone_struct_inactive_dirty_pages),
				KVADDR, &inactive_dirty_pages, sizeof(ulong),
				"node_zones inactive_dirty_pages",
				FAULT_ON_ERROR);
			readmem(node_zones+OFFSET(zone_struct_inactive_clean_pages),
				KVADDR, &inactive_clean_pages, sizeof(ulong),
				"node_zones inactive_clean_pages",
				FAULT_ON_ERROR);
			readmem(node_zones+OFFSET(zone_struct_active_pages),
				KVADDR, &active_pages, sizeof(ulong),
				"node_zones active_pages", FAULT_ON_ERROR);
			readmem(node_zones+OFFSET(zone_struct_pages_min),
				KVADDR, &pages_min, sizeof(ulong),
				"node_zones pages_min", FAULT_ON_ERROR);
			readmem(node_zones+OFFSET(zone_struct_pages_low),
				KVADDR, &pages_low, sizeof(ulong),
				"node_zones pages_low", FAULT_ON_ERROR);
			readmem(node_zones+OFFSET(zone_struct_pages_high),
				KVADDR, &pages_high, sizeof(ulong),
				"node_zones pages_high", FAULT_ON_ERROR);

			/* zone name is a pointer to a kernel string. */
			readmem(node_zones+OFFSET(zone_struct_name), KVADDR,
				&value, sizeof(void *),
				"node_zones name", FAULT_ON_ERROR);
			if (read_string(value, buf1, BUFSIZE-1))
				sprintf(namebuf, "%-8s", buf1);
			else
				sprintf(namebuf, "(unknown)");

			sprintf(buf2, "%ld/%ld/%ld",
				pages_min, pages_low, pages_high);
			fprintf(fp, "%3d %s %7ld %7ld %15ld %15ld %s\n",
				i, namebuf, free_pages, active_pages,
				inactive_dirty_pages, inactive_clean_pages,
				mkstring(buf3, strlen("MIN/LOW/HIGH"),
				CENTER, buf2));

			node_zones += SIZE(zone_struct);
		}
	}

	return TRUE;
}

/*
 * Dump the num "order" contents of the zone_t free_area array.
 */
char *free_area_hdr3 = "AREA SIZE FREE_AREA_STRUCT\n";
char *free_area_hdr4 = "AREA SIZE FREE_AREA_STRUCT BLOCKS PAGES\n";

/*
 * Dump the per-order buddy free lists starting at "free_area".
 *
 *  free_area:     kernel address of the zone's free_area array
 *  num:           number of orders (array entries) to walk
 *  verbose:       when set, the page addresses themselves are listed
 *  callback_data: non-NULL only when searching for a specific free page;
 *                 steers do_list() through free_page_callback()
 *
 * Returns the total number of free pages counted.  Handles three
 * generations of the kernel structure: free_area_struct (2.4 era),
 * a 3-ulong free_area, and the modern free_area whose free_list is
 * an array of list_heads (one per migrate type) -- the latter is the
 * "multiple_lists" path.
 */
static int
dump_zone_free_area(ulong free_area, int num, ulong verbose,
		    struct free_page_callback_data *callback_data)
{
	int i, j;
	long chunk_size;
	int flen, total_free, cnt;
	char buf[BUFSIZE];
	ulong free_area_buf[3];
	char *free_area_buf2;
	char *free_list_buf;
	ulong free_list;
	struct list_data list_data, *ld;
	int list_count;
	ulong *free_ptr;

	list_count = 0;
	free_list_buf = free_area_buf2 = NULL;

	/* Determine which structure generation this kernel uses. */
	if (VALID_STRUCT(free_area_struct)) {
		if (SIZE(free_area_struct) != (3 * sizeof(ulong)))
			error(FATAL,
			    "unrecognized free_area_struct size: %ld\n",
				SIZE(free_area_struct));
		list_count = 1;
	} else if (VALID_STRUCT(free_area)) {
		if (SIZE(free_area) == (3 * sizeof(ulong)))
			list_count = 1;
		else {
			/* free_list is an array of list_heads. */
			list_count = MEMBER_SIZE("free_area",
				"free_list")/SIZE(list_head);
			free_area_buf2 = GETBUF(SIZE(free_area));
			free_list_buf = GETBUF(SIZE(list_head));
			readmem(free_area, KVADDR, free_area_buf2,
				SIZE(free_area), "free_area struct",
				FAULT_ON_ERROR);
		}
	} else
		error(FATAL,
		    "neither free_area_struct or free_area structures exist\n");

	ld = &list_data;

	if (!verbose)
		fprintf(fp, "%s", free_area_hdr4);

	total_free = 0;
	flen = MAX(VADDR_PRLEN, strlen("FREE_AREA_STRUCT"));

	if (list_count > 1)
		goto multiple_lists;

	/* Single free list per order (older kernels). */
	for (i = 0; i < num; i++,
	     free_area += SIZE_OPTION(free_area_struct, free_area)) {
		if (verbose)
			fprintf(fp, "%s", free_area_hdr3);
		fprintf(fp, "%3d ", i);
		chunk_size = power(2, i);
		sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
		fprintf(fp, " %7s ", buf);

		readmem(free_area, KVADDR, free_area_buf,
			sizeof(ulong) * 3, "free_area_struct",
			FAULT_ON_ERROR);

		fprintf(fp, "%s ",
			mkstring(buf, flen, CENTER|LONG_HEX,
			MKSTR(free_area)));

		/* list head pointing to itself == empty list. */
		if (free_area_buf[0] == free_area) {
			if (verbose)
				fprintf(fp, "\n");
			else
				fprintf(fp, "%6d %6d\n", 0, 0);
			continue;
		}

		if (verbose)
			fprintf(fp, "\n");

		BZERO(ld, sizeof(struct list_data));
		ld->flags = verbose | RETURN_ON_DUPLICATE;
		ld->start = free_area_buf[0];
		ld->end = free_area;
		if (VALID_MEMBER(page_list_next))
			ld->list_head_offset = OFFSET(page_list);
		else if (VALID_MEMBER(page_lru))
			ld->list_head_offset = OFFSET(page_lru)+
				OFFSET(list_head_next);
		else
			error(FATAL,
			    "neither page.list or page.lru exist?\n");

		cnt = do_list(ld);
		if (cnt < 0) {
			/* Negative count from do_list() == corrupt list. */
			error(pc->curcmd_flags & IGNORE_ERRORS ? INFO : FATAL,
			    "corrupted free list from free_area_struct: %lx\n",
				free_area);
			if (pc->curcmd_flags & IGNORE_ERRORS)
				break;
		}

		if (!verbose)
			fprintf(fp, "%6d %6ld\n", cnt, cnt*chunk_size);

		total_free += (cnt * chunk_size);
	}
	return total_free;

multiple_lists:
	/* Modern kernels: one free_list per migrate type per order. */
	for (i = 0; i < num; i++,
	     free_area += SIZE_OPTION(free_area_struct, free_area)) {

		readmem(free_area, KVADDR, free_area_buf2,
			SIZE(free_area), "free_area struct",
			FAULT_ON_ERROR);

		for (j = 0, free_list = free_area; j < list_count;
		     j++, free_list += SIZE(list_head)) {

			if (verbose)
				fprintf(fp, "%s", free_area_hdr3);

			fprintf(fp, "%3d ", i);
			chunk_size = power(2, i);
			sprintf(buf, "%ldk", (chunk_size * PAGESIZE())/1024);
			fprintf(fp, " %7s ", buf);

			readmem(free_list, KVADDR, free_list_buf,
				SIZE(list_head), "free_area free_list",
				FAULT_ON_ERROR);
			fprintf(fp, "%s ",
				mkstring(buf, flen, CENTER|LONG_HEX,
				MKSTR(free_list)));

			free_ptr = (ulong *)free_list_buf;

			/* Empty list: head points to itself. */
			if (*free_ptr == free_list) {
				if (verbose)
					fprintf(fp, "\n");
				else
					fprintf(fp, "%6d %6d\n", 0, 0);
				continue;
			}

			if (verbose)
				fprintf(fp, "\n");

			BZERO(ld, sizeof(struct list_data));
			ld->flags = verbose | RETURN_ON_DUPLICATE;
			ld->start = *free_ptr;
			ld->end = free_list;
			ld->list_head_offset = OFFSET(page_lru) +
				OFFSET(list_head_next);
			if (callback_data) {
				/* Searching: hand each page to the callback
				 * instead of printing it. */
				ld->flags &= ~VERBOSE;
				ld->flags |= (LIST_CALLBACK|CALLBACK_RETURN);
				ld->callback_func = free_page_callback;
				ld->callback_data = (void *)callback_data;
				callback_data->chunk_size = chunk_size;
			}

			cnt = do_list(ld);
			if (cnt < 0) {
				error(pc->curcmd_flags & IGNORE_ERRORS ?
					INFO : FATAL,
				    "corrupted free list %d from free_area struct: %lx\n",
					j, free_area);
				if (pc->curcmd_flags & IGNORE_ERRORS)
					goto bailout;
			}

			if (callback_data && callback_data->found) {
				fprintf(fp, "%lx\n", callback_data->page);
				goto bailout;
			}

			if (!verbose)
				fprintf(fp, "%6d %6ld\n",
					cnt, cnt*chunk_size);

			total_free += (cnt * chunk_size);
		}
	}

bailout:
	FREEBUF(free_area_buf2);
	FREEBUF(free_list_buf);
	return total_free;
}

/*
 * dump_kmeminfo displays basic memory use information typically shown
 * by /proc/meminfo, and then some...
 */
char *kmeminfo_hdr = " PAGES TOTAL PERCENTAGE\n";

static void
dump_kmeminfo(void)
{
	int i, len;
	ulong totalram_pages;
	ulong freeram_pages;
	ulong used_pages;
	ulong shared_pages;
	ulong buffer_pages;
	ulong subtract_buffer_pages;
	ulong totalswap_pages, totalused_pages;
	ulong totalhigh_pages;
	ulong freehighmem_pages;
	ulong totallowmem_pages;
	ulong freelowmem_pages;
	ulong allowed;
	long committed;
	ulong overcommit_kbytes = 0;
	int overcommit_ratio;
	ulong hugetlb_total_pages, hugetlb_total_free_pages = 0;
	int done_hugetlb_calc = 0;
	long nr_file_pages, nr_slab;
	ulong swapper_space_nrpages;
	ulong pct;
	ulong value1, value2;
	uint tmp;
	struct meminfo meminfo;
	struct gnu_request req;
	long page_cache_size;
	ulong get_totalram;
	ulong get_buffers;
	ulong get_slabs;
	struct syment *sp_array[2];
	char buf[BUFSIZE];

	/* First, tally everything via a full mem_map scan. */
	BZERO(&meminfo, sizeof(struct meminfo));
	meminfo.flags = GET_ALL;
	dump_mem_map(&meminfo);
	get_totalram = meminfo.get_totalram;
	shared_pages = meminfo.get_shared;
	get_buffers = meminfo.get_buffers;
	get_slabs = meminfo.get_slabs;

	/*
	 * If vm_stat array exists, override page search info.
 */
	if (vm_stat_init()) {
		/* Prefer the kernel's own slab counters over the scan. */
		if (dump_vm_stat("NR_SLAB", &nr_slab, 0))
			get_slabs = nr_slab;
		else if (dump_vm_stat("NR_SLAB_RECLAIMABLE", &nr_slab, 0)) {
			get_slabs = nr_slab;
			if (dump_vm_stat("NR_SLAB_UNRECLAIMABLE", &nr_slab, 0))
				get_slabs += nr_slab;
		}
	}

	fprintf(fp, "%s", kmeminfo_hdr);
	/*
	 * Get total RAM based upon how the various versions of si_meminfo()
	 * have done it, latest to earliest:
	 *
	 *  Prior to 2.3.36, count all mem_map pages minus the reserved ones.
	 *  From 2.3.36 onwards, use "totalram_pages" if set.
	 */
	if (symbol_exists("totalram_pages")) {
		totalram_pages = vt->totalram_pages ?
			vt->totalram_pages : get_totalram;
	} else
		totalram_pages = get_totalram;

	fprintf(fp, "%13s %7ld %11s ----\n",
		"TOTAL MEM", totalram_pages,
		pages_to_size(totalram_pages, buf));

	/*
	 * Get free pages from dump_free_pages() or its associates.
	 * Used pages are a free-bee...
	 */
	meminfo.flags = GET_FREE_PAGES;
	vt->dump_free_pages(&meminfo);
	freeram_pages = meminfo.retval;
	pct = (freeram_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"FREE", freeram_pages,
		pages_to_size(freeram_pages, buf), pct);

	used_pages = totalram_pages - freeram_pages;
	pct = (used_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"USED", used_pages, pages_to_size(used_pages, buf), pct);

	/*
	 * Get shared pages from dump_mem_map().  Note that this is done
	 * differently than the kernel -- it just tallies the non-reserved
	 * pages that have a count of greater than 1.
	 */
	pct = (shared_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"SHARED", shared_pages,
		pages_to_size(shared_pages, buf), pct);

	subtract_buffer_pages = 0;
	/* Buffer pages: three historical sources, newest symbol first. */
	if (symbol_exists("buffermem_pages")) {
		get_symbol_data("buffermem_pages", sizeof(int), &tmp);
		buffer_pages = (ulong)tmp;
	} else if (symbol_exists("buffermem")) {
		get_symbol_data("buffermem", sizeof(int), &tmp);
		buffer_pages = BTOP(tmp);
	} else if ((THIS_KERNEL_VERSION >= LINUX(2,6,0)) &&
		symbol_exists("nr_blockdev_pages")) {
		/* 2.6: emulate nr_blockdev_pages(); these pages are also
		 * counted in the page cache, so remember to subtract them. */
		subtract_buffer_pages = buffer_pages = nr_blockdev_pages();
	} else
		buffer_pages = 0;

	pct = (buffer_pages * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"BUFFERS", buffer_pages,
		pages_to_size(buffer_pages, buf), pct);

	if (CRASHDEBUG(1))
		error(NOTE, "pages with buffers: %ld\n", get_buffers);

	/*
	 * page_cache_size has evolved from a long to an atomic_t to
	 * not existing at all.
	 */
	if (symbol_exists("page_cache_size")) {
		get_symbol_type("page_cache_size", NULL, &req);
		if (req.length == sizeof(int)) {
			get_symbol_data("page_cache_size", sizeof(int), &tmp);
			page_cache_size = (long)tmp;
		} else
			get_symbol_data("page_cache_size", sizeof(long),
				&page_cache_size);
		page_cache_size -= subtract_buffer_pages;
	} else if (symbol_exists("nr_pagecache")) {
		get_symbol_data("nr_pagecache", sizeof(int), &tmp);
		page_cache_size = (long)tmp;
		page_cache_size -= subtract_buffer_pages;
	} else if (dump_vm_stat("NR_FILE_PAGES", &nr_file_pages, 0)) {
		/* Modern kernels: CACHED = NR_FILE_PAGES minus swap-cache
		 * pages minus buffer pages.  Three layouts of the swap
		 * address_space(s) are handled below, newest first. */
		char *swapper_space = GETBUF(SIZE(address_space));

		swapper_space_nrpages = 0;
		if (symbol_exists("nr_swapper_spaces") &&
			(len = get_array_length("nr_swapper_spaces",
				NULL, 0))) {
			/* Per-swapfile arrays of address_spaces. */
			char *nr_swapper_space =
				GETBUF(len * sizeof(unsigned int));
			readmem(symbol_value("nr_swapper_spaces"), KVADDR,
				nr_swapper_space, len * sizeof(unsigned int),
				"nr_swapper_space", RETURN_ON_ERROR);
			for (i = 0; i < len; i++) {
				int j;
				unsigned long sa;
				unsigned int banks = UINT(nr_swapper_space +
					(i * sizeof(unsigned int)));

				if (!banks)
					continue;

				readmem(symbol_value("swapper_spaces") +
					(i * sizeof(void *)),KVADDR,
					&sa, sizeof(void *),
					"swapper_space", RETURN_ON_ERROR);

				if (!sa)
					continue;

				for (j = 0; j < banks; j++) {
					readmem(sa + j * SIZE(address_space),
						KVADDR, swapper_space,
						SIZE(address_space),
						"swapper_space",
						RETURN_ON_ERROR);
					swapper_space_nrpages +=
						ULONG(swapper_space +
						OFFSET(address_space_nrpages));
				}
			}
			FREEBUF(nr_swapper_space);
		} else if (symbol_exists("swapper_spaces") &&
			(len = get_array_length("swapper_spaces", NULL, 0))) {
			/* Flat array of address_spaces. */
			for (i = 0; i < len; i++) {
				if (!readmem(symbol_value("swapper_spaces") +
				    i * SIZE(address_space), KVADDR,
				    swapper_space, SIZE(address_space),
				    "swapper_space", RETURN_ON_ERROR))
					break;
				swapper_space_nrpages +=
					ULONG(swapper_space +
					OFFSET(address_space_nrpages));
			}
		} else if (symbol_exists("swapper_space") &&
			readmem(symbol_value("swapper_space"), KVADDR,
			swapper_space, SIZE(address_space),
			"swapper_space", RETURN_ON_ERROR))
			/* Single global swapper_space. */
			swapper_space_nrpages = ULONG(swapper_space +
				OFFSET(address_space_nrpages));

		page_cache_size = nr_file_pages - swapper_space_nrpages -
			buffer_pages;
		FREEBUF(swapper_space);
	} else
		page_cache_size = 0;

	pct = (page_cache_size * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"CACHED", page_cache_size,
		pages_to_size(page_cache_size, buf), pct);

	/*
	 * Although /proc/meminfo doesn't show it, show how much memory
	 * the slabs take up.
	 */
	pct = (get_slabs * 100)/totalram_pages;
	fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
		"SLAB", get_slabs, pages_to_size(get_slabs, buf), pct);

	if (symbol_exists("totalhigh_pages")) {
		/* Two "totalhigh_pages" symbols may exist (e.g. a per-arch
		 * duplicate); take the larger of the two values. */
		switch (get_syment_array("totalhigh_pages", sp_array, 2))
		{
		case 1:
			get_symbol_data("totalhigh_pages", sizeof(ulong),
				&totalhigh_pages);
			break;
		case 2:
			if (!(readmem(sp_array[0]->value, KVADDR,
			    &value1, sizeof(ulong),
			    "totalhigh_pages #1", RETURN_ON_ERROR)))
				break;
			if (!(readmem(sp_array[1]->value, KVADDR,
			    &value2, sizeof(ulong),
			    "totalhigh_pages #2", RETURN_ON_ERROR)))
				break;
			totalhigh_pages = MAX(value1, value2);
			break;
		}

		pct = totalhigh_pages ?
			(totalhigh_pages * 100)/totalram_pages : 0;
		fprintf(fp, "\n%13s %7ld %11s %3ld%% of TOTAL MEM\n",
			"TOTAL HIGH", totalhigh_pages,
			pages_to_size(totalhigh_pages, buf), pct);

		meminfo.flags = GET_FREE_HIGHMEM_PAGES;
		vt->dump_free_pages(&meminfo);
		freehighmem_pages = meminfo.retval;
		pct = freehighmem_pages ?
			(freehighmem_pages * 100)/totalhigh_pages : 0;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HIGH\n",
			"FREE HIGH", freehighmem_pages,
			pages_to_size(freehighmem_pages, buf), pct);

		totallowmem_pages = totalram_pages - totalhigh_pages;
		pct = (totallowmem_pages * 100)/totalram_pages;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL MEM\n",
			"TOTAL LOW", totallowmem_pages,
			pages_to_size(totallowmem_pages, buf), pct);

		freelowmem_pages = freeram_pages - freehighmem_pages;
		pct = (freelowmem_pages * 100)/totallowmem_pages;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL LOW\n",
			"FREE LOW", freelowmem_pages,
			pages_to_size(freelowmem_pages, buf), pct);
	}

	if (get_hugetlb_total_pages(&hugetlb_total_pages,
	    &hugetlb_total_free_pages)) {
		done_hugetlb_calc = 1;

		fprintf(fp, "\n%13s %7ld %11s ----\n",
			"TOTAL HUGE", hugetlb_total_pages,
			pages_to_size(hugetlb_total_pages, buf));
		pct = hugetlb_total_free_pages ?
			(hugetlb_total_free_pages * 100) /
			hugetlb_total_pages : 0;
		fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL HUGE\n",
			"HUGE FREE", hugetlb_total_free_pages,
			pages_to_size(hugetlb_total_free_pages, buf), pct);
	}

	/*
	 * get swap data from dump_swap_info().
	 */
	fprintf(fp, "\n");
	if (symbol_exists("swapper_space") ||
	    symbol_exists("swapper_spaces")) {
		if (dump_swap_info(RETURN_ON_ERROR, &totalswap_pages,
		    &totalused_pages)) {
			fprintf(fp, "%13s %7ld %11s ----\n",
				"TOTAL SWAP", totalswap_pages,
				pages_to_size(totalswap_pages, buf));
			pct = totalswap_pages ? (totalused_pages * 100) /
				totalswap_pages : 0;
			fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n",
				"SWAP USED", totalused_pages,
				pages_to_size(totalused_pages, buf), pct);
			pct = totalswap_pages ?
				((totalswap_pages - totalused_pages) *
				100) / totalswap_pages : 0;
			fprintf(fp, "%13s %7ld %11s %3ld%% of TOTAL SWAP\n",
				"SWAP FREE",
				totalswap_pages - totalused_pages,
				pages_to_size(totalswap_pages -
				totalused_pages, buf), pct);
		} else
			error(INFO,
			    "swap_info[%ld].swap_map at %lx is inaccessible\n",
				totalused_pages, totalswap_pages);
	}

	/*
	 * Show committed memory
	 */
	if (kernel_symbol_exists("sysctl_overcommit_memory")) {
		fprintf(fp, "\n");
		if (kernel_symbol_exists("sysctl_overcommit_kbytes"))
			get_symbol_data("sysctl_overcommit_kbytes",
				sizeof(ulong), &overcommit_kbytes);

		if (overcommit_kbytes)
			/* Absolute limit in KB: convert to pages. */
			allowed = overcommit_kbytes >>
				(machdep->pageshift - 10);
		else {
			get_symbol_data("sysctl_overcommit_ratio",
				sizeof(int), &overcommit_ratio);
			/* Ratio-based limit needs the hugetlb totals. */
			if (!done_hugetlb_calc)
				goto bailout;
			allowed = ((totalram_pages - hugetlb_total_pages)
				* overcommit_ratio / 100);
		}

		if (symbol_exists("vm_committed_as")) {
			if (INVALID_MEMBER(percpu_counter_count))
				goto bailout;
			readmem(symbol_value("vm_committed_as") +
				OFFSET(percpu_counter_count), KVADDR,
				&committed, sizeof(long),
				"percpu_counter count", FAULT_ON_ERROR);

			/* Ensure always positive */
			if (committed < 0)
				committed = 0;
		} else {
			if (INVALID_MEMBER(atomic_t_counter))
				goto bailout;
			readmem(symbol_value("vm_committed_space") +
				OFFSET(atomic_t_counter), KVADDR,
				&committed, sizeof(int),
				"atomic_t counter", FAULT_ON_ERROR);
		}
		allowed += totalswap_pages;
		fprintf(fp, "%13s %7ld %11s ----\n",
			"COMMIT LIMIT", allowed,
			pages_to_size(allowed, buf));

		if (allowed) {
			pct = committed ? ((committed * 100) / allowed) : 0;
			fprintf(fp,
				"%13s %7ld %11s %3ld%% of TOTAL LIMIT\n",
				"COMMITTED", committed,
				pages_to_size(committed, buf), pct);
		} else
			fprintf(fp, "%13s %7ld %11s ----\n",
				"COMMITTED", committed,
				pages_to_size(committed, buf));
	}

bailout:
	dump_zone_page_usage();
}

/*
 * Emulate 2.6 nr_blockdev_pages() function:
 * walk the all_bdevs list and sum bdev->bd_inode->i_mapping->nrpages.
 */
static ulong
nr_blockdev_pages(void)
{
	struct list_data list_data, *ld;
	int i, bdevcnt;
	ulong inode, address_space;
	ulong nrpages;
	char *block_device_buf, *inode_buf, *address_space_buf;

	ld = &list_data;
	BZERO(ld, sizeof(struct list_data));
	get_symbol_data("all_bdevs", sizeof(void *), &ld->start);
	if (empty_list(ld->start))
		return 0;
	ld->flags |= LIST_ALLOCATE;
	ld->end = symbol_value("all_bdevs");
	ld->list_head_offset = OFFSET(block_device_bd_list);

	block_device_buf = GETBUF(SIZE(block_device));
	inode_buf = GETBUF(SIZE(inode));
	address_space_buf = GETBUF(SIZE(address_space));

	bdevcnt = do_list(ld);

	/*
	 * go through the block_device list, emulating:
	 *
	 *     ret += bdev->bd_inode->i_mapping->nrpages;
	 */
	for (i = nrpages = 0; i < bdevcnt; i++) {
		readmem(ld->list_ptr[i], KVADDR, block_device_buf,
			SIZE(block_device), "block_device buffer",
			FAULT_ON_ERROR);
		inode = ULONG(block_device_buf +
			OFFSET(block_device_bd_inode));
		readmem(inode, KVADDR, inode_buf, SIZE(inode),
			"inode buffer", FAULT_ON_ERROR);
		address_space = ULONG(inode_buf + OFFSET(inode_i_mapping));
		readmem(address_space, KVADDR, address_space_buf,
			SIZE(address_space), "address_space buffer",
			FAULT_ON_ERROR);
		nrpages += ULONG(address_space_buf +
			OFFSET(address_space_nrpages));
	}

	FREEBUF(ld->list_ptr);
	FREEBUF(block_device_buf);
	FREEBUF(inode_buf);
	FREEBUF(address_space_buf);

	return nrpages;
}

/*
 * dump_vmlist() displays information from the vmlist.
 *
 * Walks the singly-linked vm_struct list headed by "vmlist" (and, when
 * present, the RHEL-specific "mod_vmlist" continuation).  Behavior is
 * driven by vi->flags: plain listing, GET_VMLIST/GET_VMLIST_COUNT
 * collection, GET_HIGHEST, VMLIST_VERIFY, or address search
 * (ADDRESS_SPECIFIED with KVADDR or PHYSADDR memtype).  Newer kernels
 * without vmlist are handed off to dump_vmap_area().
 */
static void
dump_vmlist(struct meminfo *vi)
{
	char buf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	ulong vmlist;
	ulong addr, size, next, pcheck, count, verified;
	physaddr_t paddr;
	int mod_vmlist;

	/* Kernels tracking vmalloc space via vmap_area take this path. */
	if (vt->flags & USE_VMAP_AREA) {
		dump_vmap_area(vi);
		return;
	}

	get_symbol_data("vmlist", sizeof(void *), &vmlist);
	next = vmlist;
	count = verified = 0;
	mod_vmlist = kernel_symbol_exists("mod_vmlist");

	while (next) {
		/* Print the column header once, and only for plain output. */
		if (!(pc->curcmd_flags & HEADER_PRINTED) && (next == vmlist) &&
		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
		    GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
			fprintf(fp, "%s ",
			    mkstring(buf, MAX(strlen("VM_STRUCT"),
				VADDR_PRLEN), CENTER|LJUST, "VM_STRUCT"));
			fprintf(fp, "%s SIZE\n",
			    mkstring(buf, (VADDR_PRLEN * 2) +
			    strlen(" - "), CENTER|LJUST, "ADDRESS RANGE"));
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		readmem(next+OFFSET(vm_struct_addr), KVADDR,
			&addr, sizeof(void *),
			"vmlist addr", FAULT_ON_ERROR);
		readmem(next+OFFSET(vm_struct_size), KVADDR,
			&size, sizeof(ulong),
			"vmlist size", FAULT_ON_ERROR);

		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
			/*
			 * Preceding GET_VMLIST_COUNT set vi->retval.
			 */
			if (vi->flags & GET_VMLIST) {
				if (count < vi->retval) {
					vi->vmlist[count].addr = addr;
					vi->vmlist[count].size = size;
				}
			}
			count++;
			goto next_entry;
		}

		if (!(vi->flags & ADDRESS_SPECIFIED) ||
		    ((vi->memtype == KVADDR) &&
		    ((vi->spec_addr >= addr) &&
		    (vi->spec_addr < (addr+size))))) {
			if (vi->flags & VMLIST_VERIFY) {
				verified++;
				break;
			}
			fprintf(fp, "%s%s %s - %s %6ld\n",
				mkstring(buf,VADDR_PRLEN, LONG_HEX|CENTER|LJUST,
				MKSTR(next)), space(MINSPACE-1),
				mkstring(buf1, VADDR_PRLEN, LONG_HEX|RJUST,
				MKSTR(addr)),
				mkstring(buf2, VADDR_PRLEN, LONG_HEX|LJUST,
				MKSTR(addr+size)),
				size);
		}

		if ((vi->flags & ADDRESS_SPECIFIED) &&
		     (vi->memtype == PHYSADDR)) {
			/* Physical search: translate each page of this
			 * vmalloc range and compare. */
			for (pcheck = addr; pcheck < (addr+size);
			     pcheck += PAGESIZE()) {
				if (!kvtop(NULL, pcheck, &paddr, 0))
					continue;
				if ((vi->spec_addr >= paddr) &&
				    (vi->spec_addr < (paddr+PAGESIZE()))) {
					if (vi->flags & GET_PHYS_TO_VMALLOC) {
						vi->retval = pcheck +
						    PAGEOFFSET(paddr);
						return;
					} else
						fprintf(fp,
						"%s%s %s - %s %6ld\n",
						    mkstring(buf, VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(next)),
						    space(MINSPACE-1),
						    mkstring(buf1, VADDR_PRLEN,
						    LONG_HEX|RJUST,
						    MKSTR(addr)),
						    mkstring(buf2, VADDR_PRLEN,
						    LONG_HEX|LJUST,
						    MKSTR(addr+size)), size);
					break;
				}
			}
		}

next_entry:
		readmem(next+OFFSET(vm_struct_next),
			KVADDR, &next, sizeof(void *),
			"vmlist next", FAULT_ON_ERROR);

		/* Chain into mod_vmlist once the primary list ends. */
		if (!next && mod_vmlist) {
			get_symbol_data("mod_vmlist", sizeof(void *), &next);
			mod_vmlist = FALSE;
		}
	}

	if (vi->flags & GET_HIGHEST)
		vi->retval = addr+size;

	if (vi->flags & GET_VMLIST_COUNT)
		vi->retval = count;

	if (vi->flags & VMLIST_VERIFY)
		vi->retval = verified;
}

/*
 * vmap_area-based counterpart of dump_vmlist() for kernels with
 * USE_VMAP_AREA set; same vi->flags semantics.
 */
static void
dump_vmap_area(struct meminfo *vi)
{
	int i, cnt;
	ulong start, end, vm_struct, flags;
	struct list_data list_data, *ld;
	char *vmap_area_buf;
	ulong size, pcheck, count, verified;
	physaddr_t paddr;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];

#define VM_VM_AREA 0x4   /* mm/vmalloc.c */

	vmap_area_buf = GETBUF(SIZE(vmap_area));
	start = count = verified = size = 0;

	ld = &list_data;
BZERO(ld, sizeof(struct list_data));
	/* Gather all vmap_area entries into an allocated pointer array. */
	ld->flags = LIST_HEAD_FORMAT|LIST_HEAD_POINTER|LIST_ALLOCATE;
	get_symbol_data("vmap_area_list", sizeof(void *), &ld->start);
	ld->list_head_offset = OFFSET(vmap_area_list);
	ld->end = symbol_value("vmap_area_list");
	cnt = do_list(ld);

	for (i = 0; i < cnt; i++) {
		/* Print the column header once, and only for plain output. */
		if (!(pc->curcmd_flags & HEADER_PRINTED) && (i == 0) &&
		    !(vi->flags & (GET_HIGHEST|GET_PHYS_TO_VMALLOC|
		    GET_VMLIST_COUNT|GET_VMLIST|VMLIST_VERIFY))) {
			fprintf(fp, "%s ",
			    mkstring(buf1, MAX(strlen("VMAP_AREA"),
				VADDR_PRLEN), CENTER|LJUST, "VMAP_AREA"));
			fprintf(fp, "%s ",
			    mkstring(buf1, MAX(strlen("VM_STRUCT"),
				VADDR_PRLEN), CENTER|LJUST, "VM_STRUCT"));
			fprintf(fp, "%s SIZE\n",
			    mkstring(buf1, (VADDR_PRLEN * 2) +
			    strlen(" - "), CENTER|LJUST, "ADDRESS RANGE"));
			pc->curcmd_flags |= HEADER_PRINTED;
		}

		readmem(ld->list_ptr[i], KVADDR, vmap_area_buf,
			SIZE(vmap_area), "vmap_area struct", FAULT_ON_ERROR);

		/* Only fully-installed vmalloc areas are of interest. */
		flags = ULONG(vmap_area_buf + OFFSET(vmap_area_flags));
		if (flags != VM_VM_AREA)
			continue;
		start = ULONG(vmap_area_buf + OFFSET(vmap_area_va_start));
		end = ULONG(vmap_area_buf + OFFSET(vmap_area_va_end));
		vm_struct = ULONG(vmap_area_buf + OFFSET(vmap_area_vm));
		size = end - start;

		if (vi->flags & (GET_VMLIST_COUNT|GET_VMLIST)) {
			/*
			 * Preceding GET_VMLIST_COUNT set vi->retval.
			 */
			if (vi->flags & GET_VMLIST) {
				if (count < vi->retval) {
					vi->vmlist[count].addr = start;
					vi->vmlist[count].size = size;
				}
			}
			count++;
			continue;
		}

		if (!(vi->flags & ADDRESS_SPECIFIED) ||
		    ((vi->memtype == KVADDR) &&
		    ((vi->spec_addr >= start) &&
		    (vi->spec_addr < (start+size))))) {
			if (vi->flags & VMLIST_VERIFY) {
				verified++;
				break;
			}
			fprintf(fp, "%s%s %s%s %s - %s %7ld\n",
				mkstring(buf1,VADDR_PRLEN,
				LONG_HEX|CENTER|LJUST,
				MKSTR(ld->list_ptr[i])), space(MINSPACE-1),
				mkstring(buf2,VADDR_PRLEN,
				LONG_HEX|CENTER|LJUST,
				MKSTR(vm_struct)), space(MINSPACE-1),
				mkstring(buf3, VADDR_PRLEN, LONG_HEX|RJUST,
				MKSTR(start)),
				mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST,
				MKSTR(start+size)),
				size);
		}

		if ((vi->flags & ADDRESS_SPECIFIED) &&
		     (vi->memtype == PHYSADDR)) {
			/* Physical search: translate each page of this
			 * vmalloc range and compare. */
			for (pcheck = start; pcheck < (start+size);
			     pcheck += PAGESIZE()) {
				if (!kvtop(NULL, pcheck, &paddr, 0))
					continue;
				if ((vi->spec_addr >= paddr) &&
				    (vi->spec_addr < (paddr+PAGESIZE()))) {
					if (vi->flags & GET_PHYS_TO_VMALLOC) {
						vi->retval = pcheck +
						    PAGEOFFSET(paddr);
						FREEBUF(ld->list_ptr);
						return;
					} else
						fprintf(fp,
						"%s%s %s%s %s - %s %7ld\n",
						    mkstring(buf1,VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(ld->list_ptr[i])),
						    space(MINSPACE-1),
						    mkstring(buf2, VADDR_PRLEN,
						    LONG_HEX|CENTER|LJUST,
						    MKSTR(vm_struct)),
						    space(MINSPACE-1),
						    mkstring(buf3, VADDR_PRLEN,
						    LONG_HEX|RJUST,
						    MKSTR(start)),
						    mkstring(buf4, VADDR_PRLEN,
						    LONG_HEX|LJUST,
						    MKSTR(start+size)), size);
					break;
				}
			}
		}
	}

	FREEBUF(ld->list_ptr);

	if (vi->flags & GET_HIGHEST)
		vi->retval = start+size;

	if (vi->flags & GET_VMLIST_COUNT)
		vi->retval = count;

	if (vi->flags & VMLIST_VERIFY)
		vi->retval = verified;
}

/*
 * dump_page_lists() displays information from the active_list,
 * inactive_dirty_list and inactive_clean_list from each zone.
 */
static int
dump_page_lists(struct meminfo *mi)
{
	int i, c, n, retval;
	ulong node_zones, pgdat;
	struct node_table *nt;
	struct list_data list_data, *ld;
	char buf[BUFSIZE];
	ulong value;
	ulong inactive_clean_pages, inactive_clean_list;
	int nr_active_pages, nr_inactive_pages;
	int nr_inactive_dirty_pages;

	ld = &list_data;

	retval = FALSE;
	nr_active_pages = nr_inactive_dirty_pages = -1;

	BZERO(ld, sizeof(struct list_data));
	ld->list_head_offset = OFFSET(page_lru);
	if (mi->flags & ADDRESS_SPECIFIED)
		ld->searchfor = mi->spec_addr;
	else if (mi->flags & VERBOSE)
		ld->flags |= VERBOSE;

	if (mi->flags & GET_ACTIVE_LIST) {
		if (!symbol_exists("active_list"))
			error(FATAL,
			    "active_list does not exist in this kernel\n");

		if (symbol_exists("nr_active_pages"))
			get_symbol_data("nr_active_pages", sizeof(int),
				&nr_active_pages);
		else
			error(FATAL,
			    "nr_active_pages does not exist in this kernel\n");

		ld->end = symbol_value("active_list");
		readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
			"LIST_HEAD contents", FAULT_ON_ERROR);

		if (mi->flags & VERBOSE)
			fprintf(fp, "active_list:\n");

		/* Head pointing to itself == empty list. */
		if (ld->start == ld->end) {
			c = 0;
			ld->searchfor = 0;
			if (mi->flags & VERBOSE)
				fprintf(fp, "(empty)\n");
		} else {
			hq_open();
			c = do_list(ld);
			hq_close();
		}

		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
			fprintf(fp, "%lx\n", ld->searchfor);
			retval = TRUE;
		} else {
			/* Cross-check the walked count with the kernel's. */
			fprintf(fp, "%snr_active_pages: %d ",
				mi->flags & VERBOSE ? "\n" : "",
				nr_active_pages);
			if (c != nr_active_pages)
				fprintf(fp, "(found %d)\n", c);
			else
				fprintf(fp, "(verified)\n");
		}
	}

	if (mi->flags & GET_INACTIVE_LIST) {
		if (!symbol_exists("inactive_list"))
			error(FATAL,
			    "inactive_list does not exist in this kernel\n");

		if (symbol_exists("nr_inactive_pages"))
			get_symbol_data("nr_inactive_pages", sizeof(int),
				&nr_inactive_pages);
		else
			/* NOTE(review): upstream copy-paste -- this message
			 * names nr_active_pages although the symbol being
			 * checked is nr_inactive_pages. */
			error(FATAL,
			    "nr_active_pages does not exist in this kernel\n");

		ld->end = symbol_value("inactive_list");
		readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
			"LIST_HEAD contents", FAULT_ON_ERROR);

		if (mi->flags & VERBOSE)
			fprintf(fp, "inactive_list:\n");

		if (ld->start == ld->end) {
			c = 0;
			ld->searchfor = 0;
			if (mi->flags & VERBOSE)
				fprintf(fp, "(empty)\n");
		} else {
			hq_open();
			c = do_list(ld);
			hq_close();
		}

		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
			fprintf(fp, "%lx\n", ld->searchfor);
			retval = TRUE;
		} else {
			fprintf(fp, "%snr_inactive_pages: %d ",
				mi->flags & VERBOSE ? "\n" : "",
				nr_inactive_pages);
			if (c != nr_inactive_pages)
				fprintf(fp, "(found %d)\n", c);
			else
				fprintf(fp, "(verified)\n");
		}
	}

	if (mi->flags & GET_INACTIVE_DIRTY) {
		if (!symbol_exists("inactive_dirty_list"))
			error(FATAL,
			"inactive_dirty_list does not exist in this kernel\n");

		if (symbol_exists("nr_inactive_dirty_pages"))
			get_symbol_data("nr_inactive_dirty_pages",
				sizeof(int), &nr_inactive_dirty_pages);
		else
			error(FATAL,
		    "nr_inactive_dirty_pages does not exist in this kernel\n");

		ld->end = symbol_value("inactive_dirty_list");
		readmem(ld->end, KVADDR, &ld->start, sizeof(void *),
			"LIST_HEAD contents", FAULT_ON_ERROR);

		if (mi->flags & VERBOSE)
			fprintf(fp, "%sinactive_dirty_list:\n",
				mi->flags & GET_ACTIVE_LIST ? "\n" : "");

		if (ld->start == ld->end) {
			c = 0;
			ld->searchfor = 0;
			if (mi->flags & VERBOSE)
				fprintf(fp, "(empty)\n");
		} else {
			hq_open();
			c = do_list(ld);
			hq_close();
		}

		if ((mi->flags & ADDRESS_SPECIFIED) && ld->searchfor) {
			fprintf(fp, "%lx\n", ld->searchfor);
			retval = TRUE;
		} else {
			fprintf(fp, "%snr_inactive_dirty_pages: %d ",
				mi->flags & VERBOSE ? "\n" : "",
				nr_inactive_dirty_pages);
			if (c != nr_inactive_dirty_pages)
				fprintf(fp, "(found %d)\n", c);
			else
				fprintf(fp, "(verified)\n");
		}
	}

	if (mi->flags & GET_INACTIVE_CLEAN) {
		/* inactive_clean lists live per-zone, not globally. */
		if (INVALID_MEMBER(zone_struct_inactive_clean_list))
			error(FATAL,
		    "inactive_clean_list(s) do not exist in this kernel\n");

		get_symbol_data("pgdat_list", sizeof(void *), &pgdat);

		if ((mi->flags & VERBOSE) &&
		    (mi->flags & (GET_ACTIVE_LIST|GET_INACTIVE_DIRTY)))
			fprintf(fp, "\n");

		for (n = 0; pgdat; n++) {
			nt = &vt->node_table[n];

			node_zones = nt->pgdat +
				OFFSET(pglist_data_node_zones);

			for (i = 0; i < vt->nr_zones; i++) {
				readmem(node_zones+OFFSET(zone_struct_name),
					KVADDR, &value, sizeof(void *),
					"zone_struct name", FAULT_ON_ERROR);
				if (!read_string(value, buf, BUFSIZE-1))
					sprintf(buf, "(unknown) ");

				if (mi->flags & VERBOSE) {
					if (vt->numnodes > 1)
						fprintf(fp, "NODE %d ", n);
					fprintf(fp,
					    "\"%s\" inactive_clean_list:\n",
						buf);
				}

				readmem(node_zones +
				    OFFSET(zone_struct_inactive_clean_pages),
					KVADDR, &inactive_clean_pages,
					sizeof(ulong),
					"inactive_clean_pages",
					FAULT_ON_ERROR);

				readmem(node_zones +
				    OFFSET(zone_struct_inactive_clean_list),
					KVADDR, &inactive_clean_list,
					sizeof(ulong),
					"inactive_clean_list",
					FAULT_ON_ERROR);

				ld->start = inactive_clean_list;
				ld->end = node_zones +
				    OFFSET(zone_struct_inactive_clean_list);
				if (mi->flags & ADDRESS_SPECIFIED)
					ld->searchfor = mi->spec_addr;

				if (ld->start == ld->end) {
					c = 0;
					ld->searchfor = 0;
					if (mi->flags & VERBOSE)
						fprintf(fp, "(empty)\n");
				} else {
					hq_open();
					c = do_list(ld);
					hq_close();
				}

				if ((mi->flags & ADDRESS_SPECIFIED) &&
				    ld->searchfor) {
					fprintf(fp, "%lx\n", ld->searchfor);
					retval = TRUE;
				} else {
					if (vt->numnodes > 1)
						fprintf(fp, "NODE %d ", n);
					fprintf(fp, "\"%s\" ", buf);
					fprintf(fp,
					    "inactive_clean_pages: %ld ",
						inactive_clean_pages);
					if (c != inactive_clean_pages)
						fprintf(fp,
						    "(found %d)\n", c);
					else
						fprintf(fp,
						    "(verified)\n");
				}

				node_zones += SIZE(zone_struct);
			}

			readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
				pglist_data_pgdat_next), KVADDR,
				&pgdat, sizeof(void *),
				"pglist_data node_next", FAULT_ON_ERROR);
		}
	}

	return retval;
}

/*
 * Check whether an address is a kmem_cache_t address, and if so, return
 * a pointer to the static buffer containing its name string.  Otherwise
 * return NULL on failure.
 */

#define PERCPU_NOT_SUPPORTED "per-cpu slab format not supported yet\n"

static char *
is_kmem_cache_addr(ulong vaddr, char *kbuf)
{
	ulong cache, cache_cache, name;
	long next_offset, name_offset;

	if (vt->flags & KMEM_CACHE_UNAVAIL) {
		error(INFO, "kmem cache slab subsystem not available\n");
		return NULL;
	}

	/* SLUB and list-based SLAB kernels use the common helper. */
	if (vt->flags & KMALLOC_SLUB)
		return is_kmem_cache_addr_common(vaddr, kbuf);

	if ((vt->flags & KMALLOC_COMMON) && !symbol_exists("cache_cache"))
		return is_kmem_cache_addr_common(vaddr, kbuf);

	/* Member names differ between the per-cpu and classic layouts. */
	name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
	next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);

	/* Walk the circular cache chain anchored at cache_cache. */
	cache = cache_cache = symbol_value("cache_cache");

	do {
		if (cache == vaddr) {
			/* Match: fetch the cache's name into kbuf. */
			if (vt->kmem_cache_namelen) {
				/* name is an embedded character array. */
				readmem(cache+name_offset, KVADDR, kbuf,
					vt->kmem_cache_namelen,
					"name array", FAULT_ON_ERROR);
			} else {
				/* name is a pointer to a kernel string. */
				readmem(cache+name_offset, KVADDR, &name,
					sizeof(name),
					"name", FAULT_ON_ERROR);
				if (!read_string(name, kbuf, BUFSIZE-1)) {
					if (vt->flags &
					    (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
						error(WARNING,
				      "cannot read kmem_cache_s.name string at %lx\n",
							name);
					else
						error(WARNING,
				      "cannot read kmem_cache_s.c_name string at %lx\n",
							name);
					sprintf(kbuf, "(unknown)");
				}
			}
			return kbuf;
		}

		readmem(cache+next_offset, KVADDR, &cache, sizeof(long),
			"kmem_cache_s next", FAULT_ON_ERROR);

		/* Per-cpu layouts link list_heads, not structures:
		 * back up to the enclosing kmem_cache_s. */
		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
			cache -= next_offset;

	} while (cache != cache_cache);

	return NULL;
}

/*
 * Note same functionality as above, but instead it just
 * dumps all slab cache names and their addresses.
 */
static void
kmem_cache_list(void)
{
	ulong cache, cache_cache, name;
	long next_offset, name_offset;
	char *cache_buf;
	int has_cache_chain;
	ulong cache_chain;
	char buf[BUFSIZE];

	if (vt->flags & KMEM_CACHE_UNAVAIL) {
		error(INFO, "kmem cache slab subsystem not available\n");
		return;
	}

	/* SLUB and common-layout SLAB kernels use the common helper. */
	if (vt->flags & (KMALLOC_SLUB|KMALLOC_COMMON)) {
		kmem_cache_list_common();
		return;
	}

	if (symbol_exists("cache_chain")) {
		has_cache_chain = TRUE;
		cache_chain = symbol_value("cache_chain");
	} else {
		has_cache_chain = FALSE;
		cache_chain = 0;
	}

	/* Member names differ between the per-cpu and classic layouts. */
	name_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_name) : OFFSET(kmem_cache_s_c_name);
	next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ?
		OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp);

	/* Walk the circular cache chain, printing address + name. */
	cache = cache_cache = symbol_value("cache_cache");

	cache_buf = GETBUF(SIZE(kmem_cache_s));

	do {
		readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s),
			"kmem_cache buffer", FAULT_ON_ERROR);

		if (vt->kmem_cache_namelen) {
			/* name is an embedded character array. */
			BCOPY(cache_buf+name_offset, buf,
				vt->kmem_cache_namelen);
		} else {
			/* name is a pointer to a kernel string. */
			name = ULONG(cache_buf + name_offset);
			if (!read_string(name, buf, BUFSIZE-1)) {
				if (vt->flags &
				    (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
					error(WARNING,
			      "cannot read kmem_cache_s.name string at %lx\n",
						name);
				else
					error(WARNING,
			      "cannot read kmem_cache_s.c_name string at %lx\n",
						name);
				sprintf(buf, "(unknown)");
			}
		}

		fprintf(fp, "%lx %s\n", cache, buf);

		cache = ULONG(cache_buf + next_offset);

		/* cache_chain is a bare list_head; dereference through it. */
		if (has_cache_chain && (cache == cache_chain))
			readmem(cache, KVADDR, &cache, sizeof(char *),
				"cache_chain", FAULT_ON_ERROR);

		/* Per-cpu layouts link list_heads, not structures:
		 * back up to the enclosing kmem_cache_s. */
		if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2))
			cache -= next_offset;

	} while (cache != cache_cache);

	FREEBUF(cache_buf);
}

/*
 * Translate an address to its physical page number, verify that the
 * page in fact belongs to the slab subsystem, and if so, return the
 * name of the cache to which it belongs.
 */
static char *
vaddr_to_kmem_cache(ulong vaddr, char *buf, int verbose)
{
	physaddr_t paddr;
	ulong page, cache, page_flags;

	if (!kvtop(NULL, vaddr, &paddr, 0)) {
		if (verbose)
			error(WARNING,
			    "cannot make virtual-to-physical translation: %lx\n",
				vaddr);
		return NULL;
	}

	if (!phys_to_page(paddr, &page)) {
		if (verbose)
			error(WARNING,
			    "cannot find mem_map page for address: %lx\n",
				vaddr);
		return NULL;
	}

	/*
	 * Verify the PG_slab page flag; if it's not set on this page,
	 * retry against the compound head page when the kernel layout
	 * supports it (SLUB, compound_head member, or common-slab with
	 * page.slab/page.first_page).
	 */
	if (vt->PG_slab) {
		readmem(page+OFFSET(page_flags), KVADDR,
			&page_flags, sizeof(ulong), "page.flags",
			FAULT_ON_ERROR);
		if (!(page_flags & (1 << vt->PG_slab))) {
			if (((vt->flags & KMALLOC_SLUB) ||
			     VALID_MEMBER(page_compound_head)) ||
			    ((vt->flags & KMALLOC_COMMON) &&
			     VALID_MEMBER(page_slab) &&
			     VALID_MEMBER(page_first_page))) {
				readmem(compound_head(page)+OFFSET(page_flags),
					KVADDR, &page_flags, sizeof(ulong),
					"page.flags", FAULT_ON_ERROR);
				if (!(page_flags & (1 << vt->PG_slab)))
					return NULL;
			} else
				return NULL;
		}
	}

	/*
	 * Fetch the cache pointer from whichever page-struct member this
	 * kernel stores it in; the fallbacks are ordered newest-first.
	 */
	if ((vt->flags & KMALLOC_SLUB) ||
	    ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab) &&
	     (VALID_MEMBER(page_compound_head) ||
	      VALID_MEMBER(page_first_page)))) {
		readmem(compound_head(page)+OFFSET(page_slab),
			KVADDR, &cache, sizeof(void *),
			"page.slab", FAULT_ON_ERROR);
	} else if (VALID_MEMBER(page_next))
		readmem(page+OFFSET(page_next), KVADDR,
			&cache, sizeof(void *),
			"page.next", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_list_next))
		readmem(page+OFFSET(page_list_next), KVADDR,
			&cache, sizeof(void *),
			"page.list.next", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_lru))
		readmem(page+OFFSET(page_lru)+OFFSET(list_head_next),
			KVADDR, &cache, sizeof(void *),
			"page.lru.next", FAULT_ON_ERROR);
	else
		error(FATAL,
		    "cannot determine slab cache from page struct\n");

	return(is_kmem_cache_addr(cache, buf));
}

/*
 * If vaddr is a page struct address under the SLAB_OVERLOAD_PAGE format,
 * return the name of the owning cache and pass back the compound head
 * page address through *page_head; otherwise return NULL.
 */
static char *
is_slab_overload_page(ulong vaddr, ulong *page_head, char *buf)
{
	ulong cache;
	char *p;

	if ((vt->flags & SLAB_OVERLOAD_PAGE) &&
	    is_page_ptr(vaddr, NULL) &&
	    VALID_MEMBER(page_slab) &&
	    (VALID_MEMBER(page_compound_head) ||
	     VALID_MEMBER(page_first_page))) {
		readmem(compound_head(vaddr)+OFFSET(page_slab), KVADDR,
			&cache, sizeof(void *), "page.slab", FAULT_ON_ERROR);
		p = is_kmem_cache_addr(cache, buf);
		if (p)
			*page_head = compound_head(vaddr);
		return p;
	}

	return NULL;
}

/*
 * Translate an address to its physical page number, verify that the
 * page in fact belongs to the slab subsystem, and if so, return the
 * address of the slab to which it belongs.
 */
static ulong
vaddr_to_slab(ulong vaddr)
{
	physaddr_t paddr;
	ulong page;
	ulong slab;

	if (!kvtop(NULL, vaddr, &paddr, 0)) {
		error(WARNING,
		    "cannot make virtual-to-physical translation: %lx\n",
			vaddr);
		return 0;
	}

	if (!phys_to_page(paddr, &page)) {
		error(WARNING,
		    "cannot find mem_map page for address: %lx\n",
			vaddr);
		return 0;
	}

	slab = 0;

	/*
	 * As in vaddr_to_kmem_cache(), the slab reference lives in a
	 * layout-dependent page member; fallbacks ordered newest-first.
	 */
	if ((vt->flags & KMALLOC_SLUB) || VALID_MEMBER(page_compound_head))
		slab = compound_head(page);
	else if (vt->flags & SLAB_OVERLOAD_PAGE)
		slab = compound_head(page);
	else if ((vt->flags & KMALLOC_COMMON) && VALID_MEMBER(page_slab_page))
		readmem(page+OFFSET(page_slab_page), KVADDR,
			&slab, sizeof(void *),
			"page.slab_page", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_prev))
		readmem(page+OFFSET(page_prev), KVADDR,
			&slab, sizeof(void *),
			"page.prev", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_list_prev))
		readmem(page+OFFSET(page_list_prev), KVADDR,
			&slab, sizeof(void *),
			"page.list.prev", FAULT_ON_ERROR);
	else if (VALID_MEMBER(page_lru))
		readmem(page+OFFSET(page_lru)+OFFSET(list_head_prev),
			KVADDR, &slab, sizeof(void *),
			"page.lru.prev", FAULT_ON_ERROR);
	else
		error(FATAL, "unknown definition of struct page?\n");

	return slab;
}

/*
 * Initialize any data required for scouring the kmalloc subsystem more
 * efficiently.
*/ char slab_hdr[100] = { 0 }; char kmem_cache_hdr[100] = { 0 }; char free_inuse_hdr[100] = { 0 }; static void kmem_cache_init(void) { ulong cache, cache_end, max_cnum, max_limit, max_cpus, tmp, tmp2; long cache_count, num_offset, next_offset; char *cache_buf; if (vt->flags & KMEM_CACHE_UNAVAIL) return; if ((vt->flags & KMEM_CACHE_DELAY) && !(pc->flags & RUNTIME)) return; if (DUMPFILE() && (vt->flags & KMEM_CACHE_INIT)) return; please_wait("gathering kmem slab cache data"); if (!strlen(slab_hdr)) { if (vt->flags & KMALLOC_SLUB) sprintf(slab_hdr, "SLAB%sMEMORY%sNODE TOTAL ALLOCATED FREE\n", space(VADDR_PRLEN > 8 ? 14 : 6), space(VADDR_PRLEN > 8 ? 12 : 4)); else sprintf(slab_hdr, "SLAB%sMEMORY%sTOTAL ALLOCATED FREE\n", space(VADDR_PRLEN > 8 ? 14 : 6), space(VADDR_PRLEN > 8 ? 12 : 4)); } if (!strlen(kmem_cache_hdr)) sprintf(kmem_cache_hdr, "CACHE%sNAME OBJSIZE ALLOCATED TOTAL SLABS SSIZE\n", space(VADDR_PRLEN > 8 ? 12 : 4)); if (!strlen(free_inuse_hdr)) sprintf(free_inuse_hdr, "FREE / [ALLOCATED]\n"); if (vt->flags & KMALLOC_SLUB) { kmem_cache_init_slub(); please_wait_done(); return; } num_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_num) : OFFSET(kmem_cache_s_c_num); next_offset = vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? OFFSET(kmem_cache_s_next) : OFFSET(kmem_cache_s_c_nextp); max_cnum = max_limit = max_cpus = cache_count = tmp2 = 0; /* * Pre-2.6 versions used the "cache_cache" as the head of the * slab chain list. 2.6 uses the "cache_chain" list_head. * In 3.6 SLAB and SLUB use the "slab_caches" list_head. 
*/ if (vt->flags & PERCPU_KMALLOC_V2) { if (kernel_symbol_exists("cache_chain")) { get_symbol_data("cache_chain", sizeof(ulong), &cache); cache_end = symbol_value("cache_chain"); } else if (kernel_symbol_exists("slab_caches")) { vt->flags |= KMALLOC_COMMON; get_symbol_data("slab_caches", sizeof(ulong), &cache); cache_end = symbol_value("slab_caches"); } else { error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); return; } cache -= next_offset; } else cache = cache_end = symbol_value("cache_cache"); if (!(pc->flags & RUNTIME)) { if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); } cache_buf = GETBUF(SIZE(kmem_cache_s)); hq_open(); do { cache_count++; if (!readmem(cache, KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) { FREEBUF(cache_buf); vt->flags |= KMEM_CACHE_UNAVAIL; error(INFO, "%sunable to initialize kmem slab cache subsystem\n\n", DUMPFILE() ? "\n" : ""); hq_close(); return; } if (!hq_enter(cache)) { error(WARNING, "%sduplicate kmem_cache entry in cache list: %lx\n", DUMPFILE() ? "\n" : "", cache); error(INFO, "unable to initialize kmem slab cache subsystem\n\n"); vt->flags |= KMEM_CACHE_UNAVAIL; hq_close(); return; } tmp = (ulong)(UINT(cache_buf + num_offset)); if (tmp > max_cnum) max_cnum = tmp; if ((tmp = max_cpudata_limit(cache, &tmp2)) > max_limit) max_limit = tmp; /* * Recognize and bail out on any max_cpudata_limit() failures. 
*/ if (vt->flags & KMEM_CACHE_UNAVAIL) { FREEBUF(cache_buf); hq_close(); return; } if (tmp2 > max_cpus) max_cpus = tmp2; cache = ULONG(cache_buf + next_offset); switch (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { case PERCPU_KMALLOC_V1: cache -= next_offset; break; case PERCPU_KMALLOC_V2: if (cache != cache_end) cache -= next_offset; break; } } while (cache != cache_end); hq_close(); FREEBUF(cache_buf); vt->kmem_max_c_num = max_cnum; vt->kmem_max_limit = max_limit; vt->kmem_max_cpus = max_cpus; vt->kmem_cache_count = cache_count; if (CRASHDEBUG(2)) { fprintf(fp, "kmem_cache_init:\n"); fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num); fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit); fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus); fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count); } if (!(vt->flags & KMEM_CACHE_INIT)) { if (vt->flags & PERCPU_KMALLOC_V1) ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, kmem_cache_s_name, "kmem_cache_s.name", NULL, sizeof(char)); else if (vt->flags & PERCPU_KMALLOC_V2) vt->kmem_cache_namelen = 0; else ARRAY_LENGTH_INIT(vt->kmem_cache_namelen, kmem_cache_s_c_name, "kmem_cache_s.c_name", NULL, 0); } please_wait_done(); vt->flags |= KMEM_CACHE_INIT; } static ulong kmem_cache_nodelists(ulong cache) { ulong nodelists = 0; if (vt->flags & NODELISTS_IS_PTR) { /* * nodelists is pointer to the array */ if (!readmem(cache+OFFSET(kmem_cache_s_lists), KVADDR, &nodelists, sizeof(ulong), "nodelists pointer", RETURN_ON_ERROR)) error(WARNING, "cannot read kmem_cache nodelists pointer"); return nodelists; } else return cache+OFFSET(kmem_cache_s_lists); } static int kmem_cache_downsize(void) { char *cache_buf; ulong kmem_cache; uint buffer_size, object_size; int nr_node_ids; int nr_cpu_ids; if (vt->flags & KMALLOC_SLUB) { if (kernel_symbol_exists("kmem_cache") && VALID_MEMBER(kmem_cache_objsize) && try_get_symbol_data("kmem_cache", sizeof(ulong), &kmem_cache) && readmem(kmem_cache + OFFSET(kmem_cache_objsize), 
KVADDR, &object_size, sizeof(int), "kmem_cache objsize/object_size", RETURN_ON_ERROR)) { ASSIGN_SIZE(kmem_cache) = object_size; if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache)); } if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache)) return TRUE; else return FALSE; } if ((THIS_KERNEL_VERSION < LINUX(2,6,22)) || !(vt->flags & PERCPU_KMALLOC_V2_NODES) || (!kernel_symbol_exists("cache_cache") && !kernel_symbol_exists("kmem_cache_boot")) || (!MEMBER_EXISTS("kmem_cache", "buffer_size") && !MEMBER_EXISTS("kmem_cache", "size"))) { return FALSE; } if (vt->flags & NODELISTS_IS_PTR) { /* * More recent kernels have kmem_cache.array[] sized * by the number of cpus plus the number of nodes. */ if (kernel_symbol_exists("kmem_cache_boot") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("kmem_cache_boot") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "kmem_cache_boot object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else if (kernel_symbol_exists("cache_cache") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("cache_cache") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "cache_cache object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else object_size = 0; /* * Older kernels have kmem_cache.array[] sized by * the number of cpus; real value is nr_cpu_ids, * but fallback is kt->cpus. 
*/ if (kernel_symbol_exists("nr_cpu_ids")) get_symbol_data("nr_cpu_ids", sizeof(int), &nr_cpu_ids); else nr_cpu_ids = kt->cpus; ARRAY_LENGTH(kmem_cache_s_array) = nr_cpu_ids; if (!object_size) ASSIGN_SIZE(kmem_cache_s) = OFFSET(kmem_cache_s_array) + sizeof(ulong) * nr_cpu_ids; if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s)); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } else if (vt->flags & SLAB_CPU_CACHE) { if (kernel_symbol_exists("kmem_cache_boot") && MEMBER_EXISTS("kmem_cache", "object_size") && readmem(symbol_value("kmem_cache_boot") + MEMBER_OFFSET("kmem_cache", "object_size"), KVADDR, &object_size, sizeof(int), "kmem_cache_boot object_size", RETURN_ON_ERROR)) ASSIGN_SIZE(kmem_cache_s) = object_size; else { object_size = OFFSET(kmem_cache_node) + (sizeof(void *) * vt->kmem_cache_len_nodes); ASSIGN_SIZE(kmem_cache_s) = object_size; } if (CRASHDEBUG(1)) fprintf(fp, "\nkmem_cache_downsize: %ld to %ld\n", STRUCT_SIZE("kmem_cache"), SIZE(kmem_cache_s)); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } cache_buf = GETBUF(SIZE(kmem_cache_s)); if (!readmem(symbol_value("cache_cache"), KVADDR, cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", RETURN_ON_ERROR)) { FREEBUF(cache_buf); return FALSE; } buffer_size = UINT(cache_buf + MEMBER_OFFSET("kmem_cache", "buffer_size")); if (buffer_size < SIZE(kmem_cache_s)) { if (kernel_symbol_exists("nr_node_ids")) { get_symbol_data("nr_node_ids", sizeof(int), &nr_node_ids); vt->kmem_cache_len_nodes = nr_node_ids; } else vt->kmem_cache_len_nodes = 1; if (buffer_size >= (uint)(OFFSET(kmem_cache_s_lists) + (sizeof(void *) * vt->kmem_cache_len_nodes))) ASSIGN_SIZE(kmem_cache_s) = buffer_size; else error(WARNING, "questionable cache_cache.buffer_size: %d\n", buffer_size); if (CRASHDEBUG(1)) { fprintf(fp, "\nkmem_cache_downsize: %ld to %d\n", STRUCT_SIZE("kmem_cache"), buffer_size); 
fprintf(fp, "kmem_cache_downsize: nr_node_ids: %ld\n", vt->kmem_cache_len_nodes); } FREEBUF(cache_buf); if (STRUCT_SIZE("kmem_cache") != SIZE(kmem_cache_s)) return TRUE; else return FALSE; } FREEBUF(cache_buf); return FALSE; } /* * Stash a list of presumably-corrupted slab cache addresses. */ static void mark_bad_slab_cache(ulong cache) { size_t sz; if (vt->nr_bad_slab_caches) { sz = sizeof(ulong) * (vt->nr_bad_slab_caches + 1); if (!(vt->bad_slab_caches = realloc(vt->bad_slab_caches, sz))) { error(INFO, "cannot realloc bad_slab_caches array\n"); vt->nr_bad_slab_caches = 0; return; } } else { if (!(vt->bad_slab_caches = (ulong *)malloc(sizeof(ulong)))) { error(INFO, "cannot malloc bad_slab_caches array\n"); return; } } vt->bad_slab_caches[vt->nr_bad_slab_caches++] = cache; } static int bad_slab_cache(ulong cache) { int i; for (i = 0; i < vt->nr_bad_slab_caches; i++) { if (vt->bad_slab_caches[i] == cache) return TRUE; } return FALSE; } /* * Determine the largest cpudata limit for a given cache. 
 */
static ulong
max_cpudata_limit(ulong cache, ulong *cpus)
{
	int i;
	ulong cpudata[NR_CPUS];
	int limit;
	ulong max_limit;
	ulong shared, percpu_ptr;
	ulong *start_address;

	/* dispatch on slab format; labels below handle newer layouts */
	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		goto kmem_cache_s_array_nodes;

	if (vt->flags & PERCPU_KMALLOC_V2)
		goto kmem_cache_s_array;

	if (INVALID_MEMBER(kmem_cache_s_cpudata)) {
		*cpus = 0;
		return 0;
	}

	/* PERCPU_KMALLOC_V1: kmem_cache_s.cpudata[] of cpucache_s */
	if (!readmem(cache+OFFSET(kmem_cache_s_cpudata),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata),
	    "cpudata array", RETURN_ON_ERROR))
		goto bail_out;

	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) &&
	     cpudata[i]; i++) {
		if (!readmem(cpudata[i]+OFFSET(cpucache_s_limit), KVADDR,
		    &limit, sizeof(int), "cpucache limit",
		    RETURN_ON_ERROR))
			goto bail_out;
		if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;
	return max_limit;

kmem_cache_s_array:

	/* PERCPU_KMALLOC_V2: kmem_cache_s.array[] of array_cache */
	if (!readmem(cache+OFFSET(kmem_cache_s_array),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
	    "array cache array", RETURN_ON_ERROR))
		goto bail_out;

	for (i = max_limit = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) &&
	     cpudata[i]; i++) {
		if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR,
		    &limit, sizeof(int), "array cache limit",
		    RETURN_ON_ERROR))
			goto bail_out;
		if (limit > max_limit)
			max_limit = limit;
	}

	/*
	 * If the shared list can be accessed, check its size as well.
	 */
	if (VALID_MEMBER(kmem_list3_shared) &&
	    VALID_MEMBER(kmem_cache_s_lists) &&
	    readmem(cache+OFFSET(kmem_cache_s_lists) +
	    OFFSET(kmem_list3_shared), KVADDR, &shared, sizeof(void *),
	    "kmem_list3 shared", RETURN_ON_ERROR|QUIET) &&
	    readmem(shared+OFFSET(array_cache_limit), KVADDR, &limit,
	    sizeof(int), "shared array_cache limit",
	    RETURN_ON_ERROR|QUIET)) {
		if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;
	return max_limit;

kmem_cache_s_array_nodes:

	/* PERCPU_KMALLOC_V2_NODES: per-cpu array caches plus nodelists */
	if (CRASHDEBUG(3))
		fprintf(fp, "kmem_cache: %lx\n", cache);

	if (vt->flags & SLAB_CPU_CACHE) {
		/* cpu_cache is a percpu pointer: apply per-cpu offsets */
		if (!readmem(cache+OFFSET(kmem_cache_cpu_cache), KVADDR,
		    &percpu_ptr, sizeof(void *), "kmem_cache.cpu_cache",
		    RETURN_ON_ERROR))
			goto bail_out;

		for (i = 0; i < kt->cpus; i++)
			cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i];
	} else {
		if (!readmem(cache+OFFSET(kmem_cache_s_array), KVADDR,
		    &cpudata[0], sizeof(ulong) *
		    MIN(NR_CPUS, ARRAY_LENGTH(kmem_cache_s_array)),
		    "array cache array", RETURN_ON_ERROR))
			goto bail_out;
	}

	for (i = max_limit = 0; i < kt->cpus; i++) {
		if (check_offline_cpu(i))
			continue;

		if (!cpudata[i])
			break;

		if (!readmem(cpudata[i]+OFFSET(array_cache_limit), KVADDR,
		    &limit, sizeof(int), "array cache limit",
		    RETURN_ON_ERROR)) {
			/* remember the broken cache so it can be skipped */
			error(INFO,
			    "kmem_cache: %lx: invalid array_cache pointer: %lx\n",
				cache, cpudata[i]);
			mark_bad_slab_cache(cache);
			return max_limit;
		}
		if (CRASHDEBUG(3))
			fprintf(fp, " array limit[%d]: %d\n", i, limit);
		if ((unsigned int)limit > INT_MAX)
			error(INFO,
			    "kmem_cache: %lx: invalid array limit[%d]: %d\n",
				cache, i, limit);
		else if (limit > max_limit)
			max_limit = limit;
	}

	*cpus = i;

	/*
	 * Check the shared list of all the nodes.
	 */
	start_address = (ulong *)GETBUF(sizeof(ulong) *
		vt->kmem_cache_len_nodes);

	if (VALID_MEMBER(kmem_list3_shared) &&
	    VALID_MEMBER(kmem_cache_s_lists) &&
	    readmem(kmem_cache_nodelists(cache), KVADDR, &start_address[0],
	    sizeof(ulong) * vt->kmem_cache_len_nodes,
	    "array nodelist array", RETURN_ON_ERROR)) {
		for (i = 0; i < vt->kmem_cache_len_nodes; i++) {
			if (start_address[i] == 0)
				continue;
			if (readmem(start_address[i] +
			    OFFSET(kmem_list3_shared), KVADDR, &shared,
			    sizeof(void *), "kmem_list3 shared",
			    RETURN_ON_ERROR|QUIET)) {
				if (!shared)
					break;
			} else
				continue;
			if (readmem(shared + OFFSET(array_cache_limit),
			    KVADDR, &limit, sizeof(int),
			    "shared array_cache limit",
			    RETURN_ON_ERROR|QUIET)) {
				if (CRASHDEBUG(3))
					fprintf(fp,
					    " shared node limit[%d]: %d\n",
						i, limit);
				if ((unsigned int)limit > INT_MAX)
					error(INFO,
					    "kmem_cache: %lx: shared node limit[%d]: %d\n",
						cache, i, limit);
				else if (limit > max_limit)
					max_limit = limit;
				break;
			}
		}
	}
	FREEBUF(start_address);
	return max_limit;

bail_out:
	vt->flags |= KMEM_CACHE_UNAVAIL;
	error(INFO, "unable to initialize kmem slab cache subsystem\n\n");
	*cpus = 0;
	return 0;
}

/*
 * Determine whether the current slab cache is contained in
 * the comma-separated list from a "kmem -I list1,list2 ..."
 * command entry.
 */
static int
ignore_cache(struct meminfo *si, char *name)
{
	int i, argc;
	char *p1;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];

	if (!si->ignore)
		return FALSE;

	/* turn the comma-separated list into whitespace-separated args */
	strcpy(buf, si->ignore);

	p1 = buf;
	while (*p1) {
		if (*p1 == ',')
			*p1 = ' ';
		p1++;
	}

	argc = parse_line(buf, arglist);

	for (i = 0; i < argc; i++) {
		if (STREQ(name, arglist[i]))
			return TRUE;
	}

	return FALSE;
}

/*
 * dump_kmem_cache() displays basic information about kmalloc() slabs.
 * At this point, only kmem_cache_s structure data for each slab is dumped.
 *
 * TBD: Given a specified physical address, and determine which slab it came
 * from, and whether it's in use or not.
*/ #define SLAB_C_MAGIC 0x4F17A36DUL #define SLAB_MAGIC_ALLOC 0xA5C32F2BUL /* slab is alive */ #define SLAB_MAGIC_DESTROYED 0xB2F23C5AUL /* slab has been destroyed */ #define SLAB_CFLGS_BUFCTL 0x020000UL /* bufctls in own cache */ #define SLAB_CFLGS_OBJFREELIST 0x40000000UL /* Freelist as an object */ #define KMEM_SLAB_ADDR (1) #define KMEM_BUFCTL_ADDR (2) #define KMEM_OBJECT_ADDR_FREE (3) #define KMEM_OBJECT_ADDR_INUSE (4) #define KMEM_OBJECT_ADDR_CACHED (5) #define KMEM_ON_SLAB (6) #define KMEM_OBJECT_ADDR_SHARED (7) #define KMEM_SLAB_OVERLOAD_PAGE (8) #define KMEM_SLAB_FREELIST (9) #define DUMP_KMEM_CACHE_INFO_V1() \ { \ char b1[BUFSIZE]; \ fprintf(fp, "%s %-18s %8ld ", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache)), \ buf, si->size); \ fprintf(fp, "%9ld %8ld %5ld %3ldk\n", \ vt->flags & PERCPU_KMALLOC_V1 ? \ si->inuse - si->cpucached_cache : \ si->inuse, si->num_slabs * si->c_num, \ si->num_slabs, si->slabsize/1024); \ } #define DUMP_KMEM_CACHE_INFO_V2() dump_kmem_cache_info_v2(si) static void dump_kmem_cache_info_v2(struct meminfo *si) { char b1[BUFSIZE]; char b2[BUFSIZE]; int namelen, sizelen, spacelen; fprintf(fp, "%s ", mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); namelen = strlen(si->curname); sprintf(b2, "%ld", si->size); sizelen = strlen(b2); spacelen = 0; if (namelen++ > 18) { spacelen = 29 - namelen - sizelen; fprintf(fp, "%s%s%ld ", si->curname, space(spacelen <= 0 ? 1 : spacelen), si->size); if (spacelen > 0) spacelen = 1; sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1); } else { fprintf(fp, "%-18s %8ld ", si->curname, si->size); sprintf(b1, "%c%dld ", '%', 9); } fprintf(fp, b1, vt->flags & (PERCPU_KMALLOC_V2) ? si->inuse - si->cpucached_cache : si->inuse); fprintf(fp, "%8ld %s%5ld %s%3ldk\n", si->num_slabs * si->c_num, si->num_slabs < 100000 ? " " : "", si->num_slabs, (si->slabsize/1024) < 1000 ? 
" " : "", si->slabsize/1024); } #define DUMP_SLAB_INFO() \ { \ char b1[BUFSIZE], b2[BUFSIZE]; \ ulong allocated, freeobjs, slab; \ if (vt->flags & SLAB_OVERLOAD_PAGE) \ slab = si->slab - OFFSET(page_lru); \ else \ slab = si->slab; \ if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) { \ allocated = si->s_inuse - si->cpucached_slab; \ freeobjs = si->c_num - allocated - si->cpucached_slab; \ } else { \ allocated = si->s_inuse; \ freeobjs = si->c_num - si->s_inuse; \ } \ fprintf(fp, "%s %s %5ld %9ld %4ld\n", \ mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(slab)), \ mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->s_mem)), \ si->c_num, allocated, \ vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2) ? \ freeobjs + si->cpucached_slab : freeobjs); \ } static void dump_kmem_cache(struct meminfo *si) { char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name, magic; int cnt; char *p1; if (vt->flags & (PERCPU_KMALLOC_V1|PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache called with PERCPU_KMALLOC_V[12] set\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_c_nextp), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; si->cache_buf = GETBUF(SIZE(kmem_cache_s)); do { if ((si->flags & VERBOSE) && !si->reqname && 
!(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); readmem(si->cache, KVADDR, si->cache_buf, SIZE(kmem_cache_s), "kmem_cache buffer", FAULT_ON_ERROR); if (vt->kmem_cache_namelen) { BCOPY(si->cache_buf + OFFSET(kmem_cache_s_c_name), buf, vt->kmem_cache_namelen); } else { name = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_name)); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.c_name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); goto next_cache; } si->curname = buf; if (CRASHDEBUG(1)) fprintf(fp, "cache: %lx %s\n", si->cache, si->curname); console("cache: %lx %s\n", si->cache, si->curname); magic = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_magic)); if (magic == SLAB_C_MAGIC) { si->size = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_org_size)); if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: c_org_size: %ld\n", si->curname, si->size); si->errors++; } } si->c_flags = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_flags)); si->c_offset = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_offset)); si->order = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_gfporder)); si->c_num = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_num)); do_slab_chain(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) DUMP_KMEM_CACHE_INFO_V1(); if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
vaddr_to_slab(si->spec_addr) : 0; do_slab_chain(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO_V1(); fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(ON-SLAB kmem_slab_t)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; } break; } } } else { error(INFO, "\"%s\" cache: invalid c_magic: %lx\n", si->curname, magic); si->errors++; } next_cache: si->cache = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_nextp)); } while (si->cache != cache_cache); FREEBUF(si->cache_buf); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); } /* * dump_kmem_cache() adapted for newer percpu slab format. */ static void dump_kmem_cache_percpu_v1(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_cache; ulong name; int cnt; uint tmp_val; /* Used as temporary variable to read sizeof(int) and assigned to ulong variable. 
We are doing this to mask the endian issue */ char *p1; if (!(vt->flags & PERCPU_KMALLOC_V1)) error(FATAL, "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V1\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); for (i = 0; i < vt->kmem_max_cpus; i++) si->cpudata[i] = (ulong *) GETBUF(vt->kmem_max_limit * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) { readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &cache_cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); } else si->cache = cache_cache = symbol_value("cache_cache"); if (si->flags & ADDRESS_SPECIFIED) { if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; do { if ((si->flags & VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? 
"\n" : "", kmem_cache_hdr); if (vt->kmem_cache_namelen) { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, buf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, &name, sizeof(ulong), "name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); goto next_cache; } si->curname = buf; readmem(si->cache+OFFSET(kmem_cache_s_objsize), KVADDR, &tmp_val, sizeof(uint), "objsize", FAULT_ON_ERROR); si->size = (ulong)tmp_val; if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: objsize: %ld\n", si->curname, si->size); si->errors++; } } readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); si->c_flags = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_gfporder), KVADDR, &tmp_val, sizeof(uint), "gfporder", FAULT_ON_ERROR); si->order = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_num), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s num", FAULT_ON_ERROR); si->c_num = (ulong)tmp_val; do_slab_chain_percpu_v1(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { DUMP_KMEM_CACHE_INFO_V1(); if (CRASHDEBUG(3)) dump_struct("kmem_cache_s", si->cache, 0); } if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs * (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { gather_cpudata_list_v1(si); si->slab = (si->flags & ADDRESS_SPECIFIED) ? 
vaddr_to_slab(si->spec_addr) : 0; do_slab_chain_percpu_v1(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO_V1(); fprintf(fp, "%s", slab_hdr); gather_slab_cached_count(si); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp,"(kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(slab_s)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_CACHED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (cpu %d cache)\n", si->container ? si->container : (ulong)si->spec_addr, si->cpu); break; } break; } } next_cache: readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &si->cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); si->cache -= OFFSET(kmem_cache_s_next); } while (si->cache != cache_cache); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); FREEBUF(si->kmem_bufctl); for (i = 0; i < vt->kmem_max_cpus; i++) FREEBUF(si->cpudata[i]); } /* * Updated for 2.6 slab substructure. */ static void dump_kmem_cache_percpu_v2(struct meminfo *si) { int i; char buf[BUFSIZE]; char kbuf[BUFSIZE]; char *reqname; ulong cache_end; ulong name, page_head; int cnt; uint tmp_val; /* Used as temporary variable to read sizeof(int) and assigned to ulong variable. 
We are doing this to mask the endian issue */ char *p1; if (!(vt->flags & PERCPU_KMALLOC_V2)) error(FATAL, "dump_kmem_cache_percpu called without PERCPU_KMALLOC_V2\n"); si->found = si->retval = 0; reqname = NULL; if ((!(si->flags & VERBOSE) || si->reqname) && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); si->addrlist = (ulong *)GETBUF((vt->kmem_max_c_num+1) * sizeof(ulong)); si->kmem_bufctl = (int *)GETBUF((vt->kmem_max_c_num+1) * sizeof(int)); if (vt->flags & SLAB_OVERLOAD_PAGE) { si->freelist = si->kmem_bufctl; si->freelist_index_size = slab_freelist_index_size(); } for (i = 0; i < vt->kmem_max_cpus; i++) si->cpudata[i] = (ulong *) GETBUF(vt->kmem_max_limit * sizeof(ulong)); if(vt->flags & PERCPU_KMALLOC_V2_NODES) si->shared_array_cache = (ulong *) GETBUF(vt->kmem_cache_len_nodes * (vt->kmem_max_limit+1) * sizeof(ulong)); else si->shared_array_cache = (ulong *) GETBUF((vt->kmem_max_limit+1) * sizeof(ulong)); cnt = 0; if (si->flags & CACHE_SET) readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &cache_end, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); else { if (vt->flags & KMALLOC_COMMON) { get_symbol_data("slab_caches", sizeof(ulong), &si->cache); si->cache -= OFFSET(kmem_cache_s_next); cache_end = symbol_value("slab_caches"); } else { get_symbol_data("cache_chain", sizeof(ulong), &si->cache); si->cache -= OFFSET(kmem_cache_s_next); cache_end = symbol_value("cache_chain"); } } if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_overload_page(si->spec_addr, &page_head, kbuf))) { si->flags |= SLAB_OVERLOAD_PAGE_PTR; si->spec_addr = page_head; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); return; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; do { if ((si->flags & 
VERBOSE) && !si->reqname && !(si->flags & ADDRESS_SPECIFIED)) fprintf(fp, "%s%s", cnt++ ? "\n" : "", kmem_cache_hdr); if (vt->kmem_cache_namelen) { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, buf, vt->kmem_cache_namelen, "name array", FAULT_ON_ERROR); } else { readmem(si->cache+OFFSET(kmem_cache_s_name), KVADDR, &name, sizeof(ulong), "name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) { error(WARNING, "cannot read kmem_cache_s.name string at %lx\n", name); sprintf(buf, "(unknown)"); } } if (reqname && !STREQ(reqname, buf)) goto next_cache; if (ignore_cache(si, buf)) { fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache, buf); goto next_cache; } if (bad_slab_cache(si->cache)) { fprintf(fp, "%lx %-18s [INVALID/CORRUPTED]\n", si->cache, buf); goto next_cache; } si->curname = buf; readmem(si->cache+OFFSET(kmem_cache_s_objsize), KVADDR, &tmp_val, sizeof(uint), "objsize", FAULT_ON_ERROR); si->size = (ulong)tmp_val; if (!si->size) { if (STREQ(si->curname, "kmem_cache")) si->size = SIZE(kmem_cache_s); else { error(INFO, "\"%s\" cache: objsize: %ld\n", si->curname, si->size); si->errors++; } } readmem(si->cache+OFFSET(kmem_cache_s_flags), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s flags", FAULT_ON_ERROR); si->c_flags = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_gfporder), KVADDR, &tmp_val, sizeof(uint), "gfporder", FAULT_ON_ERROR); si->order = (ulong)tmp_val; readmem(si->cache+OFFSET(kmem_cache_s_num), KVADDR, &tmp_val, sizeof(uint), "kmem_cache_s num", FAULT_ON_ERROR); si->c_num = (ulong)tmp_val; if (vt->flags & PERCPU_KMALLOC_V2_NODES) { if (vt->flags & SLAB_OVERLOAD_PAGE) do_slab_chain_slab_overload_page(SLAB_GET_COUNTS, si); else do_slab_chain_percpu_v2_nodes(SLAB_GET_COUNTS, si); } else do_slab_chain_percpu_v2(SLAB_GET_COUNTS, si); if (!(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) { DUMP_KMEM_CACHE_INFO_V2(); if (CRASHDEBUG(3)) dump_struct("kmem_cache_s", si->cache, 0); } if (si->flags == GET_SLAB_PAGES) si->retval += (si->num_slabs 
* (si->slabsize/PAGESIZE())); if (si->flags & (VERBOSE|ADDRESS_SPECIFIED)) { if (!(vt->flags & PERCPU_KMALLOC_V2_NODES)) gather_cpudata_list_v2(si); si->slab = (si->flags & ADDRESS_SPECIFIED) ? vaddr_to_slab(si->spec_addr) : 0; if (vt->flags & PERCPU_KMALLOC_V2_NODES) { if (vt->flags & SLAB_OVERLOAD_PAGE) do_slab_chain_slab_overload_page(SLAB_WALKTHROUGH, si); else do_slab_chain_percpu_v2_nodes(SLAB_WALKTHROUGH, si); } else do_slab_chain_percpu_v2(SLAB_WALKTHROUGH, si); if (si->found) { fprintf(fp, "%s", kmem_cache_hdr); DUMP_KMEM_CACHE_INFO_V2(); fprintf(fp, "%s", slab_hdr); gather_slab_cached_count(si); DUMP_SLAB_INFO(); switch (si->found) { case KMEM_BUFCTL_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp,"(kmem_bufctl_t)\n"); break; case KMEM_SLAB_ADDR: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(slab)\n"); break; case KMEM_ON_SLAB: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(unused part of slab)\n"); break; case KMEM_SLAB_FREELIST: fprintf(fp, " %lx ", (ulong)si->spec_addr); fprintf(fp, "(on-slab freelist)\n"); break; case KMEM_SLAB_OVERLOAD_PAGE: si->flags &= ~ADDRESS_SPECIFIED; dump_slab_objects_percpu(si); si->flags |= ADDRESS_SPECIFIED; break; case KMEM_OBJECT_ADDR_FREE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_INUSE: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " [%lx]\n", si->container ? si->container : (ulong)si->spec_addr); break; case KMEM_OBJECT_ADDR_CACHED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (cpu %d cache)\n", si->container ? si->container : (ulong)si->spec_addr, si->cpu); break; case KMEM_OBJECT_ADDR_SHARED: fprintf(fp, "%s", free_inuse_hdr); fprintf(fp, " %lx (shared cache)\n", si->container ? 
si->container : (ulong)si->spec_addr); break; } break; } } next_cache: readmem(si->cache+OFFSET(kmem_cache_s_next), KVADDR, &si->cache, sizeof(ulong), "kmem_cache_s next", FAULT_ON_ERROR); if (si->cache != cache_end) si->cache -= OFFSET(kmem_cache_s_next); } while (si->cache != cache_end); if ((si->flags & ADDRESS_SPECIFIED) && !si->found) error(INFO, "%s: address not found in cache: %lx\n", reqname, si->spec_addr); if (si->errors) error(INFO, "%ld error%s encountered\n", si->errors, si->errors > 1 ? "s" : ""); FREEBUF(si->addrlist); FREEBUF(si->kmem_bufctl); for (i = 0; i < vt->kmem_max_cpus; i++) FREEBUF(si->cpudata[i]); FREEBUF(si->shared_array_cache); } /* * Walk through the slab chain hanging off a kmem_cache_s structure, * gathering basic statistics. * * TBD: Given a specified physical address, determine whether it's in this * slab chain, and whether it's in use or not. */ #define INSLAB(obj, si) \ ((ulong)((ulong)(obj) & ~(si->slabsize-1)) == si->s_mem) static void do_slab_chain(int cmd, struct meminfo *si) { ulong tmp, magic; ulong kmem_slab_end; char *kmem_slab_s_buf; si->slabsize = (power(2, si->order) * PAGESIZE()); kmem_slab_end = si->cache + OFFSET(kmem_cache_s_c_offset); switch (cmd) { case SLAB_GET_COUNTS: si->slab = ULONG(si->cache_buf + OFFSET(kmem_cache_s_c_firstp)); if (slab_data_saved(si)) return; si->num_slabs = si->inuse = 0; if (si->slab == kmem_slab_end) return; kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s)); do { if (received_SIGINT()) { FREEBUF(kmem_slab_s_buf); restart(0); } readmem(si->slab, KVADDR, kmem_slab_s_buf, SIZE(kmem_slab_s), "kmem_slab_s buffer", FAULT_ON_ERROR); magic = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_magic)); if (magic == SLAB_MAGIC_ALLOC) { tmp = ULONG(kmem_slab_s_buf + OFFSET(kmem_slab_s_s_inuse)); si->inuse += tmp; si->num_slabs++; } else { fprintf(fp, "\"%s\" cache: invalid s_magic: %lx\n", si->curname, magic); si->errors++; FREEBUF(kmem_slab_s_buf); return; } si->slab = ULONG(kmem_slab_s_buf + 
OFFSET(kmem_slab_s_s_nextp));

		} while (si->slab != kmem_slab_end);

		FREEBUF(kmem_slab_s_buf);
		save_slab_data(si);
		break;

	case SLAB_WALKTHROUGH:
		if (!si->slab)
			si->slab = ULONG(si->cache_buf +
				OFFSET(kmem_cache_s_c_firstp));

		if (si->slab == kmem_slab_end)
			return;

		if (CRASHDEBUG(1)) {
			fprintf(fp, "search cache: [%s] ", si->curname);
			if (si->flags & ADDRESS_SPECIFIED)
				fprintf(fp, "for %llx", si->spec_addr);
			fprintf(fp, "\n");
		}

		si->slab_buf = kmem_slab_s_buf = GETBUF(SIZE(kmem_slab_s));

		do {
			if (received_SIGINT()) {
				FREEBUF(kmem_slab_s_buf);
				restart(0);
			}

			readmem(si->slab, KVADDR, kmem_slab_s_buf,
				SIZE(kmem_slab_s), "kmem_slab_s buffer",
				FAULT_ON_ERROR);

			dump_slab(si);

			if (si->found) {
				FREEBUF(kmem_slab_s_buf);
				return;
			}

			si->slab = ULONG(kmem_slab_s_buf +
				OFFSET(kmem_slab_s_s_nextp));

		} while (si->slab != kmem_slab_end);

		FREEBUF(kmem_slab_s_buf);
		break;
	}
}

/*
 * do_slab_chain() adapted for newer percpu slab format.
 */

#define SLAB_BASE(X) (PTOB(BTOP(X)))

#define INSLAB_PERCPU(obj, si) \
	((ulong)((ulong)(obj) & ~(si->slabsize-1)) == SLAB_BASE(si->s_mem))

#define SLAB_CHAINS (3)

static char *slab_chain_name_v1[] = {"full", "partial", "free"};

/*
 * Walk the full/partial/free slab_s lists of a kmem_cache_s, either
 * gathering inuse/slab counts (SLAB_GET_COUNTS) or dumping each slab
 * (SLAB_WALKTHROUGH).  Each slab is sanity-checked with verify_slab_v1()
 * before being trusted; a failed check marks the list "borked" and ends
 * the walk of that list.
 */
static void
do_slab_chain_percpu_v1(long cmd, struct meminfo *si)
{
	int i, tmp, s;
	int list_borked;
	char *slab_s_buf;
	ulong specified_slab;
	ulong last;
	ulong slab_chains[SLAB_CHAINS];

	list_borked = 0;
	si->slabsize = (power(2, si->order) * PAGESIZE());
	si->cpucached_slab = 0;

	/*
	 * Older kernels have a single "slabs" list; newer ones split it
	 * into full/partial/free.
	 */
	if (VALID_MEMBER(kmem_cache_s_slabs)) {
		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs);
		slab_chains[1] = 0;
		slab_chains[2] = 0;
	} else {
		slab_chains[0] = si->cache + OFFSET(kmem_cache_s_slabs_full);
		slab_chains[1] = si->cache + OFFSET(kmem_cache_s_slabs_partial);
		slab_chains[2] = si->cache + OFFSET(kmem_cache_s_slabs_free);
	}

	if (CRASHDEBUG(1)) {
		fprintf(fp, "[ %s: %lx ", si->curname, si->cache);
		fprintf(fp, "full: %lx partial: %lx free: %lx ]\n",
			slab_chains[0], slab_chains[1], slab_chains[2]);
	}

	switch (cmd)
	{
	case SLAB_GET_COUNTS:
		si->flags |= SLAB_GET_COUNTS;
		si->flags &= ~SLAB_WALKTHROUGH;
		si->cpucached_cache = 0;
		si->num_slabs = si->inuse = 0;
		gather_cpudata_list_v1(si);

		slab_s_buf = GETBUF(SIZE(slab_s));

		for (s = 0; s < SLAB_CHAINS; s++) {
			if (!slab_chains[s])
				continue;

			if (!readmem(slab_chains[s], KVADDR, &si->slab,
			    sizeof(ulong), "first slab",
			    QUIET|RETURN_ON_ERROR)) {
				error(INFO,
				    "%s: %s list: bad slab pointer: %lx\n",
					si->curname,
					slab_chain_name_v1[s],
					slab_chains[s]);
				list_borked = 1;
				continue;
			}

			if (slab_data_saved(si)) {
				FREEBUF(slab_s_buf);
				return;
			}

			/* Empty list: the head points back to itself. */
			if (si->slab == slab_chains[s])
				continue;

			last = slab_chains[s];

			do {
				if (received_SIGINT()) {
					FREEBUF(slab_s_buf);
					restart(0);
				}

				if (!verify_slab_v1(si, last, s)) {
					list_borked = 1;
					continue;
				}
				last = si->slab - OFFSET(slab_s_list);

				readmem(si->slab, KVADDR, slab_s_buf,
					SIZE(slab_s), "slab_s buffer",
					FAULT_ON_ERROR);
				tmp = INT(slab_s_buf + OFFSET(slab_s_inuse));
				si->inuse += tmp;

				/* Live system: per-cpu caches may change
				 * under us -- re-gather each iteration. */
				if (ACTIVE())
					gather_cpudata_list_v1(si);

				si->s_mem = ULONG(slab_s_buf +
					OFFSET(slab_s_s_mem));
				gather_slab_cached_count(si);

				si->num_slabs++;

				si->slab = ULONG(slab_s_buf +
					OFFSET(slab_s_list));
				si->slab -= OFFSET(slab_s_list);

				/*
				 * Check for slab transition. (Tony Dziedzic)
				 */
				for (i = 0; i < SLAB_CHAINS; i++) {
					if ((i != s) &&
					    (si->slab == slab_chains[i])) {
						error(NOTE,
					"%s: slab chain inconsistency: %s list\n",
							si->curname,
							slab_chain_name_v1[s]);
						list_borked = 1;
					}
				}

			} while (si->slab != slab_chains[s] && !list_borked);
		}

		FREEBUF(slab_s_buf);
		if (!list_borked)
			save_slab_data(si);
		break;

	case SLAB_WALKTHROUGH:
		specified_slab = si->slab;
		si->flags |= SLAB_WALKTHROUGH;
		si->flags &= ~SLAB_GET_COUNTS;

		for (s = 0; s < SLAB_CHAINS; s++) {
			if (!slab_chains[s])
				continue;

			if (!specified_slab) {
				if (!readmem(slab_chains[s], KVADDR,
				    &si->slab, sizeof(ulong), "slabs",
				    QUIET|RETURN_ON_ERROR)) {
					error(INFO,
				            "%s: %s list: bad slab pointer: %lx\n",
						si->curname,
						slab_chain_name_v1[s],
						slab_chains[s]);
					list_borked = 1;
					continue;
				}
				last = slab_chains[s];
			} else
				last = 0;

			if (si->slab == slab_chains[s])
				continue;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "search cache: [%s] ", si->curname);
				if (si->flags & ADDRESS_SPECIFIED)
					fprintf(fp, "for %llx", si->spec_addr);
				fprintf(fp, "\n");
			}

			do {
				if (received_SIGINT())
					restart(0);

				if (!verify_slab_v1(si, last, s)) {
					list_borked = 1;
					continue;
				}
				last = si->slab - OFFSET(slab_s_list);

				dump_slab_percpu_v1(si);

				if (si->found) {
					return;
				}

				readmem(si->slab+OFFSET(slab_s_list),
					KVADDR, &si->slab, sizeof(ulong),
					"slab list", FAULT_ON_ERROR);
				si->slab -= OFFSET(slab_s_list);

			} while (si->slab != slab_chains[s] && !list_borked);
		}
		break;
	}
}

/*
 * Try to preclude any attempt to translate a bogus slab structure.
*/ static int verify_slab_v1(struct meminfo *si, ulong last, int s) { char slab_s_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v1[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_s_buf, SIZE(slab_s), "slab_s buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *) (slab_s_buf + OFFSET(slab_s_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_s_buf + OFFSET(slab_s_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v1; switch (s) { case 0: /* full -- but can be one singular list */ if (VALID_MEMBER(kmem_cache_s_slabs_full) && (inuse != si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v1: s_mem = ULONG(slab_s_buf + OFFSET(slab_s_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? 
FALSE : TRUE); } /* * Updated for 2.6 slab substructure. */ static char *slab_chain_name_v2[] = {"partial", "full", "free"}; static void do_slab_chain_percpu_v2(long cmd, struct meminfo *si) { int i, tmp, s; int list_borked; char *slab_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; list_borked = 0; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; slab_chains[0] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_full); slab_chains[2] = si->cache + OFFSET(kmem_cache_s_lists) + OFFSET(kmem_list3_slabs_free); if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } switch (cmd) { case SLAB_GET_COUNTS: si->flags |= SLAB_GET_COUNTS; si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; gather_cpudata_list_v2(si); slab_buf = GETBUF(SIZE(slab)); for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_buf); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_buf); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); tmp = INT(slab_buf + OFFSET(slab_inuse)); si->inuse += tmp; if (ACTIVE()) gather_cpudata_list_v2(si); si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_buf + OFFSET(slab_list)); si->slab -= 
OFFSET(slab_list); /* * Check for slab transition. (Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } FREEBUF(slab_buf); if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= SLAB_WALKTHROUGH; si->flags &= ~SLAB_GET_COUNTS; for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) restart(0); if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); dump_slab_percpu_v2(si); if (si->found) { return; } readmem(si->slab+OFFSET(slab_list), KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); si->slab -= OFFSET(slab_list); } while (si->slab != slab_chains[s] && !list_borked); } break; } } /* * Added To Traverse the Nodelists */ static void do_slab_chain_percpu_v2_nodes(long cmd, struct meminfo *si) { int i, tmp, s, node; int list_borked; char *slab_buf; ulong specified_slab; ulong last; ulong slab_chains[SLAB_CHAINS]; ulong *start_address; int index; list_borked = 0; slab_buf = NULL; si->slabsize = (power(2, si->order) * PAGESIZE()); si->cpucached_slab = 0; start_address = (ulong *)GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes); if (!readmem(kmem_cache_nodelists(si->cache), KVADDR, &start_address[0], sizeof(ulong) 
* vt->kmem_cache_len_nodes, "array nodelist array", RETURN_ON_ERROR)) error(INFO, "cannot read kmem_cache nodelists array"); switch (cmd) { case SLAB_GET_COUNTS: si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE); si->flags &= ~SLAB_WALKTHROUGH; si->cpucached_cache = 0; si->num_slabs = si->inuse = 0; slab_buf = GETBUF(SIZE(slab)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "first slab", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } if (slab_data_saved(si)) { FREEBUF(slab_buf); FREEBUF(start_address); return; } if (si->slab == slab_chains[s]) continue; last = slab_chains[s]; do { if (received_SIGINT()) { FREEBUF(slab_buf); FREEBUF(start_address); restart(0); } if (!verify_slab_v2(si, last, s)) { list_borked = 1; continue; } last = si->slab - OFFSET(slab_list); readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); tmp = INT(slab_buf + OFFSET(slab_inuse)); si->inuse += tmp; si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); gather_slab_cached_count(si); si->num_slabs++; si->slab = ULONG(slab_buf + OFFSET(slab_list)); si->slab -= OFFSET(slab_list); /* * Check for slab transition. 
(Tony Dziedzic) */ for (i = 0; i < SLAB_CHAINS; i++) { if ((i != s) && (si->slab == slab_chains[i])) { error(NOTE, "%s: slab chain inconsistency: %s list\n", si->curname, slab_chain_name_v2[s]); list_borked = 1; } } } while (si->slab != slab_chains[s] && !list_borked); } } if (!list_borked) save_slab_data(si); break; case SLAB_WALKTHROUGH: specified_slab = si->slab; si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE); si->flags &= ~SLAB_GET_COUNTS; slab_buf = GETBUF(SIZE(slab)); for (index = 0; (index < vt->kmem_cache_len_nodes); index++) { if (vt->flags & NODES_ONLINE) { node = next_online_node(index); if (node < 0) break; if (node != index) continue; } if (start_address[index] == 0) continue; slab_chains[0] = start_address[index] + OFFSET(kmem_list3_slabs_partial); slab_chains[1] = start_address[index] + OFFSET(kmem_list3_slabs_full); slab_chains[2] = start_address[index] + OFFSET(kmem_list3_slabs_free); gather_cpudata_list_v2_nodes(si, index); si->flags &= ~SLAB_FIRST_NODE; if (CRASHDEBUG(1)) { fprintf(fp, "[ %s: %lx ", si->curname, si->cache); fprintf(fp, "partial: %lx full: %lx free: %lx ]\n", slab_chains[0], slab_chains[1], slab_chains[2]); } for (s = 0; s < SLAB_CHAINS; s++) { if (!slab_chains[s]) continue; if (!specified_slab) { if (!readmem(slab_chains[s], KVADDR, &si->slab, sizeof(ulong), "slabs", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: " "bad slab pointer: %lx\n", si->curname, slab_chain_name_v2[s], slab_chains[s]); list_borked = 1; continue; } last = slab_chains[s]; } else last = 0; if (si->slab == slab_chains[s]) continue; readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", FAULT_ON_ERROR); si->s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (CRASHDEBUG(1)) { fprintf(fp, "search cache: [%s] ", si->curname); if (si->flags & ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(slab_buf); restart(0); } if (!verify_slab_v2(si, last, s)) { 
						list_borked = 1;
						continue;
					}
					last = si->slab - OFFSET(slab_list);

					dump_slab_percpu_v2(si);

					if (si->found) {
						FREEBUF(start_address);
						FREEBUF(slab_buf);
						return;
					}

					readmem(si->slab+OFFSET(slab_list),
						KVADDR, &si->slab,
						sizeof(ulong), "slab list",
						FAULT_ON_ERROR);
					si->slab -= OFFSET(slab_list);

				} while (si->slab != slab_chains[s] &&
					!list_borked);
			}
		}
		break;
	}

	FREEBUF(slab_buf);
	FREEBUF(start_address);
}

/*
 * Return the size of the kernel's freelist_idx_t typedef if it exists,
 * otherwise fall back to sizeof(int).
 */
static int
slab_freelist_index_size(void)
{
	struct datatype_member datatype, *dm;

	dm = &datatype;
	BZERO(dm, sizeof(*dm));
	dm->name = "freelist_idx_t";

	if (is_typedef(dm->name))
		return DATATYPE_SIZE(dm);

	if (CRASHDEBUG(1))
		error(INFO, "freelist_idx_t does not exist\n");

	return sizeof(int);
}

/*
 * Variant of do_slab_chain_percpu_v2_nodes() for kernels where the slab
 * metadata is overloaded onto the page struct: list linkage is page.lru,
 * the inuse count is page.active, and si->slab holds the address of the
 * page's lru member rather than a slab struct.
 */
static void
do_slab_chain_slab_overload_page(long cmd, struct meminfo *si)
{
	int i, tmp, s, node;
	int list_borked;
	char *page_buf;
	ulong specified_slab;
	ulong last;
	ulong slab_chains[SLAB_CHAINS];
	ulong *start_address;
	int index;

	list_borked = 0;
	page_buf = NULL;
	si->slabsize = (power(2, si->order) * PAGESIZE());
	si->cpucached_slab = 0;
	start_address = (ulong *)GETBUF(sizeof(ulong) *
		vt->kmem_cache_len_nodes);

	if (!readmem(kmem_cache_nodelists(si->cache), KVADDR,
	    &start_address[0], sizeof(ulong) * vt->kmem_cache_len_nodes,
	    "array nodelist array", RETURN_ON_ERROR))
		error(INFO, "cannot read kmem_cache nodelists array");

	switch (cmd)
	{
	case SLAB_GET_COUNTS:
		si->flags |= (SLAB_GET_COUNTS|SLAB_FIRST_NODE);
		si->flags &= ~SLAB_WALKTHROUGH;
		si->cpucached_cache = 0;
		si->num_slabs = si->inuse = 0;
		page_buf = GETBUF(SIZE(page));

		for (index = 0; (index < vt->kmem_cache_len_nodes); index++) {
			/* Skip array slots for offline nodes. */
			if (vt->flags & NODES_ONLINE) {
				node = next_online_node(index);
				if (node < 0)
					break;
				if (node != index)
					continue;
			}

			if (start_address[index] == 0)
				continue;

			slab_chains[0] = start_address[index] +
				OFFSET(kmem_list3_slabs_partial);
			slab_chains[1] = start_address[index] +
				OFFSET(kmem_list3_slabs_full);
			slab_chains[2] = start_address[index] +
				OFFSET(kmem_list3_slabs_free);

			gather_cpudata_list_v2_nodes(si, index);

			si->flags &= ~SLAB_FIRST_NODE;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "[ %s: %lx ",
					si->curname, si->cache);
				fprintf(fp,
				    "partial: %lx full: %lx free: %lx ]\n",
					slab_chains[0], slab_chains[1],
					slab_chains[2]);
			}

			for (s = 0; s < SLAB_CHAINS; s++) {
				if (!slab_chains[s])
					continue;

				if (!readmem(slab_chains[s], KVADDR,
				    &si->slab, sizeof(ulong), "first slab",
				    QUIET|RETURN_ON_ERROR)) {
					error(INFO,
				            "%s: %s list: bad page/slab pointer: %lx\n",
						si->curname,
						slab_chain_name_v2[s],
						slab_chains[s]);
					list_borked = 1;
					continue;
				}

				if (slab_data_saved(si)) {
					FREEBUF(page_buf);
					FREEBUF(start_address);
					return;
				}

				if (si->slab == slab_chains[s])
					continue;

				last = slab_chains[s];

				do {
					if (received_SIGINT()) {
						FREEBUF(page_buf);
						FREEBUF(start_address);
						restart(0);
					}

					if (!verify_slab_overload_page(si, last, s)) {
						list_borked = 1;
						continue;
					}
					last = si->slab;

					/* si->slab points at page.lru; back
					 * up to the page struct itself. */
					readmem(si->slab - OFFSET(page_lru),
						KVADDR, page_buf, SIZE(page),
						"page (slab) buffer",
						FAULT_ON_ERROR);
					tmp = INT(page_buf +
						OFFSET(page_active));
					si->inuse += tmp;

					si->s_mem = ULONG(page_buf +
						OFFSET(page_s_mem));
					gather_slab_cached_count(si);

					si->num_slabs++;

					si->slab = ULONG(page_buf +
						OFFSET(page_lru));

					/*
					 * Check for slab transition.
					 * (Tony Dziedzic)
					 */
					for (i = 0; i < SLAB_CHAINS; i++) {
						if ((i != s) &&
						    (si->slab ==
						    slab_chains[i])) {
							error(NOTE,
					"%s: slab chain inconsistency: %s list\n",
								si->curname,
							slab_chain_name_v2[s]);
							list_borked = 1;
						}
					}

				} while (si->slab != slab_chains[s] &&
					!list_borked);
			}
		}

		if (!list_borked)
			save_slab_data(si);
		break;

	case SLAB_WALKTHROUGH:
		if (si->flags & SLAB_OVERLOAD_PAGE_PTR) {
			specified_slab = si->spec_addr;
			si->slab = si->spec_addr + OFFSET(page_lru);
		} else {
			specified_slab = si->slab;
			if (si->slab)
				si->slab += OFFSET(page_lru);
		}
		si->flags |= (SLAB_WALKTHROUGH|SLAB_FIRST_NODE);
		si->flags &= ~SLAB_GET_COUNTS;
		page_buf = GETBUF(SIZE(page));

		for (index = 0; (index < vt->kmem_cache_len_nodes); index++) {
			if (vt->flags & NODES_ONLINE) {
				node = next_online_node(index);
				if (node < 0)
					break;
				if (node != index)
					continue;
			}

			if (start_address[index] == 0)
				continue;

			slab_chains[0] = start_address[index] +
				OFFSET(kmem_list3_slabs_partial);
			slab_chains[1] = start_address[index] +
				OFFSET(kmem_list3_slabs_full);
			slab_chains[2] = start_address[index] +
				OFFSET(kmem_list3_slabs_free);

			gather_cpudata_list_v2_nodes(si, index);

			si->flags &= ~SLAB_FIRST_NODE;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "[ %s: %lx ",
					si->curname, si->cache);
				fprintf(fp,
				    "partial: %lx full: %lx free: %lx ]\n",
					slab_chains[0], slab_chains[1],
					slab_chains[2]);
			}

			for (s = 0; s < SLAB_CHAINS; s++) {
				if (!slab_chains[s])
					continue;

				if (!specified_slab) {
					if (!readmem(slab_chains[s], KVADDR,
					    &si->slab, sizeof(ulong), "slabs",
					    QUIET|RETURN_ON_ERROR)) {
						error(INFO,
						    "%s: %s list: "
						    "bad page/slab pointer: %lx\n",
							si->curname,
							slab_chain_name_v2[s],
							slab_chains[s]);
						list_borked = 1;
						continue;
					}
					last = slab_chains[s];
				} else
					last = 0;

				if (si->slab == slab_chains[s])
					continue;

				readmem(si->slab - OFFSET(page_lru),
					KVADDR, page_buf, SIZE(page),
					"page (slab) buffer",
					FAULT_ON_ERROR);

				si->s_mem = ULONG(page_buf +
					OFFSET(page_s_mem));

				if (CRASHDEBUG(1)) {
					fprintf(fp, "search cache: [%s] ",
						si->curname);
					if (si->flags &
ADDRESS_SPECIFIED) fprintf(fp, "for %llx", si->spec_addr); fprintf(fp, "\n"); } do { if (received_SIGINT()) { FREEBUF(start_address); FREEBUF(page_buf); restart(0); } if (!verify_slab_overload_page(si, last, s)) { list_borked = 1; continue; } last = si->slab; dump_slab_overload_page(si); if (si->found) { FREEBUF(start_address); FREEBUF(page_buf); return; } readmem(si->slab, KVADDR, &si->slab, sizeof(ulong), "slab list", FAULT_ON_ERROR); } while (si->slab != slab_chains[s] && !list_borked); } } break; } FREEBUF(page_buf); FREEBUF(start_address); } /* * Try to preclude any attempt to translate a bogus slab structure. */ static int verify_slab_v2(struct meminfo *si, ulong last, int s) { char slab_buf[BUFSIZE]; struct kernel_list_head *list_head; unsigned int inuse; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; errcnt = 0; if (!readmem(si->slab, KVADDR, slab_buf, SIZE(slab), "slab buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); return FALSE; } list_head = (struct kernel_list_head *)(slab_buf + OFFSET(slab_list)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } inuse = UINT(slab_buf + OFFSET(slab_inuse)); if (inuse > si->c_num) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((inuse == 0) || (inuse == si->c_num)) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 1: /* full */ if (inuse != si->c_num) { error(INFO, "%s: %s list: slab: %lx bad 
inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; case 2: /* free */ if (inuse > 0) { error(INFO, "%s: %s list: slab: %lx bad inuse counter: %ld\n", si->curname, list, si->slab, inuse); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(slab_buf + OFFSET(slab_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; return(errcnt ? FALSE : TRUE); } static int verify_slab_overload_page(struct meminfo *si, ulong last, int s) { char *page_buf; struct kernel_list_head *list_head; unsigned int active; ulong s_mem; char *list; int errcnt; list = slab_chain_name_v2[s]; page_buf = GETBUF(SIZE(page)); errcnt = 0; if (!readmem(si->slab - OFFSET(page_lru), KVADDR, page_buf, SIZE(page), "page (slab) buffer", QUIET|RETURN_ON_ERROR)) { error(INFO, "%s: %s list: bad slab pointer: %lx\n", si->curname, list, si->slab); FREEBUF(page_buf); return FALSE; } list_head = (struct kernel_list_head *)(page_buf + OFFSET(page_lru)); if (!IS_KVADDR((ulong)list_head->next) || !accessible((ulong)list_head->next)) { error(INFO, "%s: %s list: page/slab: %lx bad next pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->next); errcnt++; } if (last && (last != (ulong)list_head->prev)) { error(INFO, "%s: %s list: page/slab: %lx bad prev pointer: %lx\n", si->curname, list, si->slab, (ulong)list_head->prev); errcnt++; } active = UINT(page_buf + OFFSET(page_active)); if (active > si->c_num) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } if (!last) goto no_inuse_check_v2; switch (s) { case 0: /* partial */ if ((active == 0) || (active == si->c_num)) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 1: /* full */ if (active != si->c_num) { error(INFO, "%s: %s list: page/slab: %lx 
bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; case 2: /* free */ if (active > 0) { error(INFO, "%s: %s list: page/slab: %lx bad active counter: %ld\n", si->curname, list, si->slab, active); errcnt++; } break; } no_inuse_check_v2: s_mem = ULONG(page_buf + OFFSET(page_s_mem)); if (!IS_KVADDR(s_mem) || !accessible(s_mem)) { error(INFO, "%s: %s list: page/slab: %lx bad s_mem pointer: %lx\n", si->curname, list, si->slab, s_mem); errcnt++; } si->errors += errcnt; FREEBUF(page_buf); return(errcnt ? FALSE : TRUE); } /* * If it's a dumpfile, save the essential slab data to avoid re-reading * the whole slab chain more than once. This may seem like overkill, but * if the problem is a memory leak, or just the over-use of the buffer_head * cache, it's painful to wait each time subsequent kmem -s or -i commands * simply need the basic slab counts. */ struct slab_data { ulong cache_addr; int num_slabs; int inuse; ulong cpucached_cache; }; #define NO_SLAB_DATA ((void *)(-1)) static void save_slab_data(struct meminfo *si) { int i; if (si->flags & SLAB_DATA_NOSAVE) { si->flags &= ~SLAB_DATA_NOSAVE; return; } if (ACTIVE()) return; if (vt->slab_data == NO_SLAB_DATA) return; if (!vt->slab_data) { if (!(vt->slab_data = (struct slab_data *) malloc(sizeof(struct slab_data) * vt->kmem_cache_count))) { error(INFO, "cannot malloc slab_data table"); vt->slab_data = NO_SLAB_DATA; return; } for (i = 0; i < vt->kmem_cache_count; i++) { vt->slab_data[i].cache_addr = (ulong)NO_SLAB_DATA; vt->slab_data[i].num_slabs = 0; vt->slab_data[i].inuse = 0; vt->slab_data[i].cpucached_cache = 0; } } for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == si->cache) break; if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) { vt->slab_data[i].cache_addr = si->cache; vt->slab_data[i].num_slabs = si->num_slabs; vt->slab_data[i].inuse = si->inuse; vt->slab_data[i].cpucached_cache = si->cpucached_cache; break; } } } static int 
slab_data_saved(struct meminfo *si) { int i; if (ACTIVE() || !vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) return FALSE; for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == si->cache) { si->inuse = vt->slab_data[i].inuse; si->num_slabs = vt->slab_data[i].num_slabs; si->cpucached_cache = vt->slab_data[i].cpucached_cache; return TRUE; } } return FALSE; } static void dump_saved_slab_data(void) { int i; if (!vt->slab_data || (vt->slab_data == NO_SLAB_DATA)) return; for (i = 0; i < vt->kmem_cache_count; i++) { if (vt->slab_data[i].cache_addr == (ulong)NO_SLAB_DATA) break; fprintf(fp, " cache: %lx inuse: %5d num_slabs: %3d cpucached_cache: %ld\n", vt->slab_data[i].cache_addr, vt->slab_data[i].inuse, vt->slab_data[i].num_slabs, vt->slab_data[i].cpucached_cache); } } /* * Dump the contents of a kmem slab. */ static void dump_slab(struct meminfo *si) { si->s_mem = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_mem)); si->s_mem = PTOB(BTOP(si->s_mem)); if (si->flags & ADDRESS_SPECIFIED) { if (INSLAB(si->slab, si) && (si->spec_addr >= si->slab) && (si->spec_addr < (si->slab+SIZE(kmem_slab_s)))) { si->found = KMEM_SLAB_ADDR; return; } if (INSLAB(si->spec_addr, si)) si->found = KMEM_ON_SLAB; /* But don't return yet... */ else return; } si->s_freep = VOID_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_freep)); si->s_inuse = ULONG(si->slab_buf + OFFSET(kmem_slab_s_s_inuse)); si->s_index = ULONG_PTR(si->slab_buf + OFFSET(kmem_slab_s_s_index)); if (!(si->flags & ADDRESS_SPECIFIED)) { fprintf(fp, "%s", slab_hdr); DUMP_SLAB_INFO(); } dump_slab_objects(si); } /* * dump_slab() adapted for newer percpu slab format. */ static void dump_slab_percpu_v1(struct meminfo *si) { int tmp; readmem(si->slab+OFFSET(slab_s_s_mem), KVADDR, &si->s_mem, sizeof(ulong), "s_mem", FAULT_ON_ERROR); /* * Include the array of kmem_bufctl_t's appended to slab. 
	 */
	tmp = SIZE(slab_s) + (SIZE(kmem_bufctl_t) * si->c_num);

	if (si->flags & ADDRESS_SPECIFIED) {
		/* Address inside slab header or its appended bufctl array? */
		if (INSLAB_PERCPU(si->slab, si) &&
		    (si->spec_addr >= si->slab) &&
		    (si->spec_addr < (si->slab+tmp))) {
			if (si->spec_addr >= (si->slab + SIZE(slab_s)))
				si->found = KMEM_BUFCTL_ADDR;
			else
				si->found = KMEM_SLAB_ADDR;
		} else if (INSLAB_PERCPU(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	readmem(si->slab+OFFSET(slab_s_inuse),
	    KVADDR, &tmp, sizeof(int),
	    "inuse", FAULT_ON_ERROR);
	si->s_inuse = tmp;

	readmem(si->slab+OFFSET(slab_s_free),
	    KVADDR, &si->free, SIZE(kmem_bufctl_t),
	    "kmem_bufctl_t", FAULT_ON_ERROR);

	gather_slab_free_list_percpu(si);
	gather_slab_cached_count(si);

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects_percpu(si);
}

/*
 * Updated for 2.6 slab substructure.
 */
static void
dump_slab_percpu_v2(struct meminfo *si)
{
	int tmp;

	readmem(si->slab+OFFSET(slab_s_mem),
	    KVADDR, &si->s_mem, sizeof(ulong),
	    "s_mem", FAULT_ON_ERROR);

	/*
	 * Include the array of kmem_bufctl_t's appended to slab.
	 */
	tmp = SIZE(slab) + (SIZE(kmem_bufctl_t) * si->c_num);

	if (si->flags & ADDRESS_SPECIFIED) {
		/* Same header/bufctl classification as v1, with 2.6 offsets. */
		if (INSLAB_PERCPU(si->slab, si) &&
		    (si->spec_addr >= si->slab) &&
		    (si->spec_addr < (si->slab+tmp))) {
			if (si->spec_addr >= (si->slab + SIZE(slab)))
				si->found = KMEM_BUFCTL_ADDR;
			else
				si->found = KMEM_SLAB_ADDR;
		} else if (INSLAB_PERCPU(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet...
					    */
		else
			return;
	}

	readmem(si->slab+OFFSET(slab_inuse),
	    KVADDR, &tmp, sizeof(int),
	    "inuse", FAULT_ON_ERROR);
	si->s_inuse = tmp;

	readmem(si->slab+OFFSET(slab_free),
	    KVADDR, &si->free, SIZE(kmem_bufctl_t),
	    "kmem_bufctl_t", FAULT_ON_ERROR);

	gather_slab_free_list_percpu(si);
	gather_slab_cached_count(si);

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects_percpu(si);
}

/*
 * Dump a slab on kernels where the slab metadata is overloaded onto the
 * page structure itself (SLAB_OVERLOAD_PAGE): si->slab points at the
 * page's lru member, so the page starts OFFSET(page_lru) before it.
 */
static void
dump_slab_overload_page(struct meminfo *si)
{
	int tmp;
	ulong slab_overload_page, freelist;

	slab_overload_page = si->slab - OFFSET(page_lru);

	readmem(slab_overload_page + OFFSET(page_s_mem),
	    KVADDR, &si->s_mem, sizeof(ulong),
	    "page.s_mem", FAULT_ON_ERROR);
	readmem(slab_overload_page + OFFSET(page_freelist),
	    KVADDR, &freelist, sizeof(ulong),
	    "page.freelist", FAULT_ON_ERROR);

	if (si->flags & ADDRESS_SPECIFIED) {
		/* Inside the overloaded page struct, or merely on the slab? */
		if ((si->spec_addr >= slab_overload_page) &&
		    (si->spec_addr < (slab_overload_page+SIZE(page)))) {
			si->found = KMEM_SLAB_OVERLOAD_PAGE;
		} else if (INSLAB_PERCPU(si->spec_addr, si))
			si->found = KMEM_ON_SLAB;  /* But don't return yet... */
		else
			return;
	}

	/* page.active holds the in-use object count for overload pages. */
	readmem(slab_overload_page + OFFSET(page_active),
	    KVADDR, &tmp, sizeof(int),
	    "active", FAULT_ON_ERROR);
	si->s_inuse = tmp;

	gather_slab_free_list_slab_overload_page(si);
	gather_slab_cached_count(si);

	if (!(si->flags & ADDRESS_SPECIFIED)) {
		fprintf(fp, "%s", slab_hdr);
		DUMP_SLAB_INFO();
	}

	dump_slab_objects_percpu(si);
}

/*
 * Gather the free objects in a slab into the si->addrlist, checking for
 * specified addresses that are in-slab kmem_bufctls, and making error checks
 * along the way.  Object address checks are deferred to dump_slab_objects().
 */

/* TRUE if addr falls within the si->size bytes of object obj. */
#define INOBJECT(addr, obj) ((addr >= obj) && (addr < (obj+si->size)))

static void
gather_slab_free_list(struct meminfo *si)
{
	ulong *next, obj;
	ulong expected, cnt;

	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));

	if (!si->s_freep)
		return;

	cnt = 0;
	expected = si->c_num - si->s_inuse;

	/* Walk the s_freep chain, converting each bufctl to its object. */
	next = si->s_freep;
	do {
		if (cnt == si->c_num) {
			/* A cycle or corruption: more free entries than objects. */
			error(INFO,
			    "\"%s\" cache: too many objects found in slab free list\n",
			    si->curname);
			si->errors++;
			return;
		}

		/*
		 * Off-slab kmem_bufctls are contained in arrays of object
		 * pointers that point to:
		 *   1. next kmem_bufctl (or NULL) if the object is free.
		 *   2. to the object if it the object is in use.
		 *
		 * On-slab kmem_bufctls resides just after the object itself,
		 * and point to:
		 *   1. next kmem_bufctl (or NULL) if object is free.
		 *   2. the containing slab if the object is in use.
		 */
		if (si->c_flags & SLAB_CFLGS_BUFCTL)
			obj = si->s_mem + ((next - si->s_index) * si->c_offset);
		else
			obj = (ulong)next - si->c_offset;

		si->addrlist[cnt] = obj;

		if (si->flags & ADDRESS_SPECIFIED) {
			/* Requested address may be the bufctl word itself. */
			if (INSLAB(next, si) &&
			    (si->spec_addr >= (ulong)next) &&
			    (si->spec_addr < (ulong)(next + 1))) {
				si->found = KMEM_BUFCTL_ADDR;
				return;
			}
		}

		cnt++;

		if (!INSLAB(obj, si)) {
			error(INFO,
			    "\"%s\" cache: address not contained within slab: %lx\n",
			    si->curname, obj);
			si->errors++;
		}

		readmem((ulong)next, KVADDR, &next, sizeof(void *),
		    "s_freep chain entry", FAULT_ON_ERROR);
	} while (next);

	/* Cross-check against the slab's advertised free count. */
	if (cnt != expected) {
		error(INFO,
		    "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
		    si->curname, expected, cnt);
		si->errors++;
	}
}

/*
 * gather_slab_free_list() adapted for newer percpu slab format.
 */

/* Terminator index for the on-slab kmem_bufctl free chain. */
#define BUFCTL_END 0xffffFFFF

static void
gather_slab_free_list_percpu(struct meminfo *si)
{
	int i;
	ulong obj;
	ulong expected, cnt;
	int free_index;
	ulong kmembp;
	short *kbp;

	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));

	if (CRASHDEBUG(1))
		fprintf(fp, "slab: %lx si->s_inuse: %ld si->c_num: %ld\n",
		    si->slab, si->s_inuse, si->c_num);

	if (si->s_inuse == si->c_num )
		return;

	/* The kmem_bufctl array immediately follows the slab header. */
	kmembp = si->slab + SIZE_OPTION(slab_s, slab);
	readmem((ulong)kmembp, KVADDR, si->kmem_bufctl,
	    SIZE(kmem_bufctl_t) * si->c_num,
	    "kmem_bufctl array", FAULT_ON_ERROR);

	if (CRASHDEBUG(1)) {
		/* Debug dump; kmem_bufctl_t can be int- or short-sized. */
		for (i = 0; (SIZE(kmem_bufctl_t) == sizeof(int)) &&
		    (i < si->c_num); i++)
			fprintf(fp, "%d ", si->kmem_bufctl[i]);

		for (kbp = (short *)&si->kmem_bufctl[0], i = 0;
		    (SIZE(kmem_bufctl_t) == sizeof(short)) && (i < si->c_num);
		    i++)
			fprintf(fp, "%d ", *(kbp + i));

		fprintf(fp, "\n");
	}

	cnt = 0;
	expected = si->c_num - si->s_inuse;

	if (SIZE(kmem_bufctl_t) == sizeof(int)) {
		/* Follow the index chain until the BUFCTL_END sentinel. */
		for (free_index = si->free; free_index != BUFCTL_END;
		    free_index = si->kmem_bufctl[free_index]) {
			if (cnt == si->c_num) {
				error(INFO,
				    "\"%s\" cache: too many objects found in slab free list\n",
				    si->curname);
				si->errors++;
				return;
			}

			obj = si->s_mem + (free_index*si->size);
			si->addrlist[cnt] = obj;
			cnt++;
		}
	} else if (SIZE(kmem_bufctl_t) == sizeof(short)) {
		/* Same walk, but through short-sized indices. */
		kbp = (short *)&si->kmem_bufctl[0];

		for (free_index = si->free; free_index != BUFCTL_END;
		    free_index = (int)*(kbp + free_index)) {
			if (cnt == si->c_num) {
				error(INFO,
				    "\"%s\" cache: too many objects found in slab free list\n",
				    si->curname);
				si->errors++;
				return;
			}

			obj = si->s_mem + (free_index*si->size);
			si->addrlist[cnt] = obj;
			cnt++;
		}
	} else
		error(FATAL,
		    "size of kmem_bufctl_t (%d) not sizeof(int) or sizeof(short)\n",
		    SIZE(kmem_bufctl_t));

	if (cnt != expected) {
		error(INFO,
		    "\"%s\" cache: free object mismatch: expected: %ld found: %ld\n",
		    si->curname, expected, cnt);
		si->errors++;
	}
}

/*
 * Gather the free objects of a SLAB_OVERLOAD_PAGE slab, where the
 * freelist is an array of object indices hanging off page.freelist.
 */
static void
gather_slab_free_list_slab_overload_page(struct meminfo *si)
{
	int i, active, start_offset;
	ulong obj, objnr, cnt, freelist;
	unsigned char *ucharptr;
	unsigned short *ushortptr;
	unsigned int *uintptr;
	unsigned int cache_flags, overload_active;
	ulong slab_overload_page;

	if (CRASHDEBUG(1))
		fprintf(fp, "slab page: %lx active: %ld si->c_num: %ld\n",
		    si->slab - OFFSET(page_lru), si->s_inuse, si->c_num);

	if (si->s_inuse == si->c_num )
		return;

	/* si->slab points at page.lru; back up to the page itself. */
	slab_overload_page = si->slab - OFFSET(page_lru);
	readmem(slab_overload_page + OFFSET(page_freelist),
	    KVADDR, &freelist, sizeof(void *),
	    "page freelist", FAULT_ON_ERROR);
	readmem(freelist, KVADDR, si->freelist,
	    si->freelist_index_size * si->c_num,
	    "freelist array", FAULT_ON_ERROR);
	readmem(si->cache+OFFSET(kmem_cache_s_flags),
	    KVADDR, &cache_flags, sizeof(uint),
	    "kmem_cache_s flags", FAULT_ON_ERROR);
	readmem(slab_overload_page + OFFSET(page_active),
	    KVADDR, &overload_active, sizeof(uint),
	    "active", FAULT_ON_ERROR);

	BNEG(si->addrlist, sizeof(ulong) * (si->c_num+1));
	cnt = objnr = 0;
	ucharptr = NULL;
	ushortptr = NULL;
	uintptr = NULL;
	active = si->s_inuse;

	/*
	 * On an OBJFREELIST slab, the object might have been recycled
	 * and everything before the active count can be random data.
	 */
	start_offset = 0;
	if (cache_flags & SLAB_CFLGS_OBJFREELIST)
		start_offset = overload_active;

	/* Index width (1/2/4 bytes) varies with the cache's object count. */
	switch (si->freelist_index_size)
	{
	case 1:
		ucharptr = (unsigned char *)si->freelist + start_offset;
		break;
	case 2:
		ushortptr = (unsigned short *)si->freelist + start_offset;
		break;
	case 4:
		uintptr = (unsigned int *)si->freelist + start_offset;
		break;
	}

	for (i = start_offset; i < si->c_num; i++) {
		switch (si->freelist_index_size)
		{
		case 1:
			objnr = (ulong)*ucharptr++;
			break;
		case 2:
			objnr = (ulong)*ushortptr++;
			break;
		case 4:
			objnr = (ulong)*uintptr++;
			break;
		}
		if (objnr >= si->c_num) {
			error(INFO,
			    "\"%s\" cache: invalid/corrupt freelist entry: %ld\n",
			    si->curname, objnr);
			si->errors++;
		}
		/* Entries at or past the active count are the free objects. */
		if (i >= active) {
			obj = si->s_mem + (objnr * si->size);
			si->addrlist[cnt++] = obj;
			if (CRASHDEBUG(1))
				fprintf(fp, "%ld ", objnr);
		} else if (CRASHDEBUG(1))
			fprintf(fp, "[%ld] ", objnr);
	}
	if (CRASHDEBUG(1))
		fprintf(fp, "\n");
}

/*
 * Dump the FREE, [ALLOCATED] and objects of a slab.
 *
 * Classify one object (in "obj") against the gathered free list:
 * print it, bump the in-use count, and set si->found/si->container
 * when it contains the user-specified address.  Expanded inside the
 * per-object loops of dump_slab_objects(), so it uses (and may
 * "return" from) the caller's locals j, on_free_list and cnt.
 */
#define DUMP_SLAB_OBJECT() \
	for (j = on_free_list = 0; j < si->c_num; j++) { \
		if (obj == si->addrlist[j]) { \
			on_free_list = TRUE; \
			break; \
		} \
	} \
 \
	if (on_free_list) { \
		if (!(si->flags & ADDRESS_SPECIFIED)) \
			fprintf(fp, " %lx\n", obj); \
		if (si->flags & ADDRESS_SPECIFIED) { \
			if (INOBJECT(si->spec_addr, obj)) { \
				si->found = \
				    KMEM_OBJECT_ADDR_FREE; \
				si->container = obj; \
				return; \
			} \
		} \
	} else { \
		if (!(si->flags & ADDRESS_SPECIFIED)) \
			fprintf(fp, " [%lx]\n", obj); \
		cnt++; \
		if (si->flags & ADDRESS_SPECIFIED) { \
			if (INOBJECT(si->spec_addr, obj)) { \
				si->found = \
				    KMEM_OBJECT_ADDR_INUSE; \
				si->container = obj; \
				return; \
			} \
		} \
	}

static void
dump_slab_objects(struct meminfo *si)
{
	int i, j;
	ulong *next;
	int on_free_list;
	ulong cnt, expected;
	ulong bufctl, obj;

	gather_slab_free_list(si);

	if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB))
		return;

	cnt = 0;
	expected = si->s_inuse;
	si->container = 0;

	if (CRASHDEBUG(1))
		for (i = 0; i < si->c_num; i++) {
			fprintf(fp, "si->addrlist[%d]: %lx\n",
			    i, si->addrlist[i]);
		}

	if (!(si->flags & ADDRESS_SPECIFIED))
		fprintf(fp, "%s", free_inuse_hdr);

	/* For on-slab bufctls, c_offset is the distance between the start of
	 * an obj and its related bufctl.  For off-slab bufctls, c_offset is
	 * the distance between objs in the slab.
	 */
	if (si->c_flags & SLAB_CFLGS_BUFCTL) {
		for (i = 0, next = si->s_index; i < si->c_num; i++, next++) {
			obj = si->s_mem +
			    ((next - si->s_index) * si->c_offset);
			DUMP_SLAB_OBJECT();
		}
	} else {
		/*
		 * Get the "real" s_mem, i.e., without the offset stripped off.
		 * It contains the address of the first object.
		 */
		readmem(si->slab+OFFSET(kmem_slab_s_s_mem),
		    KVADDR, &obj, sizeof(ulong),
		    "s_mem", FAULT_ON_ERROR);

		for (i = 0; i < si->c_num; i++) {
			DUMP_SLAB_OBJECT();

			if (si->flags & ADDRESS_SPECIFIED) {
				/* The bufctl word trails each on-slab object. */
				bufctl = obj + si->c_offset;

				if ((si->spec_addr >= bufctl) &&
				    (si->spec_addr <
				    (bufctl + SIZE(kmem_bufctl_t)))) {
					si->found = KMEM_BUFCTL_ADDR;
					return;
				}
			}
			obj += (si->c_offset + SIZE(kmem_bufctl_t));
		}
	}

	if (cnt != expected) {
		error(INFO,
		    "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
		    si->curname, expected, cnt);
		si->errors++;
	}
}

/*
 * dump_slab_objects() adapted for newer percpu slab format.
 */
static void
dump_slab_objects_percpu(struct meminfo *si)
{
	int i, j;
	int on_free_list, on_cpudata_list, on_shared_list;
	ulong cnt, expected;
	ulong obj, freelist;

	/* An already-resolved address ends the walk, unless it still needs
	 * the overload-page freelist check at the bottom. */
	if ((si->flags & ADDRESS_SPECIFIED) && (si->found & ~KMEM_ON_SLAB))
		if (!(si->found & KMEM_SLAB_OVERLOAD_PAGE))
			return;

	cnt = 0;
	expected = si->s_inuse;
	si->container = 0;

	if (CRASHDEBUG(1))
		for (i = 0; i < si->c_num; i++) {
			fprintf(fp, "si->addrlist[%d]: %lx\n",
			    i, si->addrlist[i]);
		}

	if (!(si->flags & ADDRESS_SPECIFIED))
		fprintf(fp, "%s", free_inuse_hdr);

	/* Objects are laid out contiguously from s_mem, si->size apart. */
	for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
		on_free_list = FALSE;
		on_cpudata_list = FALSE;
		on_shared_list = FALSE;

		for (j = 0; j < si->c_num; j++) {
			if (obj == si->addrlist[j]) {
				on_free_list = TRUE;
				break;
			}
		}

		on_cpudata_list = check_cpudata_list(si, obj);
		on_shared_list = check_shared_list(si, obj);

		/* An object may legitimately be on at most one list. */
		if (on_free_list && on_cpudata_list) {
			error(INFO,
			    "\"%s\" cache: object %lx on both free and cpu %d lists\n",
			    si->curname, obj, si->cpu);
			si->errors++;
		}
		if (on_free_list && on_shared_list) {
			error(INFO,
			    "\"%s\" cache: object %lx on both free and shared lists\n",
			    si->curname, obj);
			si->errors++;
		}
		if (on_cpudata_list && on_shared_list) {
			error(INFO,
			    "\"%s\" cache: object %lx on both cpu %d and shared lists\n",
			    si->curname, obj, si->cpu);
			si->errors++;
		}

		if (on_free_list) {
			if (!(si->flags & ADDRESS_SPECIFIED))
				fprintf(fp, " %lx\n", obj);
			if (si->flags & ADDRESS_SPECIFIED) {
				if (INOBJECT(si->spec_addr, obj)) {
					si->found =
					    KMEM_OBJECT_ADDR_FREE;
					si->container = obj;
					return;
				}
			}
		} else if (on_cpudata_list) {
			if (!(si->flags & ADDRESS_SPECIFIED))
				fprintf(fp, " %lx (cpu %d cache)\n",
				    obj, si->cpu);
			cnt++;
			if (si->flags & ADDRESS_SPECIFIED) {
				if (INOBJECT(si->spec_addr, obj)) {
					si->found =
					    KMEM_OBJECT_ADDR_CACHED;
					si->container = obj;
					return;
				}
			}
		} else if (on_shared_list) {
			if (!(si->flags & ADDRESS_SPECIFIED))
				fprintf(fp, " %lx (shared cache)\n", obj);
			cnt++;
			if (si->flags & ADDRESS_SPECIFIED) {
				if (INOBJECT(si->spec_addr, obj)) {
					si->found =
					    KMEM_OBJECT_ADDR_SHARED;
					si->container = obj;
					return;
				}
			}
		} else {
			if (!(si->flags & ADDRESS_SPECIFIED))
				fprintf(fp, " [%lx]\n", obj);
			cnt++;
			if (si->flags & ADDRESS_SPECIFIED) {
				if (INOBJECT(si->spec_addr, obj)) {
					si->found =
					    KMEM_OBJECT_ADDR_INUSE;
					si->container = obj;
					return;
				}
			}
		}
	}

	if (cnt != expected) {
		error(INFO,
		    "\"%s\" cache: inuse object mismatch: expected: %ld found: %ld\n",
		    si->curname, expected, cnt);
		si->errors++;
	}

	/* The address may sit inside the overload page's freelist array,
	 * which lives between page.freelist and s_mem. */
	if ((si->flags & ADDRESS_SPECIFIED) &&
	    (vt->flags & SLAB_OVERLOAD_PAGE)) {
		readmem(si->slab - OFFSET(page_lru) + OFFSET(page_freelist),
		    KVADDR, &freelist, sizeof(ulong), "page.freelist",
		    FAULT_ON_ERROR);
		if ((si->spec_addr >= freelist) &&
		    (si->spec_addr < si->s_mem))
			si->found = KMEM_SLAB_FREELIST;
	}
}

/*
 * Determine how many of the "inuse" slab objects are actually cached
 * in the kmem_cache_s header.  Set the per-slab count and update the
 * cumulative per-cache count.  With the addition of the shared list
 * check, the terms "cpucached_cache" and "cpucached_slab" are somewhat
 * misleading.  But they both are types of objects that are cached
 * in the kmem_cache_s header, just not necessarily per-cpu.
 */
static void
gather_slab_cached_count(struct meminfo *si)
{
	int i;
	ulong obj;
	int in_cpudata, in_shared;

	si->cpucached_slab = 0;

	for (i = 0, obj = si->s_mem; i < si->c_num; i++, obj += si->size) {
		in_cpudata = in_shared = 0;
		if (check_cpudata_list(si, obj)) {
			in_cpudata = TRUE;
			si->cpucached_slab++;
			if (si->flags & SLAB_GET_COUNTS) {
				si->cpucached_cache++;
			}
		}
		if (check_shared_list(si, obj)) {
			in_shared = TRUE;
			/* Don't double-count an object seen on both lists. */
			if (!in_cpudata) {
				si->cpucached_slab++;
				if (si->flags & SLAB_GET_COUNTS) {
					si->cpucached_cache++;
				}
			}
		}
		if (in_cpudata && in_shared) {
			/* Inconsistent dump data: don't cache these counts. */
			si->flags |= SLAB_DATA_NOSAVE;
			if (!(si->flags & VERBOSE))
				error(INFO,
				    "\"%s\" cache: object %lx on both cpu %d and shared lists\n",
				    si->curname, obj, si->cpu);
		}
	}
}

/*
 * Populate the percpu object list for a given slab.
*/ static void gather_cpudata_list_v1(struct meminfo *si) { int i, j; int avail; ulong cpudata[NR_CPUS]; if (INVALID_MEMBER(kmem_cache_s_cpudata)) return; readmem(si->cache+OFFSET(kmem_cache_s_cpudata), KVADDR, &cpudata[0], sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_cpudata), "cpudata array", FAULT_ON_ERROR); for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_cpudata)) && cpudata[i]; i++) { BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit); readmem(cpudata[i]+OFFSET(cpucache_s_avail), KVADDR, &avail, sizeof(int), "cpucache avail", FAULT_ON_ERROR); if (!avail) continue; if (avail > vt->kmem_max_limit) { error(INFO, "\"%s\" cache: cpucache_s.avail %d greater than limit %ld\n", si->curname, avail, vt->kmem_max_limit); si->errors++; } if (CRASHDEBUG(2)) fprintf(fp, "%s: cpu[%d] avail: %d\n", si->curname, i, avail); readmem(cpudata[i]+SIZE(cpucache_s), KVADDR, si->cpudata[i], sizeof(void *) * avail, "cpucache avail", FAULT_ON_ERROR); if (CRASHDEBUG(2)) for (j = 0; j < avail; j++) fprintf(fp, " %lx\n", si->cpudata[i][j]); } } /* * Updated for 2.6 slab percpu data structure, this also gathers * the shared array_cache list as well. 
 */
static void
gather_cpudata_list_v2(struct meminfo *si)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];
	ulong shared;

	readmem(si->cache+OFFSET(kmem_cache_s_array),
	    KVADDR, &cpudata[0],
	    sizeof(ulong) * ARRAY_LENGTH(kmem_cache_s_array),
	    "array_cache array", FAULT_ON_ERROR);

	for (i = 0; (i < ARRAY_LENGTH(kmem_cache_s_array)) &&
	    cpudata[i]; i++) {
		BZERO(si->cpudata[i], sizeof(ulong) * vt->kmem_max_limit);

		readmem(cpudata[i]+OFFSET(array_cache_avail),
		    KVADDR, &avail, sizeof(int),
		    "array cache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		if (avail > vt->kmem_max_limit) {
			error(INFO,
			    "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
			    si->curname, avail, vt->kmem_max_limit);
			si->errors++;
			/*
			 * NOTE(review): execution continues and the readmem
			 * below copies "avail" pointers into si->cpudata[i],
			 * which appears to be sized for kmem_max_limit
			 * entries -- possible overrun on corrupt data;
			 * v2_nodes uses "continue" here.  Verify.
			 */
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
			    si->curname, i, avail);

		/* Cached object pointers follow the array_cache header. */
		readmem(cpudata[i]+SIZE(array_cache),
		    KVADDR, si->cpudata[i],
		    sizeof(void *) * avail,
		    "array_cache avail", FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx (cpu %d)\n",
				    si->cpudata[i][j], i);
	}

	/*
	 * If the shared list contains anything, gather them as well.
	 */
	BZERO(si->shared_array_cache, sizeof(ulong) * vt->kmem_max_limit);

	if (!VALID_MEMBER(kmem_list3_shared) ||
	    !VALID_MEMBER(kmem_cache_s_lists) ||
	    !readmem(si->cache+OFFSET(kmem_cache_s_lists)+
	    OFFSET(kmem_list3_shared), KVADDR, &shared,
	    sizeof(void *), "kmem_list3 shared",
	    RETURN_ON_ERROR|QUIET) ||
	    !readmem(shared+OFFSET(array_cache_avail),
	    KVADDR, &avail, sizeof(int), "shared array_cache avail",
	    RETURN_ON_ERROR|QUIET) || !avail)
		return;

	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
		    si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		return;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "%s: shared avail: %d\n",
		    si->curname, avail);

	readmem(shared+SIZE(array_cache), KVADDR, si->shared_array_cache,
	    sizeof(void *) * avail,
	    "shared array_cache avail", FAULT_ON_ERROR);

	if (CRASHDEBUG(2))
		for (j = 0; j < avail; j++)
			fprintf(fp, " %lx (shared list)\n",
			    si->shared_array_cache[j]);
}

/*
 * Updated gather_cpudata_list_v2 for per-node kmem_list3's in kmem_cache
 */
static void
gather_cpudata_list_v2_nodes(struct meminfo *si, int index)
{
	int i, j;
	int avail;
	ulong cpudata[NR_CPUS];
	ulong shared, percpu_ptr;
	ulong *start_address;

	start_address = (ulong *)
	    GETBUF(sizeof(ulong) * vt->kmem_cache_len_nodes);

	if (vt->flags & SLAB_CPU_CACHE) {
		/* Newer layout: one percpu pointer, offset per cpu. */
		readmem(si->cache+OFFSET(kmem_cache_cpu_cache),
		    KVADDR, &percpu_ptr, sizeof(void *),
		    "kmem_cache.cpu_cache", FAULT_ON_ERROR);
		for (i = 0; i < vt->kmem_max_cpus; i++)
			cpudata[i] = percpu_ptr + kt->__per_cpu_offset[i];
	} else {
		readmem(si->cache+OFFSET(kmem_cache_s_array),
		    KVADDR, &cpudata[0],
		    sizeof(ulong) * vt->kmem_max_cpus,
		    "array_cache array", FAULT_ON_ERROR);
	}

	/* Per-cpu lists are gathered only on the first node (index == 0). */
	for (i = 0; (i < vt->kmem_max_cpus) && cpudata[i] &&
	    !(index); i++) {
		if (si->cpudata[i])
			BZERO(si->cpudata[i],
			    sizeof(ulong) * vt->kmem_max_limit);
		else
			continue;

		readmem(cpudata[i]+OFFSET(array_cache_avail),
		    KVADDR, &avail, sizeof(int),
		    "array cache avail", FAULT_ON_ERROR);

		if (!avail)
			continue;

		if (avail > vt->kmem_max_limit) {
			error(INFO,
			    "\"%s\" cache: array_cache.avail %d greater than limit %ld\n",
			    si->curname, avail, vt->kmem_max_limit);
			si->errors++;
			continue;
		}

		if (CRASHDEBUG(2))
			fprintf(fp, "%s: cpu[%d] avail: %d\n",
			    si->curname, i, avail);

		readmem(cpudata[i]+SIZE(array_cache),
		    KVADDR, si->cpudata[i],
		    sizeof(void *) * avail,
		    "array_cache avail", FAULT_ON_ERROR);

		if (CRASHDEBUG(2))
			for (j = 0; j < avail; j++)
				fprintf(fp, " %lx (cpu %d)\n",
				    si->cpudata[i][j], i);
	}

	/*
	 * If the shared list contains anything, gather them as well.
	 */
	if (si->flags & SLAB_FIRST_NODE) {
		/* Reset the accumulated shared list on the first node. */
		BZERO(si->shared_array_cache, sizeof(ulong) *
		    vt->kmem_max_limit * vt->kmem_cache_len_nodes);
		si->current_cache_index = 0;
	}

	if (!readmem(kmem_cache_nodelists(si->cache), KVADDR,
	    &start_address[0],
	    sizeof(ulong) * vt->kmem_cache_len_nodes ,
	    "array nodelist array", RETURN_ON_ERROR) ||
	    !readmem(start_address[index] + OFFSET(kmem_list3_shared),
	    KVADDR, &shared, sizeof(void *), "kmem_list3 shared",
	    RETURN_ON_ERROR|QUIET) || !shared ||
	    !readmem(shared + OFFSET(array_cache_avail),
	    KVADDR, &avail, sizeof(int), "shared array_cache avail",
	    RETURN_ON_ERROR|QUIET) || !avail) {
		FREEBUF(start_address);
		return;
	}

	if (avail > vt->kmem_max_limit) {
		error(INFO,
		    "\"%s\" cache: shared array_cache.avail %d greater than limit %ld\n",
		    si->curname, avail, vt->kmem_max_limit);
		si->errors++;
		FREEBUF(start_address);
		return;
	}

	if (CRASHDEBUG(2))
		fprintf(fp, "%s: shared avail: %d\n",
		    si->curname, avail);

	/* Append this node's shared objects after earlier nodes' entries. */
	readmem(shared+SIZE(array_cache), KVADDR,
	    si->shared_array_cache + si->current_cache_index,
	    sizeof(void *) * avail,
	    "shared array_cache avail", FAULT_ON_ERROR);

	if ((si->current_cache_index + avail) >
	    (vt->kmem_max_limit * vt->kmem_cache_len_nodes)) {
		error(INFO,
		    "\"%s\" cache: total shared array_cache.avail %d greater than total limit %ld\n",
		    si->curname,
		    si->current_cache_index + avail,
		    vt->kmem_max_limit * vt->kmem_cache_len_nodes);
		si->errors++;
		FREEBUF(start_address);
		return;
	}

	if (CRASHDEBUG(2))
		for (j = si->current_cache_index;
		    j < (si->current_cache_index + avail); j++)
			fprintf(fp, " %lx (shared list)\n",
			    si->shared_array_cache[j]);

	si->current_cache_index += avail;
	FREEBUF(start_address);
}

/*
 * Check whether a given address is contained in the previously-gathered
 * percpu object cache.
 */
static int
check_cpudata_list(struct meminfo *si, ulong obj)
{
	int i, j;

	for (i = 0; i < vt->kmem_max_cpus; i++) {
		/* Each cpu's list is zero-terminated. */
		for (j = 0; si->cpudata[i][j]; j++)
			if (si->cpudata[i][j] == obj) {
				si->cpu = i;
				return TRUE;
			}
	}

	return FALSE;
}

/*
 * Check whether a given address is contained in the previously-gathered
 * shared object cache.
 */
static int
check_shared_list(struct meminfo *si, ulong obj)
{
	int i;

	if (INVALID_MEMBER(kmem_list3_shared) ||
	    !si->shared_array_cache)
		return FALSE;

	for (i = 0; si->shared_array_cache[i]; i++) {
		if (si->shared_array_cache[i] == obj)
			return TRUE;
	}

	return FALSE;
}

/*
 * Search the various memory subsystems for instances of this address.
 * Start with the most specific areas, ending up with at least the
 * mem_map page data.
 */
static void
kmem_search(struct meminfo *mi)
{
	struct syment *sp;
	struct meminfo tmp_meminfo;
	char buf[BUFSIZE];
	ulong vaddr, orig_flags;
	physaddr_t paddr;
	ulong offset;
	ulong task;
	ulong show_flags;
	struct task_context *tc;

	vaddr = 0;
	pc->curcmd_flags &= ~HEADER_PRINTED;
	pc->curcmd_flags |= IGNORE_ERRORS;

	switch (mi->memtype)
	{
	case KVADDR:
		vaddr = mi->spec_addr;
		break;

	case PHYSADDR:
		/* Only unity-mapped physical addresses have a kernel vaddr. */
		vaddr = mi->spec_addr < VTOP(vt->high_memory) ?
			PTOV(mi->spec_addr) : BADADDR;
		break;
	}

	orig_flags = mi->flags;
	mi->retval = 0;

	/*
	 * Check first for a possible symbolic display of the virtual
	 * address associated with mi->spec_addr or PTOV(mi->spec_addr).
	 */
	if (((vaddr >= kt->stext) && (vaddr <= kt->end)) ||
	    IS_MODULE_VADDR(mi->spec_addr)) {
		if ((sp = value_search(vaddr, &offset))) {
			show_flags = SHOW_LINENUM | SHOW_RADIX();
			if (module_symbol(sp->value, NULL, NULL, NULL, 0))
				show_flags |= SHOW_MODULE;
			show_symbol(sp, offset, show_flags);
			fprintf(fp, "\n");
		}
	}

	/*
	 * Check for a valid mapped address.
	 */
	if ((mi->memtype == KVADDR) && IS_VMALLOC_ADDR(mi->spec_addr)) {
		if (kvtop(NULL, mi->spec_addr, &paddr, 0)) {
			mi->flags = orig_flags | VMLIST_VERIFY;
			dump_vmlist(mi);
			if (mi->retval) {
				mi->flags = orig_flags;
				dump_vmlist(mi);
				fprintf(fp, "\n");
				/* Continue the search by physical address. */
				mi->spec_addr = paddr;
				mi->memtype = PHYSADDR;
				goto mem_map;
			}
		}
	}
	/*
	 * If the address is physical, check whether it's in vmalloc space.
	 */
	if (mi->memtype == PHYSADDR) {
		mi->flags = orig_flags;
		mi->flags |= GET_PHYS_TO_VMALLOC;
		mi->retval = 0;
		dump_vmlist(mi);
		mi->flags &= ~GET_PHYS_TO_VMALLOC;

		if (mi->retval) {
			if ((sp = value_search(mi->retval, &offset))) {
				show_symbol(sp, offset,
				    SHOW_LINENUM | SHOW_RADIX());
				fprintf(fp, "\n");
			}
			dump_vmlist(mi);
			fprintf(fp, "\n");
			goto mem_map;
		}
	}

	/*
	 * Check whether the containing page belongs to the slab subsystem.
	 */
	mi->flags = orig_flags;
	mi->retval = 0;
	if ((vaddr != BADADDR) &&
	    vaddr_to_kmem_cache(vaddr, buf, VERBOSE)) {
		BZERO(&tmp_meminfo, sizeof(struct meminfo));
		tmp_meminfo.spec_addr = vaddr;
		tmp_meminfo.memtype = KVADDR;
		tmp_meminfo.flags = mi->flags;
		vt->dump_kmem_cache(&tmp_meminfo);
		fprintf(fp, "\n");
	}
	if ((vaddr != BADADDR) && is_slab_page(mi, buf)) {
		BZERO(&tmp_meminfo, sizeof(struct meminfo));
		tmp_meminfo.spec_addr = vaddr;
		tmp_meminfo.memtype = KVADDR;
		tmp_meminfo.flags = mi->flags;
		vt->dump_kmem_cache(&tmp_meminfo);
		fprintf(fp, "\n");
	}

	/*
	 * Check free list.
	 */
	mi->flags = orig_flags;
	mi->retval = 0;
	vt->dump_free_pages(mi);
	if (mi->retval)
		fprintf(fp, "\n");

	if (vt->page_hash_table) {
		/*
		 * Check the page cache.
		 */
		mi->flags = orig_flags;
		mi->retval = 0;
		dump_page_hash_table(mi);
		if (mi->retval)
			fprintf(fp, "\n");
	}

	/*
	 * Check whether it's a current task or stack address.
	 */
	if ((mi->memtype == KVADDR) && (task = vaddr_in_task_struct(vaddr)) &&
	    (tc = task_to_context(task))) {
		show_context(tc);
		fprintf(fp, "\n");
	} else if ((mi->memtype == KVADDR) && (task = stkptr_to_task(vaddr)) &&
	    (tc = task_to_context(task))) {
		show_context(tc);
		fprintf(fp, "\n");
	}

mem_map:
	mi->flags = orig_flags;
	pc->curcmd_flags &= ~HEADER_PRINTED;
	if (vaddr != BADADDR)
		dump_mem_map(mi);
	else
		mi->retval = FALSE;

	if (!mi->retval)
		fprintf(fp, "%llx: %s address not found in mem map\n",
		    mi->spec_addr, memtype_string(mi->memtype, 0));
}

/*
 * Determine whether an address is a page pointer from the mem_map[] array.
 * If the caller requests it, return the associated physical address.
 */
int
is_page_ptr(ulong addr, physaddr_t *phys)
{
	int n;
	ulong ppstart, ppend;
	struct node_table *nt;
	ulong pgnum, node_size;
	ulong nr, sec_addr;
	ulong nr_mem_sections;
	ulong coded_mem_map, mem_map, end_mem_map;
	physaddr_t section_paddr;

	if (IS_SPARSEMEM()) {
		/* Search every valid section's page-struct range. */
		nr_mem_sections = NR_MEM_SECTIONS();
		for (nr = 0; nr < nr_mem_sections ; nr++) {
			if ((sec_addr = valid_section_nr(nr))) {
				coded_mem_map =
				    section_mem_map_addr(sec_addr);
				mem_map = sparse_decode_mem_map(
				    coded_mem_map, nr);
				end_mem_map = mem_map +
				    (PAGES_PER_SECTION() * SIZE(page));

				if ((addr >= mem_map) &&
				    (addr < end_mem_map)) {
					/* Must align to a page struct. */
					if ((addr - mem_map) % SIZE(page))
						return FALSE;
					if (phys) {
						section_paddr = PTOB(
						    section_nr_to_pfn(nr));
						pgnum = (addr - mem_map) /
						    SIZE(page);
						*phys = section_paddr +
						    ((physaddr_t)pgnum *
						    PAGESIZE());
					}
					return TRUE;
				}
			}
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		ppstart = nt->mem_map;
		ppend = ppstart + (node_size * SIZE(page));

		if ((addr < ppstart) || (addr >= ppend))
			continue;

		/*
		 * We're in the mem_map range -- but it is a page pointer?
		 */
		if ((addr - ppstart) % SIZE(page))
			return FALSE;

		if (phys) {
			pgnum = (addr - nt->mem_map) / SIZE(page);
			*phys = ((physaddr_t)pgnum * PAGESIZE()) +
			    nt->start_paddr;
		}
		return TRUE;
	}

	return FALSE;

#ifdef PRE_NODES
	/* Legacy single-mem_map variant; dead code behind PRE_NODES. */
	ppstart = vt->mem_map;
	ppend = ppstart + (vt->total_pages * vt->page_struct_len);

	if ((addr < ppstart) || (addr >= ppend))
		return FALSE;

	if ((addr - ppstart) % vt->page_struct_len)
		return FALSE;

	return TRUE;
#endif
}

/*
 * Return the physical address associated with this page pointer.
 */
static int
page_to_phys(ulong pp, physaddr_t *phys)
{
	return(is_page_ptr(pp, phys));
}

/*
 * Return the page pointer associated with this physical address.
 */
int
phys_to_page(physaddr_t phys, ulong *pp)
{
	int n;
	ulong pgnum;
	struct node_table *nt;
	physaddr_t pstart, pend;
	ulong node_size;

	if (IS_SPARSEMEM()) {
		ulong map;
		map = pfn_to_map(phys >> PAGESHIFT());
		if (map) {
			*pp = map;
			return TRUE;
		}
		return FALSE;
	}

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;

		pstart = nt->start_paddr;
		pend = pstart + ((ulonglong)node_size * PAGESIZE());

		if ((phys < pstart) || (phys >= pend))
			continue;
		/*
		 * We're in the physical range -- calculate the page.
		 */
		pgnum = BTOP(phys - pstart);
		*pp = nt->mem_map + (pgnum * SIZE(page));

		return TRUE;
	}

	return FALSE;

#ifdef PRE_NODES
	/* Legacy single-mem_map variant; dead code behind PRE_NODES. */
	if (phys >= (vt->total_pages * PAGESIZE()))
		return FALSE;

	pgnum = PTOB(BTOP(phys)) / PAGESIZE();
	*pp = vt->mem_map + (pgnum * vt->page_struct_len);

	return TRUE;
#endif
}

/*
 * Fill the caller's buffer with up to maxlen non-NULL bytes
 * starting from kvaddr, returning the number of consecutive
 * non-NULL bytes found.  If the buffer gets filled with
 * maxlen bytes without a NULL, then the caller is responsible
 * for handling it.
 */
int
read_string(ulong kvaddr, char *buf, int maxlen)
{
	int i;

	BZERO(buf, maxlen);
	/* Best-effort read; a partial/failed read leaves zeroed bytes. */
	readmem(kvaddr, KVADDR, buf, maxlen,
	    "read_string characters", QUIET|RETURN_ON_ERROR);

	/* Truncate at the first NUL, clearing any trailing garbage. */
	for (i = 0; i < maxlen; i++) {
		if (buf[i] == NULLCHAR) {
			BZERO(&buf[i], maxlen-i);
			break;
		}
	}

	return i;
}

/*
 * "help -v" output
 */
void
dump_vm_table(int verbose)
{
	int i;
	struct node_table *nt;
	int others;
	ulong *up;

	others = 0;
	fprintf(fp, " flags: %lx %s(",
	    vt->flags, count_bits_long(vt->flags) > 4 ? "\n " : "");
	if (vt->flags & NODES)
		fprintf(fp, "%sNODES", others++ ? "|" : "");
	if (vt->flags & NODES_ONLINE)
		fprintf(fp, "%sNODES_ONLINE", others++ ? "|" : "");
	if (vt->flags & ZONES)
		fprintf(fp, "%sZONES", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V1)
		fprintf(fp, "%sPERCPU_KMALLOC_V1", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2)
		fprintf(fp, "%sPERCPU_KMALLOC_V2", others++ ? "|" : "");
	if (vt->flags & COMMON_VADDR)
		fprintf(fp, "%sCOMMON_VADDR", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_INIT)
		fprintf(fp, "%sKMEM_CACHE_INIT", others++ ? "|" : "");
	if (vt->flags & V_MEM_MAP)
		fprintf(fp, "%sV_MEM_MAP", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_UNAVAIL)
		fprintf(fp, "%sKMEM_CACHE_UNAVAIL", others++ ? "|" : "");
	if (vt->flags & DISCONTIGMEM)
		fprintf(fp, "%sDISCONTIGMEM", others++ ? "|" : "");
	if (vt->flags & FLATMEM)
		fprintf(fp, "%sFLATMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM)
		fprintf(fp, "%sSPARSEMEM", others++ ? "|" : "");
	if (vt->flags & SPARSEMEM_EX)
		fprintf(fp, "%sSPARSEMEM_EX", others++ ? "|" : "");
	if (vt->flags & KMEM_CACHE_DELAY)
		fprintf(fp, "%sKMEM_CACHE_DELAY", others++ ? "|" : "");
	if (vt->flags & PERCPU_KMALLOC_V2_NODES)
		fprintf(fp, "%sPERCPU_KMALLOC_V2_NODES", others++ ? "|" : "");
	if (vt->flags & VM_STAT)
		fprintf(fp, "%sVM_STAT", others++ ? "|" : "");
	if (vt->flags & KMALLOC_SLUB)
		fprintf(fp, "%sKMALLOC_SLUB", others++ ? "|" : "");
	if (vt->flags & KMALLOC_COMMON)
		fprintf(fp, "%sKMALLOC_COMMON", others++ ? "|" : "");
	if (vt->flags & SLAB_OVERLOAD_PAGE)
		fprintf(fp, "%sSLAB_OVERLOAD_PAGE", others++ ? "|" : "");
	if (vt->flags & SLAB_CPU_CACHE)
		fprintf(fp, "%sSLAB_CPU_CACHE", others++ ? "|" : "");
	if (vt->flags & USE_VMAP_AREA)
		fprintf(fp, "%sUSE_VMAP_AREA", others++ ? "|" : "");
	if (vt->flags & CONFIG_NUMA)
		fprintf(fp, "%sCONFIG_NUMA", others++ ? "|" : "");
	if (vt->flags & VM_EVENT)
		fprintf(fp, "%sVM_EVENT", others++ ? "|" : "");
	if (vt->flags & PGCNT_ADJ)
		fprintf(fp, "%sPGCNT_ADJ", others++ ? "|" : "");
	if (vt->flags & PAGEFLAGS)
		fprintf(fp, "%sPAGEFLAGS", others++ ? "|" : "");
	if (vt->flags & SWAPINFO_V1)
		fprintf(fp, "%sSWAPINFO_V1", others++ ? "|" : "");
	if (vt->flags & SWAPINFO_V2)
		fprintf(fp, "%sSWAPINFO_V2", others++ ? "|" : "");
	if (vt->flags & NODELISTS_IS_PTR)
		fprintf(fp, "%sNODELISTS_IS_PTR", others++ ? "|" : "");
	if (vt->flags & VM_INIT)
		fprintf(fp, "%sVM_INIT", others++ ? "|" : "");
	fprintf(fp, ")\n");

	/* Abbreviate when all per-cpu kernel pgds are identical. */
	if (vt->kernel_pgd[0] == vt->kernel_pgd[1])
		fprintf(fp, " kernel_pgd[NR_CPUS]: %lx ...\n",
		    vt->kernel_pgd[0]);
	else {
		fprintf(fp, " kernel_pgd[NR_CPUS]: ");
		for (i = 0; i < NR_CPUS; i++) {
			if ((i % 4) == 0)
				fprintf(fp, "\n ");
			fprintf(fp, "%lx ", vt->kernel_pgd[i]);
		}
		fprintf(fp, "\n");
	}

	fprintf(fp, " high_memory: %lx\n", vt->high_memory);
	fprintf(fp, " vmalloc_start: %lx\n", vt->vmalloc_start);
	fprintf(fp, " mem_map: %lx\n", vt->mem_map);
	fprintf(fp, " total_pages: %ld\n", vt->total_pages);
	fprintf(fp, " max_mapnr: %ld\n", vt->max_mapnr);
	fprintf(fp, " totalram_pages: %ld\n", vt->totalram_pages);
	fprintf(fp, " totalhigh_pages: %ld\n", vt->totalhigh_pages);
	fprintf(fp, " num_physpages: %ld\n", vt->num_physpages);
	fprintf(fp, " page_hash_table: %lx\n", vt->page_hash_table);
	fprintf(fp, "page_hash_table_len: %d\n", vt->page_hash_table_len);
	fprintf(fp, " kmem_max_c_num: %ld\n", vt->kmem_max_c_num);
	fprintf(fp, " kmem_max_limit: %ld\n", vt->kmem_max_limit);
	fprintf(fp, " kmem_max_cpus: %ld\n", vt->kmem_max_cpus);
	fprintf(fp, " kmem_cache_count: %ld\n", vt->kmem_cache_count);
	fprintf(fp, " kmem_cache_namelen: %d\n", vt->kmem_cache_namelen);
	fprintf(fp, "kmem_cache_len_nodes: %ld\n", vt->kmem_cache_len_nodes);
	fprintf(fp, " nr_bad_slab_caches: %d\n", vt->nr_bad_slab_caches);
	if (!vt->nr_bad_slab_caches)
		fprintf(fp, " bad_slab_caches: (unused)\n");
	else {
		for (i = 0; i < vt->nr_bad_slab_caches; i++) {
			fprintf(fp, " bad_slab_caches[%d]: %lx\n",
				i, vt->bad_slab_caches[i]);
		}
	}
	fprintf(fp, " paddr_prlen: %d\n", vt->paddr_prlen);
	fprintf(fp, " numnodes: %d\n", vt->numnodes);
	fprintf(fp, " nr_zones: %d\n", vt->nr_zones);
	fprintf(fp, " nr_free_areas: %d\n", vt->nr_free_areas);

	for (i = 0; i < vt->numnodes; i++) {
		nt = &vt->node_table[i];
		fprintf(fp, " node_table[%d]: \n", i);
		fprintf(fp, " id: %d\n", nt->node_id);
		fprintf(fp, " pgdat: %lx\n", nt->pgdat);
		fprintf(fp, " size: %ld\n", nt->size);
		fprintf(fp, " present: %ld\n", nt->present);
		fprintf(fp, " mem_map: %lx\n", nt->mem_map);
		fprintf(fp, " start_paddr: %llx\n", nt->start_paddr);
		fprintf(fp, " start_mapnr: %ld\n", nt->start_mapnr);
	}

	/* Identify which handler each version-dependent hook resolved to. */
	fprintf(fp, " dump_free_pages: ");
	if (vt->dump_free_pages == dump_free_pages)
		fprintf(fp, "dump_free_pages()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v1)
		fprintf(fp, "dump_free_pages_zones_v1()\n");
	else if (vt->dump_free_pages == dump_free_pages_zones_v2)
		fprintf(fp, "dump_free_pages_zones_v2()\n");
	else if (vt->dump_free_pages == dump_multidimensional_free_pages)
		fprintf(fp, "dump_multidimensional_free_pages()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_free_pages);

	fprintf(fp, " dump_kmem_cache: ");
	if (vt->dump_kmem_cache == dump_kmem_cache)
		fprintf(fp, "dump_kmem_cache()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v1)
		fprintf(fp, "dump_kmem_cache_percpu_v1()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_percpu_v2)
		fprintf(fp, "dump_kmem_cache_percpu_v2()\n");
	else if (vt->dump_kmem_cache == dump_kmem_cache_slub)
		fprintf(fp, "dump_kmem_cache_slub()\n");
	else
		fprintf(fp, "%lx (unknown)\n", (ulong)vt->dump_kmem_cache);
	fprintf(fp, " slab_data: %lx\n", (ulong)vt->slab_data);
	if (verbose)
		dump_saved_slab_data();
	fprintf(fp, " cpu_slab_type: %d\n", vt->cpu_slab_type);
	fprintf(fp, " nr_swapfiles: %d\n", vt->nr_swapfiles);
	fprintf(fp, " last_swap_read: %lx\n", vt->last_swap_read);
	fprintf(fp, " swap_info_struct: %lx\n", (ulong)vt->swap_info_struct);
	fprintf(fp, " mem_sec: %lx\n", (ulong)vt->mem_sec);
	fprintf(fp, " mem_section: %lx\n", (ulong)vt->mem_section);
	fprintf(fp, " ZONE_HIGHMEM: %d\n", vt->ZONE_HIGHMEM);
	fprintf(fp, "node_online_map_len: %d\n", vt->node_online_map_len);
	if (vt->node_online_map_len) {
		fprintf(fp, " node_online_map: ");
		up = (ulong *)vt->node_online_map;
		for (i = 0; i < vt->node_online_map_len; i++) {
			fprintf(fp, "%s%lx", i ? ", " : "[", *up);
			up++;
		}
		fprintf(fp, "]\n");
	} else {
		fprintf(fp, " node_online_map: (unused)\n");
	}
	fprintf(fp, " nr_vm_stat_items: %d\n", vt->nr_vm_stat_items);
	fprintf(fp, " vm_stat_items: %s",
	    (vt->flags & VM_STAT) ? "\n" : "(not used)\n");
	for (i = 0; i < vt->nr_vm_stat_items; i++)
		fprintf(fp, " [%d] %s\n", i, vt->vm_stat_items[i]);
	fprintf(fp, " nr_vm_event_items: %d\n", vt->nr_vm_event_items);
	fprintf(fp, " vm_event_items: %s",
	    (vt->flags & VM_EVENT) ? "\n" : "(not used)\n");
	for (i = 0; i < vt->nr_vm_event_items; i++)
		fprintf(fp, " [%d] %s\n", i, vt->vm_event_items[i]);
	fprintf(fp, " PG_reserved: %lx\n", vt->PG_reserved);
	fprintf(fp, " PG_slab: %ld (%lx)\n", vt->PG_slab,
	    (ulong)1 << vt->PG_slab);
	fprintf(fp, " PG_head_tail_mask: %lx\n", vt->PG_head_tail_mask);
	fprintf(fp, " nr_pageflags: %d\n", vt->nr_pageflags);
	fprintf(fp, " pageflags_data: %s\n",
	    vt->nr_pageflags ? "" : "(not used)");
	for (i = 0; i < vt->nr_pageflags; i++) {
		fprintf(fp, " %s[%d] %08lx: %s\n",
		    i < 10 ? " " : "", i,
		    vt->pageflags_data[i].mask,
		    vt->pageflags_data[i].name);
	}

	dump_vma_cache(VERBOSE);
}

/*
 * Calculate the amount of memory referenced in the kernel-specific "nodes".
*/ uint64_t total_node_memory() { int i; struct node_table *nt; uint64_t total; for (i = total = 0; i < vt->numnodes; i++) { nt = &vt->node_table[i]; if (CRASHDEBUG(1)) { console("node_table[%d]: \n", i); console(" id: %d\n", nt->node_id); console(" pgdat: %lx\n", nt->pgdat); console(" size: %ld\n", nt->size); console(" present: %ld\n", nt->present); console(" mem_map: %lx\n", nt->mem_map); console(" start_paddr: %lx\n", nt->start_paddr); console(" start_mapnr: %ld\n", nt->start_mapnr); } if (nt->present) total += (uint64_t)((uint64_t)nt->present * (uint64_t)PAGESIZE()); else total += (uint64_t)((uint64_t)nt->size * (uint64_t)PAGESIZE()); } return total; } /* * Dump just the vm_area_struct cache table data so that it can be * called from above or for debug purposes. */ void dump_vma_cache(ulong verbose) { int i; ulong vhits; if (!verbose) goto show_hits; for (i = 0; i < VMA_CACHE; i++) fprintf(fp, " cached_vma[%2d]: %lx (%ld)\n", i, vt->cached_vma[i], vt->cached_vma_hits[i]); fprintf(fp, " vma_cache: %lx\n", (ulong)vt->vma_cache); fprintf(fp, " vma_cache_index: %d\n", vt->vma_cache_index); fprintf(fp, " vma_cache_fills: %ld\n", vt->vma_cache_fills); fflush(fp); show_hits: if (vt->vma_cache_fills) { for (i = vhits = 0; i < VMA_CACHE; i++) vhits += vt->cached_vma_hits[i]; fprintf(stderr, "%s vma hit rate: %2ld%% (%ld of %ld)\n", verbose ? "" : " ", (vhits * 100)/vt->vma_cache_fills, vhits, vt->vma_cache_fills); } } /* * Guess at the "real" amount of physical memory installed, formatting * it in a MB or GB based string. 
*/ char * get_memory_size(char *buf) { uint64_t total; ulong next_gig; #ifdef OLDWAY ulong mbs, gbs; #endif total = machdep->memory_size(); if ((next_gig = roundup(total, GIGABYTES(1)))) { if ((next_gig - total) <= MEGABYTES(64)) total = next_gig; } return (pages_to_size((ulong)(total/PAGESIZE()), buf)); #ifdef OLDWAY gbs = (ulong)(total/GIGABYTES(1)); mbs = (ulong)(total/MEGABYTES(1)); if (gbs) mbs = (total % GIGABYTES(1))/MEGABYTES(1); if (total%MEGABYTES(1)) mbs++; if (gbs) sprintf(buf, mbs ? "%ld GB %ld MB" : "%ld GB", gbs, mbs); else sprintf(buf, "%ld MB", mbs); return buf; #endif } /* * For use by architectures not having machine-specific manners for * best determining physical memory size. */ uint64_t generic_memory_size(void) { if (machdep->memsize) return machdep->memsize; return (machdep->memsize = total_node_memory()); } /* * Determine whether a virtual address is user or kernel or ambiguous. */ int vaddr_type(ulong vaddr, struct task_context *tc) { int memtype, found; if (!tc) tc = CURRENT_CONTEXT(); memtype = found = 0; if (machdep->is_uvaddr(vaddr, tc)) { memtype |= UVADDR; found++; } if (machdep->is_kvaddr(vaddr)) { memtype |= KVADDR; found++; } if (found == 1) return memtype; else return AMBIGUOUS; } /* * Determine the first valid user space address */ static int address_space_start(struct task_context *tc, ulong *addr) { ulong vma; char *vma_buf; if (!tc->mm_struct) return FALSE; fill_mm_struct(tc->mm_struct); vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap)); if (!vma) return FALSE; vma_buf = fill_vma_cache(vma); *addr = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); return TRUE; } int generic_get_kvaddr_ranges(struct vaddr_range *rp) { int cnt; if (XEN_HYPER_MODE()) return 0; cnt = 0; rp[cnt].type = KVADDR_UNITY_MAP; rp[cnt].start = machdep->kvbase; rp[cnt++].end = vt->vmalloc_start; rp[cnt].type = KVADDR_VMALLOC; rp[cnt].start = vt->vmalloc_start; rp[cnt++].end = (ulong)(-1); return cnt; } /* * Search for a given value between a starting 
and ending address range, * applying an optional mask for "don't care" bits. As an alternative * to entering the starting address value, -k means "start of kernel address * space". For processors with ambiguous user/kernel address spaces, * -u or -k must be used (with or without -s) as a differentiator. */ void cmd_search(void) { int i, c, memtype, ranges, context, max; ulonglong start, end; ulong value, mask, len; ulong uvaddr_start, uvaddr_end; ulong kvaddr_start, kvaddr_end, range_end; int sflag, Kflag, Vflag, pflag, Tflag, tflag; struct searchinfo searchinfo; struct syment *sp; struct node_table *nt; struct vaddr_range vaddr_ranges[MAX_KVADDR_RANGES]; struct vaddr_range *vrp; struct task_context *tc; #define vaddr_overflow(ADDR) (BITS32() && ((ADDR) > 0xffffffffULL)) #define uint_overflow(VALUE) ((VALUE) > 0xffffffffUL) #define ushort_overflow(VALUE) ((VALUE) > 0xffffUL) context = max = 0; start = end = 0; value = mask = sflag = pflag = Kflag = Vflag = memtype = len = Tflag = tflag = 0; kvaddr_start = kvaddr_end = 0; uvaddr_start = UNINITIALIZED; uvaddr_end = COMMON_VADDR_SPACE() ? 
(ulong)(-1) : machdep->kvbase; BZERO(&searchinfo, sizeof(struct searchinfo)); vrp = &vaddr_ranges[0]; ranges = machdep->get_kvaddr_ranges(vrp); if (CRASHDEBUG(1)) { fprintf(fp, "kvaddr ranges:\n"); for (i = 0; i < ranges; i++) { fprintf(fp, " [%d] %lx %lx ", i, vrp[i].start, vrp[i].end); switch (vrp[i].type) { case KVADDR_UNITY_MAP: fprintf(fp, "KVADDR_UNITY_MAP\n"); break; case KVADDR_START_MAP: fprintf(fp, "KVADDR_START_MAP\n"); break; case KVADDR_VMALLOC: fprintf(fp, "KVADDR_VMALLOC\n"); break; case KVADDR_MODULES: fprintf(fp, "KVADDR_MODULES\n"); break; case KVADDR_VMEMMAP: fprintf(fp, "KVADDR_VMEMMAP\n"); break; } } } searchinfo.mode = SEARCH_ULONG; /* default search */ while ((c = getopt(argcnt, args, "Ttl:ukKVps:e:v:m:hwcx:")) != EOF) { switch(c) { case 'u': if (XEN_HYPER_MODE()) error(FATAL, "-u option is not applicable to the " "Xen hypervisor\n"); if (is_kernel_thread(CURRENT_TASK()) || !task_mm(CURRENT_TASK(), TRUE)) error(FATAL, "current context has no user address space\n"); if (!sflag) { address_space_start(CURRENT_CONTEXT(), &uvaddr_start); start = (ulonglong)uvaddr_start; } memtype = UVADDR; sflag++; break; case 'p': if (XEN_HYPER_MODE()) error(FATAL, "-p option is not applicable to the " "Xen hypervisor\n"); memtype = PHYSADDR; if (!sflag) { nt = &vt->node_table[0]; start = nt->start_paddr; } sflag++; break; case 'V': case 'K': case 'k': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (!sflag) start = vrp[0].start; memtype = KVADDR; sflag++; if (c == 'K') Kflag++; else if (c == 'V') Vflag++; break; case 's': if ((sp = symbol_search(optarg))) start = (ulonglong)sp->value; else start = htoll(optarg, FAULT_ON_ERROR, NULL); sflag++; break; case 'e': if ((sp = symbol_search(optarg))) end = (ulonglong)sp->value; else end = htoll(optarg, FAULT_ON_ERROR, NULL); if (!end) error(FATAL, "invalid ending address: 0\n"); break; case 'l': len = stol(optarg, FAULT_ON_ERROR, NULL); break; case 'm': mask = 
htol(optarg, FAULT_ON_ERROR, NULL); break; case 'h': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"h\"\n"); searchinfo.mode = SEARCH_USHORT; break; case 'w': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search mode with \"w\"\n"); searchinfo.mode = SEARCH_UINT; break; case 'c': if (searchinfo.mode != SEARCH_DEFAULT) error(INFO, "WARNING: overriding previously" " set search type with \"c\"\n"); searchinfo.mode = SEARCH_CHARS; break; case 'x': context = dtoi(optarg, FAULT_ON_ERROR, NULL); break; case 'T': case 't': if (XEN_HYPER_MODE()) error(FATAL, "-%c option is not applicable to the " "Xen hypervisor\n", c); if (c == 'T') Tflag++; else if (c == 't') tflag++; if (tflag && Tflag) error(FATAL, "-t and -T options are mutually exclusive\n"); break; default: argerrs++; break; } } if ((tflag || Tflag) && (memtype || start || end || len)) error(FATAL, "-%c option cannot be used with other " "memory-selection options\n", tflag ? 't' : 'T'); if (XEN_HYPER_MODE()) { memtype = KVADDR; if (!sflag) error(FATAL, "the \"-s start\" option is required for" " the Xen hypervisor\n"); } else if (!memtype) { memtype = KVADDR; if (!tflag && !sflag++) start = vrp[0].start; } if (argerrs || (!sflag && !tflag) || !args[optind] || (len && end) || !memtype) cmd_usage(pc->curcmd, SYNOPSIS); searchinfo.memtype = memtype; /* * Verify starting address. 
*/ switch (memtype) { case UVADDR: if (vaddr_overflow(start) || !IS_UVADDR((ulong)start, CURRENT_CONTEXT())) { error(INFO, "invalid user virtual address: %llx\n", start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (tflag) break; if (vaddr_overflow(start) || !IS_KVADDR((ulong)start)) { error(INFO, "invalid kernel virtual address: %llx\n", (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case AMBIGUOUS: error(INFO, "ambiguous virtual address: %llx (requires -u or -k)\n", (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } /* * Set up ending address if necessary. */ if (!end && !len && !tflag) { switch (memtype) { case UVADDR: end = (ulonglong)uvaddr_end; break; case KVADDR: if (XEN_HYPER_MODE()) end = (ulong)(-1); else { range_end = 0; for (i = 0; i < ranges; i++) { if (vrp[i].end > range_end) range_end = vrp[i].end; } end = (ulonglong)range_end; } break; case PHYSADDR: nt = &vt->node_table[vt->numnodes-1]; end = nt->start_paddr + (nt->size * PAGESIZE()); break; } } else if (len) end = start + len; /* * Final verification and per-type start/end variable setting. 
*/ switch (memtype) { case UVADDR: uvaddr_start = (ulong)start; if (end > (ulonglong)uvaddr_end) { error(INFO, "ending address %lx is in kernel space: %llx\n", end); cmd_usage(pc->curcmd, SYNOPSIS); } if (end < (ulonglong)uvaddr_end) uvaddr_end = (ulong)end; if (uvaddr_end < uvaddr_start) { error(INFO, "ending address %lx is below starting address %lx\n", uvaddr_end, uvaddr_start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case KVADDR: if (tflag) break; kvaddr_start = (ulong)start; kvaddr_end = (ulong)end; if (kvaddr_end < kvaddr_start) { error(INFO, "ending address %lx is below starting address %lx\n", kvaddr_end, kvaddr_start); cmd_usage(pc->curcmd, SYNOPSIS); } break; case PHYSADDR: if (end < start) { error(INFO, "ending address %llx is below starting address %llx\n", (ulonglong)end, (ulonglong)start); cmd_usage(pc->curcmd, SYNOPSIS); } break; } if (mask) { switch (searchinfo.mode) { case SEARCH_ULONG: searchinfo.s_parms.s_ulong.mask = mask; break; case SEARCH_UINT: searchinfo.s_parms.s_uint.mask = mask; break; case SEARCH_USHORT: searchinfo.s_parms.s_ushort.mask = mask; break; case SEARCH_CHARS: error(INFO, "mask ignored on string search\n"); break; } } if (context) { switch (searchinfo.mode) { case SEARCH_ULONG: max = PAGESIZE()/sizeof(long); break; case SEARCH_UINT: max = PAGESIZE()/sizeof(int); break; case SEARCH_USHORT: max = PAGESIZE()/sizeof(short); break; case SEARCH_CHARS: error(FATAL, "-x option is not allowed with -c\n"); break; } if (context > max) error(FATAL, "context value %d is too large: maximum is %d\n", context, max); searchinfo.context = context; } searchinfo.vcnt = 0; searchinfo.val = UNUSED; while (args[optind]) { switch (searchinfo.mode) { case SEARCH_ULONG: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] = mask ? 
NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ulong.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ulong.value[searchinfo.vcnt] = value; searchinfo.vcnt++; break; case SEARCH_UINT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_uint.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_uint.value[searchinfo.vcnt] = value; if (uint_overflow(value)) error(FATAL, "value too large for -w option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_USHORT: if (can_eval(args[optind])) { value = eval(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? NULL : args[optind]; } else if (symbol_exists(args[optind])) { value = symbol_value(args[optind]); searchinfo.s_parms.s_ushort.opt_string[searchinfo.vcnt] = mask ? 
NULL : args[optind]; } else value = htol(args[optind], FAULT_ON_ERROR, NULL); searchinfo.s_parms.s_ushort.value[searchinfo.vcnt] = value; if (ushort_overflow(value)) error(FATAL, "value too large for -h option: %lx %s\n", value, show_opt_string(&searchinfo)); searchinfo.vcnt++; break; case SEARCH_CHARS: /* parser can deliver empty strings */ if (strlen(args[optind])) { searchinfo.s_parms.s_chars.value[searchinfo.vcnt] = args[optind]; searchinfo.s_parms.s_chars.len[searchinfo.vcnt] = strlen(args[optind]); searchinfo.vcnt++; } break; } optind++; } if (!searchinfo.vcnt) cmd_usage(pc->curcmd, SYNOPSIS); switch (memtype) { case PHYSADDR: searchinfo.paddr_start = start; searchinfo.paddr_end = end; search_physical(&searchinfo); break; case UVADDR: searchinfo.vaddr_start = uvaddr_start; searchinfo.vaddr_end = uvaddr_end; search_virtual(&searchinfo); break; case KVADDR: if (XEN_HYPER_MODE()) { searchinfo.vaddr_start = kvaddr_start; searchinfo.vaddr_end = kvaddr_end; search_virtual(&searchinfo); break; } if (tflag || Tflag) { searchinfo.tasks_found = 0; tc = FIRST_CONTEXT(); for (i = 0; i < RUNNING_TASKS(); i++, tc++) { if (Tflag && !is_task_active(tc->task)) continue; searchinfo.vaddr_start = GET_STACKBASE(tc->task); searchinfo.vaddr_end = GET_STACKTOP(tc->task); searchinfo.task_context = tc; searchinfo.do_task_header = TRUE; search_virtual(&searchinfo); } break; } for (i = 0; i < ranges; i++) { if ((kvaddr_start >= vrp[i].end) || (kvaddr_end <= vrp[i].start)) continue; switch (vrp[i].type) { case KVADDR_UNITY_MAP: case KVADDR_START_MAP: if (Vflag) continue; break; case KVADDR_VMALLOC: case KVADDR_MODULES: case KVADDR_VMEMMAP: if (Kflag) continue; break; } pc->curcmd_private = vrp[i].type; searchinfo.vaddr_start = kvaddr_start > vrp[i].start ? kvaddr_start : vrp[i].start; searchinfo.vaddr_end = (kvaddr_end < vrp[i].end) ? kvaddr_end : vrp[i].end; search_virtual(&searchinfo); } break; } } /* * Do the work for cmd_search(). 
*/ static char * show_opt_string(struct searchinfo *si) { char *opt_string; int index; index = (si->val == UNUSED) ? si->vcnt : si->val; switch (si->mode) { case SEARCH_USHORT: opt_string = si->s_parms.s_ushort.opt_string[index]; break; case SEARCH_UINT: opt_string = si->s_parms.s_uint.opt_string[index]; break; case SEARCH_ULONG: default: opt_string = si->s_parms.s_ulong.opt_string[index]; break; } if (!opt_string) return ""; else if (FIRSTCHAR(opt_string) == '(') return opt_string; else { sprintf(si->buf, "(%s)", opt_string); return si->buf; } } #define SEARCHMASK(X) ((X) | mask) static void display_with_pre_and_post(void *bufptr, ulonglong addr, struct searchinfo *si) { int ctx, memtype, t, amount; ulonglong addr_d; ulong flag; char buf[BUFSIZE]; ctx = si->context; memtype = si->memtype; flag = HEXADECIMAL|NO_ERROR|ASCII_ENDLINE; switch (si->mode) { case SEARCH_USHORT: t = sizeof(ushort); break; case SEARCH_UINT: t = sizeof(uint); break; case SEARCH_ULONG: default: t = sizeof(ulong); break; } switch (t) { case 8: flag |= DISPLAY_64; break; case 4: flag |= DISPLAY_32; break; case 2: flag |= DISPLAY_16; break; } amount = ctx * t; addr_d = addr - amount; display_memory(addr_d, ctx, flag, memtype, NULL); BZERO(buf, BUFSIZE); fprintf(fp, "%s: ", mkstring(buf, VADDR_PRLEN, RJUST|LONGLONG_HEX, MKSTR(&addr))); switch(si->mode) { case SEARCH_ULONG: fprintf(fp, "%lx %s\n", *((ulong *)bufptr), show_opt_string(si)); break; case SEARCH_UINT: fprintf(fp, "%x %s\n", *((uint *)bufptr), show_opt_string(si)); break; case SEARCH_USHORT: fprintf(fp, "%x %s\n", *((ushort *)bufptr), show_opt_string(si)); break; } addr_d = addr + t; display_memory(addr_d, ctx, flag, memtype, NULL); fprintf(fp, "\n"); } static ulong search_ulong(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; ulong mask = si->s_parms.s_ulong.mask; for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*bufptr) == 
SEARCHMASK(si->s_parms.s_ulong.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(bufptr, addr, si); else fprintf(fp, "%lx: %lx %s\n", addr, *bufptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_ulong_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; ulong mask = si->s_parms.s_ulong.mask; for (i = 0; i < longcnt; i++, bufptr++, addr += sizeof(long)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*bufptr) == SEARCHMASK(si->s_parms.s_ulong.value[si->val])) { if (si->context) display_with_pre_and_post(bufptr, addr, si); else fprintf(fp, "%llx: %lx %s\n", addr, *bufptr, show_opt_string(si)); } } } return addr; } static ulong search_uint(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(int)); uint *ptr = (uint *)bufptr; uint mask = si->s_parms.s_uint.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_uint.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%lx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_uint_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(int)); uint *ptr = (uint *)bufptr; uint mask = si->s_parms.s_uint.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(int)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == 
SEARCHMASK(si->s_parms.s_uint.value[si->val])) { if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%llx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } static ulong search_ushort(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(short)); ushort *ptr = (ushort *)bufptr; ushort mask = si->s_parms.s_ushort.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_ushort.value[si->val])) { if (si->do_task_header) { print_task_header(fp, si->task_context, si->tasks_found); si->do_task_header = FALSE; si->tasks_found++; } if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%lx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* phys search uses ulonglong address representation */ static ulonglong search_ushort_p(ulong *bufptr, ulonglong addr, int longcnt, struct searchinfo *si) { int i; int cnt = longcnt * (sizeof(long)/sizeof(short)); ushort *ptr = (ushort *)bufptr; ushort mask = si->s_parms.s_ushort.mask; for (i = 0; i < cnt; i++, ptr++, addr += sizeof(short)) { for (si->val = 0; si->val < si->vcnt; si->val++) { if (SEARCHMASK(*ptr) == SEARCHMASK(si->s_parms.s_ushort.value[si->val])) { if (si->context) display_with_pre_and_post(ptr, addr, si); else fprintf(fp, "%llx: %x %s\n", addr, *ptr, show_opt_string(si)); } } } return addr; } /* * String search "memory" to remember possible matches that cross * page (or search buffer) boundaries. * The cross_match zone is the last strlen-1 chars of the page for * each of the possible targets. 
struct cross_match {
	int cnt;		/* possible hits in the cross_match zone */
	ulong addr;		/* starting addr of crossing match zone for this target */
	ulonglong addr_p;	/* for physical search */
	char hit[BUFSIZE];	/* array of hit locations in the crossing match zone */
	/* This should really be the much-smaller MAXARGLEN, but
	 * no one seems to be enforcing that in the parser. */
} cross[MAXARGS];

ulong cross_match_next_addr;	/* the expected starting value of the next page */
ulonglong cross_match_next_addr_p;	/* the expected starting value of the next physical page */

/* number of characters of context shown around each string match */
#define CHARS_CTX 56

/*
 * Print one string match: optional per-task header, the address, then
 * the two buffer fragments (used when a match straddles a page boundary;
 * ptr2/len2 are NULL/0 for a match wholly within one buffer).
 * Non-printable bytes are rendered as '.'.
 */
static void
report_match(struct searchinfo *si, ulong addr, char *ptr1, int len1, char *ptr2, int len2)
{
	int i;

	if (si->do_task_header) {
		print_task_header(fp, si->task_context, si->tasks_found);
		si->do_task_header = FALSE;
		si->tasks_found++;
	}

	fprintf(fp, "%lx: ", addr);
	for (i = 0; i < len1; i++) {
		if (isprint(ptr1[i]))
			fprintf(fp, "%c", ptr1[i]);
		else
			fprintf(fp, ".");
	}
	for (i = 0; i < len2; i++) {
		if (isprint(ptr2[i]))
			fprintf(fp, "%c", ptr2[i]);
		else
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");
}

/*
 * Scan one buffer for each target string, remembering partial matches
 * at the buffer's tail in cross[] so that matches spanning two
 * consecutive buffers are still reported.
 */
static ulong
search_chars(ulong *bufptr, ulong addr, int longcnt, struct searchinfo *si)
{
	int i, j;
	int len;
	char *target;
	int charcnt = longcnt * sizeof(long);
	char *ptr = (char *)bufptr;

	/* is this the first page of this search? */
	if (si->s_parms.s_chars.started_flag == 0) {
		for (j = 0; j < si->vcnt; j++) {
			cross[j].cnt = 0;	/* no hits */
		}
		cross_match_next_addr = (ulong)-1; /* no page match for first page */
		si->s_parms.s_chars.started_flag++;
	}

	/* This buffer directly follows the previous one: resolve any
	 * partial matches recorded at the previous buffer's tail. */
	if (cross_match_next_addr == addr) {
		for (j = 0; j < si->vcnt; j++) {
			if (cross[j].cnt) {
				target = si->s_parms.s_chars.value[j];
				len = si->s_parms.s_chars.len[j];
				for (i = 0; i < len - 1; i++) {
					/* confirm the remainder of the target
					 * matches the head of this buffer */
					if (cross[j].hit[i] &&
					    !strncmp(&target[len - 1 - i], ptr, i + 1))
						report_match(si, cross[j].addr + i, target, len,
							&ptr[i+1], CHARS_CTX - len);
				}
			}
		}
	}

	/* set up for possible cross matches on this page */
	cross_match_next_addr = addr + charcnt;
	for (j = 0; j < si->vcnt; j++) {
		len = si->s_parms.s_chars.len[j];
		cross[j].cnt = 0;
		/* start of the len-1 byte tail zone for this target */
		cross[j].addr = addr + longcnt * sizeof(long) - (len - 1);
		for (i = 0; i < len - 1; i++)
			cross[j].hit[i] = 0;
	}

	for (i = 0; i < charcnt; i++, ptr++, addr++) {
		for (j = 0; j < si->vcnt; j++) {
			target = si->s_parms.s_chars.value[j];
			len = si->s_parms.s_chars.len[j];
			if ((i + len) > charcnt) {
				/* check for cross match */
				if (!strncmp(target, ptr, charcnt - i)) {
					cross[j].hit[len + i - charcnt - 1] = 1;
					cross[j].cnt++;
				}
			} else {
				if (!strncmp(target, ptr, len)) {
					/* clamp the context to the buffer end */
					int slen = CHARS_CTX;
					if ((i + CHARS_CTX) > charcnt)
						slen = charcnt - i;
					report_match(si, addr, ptr, slen, (char *)0, 0);
				}
			}
		}
	}
	return addr;
}

/*
 * Physical-address flavor of report_match(): no task header, 64-bit
 * address formatting.
 */
static void
report_match_p(ulonglong addr, char *ptr1, int len1, char *ptr2, int len2)
{
	int i;

	fprintf(fp, "%llx: ", addr);
	for (i = 0; i < len1; i++) {
		if (isprint(ptr1[i]))
			fprintf(fp, "%c", ptr1[i]);
		else
			fprintf(fp, ".");
	}
	for (i = 0; i < len2; i++) {
		if (isprint(ptr2[i]))
			fprintf(fp, "%c", ptr2[i]);
		else
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");
}

/*
 * Physical-address flavor of search_chars(); identical cross-buffer
 * bookkeeping, but using the _p address fields.
 */
static ulonglong
search_chars_p(ulong *bufptr, ulonglong addr_p, int longcnt, struct searchinfo *si)
{
	int i, j;
	int len;
	char *target;
	int charcnt = longcnt * sizeof(long);
	char *ptr = (char *)bufptr;

	/* is this the first page of this search? */
	if (si->s_parms.s_chars.started_flag == 0) {
		for (j = 0; j < si->vcnt; j++) {
			cross[j].cnt = 0;	/* no hits */
		}
		cross_match_next_addr_p = (ulonglong)-1; /* no page match for first page */
		si->s_parms.s_chars.started_flag++;
	}

	if (cross_match_next_addr_p == addr_p) {
		for (j = 0; j < si->vcnt; j++) {
			if (cross[j].cnt) {
				target = si->s_parms.s_chars.value[j];
				len = si->s_parms.s_chars.len[j];
				for (i = 0; i < len - 1; i++) {
					if (cross[j].hit[i] &&
					    !strncmp(&target[len - 1 - i], ptr, i + 1))
						report_match_p(cross[j].addr_p + i, target, len,
							&ptr[i+1], CHARS_CTX - len);
				}
			}
		}
	}

	/* set up for possible cross matches on this page */
	cross_match_next_addr_p = addr_p + charcnt;
	for (j = 0; j < si->vcnt; j++) {
		len = si->s_parms.s_chars.len[j];
		cross[j].cnt = 0;
		cross[j].addr_p = addr_p + longcnt * sizeof(long) - (len - 1);
		for (i = 0; i < len - 1; i++)
			cross[j].hit[i] = 0;
	}

	for (i = 0; i < charcnt; i++, ptr++, addr_p++) {
		for (j = 0; j < si->vcnt; j++) {
			target = si->s_parms.s_chars.value[j];
			len = si->s_parms.s_chars.len[j];
			if ((i + len) > charcnt) {
				/* check for cross match */
				if (!strncmp(target, ptr, charcnt - i)) {
					cross[j].hit[len + i - charcnt - 1] = 1;
					cross[j].cnt++;
				}
			} else {
				if (!strncmp(target, ptr, len)) {
					int slen = CHARS_CTX;
					if ((i + CHARS_CTX) > charcnt)
						slen = charcnt - i;
					report_match_p(addr_p, ptr, slen, (char *)0, 0);
				}
			}
		}
	}
	return addr_p;
}

/*
 * Walk a virtual address range page by page, translating each page to
 * a physical address (except in Xen hypervisor mode, where reads stay
 * virtual), reading it, and handing the data to the mode-specific
 * scanner.  Unmapped pages are skipped via next_upage()/next_kpage().
 */
static void
search_virtual(struct searchinfo *si)
{
	ulong start, end;
	ulong pp, next, *ubp;
	int wordcnt, lastpage;
	ulong page;
	physaddr_t paddr;
	char *pagebuf;
	ulong pct, pages_read, pages_checked;
	time_t begin, finish;

	start = si->vaddr_start;
	end = si->vaddr_end;
	pages_read = pages_checked = 0;
	begin = finish = 0;
	pagebuf = GETBUF(PAGESIZE());

	/* the scanners work on long-sized units */
	if (start & (sizeof(long)-1)) {
		start &= ~(sizeof(long)-1);
		error(INFO, "rounding down start address to: %lx\n", start);
	}

	if (CRASHDEBUG(1)) {
		begin = time(NULL);
		fprintf(fp, "search_virtual: start: %lx end: %lx\n",
			start, end);
	}

	next = start;

	for (pp = VIRTPAGEBASE(start); next < end; next = pp) {
		pages_checked++;
		lastpage = (VIRTPAGEBASE(next) == VIRTPAGEBASE(end));
		if (LKCD_DUMPFILE())
			set_lkcd_nohash();

		/*
		 * Keep it virtual for Xen hypervisor.
		 */
		if (XEN_HYPER_MODE()) {
			if (!readmem(pp, KVADDR, pagebuf, PAGESIZE(),
			    "search page", RETURN_ON_ERROR|QUIET)) {
				if (CRASHDEBUG(1))
					fprintf(fp, "search suspended at: %lx\n", pp);
				goto done;
			}
			goto virtual;
		}

		switch (si->memtype) {
		case UVADDR:
			if (!uvtop(CURRENT_CONTEXT(), pp, &paddr, 0) ||
			    !phys_to_page(paddr, &page)) {
				/* untranslatable: jump to the next mapped page */
				if (!next_upage(CURRENT_CONTEXT(), pp, &pp))
					goto done;
				continue;
			}
			break;
		case KVADDR:
			if (!kvtop(CURRENT_CONTEXT(), pp, &paddr, 0) ||
			    !phys_to_page(paddr, &page)) {
				if (!next_kpage(pp, &pp))
					goto done;
				continue;
			}
			break;
		}

		if (!readmem(paddr, PHYSADDR, pagebuf, PAGESIZE(),
		    "search page", RETURN_ON_ERROR|QUIET)) {
			pp += PAGESIZE();
			continue;
		}
virtual:
		pages_read++;

		/* "next" may start mid-page on the first iteration */
		ubp = (ulong *)&pagebuf[next - pp];
		if (lastpage) {
			if (end == (ulong)(-1))
				wordcnt = PAGESIZE()/sizeof(long);
			else
				wordcnt = (end - next)/sizeof(long);
		} else
			wordcnt = (PAGESIZE() - (next - pp))/sizeof(long);

		switch (si->mode) {
		case SEARCH_ULONG:
			next = search_ulong(ubp, next, wordcnt, si);
			break;
		case SEARCH_UINT:
			next = search_uint(ubp, next, wordcnt, si);
			break;
		case SEARCH_USHORT:
			next = search_ushort(ubp, next, wordcnt, si);
			break;
		case SEARCH_CHARS:
			next = search_chars(ubp, next, wordcnt, si);
			break;
		default:
			/* unimplemented search type */
			next += wordcnt * (sizeof(long));
			break;
		}

		if (CRASHDEBUG(1))
			if ((pp % (1024*1024)) == 0)
				console("%lx\n", pp);

		pp += PAGESIZE();
	}

done:
	if (CRASHDEBUG(1)) {
		finish = time(NULL);
		pct = (pages_read * 100)/pages_checked;
		fprintf(fp, "search_virtual: read %ld (%ld%%) of %ld pages checked in %ld seconds\n",
			pages_read, pct, pages_checked, finish - begin);
	}

	FREEBUF(pagebuf);
}

/*
 * Walk a physical address range page by page and hand each readable
 * page to the mode-specific scanner; unreadable pages are skipped via
 * next_physpage().
 */
static void
search_physical(struct searchinfo *si)
{
	ulonglong start_in, end_in;
	ulong *ubp;
	int wordcnt, lastpage;
	ulonglong pnext, ppp;
	char *pagebuf;
	ulong pct, pages_read, pages_checked;
	time_t begin, finish;
	ulong page;

	start_in = si->paddr_start;
	end_in = si->paddr_end;
	pages_read = pages_checked = 0;
	begin = finish = 0;
	pagebuf = GETBUF(PAGESIZE());

	if (start_in & (sizeof(ulonglong)-1)) {
		start_in &= ~(sizeof(ulonglong)-1);
		error(INFO, "rounding down start address to: %llx\n",
			(ulonglong)start_in);
	}

	if (CRASHDEBUG(1)) {
		begin = time(NULL);
		fprintf(fp, "search_physical: start: %llx end: %llx\n",
			start_in, end_in);
	}

	pnext = start_in;

	for (ppp = PHYSPAGEBASE(start_in); pnext < end_in; pnext = ppp) {
		pages_checked++;
		lastpage = (PHYSPAGEBASE(pnext) == PHYSPAGEBASE(end_in));
		if (LKCD_DUMPFILE())
			set_lkcd_nohash();

		if (!phys_to_page(ppp, &page) ||
		    !readmem(ppp, PHYSADDR, pagebuf, PAGESIZE(),
		    "search page", RETURN_ON_ERROR|QUIET)) {
			if (!next_physpage(ppp, &ppp))
				break;
			continue;
		}

		pages_read++;
		ubp = (ulong *)&pagebuf[pnext - ppp];
		if (lastpage) {
			if (end_in == (ulonglong)(-1))
				wordcnt = PAGESIZE()/sizeof(long);
			else
				wordcnt = (end_in - pnext)/sizeof(long);
		} else
			wordcnt = (PAGESIZE() - (pnext - ppp))/sizeof(long);

		switch (si->mode) {
		case SEARCH_ULONG:
			pnext = search_ulong_p(ubp, pnext, wordcnt, si);
			break;
		case SEARCH_UINT:
			pnext = search_uint_p(ubp, pnext, wordcnt, si);
			break;
		case SEARCH_USHORT:
			pnext = search_ushort_p(ubp, pnext, wordcnt, si);
			break;
		case SEARCH_CHARS:
			pnext = search_chars_p(ubp, pnext, wordcnt, si);
			break;
		default:
			/* unimplemented search type */
			pnext += wordcnt * (sizeof(long));
			break;
		}

		ppp += PAGESIZE();
	}

	if (CRASHDEBUG(1)) {
		finish = time(NULL);
		pct = (pages_read * 100)/pages_checked;
		fprintf(fp, "search_physical: read %ld (%ld%%) of %ld pages checked in %ld seconds\n",
			pages_read, pct, pages_checked, finish - begin);
	}

	FREEBUF(pagebuf);
}

/*
 * Return the next mapped user virtual address page that comes after
 * the passed-in address.
 */
static int
next_upage(struct task_context *tc, ulong vaddr, ulong *nextvaddr)
{
	ulong vma, total_vm;
	char *vma_buf;
	ulong vm_start, vm_end;
	ulong vm_next;

	if (!tc->mm_struct)
		return FALSE;

	/* refresh the cached mm_struct for this task */
	fill_mm_struct(tc->mm_struct);
	vma = ULONG(tt->mm_struct + OFFSET(mm_struct_mmap));
	total_vm = ULONG(tt->mm_struct + OFFSET(mm_struct_total_vm));

	if (!vma || (total_vm == 0))
		return FALSE;

	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */

	/* Walk the task's vma list for the first vma containing, or
	 * starting above, the candidate address. */
	for ( ; vma; vma = vm_next) {
		vma_buf = fill_vma_cache(vma);
		vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start));
		vm_end = ULONG(vma_buf + OFFSET(vm_area_struct_vm_end));
		vm_next = ULONG(vma_buf + OFFSET(vm_area_struct_vm_next));

		if (vaddr <= vm_start) {
			*nextvaddr = vm_start;
			return TRUE;
		}

		if ((vaddr > vm_start) && (vaddr < vm_end)) {
			*nextvaddr = vaddr;
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Return the next mapped kernel virtual address in the vmlist
 * that is equal to or comes after the passed-in address.
 * Prevent repeated calls to dump_vmlist() by only doing it
 * one time for dumpfiles, or one time per (active) command.
 */
static int
next_vmlist_vaddr(ulong vaddr, ulong *nextvaddr)
{
	int i, retval;
	ulong cnt;
	struct meminfo meminfo, *mi;
	static int count = 0;
	static struct vmlist *vmlist = NULL;
	static ulong cmdgencur = BADVAL;

	/*
	 * Search the stashed vmlist if possible.  On a live system the
	 * stash is invalidated whenever the command generation changes.
	 */
	if (vmlist && ACTIVE()) {
		if (pc->cmdgencur != cmdgencur) {
			free(vmlist);
			vmlist = NULL;
		}
	}

	if (vmlist) {
		for (i = 0, retval = FALSE; i < count; i++) {
			if (vaddr <= vmlist[i].addr) {
				*nextvaddr = vmlist[i].addr;
				retval = TRUE;
				break;
			}
			if (vaddr < (vmlist[i].addr + vmlist[i].size)) {
				*nextvaddr = vaddr;
				retval = TRUE;
				break;
			}
		}
		return retval;
	}

	/* No stash: gather the vmlist now (count first, then entries). */
	mi = &meminfo;
	BZERO(mi, sizeof(struct meminfo));
	mi->flags = GET_VMLIST_COUNT;
	dump_vmlist(mi);
	cnt = mi->retval;
	if (!cnt)
		return FALSE;

	mi->vmlist = (struct vmlist *)GETBUF(sizeof(struct vmlist)*cnt);
	mi->flags = GET_VMLIST;
	dump_vmlist(mi);

	for (i = 0, retval = FALSE; i < cnt; i++) {
		if (vaddr <= mi->vmlist[i].addr) {
			*nextvaddr = mi->vmlist[i].addr;
			retval = TRUE;
			break;
		}
		if (vaddr < (mi->vmlist[i].addr + mi->vmlist[i].size)) {
			*nextvaddr = vaddr;
			retval = TRUE;
			break;
		}
	}

	/* Stash a private copy for subsequent calls; a malloc failure
	 * here is benign -- we just won't cache. */
	if (!vmlist) {
		vmlist = (struct vmlist *)
			malloc(sizeof(struct vmlist)*cnt);
		if (vmlist) {
			BCOPY(mi->vmlist, vmlist,
				sizeof(struct vmlist)*cnt);
			count = cnt;
			cmdgencur = pc->cmdgencur;
		}
	}

	FREEBUF(mi->vmlist);
	return retval;
}

/*
 * Determine whether a virtual address is inside a vmlist segment.
 */
int
in_vmlist_segment(ulong vaddr)
{
	ulong next;

	if (next_vmlist_vaddr(vaddr, &next) &&
	    (vaddr == next))
		return TRUE;

	return FALSE;
}

/*
 * Return the next kernel module virtual address that is
 * equal to or comes after the passed-in address.
 */
static int
next_module_vaddr(ulong vaddr, ulong *nextvaddr)
{
	int i;
	ulong start, end;
	struct load_module *lm;

	for (i = 0; i < st->mods_installed; i++) {
		lm = &st->load_modules[i];
		start = lm->mod_base;
		end = lm->mod_base + lm->mod_size;

		if (vaddr >= end)
			continue;

		/*
		 * Either below or in this module.
		 */
		if (vaddr < start)
			*nextvaddr = start;
		else
			*nextvaddr = vaddr;
		return TRUE;
	}

	return FALSE;
}

/*
 * Return the next kernel virtual address page in a designated
 * kernel virtual address range that comes after the passed-in,
 * untranslatable, address.
 */
static int
next_kpage(ulong vaddr, ulong *nextvaddr)
{
	ulong vaddr_orig;

	vaddr_orig = vaddr;
	vaddr = VIRTPAGEBASE(vaddr) + PAGESIZE();  /* first possible page */

	if (vaddr < vaddr_orig)  /* wrapped back to zero? */
		return FALSE;

	/* The active search region was stashed in curcmd_private by the
	 * caller; dispatch to the matching "next page" helper. */
	switch (pc->curcmd_private)
	{
	case KVADDR_UNITY_MAP:
		return next_identity_mapping(vaddr, nextvaddr);
	case KVADDR_VMALLOC:
		return next_vmlist_vaddr(vaddr, nextvaddr);
	case KVADDR_VMEMMAP:
		*nextvaddr = vaddr;
		return TRUE;
	case KVADDR_START_MAP:
		*nextvaddr = vaddr;
		return TRUE;
	case KVADDR_MODULES:
		return next_module_vaddr(vaddr, nextvaddr);
	}

	return FALSE;
}

/*
 * Return the next physical address page that comes after
 * the passed-in, unreadable, address.
 */
static int
next_physpage(ulonglong paddr, ulonglong *nextpaddr)
{
	int n;
	ulonglong node_start;
	ulonglong node_end;
	struct node_table *nt;

	/* Walk the memory nodes for the first one containing, or
	 * starting above, the passed-in physical address. */
	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		node_start = nt->start_paddr;
		node_end = nt->start_paddr + (nt->size * PAGESIZE());

		if (paddr >= node_end)
			continue;

		if (paddr < node_start) {
			*nextpaddr = node_start;
			return TRUE;
		}

		if (paddr < node_end) {
			*nextpaddr = paddr + PAGESIZE();
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Sum up the total and free hugetlb pages, expressed in base pages.
 * Modern kernels export the per-size "hstates" array; older kernels
 * only have the global nr_huge_pages/free_huge_pages counters, for
 * which a fixed huge page shift is assumed (4MB on non-PAE X86,
 * otherwise 2MB).  Returns FALSE only when the hstate layout cannot
 * be resolved.
 */
static int
get_hugetlb_total_pages(ulong *nr_total_pages, ulong *nr_total_free_pages)
{
	ulong hstate_p, vaddr;
	int i, len;
	ulong nr_huge_pages;
	ulong free_huge_pages;
	uint horder;

	*nr_total_pages = *nr_total_free_pages = 0;

	if (kernel_symbol_exists("hstates")) {

		if (INVALID_SIZE(hstate) ||
		    INVALID_MEMBER(hstate_order) ||
		    INVALID_MEMBER(hstate_nr_huge_pages) ||
		    INVALID_MEMBER(hstate_free_huge_pages))
			return FALSE;

		len = get_array_length("hstates", NULL, 0);
		hstate_p = symbol_value("hstates");

		for (i = 0; i < len; i++) {
			vaddr = hstate_p + (SIZE(hstate) * i);

			readmem(vaddr + OFFSET(hstate_order),
				KVADDR, &horder, sizeof(uint),
				"hstate_order", FAULT_ON_ERROR);

			if (!horder)  /* unused hstate slot */
				continue;

			readmem(vaddr + OFFSET(hstate_nr_huge_pages),
				KVADDR, &nr_huge_pages, sizeof(ulong),
				"hstate_nr_huge_pages", FAULT_ON_ERROR);

			readmem(vaddr + OFFSET(hstate_free_huge_pages),
				KVADDR, &free_huge_pages, sizeof(ulong),
				"hstate_free_huge_pages", FAULT_ON_ERROR);

			/* order -> base-page multiple */
			*nr_total_pages += nr_huge_pages * (1 << horder);
			*nr_total_free_pages += free_huge_pages *
				(1 << horder);
		}
	} else if (kernel_symbol_exists("nr_huge_pages")) {
		unsigned long hpage_shift = 21;

		if ((machine_type("X86") && !(machdep->flags & PAE)))
			hpage_shift = 22;
		get_symbol_data("nr_huge_pages",
			sizeof(ulong), &nr_huge_pages);
		get_symbol_data("free_huge_pages",
			sizeof(ulong), &free_huge_pages);

		*nr_total_pages = nr_huge_pages * ((1 << hpage_shift) /
			machdep->pagesize);
		*nr_total_free_pages = free_huge_pages *
			((1 << hpage_shift) / machdep->pagesize);
	}
	return TRUE;
}

/*
 * Display swap statistics.
 */
void
cmd_swap(void)
{
	int c;

	while ((c = getopt(argcnt, args, "")) != EOF) {
		switch(c)
		{
		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	dump_swap_info(VERBOSE, NULL, NULL);
}

/*
 * Do the work for cmd_swap().
 */

#define SWP_USED 1
#define SWAP_MAP_BAD 0x8000

char *swap_info_hdr = \
"SWAP_INFO_STRUCT TYPE SIZE USED PCT PRI FILENAME\n";

/*
 * Walk the kernel's swap_info array (either an array of structures,
 * SWAPINFO_V1, or an array of pointers, SWAPINFO_V2) and report each
 * active swap area.  With VERBOSE set, one formatted line per area is
 * printed; the optional totalswap_pages/totalused_pages pointers
 * receive the aggregate page counts.  Returns FALSE only when a V1
 * swap_map is unreadable and RETURN_ON_ERROR was requested, in which
 * case the failing address and index are passed back through the
 * two pointers.
 */
static int
dump_swap_info(ulong swapflags, ulong *totalswap_pages,
	ulong *totalused_pages)
{
	int i, j;
	int swap_device, prio;
	ulong pages, usedswap;
	ulong flags, swap_file, max, swap_map, pct;
	ulong vfsmnt;
	ulong swap_info, swap_info_ptr;
	ushort *smap;
	ulong inuse_pages, totalswap, totalused;
	char *devname;
	char buf[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];

	if (!symbol_exists("nr_swapfiles"))
		error(FATAL, "nr_swapfiles doesn't exist in this kernel!\n");

	if (!symbol_exists("swap_info"))
		error(FATAL, "swap_info doesn't exist in this kernel!\n");

	swap_info_init();

	swap_info = symbol_value("swap_info");

	if (swapflags & VERBOSE)
		fprintf(fp, "%s", swap_info_hdr);

	totalswap = totalused = 0;

	/* V1: step by structure size; V2: step by pointer size. */
	for (i = 0; i < vt->nr_swapfiles; i++,
	     swap_info += (vt->flags & SWAPINFO_V1 ?
	     SIZE(swap_info_struct) : sizeof(void *))) {
		if (vt->flags & SWAPINFO_V2) {
			if (!readmem(swap_info, KVADDR, &swap_info_ptr,
			    sizeof(void *), "swap_info pointer",
			    QUIET|RETURN_ON_ERROR))
				continue;
			if (!swap_info_ptr)
				continue;
			fill_swap_info(swap_info_ptr);
		} else
			fill_swap_info(swap_info);

		/* member widths vary across kernel versions */
		if (MEMBER_SIZE("swap_info_struct", "flags") == sizeof(uint))
			flags = UINT(vt->swap_info_struct +
				OFFSET(swap_info_struct_flags));
		else
			flags = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_flags));

		if (!(flags & SWP_USED))
			continue;

		swap_file = ULONG(vt->swap_info_struct +
			OFFSET(swap_info_struct_swap_file));

		swap_device = INT(vt->swap_info_struct +
			OFFSET_OPTION(swap_info_struct_swap_device,
			swap_info_struct_old_block_size));

		pages = INT(vt->swap_info_struct +
			OFFSET(swap_info_struct_pages));

		totalswap += pages;
		pages <<= (PAGESHIFT() - 10);  /* pages -> kilobytes */
		inuse_pages = 0;

		if (MEMBER_SIZE("swap_info_struct", "prio") == sizeof(short))
			prio = SHORT(vt->swap_info_struct +
				OFFSET(swap_info_struct_prio));
		else
			prio = INT(vt->swap_info_struct +
				OFFSET(swap_info_struct_prio));

		if (MEMBER_SIZE("swap_info_struct", "max") == sizeof(int))
			max = UINT(vt->swap_info_struct +
				OFFSET(swap_info_struct_max));
		else
			max = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_max));

		if (VALID_MEMBER(swap_info_struct_inuse_pages)) {
			if (MEMBER_SIZE("swap_info_struct", "inuse_pages") ==
			    sizeof(int))
				inuse_pages = UINT(vt->swap_info_struct +
					OFFSET(swap_info_struct_inuse_pages));
			else
				inuse_pages = ULONG(vt->swap_info_struct +
					OFFSET(swap_info_struct_inuse_pages));
		}

		swap_map = ULONG(vt->swap_info_struct +
			OFFSET(swap_info_struct_swap_map));

		/* Resolve the swap area's pathname. */
		if (swap_file) {
			if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
				vfsmnt = ULONG(vt->swap_info_struct +
					OFFSET(swap_info_struct_swap_vfsmnt));
				get_pathname(swap_file, buf, BUFSIZE,
					1, vfsmnt);
			} else if (VALID_MEMBER
				(swap_info_struct_old_block_size)) {
				devname = vfsmount_devname(
					file_to_vfsmnt(swap_file),
					buf1, BUFSIZE);
				get_pathname(file_to_dentry(swap_file),
					buf, BUFSIZE, 1,
					file_to_vfsmnt(swap_file));
				if ((STREQ(devname, "devtmpfs") ||
				    STREQ(devname, "udev")) &&
				    !STRNEQ(buf, "/dev/"))
					string_insert("/dev", buf);
			} else {
				get_pathname(swap_file, buf, BUFSIZE, 1, 0);
			}
		} else
			sprintf(buf, "(unknown)");

		/* V1 keeps a readable per-slot usage map; V2 relies on
		 * the inuse_pages counter gathered above. */
		smap = NULL;
		if (vt->flags & SWAPINFO_V1) {
			smap = (ushort *)GETBUF(sizeof(ushort) * max);

			if (!readmem(swap_map, KVADDR, smap,
			    sizeof(ushort) * max,
			    "swap_info swap_map data",
			    RETURN_ON_ERROR|QUIET)) {
				if (swapflags & RETURN_ON_ERROR) {
					*totalswap_pages = swap_map;
					*totalused_pages = i;
					FREEBUF(smap);
					return FALSE;
				} else
					error(FATAL,
			    "swap_info[%d].swap_map at %lx is inaccessible\n",
						i, swap_map);
			}
		}

		usedswap = 0;
		if (smap) {
			for (j = 0; j < max; j++) {
				switch (smap[j])
				{
				case SWAP_MAP_BAD:
				case 0:
					continue;
				default:
					usedswap++;
				}
			}
			FREEBUF(smap);
		} else
			usedswap = inuse_pages;

		totalused += usedswap;
		usedswap <<= (PAGESHIFT() - 10);  /* pages -> kilobytes */
		pct = (usedswap * 100)/pages;

		if (swapflags & VERBOSE) {
			sprintf(buf1, "%lx", (vt->flags & SWAPINFO_V2) ?
				swap_info_ptr : swap_info);
			sprintf(buf2, "%ldk", pages);
			sprintf(buf3, "%ldk", usedswap);
			sprintf(buf4, "%2ld%%", pct);
			sprintf(buf5, "%d", prio);
			fprintf(fp, "%s %s %s %s %s %s %s\n",
			    mkstring(buf1, MAX(VADDR_PRLEN,
			    strlen("SWAP_INFO_STRUCT")), CENTER|LJUST, NULL),
			    swap_device ? "PARTITION" : " FILE ",
			    mkstring(buf2, 10, CENTER|RJUST, NULL),
			    mkstring(buf3, 10, CENTER|RJUST, NULL),
			    mkstring(buf4, 4, CENTER|RJUST, NULL),
			    mkstring(buf5, 4, RJUST, NULL),
			    buf);
		}
	}

	if (totalswap_pages)
		*totalswap_pages = totalswap;
	if (totalused_pages)
		*totalused_pages = totalused;

	return TRUE;
}

/*
 * Determine the swap_info_struct usage.
 */
static void
swap_info_init(void)
{
	struct gnu_request *req;

	if (vt->flags & (SWAPINFO_V1|SWAPINFO_V2))
		return;  /* already determined */

	/* Ask gdb for the type of the swap_info symbol: an array of
	 * structures is the V1 layout, an array of pointers is V2.
	 * If the type cannot be determined, fall back on the kernel
	 * version at which the layout changed. */
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));

	if ((get_symbol_type("swap_info", NULL, req) == TYPE_CODE_ARRAY) &&
	    ((req->target_typecode == TYPE_CODE_PTR) ||
	     (req->target_typecode == TYPE_CODE_STRUCT))) {
		switch (req->target_typecode)
		{
		case TYPE_CODE_STRUCT:
			vt->flags |= SWAPINFO_V1;
			break;
		case TYPE_CODE_PTR:
			vt->flags |= SWAPINFO_V2;
			break;
		}
	} else {
		if (THIS_KERNEL_VERSION >= LINUX(2,6,33))
			vt->flags |= SWAPINFO_V2;
		else
			vt->flags |= SWAPINFO_V1;
	}

	FREEBUF(req);
}

/*
 * Translate a PTE into a swap device and offset string.
 */
char *
swap_location(ulonglong pte, char *buf)
{
	char swapdev[BUFSIZE];

	if (!pte)
		return NULL;

	if (!symbol_exists("nr_swapfiles") ||
	    !symbol_exists("swap_info"))
		return NULL;

	/* swap-entry encoding macros differ before/after 2.6 */
	if (THIS_KERNEL_VERSION >= LINUX(2,6,0))
		sprintf(buf, "%s OFFSET: %lld",
			get_swapdev(__swp_type(pte), swapdev),
			(ulonglong)__swp_offset(pte));
	else
		sprintf(buf, "%s OFFSET: %llx",
			get_swapdev(SWP_TYPE(pte), swapdev),
			(ulonglong)SWP_OFFSET(pte));

	return buf;
}

/*
 * Given the type field from a PTE, return the name of the swap device.
 */
static char *
get_swapdev(ulong type, char *buf)
{
	unsigned int i, swap_info_len;
	ulong swap_info, swap_info_ptr, swap_file;
	ulong vfsmnt;
	char *devname;
	char buf1[BUFSIZE];

	swap_info_init();

	swap_info = symbol_value("swap_info");

	swap_info_len = (i = ARRAY_LENGTH(swap_info)) ?
		i : get_array_length("swap_info", NULL, 0);

	sprintf(buf, "(unknown swap location)");

	if (type >= swap_info_len)
		return buf;

	/* V1: index into an array of structures; V2: index into an
	 * array of pointers, then follow the pointer. */
	switch (vt->flags & (SWAPINFO_V1|SWAPINFO_V2))
	{
	case SWAPINFO_V1:
		swap_info += type * SIZE(swap_info_struct);
		fill_swap_info(swap_info);
		break;

	case SWAPINFO_V2:
		swap_info += type * sizeof(void *);
		if (!readmem(swap_info, KVADDR, &swap_info_ptr,
		    sizeof(void *), "swap_info pointer",
		    RETURN_ON_ERROR|QUIET))
			return buf;
		if (!swap_info_ptr)
			return buf;
		fill_swap_info(swap_info_ptr);
		break;
	}

	swap_file = ULONG(vt->swap_info_struct +
		OFFSET(swap_info_struct_swap_file));

	if (swap_file) {
		if (VALID_MEMBER(swap_info_struct_swap_vfsmnt)) {
			vfsmnt = ULONG(vt->swap_info_struct +
				OFFSET(swap_info_struct_swap_vfsmnt));
			get_pathname(swap_file, buf, BUFSIZE, 1, vfsmnt);
		} else if (VALID_MEMBER
			(swap_info_struct_old_block_size)) {
			devname = vfsmount_devname(file_to_vfsmnt(swap_file),
				buf1, BUFSIZE);
			get_pathname(file_to_dentry(swap_file),
				buf, BUFSIZE, 1, file_to_vfsmnt(swap_file));
			if ((STREQ(devname, "devtmpfs") ||
			    STREQ(devname, "udev")) &&
			    !STRNEQ(buf, "/dev/"))
				string_insert("/dev", buf);
		} else {
			get_pathname(swap_file, buf, BUFSIZE, 1, 0);
		}
	}

	return buf;
}

/*
 * If not currently stashed, cache the passed-in swap_info_struct.
 */
static void
fill_swap_info(ulong swap_info)
{
	if (vt->last_swap_read == swap_info)
		return;  /* already cached */

	if (!vt->swap_info_struct && !(vt->swap_info_struct = (char *)
	    malloc(SIZE(swap_info_struct))))
		error(FATAL, "cannot malloc swap_info_struct space\n");

	readmem(swap_info, KVADDR, vt->swap_info_struct,
		SIZE(swap_info_struct), "fill_swap_info",
		FAULT_ON_ERROR);

	vt->last_swap_read = swap_info;
}

/*
 * If active, clear references to the swap_info references.
 */
void
clear_swap_info_cache(void)
{
	if (ACTIVE())
		vt->last_swap_read = 0;
}

/*
 * Translate a vm_area_struct and virtual address into a filename
 * and offset string.
 */

#define PAGE_CACHE_SHIFT (machdep->pageshift) /* This is supposed to change!
*/ static char * vma_file_offset(ulong vma, ulong vaddr, char *buf) { ulong vm_file, vm_start, vm_offset, vm_pgoff, dentry, offset; ulong vfsmnt; char file[BUFSIZE]; char *vma_buf, *file_buf; if (!vma) return NULL; vma_buf = fill_vma_cache(vma); vm_file = ULONG(vma_buf + OFFSET(vm_area_struct_vm_file)); if (!vm_file) goto no_file_offset; file_buf = fill_file_cache(vm_file); dentry = ULONG(file_buf + OFFSET(file_f_dentry)); if (!dentry) goto no_file_offset; file[0] = NULLCHAR; if (VALID_MEMBER(file_f_vfsmnt)) { vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); get_pathname(dentry, file, BUFSIZE, 1, vfsmnt); } else get_pathname(dentry, file, BUFSIZE, 1, 0); if (!strlen(file)) goto no_file_offset; vm_start = ULONG(vma_buf + OFFSET(vm_area_struct_vm_start)); vm_offset = vm_pgoff = 0xdeadbeef; if (VALID_MEMBER(vm_area_struct_vm_offset)) vm_offset = ULONG(vma_buf + OFFSET(vm_area_struct_vm_offset)); else if (VALID_MEMBER(vm_area_struct_vm_pgoff)) vm_pgoff = ULONG(vma_buf + OFFSET(vm_area_struct_vm_pgoff)); else goto no_file_offset; offset = 0; if (vm_offset != 0xdeadbeef) offset = VIRTPAGEBASE(vaddr) - vm_start + vm_offset; else if (vm_pgoff != 0xdeadbeef) { offset = ((vaddr - vm_start) >> PAGE_CACHE_SHIFT) + vm_pgoff; offset <<= PAGE_CACHE_SHIFT; } sprintf(buf, "%s OFFSET: %lx", file, offset); return buf; no_file_offset: return NULL; } /* * Translate a PTE into its physical address and flags. */ void cmd_pte(void) { int c; ulonglong pte; while ((c = getopt(argcnt, args, "")) != EOF) { switch(c) { default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, SYNOPSIS); while (args[optind]) { pte = htoll(args[optind], FAULT_ON_ERROR, NULL); machdep->translate_pte((ulong)pte, NULL, pte); optind++; } } static char *node_zone_hdr = "ZONE NAME SIZE"; /* * On systems supporting memory nodes, display the basic per-node data. 
 */
/*
 * Dual-purpose routine: with MEMORY_NODES_INITIALIZE it populates
 * vt->node_table[] by walking the kernel's pgdat list (or the single
 * contig_page_data on non-NUMA kernels); with MEMORY_NODES_DUMP it
 * re-walks the stashed table and prints the per-node/per-zone report.
 */
static void
dump_memory_nodes(int initialize)
{
	int i, j;
	int n, id, node, flen, slen, badaddr;
	ulong node_mem_map;
	ulong temp_node_start_paddr;
	ulonglong node_start_paddr;
	ulong node_start_pfn;
	ulong node_start_mapnr;
	ulong node_spanned_pages, node_present_pages;
	ulong free_pages, zone_size, node_size, cum_zone_size;
	ulong zone_start_paddr, zone_start_mapnr, zone_mem_map;
	physaddr_t phys;
	ulong pp;
	ulong zone_start_pfn;
	ulong bdata;
	ulong pgdat;
	ulong node_zones;
	ulong value;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	char buf5[BUFSIZE];
	struct node_table *nt;

	node = slen = 0;

	/* Non-NUMA kernel: synthesize a single node-0 entry from the
	 * global counters and return. */
	if (!(vt->flags & (NODES|NODES_ONLINE)) && initialize) {
		nt = &vt->node_table[0];
		nt->node_id = 0;
		if (symbol_exists("contig_page_data"))
			nt->pgdat = symbol_value("contig_page_data");
		else
			nt->pgdat = 0;
		nt->size = vt->total_pages;
		nt->mem_map = vt->mem_map;
		nt->start_paddr = 0;
		nt->start_mapnr = 0;

		if (CRASHDEBUG(1)) {
			fprintf(fp, "node_table[%d]: \n", 0);
			fprintf(fp, "             id: %d\n", nt->node_id);
			fprintf(fp, "          pgdat: %lx\n", nt->pgdat);
			fprintf(fp, "           size: %ld\n", nt->size);
			fprintf(fp, "        present: %ld\n", nt->present);
			fprintf(fp, "        mem_map: %lx\n", nt->mem_map);
			fprintf(fp, "    start_paddr: %llx\n", nt->start_paddr);
			fprintf(fp, "    start_mapnr: %ld\n", nt->start_mapnr);
		}

		return;
	}

	if (initialize) {
		pgdat = UNINITIALIZED;
		/*
		 * This order may have to change based upon architecture...
		 */
		if (symbol_exists("pgdat_list") &&
		    (VALID_MEMBER(pglist_data_node_next) ||
		     VALID_MEMBER(pglist_data_pgdat_next))) {
			get_symbol_data("pgdat_list", sizeof(void *), &pgdat);
			vt->flags &= ~NODES_ONLINE;
		} else if (vt->flags & NODES_ONLINE) {
			if ((node = next_online_node(0)) < 0) {
				error(WARNING,
			"cannot determine first node from node_online_map\n\n");
				return;
			}
			if (!(pgdat = next_online_pgdat(node))) {
				error(WARNING,
	"cannot determine pgdat list for this kernel/architecture\n\n");
				return;
			}
		}
	} else
		pgdat = vt->node_table[0].pgdat;

	if (initialize && (pgdat == UNINITIALIZED)) {
		error(WARNING, "cannot initialize pgdat list\n\n");
		return;
	}

	/* Walk the pgdat list, one iteration per node. */
	for (n = 0, badaddr = FALSE; pgdat; n++) {
		if (n >= vt->numnodes)
			error(FATAL,
			    "numnodes out of sync with pgdat_list?\n");

		nt = &vt->node_table[n];

		readmem(pgdat+OFFSET(pglist_data_node_id), KVADDR, &id,
			sizeof(int), "pglist node_id", FAULT_ON_ERROR);

		if (VALID_MEMBER(pglist_data_node_mem_map)) {
			readmem(pgdat+OFFSET(pglist_data_node_mem_map),
				KVADDR, &node_mem_map, sizeof(ulong),
				"node_mem_map", FAULT_ON_ERROR);
		} else {
			node_mem_map = BADADDR;
			badaddr = TRUE;
		}

		/* Derive the node's starting physical address from
		 * whichever member this kernel version provides. */
		if (VALID_MEMBER(pglist_data_node_start_paddr)) {
			readmem(pgdat+OFFSET(pglist_data_node_start_paddr),
				KVADDR, &temp_node_start_paddr, sizeof(ulong),
				"pglist node_start_paddr", FAULT_ON_ERROR);
			node_start_paddr = temp_node_start_paddr;
		} else if (VALID_MEMBER(pglist_data_node_start_pfn)) {
			readmem(pgdat+OFFSET(pglist_data_node_start_pfn),
				KVADDR, &node_start_pfn, sizeof(ulong),
				"pglist node_start_pfn", FAULT_ON_ERROR);
			node_start_mapnr = node_start_pfn;
			node_start_paddr = PTOB(node_start_pfn);
			if (badaddr && IS_SPARSEMEM()) {
				if (!verify_pfn(node_start_pfn))
					error(WARNING,
					    "questionable node_start_pfn: %lx\n",
						node_start_pfn);
				phys = PTOB(node_start_pfn);
				if (phys_to_page(phys, &pp))
					node_mem_map = pp;
			}
		} else
			error(INFO,
			    "cannot determine zone starting physical address\n");

		if (VALID_MEMBER(pglist_data_node_start_mapnr))
			readmem(pgdat+OFFSET(pglist_data_node_start_mapnr),
				KVADDR, &node_start_mapnr, sizeof(ulong),
				"pglist node_start_mapnr", FAULT_ON_ERROR);

		if (VALID_MEMBER(pglist_data_node_size))
			readmem(pgdat+OFFSET(pglist_data_node_size),
				KVADDR, &node_size, sizeof(ulong),
				"pglist node_size", FAULT_ON_ERROR);
		else if (VALID_MEMBER(pglist_data_node_spanned_pages)) {
			readmem(pgdat+OFFSET(pglist_data_node_spanned_pages),
				KVADDR, &node_spanned_pages, sizeof(ulong),
				"pglist node_spanned_pages", FAULT_ON_ERROR);
			node_size = node_spanned_pages;
		} else
			error(INFO, "cannot determine zone size\n");

		if (VALID_MEMBER(pglist_data_node_present_pages))
			readmem(pgdat+OFFSET(pglist_data_node_present_pages),
				KVADDR, &node_present_pages, sizeof(ulong),
				"pglist node_present_pages", FAULT_ON_ERROR);
		else
			node_present_pages = 0;

		if (VALID_MEMBER(pglist_data_bdata))
			readmem(pgdat+OFFSET(pglist_data_bdata), KVADDR,
				&bdata, sizeof(ulong),
				"pglist bdata", FAULT_ON_ERROR);
		else
			bdata = BADADDR;

		if (initialize) {
			nt->node_id = id;
			nt->pgdat = pgdat;
			if (VALID_MEMBER(zone_struct_memsize))
				nt->size = 0;  /* initialize below */
			else
				nt->size = node_size;
			nt->present = node_present_pages;
			nt->mem_map = node_mem_map;
			nt->start_paddr = node_start_paddr;
			nt->start_mapnr = node_start_mapnr;

			if (CRASHDEBUG(1)) {
				fprintf(fp, "node_table[%d]: \n", n);
				fprintf(fp, "             id: %d\n", nt->node_id);
				fprintf(fp, "          pgdat: %lx\n", nt->pgdat);
				fprintf(fp, "           size: %ld\n", nt->size);
				fprintf(fp, "        present: %ld\n", nt->present);
				fprintf(fp, "        mem_map: %lx\n", nt->mem_map);
				fprintf(fp, "    start_paddr: %llx\n", nt->start_paddr);
				fprintf(fp, "    start_mapnr: %ld\n", nt->start_mapnr);
			}
		}

		if (!initialize) {
			if (n) {
				fprintf(fp, "\n");
				pad_line(fp, slen, '-');
			}

			flen = MAX(VADDR_PRLEN, strlen("BOOTMEM_DATA"));
			fprintf(fp, "%sNODE %s %s %s %s\n",
			    n ? "\n\n" : "",
			    mkstring(buf1, 8, CENTER, "SIZE"),
			    mkstring(buf2, flen, CENTER|LJUST, "PGLIST_DATA"),
			    mkstring(buf3, flen, CENTER|LJUST, "BOOTMEM_DATA"),
			    mkstring(buf4, flen, CENTER|LJUST, "NODE_ZONES"));

			node_zones = pgdat + OFFSET(pglist_data_node_zones);
			sprintf(buf5, " %2d %s %s %s %s\n", id,
			    mkstring(buf1, 8, CENTER|LJUST|LONG_DEC,
				MKSTR(node_size)),
			    mkstring(buf2, flen, CENTER|LJUST|LONG_HEX,
				MKSTR(pgdat)),
			    bdata == BADADDR ?
			    mkstring(buf3, flen, CENTER, "----") :
			    mkstring(buf3, flen, CENTER|LONG_HEX,
				MKSTR(bdata)),
			    mkstring(buf4, flen, CENTER|LJUST|LONG_HEX,
				MKSTR(node_zones)));
			fprintf(fp, "%s", buf5);

			j = 12 + strlen(buf1) + strlen(buf2) + strlen(buf3) +
				count_leading_spaces(buf4);
			for (i = 1; i < vt->nr_zones; i++) {
				node_zones += SIZE_OPTION(zone_struct, zone);
				INDENT(j);
				fprintf(fp, "%lx\n", node_zones);
			}

			fprintf(fp, "%s START_PADDR START_MAPNR\n",
			    mkstring(buf1, VADDR_PRLEN, CENTER|LJUST,
				"MEM_MAP"));
			fprintf(fp, "%s %s %s\n",
			    mkstring(buf1, VADDR_PRLEN,
				CENTER|LONG_HEX, MKSTR(node_mem_map)),
			    mkstring(buf2, strlen(" START_PADDR "),
				CENTER|LONGLONG_HEX|RJUST,
				MKSTR(&node_start_paddr)),
			    mkstring(buf3, strlen("START_MAPNR"),
				CENTER|LONG_DEC|RJUST,
				MKSTR(node_start_mapnr)));

			sprintf(buf2, "%s %s START_PADDR START_MAPNR",
				node_zone_hdr,
				mkstring(buf1, VADDR_PRLEN, CENTER|RJUST,
				    "MEM_MAP"));
			slen = strlen(buf2);
			fprintf(fp, "\n%s\n", buf2);
		}

		/* Per-zone pass: gather/print size, start paddr/pfn,
		 * and mem_map for each zone of this node. */
		node_zones = pgdat + OFFSET(pglist_data_node_zones);
		cum_zone_size = 0;
		for (i = 0; i < vt->nr_zones; i++) {
			if (CRASHDEBUG(7))
				fprintf(fp, "zone %d at %lx\n", i, node_zones);

			if (VALID_MEMBER(zone_struct_size))
				readmem(node_zones+OFFSET(zone_struct_size),
					KVADDR, &zone_size, sizeof(ulong),
					"zone_struct size", FAULT_ON_ERROR);
			else if (VALID_MEMBER(zone_struct_memsize)) {
				readmem(node_zones+OFFSET(zone_struct_memsize),
					KVADDR, &zone_size, sizeof(ulong),
					"zone_struct memsize", FAULT_ON_ERROR);
				nt->size += zone_size;
			} else if (VALID_MEMBER(zone_spanned_pages)) {
				readmem(node_zones+
					OFFSET(zone_spanned_pages), KVADDR,
					&zone_size, sizeof(ulong),
					"zone spanned_pages", FAULT_ON_ERROR);
			} else
				error(FATAL,
				    "zone_struct has neither size nor memsize field\n");

			readmem(node_zones+
				OFFSET_OPTION(zone_struct_free_pages,
				zone_free_pages), KVADDR, &free_pages,
				sizeof(ulong), "zone[_struct] free_pages",
				FAULT_ON_ERROR);

			readmem(node_zones+OFFSET_OPTION(zone_struct_name,
				zone_name), KVADDR, &value, sizeof(void *),
				"zone[_struct] name", FAULT_ON_ERROR);
			if (!read_string(value, buf1, BUFSIZE-1))
				sprintf(buf1, "(unknown) ");

			if (VALID_STRUCT(zone_struct)) {
				if (VALID_MEMBER(zone_struct_zone_start_paddr))
				{
					readmem(node_zones+OFFSET
					    (zone_struct_zone_start_paddr),
					    KVADDR, &zone_start_paddr,
					    sizeof(ulong),
					    "node_zones zone_start_paddr",
					    FAULT_ON_ERROR);
				} else {
					readmem(node_zones+
					    OFFSET(zone_struct_zone_start_pfn),
					    KVADDR, &zone_start_pfn,
					    sizeof(ulong),
					    "node_zones zone_start_pfn",
					    FAULT_ON_ERROR);
					zone_start_paddr =
						PTOB(zone_start_pfn);
				}
				readmem(node_zones+
				    OFFSET(zone_struct_zone_start_mapnr),
				    KVADDR, &zone_start_mapnr,
				    sizeof(ulong),
				    "node_zones zone_start_mapnr",
				    FAULT_ON_ERROR);
			} else {
				readmem(node_zones+
				    OFFSET(zone_zone_start_pfn),
				    KVADDR, &zone_start_pfn,
				    sizeof(ulong),
				    "node_zones zone_start_pfn",
				    FAULT_ON_ERROR);
				zone_start_paddr = PTOB(zone_start_pfn);

				if (IS_SPARSEMEM()) {
					zone_mem_map = 0;
					zone_start_mapnr = 0;
					if (zone_size) {
						phys = PTOB(zone_start_pfn);
						zone_start_mapnr =
						    phys/PAGESIZE();
					}
				} else if (!(vt->flags & NODES) &&
				    INVALID_MEMBER(zone_zone_mem_map)) {
					readmem(pgdat+
					    OFFSET(pglist_data_node_mem_map),
					    KVADDR, &zone_mem_map,
					    sizeof(void *),
					    "contig_page_data mem_map",
					    FAULT_ON_ERROR);
					if (zone_size)
						zone_mem_map +=
						    cum_zone_size * SIZE(page);
				} else
					readmem(node_zones+
					    OFFSET(zone_zone_mem_map),
					    KVADDR, &zone_mem_map,
					    sizeof(ulong),
					    "node_zones zone_mem_map",
					    FAULT_ON_ERROR);

				if (zone_mem_map)
					zone_start_mapnr =
					    (zone_mem_map - node_mem_map) /
					    SIZE(page);
				else if (!IS_SPARSEMEM())
					zone_start_mapnr = 0;
			}

			if (IS_SPARSEMEM()) {
				zone_mem_map = 0;
				if (zone_size) {
					phys = PTOB(zone_start_pfn);
					if (phys_to_page(phys, &pp))
						zone_mem_map = pp;
				}
			} else if (!(vt->flags & NODES) &&
			    INVALID_MEMBER(zone_struct_zone_mem_map) &&
			    INVALID_MEMBER(zone_zone_mem_map)) {
				readmem(pgdat+
				    OFFSET(pglist_data_node_mem_map),
				    KVADDR, &zone_mem_map, sizeof(void *),
				    "contig_page_data mem_map",
				    FAULT_ON_ERROR);
				if (zone_size)
					zone_mem_map += cum_zone_size *
						SIZE(page);
				else
					zone_mem_map = 0;
			} else
				readmem(node_zones+
				    OFFSET_OPTION(zone_struct_zone_mem_map,
				    zone_zone_mem_map), KVADDR,
				    &zone_mem_map, sizeof(ulong),
				    "node_zones zone_mem_map",
				    FAULT_ON_ERROR);

			if (!initialize) {
				fprintf(fp, " %2d %-9s %7ld ",
					i, buf1, zone_size);
				cum_zone_size += zone_size;
				fprintf(fp, "%s %s %s\n",
				    mkstring(buf1, VADDR_PRLEN,
					RJUST|LONG_HEX,
					MKSTR(zone_mem_map)),
				    mkstring(buf2, strlen("START_PADDR"),
					LONG_HEX|RJUST,
					MKSTR(zone_start_paddr)),
				    mkstring(buf3, strlen("START_MAPNR"),
					LONG_DEC|RJUST,
					MKSTR(zone_start_mapnr)));
			}

			node_zones += SIZE_OPTION(zone_struct, zone);
		}

		/* Advance to the next node's pgdat. */
		if (initialize) {
			if (vt->flags & NODES_ONLINE) {
				if ((node = next_online_node(node+1)) < 0)
					pgdat = 0;
				else if (!(pgdat = next_online_pgdat(node))) {
					error(WARNING,
	"cannot determine pgdat list for this kernel/architecture (node %d)\n\n",
						node);
					pgdat = 0;
				}
			} else
				readmem(pgdat +
				    OFFSET_OPTION(pglist_data_node_next,
				    pglist_data_pgdat_next), KVADDR,
				    &pgdat, sizeof(void *),
				    "pglist_data node_next",
				    FAULT_ON_ERROR);
		} else {
			if ((n+1) < vt->numnodes)
				pgdat = vt->node_table[n+1].pgdat;
			else
				pgdat = 0;
		}
	}

	if (n != vt->numnodes) {
		if (CRASHDEBUG(2))
			error(NOTE, "changing numnodes from %d to %d\n",
				vt->numnodes, n);
		vt->numnodes = n;
	}

	if (!initialize && IS_SPARSEMEM())
		dump_mem_sections();
}

/*
 * At least verify that the page-shifted physical address does not
 * exceed the architecture's maximum physical memory range.
 */
static int
verify_pfn(ulong pfn)
{
	int i;
	physaddr_t mask;

	if (!machdep->max_physmem_bits)
		return TRUE;  /* no limit known for this architecture */

	/* Build a mask of the bits above max_physmem_bits; any such bit
	 * set in the physical address marks the pfn as bogus. */
	mask = 0;
	for (i = machdep->max_physmem_bits; i < machdep->bits; i++)
		mask |= ((physaddr_t)1 << i);

	if (mask & PTOB(pfn))
		return FALSE;

	return TRUE;
}

/*
 * Display per-zone statistics for every node: size/present pages,
 * min/low/high watermarks, free pages, and (when available) the
 * zone's vm_stat counters and reclaim state.
 */
static void
dump_zone_stats(void)
{
	int i, n;
	ulong pgdat, node_zones;
	char *zonebuf;
	char buf1[BUFSIZE];
	int ivalue;
	ulong value1;
	ulong value2;
	ulong value3;
	ulong value4;
	ulong value5;
	ulong value6;
	long min, low, high;

	value1 = value2 = value3 = value4 = value5 = value6 = 0;
	min = low = high = 0;
	pgdat = vt->node_table[0].pgdat;
	zonebuf = GETBUF(SIZE_OPTION(zone_struct, zone));
	vm_stat_init();

	for (n = 0; pgdat; n++) {
		node_zones = pgdat + OFFSET(pglist_data_node_zones);

		for (i = 0; i < vt->nr_zones; i++) {

			if (!readmem(node_zones, KVADDR, zonebuf,
			    SIZE_OPTION(zone_struct, zone),
			    "zone buffer", FAULT_ON_ERROR))
				break;

			value1 = ULONG(zonebuf +
				OFFSET_OPTION(zone_struct_name, zone_name));
			if (!read_string(value1, buf1, BUFSIZE-1))
				sprintf(buf1, "(unknown) ");

			/* zone size member varies by kernel version */
			if (VALID_MEMBER(zone_struct_size))
				value1 = value6 = ULONG(zonebuf +
					OFFSET(zone_struct_size));
			else if (VALID_MEMBER(zone_struct_memsize)) {
				value1 = value6 = ULONG(zonebuf +
					OFFSET(zone_struct_memsize));
			} else if (VALID_MEMBER(zone_spanned_pages)) {
				value1 = ULONG(zonebuf +
					OFFSET(zone_spanned_pages));
				value6 = ULONG(zonebuf +
					OFFSET(zone_present_pages));
			} else
				error(FATAL,
				    "zone struct has unknown size field\n");

			/* watermark array (newer) vs. discrete pages_min/
			 * pages_low/pages_high members (older) */
			if (VALID_MEMBER(zone_watermark)) {
				if (!enumerator_value("WMARK_MIN", &min) ||
				    !enumerator_value("WMARK_LOW", &low) ||
				    !enumerator_value("WMARK_HIGH", &high)) {
					min = 0;
					low = 1;
					high = 2;
				}
				value2 = ULONG(zonebuf +
					OFFSET(zone_watermark) +
					(sizeof(long) * min));
				value3 = ULONG(zonebuf +
					OFFSET(zone_watermark) +
					(sizeof(long) * low));
				value4 = ULONG(zonebuf +
					OFFSET(zone_watermark) +
					(sizeof(long) * high));
			} else {
				value2 = ULONG(zonebuf +
					OFFSET_OPTION(zone_pages_min,
					zone_struct_pages_min));
				value3 = ULONG(zonebuf +
					OFFSET_OPTION(zone_pages_low,
					zone_struct_pages_low));
				value4 = ULONG(zonebuf +
					OFFSET_OPTION(zone_pages_high,
					zone_struct_pages_high));
			}
			value5 = ULONG(zonebuf +
				OFFSET_OPTION(zone_free_pages,
				zone_struct_free_pages));

			fprintf(fp,
			    "NODE: %d  ZONE: %d  ADDR: %lx  NAME: \"%s\"\n",
				n, i, node_zones, buf1);

			if (!value1) {
				fprintf(fp, "  [unpopulated]\n");
				goto next_zone;
			}

			fprintf(fp, "  SIZE: %ld", value1);
			if (value6 < value1)
				fprintf(fp, "  PRESENT: %ld", value6);
			fprintf(fp, "  MIN/LOW/HIGH: %ld/%ld/%ld",
				value2, value3, value4);

			if (VALID_MEMBER(zone_vm_stat))
				dump_vm_stat("NR_FREE_PAGES",
					(long *)&value5,
					node_zones + OFFSET(zone_vm_stat));

			if (VALID_MEMBER(zone_nr_active) &&
			    VALID_MEMBER(zone_nr_inactive)) {
				value1 = ULONG(zonebuf +
					OFFSET(zone_nr_active));
				value2 = ULONG(zonebuf +
					OFFSET(zone_nr_inactive));
				fprintf(fp,
		    "\n  NR_ACTIVE: %ld  NR_INACTIVE: %ld  FREE: %ld\n",
					value1, value2, value5);
				if (VALID_MEMBER(zone_vm_stat)) {
					fprintf(fp, "  VM_STAT:\n");
					dump_vm_stat(NULL, NULL, node_zones +
					    OFFSET(zone_vm_stat));
				}
			} else if (VALID_MEMBER(zone_vm_stat) &&
			    dump_vm_stat("NR_ACTIVE", (long *)&value1,
			    node_zones + OFFSET(zone_vm_stat)) &&
			    dump_vm_stat("NR_INACTIVE", (long *)&value2,
			    node_zones + OFFSET(zone_vm_stat))) {
				fprintf(fp, "\n  VM_STAT:\n");
				dump_vm_stat(NULL, NULL, node_zones +
					OFFSET(zone_vm_stat));
			} else {
				if (VALID_MEMBER(zone_vm_stat)) {
					fprintf(fp, "\n  VM_STAT:\n");
					dump_vm_stat(NULL, NULL, node_zones +
						OFFSET(zone_vm_stat));
				} else
					fprintf(fp, "  FREE: %ld\n", value5);
			}

			/* reclaim state: a dedicated member, or a bit in
			 * zone->flags, depending on kernel version */
			if (VALID_MEMBER(zone_all_unreclaimable)) {
				ivalue = UINT(zonebuf +
					OFFSET(zone_all_unreclaimable));
				fprintf(fp, "  ALL_UNRECLAIMABLE: %s  ",
					ivalue ? "yes" : "no");
			} else if (VALID_MEMBER(zone_flags) &&
			    enumerator_value("ZONE_ALL_UNRECLAIMABLE",
			    (long *)&value1)) {
				value2 = ULONG(zonebuf +
					OFFSET(zone_flags));
				value3 = value2 & (1 << value1);
				fprintf(fp, "  ALL_UNRECLAIMABLE: %s  ",
					value3 ? "yes" : "no");
			}

			if (VALID_MEMBER(zone_pages_scanned)) {
				value1 = ULONG(zonebuf +
					OFFSET(zone_pages_scanned));
				fprintf(fp, "PAGES_SCANNED: %lu  ",
					value1);
			}
			fprintf(fp, "\n");

next_zone:
			fprintf(fp, "\n");
			node_zones += SIZE_OPTION(zone_struct, zone);
		}

		if ((n+1) < vt->numnodes)
			pgdat = vt->node_table[n+1].pgdat;
		else
			pgdat = 0;
	}

	FREEBUF(zonebuf);
}

/*
 * Gather essential information regarding each memory node.
 */
static void
node_table_init(void)
{
	int n;
	ulong pgdat;

	/*
	 * Override numnodes -- some kernels may leave it at 1 on a system
	 * with multiple memory nodes.
	 */
	if ((vt->flags & NODES) &&
	    (VALID_MEMBER(pglist_data_node_next) ||
	     VALID_MEMBER(pglist_data_pgdat_next))) {

		get_symbol_data("pgdat_list", sizeof(void *), &pgdat);

		/* count the entries on the pgdat list */
		for (n = 0; pgdat; n++) {
			readmem(pgdat + OFFSET_OPTION(pglist_data_node_next,
				pglist_data_pgdat_next), KVADDR,
				&pgdat, sizeof(void *), "pglist_data node_next",
				FAULT_ON_ERROR);
		}
		if (n != vt->numnodes) {
			if (CRASHDEBUG(2))
				error(NOTE,
				    "changing numnodes from %d to %d\n",
					vt->numnodes, n);
			vt->numnodes = n;
		}
	} else
		vt->flags &= ~NODES;

	if (!(vt->node_table = (struct node_table *)
	    malloc(sizeof(struct node_table) * vt->numnodes)))
		error(FATAL, "cannot malloc node_table %s(%d nodes)",
			vt->numnodes > 1 ? "array " : "", vt->numnodes);

	BZERO(vt->node_table, sizeof(struct node_table) * vt->numnodes);

	dump_memory_nodes(MEMORY_NODES_INITIALIZE);

	/* keep the table ordered by node id */
	qsort((void *)vt->node_table, (size_t)vt->numnodes,
		sizeof(struct node_table), compare_node_data);

	if (CRASHDEBUG(2))
		dump_memory_nodes(MEMORY_NODES_DUMP);
}

/*
 * The comparison function must return an integer less than,
 * equal to, or greater than zero if the first argument is
 * considered to be respectively less than, equal to, or
 * greater than the second.  If two members compare as equal,
 * their order in the sorted array is undefined.
*/ static int compare_node_data(const void *v1, const void *v2) { struct node_table *t1, *t2; t1 = (struct node_table *)v1; t2 = (struct node_table *)v2; return (t1->node_id < t2->node_id ? -1 : t1->node_id == t2->node_id ? 0 : 1); } /* * Depending upon the processor, and whether we're running live or on a * dumpfile, get the system page size. */ uint memory_page_size(void) { uint psz; if (machdep->pagesize) return machdep->pagesize; if (REMOTE_MEMSRC()) return remote_page_size(); switch (pc->flags & MEMORY_SOURCES) { case DISKDUMP: psz = diskdump_page_size(); break; case XENDUMP: psz = xendump_page_size(); break; case KDUMP: psz = kdump_page_size(); break; case NETDUMP: psz = netdump_page_size(); break; case MCLXCD: psz = (uint)mclx_page_size(); break; case LKCD: #if 0 /* REMIND: */ psz = lkcd_page_size(); /* dh_dump_page_size is HW page size; should add dh_page_size */ #else psz = (uint)getpagesize(); #endif break; case DEVMEM: case MEMMOD: case CRASHBUILTIN: case KVMDUMP: case PROC_KCORE: case LIVE_RAMDUMP: psz = (uint)getpagesize(); break; case S390D: psz = s390_page_size(); break; case SADUMP: psz = sadump_page_size(); break; case VMWARE_VMSS: psz = vmware_vmss_page_size(); break; default: psz = 0; error(FATAL, "memory_page_size: invalid pc->flags: %lx\n", pc->flags & MEMORY_SOURCES); } return psz; } /* * If the page size cannot be determined by the dumpfile (like kdump), * and the processor default cannot be used, allow the force-feeding * of a crash command-line page size option. 
*/
/*
 *  Parse a user-supplied page size string ("--machdep pagesize=...") and
 *  install it in machdep->pagesize.  Accepts a decimal or hexadecimal
 *  value, optionally suffixed with 'k'/'K' (decimal only) for kilobytes.
 *  On a malformed string, reports INFO and leaves the page size untouched.
 */
void
force_page_size(char *s)
{
	int k, err;
	ulong psize;

	k = 1;
	err = FALSE;
	psize = 0;

	switch (LASTCHAR(s))
	{
	case 'k':
	case 'K':
		/*  Strip the suffix; 'k' forms must be decimal.  */
		LASTCHAR(s) = NULLCHAR;
		if (!decimal(s, 0)) {
			err = TRUE;
			break;
		}
		k = 1024;
		/* FALLTHROUGH */
	default:
		if (decimal(s, 0))
			psize = dtol(s, QUIET|RETURN_ON_ERROR, &err);
		else if (hexadecimal(s, 0))
			psize = htol(s, QUIET|RETURN_ON_ERROR, &err);
		else
			err = TRUE;
		break;
	}

	if (err)
		error(INFO, "invalid page size: %s\n", s);
	else
		machdep->pagesize = psize * k;
}

/*
 * Return the vmalloc address referenced by the first vm_struct
 * on the vmlist.  This can normally be used by the machine-specific
 * xxx_vmalloc_start() routines.
 */
ulong
first_vmalloc_address(void)
{
	/*  Cached on dumpfiles; a dumpfile's value cannot change.  */
	static ulong vmalloc_start = 0;
	ulong vm_struct, vmap_area;

	if (DUMPFILE() && vmalloc_start)
		return vmalloc_start;

	if (vt->flags & USE_VMAP_AREA) {
		/*  Newer kernels: first entry on vmap_area_list.  */
		get_symbol_data("vmap_area_list", sizeof(void *), &vmap_area);
		if (!vmap_area)
			return 0;
		if (!readmem(vmap_area - OFFSET(vmap_area_list) +
		    OFFSET(vmap_area_va_start), KVADDR, &vmalloc_start,
		    sizeof(void *), "first vmap_area va_start",
		    RETURN_ON_ERROR))
			non_matching_kernel();
	} else if (kernel_symbol_exists("vmlist")) {
		/*  Older kernels: first vm_struct on vmlist.  */
		get_symbol_data("vmlist", sizeof(void *), &vm_struct);
		if (!vm_struct)
			return 0;
		if (!readmem(vm_struct+OFFSET(vm_struct_addr), KVADDR,
		    &vmalloc_start, sizeof(void *),
		    "first vmlist addr", RETURN_ON_ERROR))
			non_matching_kernel();
	}

	return vmalloc_start;
}

/*
 * Return the highest vmalloc address in the vmlist.
 */
ulong
last_vmalloc_address(void)
{
	struct meminfo meminfo;
	/*  Cached; recomputed on every call when running on a live system.  */
	static ulong vmalloc_limit = 0;

	if (!vmalloc_limit || ACTIVE()) {
		BZERO(&meminfo, sizeof(struct meminfo));
		meminfo.memtype = KVADDR;
		meminfo.spec_addr = 0;
		meminfo.flags = (ADDRESS_SPECIFIED|GET_HIGHEST);
		dump_vmlist(&meminfo);
		vmalloc_limit = meminfo.retval;
	}

	return vmalloc_limit;
}

/*
 * Determine whether an identity-mapped virtual address
 * refers to an existent physical page, and if not bump
 * it up to the next node.  Returns TRUE and stores the (possibly
 * bumped) virtual address through nextvaddr when a candidate is found;
 * returns FALSE when the address lies beyond all nodes.
 */
static int
next_identity_mapping(ulong vaddr, ulong *nextvaddr)
{
	int n, retval;
	struct node_table *nt;
	ulonglong paddr, pstart, psave, pend;
	ulong node_size;

	paddr = VTOP(vaddr);
	psave = 0;
	retval = FALSE;

	for (n = 0; n < vt->numnodes; n++) {
		nt = &vt->node_table[n];
		/*
		 *  With a virtual mem_map and a single node, the node size
		 *  is taken from the maximum page frame number.
		 */
		if ((vt->flags & V_MEM_MAP) && (vt->numnodes == 1))
			node_size = vt->max_mapnr;
		else
			node_size = nt->size;
		pstart = nt->start_paddr;
		pend = pstart + ((ulonglong)node_size * PAGESIZE());

		/*
		 *  Check the next node.
		 */
		if (paddr >= pend)
			continue;

		/*
		 *  Bump up to the next node, but keep looking in
		 *  case of non-sequential nodes.
		 */
		if (paddr < pstart) {
			if (psave && (psave < pstart))
				continue;
			*nextvaddr = PTOV(pstart);
			psave = pstart;
			retval = TRUE;
			continue;
		}
		/*
		 *  We're in the physical range.
		 */
		*nextvaddr = vaddr;
		retval = TRUE;
		break;
	}

	return retval;
}

/*
 * Return the L1 cache size in bytes, which can be found stored in the
 * cache_cache.  Returns -1 when neither kmem_cache member exists.
 */
int
l1_cache_size(void)
{
	ulong cache;
	ulong c_align;
	int colour_off;
	int retval;

	retval = -1;

	if (VALID_MEMBER(kmem_cache_s_c_align)) {
		cache = symbol_value("cache_cache");
		readmem(cache+OFFSET(kmem_cache_s_c_align),
			KVADDR, &c_align, sizeof(ulong),
			"c_align", FAULT_ON_ERROR);
		retval = (int)c_align;
	} else if (VALID_MEMBER(kmem_cache_s_colour_off)) {
		cache = symbol_value("cache_cache");
		readmem(cache+OFFSET(kmem_cache_s_colour_off),
			KVADDR, &colour_off, sizeof(int),
			"colour_off", FAULT_ON_ERROR);
		retval = colour_off;
	}

	return retval;
}

/*
 * Multi-purpose routine used to query/control dumpfile memory usage.
*/ int dumpfile_memory(int cmd) { int retval; retval = 0; switch (cmd) { case DUMPFILE_MEM_USED: if (REMOTE_DUMPFILE()) retval = remote_memory_used(); else if (pc->flags & NETDUMP) retval = netdump_memory_used(); else if (pc->flags & KDUMP) retval = kdump_memory_used(); else if (pc->flags & XENDUMP) retval = xendump_memory_used(); else if (pc->flags & KVMDUMP) retval = kvmdump_memory_used(); else if (pc->flags & DISKDUMP) retval = diskdump_memory_used(); else if (pc->flags & LKCD) retval = lkcd_memory_used(); else if (pc->flags & MCLXCD) retval = vas_memory_used(); else if (pc->flags & S390D) retval = s390_memory_used(); else if (pc->flags & SADUMP) retval = sadump_memory_used(); break; case DUMPFILE_FREE_MEM: if (REMOTE_DUMPFILE()) retval = remote_free_memory(); else if (pc->flags & NETDUMP) retval = netdump_free_memory(); else if (pc->flags & KDUMP) retval = kdump_free_memory(); else if (pc->flags & XENDUMP) retval = xendump_free_memory(); else if (pc->flags & KVMDUMP) retval = kvmdump_free_memory(); else if (pc->flags & DISKDUMP) retval = diskdump_free_memory(); else if (pc->flags & LKCD) retval = lkcd_free_memory(); else if (pc->flags & MCLXCD) retval = vas_free_memory(NULL); else if (pc->flags & S390D) retval = s390_free_memory(); else if (pc->flags & SADUMP) retval = sadump_free_memory(); break; case DUMPFILE_MEM_DUMP: if (REMOTE_DUMPFILE()) retval = remote_memory_dump(0); else if (pc->flags & NETDUMP) retval = netdump_memory_dump(fp); else if (pc->flags & KDUMP) retval = kdump_memory_dump(fp); else if (pc->flags & XENDUMP) retval = xendump_memory_dump(fp); else if (pc->flags & KVMDUMP) retval = kvmdump_memory_dump(fp); else if (pc->flags & DISKDUMP) retval = diskdump_memory_dump(fp); else if (pc->flags & LKCD) retval = lkcd_memory_dump(set_lkcd_fp(fp)); else if (pc->flags & MCLXCD) retval = vas_memory_dump(fp); else if (pc->flags & S390D) retval = s390_memory_dump(fp); else if (pc->flags & PROC_KCORE) retval = kcore_memory_dump(fp); else if (pc->flags & 
SADUMP) retval = sadump_memory_dump(fp); break; case DUMPFILE_ENVIRONMENT: if (pc->flags & LKCD) { set_lkcd_fp(fp); dump_lkcd_environment(0); } else if (pc->flags & REM_LKCD) retval = remote_memory_dump(VERBOSE); break; } return retval; } /* * Functions for sparse mem support */ ulong sparse_decode_mem_map(ulong coded_mem_map, ulong section_nr) { return coded_mem_map + (section_nr_to_pfn(section_nr) * SIZE(page)); } void sparse_mem_init(void) { ulong addr; ulong mem_section_size; int len, dimension, mem_section_is_ptr; if (!IS_SPARSEMEM()) return; MEMBER_OFFSET_INIT(mem_section_section_mem_map, "mem_section", "section_mem_map"); if (!MAX_PHYSMEM_BITS()) error(FATAL, "CONFIG_SPARSEMEM kernels not supported for this architecture\n"); /* * The kernel's mem_section changed from array to pointer in this commit: * * commit 83e3c48729d9ebb7af5a31a504f3fd6aff0348c4 * mm/sparsemem: Allocate mem_section at runtime for CONFIG_SPARSEMEM_EXTREME=y */ mem_section_is_ptr = get_symbol_type("mem_section", NULL, NULL) == TYPE_CODE_PTR ? 
TRUE : FALSE; if (((len = get_array_length("mem_section", &dimension, 0)) == (NR_MEM_SECTIONS() / _SECTIONS_PER_ROOT_EXTREME())) || mem_section_is_ptr || !dimension) vt->flags |= SPARSEMEM_EX; if (IS_SPARSEMEM_EX()) { machdep->sections_per_root = _SECTIONS_PER_ROOT_EXTREME(); mem_section_size = sizeof(void *) * NR_SECTION_ROOTS(); } else { machdep->sections_per_root = _SECTIONS_PER_ROOT(); mem_section_size = SIZE(mem_section) * NR_SECTION_ROOTS(); } if (CRASHDEBUG(1)) { fprintf(fp, "PAGESIZE=%d\n",PAGESIZE()); fprintf(fp,"mem_section_size = %ld\n", mem_section_size); fprintf(fp, "NR_SECTION_ROOTS = %ld\n", NR_SECTION_ROOTS()); fprintf(fp, "NR_MEM_SECTIONS = %ld\n", NR_MEM_SECTIONS()); fprintf(fp, "SECTIONS_PER_ROOT = %ld\n", SECTIONS_PER_ROOT() ); fprintf(fp, "SECTION_ROOT_MASK = 0x%lx\n", SECTION_ROOT_MASK()); fprintf(fp, "PAGES_PER_SECTION = %ld\n", PAGES_PER_SECTION()); if (!mem_section_is_ptr && IS_SPARSEMEM_EX() && !len) error(WARNING, "SPARSEMEM_EX: questionable section values\n"); } if (!(vt->mem_sec = (void *)malloc(mem_section_size))) error(FATAL, "cannot malloc mem_sec cache\n"); if (!(vt->mem_section = (char *)malloc(SIZE(mem_section)))) error(FATAL, "cannot malloc mem_section cache\n"); if (mem_section_is_ptr) get_symbol_data("mem_section", sizeof(void *), &addr); else addr = symbol_value("mem_section"); readmem(addr, KVADDR, vt->mem_sec, mem_section_size, "memory section root table", FAULT_ON_ERROR); } char * read_mem_section(ulong addr) { if ((addr == 0) || !IS_KVADDR(addr)) return 0; readmem(addr, KVADDR, vt->mem_section, SIZE(mem_section), "memory section", FAULT_ON_ERROR); return vt->mem_section; } ulong nr_to_section(ulong nr) { ulong addr; ulong *mem_sec = vt->mem_sec; if (IS_SPARSEMEM_EX()) { if (SECTION_NR_TO_ROOT(nr) >= NR_SECTION_ROOTS()) { if (!STREQ(pc->curcmd, "rd") && !STREQ(pc->curcmd, "search") && !STREQ(pc->curcmd, "kmem")) error(WARNING, "sparsemem: invalid section number: %ld\n", nr); return 0; } } if (IS_SPARSEMEM_EX()) { if 
((mem_sec[SECTION_NR_TO_ROOT(nr)] == 0) || !IS_KVADDR(mem_sec[SECTION_NR_TO_ROOT(nr)])) return 0; addr = mem_sec[SECTION_NR_TO_ROOT(nr)] + (nr & SECTION_ROOT_MASK()) * SIZE(mem_section); } else addr = symbol_value("mem_section") + (SECTIONS_PER_ROOT() * SECTION_NR_TO_ROOT(nr) + (nr & SECTION_ROOT_MASK())) * SIZE(mem_section); if (!IS_KVADDR(addr)) return 0; return addr; } /* * We use the lower bits of the mem_map pointer to store * a little bit of information. There should be at least * 3 bits here due to 32-bit alignment. */ #define SECTION_MARKED_PRESENT (1UL<<0) #define SECTION_HAS_MEM_MAP (1UL<<1) #define SECTION_IS_ONLINE (1UL<<2) #define SECTION_MAP_LAST_BIT (1UL<<3) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) int valid_section(ulong addr) { char *mem_section; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) & SECTION_MARKED_PRESENT); return 0; } int section_has_mem_map(ulong addr) { char *mem_section; ulong kernel_version_bit; if (THIS_KERNEL_VERSION >= LINUX(2,6,24)) kernel_version_bit = SECTION_HAS_MEM_MAP; else kernel_version_bit = SECTION_MARKED_PRESENT; if ((mem_section = read_mem_section(addr))) return (ULONG(mem_section + OFFSET(mem_section_section_mem_map)) & kernel_version_bit); return 0; } ulong section_mem_map_addr(ulong addr) { char *mem_section; ulong map; if ((mem_section = read_mem_section(addr))) { map = ULONG(mem_section + OFFSET(mem_section_section_mem_map)); map &= SECTION_MAP_MASK; return map; } return 0; } ulong valid_section_nr(ulong nr) { ulong addr = nr_to_section(nr); if (valid_section(addr)) return addr; return 0; } ulong pfn_to_map(ulong pfn) { ulong section, page_offset; ulong section_nr; ulong coded_mem_map, mem_map; section_nr = pfn_to_section_nr(pfn); if (!(section = valid_section_nr(section_nr))) return 0; if (section_has_mem_map(section)) { page_offset = pfn - section_nr_to_pfn(section_nr); coded_mem_map = section_mem_map_addr(section); mem_map = 
sparse_decode_mem_map(coded_mem_map, section_nr) + (page_offset * SIZE(page)); return mem_map; } return 0; } void dump_mem_sections(void) { ulong nr,addr; ulong nr_mem_sections; ulong coded_mem_map, mem_map, pfn; char buf1[BUFSIZE]; char buf2[BUFSIZE]; char buf3[BUFSIZE]; char buf4[BUFSIZE]; nr_mem_sections = NR_MEM_SECTIONS(); fprintf(fp, "\n"); pad_line(fp, BITS32() ? 59 : 67, '-'); fprintf(fp, "\n\nNR %s %s %s PFN\n", mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "SECTION"), mkstring(buf2, MAX(VADDR_PRLEN,strlen("CODED_MEM_MAP")), CENTER|LJUST, "CODED_MEM_MAP"), mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "MEM_MAP")); for (nr = 0; nr < nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr); mem_map = sparse_decode_mem_map(coded_mem_map,nr); pfn = section_nr_to_pfn(nr); fprintf(fp, "%2ld %s %s %s %s\n", nr, mkstring(buf1, VADDR_PRLEN, CENTER|LONG_HEX, MKSTR(addr)), mkstring(buf2, MAX(VADDR_PRLEN, strlen("CODED_MEM_MAP")), CENTER|LONG_HEX|RJUST, MKSTR(coded_mem_map)), mkstring(buf3, VADDR_PRLEN, CENTER|LONG_HEX|RJUST, MKSTR(mem_map)), pc->output_radix == 10 ? mkstring(buf4, VADDR_PRLEN, LONG_DEC|LJUST, MKSTR(pfn)) : mkstring(buf4, VADDR_PRLEN, LONG_HEX|LJUST, MKSTR(pfn))); } } } void list_mem_sections(void) { ulong nr,addr; ulong nr_mem_sections = NR_MEM_SECTIONS(); ulong coded_mem_map; for (nr = 0; nr <= nr_mem_sections ; nr++) { if ((addr = valid_section_nr(nr))) { coded_mem_map = section_mem_map_addr(addr); fprintf(fp, "nr=%ld section = %lx coded_mem_map=%lx pfn=%ld mem_map=%lx\n", nr, addr, coded_mem_map, section_nr_to_pfn(nr), sparse_decode_mem_map(coded_mem_map,nr)); } } } /* * For kernels containing the node_online_map or node_states[], * return the number of online node bits set. 
*/
/*
 *  Count the online nodes by reading the kernel's node_online_map
 *  (older kernels) or node_states[N_ONLINE] (newer kernels) bitmask.
 *  Caches the raw mask in vt->node_online_map, updates vt->numnodes
 *  when at least one bit is set, and returns the online count
 *  (0 when neither symbol exists or the data cannot be interpreted).
 */
static int
get_nodes_online(void)
{
	int i, len, online;
	struct gnu_request req;
	ulong *maskptr;
	long N_ONLINE;
	ulong mapaddr;

	if (!symbol_exists("node_online_map") &&
	    !symbol_exists("node_states"))
		return 0;

	len = mapaddr = 0;

	if (symbol_exists("node_online_map")) {
		if (LKCD_KERNTYPES()) {
			/*  No symbol type info: size the mask from its type.  */
			if ((len = STRUCT_SIZE("nodemask_t")) < 0)
				error(FATAL,
					"cannot determine type nodemask_t\n");
			mapaddr = symbol_value("node_online_map");
		} else {
			/*  Fall back to one word if gdb has no type for it.  */
			len = get_symbol_type("node_online_map", NULL, &req)
				== TYPE_CODE_UNDEF ?
				sizeof(ulong) : req.length;
			mapaddr = symbol_value("node_online_map");
		}
	} else if (symbol_exists("node_states")) {
		/*
		 *  node_states[] is an array of nodemask_t indexed by node
		 *  state; compute the element size and point at N_ONLINE.
		 */
		if ((get_symbol_type("node_states", NULL, &req) !=
		    TYPE_CODE_ARRAY) ||
		    !(len = get_array_length("node_states", NULL, 0)) ||
		    !enumerator_value("N_ONLINE", &N_ONLINE))
			return 0;
		len = req.length / len;
		mapaddr = symbol_value("node_states") + (N_ONLINE * len);
	}

	if (!(vt->node_online_map = (ulong *)malloc(len)))
		error(FATAL, "cannot malloc node_online_map\n");

	if (!readmem(mapaddr, KVADDR,
	    (void *)&vt->node_online_map[0], len, "node_online_map",
	    QUIET|RETURN_ON_ERROR))
		error(FATAL, "cannot read node_online_map/node_states\n");

	vt->node_online_map_len = len/sizeof(ulong);

	/*  Tally the set bits across all mask words.  */
	online = 0;
	maskptr = (ulong *)vt->node_online_map;
	for (i = 0; i < vt->node_online_map_len; i++, maskptr++)
		online += count_bits_long(*maskptr);

	if (CRASHDEBUG(1)) {
		fprintf(fp, "node_online_map: [");
		for (i = 0; i < vt->node_online_map_len; i++)
			fprintf(fp, "%s%lx", i ? ", " : "",
				vt->node_online_map[i]);
		fprintf(fp, "] -> nodes online: %d\n", online);
	}

	if (online)
		vt->numnodes = online;

	return online;
}

/*
 * Return the next node index, with "first" being the first acceptable node.
*/
/*
 *  Scan vt->node_online_map for the lowest online node number that is
 *  >= first; returns -1 when no such node exists.
 */
static int
next_online_node(int first)
{
	int i, j, node;
	ulong mask, *maskptr;

	if ((first/BITS_PER_LONG) >= vt->node_online_map_len)
		return -1;

	maskptr = (ulong *)vt->node_online_map;
	for (i = node = 0; i < vt->node_online_map_len; i++, maskptr++) {
		mask = *maskptr;
		for (j = 0; j < BITS_PER_LONG; j++, node++) {
			if (mask & 1) {
				if (node >= first)
					return node;
			}
			mask >>= 1;
		}
	}

	return -1;
}

/*
 *  Return the pglist_data address for the given node, trying in order:
 *  node_data[node], pgdat_list[node], and finally contig_page_data for
 *  node 0.  Returns 0 when no source yields a valid kernel address.
 *  Modify appropriately for architecture/kernel nuances.
 */
static ulong
next_online_pgdat(int node)
{
	char buf[BUFSIZE];
	ulong pgdat;

	/*
	 *  Default -- look for type: struct pglist_data node_data[]
	 */
	if (LKCD_KERNTYPES()) {
		if (!kernel_symbol_exists("node_data"))
			goto pgdat2;
		/*
		 *  Just index into node_data[] without checking that it is
		 *  an array; kerntypes have no such symbol information.
		 */
	} else {
		if (get_symbol_type("node_data", NULL, NULL) != TYPE_CODE_ARRAY)
			goto pgdat2;

		/*
		 *  Confirm via gdb "whatis" that node_data is an array of
		 *  pglist_data/pg_data_t pointers before dereferencing it.
		 */
		open_tmpfile();
		sprintf(buf, "whatis node_data");
		if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
			close_tmpfile();
			goto pgdat2;
		}
		rewind(pc->tmpfile);
		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			if (STRNEQ(buf, "type = "))
				break;
		}
		close_tmpfile();

		if ((!strstr(buf, "struct pglist_data *") &&
		     !strstr(buf, "pg_data_t *")) ||
		    (count_chars(buf, '[') != 1) ||
		    (count_chars(buf, ']') != 1))
			goto pgdat2;
	}

	if (!readmem(symbol_value("node_data") + (node * sizeof(void *)),
	    KVADDR, &pgdat, sizeof(void *), "node_data", RETURN_ON_ERROR) ||
	    !IS_KVADDR(pgdat))
		goto pgdat2;

	return pgdat;

pgdat2:
	if (LKCD_KERNTYPES()) {
		if (!kernel_symbol_exists("pgdat_list"))
			goto pgdat3;
	} else {
		if (get_symbol_type("pgdat_list",NULL,NULL) != TYPE_CODE_ARRAY)
			goto pgdat3;

		/*  Same "whatis" validation for pgdat_list.  */
		open_tmpfile();
		sprintf(buf, "whatis pgdat_list");
		if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
			close_tmpfile();
			goto pgdat3;
		}
		rewind(pc->tmpfile);
		while (fgets(buf, BUFSIZE, pc->tmpfile)) {
			if (STRNEQ(buf, "type = "))
				break;
		}
		close_tmpfile();

		if ((!strstr(buf, "struct pglist_data *") &&
		     !strstr(buf, "pg_data_t *")) ||
		    (count_chars(buf, '[') != 1) ||
		    (count_chars(buf, ']') != 1))
			goto pgdat3;
	}

	if (!readmem(symbol_value("pgdat_list") + (node * sizeof(void *)),
	    KVADDR, &pgdat, sizeof(void *), "pgdat_list", RETURN_ON_ERROR) ||
	    !IS_KVADDR(pgdat))
		goto pgdat3;

	return pgdat;

pgdat3:
	if (symbol_exists("contig_page_data") && (node == 0))
		return symbol_value("contig_page_data");

	return 0;
}

/*
 *  Make the vm_stat[] array contents easily accessible.  Builds the
 *  vt->vm_stat_items name table from the zone_stat_item (and, on split
 *  kernels, node_stat_item) enumerations.  Returns TRUE on success and
 *  sets VM_STAT in vt->flags; on failure marks nr_vm_stat_items = -1.
 */
static int
vm_stat_init(void)
{
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	int i, count, stringlen, total;
	int c ATTRIBUTE_UNUSED;
	struct gnu_request *req;
	char *start;
	long enum_value, zc = -1;
	int split_vmstat = 0, ni = 0;

	if (vt->flags & VM_STAT)
		return TRUE;

	if ((vt->nr_vm_stat_items == -1) ||
	    (!symbol_exists("vm_stat") && !symbol_exists("vm_zone_stat")))
		goto bailout;

	/*
	 *  look for type: type = atomic_long_t []
	 */
	if (LKCD_KERNTYPES()) {
		if ((!symbol_exists("vm_stat") &&
		     !symbol_exists("vm_zone_stat")))
			goto bailout;
		/*
		 *  Just assume that vm_stat is an array; there is
		 *  no symbol info in a kerntypes file.
		 */
	} else {
		if (symbol_exists("vm_stat") &&
		    get_symbol_type("vm_stat", NULL, NULL) == TYPE_CODE_ARRAY) {
			vt->nr_vm_stat_items =
				get_array_length("vm_stat", NULL, 0);
		} else if (symbol_exists("vm_zone_stat") &&
		    get_symbol_type("vm_zone_stat",
		    NULL, NULL) == TYPE_CODE_ARRAY) {
			/*
			 *  4.8+ kernels split the counters into zone and
			 *  node arrays; track both in one table.
			 */
			vt->nr_vm_stat_items =
				get_array_length("vm_zone_stat", NULL, 0) +
				get_array_length("vm_node_stat", NULL, 0);
			split_vmstat = 1;
			enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zc);
		} else {
			goto bailout;
		}
	}

	/*  Have gdb print the enumerator names into a tmpfile.  */
	open_tmpfile();
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_GET_DATATYPE;
	req->name = "zone_stat_item";
	req->flags = GNU_PRINT_ENUMERATORS;
	gdb_interface(req);

	if (split_vmstat) {
		req->command = GNU_GET_DATATYPE;
		req->name = "node_stat_item";
		req->flags = GNU_PRINT_ENUMERATORS;
		gdb_interface(req);
	}

	FREEBUF(req);

	/*  First pass: accumulate the string-space requirement.  */
	stringlen = 1;
	count = -1;

	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		clean_line(buf);
		c = parse_line(buf, arglist);
		if ((!split_vmstat &&
		     STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) ||
		    (split_vmstat &&
		     STREQ(arglist[0], "NR_VM_NODE_STAT_ITEMS"))) {
			if (LKCD_KERNTYPES())
				vt->nr_vm_stat_items =
					MAX(atoi(arglist[2]), count);
			break;
		} else if (split_vmstat &&
		    STREQ(arglist[0], "NR_VM_ZONE_STAT_ITEMS")) {
			continue;
		} else {
			stringlen += strlen(arglist[0]);
			count++;
		}
	}

	/*  One allocation holds the pointer table plus all name strings.  */
	total = stringlen + vt->nr_vm_stat_items +
		(sizeof(void *) * vt->nr_vm_stat_items);
	if (!(vt->vm_stat_items = (char **)malloc(total))) {
		close_tmpfile();
		error(FATAL, "cannot malloc vm_stat_items cache\n");
	}
	BZERO(vt->vm_stat_items, total);

	start = (char *)&vt->vm_stat_items[vt->nr_vm_stat_items];

	/*  Second pass: copy each enumerator name into the table.  */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		c = parse_line(buf, arglist);
		if (!enumerator_value(arglist[0], &enum_value)) {
			close_tmpfile();
			goto bailout;
		}
		/*  Node items are offset past the zone items (ni == zc).  */
		i = ni + enum_value;
		if (!ni && (enum_value == zc)) {
			ni = zc;
			continue;
		}
		if (i < vt->nr_vm_stat_items) {
			vt->vm_stat_items[i] = start;
			strcpy(start, arglist[0]);
			start += strlen(arglist[0]) + 1;
		}
	}
	close_tmpfile();

	vt->flags |= VM_STAT;
	return TRUE;

bailout:
	vt->nr_vm_stat_items = -1;
	return FALSE;
}

/*
 * Either dump all vm_stat entries, or return the value of
 * the specified vm_stat item.  Use the global counter unless
 * a zone-specific address is passed.
 */
static int
dump_vm_stat(char *item, long *retval, ulong zone)
{
	char *buf;
	ulong *vp;
	ulong location;
	int i, maxlen, len;
	long tc, zc = 0, nc = 0;
	int split_vmstat = 0;

	if (!vm_stat_init()) {
		if (!item)
			if (CRASHDEBUG(1))
				error(INFO,
				    "vm_stat not available in this kernel\n");
		return FALSE;
	}

	/*
	 *  NOTE(review): buf is never FREEBUF'd here; presumably the
	 *  command-completion cleanup releases it — confirm against
	 *  crash's buffer management.
	 */
	buf = GETBUF(sizeof(ulong) * vt->nr_vm_stat_items);

	if (symbol_exists("vm_node_stat") && symbol_exists("vm_zone_stat"))
		split_vmstat = 1;
	else
		location = zone ? zone : symbol_value("vm_stat");

	if (split_vmstat) {
		enumerator_value("NR_VM_ZONE_STAT_ITEMS", &zc);
		location = zone ? zone : symbol_value("vm_zone_stat");
		readmem(location, KVADDR, buf,
		    sizeof(ulong) * zc,
		    "vm_zone_stat", FAULT_ON_ERROR);
		if (!zone) {
			/*  Append the node counters after the zone counters.  */
			location = symbol_value("vm_node_stat");
			enumerator_value("NR_VM_NODE_STAT_ITEMS", &nc);
			readmem(location, KVADDR, buf + (sizeof(ulong) * zc),
			    sizeof(ulong) * nc,
			    "vm_node_stat", FAULT_ON_ERROR);
		}
		tc = zc + nc;
	} else {
		readmem(location, KVADDR, buf,
		    sizeof(ulong) * vt->nr_vm_stat_items,
		    "vm_stat", FAULT_ON_ERROR);
		tc = vt->nr_vm_stat_items;
	}

	if (!item) {
		/*  Dump-all mode: right-align names to the longest one.  */
		if (!zone)
			fprintf(fp, " VM_STAT:\n");
		for (i = maxlen = 0; i < tc; i++)
			if ((len = strlen(vt->vm_stat_items[i])) > maxlen)
				maxlen = len;
		vp = (ulong *)buf;
		for (i = 0; i < tc; i++)
			fprintf(fp, "%s%s: %ld\n",
				space(maxlen - strlen(vt->vm_stat_items[i])),
				vt->vm_stat_items[i], vp[i]);
		return TRUE;
	}

	/*  Single-item mode: look the name up and return its value.  */
	vp = (ulong *)buf;
	for (i = 0; i < tc; i++) {
		if (STREQ(vt->vm_stat_items[i], item)) {
			*retval = vp[i];
			return TRUE;
		}
	}

	return FALSE;
}

/*
 * Dump the cumulative totals of the per_cpu__page_states counters.
*/
/*
 *  Sum the per-cpu page_state counters across all cpus and display one
 *  total per struct member.  The member names are scraped from gdb's
 *  "ptype struct page_state" output, and the per-cpu values from
 *  dump_struct() output.  Returns FALSE when the per-cpu symbol or the
 *  type is unavailable.
 */
int
dump_page_states(void)
{
	struct syment *sp;
	ulong addr, value;
	int i, c, fd, len, instance, members;
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	struct entry {
		char *name;
		ulong value;
	} *entry_list;
	struct stat stat;
	char *namebuf, *nameptr;

	if (!(sp = per_cpu_symbol_search("per_cpu__page_states"))) {
		if (CRASHDEBUG(1))
			/*
			 *  BUGFIX: the two adjacent literals previously
			 *  concatenated with no separating space, yielding
			 *  "per_cpu__page_statesnot available...".
			 */
			error(INFO, "per_cpu__page_states "
				"not available in this kernel\n");
		return FALSE;
	}

	instance = members = len = 0;

	sprintf(buf, "ptype struct page_state");

	open_tmpfile();
	if (!gdb_pass_through(buf, fp, GNU_RETURN_ON_ERROR)) {
		close_tmpfile();
		return FALSE;
	}

	/*  Size the name arena from the tmpfile itself.  */
	fflush(pc->tmpfile);
	fd = fileno(pc->tmpfile);
	fstat(fd, &stat);
	namebuf = GETBUF(stat.st_size);
	nameptr = namebuf;

	/*  First pass: count the structure members.  */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "struct page_state") ||
		    strstr(buf, "}"))
			continue;
		members++;
	}

	entry_list = (struct entry *)
		GETBUF(sizeof(struct entry) * members);

	/*  Second pass: record each member name (last token on the line).  */
	rewind(pc->tmpfile);
	i = 0;
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "struct page_state") ||
		    strstr(buf, "}"))
			continue;
		strip_ending_char(strip_linefeeds(buf), ';');
		c = parse_line(buf, arglist);
		strcpy(nameptr, arglist[c-1]);
		entry_list[i].name = nameptr;
		if (strlen(nameptr) > len)
			len = strlen(nameptr);
		nameptr += strlen(nameptr)+2;
		i++;
	}
	close_tmpfile();

	/*  Dump every cpu's struct, then accumulate the hex values.  */
	open_tmpfile();

	for (c = 0; c < kt->cpus; c++) {
		addr = sp->value + kt->__per_cpu_offset[c];
		dump_struct("page_state", addr, RADIX(16));
	}

	i = 0;
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "struct page_state")) {
			/*  A new per-cpu instance restarts the member index.  */
			instance++;
			i = 0;
			continue;
		}
		if (strstr(buf, "}"))
			continue;
		strip_linefeeds(buf);
		extract_hex(buf, &value, ',', TRUE);
		entry_list[i].value += value;
		i++;
	}
	close_tmpfile();

	fprintf(fp, " PAGE_STATES:\n");
	for (i = 0; i < members; i++) {
		sprintf(buf, "%s", entry_list[i].name);
		fprintf(fp, "%s", mkstring(buf, len+2, RJUST, 0));
		fprintf(fp, ": %ld\n", entry_list[i].value);
	}

	FREEBUF(namebuf);
	FREEBUF(entry_list);

	return TRUE;
}

/*
 * Dump the
cumulative totals of the per_cpu__vm_event_state
 * counters.
 */
static int
dump_vm_event_state(void)
{
	int i, c, maxlen, len;
	struct syment *sp;
	ulong addr;
	ulong *events, *cumulative;

	if (!vm_event_state_init())
		return FALSE;

	/*
	 *  One buffer: first half holds the current cpu's counters, the
	 *  second half the running totals.
	 *  NOTE(review): the totals are never explicitly zeroed here —
	 *  presumably GETBUF returns zero-filled memory; confirm against
	 *  crash's buffer allocator.
	 */
	events = (ulong *)GETBUF((sizeof(ulong) * vt->nr_vm_event_items) * 2);
	cumulative = &events[vt->nr_vm_event_items];

	sp = per_cpu_symbol_search("per_cpu__vm_event_states");

	for (c = 0; c < kt->cpus; c++) {
		addr = sp->value + kt->__per_cpu_offset[c];
		if (CRASHDEBUG(1)) {
			fprintf(fp, "[%d]: %lx\n", c, addr);
			dump_struct("vm_event_state", addr, RADIX(16));
		}
		readmem(addr, KVADDR, events,
		    sizeof(ulong) * vt->nr_vm_event_items,
		    "vm_event_states buffer", FAULT_ON_ERROR);
		for (i = 0; i < vt->nr_vm_event_items; i++)
			cumulative[i] += events[i];
	}

	fprintf(fp, "\n VM_EVENT_STATES:\n");
	/*  Right-align the names to the longest item name.  */
	for (i = maxlen = 0; i < vt->nr_vm_event_items; i++)
		if ((len = strlen(vt->vm_event_items[i])) > maxlen)
			maxlen = len;
	for (i = 0; i < vt->nr_vm_event_items; i++)
		fprintf(fp, "%s%s: %lu\n",
			space(maxlen - strlen(vt->vm_event_items[i])),
			vt->vm_event_items[i], cumulative[i]);

	FREEBUF(events);
	return TRUE;
}

/*
 *  Build the vt->vm_event_items name table from the vm_event_item
 *  enumeration (scraped via gdb).  Returns TRUE and sets VM_EVENT in
 *  vt->flags on success; marks nr_vm_event_items = -1 on failure.
 */
static int
vm_event_state_init(void)
{
	int i, stringlen, total;
	int c ATTRIBUTE_UNUSED;
	long count, enum_value;
	struct gnu_request *req;
	char *arglist[MAXARGS];
	char buf[BUFSIZE];
	char *start;

	if (vt->flags & VM_EVENT)
		return TRUE;

	if ((vt->nr_vm_event_items == -1) ||
	    !per_cpu_symbol_search("per_cpu__vm_event_states"))
		goto bailout;

	if (!enumerator_value("NR_VM_EVENT_ITEMS", &count))
		return FALSE;

	vt->nr_vm_event_items = count;

	/*  Have gdb print the enumerator names into a tmpfile.  */
	open_tmpfile();
	req = (struct gnu_request *)GETBUF(sizeof(struct gnu_request));
	req->command = GNU_GET_DATATYPE;
	req->name = "vm_event_item";
	req->flags = GNU_PRINT_ENUMERATORS;
	gdb_interface(req);
	FREEBUF(req);

	/*  First pass: accumulate the string-space requirement.  */
	stringlen = 1;

	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		clean_line(buf);
		c = parse_line(buf, arglist);
		if (STREQ(arglist[0], "NR_VM_EVENT_ITEMS"))
			break;
		else
			stringlen += strlen(arglist[0]);
	}

	/*  One allocation holds the pointer table plus all name strings.  */
	total = stringlen + vt->nr_vm_event_items +
		(sizeof(void *) * vt->nr_vm_event_items);
	if (!(vt->vm_event_items = (char **)malloc(total))) {
		close_tmpfile();
		error(FATAL, "cannot malloc vm_event_items cache\n");
	}
	BZERO(vt->vm_event_items, total);

	start = (char *)&vt->vm_event_items[vt->nr_vm_event_items];

	/*  Second pass: copy each enumerator name into the table.  */
	rewind(pc->tmpfile);
	while (fgets(buf, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf, "{") || strstr(buf, "}"))
			continue;
		c = parse_line(buf, arglist);
		if (enumerator_value(arglist[0], &enum_value))
			i = enum_value;
		else {
			close_tmpfile();
			goto bailout;
		}
		if (i < vt->nr_vm_event_items) {
			vt->vm_event_items[i] = start;
			strcpy(start, arglist[0]);
			start += strlen(arglist[0]) + 1;
		}
	}
	close_tmpfile();

	vt->flags |= VM_EVENT;
	return TRUE;

bailout:
	vt->nr_vm_event_items = -1;
	return FALSE;
}

/*
 * Dump the per-cpu offset values that are used to
 * resolve per-cpu symbol values.
 */
static void
dump_per_cpu_offsets(void)
{
	int c;
	char buf[BUFSIZE];

	fprintf(fp, "PER-CPU OFFSET VALUES:\n");

	for (c = 0; c < kt->cpus; c++) {
		sprintf(buf, "CPU %d", c);
		fprintf(fp, "%7s: %lx", buf, kt->__per_cpu_offset[c]);
		if (hide_offline_cpu(c))
			fprintf(fp, " [OFFLINE]\n");
		else
			fprintf(fp, "\n");
	}
}

/*
 * Dump the value(s) of a page->flags bitmap.
*/
/*
 *  Display the kernel's "enum pageflags" bit definitions.  With a
 *  nonzero flags argument, only the bits set in flags are shown
 *  (a decode of one page's flags word); with 0, the full table is
 *  printed.  The enumerator list is scraped into a tmpfile and parsed.
 */
void
dump_page_flags(ulonglong flags)
{
	int c ATTRIBUTE_UNUSED;
	int sz, val, found, largest, longest, header_printed;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char header[BUFSIZE];
	char *arglist[MAXARGS];
	ulonglong tmpflag;

	found = longest = largest = header_printed = 0;

	open_tmpfile();

	/*  First pass: find the longest name and the highest PG_ bit.  */
	if (dump_enumerator_list("pageflags")) {
		rewind(pc->tmpfile);
		while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
			if (strstr(buf1, " = ")) {
				c = parse_line(buf1, arglist);
				if ((sz = strlen(arglist[0])) > longest)
					longest = sz;
				if (strstr(arglist[0], "PG_") &&
				    ((val = atoi(arglist[2])) > largest))
					largest = val;
			}
		}
	} else
		error(FATAL,
			"enum pageflags does not exist in this kernel\n");

	/*  Convert the highest bit number into a hex field width.  */
	largest = (largest+1)/4 + 1;
	sprintf(header, "%s BIT VALUE\n",
		mkstring(buf1, longest, LJUST, "PAGE-FLAG"));

	rewind(pc->tmpfile);

	/*  Output goes to saved_fp; fp is redirected into the tmpfile.  */
	if (flags)
		fprintf(pc->saved_fp, "FLAGS: %llx\n", flags);
	fprintf(pc->saved_fp, "%s%s", flags ? " " : "", header);

	/*  Second pass: print each PG_ bit (or only those set in flags).  */
	while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
		if (strstr(buf1, " = ") && strstr(buf1, "PG_")) {
			c = parse_line(buf1, arglist);
			val = atoi(arglist[2]);
			tmpflag = 1ULL << val;
			if (!flags || (flags & tmpflag)) {
				/*
				 *  NOTE(review): the printed value is cast to
				 *  ulong; on a 32-bit build a bit number >= 32
				 *  would truncate here — confirm whether that
				 *  can occur in practice.
				 */
				fprintf(pc->saved_fp, "%s%s %2d %.*lx\n",
					flags ? " " : "",
					mkstring(buf2, longest, LJUST,
					arglist[0]),
					val, largest, (ulong)(1ULL << val));
				if (flags & tmpflag)
					found++;
			}
		}
	}

	if (flags && !found)
		fprintf(pc->saved_fp, " (none found)\n");

	close_tmpfile();
}

/*
 * Support for slub.c slab cache.
*/ static void kmem_cache_init_slub(void) { if (vt->flags & KMEM_CACHE_INIT) return; if (CRASHDEBUG(1) && !(vt->flags & CONFIG_NUMA) && (vt->numnodes > 1)) error(WARNING, "kmem_cache_init_slub: numnodes: %d without CONFIG_NUMA\n", vt->numnodes); if (kmem_cache_downsize()) add_to_downsized("kmem_cache"); vt->cpu_slab_type = MEMBER_TYPE("kmem_cache", "cpu_slab"); vt->flags |= KMEM_CACHE_INIT; } static void kmem_cache_list_common(void) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { fprintf(fp, "%lx ", cache_list[i]); readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)\n"); fprintf(fp, "%s\n", buf); } FREEBUF(cache_list); } #define DUMP_KMEM_CACHE_INFO_SLUB() dump_kmem_cache_info_slub(si) static void dump_kmem_cache_info_slub(struct meminfo *si) { char b1[BUFSIZE]; char b2[BUFSIZE]; int namelen, sizelen, spacelen; if (si->flags & SLAB_GATHER_FAILURE) error(INFO, "%s: cannot gather relevant slab data\n", si->curname); fprintf(fp, "%s ", mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->cache))); namelen = strlen(si->curname); sprintf(b2, "%ld", si->objsize); sizelen = strlen(b2); spacelen = 0; if (namelen++ > 18) { spacelen = 29 - namelen - sizelen; fprintf(fp, "%s%s%ld ", si->curname, space(spacelen <= 0 ? 
1 : spacelen), si->objsize); if (spacelen > 0) spacelen = 1; if (si->flags & SLAB_GATHER_FAILURE) sprintf(b1, "%c%ds ", '%', 9 + spacelen - 1); else sprintf(b1, "%c%dld ", '%', 9 + spacelen - 1); } else { fprintf(fp, "%-18s %8ld ", si->curname, si->objsize); if (si->flags & SLAB_GATHER_FAILURE) sprintf(b1, "%c%ds ", '%', 9); else sprintf(b1, "%c%dld ", '%', 9); } if (si->flags & SLAB_GATHER_FAILURE) { fprintf(fp, b1, "?"); fprintf(fp, "%8s %5s %4ldk\n", "?", "?", si->slabsize/1024); } else { fprintf(fp, b1, si->inuse); fprintf(fp, "%8ld %5ld %4ldk\n", si->inuse + si->free, si->num_slabs, si->slabsize/1024); } } static void dump_kmem_cache_slub(struct meminfo *si) { int i; ulong name, oo; unsigned int size, objsize, objects, order, offset; char *reqname, *p1; char kbuf[BUFSIZE]; char buf[BUFSIZE]; if (INVALID_MEMBER(kmem_cache_node_nr_slabs)) { error(INFO, "option requires kmem_cache_node.nr_slabs member!\n" "(the kernel must be built with CONFIG_SLUB_DEBUG)\n"); return; } order = objects = 0; si->cache_count = get_kmem_cache_list(&si->cache_list); si->cache_buf = GETBUF(SIZE(kmem_cache)); if (VALID_MEMBER(page_objects) && OFFSET(page_objects) == OFFSET(page_inuse)) si->flags |= SLAB_BITFIELD; if (!si->reqname && !(si->flags & (ADDRESS_SPECIFIED|GET_SLAB_PAGES))) fprintf(fp, "%s", kmem_cache_hdr); if (si->flags & ADDRESS_SPECIFIED) { if ((p1 = is_slab_page(si, kbuf))) { si->flags |= VERBOSE; si->slab = (ulong)si->spec_addr; } else if (!(p1 = vaddr_to_kmem_cache(si->spec_addr, kbuf, VERBOSE))) { error(INFO, "address is not allocated in slab subsystem: %lx\n", si->spec_addr); goto bailout; } if (si->reqname && (si->reqname != p1)) error(INFO, "ignoring pre-selected %s cache for address: %lx\n", si->reqname, si->spec_addr, si->reqname); reqname = p1; } else reqname = si->reqname; for (i = 0; i < si->cache_count; i++) { BZERO(si->cache_buf, SIZE(kmem_cache)); if (!readmem(si->cache_list[i], KVADDR, si->cache_buf, SIZE(kmem_cache), "kmem_cache buffer", 
RETURN_ON_ERROR|RETURN_PARTIAL)) goto next_cache; name = ULONG(si->cache_buf + OFFSET(kmem_cache_name)); if (!read_string(name, buf, BUFSIZE-1)) sprintf(buf, "(unknown)"); if (reqname) { if (!STREQ(reqname, buf)) continue; fprintf(fp, "%s", kmem_cache_hdr); } if (ignore_cache(si, buf)) { fprintf(fp, "%lx %-18s [IGNORED]\n", si->cache_list[i], buf); goto next_cache; } objsize = UINT(si->cache_buf + OFFSET(kmem_cache_objsize)); size = UINT(si->cache_buf + OFFSET(kmem_cache_size)); offset = UINT(si->cache_buf + OFFSET(kmem_cache_offset)); if (VALID_MEMBER(kmem_cache_objects)) { objects = UINT(si->cache_buf + OFFSET(kmem_cache_objects)); order = UINT(si->cache_buf + OFFSET(kmem_cache_order)); } else if (VALID_MEMBER(kmem_cache_oo)) { oo = ULONG(si->cache_buf + OFFSET(kmem_cache_oo)); objects = oo_objects(oo); order = oo_order(oo); } else error(FATAL, "cannot determine " "kmem_cache objects/order values\n"); si->cache = si->cache_list[i]; si->curname = buf; si->objsize = objsize; si->size = size; si->objects = objects; si->slabsize = (PAGESIZE() << order); si->inuse = si->num_slabs = 0; si->slab_offset = offset; si->random = VALID_MEMBER(kmem_cache_random) ? 
ULONG(si->cache_buf + OFFSET(kmem_cache_random)) : 0; if (!get_kmem_cache_slub_data(GET_SLUB_SLABS, si) || !get_kmem_cache_slub_data(GET_SLUB_OBJECTS, si)) si->flags |= SLAB_GATHER_FAILURE; DUMP_KMEM_CACHE_INFO_SLUB(); if (si->flags & SLAB_GATHER_FAILURE) { si->flags &= ~SLAB_GATHER_FAILURE; goto next_cache; } if (si->flags & ADDRESS_SPECIFIED) { if (!si->slab) si->slab = vaddr_to_slab(si->spec_addr); do_slab_slub(si, VERBOSE); } else if (si->flags & VERBOSE) { do_kmem_cache_slub(si); if (!reqname && ((i+1) < si->cache_count)) fprintf(fp, "%s", kmem_cache_hdr); } next_cache: if (reqname) break; } bailout: FREEBUF(si->cache_list); FREEBUF(si->cache_buf); } static ushort slub_page_objects(struct meminfo *si, ulong page) { ulong objects_vaddr; ushort objects; /* * Pre-2.6.27, the object count and order were fixed in the * kmem_cache structure. Now they may change, say if a high * order slab allocation fails, so the per-slab object count * is kept in the slab. */ if (VALID_MEMBER(page_objects)) { objects_vaddr = page + OFFSET(page_objects); if (si->flags & SLAB_BITFIELD) objects_vaddr += sizeof(ushort); if (!readmem(objects_vaddr, KVADDR, &objects, sizeof(ushort), "page.objects", RETURN_ON_ERROR)) return 0; /* * Strip page.frozen bit. 
*/ if (si->flags & SLAB_BITFIELD) { if (__BYTE_ORDER == __LITTLE_ENDIAN) { objects <<= 1; objects >>= 1; } if (__BYTE_ORDER == __BIG_ENDIAN) objects >>= 1; } if (CRASHDEBUG(1) && (objects != si->objects)) error(NOTE, "%s: slab: %lx oo objects: %ld " "slab objects: %d\n", si->curname, page, si->objects, objects); if (objects == (ushort)(-1)) { error(INFO, "%s: slab: %lx invalid page.objects: -1\n", si->curname, page); return 0; } } else objects = (ushort)si->objects; return objects; } static short count_cpu_partial(struct meminfo *si, int cpu) { short cpu_partial_inuse, cpu_partial_objects, free_objects; ulong cpu_partial; free_objects = 0; if (VALID_MEMBER(kmem_cache_cpu_partial) && VALID_MEMBER(page_objects)) { readmem(ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu] + OFFSET(kmem_cache_cpu_partial), KVADDR, &cpu_partial, sizeof(ulong), "kmem_cache_cpu.partial", RETURN_ON_ERROR); while (cpu_partial) { if (!is_page_ptr(cpu_partial, NULL)) { error(INFO, "%s: invalid partial list slab pointer: %lx\n", si->curname, cpu_partial); return 0; } if (!readmem(cpu_partial + OFFSET(page_inuse), KVADDR, &cpu_partial_inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) return 0; if (cpu_partial_inuse == -1) return 0; cpu_partial_objects = slub_page_objects(si, cpu_partial); if (!cpu_partial_objects) return 0; free_objects += cpu_partial_objects - cpu_partial_inuse; readmem(cpu_partial + OFFSET(page_next), KVADDR, &cpu_partial, sizeof(ulong), "page.next", RETURN_ON_ERROR); } } return free_objects; } /* * Emulate the total count calculation done by the * slab_objects() sysfs function in slub.c. 
*/ static int get_kmem_cache_slub_data(long cmd, struct meminfo *si) { int i, n, node; ulong total_objects, total_slabs, free_objects; ulong cpu_slab_ptr, node_ptr, cpu_freelist, orig_slab; ulong node_nr_partial, node_nr_slabs, node_total_objects; int full_slabs, objects, node_total_avail; long p; short inuse; ulong *nodes, *per_cpu; struct node_table *nt; /* * nodes[n] is not being used (for now) * per_cpu[n] is a count of cpu_slab pages per node. */ nodes = (ulong *)GETBUF(2 * sizeof(ulong) * vt->numnodes); per_cpu = nodes + vt->numnodes; total_slabs = total_objects = free_objects = cpu_freelist = 0; node_total_avail = VALID_MEMBER(kmem_cache_node_total_objects) ? TRUE : FALSE; for (i = 0; i < kt->cpus; i++) { cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist); if (!cpu_slab_ptr) continue; if ((node = page_to_nid(cpu_slab_ptr)) < 0) goto bailout; switch (cmd) { case GET_SLUB_OBJECTS: { /* For better error report, set cur slab to si->slab. */ orig_slab = si->slab; si->slab = cpu_slab_ptr; if (!readmem(cpu_slab_ptr + OFFSET(page_inuse), KVADDR, &inuse, sizeof(short), "page inuse", RETURN_ON_ERROR)) { si->slab = orig_slab; return FALSE; } objects = slub_page_objects(si, cpu_slab_ptr); if (!objects) { si->slab = orig_slab; return FALSE; } free_objects += objects - inuse; free_objects += count_free_objects(si, cpu_freelist); free_objects += count_cpu_partial(si, i); if (!node_total_avail) total_objects += inuse; total_slabs++; si->slab = orig_slab; } break; case GET_SLUB_SLABS: total_slabs++; break; } per_cpu[node]++; } for (n = 0; n < vt->numnodes; n++) { if (vt->flags & CONFIG_NUMA) { nt = &vt->node_table[n]; node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *) * nt->node_id)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); if (!node_ptr) continue; if (!readmem(node_ptr + OFFSET(kmem_cache_node_nr_partial), KVADDR, &node_nr_partial, sizeof(ulong), "kmem_cache_node nr_partial", RETURN_ON_ERROR)) goto bailout; if 
(!readmem(node_ptr + OFFSET(kmem_cache_node_nr_slabs), KVADDR, &node_nr_slabs, sizeof(ulong), "kmem_cache_node nr_slabs", RETURN_ON_ERROR)) goto bailout; if (node_total_avail) { if (!readmem(node_ptr + OFFSET(kmem_cache_node_total_objects), KVADDR, &node_total_objects, sizeof(ulong), "kmem_cache_node total_objects", RETURN_ON_ERROR)) goto bailout; } switch (cmd) { case GET_SLUB_OBJECTS: if ((p = count_partial(node_ptr, si, &free_objects)) < 0) return FALSE; if (!node_total_avail) total_objects += p; total_slabs += node_nr_partial; break; case GET_SLUB_SLABS: total_slabs += node_nr_partial; break; } full_slabs = node_nr_slabs - per_cpu[n] - node_nr_partial; objects = si->objects; switch (cmd) { case GET_SLUB_OBJECTS: if (node_total_avail) total_objects += node_total_objects; else total_objects += (full_slabs * objects); total_slabs += full_slabs; break; case GET_SLUB_SLABS: total_slabs += full_slabs; break; } if (!(vt->flags & CONFIG_NUMA)) break; } switch (cmd) { case GET_SLUB_OBJECTS: if (!node_total_avail) si->inuse = total_objects; else si->inuse = total_objects - free_objects; if (VALID_MEMBER(page_objects) && node_total_avail) si->free = free_objects; else si->free = (total_slabs * si->objects) - si->inuse; break; case GET_SLUB_SLABS: si->num_slabs = total_slabs; break; } FREEBUF(nodes); return TRUE; bailout: FREEBUF(nodes); return FALSE; } static void do_cpu_partial_slub(struct meminfo *si, int cpu) { ulong cpu_slab_ptr; void *partial; cpu_slab_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_cpu_slab)) + kt->__per_cpu_offset[cpu]; readmem(cpu_slab_ptr + OFFSET(kmem_cache_cpu_partial), KVADDR, &partial, sizeof(void *), "kmem_cache_cpu.partial", RETURN_ON_ERROR); fprintf(fp, "CPU %d PARTIAL:\n%s", cpu, partial ? "" : " (empty)\n"); /* * kmem_cache_cpu.partial points to the first page of per cpu partial * list. 
*/
	/* walk page.next links until the end of the per-cpu partial list */
	while (partial) {
		si->slab = (ulong)partial;
		if (!is_page_ptr(si->slab, NULL)) {
			error(INFO, "%s: invalid partial list slab pointer: %lx\n",
				si->curname, si->slab);
			break;
		}
		if (!do_slab_slub(si, VERBOSE))
			break;
		readmem((ulong)partial + OFFSET(page_next), KVADDR,
		    &partial, sizeof(void *), "page.next", RETURN_ON_ERROR);
	}
}

/*
 * Verbose per-cache display: dump each cpu's kmem_cache_cpu, its active
 * slab and (when present) per-cpu partial list, then each node's
 * kmem_cache_node with its partial/full lists.
 */
static void
do_kmem_cache_slub(struct meminfo *si)
{
	int i, n;
	ulong cpu_slab_ptr, node_ptr;
	ulong node_nr_partial, node_nr_slabs;
	ulong *per_cpu;	/* per-node count of cpu_slab pages */
	struct node_table *nt;

	per_cpu = (ulong *)GETBUF(sizeof(ulong) * vt->numnodes);

	for (i = 0; i < kt->cpus; i++) {
		if (hide_offline_cpu(i)) {
			fprintf(fp, "CPU %d [OFFLINE]\n", i);
			continue;
		}

		cpu_slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab)) +
			kt->__per_cpu_offset[i];
		fprintf(fp, "CPU %d KMEM_CACHE_CPU:\n %lx\n",
			i, cpu_slab_ptr);
		cpu_slab_ptr = get_cpu_slab_ptr(si, i, NULL);

		fprintf(fp, "CPU %d SLAB:\n%s", i,
			cpu_slab_ptr ? "" : " (empty)\n");

		if (cpu_slab_ptr) {
			if ((n = page_to_nid(cpu_slab_ptr)) >= 0)
				per_cpu[n]++;
			si->slab = cpu_slab_ptr;
			if (!do_slab_slub(si, VERBOSE))
				continue;
		}

		if (VALID_MEMBER(kmem_cache_cpu_partial))
			do_cpu_partial_slub(si, i);

		if (received_SIGINT())
			restart(0);
	}

	for (n = 0; n < vt->numnodes; n++) {
		if (vt->flags & CONFIG_NUMA) {
			nt = &vt->node_table[n];
			node_ptr = ULONG(si->cache_buf +
				OFFSET(kmem_cache_node) +
				(sizeof(void *)* nt->node_id));
		} else
			node_ptr = si->cache +
				OFFSET(kmem_cache_local_node);

		if (node_ptr) {
			if (!readmem(node_ptr +
			    OFFSET(kmem_cache_node_nr_partial),
			    KVADDR, &node_nr_partial, sizeof(ulong),
			    "kmem_cache_node nr_partial", RETURN_ON_ERROR))
				break;
			if (!readmem(node_ptr +
			    OFFSET(kmem_cache_node_nr_slabs),
			    KVADDR, &node_nr_slabs, sizeof(ulong),
			    "kmem_cache_node nr_slabs", RETURN_ON_ERROR))
				break;
		} else
			node_nr_partial = node_nr_slabs = 0;

		fprintf(fp,
		    "KMEM_CACHE_NODE NODE SLABS PARTIAL PER-CPU\n");
		fprintf(fp, "%lx%s", node_ptr,
			space(VADDR_PRLEN > 8 ? 2 : 10));
		fprintf(fp, "%4d %5ld %7ld %7ld\n",
			n, node_nr_slabs, node_nr_partial, per_cpu[n]);

		do_node_lists_slub(si, node_ptr, n);

		if (!(vt->flags & CONFIG_NUMA))
			break;
	}

	fprintf(fp, "\n");

	FREEBUF(per_cpu);
}

/* one-line slab summary: slab page, base vaddr, node, object counts */
#define DUMP_SLAB_INFO_SLUB() \
{ \
	char b1[BUFSIZE], b2[BUFSIZE]; \
	fprintf(fp, " %s %s %4d %5d %9d %4d\n", \
	    mkstring(b1, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(si->slab)), \
	    mkstring(b2, VADDR_PRLEN, LJUST|LONG_HEX, MKSTR(vaddr)), \
	    node, objects, inuse, objects - inuse); \
}

/*
 * Dump one slab page (si->slab); in verbose mode also enumerate every
 * object, marking free objects by scanning both page.freelist and, if
 * this slab is a cpu's active slab, that cpu's freelist.
 * Returns TRUE on success, FALSE on any read/validation failure.
 */
static int
do_slab_slub(struct meminfo *si, int verbose)
{
	physaddr_t paddr;
	ulong vaddr;
	ushort inuse, objects;
	ulong freelist, cpu_freelist, cpu_slab_ptr;
	int i, free_objects, cpu_slab, is_free, node;
	ulong p, q;
#define SLAB_RED_ZONE 0x00000400UL
	ulong flags, red_left_pad;

	if (!si->slab) {
		if (CRASHDEBUG(1))
			error(INFO, "-S option not supported for CONFIG_SLUB\n");
		return FALSE;
	}

	if (!page_to_phys(si->slab, &paddr)) {
		error(INFO, "%s: invalid slab address: %lx\n",
			si->curname, si->slab);
		return FALSE;
	}

	node = page_to_nid(si->slab);

	vaddr = PTOV(paddr);

	if (verbose)
		fprintf(fp, " %s", slab_hdr);

	if (!readmem(si->slab + OFFSET(page_inuse), KVADDR, &inuse,
	    sizeof(ushort), "page.inuse", RETURN_ON_ERROR))
		return FALSE;
	if (!readmem(si->slab + OFFSET(page_freelist), KVADDR, &freelist,
	    sizeof(void *), "page.freelist", RETURN_ON_ERROR))
		return FALSE;

	objects = slub_page_objects(si, si->slab);
	if (!objects)
		return FALSE;

	if (!verbose) {
		DUMP_SLAB_INFO_SLUB();
		return TRUE;
	}

	cpu_freelist = 0;
	for (i = 0, cpu_slab = -1; i < kt->cpus; i++) {
		cpu_slab_ptr = get_cpu_slab_ptr(si, i, &cpu_freelist);
		if (!cpu_slab_ptr)
			continue;
		if (cpu_slab_ptr == si->slab) {
			cpu_slab = i;
			/*
			 * Later slub scheme uses the per-cpu freelist
			 * so count the free objects by hand.
			 */
			if ((free_objects = count_free_objects(si,
			    cpu_freelist)) < 0)
				return FALSE;
			/*
			 * If the object is freed on foreign cpu, the
			 * object is liked to page->freelist.
			 */
			if (freelist)
				free_objects += objects - inuse;
			inuse = objects - free_objects;
			break;
		}
	}

	DUMP_SLAB_INFO_SLUB();

	fprintf(fp, " %s", free_inuse_hdr);

#define PAGE_MAPPING_ANON 1

	if (CRASHDEBUG(8)) {
		fprintf(fp, "< SLUB: free list START: >\n");
		i = 0;
		for (q = freelist; q; q = get_freepointer(si, (void *)q)) {
			/* low bit set marks the list terminator */
			if (q & PAGE_MAPPING_ANON) {
				fprintf(fp,
				    "< SLUB: free list END: %lx (%d found) >\n",
					q, i);
				break;
			}
			fprintf(fp, " %lx\n", q);
			i++;
		}
		if (!q)
			fprintf(fp, "< SLUB: free list END (%d found) >\n", i);
	}

	red_left_pad = 0;
	if (VALID_MEMBER(kmem_cache_red_left_pad)) {
		flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags));
		if (flags & SLAB_RED_ZONE)
			red_left_pad = ULONG(si->cache_buf +
				OFFSET(kmem_cache_red_left_pad));
	}

	/* enumerate every object slot in the slab page */
	for (p = vaddr; p < vaddr + objects * si->size; p += si->size) {
		hq_open();
		is_free = FALSE;
		/* Search an object on both of freelist and cpu_freelist */
		ulong lists[] = { freelist, cpu_freelist, };
		for (i = 0; i < sizeof(lists) / sizeof(lists[0]); i++) {
			for (is_free = 0, q = lists[i]; q;
			     q = get_freepointer(si, (void *)q)) {
				if (q == BADADDR) {
					hq_close();
					return FALSE;
				}
				if (q & PAGE_MAPPING_ANON)
					break;
				if ((p + red_left_pad) == q) {
					is_free = TRUE;
					goto found_object;
				}
				if (!hq_enter(q)) {
					hq_close();
					error(INFO,
					    "%s: slab: %lx duplicate freelist object: %lx\n",
						si->curname, si->slab, q);
					return FALSE;
				}
			}
		}
found_object:
		hq_close();

		if (si->flags & ADDRESS_SPECIFIED) {
			if ((si->spec_addr < p) ||
			    (si->spec_addr >= (p + si->size))) {
				if (!(si->flags & VERBOSE))
					continue;
			}
		}

		fprintf(fp, " %s%lx%s",
			is_free ? " " : "[",
			pc->flags2 & REDZONE ? p : p + red_left_pad,
			is_free ? " " : "]");
		if (is_free && (cpu_slab >= 0))
			fprintf(fp, "(cpu %d cache)", cpu_slab);
		fprintf(fp, "\n");

	}

	return TRUE;
}

/*
 * Count the entries on a freelist, stopping at the terminator or on a
 * duplicate entry (loop detection via the hash queue).
 */
static int
count_free_objects(struct meminfo *si, ulong freelist)
{
	int c;
	ulong q;

	hq_open();

	c = 0;
	for (q = freelist; q; q = get_freepointer(si, (void *)q)) {
		if (q & PAGE_MAPPING_ANON)
			break;
		if (!hq_enter(q)) {
			error(INFO, "%s: slab: %lx duplicate freelist object: %lx\n",
				si->curname, si->slab, q);
			break;
		}
		c++;
	}

	hq_close();
	return c;
}

/*
 * Decode a (possibly hardened) freelist pointer.
 */
static ulong
freelist_ptr(struct meminfo *si, ulong ptr, ulong ptr_addr)
{
	if (si->random)
		/* CONFIG_SLAB_FREELIST_HARDENED */
		return (ptr ^ si->random ^ ptr_addr);
	else
		return ptr;
}

/*
 * Read the next-free pointer stored inside an object at si->slab_offset.
 * Returns BADADDR if the read fails.
 */
static ulong
get_freepointer(struct meminfo *si, void *object)
{
	ulong vaddr, nextfree;

	vaddr = (ulong)(object + si->slab_offset);
	if (!readmem(vaddr, KVADDR, &nextfree,
	    sizeof(void *), "get_freepointer", QUIET|RETURN_ON_ERROR)) {
		error(INFO, "%s: slab: %lx invalid freepointer: %lx\n",
			si->curname, si->slab, vaddr);
		return BADADDR;
	}

	return (freelist_ptr(si, nextfree, vaddr));
}

/*
 * Display a node's partial list and, when CONFIG_SLUB_DEBUG tracks it,
 * its full list.  List entries are page.lru links, so each slab page is
 * next - OFFSET(page_lru).
 */
static void
do_node_lists_slub(struct meminfo *si, ulong node_ptr, int node)
{
	ulong next, last, list_head, flags;
	int first;

	if (!node_ptr)
		return;

	list_head = node_ptr + OFFSET(kmem_cache_node_partial);
	if (!readmem(list_head, KVADDR, &next, sizeof(ulong),
	    "kmem_cache_node partial", RETURN_ON_ERROR))
		return;

	fprintf(fp, "NODE %d PARTIAL:\n%s", node,
		next == list_head ? " (empty)\n" : "");

	first = 0;
	while (next != list_head) {
		si->slab = last = next - OFFSET(page_lru);
		if (first++ == 0)
			fprintf(fp, " %s", slab_hdr);

		if (!is_page_ptr(si->slab, NULL)) {
			error(INFO,
			    "%s: invalid partial list slab pointer: %lx\n",
				si->curname, si->slab);
			return;
		}

		if (!do_slab_slub(si, !VERBOSE))
			return;

		if (received_SIGINT())
			restart(0);

		if (!readmem(next, KVADDR, &next, sizeof(ulong),
		    "page.lru.next", RETURN_ON_ERROR))
			return;

		if (!IS_KVADDR(next) ||
		    ((next != list_head) &&
		     !is_page_ptr(next - OFFSET(page_lru), NULL))) {
			error(INFO,
			    "%s: partial list slab: %lx invalid page.lru.next: %lx\n",
				si->curname, last, next);
			return;
		}
	}

#define SLAB_STORE_USER (0x00010000UL)
	flags = ULONG(si->cache_buf + OFFSET(kmem_cache_flags));

	/* the full list only exists with SLAB_STORE_USER debugging */
	if (INVALID_MEMBER(kmem_cache_node_full) ||
	    !(flags & SLAB_STORE_USER)) {
		fprintf(fp, "NODE %d FULL:\n (not tracked)\n", node);
		return;
	}

	list_head = node_ptr + OFFSET(kmem_cache_node_full);
	if (!readmem(list_head, KVADDR, &next, sizeof(ulong),
	    "kmem_cache_node full", RETURN_ON_ERROR))
		return;

	fprintf(fp, "NODE %d FULL:\n%s", node,
		next == list_head ? " (empty)\n" : "");

	first = 0;
	while (next != list_head) {
		si->slab = next - OFFSET(page_lru);
		if (first++ == 0)
			fprintf(fp, " %s", slab_hdr);

		if (!is_page_ptr(si->slab, NULL)) {
			error(INFO, "%s: invalid full list slab pointer: %lx\n",
				si->curname, si->slab);
			return;
		}

		if (!do_slab_slub(si, !VERBOSE))
			return;

		if (received_SIGINT())
			restart(0);

		if (!readmem(next, KVADDR, &next, sizeof(ulong),
		    "page.lru.next", RETURN_ON_ERROR))
			return;

		if (!IS_KVADDR(next)) {
			error(INFO, "%s: full list slab: %lx page.lru.next: %lx\n",
				si->curname, si->slab, next);
			return;
		}
	}
}

/*
 * If vaddr is exactly a kmem_cache address, copy its name into kbuf
 * and return kbuf; otherwise return NULL.
 */
static char *
is_kmem_cache_addr_common(ulong vaddr, char *kbuf)
{
	int i, cnt;
	ulong *cache_list;
	ulong name;
	int found;

	cnt = get_kmem_cache_list(&cache_list);

	for (i = 0, found = FALSE; i < cnt; i++) {
		if (cache_list[i] != vaddr)
			continue;

		if (!readmem(cache_list[i] + OFFSET(kmem_cache_name),
		    KVADDR, &name, sizeof(char *),
		    "kmem_cache.name", RETURN_ON_ERROR))
			break;
		if (!read_string(name, kbuf, BUFSIZE-1))
			sprintf(kbuf, "(unknown)");
		found = TRUE;
		break;
	}

	FREEBUF(cache_list);

	return (found ? kbuf : NULL);
}

/*
 * Kernel-config-neutral page-to-node evaluator.
 */
static int
page_to_nid(ulong page)
{
	int i;
	physaddr_t paddr;
	struct node_table *nt;
	physaddr_t end_paddr;

	if (!page_to_phys(page, &paddr)) {
		error(INFO, "page_to_nid: invalid page: %lx\n", page);
		return -1;
	}

	for (i = 0; i < vt->numnodes; i++) {
		nt = &vt->node_table[i];

		end_paddr = nt->start_paddr +
			((physaddr_t)nt->size * (physaddr_t)PAGESIZE());

		if ((paddr >= nt->start_paddr) && (paddr < end_paddr))
			return i;
	}

	error(INFO, "page_to_nid: cannot determine node for pages: %lx\n",
		page);

	return -1;
}

/*
 * Allocate and fill the passed-in buffer with a list of
 * the current kmem_cache structures.
*/ static int get_kmem_cache_list(ulong **cache_buf) { int cnt; ulong vaddr; struct list_data list_data, *ld; get_symbol_data("slab_caches", sizeof(void *), &vaddr); ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; ld->start = vaddr; ld->list_head_offset = OFFSET(kmem_cache_list); ld->end = symbol_value("slab_caches"); if (CRASHDEBUG(3)) ld->flags |= VERBOSE; cnt = do_list(ld); *cache_buf = ld->list_ptr; return cnt; } /* * Get the address of the head page of a compound page. */ static ulong compound_head(ulong page) { ulong flags, first_page, compound_head; first_page = page; if (VALID_MEMBER(page_compound_head)) { if (readmem(page+OFFSET(page_compound_head), KVADDR, &compound_head, sizeof(ulong), "page.compound_head", RETURN_ON_ERROR)) { if (compound_head & 1) first_page = compound_head - 1; } } else if (readmem(page+OFFSET(page_flags), KVADDR, &flags, sizeof(ulong), "page.flags", RETURN_ON_ERROR)) { if ((flags & vt->PG_head_tail_mask) == vt->PG_head_tail_mask) readmem(page+OFFSET(page_first_page), KVADDR, &first_page, sizeof(ulong), "page.first_page", RETURN_ON_ERROR); } return first_page; } long count_partial(ulong node, struct meminfo *si, ulong *free) { ulong list_head, next, last; short inuse, objects; ulong total_inuse; ulong count = 0; count = 0; total_inuse = 0; list_head = node + OFFSET(kmem_cache_node_partial); if (!readmem(list_head, KVADDR, &next, sizeof(ulong), "kmem_cache_node.partial", RETURN_ON_ERROR)) return -1; hq_open(); while (next != list_head) { if (!readmem(next - OFFSET(page_lru) + OFFSET(page_inuse), KVADDR, &inuse, sizeof(ushort), "page.inuse", RETURN_ON_ERROR)) { hq_close(); return -1; } last = next - OFFSET(page_lru); if (inuse == -1) { error(INFO, "%s: partial list slab: %lx invalid page.inuse: -1\n", si->curname, last); break; } total_inuse += inuse; if (VALID_MEMBER(page_objects)) { objects = slub_page_objects(si, last); if (!objects) { hq_close(); return -1; } *free += objects - inuse; } if 
(!readmem(next, KVADDR, &next, sizeof(ulong), "page.lru.next", RETURN_ON_ERROR)) { hq_close(); return -1; } if (!IS_KVADDR(next) || ((next != list_head) && !is_page_ptr(next - OFFSET(page_lru), NULL))) { error(INFO, "%s: partial list slab: %lx invalid page.lru.next: %lx\n", si->curname, last, next); break; } /* * Keep track of the last 1000 entries, and check * whether the list has recursed back onto itself. */ if ((++count % 1000) == 0) { hq_close(); hq_open(); } if (!hq_enter(next)) { error(INFO, "%s: partial list slab: %lx duplicate slab entry: %lx\n", si->curname, last, next); hq_close(); return -1; } } hq_close(); return total_inuse; } char * is_slab_page(struct meminfo *si, char *buf) { int i, cnt; ulong page_slab, page_flags, name; ulong *cache_list; char *retval; if (!(vt->flags & KMALLOC_SLUB)) return NULL; if (!is_page_ptr((ulong)si->spec_addr, NULL)) return NULL; if (!readmem(si->spec_addr + OFFSET(page_flags), KVADDR, &page_flags, sizeof(ulong), "page.flags", RETURN_ON_ERROR|QUIET)) return NULL; if (!(page_flags & (1 << vt->PG_slab))) return NULL; if (!readmem(si->spec_addr + OFFSET(page_slab), KVADDR, &page_slab, sizeof(ulong), "page.slab", RETURN_ON_ERROR|QUIET)) return NULL; retval = NULL; cnt = get_kmem_cache_list(&cache_list); for (i = 0; i < cnt; i++) { if (page_slab == cache_list[i]) { if (!readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", QUIET|RETURN_ON_ERROR)) goto bailout; if (!read_string(name, buf, BUFSIZE-1)) goto bailout; retval = buf; break; } } bailout: FREEBUF(cache_list); return retval; } /* * Figure out which of the kmem_cache.cpu_slab declarations * is used by this kernel, and return a pointer to the slab * page being used. Return the kmem_cache_cpu.freelist pointer * if requested. 
*/
/*
 * Resolve a cpu's active slab page for the cache in si->cache_buf,
 * handling the three historical declarations of kmem_cache.cpu_slab
 * (embedded struct, per-cpu array, per-cpu pointer).  When requested,
 * also hand back the kmem_cache_cpu.freelist value via cpu_freelist.
 * Returns 0 when there is no active slab (or a read fails).
 */
static ulong
get_cpu_slab_ptr(struct meminfo *si, int cpu, ulong *cpu_freelist)
{
	ulong slab_ptr, fl, pg;
	int indirect;

	if (cpu_freelist)
		*cpu_freelist = 0;

	/*
	 * The TYPE_CODE_ARRAY and TYPE_CODE_PTR layouts differ only in
	 * how the kmem_cache_cpu address is formed; the subsequent
	 * freelist/page dereferences are shared below ("indirect").
	 */
	indirect = FALSE;

	switch (vt->cpu_slab_type)
	{
	case TYPE_CODE_STRUCT:
		/* kmem_cache_cpu embedded directly in the kmem_cache */
		slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab) +
			OFFSET(kmem_cache_cpu_page));
		if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist))
			*cpu_freelist = ULONG(si->cache_buf +
				OFFSET(kmem_cache_cpu_slab) +
				OFFSET(kmem_cache_cpu_freelist));
		break;

	case TYPE_CODE_ARRAY:
		/* array of per-cpu kmem_cache_cpu pointers */
		slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab) + (sizeof(void *)*cpu));
		indirect = TRUE;
		break;

	case TYPE_CODE_PTR:
		/* per-cpu offset applied to a single base pointer */
		slab_ptr = ULONG(si->cache_buf +
			OFFSET(kmem_cache_cpu_slab)) +
			kt->__per_cpu_offset[cpu];
		indirect = TRUE;
		break;

	default:
		slab_ptr = 0;
		error(FATAL, "cannot determine location of kmem_cache.cpu_slab page\n");
	}

	if (indirect && slab_ptr) {
		if (cpu_freelist && VALID_MEMBER(kmem_cache_cpu_freelist)) {
			if (readmem(slab_ptr +
			    OFFSET(kmem_cache_cpu_freelist), KVADDR,
			    &fl, sizeof(void *),
			    "kmem_cache_cpu.freelist", RETURN_ON_ERROR))
				*cpu_freelist = fl;
		}
		if (VALID_MEMBER(kmem_cache_cpu_page)) {
			if (!readmem(slab_ptr +
			    OFFSET(kmem_cache_cpu_page), KVADDR,
			    &pg, sizeof(void *),
			    "kmem_cache_cpu.page", RETURN_ON_ERROR))
				slab_ptr = 0;
			else
				slab_ptr = pg;
		}
	}

	return slab_ptr;
}

/*
 * In 2.6.27 kmem_cache.order and kmem_cache.objects were merged
 * into the kmem_cache.oo, a kmem_cache_order_objects structure.
* oo_order() and oo_objects() emulate the kernel functions * of the same name. */ static unsigned int oo_order(ulong oo) { return (oo >> 16); } static unsigned int oo_objects(ulong oo) { return (oo & ((1 << 16) - 1)); } #ifdef NOT_USED ulong slab_to_kmem_cache_node(struct meminfo *si, ulong slab_page) { int node; ulong node_ptr; if (vt->flags & CONFIG_NUMA) { node = page_to_nid(slab_page); node_ptr = ULONG(si->cache_buf + OFFSET(kmem_cache_node) + (sizeof(void *)*node)); } else node_ptr = si->cache + OFFSET(kmem_cache_local_node); return node_ptr; } ulong get_kmem_cache_by_name(char *request) { int i, cnt; ulong *cache_list; ulong name; char buf[BUFSIZE]; ulong found; cnt = get_kmem_cache_list(&cache_list); cache_buf = GETBUF(SIZE(kmem_cache)); found = 0; for (i = 0; i < cnt; i++) { readmem(cache_list[i] + OFFSET(kmem_cache_name), KVADDR, &name, sizeof(char *), "kmem_cache.name", FAULT_ON_ERROR); if (!read_string(name, buf, BUFSIZE-1)) continue; if (STREQ(buf, request)) { found = cache_list[i]; break; } } FREEBUF(cache_list); return found; } #endif /* NOT_USED */ crash-7.2.1/filesys.c0000775000000000000000000033131713240637645013170 0ustar rootroot/* filesys.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2017 David Anderson * Copyright (C) 2002-2017 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" #include #include #include #include static void show_mounts(ulong, int, struct task_context *); static int find_booted_kernel(void); static int find_booted_system_map(void); static int verify_utsname(char *); static char **build_searchdirs(int, int *); static int build_kernel_directory(char *); static int redhat_kernel_directory_v1(char *); static int redhat_kernel_directory_v2(char *); static int redhat_debug_directory(char *); static ulong *create_dentry_array(ulong, int *); static ulong *create_dentry_array_percpu(ulong, int *); static void show_fuser(char *, char *); static int mount_point(char *); static int open_file_reference(struct reference *); static void memory_source_init(void); static int get_pathname_component(ulong, ulong, int, char *, char *); static ulong *get_mount_list(int *, struct task_context *); char *inode_type(char *, char *); static void match_proc_version(void); static void get_live_memory_source(void); static int memory_driver_module_loaded(int *); static int insmod_memory_driver_module(void); static int get_memory_driver_dev(dev_t *); static int memory_driver_init(void); static int create_memory_device(dev_t); static int match_file_string(char *, char *, char *); static ulong get_root_vfsmount(char *); static void check_live_arch_mismatch(void); static long get_inode_nrpages(ulong); static void dump_inode_page_cache_info(ulong); #define DENTRY_CACHE (20) #define INODE_CACHE (20) #define FILE_CACHE (20) static struct filesys_table { char *dentry_cache; ulong cached_dentry[DENTRY_CACHE]; ulong cached_dentry_hits[DENTRY_CACHE]; int dentry_cache_index; ulong dentry_cache_fills; char *inode_cache; ulong cached_inode[INODE_CACHE]; ulong cached_inode_hits[INODE_CACHE]; int inode_cache_index; ulong inode_cache_fills; char *file_cache; ulong cached_file[FILE_CACHE]; ulong cached_file_hits[FILE_CACHE]; int file_cache_index; ulong file_cache_fills; } filesys_table = { 0 }; static struct filesys_table *ft = &filesys_table; /* * 
Open the namelist, dumpfile and output devices.
 */
void
fd_init(void)
{
	/* all descriptors start closed */
	pc->nfd = pc->kfd = pc->mfd = pc->dfd = -1;

	if ((pc->nullfp = fopen("/dev/null", "w+")) == NULL)
		error(INFO, "cannot open /dev/null (for extraneous output)");

	if (REMOTE())
		remote_fd_init();
	else {
		/* at most two of namelist / namelist_debug / system_map */
		if (pc->namelist && pc->namelist_debug && pc->system_map) {
			error(INFO,
			    "too many namelist options:\n %s\n %s\n %s\n",
				pc->namelist, pc->namelist_debug,
				pc->system_map);
			program_usage(SHORT_FORM);
		}

		if (pc->namelist) {
			if (XEN_HYPER_MODE() && !pc->dumpfile)
				error(FATAL,
				    "Xen hypervisor mode requires a dumpfile\n");
			if (!pc->dumpfile && !get_proc_version())
				error(INFO, "/proc/version: %s\n",
					strerror(errno));
		} else {
			/* no namelist: only valid on a live system */
			if (pc->dumpfile) {
				error(INFO, "namelist argument required\n");
				program_usage(SHORT_FORM);
			}
			if (!pc->dumpfile)
				check_live_arch_mismatch();
			if (!find_booted_kernel())
				program_usage(SHORT_FORM);
		}

		if (!pc->dumpfile) {
			pc->flags |= LIVE_SYSTEM;
			get_live_memory_source();
		}

		/*
		 * Probe-open the namelist to verify readability, then
		 * close it again; gdb does the real open later.
		 */
		if ((pc->nfd = open(pc->namelist, O_RDONLY)) < 0)
			error(FATAL, "%s: %s\n", pc->namelist,
				strerror(errno));
		else {
			close(pc->nfd);
			pc->nfd = -1;
		}

		if (LOCAL_ACTIVE() &&
		    !(pc->namelist_debug || pc->system_map)) {
			memory_source_init();
			match_proc_version();
		}
	}

	/* idempotent: returns early if already initialized above */
	memory_source_init();

	if (CRASHDEBUG(1)) {
		fprintf(fp, "readmem: %s() ", readmem_function_name());
		if (ACTIVE()) {
			fprintf(fp, "-> %s ", pc->live_memsrc);
			if (pc->flags & MEMMOD)
				fprintf(fp, "(module)");
			else if (pc->flags & CRASHBUILTIN)
				fprintf(fp, "(built-in)");
		}
		fprintf(fp, "\n");
	}
}

/*
 * Do whatever's necessary to handle the memory source.
 */
/*
 *  Select and initialize the memory source for this session: the local
 *  live-memory interface (crash driver, /dev/mem or /proc/kcore) when
 *  analyzing the running kernel, or the appropriate dumpfile backend
 *  (netdump/kdump/xendump/kvmdump/diskdump/LKCD/S390/VMware) otherwise.
 *  Fails with error(FATAL) when the chosen source cannot be opened.
 */
static void
memory_source_init(void)
{
	/* Remote daemons supply memory themselves unless MEMSRC_LOCAL. */
	if (REMOTE() && !(pc->flags2 & MEMSRC_LOCAL))
		return;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	if (LOCAL_ACTIVE()) {
		if (pc->mfd != -1)  /* already been here */
			return;

		/*
		 *  A non-/dev/mem device was requested: try the crash
		 *  memory driver first, falling back to /dev/mem.
		 */
		if (!STREQ(pc->live_memsrc, "/dev/mem") &&
		    STREQ(pc->live_memsrc, pc->memory_device)) {
			if (memory_driver_init())
				return;

			error(INFO, "cannot initialize crash memory driver\n");
			error(INFO, "using /dev/mem\n\n");
			pc->flags &= ~MEMMOD;
			pc->flags |= DEVMEM;
			pc->readmem = read_dev_mem;
			pc->writemem = write_dev_mem;
			pc->live_memsrc = "/dev/mem";
		}

		if (STREQ(pc->live_memsrc, "/dev/mem")) {
			/* Prefer read-write; fall back to read-only. */
			if ((pc->mfd = open("/dev/mem", O_RDWR)) < 0) {
				if ((pc->mfd = open("/dev/mem", O_RDONLY)) < 0)
					error(FATAL, "/dev/mem: %s\n",
						strerror(errno));
			} else
				pc->flags |= MFD_RDWR;
		} else if (STREQ(pc->live_memsrc, "/proc/kcore")) {
			if ((pc->mfd = open("/proc/kcore", O_RDONLY)) < 0)
				error(FATAL, "/proc/kcore: %s\n",
					strerror(errno));
			if (!proc_kcore_init(fp))
				error(FATAL,
				    "/proc/kcore: initialization failed\n");
		} else
			error(FATAL, "unknown memory device: %s\n",
				pc->live_memsrc);

		return;
	}

	if (pc->dumpfile) {
		if (!file_exists(pc->dumpfile, NULL))
			error(FATAL, "%s: %s\n", pc->dumpfile,
				strerror(ENOENT));

		if (!(pc->flags & DUMPFILE_TYPES))
			error(FATAL, "%s: dump format not supported!\n",
				pc->dumpfile);

		/* Dispatch to the backend matching the detected dump type. */
		if (pc->flags & NETDUMP) {
			if (!netdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & KDUMP) {
			if (!kdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & XENDUMP) {
			if (!xendump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & KVMDUMP) {
			if (!kvmdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & DISKDUMP) {
			if (!diskdump_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & LKCD) {
			if ((pc->dfd = open(pc->dumpfile, O_RDONLY)) < 0)
				error(FATAL, "%s: %s\n", pc->dumpfile,
					strerror(errno));
			if (!lkcd_dump_init(fp, pc->dfd, pc->dumpfile))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & S390D) {
			if (!s390_dump_init(pc->dumpfile))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		} else if (pc->flags & VMWARE_VMSS) {
			if (!vmware_vmss_init(pc->dumpfile, fp))
				error(FATAL, "%s: initialization failed\n",
					pc->dumpfile);
		}
	}
}

/*
 *  If only a namelist argument is entered for a live system, and the
 *  version string doesn't match /proc/version, try to avert a failure
 *  by assigning it to a matching System.map.
 */
static void
match_proc_version(void)
{
	char buffer[BUFSIZE], *p1, *p2;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return;

	if (!strlen(kt->proc_version))
		return;

	/* Namelist embeds the /proc/version string: nothing to fix up. */
	if (match_file_string(pc->namelist, kt->proc_version, buffer)) {
		if (CRASHDEBUG(1)) {
			fprintf(fp, "/proc/version:\n%s\n", kt->proc_version);
			fprintf(fp, "%s:\n%s", pc->namelist, buffer);
		}
		return;
	}

	error(WARNING, "%s%sand /proc/version do not match!\n\n",
		pc->namelist,
		strlen(pc->namelist) > 39 ? "\n " : " ");

	/*
	 *  find_booted_system_map() requires VTOP(), which used to be a
	 *  hardwired masking of the kernel address.  But some architectures
	 *  may not know what their physical base address is at this point,
	 *  and others may have different machdep->kvbase values, so for all
	 *  but the 0-based kernel virtual address architectures, bail out
	 *  here with a relevant error message.
	 */
	if (!machine_type("S390") && !machine_type("S390X")) {
		p1 = &kt->proc_version[strlen("Linux version ")];
		/*
		 *  NOTE(review): strstr() result is dereferenced without a
		 *  NULL check -- assumes /proc/version always contains a
		 *  space after the version number; TODO confirm.
		 */
		p2 = strstr(p1, " ");
		*p2 = NULLCHAR;
		error(WARNING, "/proc/version indicates kernel version: %s\n", p1);
		error(FATAL, "please use the vmlinux file for that kernel version, or try using\n"
			" the System.map for that kernel version as an additional argument.\n", p1);
		/* NOTE(review): likely unreachable if error(FATAL) exits. */
		clean_exit(1);
	}

	if (find_booted_system_map())
		pc->flags |= SYSMAP;
}

#define CREATE  1
#define DESTROY 0
#define DEFAULT_SEARCHDIRS 5
#define EXTRA_SEARCHDIRS 5

/*
 *  Build (create == CREATE) or tear down (create == DESTROY) the NULL-
 *  terminated list of directories searched for kernels/System.maps.
 *  The defaults are augmented with /usr/src subdirectories and several
 *  version-derived locations; *preferred (if non-NULL) receives the index
 *  of the redhat debuginfo directory, which is searched first.
 */
static char **
build_searchdirs(int create, int *preferred)
{
	int i;
	int cnt, start;
	DIR *dirp;
	struct dirent *dp;
	char dirbuf[BUFSIZE];
	static char **searchdirs = { 0 };
	static char *default_searchdirs[DEFAULT_SEARCHDIRS+1] = {
		"/usr/src/linux/",
		"/boot/",
		"/boot/efi/redhat",
		"/boot/efi/EFI/redhat",
		"/",
		NULL
	};

	if (!create) {
		if (searchdirs) {
			/* Only the dynamically-added entries are malloc'd. */
			for (i = DEFAULT_SEARCHDIRS; searchdirs[i]; i++)
				free(searchdirs[i]);
			free(searchdirs);
		}
		return NULL;
	}

	if (preferred)
		*preferred = 0;

	/*
	 *  Allow, at a minimum, the defaults plus an extra four directories:
	 *
	 *    /lib/modules
	 *    /usr/src/redhat/BUILD/kernel-/linux
	 *    /usr/src/redhat/BUILD/kernel-/linux-
	 *    /usr/lib/debug/lib/modules
	 */
	cnt = DEFAULT_SEARCHDIRS + EXTRA_SEARCHDIRS;

	if ((dirp = opendir("/usr/src"))) {
		/* Upper-bound the count with everything under /usr/src. */
		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp))
			cnt++;

		if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) {
			error(INFO, "/usr/src/ directory list malloc: %s\n",
				strerror(errno));
			closedir(dirp);
			return default_searchdirs;
		}
		for (i = 0; i < DEFAULT_SEARCHDIRS; i++)
			searchdirs[i] = default_searchdirs[i];
		cnt = DEFAULT_SEARCHDIRS;

		rewinddir(dirp);

		for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) {
			if (STREQ(dp->d_name, "linux") ||
			    STREQ(dp->d_name, "redhat") ||
			    STREQ(dp->d_name, ".") ||
			    STREQ(dp->d_name, ".."))
				continue;

			sprintf(dirbuf, "/usr/src/%s", dp->d_name);
			if (mount_point(dirbuf))
				continue;
			if (!is_directory(dirbuf))
				continue;

			if ((searchdirs[cnt] = (char *)
			    malloc(strlen(dirbuf)+2)) == NULL) {
				error(INFO,
				    "/usr/src/ directory entry malloc: %s\n",
					strerror(errno));
				break;
			}
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
		closedir(dirp);
		searchdirs[cnt] = NULL;
	} else {
		if ((searchdirs = calloc(cnt, sizeof(char *))) == NULL) {
			error(INFO, "search directory list malloc: %s\n",
				strerror(errno));
			return default_searchdirs;
		}
		for (i = 0; i < DEFAULT_SEARCHDIRS; i++)
			searchdirs[i] = default_searchdirs[i];
		cnt = DEFAULT_SEARCHDIRS;
	}

	if (build_kernel_directory(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO,
			    "/lib/modules/ directory entry malloc: %s\n",
				strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
	}

	if (redhat_kernel_directory_v1(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO,
			    "/usr/src/redhat directory entry malloc: %s\n",
				strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
	}

	if (redhat_kernel_directory_v2(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO,
			    "/usr/src/redhat directory entry malloc: %s\n",
				strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			cnt++;
		}
	}

	if (redhat_debug_directory(dirbuf)) {
		if ((searchdirs[cnt] = (char *)
		    malloc(strlen(dirbuf)+2)) == NULL) {
			error(INFO, "%s directory entry malloc: %s\n",
				dirbuf, strerror(errno));
		} else {
			sprintf(searchdirs[cnt], "%s/", dirbuf);
			/* The debuginfo directory is searched first. */
			if (preferred)
				*preferred = cnt;
			cnt++;
		}
	}

	searchdirs[cnt] = NULL;

	if (CRASHDEBUG(1)) {
		/* Dump the list starting at the preferred entry, wrapping. */
		i = start = preferred ? *preferred : 0;
		do {
			fprintf(fp, "searchdirs[%d]: %s\n", i, searchdirs[i]);
			if (++i == cnt) {
				if (start != 0)
					i = 0;
				else
					break;
			}
		} while (i != start);
	}

	return searchdirs;
}

/*
 *  Compose "/lib/modules/<version>/build" from the /proc/version string.
 */
static int
build_kernel_directory(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "/lib/modules/");

	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];

	/* Copy the release string up to the first space. */
	while (*p1 != ' ')
		*p2++ = *p1++;

	strcat(buf, "/build");
	return TRUE;
}

/*
 *  Compose "/usr/src/redhat/BUILD/kernel-<maj.min.rev>/linux".
 */
static int
redhat_kernel_directory_v1(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "/usr/src/redhat/BUILD/kernel-");

	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];

	/* Copy only the numeric dotted prefix of the version. */
	while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.'))
		*p2++ = *p1++;

	strcat(buf, "/linux");
	return TRUE;
}

/*
 *  Compose "/usr/src/redhat/BUILD/kernel-<ver>/linux-<ver>".
 */
static int
redhat_kernel_directory_v2(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "/usr/src/redhat/BUILD/kernel-");

	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];

	while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.'))
		*p2++ = *p1++;

	strcat(buf, "/linux-");

	/* Append the numeric version a second time. */
	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];

	while (((*p1 >= '0') && (*p1 <= '9')) || (*p1 == '.'))
		*p2++ = *p1++;

	return TRUE;
}

/*
 *  Compose "<redhat_debug_loc>/<release>" for debuginfo kernels.
 */
static int
redhat_debug_directory(char *buf)
{
	char *p1, *p2;

	if (!strstr(kt->proc_version, "Linux version "))
		return FALSE;

	BZERO(buf, BUFSIZE);
	sprintf(buf, "%s/", pc->redhat_debug_loc);

	p1 = &kt->proc_version[strlen("Linux version ")];
	p2 = &buf[strlen(buf)];

	while (*p1 != ' ')
		*p2++ = *p1++;

	return TRUE;
}

/*
 *  If a namelist was not entered, presume we're using the currently-running
 *  kernel.  Read its version string from /proc/version, and then look in
 *  the search directories for a kernel with the same version string embedded
 *  in it.
*/ static int find_booted_kernel(void) { char kernel[BUFSIZE]; char buffer[BUFSIZE]; char **searchdirs; int i, preferred, wrapped; DIR *dirp; struct dirent *dp; int found; pc->flags |= FINDKERNEL; fflush(fp); if (!file_exists("/proc/version", NULL)) { error(INFO, "/proc/version: %s: cannot determine booted kernel\n", strerror(ENOENT)); return FALSE; } if (!get_proc_version()) { error(INFO, "/proc/version: %s\n", strerror(errno)); return FALSE; } if (CRASHDEBUG(1)) fprintf(fp, "\nfind_booted_kernel: search for [%s]\n", kt->proc_version); searchdirs = build_searchdirs(CREATE, &preferred); for (i = preferred, wrapped = found = FALSE; !found; i++) { if (!searchdirs[i]) { if (preferred && !wrapped) { wrapped = TRUE; i = 0; } else break; } else if (wrapped && (preferred == i)) break; dirp = opendir(searchdirs[i]); if (!dirp) continue; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (dp->d_name[0] == '.') continue; sprintf(kernel, "%s%s", searchdirs[i], dp->d_name); if (mount_point(kernel) || !file_readable(kernel) || !is_elf_file(kernel)) continue; if (CRASHDEBUG(1)) fprintf(fp, "find_booted_kernel: check: %s\n", kernel); found = match_file_string(kernel, kt->proc_version, buffer); if (found) break; } closedir(dirp); } mount_point(DESTROY); build_searchdirs(DESTROY, NULL); if (found) { if ((pc->namelist = (char *)malloc (strlen(kernel)+1)) == NULL) error(FATAL, "booted kernel name malloc: %s\n", strerror(errno)); else { strcpy(pc->namelist, kernel); if (CRASHDEBUG(1)) fprintf(fp, "find_booted_kernel: found: %s\n", pc->namelist); return TRUE; } } error(INFO, "cannot find booted kernel -- please enter namelist argument\n\n"); return FALSE; } /* * Determine whether a file is a mount point, without the benefit of stat(). * This horrendous kludge is necessary to avoid uninterruptible stat() or * fstat() calls on nfs mount-points where the remote directory is no longer * available. 
*/ static int mount_point(char *name) { int i; static int mount_points_gathered = -1; static char **mount_points; char *arglist[MAXARGS]; char buf[BUFSIZE]; char mntfile[BUFSIZE]; int argc, found; FILE *mp; /* * The first time through, stash a list of mount points. */ if (mount_points_gathered < 0) { found = mount_points_gathered = 0; if (file_exists("/proc/mounts", NULL)) sprintf(mntfile, "/proc/mounts"); else if (file_exists("/etc/mtab", NULL)) sprintf(mntfile, "/etc/mtab"); else return FALSE; if ((mp = fopen(mntfile, "r")) == NULL) return FALSE; while (fgets(buf, BUFSIZE, mp)) { argc = parse_line(buf, arglist); if (argc < 2) continue; found++; } pclose(mp); if (!(mount_points = (char **)malloc(sizeof(char *) * found))) return FALSE; if ((mp = fopen(mntfile, "r")) == NULL) return FALSE; i = 0; while (fgets(buf, BUFSIZE, mp) && (mount_points_gathered < found)) { argc = parse_line(buf, arglist); if (argc < 2) continue; if ((mount_points[i] = (char *) malloc(strlen(arglist[1])*2))) { strcpy(mount_points[i], arglist[1]); mount_points_gathered++, i++; } } pclose(mp); if (CRASHDEBUG(2)) for (i = 0; i < mount_points_gathered; i++) fprintf(fp, "mount_points[%d]: %s (%lx)\n", i, mount_points[i], (ulong)mount_points[i]); } /* * A null name string means we're done with this routine forever, * so the malloc'd memory can be freed. */ if (!name) { for (i = 0; i < mount_points_gathered; i++) free(mount_points[i]); free(mount_points); return FALSE; } for (i = 0; i < mount_points_gathered; i++) { if (STREQ(name, mount_points[i])) return TRUE; } return FALSE; } /* * If /proc/version exists, get it for verification purposes later. */ int get_proc_version(void) { FILE *version; if (strlen(kt->proc_version)) /* been here, done that... 
*/ return TRUE; if (!file_exists("/proc/version", NULL)) return FALSE; if ((version = fopen("/proc/version", "r")) == NULL) return FALSE; if (fread(&kt->proc_version, sizeof(char), BUFSIZE-1, version) <= 0) { fclose(version); return FALSE; } fclose(version); strip_linefeeds(kt->proc_version); return TRUE; } /* * Given a non-matching kernel namelist, try to find a System.map file * that has a system_utsname whose contents match /proc/version. */ static int find_booted_system_map(void) { char system_map[BUFSIZE]; char **searchdirs; int i; DIR *dirp; struct dirent *dp; int found; fflush(fp); if (!file_exists("/proc/version", NULL)) { error(INFO, "/proc/version: %s: cannot determine booted System.map\n", strerror(ENOENT)); return FALSE; } if (!get_proc_version()) { error(INFO, "/proc/version: %s\n", strerror(errno)); return FALSE; } found = FALSE; /* * To avoid a search, try the obvious first. */ sprintf(system_map, "/boot/System.map"); if (file_readable(system_map) && verify_utsname(system_map)) { found = TRUE; } else { searchdirs = build_searchdirs(CREATE, NULL); for (i = 0; !found && searchdirs[i]; i++) { dirp = opendir(searchdirs[i]); if (!dirp) continue; for (dp = readdir(dirp); dp != NULL; dp = readdir(dirp)) { if (!strstr(dp->d_name, "System.map")) continue; sprintf(system_map, "%s%s", searchdirs[i], dp->d_name); if (mount_point(system_map) || !file_readable(system_map) || !is_system_map(system_map)) continue; if (verify_utsname(system_map)) { found = TRUE; break; } } closedir(dirp); } mount_point(DESTROY); build_searchdirs(DESTROY, NULL); } if (found) { if ((pc->system_map = (char *)malloc (strlen(system_map)+1)) == NULL) error(FATAL, "booted system map name malloc: %s\n", strerror(errno)); strcpy(pc->system_map, system_map); if (CRASHDEBUG(1)) fprintf(fp, "find_booted_system_map: found: %s\n", pc->system_map); return TRUE; } error(INFO, "cannot find booted system map -- please enter namelist or system map\n\n"); return FALSE; } /* * Read the system_utsname 
from /dev/mem, based upon the address found * in the passed-in System.map file, and compare it to /proc/version. */ static int verify_utsname(char *system_map) { char buffer[BUFSIZE]; ulong value; struct new_utsname new_utsname; if (CRASHDEBUG(1)) fprintf(fp, "verify_utsname: check: %s\n", system_map); if (!match_file_string(system_map, "D system_utsname", buffer)) return FALSE; if (extract_hex(buffer, &value, NULLCHAR, TRUE) && (READMEM(pc->mfd, &new_utsname, sizeof(struct new_utsname), value, VTOP(value)) > 0) && ascii_string(new_utsname.release) && ascii_string(new_utsname.version) && STRNEQ(new_utsname.release, "2.") && (strlen(new_utsname.release) > 4) && (strlen(new_utsname.version) > 27)) { if (CRASHDEBUG(1)) { fprintf(fp, "release: [%s]\n", new_utsname.release); fprintf(fp, "version: [%s]\n", new_utsname.version); } if (strstr(kt->proc_version, new_utsname.release) && strstr(kt->proc_version, new_utsname.version)) { return TRUE; } } return FALSE; } /* * Determine whether a file exists, using the caller's stat structure if * one was passed in. */ int file_exists(char *file, struct stat *sp) { struct stat sbuf; if (stat(file, sp ? sp : &sbuf) == 0) return TRUE; return FALSE; } /* * Determine whether a file exists, and if so, if it's readable. */ int file_readable(char *file) { char tmp; int fd; if (!file_exists(file, NULL)) return FALSE; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; if (read(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) { close(fd); return FALSE; } close(fd); return TRUE; } /* * Quick file checksummer. 
*/ int file_checksum(char *file, long *retsum) { int i; int fd; ssize_t cnt; char buf[MIN_PAGE_SIZE]; long csum; if ((fd = open(file, O_RDONLY)) < 0) return FALSE; csum = 0; BZERO(buf, MIN_PAGE_SIZE); while ((cnt = read(fd, buf, MIN_PAGE_SIZE)) > 0) { for (i = 0; i < cnt; i++) csum += buf[i]; BZERO(buf, MIN_PAGE_SIZE); } close(fd); *retsum = csum; return TRUE; } int is_directory(char *file) { struct stat sbuf; if (!file || !strlen(file)) return(FALSE); if (stat(file, &sbuf) == -1) return(FALSE); /* This file doesn't exist. */ return((sbuf.st_mode & S_IFMT) == S_IFDIR ? TRUE : FALSE); } /* * Search a directory tree for filename, and if found, return a temporarily * allocated buffer containing the full pathname. The "done" business is * protection against fgets() prematurely returning NULL before the find * command completes. (I thought this was impossible until I saw it happen...) * When time permits, rewrite this doing the search by hand. */ char * search_directory_tree(char *directory, char *file, int follow_links) { char command[BUFSIZE]; char buf[BUFSIZE]; char *retbuf, *start, *end, *module; FILE *pipe; regex_t regex; int regex_used, done; if (!file_exists("/usr/bin/find", NULL) || !file_exists("/bin/echo", NULL) || !is_directory(directory) || (*file == '(')) return NULL; sprintf(command, "/usr/bin/find %s %s -name %s -print; /bin/echo search done", follow_links ? 
"-L" : "", directory, file); if ((pipe = popen(command, "r")) == NULL) { error(INFO, "%s: %s\n", command, strerror(errno)); return NULL; } done = FALSE; retbuf = NULL; regex_used = ((start = strstr(file, "[")) && (end = strstr(file, "]")) && (start < end) && (regcomp(®ex, file, 0) == 0)); while (fgets(buf, BUFSIZE-1, pipe) || !done) { if (STREQ(buf, "search done\n")) { done = TRUE; break; } if (!retbuf && !regex_used && STREQ((char *)basename(strip_linefeeds(buf)), file)) { retbuf = GETBUF(strlen(buf)+1); strcpy(retbuf, buf); } if (!retbuf && regex_used) { module = basename(strip_linefeeds(buf)); if (regexec(®ex, module, 0, NULL, 0) == 0) { retbuf = GETBUF(strlen(buf)+1); strcpy(retbuf, buf); } } } if (regex_used) regfree(®ex); pclose(pipe); return retbuf; } /* * Determine whether a file exists, and if so, if it's a tty. */ int is_a_tty(char *filename) { int fd; if ((fd = open(filename, O_RDONLY)) < 0) return FALSE; if (isatty(fd)) { close(fd); return TRUE; } close(fd); return FALSE; } /* * Open a tmpfile for command output. fp is stashed in pc->saved_fp, and * temporarily set to the new FILE pointer. This allows a command to still * print to the original output while the tmpfile is still open. */ #define OPEN_ONLY_ONCE #ifdef OPEN_ONLY_ONCE void open_tmpfile(void) { int ret ATTRIBUTE_UNUSED; if (pc->tmpfile) error(FATAL, "recursive temporary file usage\n"); if (!pc->tmp_fp) { if ((pc->tmp_fp = tmpfile()) == NULL) error(FATAL, "cannot open temporary file\n"); } fflush(pc->tmpfile); ret = ftruncate(fileno(pc->tmp_fp), 0); rewind(pc->tmp_fp); pc->tmpfile = pc->tmp_fp; pc->saved_fp = fp; fp = pc->tmpfile; } #else void open_tmpfile(void) { if (pc->tmpfile) error(FATAL, "recursive temporary file usage\n"); if ((pc->tmpfile = tmpfile()) == NULL) { error(FATAL, "cannot open temporary file\n"); } else { pc->saved_fp = fp; fp = pc->tmpfile; } } #endif /* * Destroy the reference to the tmpfile, and restore fp to the state * it had when open_tmpfile() was called. 
*/ #ifdef OPEN_ONLY_ONCE void close_tmpfile(void) { int ret ATTRIBUTE_UNUSED; if (pc->tmpfile) { fflush(pc->tmpfile); ret = ftruncate(fileno(pc->tmpfile), 0); rewind(pc->tmpfile); pc->tmpfile = NULL; fp = pc->saved_fp; } else error(FATAL, "trying to close an unopened temporary file\n"); } #else void close_tmpfile(void) { if (pc->tmpfile) { fp = pc->saved_fp; fclose(pc->tmpfile); pc->tmpfile = NULL; } else error(FATAL, "trying to close an unopened temporary file\n"); } #endif /* * open_tmpfile2(), set_tmpfile2() and close_tmpfile2() do not use a * permanent tmpfile, and do NOT modify the global fp pointer or pc->saved_fp. * That being the case, all wrapped functions must be aware of it, or the * global fp pointer has to explicitly manipulated by the calling function. * The secondary tmpfile should only be used by common functions that might * be called by a higher-level function using the primary permanent tmpfile, * or alternatively a caller may pass in a FILE pointer to set_tmpfile2(). */ void open_tmpfile2(void) { if (pc->tmpfile2) error(FATAL, "recursive secondary temporary file usage\n"); if ((pc->tmpfile2 = tmpfile()) == NULL) error(FATAL, "cannot open secondary temporary file\n"); rewind(pc->tmpfile2); } void close_tmpfile2(void) { if (pc->tmpfile2) { fflush(pc->tmpfile2); fclose(pc->tmpfile2); pc->tmpfile2 = NULL; } } void set_tmpfile2(FILE *fptr) { if (pc->tmpfile2) error(FATAL, "secondary temporary file already in use\n"); pc->tmpfile2 = fptr; } #define MOUNT_PRINT_INODES 0x1 #define MOUNT_PRINT_FILES 0x2 /* * Display basic information about the currently mounted filesystems. * The -f option lists the open files for the filesystem(s). * The -i option dumps the dirty inodes of the filesystem(s). * If an inode address, mount, vfsmount, superblock, device name or * directory name is also entered, just show the data for the * filesystem indicated by the argument. 
 */
static char mount_hdr[BUFSIZE] = { 0 };

void
cmd_mount(void)
{
	int i;
	int c, found;
	struct task_context *tc, *namespace_context;
	ulong value1, value2;
	char *spec_string;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char *arglist[MAXARGS*2];
	ulong vfsmount = 0;
	int flags = 0;
	int save_next;
	ulong pid;

	/* find a context */
	pid = 1;
	while ((namespace_context = pid_to_context(pid)) == NULL)
		pid++;

	while ((c = getopt(argcnt, args, "ifn:")) != EOF) {
		switch(c)
		{
		case 'i':
			if (INVALID_MEMBER(super_block_s_dirty)) {
				error(INFO,
				    "the super_block.s_dirty linked list does "
				    "not exist in this kernel\n");
				option_not_supported(c);
			}
			flags |= MOUNT_PRINT_INODES;
			break;

		case 'f':
			flags |= MOUNT_PRINT_FILES;
			break;

		case 'n':
			/* Use the mount namespace of the given task/pid. */
			switch (str_to_context(optarg, &value1, &tc))
			{
			case STR_PID:
			case STR_TASK:
				namespace_context = tc;
				break;
			case STR_INVALID:
				error(FATAL,
				    "invalid task or pid value: %s\n",
					optarg);
				break;
			}
			break;

		default:
			argerrs++;
			break;
		}
	}

	if (argerrs)
		cmd_usage(pc->curcmd, SYNOPSIS);

	/* No search arguments: show everything directly. */
	if (args[optind] == 0) {
		show_mounts(0, flags, namespace_context);
		return;
	}

	/*
	 *  Dump everything into a tmpfile, and then walk
	 *  through it for each search argument entered.
	 */
	open_tmpfile();
	show_mounts(0, MOUNT_PRINT_FILES |
		(VALID_MEMBER(super_block_s_dirty) ?
		MOUNT_PRINT_INODES : 0), namespace_context);

	pc->curcmd_flags &= ~HEADER_PRINTED;

	do {
		spec_string = args[optind];
		if (STRNEQ(spec_string, "0x") && hexadecimal(spec_string, 0))
			shift_string_left(spec_string, 2);

		found = FALSE;
		rewind(pc->tmpfile);
		save_next = 0;
		while (fgets(buf1, BUFSIZE, pc->tmpfile)) {
			/* The line after each header is a mount entry. */
			if (STRNEQ(buf1, mount_hdr)) {
				save_next = TRUE;
				continue;
			}
			if (save_next) {
				strcpy(buf2, buf1);
				save_next = FALSE;
			}
			if (!(c = parse_line(buf1, arglist)))
				continue;

			for (i = 0; i < c; i++) {
				if (PATHEQ(arglist[i], spec_string))
					found = TRUE;
				/*
				 *  Check for a vfsmount address
				 *  embedded in a struct mount.
				 */
				if ((i == 0) && (c == 5) &&
				    VALID_MEMBER(mount_mnt) &&
				    hexadecimal(spec_string, 0) &&
				    hexadecimal(arglist[i], 0)) {
					value1 = htol(spec_string,
						FAULT_ON_ERROR, NULL);
					value2 = htol(arglist[i],
						FAULT_ON_ERROR, NULL) +
						OFFSET(mount_mnt);
					if (value1 == value2)
						found = TRUE;
				}
			}
			if (found) {
				/* Emit to the real output stream. */
				fp = pc->saved_fp;
				if (flags) {
					sscanf(buf2,"%lx", &vfsmount);
					show_mounts(vfsmount, flags,
						namespace_context);
				} else {
					if (!(pc->curcmd_flags &
					    HEADER_PRINTED)) {
						fprintf(fp, "%s", mount_hdr);
						pc->curcmd_flags |=
							HEADER_PRINTED;
					}
					fprintf(fp, "%s", buf2);
				}
				found = FALSE;
				fp = pc->tmpfile;
			}
		}
	} while (args[++optind]);

	close_tmpfile();
}

/*
 *  Do the work for cmd_mount();
 */
static void
show_mounts(ulong one_vfsmount, int flags,
	struct task_context *namespace_context)
{
	ulong one_vfsmount_list;
	long sb_s_files;
	long s_dirty;
	ulong devp, dirp, sbp, dirty, type, name;
	struct list_data list_data, *ld;
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	char buf4[BUFSIZE];
	ulong *dentry_list, *dp, *mntlist;
	ulong *vfsmnt;
	char *vfsmount_buf, *super_block_buf, *mount_buf;
	ulong dentry, inode, inode_sb, mnt_parent;
	char *dentry_buf, *inode_buf;
	int cnt, i, m, files_header_printed;
	int mount_cnt;
	int devlen;
	char mount_files_header[BUFSIZE];
	long per_cpu_s_files;

	sprintf(mount_files_header, "%s%s%s%sTYPE%sPATH\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "DENTRY"),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"),
		space(MINSPACE),
		space(MINSPACE));

	dirp = dentry = mnt_parent = sb_s_files = s_dirty = 0;
	if (VALID_MEMBER(super_block_s_dirty))
		s_dirty = OFFSET(super_block_s_dirty);

	per_cpu_s_files = MEMBER_EXISTS("file", "f_sb_list_cpu");

	dentry_list = NULL;
	mntlist = 0;
	ld = &list_data;

	/* A single vfsmount argument limits the display to that mount. */
	if (one_vfsmount) {
		one_vfsmount_list = one_vfsmount;
		mount_cnt = 1;
		mntlist = &one_vfsmount_list;
	} else
		mntlist = get_mount_list(&mount_cnt, namespace_context);

	devlen = strlen("DEVNAME")+2;

	if (!strlen(mount_hdr)) {
		snprintf(mount_hdr, sizeof(mount_hdr),
			"%s %s %s %s DIRNAME\n",
			mkstring(buf1, VADDR_PRLEN, CENTER,
			VALID_STRUCT(mount) ? "MOUNT" : "VFSMOUNT"),
			mkstring(buf2, VADDR_PRLEN, CENTER, "SUPERBLK"),
			mkstring(buf3, strlen("rootfs"), LJUST, "TYPE"),
			mkstring(buf4, devlen, LJUST, "DEVNAME"));
	}

	if (flags == 0)
		fprintf(fp, "%s", mount_hdr);

	sb_s_files = VALID_MEMBER(super_block_s_files) ?
		OFFSET(super_block_s_files) : INVALID_OFFSET;

	if ((flags & MOUNT_PRINT_FILES) &&
	    (sb_s_files == INVALID_OFFSET)) {
		/*
		 *  super_block.s_files deprecated
		 */
		if (!kernel_symbol_exists("inuse_filps")) {
			error(INFO,
			    "the super_block.s_files linked list does "
			    "not exist in this kernel\n");
			option_not_supported('f');
		}
		/*
		 *  No open files list in super_block (2.2).
		 *  Use inuse_filps list instead.
		 */
		dentry_list = create_dentry_array(symbol_value("inuse_filps"),
			&cnt);
	}

	if (VALID_STRUCT(mount)) {
		/* The vfsmount is embedded inside struct mount. */
		mount_buf = GETBUF(SIZE(mount));
		vfsmount_buf = mount_buf + OFFSET(mount_mnt);
	} else {
		mount_buf = NULL;
		vfsmount_buf = GETBUF(SIZE(vfsmount));
	}
	super_block_buf = GETBUF(SIZE(super_block));

	for (m = 0, vfsmnt = mntlist; m < mount_cnt; m++, vfsmnt++) {
		if (VALID_STRUCT(mount)) {
			readmem(*vfsmnt, KVADDR, mount_buf, SIZE(mount),
				"mount buffer", FAULT_ON_ERROR);
			devp = ULONG(mount_buf + OFFSET(mount_mnt_devname));
		} else {
			readmem(*vfsmnt, KVADDR, vfsmount_buf,
				SIZE(vfsmount), "vfsmount buffer",
				FAULT_ON_ERROR);
			devp = ULONG(vfsmount_buf +
				OFFSET(vfsmount_mnt_devname));
		}

		if (VALID_MEMBER(vfsmount_mnt_dirname)) {
			dirp = ULONG(vfsmount_buf +
				OFFSET(vfsmount_mnt_dirname));
		} else {
			if (VALID_STRUCT(mount)) {
				mnt_parent = ULONG(mount_buf +
					OFFSET(mount_mnt_parent));
				dentry = ULONG(mount_buf +
					OFFSET(mount_mnt_mountpoint));
			} else {
				mnt_parent = ULONG(vfsmount_buf +
					OFFSET(vfsmount_mnt_parent));
				dentry = ULONG(vfsmount_buf +
					OFFSET(vfsmount_mnt_mountpoint));
			}
		}

		sbp = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));

		if (flags)
			fprintf(fp, "%s", mount_hdr);
		fprintf(fp, "%s %s ",
			mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(*vfsmnt)),
			mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX,
			MKSTR(sbp)));

		readmem(sbp, KVADDR, super_block_buf, SIZE(super_block),
			"super_block buffer", FAULT_ON_ERROR);
		type = ULONG(super_block_buf + OFFSET(super_block_s_type));
		readmem(type + OFFSET(file_system_type_name), KVADDR,
			&name, sizeof(void *), "file_system_type name",
			FAULT_ON_ERROR);

		if (read_string(name, buf1, BUFSIZE-1))
			sprintf(buf3, "%-6s ", buf1);
		else
			sprintf(buf3, "unknown ");

		if (read_string(devp, buf1, BUFSIZE-1))
			sprintf(buf4, "%s ",
				mkstring(buf2, devlen, LJUST, buf1));
		else
			sprintf(buf4, "%s ",
				mkstring(buf2, devlen, LJUST, "(unknown)"));

		/* Trim trailing padding when the combined field is wide. */
		sprintf(buf1, "%s%s", buf3, buf4);
		while ((strlen(buf1) > 17) &&
		    (buf1[strlen(buf1)-2] == ' '))
			strip_ending_char(buf1, ' ');
		fprintf(fp, "%s", buf1);

		if (VALID_MEMBER(vfsmount_mnt_dirname)) {
			if (read_string(dirp, buf1, BUFSIZE-1))
				fprintf(fp, "%-10s\n", buf1);
			else
				fprintf(fp, "%-10s\n", "(unknown)");
		} else {
			get_pathname(dentry, buf1, BUFSIZE, 1,
				VALID_STRUCT(mount) ?
				mnt_parent + OFFSET(mount_mnt) : mnt_parent);
			fprintf(fp, "%-10s\n", buf1);
		}

		if (flags & MOUNT_PRINT_FILES) {
			if (sb_s_files != INVALID_OFFSET) {
				dentry_list = per_cpu_s_files ?
					create_dentry_array_percpu(sbp+
					sb_s_files, &cnt) :
					create_dentry_array(sbp+sb_s_files,
					&cnt);
			}
			files_header_printed = 0;
			/*
			 *  NOTE(review): the archive text is truncated
			 *  here -- the body of the open-files display loop
			 *  and the MOUNT_PRINT_INODES setup are missing
			 *  between "i" and "flags"; preserved verbatim.
			 */
			for (i=0, dp = dentry_list; iflags = VERBOSE;
				ld->start = dirty;
				ld->end = (sbp+s_dirty);
				ld->header = "DIRTY INODES\n";
				hq_open();
				do_list(ld);
				hq_close();
			} else {
				fprintf(fp,
				    "DIRTY INODES\nNo dirty inodes found\n");
			}
		}

		if (flags && !one_vfsmount)
			fprintf(fp, "\n");
	}

	if (!one_vfsmount)
		FREEBUF(mntlist);
	if (VALID_STRUCT(mount))
		FREEBUF(mount_buf);
	else
		FREEBUF(vfsmount_buf);
	FREEBUF(super_block_buf);
}

/*
 *  Allocate and fill a list of the currently-mounted vfsmount pointers.
*/ static ulong * get_mount_list(int *cntptr, struct task_context *namespace_context) { struct list_data list_data, *ld; ulong namespace, root, nsproxy, mnt_ns; struct task_context *tc; ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->flags |= LIST_ALLOCATE; if (symbol_exists("vfsmntlist")) { get_symbol_data("vfsmntlist", sizeof(void *), &ld->start); ld->end = symbol_value("vfsmntlist"); } else if (VALID_MEMBER(task_struct_nsproxy)) { tc = namespace_context; readmem(tc->task + OFFSET(task_struct_nsproxy), KVADDR, &nsproxy, sizeof(void *), "task nsproxy", FAULT_ON_ERROR); if (!readmem(nsproxy + OFFSET(nsproxy_mnt_ns), KVADDR, &mnt_ns, sizeof(void *), "nsproxy mnt_ns", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine mount list location!\n"); if (!readmem(mnt_ns + OFFSET(mnt_namespace_root), KVADDR, &root, sizeof(void *), "mnt_namespace root", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine mount list location!\n"); ld->start = root + OFFSET_OPTION(vfsmount_mnt_list, mount_mnt_list); ld->end = mnt_ns + OFFSET(mnt_namespace_list); } else if (VALID_MEMBER(namespace_root)) { tc = namespace_context; readmem(tc->task + OFFSET(task_struct_namespace), KVADDR, &namespace, sizeof(void *), "task namespace", FAULT_ON_ERROR); if (!readmem(namespace + OFFSET(namespace_root), KVADDR, &root, sizeof(void *), "namespace root", RETURN_ON_ERROR|QUIET)) error(FATAL, "cannot determine mount list location!\n"); if (CRASHDEBUG(1)) console("namespace: %lx => root: %lx\n", namespace, root); ld->start = root + OFFSET_OPTION(vfsmount_mnt_list, mount_mnt_list); ld->end = namespace + OFFSET(namespace_list); } else error(FATAL, "cannot determine mount list location!\n"); if (VALID_MEMBER(vfsmount_mnt_list)) ld->list_head_offset = OFFSET(vfsmount_mnt_list); else if (VALID_STRUCT(mount)) ld->list_head_offset = OFFSET(mount_mnt_list); else ld->member_offset = OFFSET(vfsmount_mnt_next); *cntptr = do_list(ld); return(ld->list_ptr); } /* * Given a dentry, display its address, 
 *  inode, super_block, pathname.
 */
static void
display_dentry_info(ulong dentry)
{
	int m, found;
	char *dentry_buf, *inode_buf, *vfsmount_buf, *mount_buf;
	ulong inode, superblock, sb, vfs;
	ulong *mntlist, *vfsmnt;
	char pathname[BUFSIZE];
	char buf1[BUFSIZE];
	char buf2[BUFSIZE];
	char buf3[BUFSIZE];
	int mount_cnt;

	fprintf(fp, "%s%s%s%s%s%sTYPE%sPATH\n",
		mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "DENTRY"),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "SUPERBLK"),
		space(MINSPACE),
		space(MINSPACE));

	dentry_buf = fill_dentry_cache(dentry);
	inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
	pathname[0] = NULLCHAR;

	if (inode) {
		inode_buf = fill_inode_cache(inode);
		superblock = ULONG(inode_buf + OFFSET(inode_i_sb));
	} else {
		inode_buf = NULL;
		superblock = 0;
	}

	/* Without an inode/superblock no pathname can be resolved. */
	if (!inode || !superblock)
		goto nopath;

	if (VALID_MEMBER(file_f_vfsmnt)) {
		/*
		 *  Find the vfsmount whose superblock matches the
		 *  inode's, so that get_pathname() can walk the tree.
		 */
		mntlist = get_mount_list(&mount_cnt, pid_to_context(1));
		if (VALID_STRUCT(mount)) {
			mount_buf = GETBUF(SIZE(mount));
			vfsmount_buf = mount_buf + OFFSET(mount_mnt);
		} else {
			mount_buf = NULL;
			vfsmount_buf = GETBUF(SIZE(vfsmount));
		}

		for (m = found = 0, vfsmnt = mntlist;
		     m < mount_cnt; m++, vfsmnt++) {
			if (VALID_STRUCT(mount))
				readmem(*vfsmnt, KVADDR, mount_buf,
					SIZE(mount), "mount buffer",
					FAULT_ON_ERROR);
			else
				readmem(*vfsmnt, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE, 1,
					VALID_STRUCT(mount) ?
					*vfsmnt+OFFSET(mount_mnt) : *vfsmnt);
				found = TRUE;
			}
		}

		/* Internal pipe mount, not on the regular mount list. */
		if (!found && symbol_exists("pipe_mnt")) {
			get_symbol_data("pipe_mnt", sizeof(long), &vfs);
			if (VALID_STRUCT(mount))
				readmem(vfs - OFFSET(mount_mnt), KVADDR,
					mount_buf, SIZE(mount),
					"mount buffer", FAULT_ON_ERROR);
			else
				readmem(vfs, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE,
					1, vfs);
				found = TRUE;
			}
		}

		/* Internal socket mount, likewise off-list. */
		if (!found && symbol_exists("sock_mnt")) {
			get_symbol_data("sock_mnt", sizeof(long), &vfs);
			if (VALID_STRUCT(mount))
				readmem(vfs - OFFSET(mount_mnt), KVADDR,
					mount_buf, SIZE(mount),
					"mount buffer", FAULT_ON_ERROR);
			else
				readmem(vfs, KVADDR, vfsmount_buf,
					SIZE(vfsmount), "vfsmount buffer",
					FAULT_ON_ERROR);
			sb = ULONG(vfsmount_buf + OFFSET(vfsmount_mnt_sb));
			if (superblock && (sb == superblock)) {
				get_pathname(dentry, pathname, BUFSIZE,
					1, vfs);
				found = TRUE;
			}
		}
	} else {
		mntlist = 0;
		get_pathname(dentry, pathname, BUFSIZE, 1, 0);
	}

	if (mntlist) {
		FREEBUF(mntlist);
		if (VALID_STRUCT(mount))
			FREEBUF(mount_buf);
		else
			FREEBUF(vfsmount_buf);
	}

nopath:
	fprintf(fp, "%s%s%s%s%s%s%s%s%s\n",
		mkstring(buf1, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(dentry)),
		space(MINSPACE),
		mkstring(buf2, VADDR_PRLEN, RJUST|LONG_HEX, MKSTR(inode)),
		space(MINSPACE),
		mkstring(buf3, VADDR_PRLEN, CENTER|LONG_HEX,
		MKSTR(superblock)),
		space(MINSPACE),
		inode ? inode_type(inode_buf, pathname) : "N/A",
		space(MINSPACE),
		pathname);
}

/*
 *  Return a 4-character type string of an inode, modifying a previously
 *  gathered pathname if necessary.
*/ char * inode_type(char *inode_buf, char *pathname) { char *type; uint32_t umode32; uint16_t umode16; uint mode; ulong inode_i_op; ulong inode_i_fop; long i_fop_off; mode = umode16 = umode32 = 0; switch (SIZE(umode_t)) { case SIZEOF_32BIT: umode32 = UINT(inode_buf + OFFSET(inode_i_mode)); mode = umode32; break; case SIZEOF_16BIT: umode16 = USHORT(inode_buf + OFFSET(inode_i_mode)); mode = (uint)umode16; break; } type = "UNKN"; if (S_ISREG(mode)) type = "REG "; if (S_ISLNK(mode)) type = "LNK "; if (S_ISDIR(mode)) type = "DIR "; if (S_ISCHR(mode)) type = "CHR "; if (S_ISBLK(mode)) type = "BLK "; if (S_ISFIFO(mode)) { type = "FIFO"; if (symbol_exists("pipe_inode_operations")) { inode_i_op = ULONG(inode_buf + OFFSET(inode_i_op)); if (inode_i_op == symbol_value("pipe_inode_operations")) { type = "PIPE"; pathname[0] = NULLCHAR; } } else { if (symbol_exists("rdwr_pipe_fops") && (i_fop_off = OFFSET(inode_i_fop)) > 0) { inode_i_fop = ULONG(inode_buf + i_fop_off); if (inode_i_fop == symbol_value("rdwr_pipe_fops")) { type = "PIPE"; pathname[0] = NULLCHAR; } } } } if (S_ISSOCK(mode)) { type = "SOCK"; if (STREQ(pathname, "/")) pathname[0] = NULLCHAR; } return type; } /* * Walk an open file list and return an array of open dentries. */ static ulong * create_dentry_array(ulong list_addr, int *count) { struct list_data list_data, *ld; ulong *file, *files_list, *dentry_list; ulong dentry, inode; char *file_buf, *dentry_buf; int cnt, f_count, i; int dentry_cnt = 0; ld = &list_data; BZERO(ld, sizeof(struct list_data)); readmem(list_addr, KVADDR, &ld->start, sizeof(void *), "file list head", FAULT_ON_ERROR); if (list_addr == ld->start) { /* empty list? 
*/ *count = 0; return NULL; } ld->end = list_addr; hq_open(); cnt = do_list(ld); if (cnt == 0) { hq_close(); *count = 0; return NULL; } files_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(files_list, cnt); hq_close(); hq_open(); for (i=0, file = files_list; i__per_cpu_offset[c]; percpu_list[c].dentry_list = create_dentry_array(list_addr, &percpu_list[c].count); total += percpu_list[c].count; } if (total) { dentry_list = (ulong *)GETBUF(total * sizeof(ulong)); for (c = i = 0; c < (cpu+1); c++) { if (percpu_list[c].count == 0) continue; for (j = 0; j < percpu_list[c].count; j++) dentry_list[i++] = percpu_list[c].dentry_list[j]; FREEBUF(percpu_list[c].dentry_list); } } else dentry_list = NULL; FREEBUF(percpu_list); *count = total; return dentry_list; } /* * Stash vfs structure offsets */ void vfs_init(void) { MEMBER_OFFSET_INIT(nlm_file_f_file, "nlm_file", "f_file"); MEMBER_OFFSET_INIT(task_struct_files, "task_struct", "files"); MEMBER_OFFSET_INIT(task_struct_fs, "task_struct", "fs"); MEMBER_OFFSET_INIT(fs_struct_root, "fs_struct", "root"); MEMBER_OFFSET_INIT(fs_struct_pwd, "fs_struct", "pwd"); MEMBER_OFFSET_INIT(fs_struct_rootmnt, "fs_struct", "rootmnt"); MEMBER_OFFSET_INIT(fs_struct_pwdmnt, "fs_struct", "pwdmnt"); MEMBER_OFFSET_INIT(files_struct_open_fds_init, "files_struct", "open_fds_init"); MEMBER_OFFSET_INIT(files_struct_fdt, "files_struct", "fdt"); if (VALID_MEMBER(files_struct_fdt)) { MEMBER_OFFSET_INIT(fdtable_max_fds, "fdtable", "max_fds"); MEMBER_OFFSET_INIT(fdtable_max_fdset, "fdtable", "max_fdset"); MEMBER_OFFSET_INIT(fdtable_open_fds, "fdtable", "open_fds"); MEMBER_OFFSET_INIT(fdtable_fd, "fdtable", "fd"); } else { MEMBER_OFFSET_INIT(files_struct_max_fds, "files_struct", "max_fds"); MEMBER_OFFSET_INIT(files_struct_max_fdset, "files_struct", "max_fdset"); MEMBER_OFFSET_INIT(files_struct_open_fds, "files_struct", "open_fds"); MEMBER_OFFSET_INIT(files_struct_fd, "files_struct", "fd"); } MEMBER_OFFSET_INIT(file_f_dentry, "file", 
"f_dentry");
    MEMBER_OFFSET_INIT(file_f_vfsmnt, "file", "f_vfsmnt");
    MEMBER_OFFSET_INIT(file_f_count, "file", "f_count");
    MEMBER_OFFSET_INIT(path_mnt, "path", "mnt");
    MEMBER_OFFSET_INIT(path_dentry, "path", "dentry");
    /*
     *  Newer kernels replaced file.f_dentry/f_vfsmnt with an embedded
     *  struct path f_path; synthesize the old offsets from it.
     */
    if (INVALID_MEMBER(file_f_dentry)) {
        MEMBER_OFFSET_INIT(file_f_path, "file", "f_path");
        ASSIGN_OFFSET(file_f_dentry) = OFFSET(file_f_path) + OFFSET(path_dentry);
        ASSIGN_OFFSET(file_f_vfsmnt) = OFFSET(file_f_path) + OFFSET(path_mnt);
    }
    MEMBER_OFFSET_INIT(dentry_d_inode, "dentry", "d_inode");
    MEMBER_OFFSET_INIT(dentry_d_parent, "dentry", "d_parent");
    MEMBER_OFFSET_INIT(dentry_d_covers, "dentry", "d_covers");
    MEMBER_OFFSET_INIT(dentry_d_name, "dentry", "d_name");
    MEMBER_OFFSET_INIT(dentry_d_iname, "dentry", "d_iname");
    MEMBER_OFFSET_INIT(inode_i_mode, "inode", "i_mode");
    MEMBER_OFFSET_INIT(inode_i_op, "inode", "i_op");
    MEMBER_OFFSET_INIT(inode_i_sb, "inode", "i_sb");
    MEMBER_OFFSET_INIT(inode_u, "inode", "u");
    MEMBER_OFFSET_INIT(qstr_name, "qstr", "name");
    MEMBER_OFFSET_INIT(qstr_len, "qstr", "len");
    if (INVALID_MEMBER(qstr_len))
        ANON_MEMBER_OFFSET_INIT(qstr_len, "qstr", "len");
    /*
     *  vfsmount members; when mnt_devname has moved into the enclosing
     *  struct mount (3.3+ kernels), initialize the "mount" variants.
     */
    MEMBER_OFFSET_INIT(vfsmount_mnt_next, "vfsmount", "mnt_next");
    MEMBER_OFFSET_INIT(vfsmount_mnt_devname, "vfsmount", "mnt_devname");
    if (INVALID_MEMBER(vfsmount_mnt_devname))
        MEMBER_OFFSET_INIT(mount_mnt_devname, "mount", "mnt_devname");
    MEMBER_OFFSET_INIT(vfsmount_mnt_dirname, "vfsmount", "mnt_dirname");
    MEMBER_OFFSET_INIT(vfsmount_mnt_sb, "vfsmount", "mnt_sb");
    MEMBER_OFFSET_INIT(vfsmount_mnt_list, "vfsmount", "mnt_list");
    if (INVALID_MEMBER(vfsmount_mnt_devname))
        MEMBER_OFFSET_INIT(mount_mnt_list, "mount", "mnt_list");
    MEMBER_OFFSET_INIT(vfsmount_mnt_parent, "vfsmount", "mnt_parent");
    if (INVALID_MEMBER(vfsmount_mnt_devname))
        MEMBER_OFFSET_INIT(mount_mnt_parent, "mount", "mnt_parent");
    MEMBER_OFFSET_INIT(vfsmount_mnt_mountpoint, "vfsmount", "mnt_mountpoint");
    if (INVALID_MEMBER(vfsmount_mnt_devname))
        MEMBER_OFFSET_INIT(mount_mnt_mountpoint, "mount", "mnt_mountpoint");
    MEMBER_OFFSET_INIT(mount_mnt, "mount", "mnt");
    /*
     *  Mount-namespace access evolved: 2.4 "namespace", then nsproxy's
     *  mnt_namespace; for in-between 2.4.20+ kernels with no debuginfo
     *  for either, hardwire the known layout.
     */
    MEMBER_OFFSET_INIT(namespace_root, "namespace", "root");
    MEMBER_OFFSET_INIT(task_struct_nsproxy, "task_struct", "nsproxy");
    if (VALID_MEMBER(namespace_root)) {
        MEMBER_OFFSET_INIT(namespace_list, "namespace", "list");
        MEMBER_OFFSET_INIT(task_struct_namespace, "task_struct", "namespace");
    } else if (VALID_MEMBER(task_struct_nsproxy)) {
        MEMBER_OFFSET_INIT(nsproxy_mnt_ns, "nsproxy", "mnt_ns");
        MEMBER_OFFSET_INIT(mnt_namespace_root, "mnt_namespace", "root");
        MEMBER_OFFSET_INIT(mnt_namespace_list, "mnt_namespace", "list");
    } else if (THIS_KERNEL_VERSION >= LINUX(2,4,20)) {
        if (CRASHDEBUG(2))
            fprintf(fp, "hardwiring namespace stuff\n");
        ASSIGN_OFFSET(task_struct_namespace) = OFFSET(task_struct_files) +
            sizeof(void *);
        ASSIGN_OFFSET(namespace_root) = sizeof(void *);
        ASSIGN_OFFSET(namespace_list) = sizeof(void *) * 2;
    }
    MEMBER_OFFSET_INIT(super_block_s_dirty, "super_block", "s_dirty");
    MEMBER_OFFSET_INIT(super_block_s_type, "super_block", "s_type");
    MEMBER_OFFSET_INIT(file_system_type_name, "file_system_type", "name");
    MEMBER_OFFSET_INIT(super_block_s_files, "super_block", "s_files");
    MEMBER_OFFSET_INIT(inode_i_flock, "inode", "i_flock");
    MEMBER_OFFSET_INIT(file_lock_fl_owner, "file_lock", "fl_owner");
    MEMBER_OFFSET_INIT(nlm_host_h_exportent, "nlm_host", "h_exportent");
    MEMBER_OFFSET_INIT(svc_client_cl_ident, "svc_client", "cl_ident");
    MEMBER_OFFSET_INIT(inode_i_fop, "inode","i_fop");

    STRUCT_SIZE_INIT(umode_t, "umode_t");
    STRUCT_SIZE_INIT(dentry, "dentry");
    STRUCT_SIZE_INIT(files_struct, "files_struct");
    if (VALID_MEMBER(files_struct_fdt))
        STRUCT_SIZE_INIT(fdtable, "fdtable");
    STRUCT_SIZE_INIT(file, "file");
    STRUCT_SIZE_INIT(inode, "inode");
    STRUCT_SIZE_INIT(mount, "mount");
    STRUCT_SIZE_INIT(vfsmount, "vfsmount");
    STRUCT_SIZE_INIT(fs_struct, "fs_struct");
    STRUCT_SIZE_INIT(super_block, "super_block");

    /* pre-allocate the file/dentry/inode readmem caches */
    if (!(ft->file_cache = (char *)malloc(SIZE(file)*FILE_CACHE)))
        error(FATAL, "cannot malloc file cache\n");
    if (!(ft->dentry_cache = (char *)malloc(SIZE(dentry)*DENTRY_CACHE)))
        error(FATAL, "cannot malloc dentry cache\n");
    if (!(ft->inode_cache = (char *)malloc(SIZE(inode)*INODE_CACHE)))
        error(FATAL, "cannot malloc inode cache\n");

    MEMBER_OFFSET_INIT(rb_root_rb_node, "rb_root","rb_node");
    MEMBER_OFFSET_INIT(rb_node_rb_left, "rb_node","rb_left");
    MEMBER_OFFSET_INIT(rb_node_rb_right, "rb_node","rb_right");
}

/*
 *  Dump the filesys_table cache contents (verbose) and/or the cache
 *  hit-rate statistics.
 */
void
dump_filesys_table(int verbose)
{
    int i;
    ulong fhits, dhits, ihits;

    if (!verbose)
        goto show_hit_rates;

    for (i = 0; i < FILE_CACHE; i++)
        fprintf(fp, " cached_file[%2d]: %lx (%ld)\n",
            i, ft->cached_file[i],
            ft->cached_file_hits[i]);
    fprintf(fp, " file_cache: %lx\n", (ulong)ft->file_cache);
    fprintf(fp, " file_cache_index: %d\n", ft->file_cache_index);
    fprintf(fp, " file_cache_fills: %ld\n", ft->file_cache_fills);

    for (i = 0; i < DENTRY_CACHE; i++)
        fprintf(fp, " cached_dentry[%2d]: %lx (%ld)\n",
            i, ft->cached_dentry[i],
            ft->cached_dentry_hits[i]);
    fprintf(fp, " dentry_cache: %lx\n", (ulong)ft->dentry_cache);
    fprintf(fp, "dentry_cache_index: %d\n", ft->dentry_cache_index);
    fprintf(fp, "dentry_cache_fills: %ld\n", ft->dentry_cache_fills);

    for (i = 0; i < INODE_CACHE; i++)
        fprintf(fp, " cached_inode[%2d]: %lx (%ld)\n",
            i, ft->cached_inode[i],
            ft->cached_inode_hits[i]);
    fprintf(fp, " inode_cache: %lx\n", (ulong)ft->inode_cache);
    fprintf(fp, " inode_cache_index: %d\n", ft->inode_cache_index);
    fprintf(fp, " inode_cache_fills: %ld\n", ft->inode_cache_fills);

show_hit_rates:
    if (ft->file_cache_fills) {
        for (i = fhits = 0; i < FILE_CACHE; i++)
            fhits += ft->cached_file_hits[i];
        fprintf(fp, " file hit rate: %2ld%% (%ld of %ld)\n",
            (fhits * 100)/ft->file_cache_fills,
            fhits, ft->file_cache_fills);
    }

    if (ft->dentry_cache_fills) {
        for (i = dhits = 0; i < DENTRY_CACHE; i++)
            dhits += ft->cached_dentry_hits[i];
        fprintf(fp, " dentry hit rate: %2ld%% (%ld of %ld)\n",
            (dhits * 100)/ft->dentry_cache_fills,
            dhits, ft->dentry_cache_fills);
    }

    if (ft->inode_cache_fills) {
        for (i = ihits = 0; i < INODE_CACHE; i++)
ihits += ft->cached_inode_hits[i];
        fprintf(fp, " inode hit rate: %2ld%% (%ld of %ld)\n",
            (ihits * 100)/ft->inode_cache_fills,
            ihits, ft->inode_cache_fills);
    }
}

/*
 *  Get the page count for the specific mapping
 */
static long
get_inode_nrpages(ulong i_mapping)
{
    char *address_space_buf;
    ulong nrpages;

    address_space_buf = GETBUF(SIZE(address_space));

    readmem(i_mapping, KVADDR, address_space_buf,
        SIZE(address_space), "address_space buffer",
        FAULT_ON_ERROR);
    nrpages = ULONG(address_space_buf + OFFSET(address_space_nrpages));

    FREEBUF(address_space_buf);

    return nrpages;
}

/*
 *  Display an inode's address and page count, then dump every page in
 *  its i_mapping page cache by walking the page_tree radix tree.
 */
static void
dump_inode_page_cache_info(ulong inode)
{
    char *inode_buf;
    ulong i_mapping, nrpages, root_rnode, count;
    struct radix_tree_pair rtp;
    char header[BUFSIZE];
    char buf1[BUFSIZE];
    char buf2[BUFSIZE];

    inode_buf = GETBUF(SIZE(inode));
    readmem(inode, KVADDR, inode_buf, SIZE(inode), "inode buffer",
        FAULT_ON_ERROR);

    i_mapping = ULONG(inode_buf + OFFSET(inode_i_mapping));
    nrpages = get_inode_nrpages(i_mapping);

    sprintf(header, "%s NRPAGES\n",
        mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "INODE"));
    fprintf(fp, "%s", header);

    fprintf(fp, "%s %s\n\n",
        mkstring(buf1, VADDR_PRLEN,
        CENTER|RJUST|LONG_HEX, MKSTR(inode)),
        mkstring(buf2, strlen("NRPAGES"),
        RJUST|LONG_DEC, MKSTR(nrpages)));

    root_rnode = i_mapping + OFFSET(address_space_page_tree);
    rtp.index = 0;
    rtp.value = (void *)&dump_inode_page;

    /* dump_inode_page is invoked as a callback for each cached page */
    count = do_radix_tree(root_rnode, RADIX_TREE_DUMP_CB, &rtp);
    if (count != nrpages)
        error(INFO, "page_tree count: %ld nrpages: %ld\n",
            count, nrpages);

    FREEBUF(inode_buf);
    return;
}

/*
 * This command displays information about the open files of a context.
 * For each open file descriptor the file descriptor number, a pointer
 * to the file struct, pointer to the dentry struct, pointer to the inode
 * struct, indication of file type and pathname are printed.
 * The argument can be a task address or a PID number; if no args, the
 * current context is used.
* If the flag -l is passed, any files held open in the kernel by the
 * lockd server on behalf of an NFS client are displayed.
 */
void
cmd_files(void)
{
    int c;
    ulong value;
    struct task_context *tc;
    int subsequent;
    struct reference reference, *ref;
    char *refarg;
    int open_flags = 0;

    ref = NULL;
    refarg = NULL;

    while ((c = getopt(argcnt, args, "d:R:p:c")) != EOF) {
        switch(c)
        {
        case 'R':
            if (ref) {
                error(INFO, "only one -R option allowed\n");
                argerrs++;
            } else {
                ref = &reference;
                BZERO(ref, sizeof(struct reference));
                ref->str = refarg = optarg;
            }
            break;

        case 'd':
            /* dump a single dentry and return immediately */
            value = htol(optarg, FAULT_ON_ERROR, NULL);
            display_dentry_info(value);
            return;

        case 'p':
            /* page-cache dump requires radix-tree support */
            if (VALID_MEMBER(address_space_page_tree) &&
                VALID_MEMBER(inode_i_mapping)) {
                value = htol(optarg, FAULT_ON_ERROR, NULL);
                dump_inode_page_cache_info(value);
            } else
                option_not_supported('p');
            return;

        case 'c':
            if (VALID_MEMBER(address_space_page_tree) &&
                VALID_MEMBER(inode_i_mapping))
                open_flags |= PRINT_NRPAGES;
            else
                option_not_supported('c');
            break;

        default:
            argerrs++;
            break;
        }
    }

    if (argerrs)
        cmd_usage(pc->curcmd, SYNOPSIS);

    /* no task/pid argument: use the current context */
    if (!args[optind]) {
        if (!ref)
            print_task_header(fp, CURRENT_CONTEXT(), 0);
        open_files_dump(CURRENT_TASK(), open_flags, ref);
        return;
    }

    subsequent = 0;

    while (args[optind]) {
        if (ref && subsequent) {
            BZERO(ref, sizeof(struct reference));
            ref->str = refarg;
        }

        switch (str_to_context(args[optind], &value, &tc))
        {
        case STR_PID:
            /* a PID may map to several contexts (e.g. threads) */
            for (tc = pid_to_context(value); tc; tc = tc->tc_next) {
                if (!ref)
                    print_task_header(fp, tc, subsequent);
                open_files_dump(tc->task, open_flags, ref);
                fprintf(fp, "\n");
            }
            break;

        case STR_TASK:
            if (!ref)
                print_task_header(fp, tc, subsequent);
            open_files_dump(tc->task, open_flags, ref);
            break;

        case STR_INVALID:
            error(INFO, "invalid task or pid value: %s\n",
                args[optind]);
            break;
        }

        subsequent++;
        optind++;
    }
}

#define FILES_REF_HEXNUM (0x1)
#define FILES_REF_DECNUM (0x2)
#define FILES_REF_FOUND  (0x4)

/*
 *  Emit the task header, ROOT/CWD line, files header and the matching
 *  file line (buf4) -- each at most once -- and flag that a reference
 *  was found.  Relies on locals of open_files_dump().
 */
#define PRINT_FILE_REFERENCE()                  \
    if (!root_pwd_printed) {                    \
        print_task_header(fp, tc, 0);           \
        fprintf(fp, "%s", root_pwd);            \
        root_pwd_printed = TRUE;                \
    }                                           \
    if (!header_printed) {                      \
        fprintf(fp, "%s", files_header);        \
        header_printed = TRUE;                  \
    }                                           \
    fprintf(fp, "%s", buf4);                    \
    ref->cmdflags |= FILES_REF_FOUND;

/* TRUE if component C appears in pathname P ("/" only matches "/") */
#define FILENAME_COMPONENT(P,C) \
    ((STREQ((P), "/") && STREQ((C), "/")) || \
     (!STREQ((C), "/") && strstr((P),(C))))

/*
 * open_files_dump() does the work for cmd_files().
 */
void
open_files_dump(ulong task, int flags, struct reference *ref)
{
    struct task_context *tc;
    ulong files_struct_addr;
    ulong fdtable_addr = 0;
    char *files_struct_buf, *fdtable_buf = NULL;
    ulong fs_struct_addr;
    char *dentry_buf, *fs_struct_buf;
    char *ret ATTRIBUTE_UNUSED;
    ulong root_dentry, pwd_dentry;
    ulong root_inode, pwd_inode;
    ulong vfsmnt;
    int max_fdset = 0;
    int max_fds = 0;
    ulong open_fds_addr;
    fd_set open_fds;
    ulong fd;
    ulong file;
    ulong value;
    int i, j, use_path;
    int header_printed = 0;
    char root_pathname[BUFSIZE];
    char pwd_pathname[BUFSIZE];
    char files_header[BUFSIZE];
    char buf1[BUFSIZE];
    char buf2[BUFSIZE];
    char buf3[BUFSIZE];
    char buf4[BUFSIZE];
    char root_pwd[BUFSIZE];
    int root_pwd_printed = 0;
    int file_dump_flags = 0;

    BZERO(root_pathname, BUFSIZE);
    BZERO(pwd_pathname, BUFSIZE);
    files_struct_buf = GETBUF(SIZE(files_struct));
    if (VALID_STRUCT(fdtable))
        fdtable_buf = GETBUF(SIZE(fdtable));
    fill_task_struct(task);

    /* build the column header for the selected output format */
    if (flags & PRINT_NRPAGES) {
        sprintf(files_header, " FD%s%s%s%s%sNRPAGES%sTYPE%sPATH\n",
            space(MINSPACE),
            mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "INODE"),
            space(MINSPACE),
            mkstring(buf2, MAX(VADDR_PRLEN, strlen("I_MAPPING")),
            BITS32() ? (CENTER|RJUST) : (CENTER|LJUST), "I_MAPPING"),
            space(MINSPACE),
            space(MINSPACE),
            space(MINSPACE));
    } else {
        sprintf(files_header, " FD%s%s%s%s%s%s%sTYPE%sPATH\n",
            space(MINSPACE),
            mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "FILE"),
            space(MINSPACE),
            mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "DENTRY"),
            space(MINSPACE),
            mkstring(buf3, VADDR_PRLEN, CENTER|LJUST, "INODE"),
            space(MINSPACE),
            space(MINSPACE));
    }

    tc = task_to_context(task);

    if (ref)
        ref->cmdflags = 0;

    /*
     *  Resolve and display (or match against) the task's root and
     *  current working directories from its fs_struct.
     */
    fs_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_fs));

    if (fs_struct_addr) {
        fs_struct_buf = GETBUF(SIZE(fs_struct));
        readmem(fs_struct_addr, KVADDR, fs_struct_buf, SIZE(fs_struct),
            "fs_struct buffer", FAULT_ON_ERROR);

        /* newer kernels embed struct path in fs_struct.root/pwd */
        use_path = (MEMBER_TYPE("fs_struct", "root") == TYPE_CODE_STRUCT);
        if (use_path)
            root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root) +
                OFFSET(path_dentry));
        else
            root_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_root));

        if (root_dentry) {
            if (VALID_MEMBER(fs_struct_rootmnt)) {
                vfsmnt = ULONG(fs_struct_buf +
                    OFFSET(fs_struct_rootmnt));
                get_pathname(root_dentry, root_pathname,
                    BUFSIZE, 1, vfsmnt);
            } else if (use_path) {
                vfsmnt = ULONG(fs_struct_buf +
                    OFFSET(fs_struct_root) +
                    OFFSET(path_mnt));
                get_pathname(root_dentry, root_pathname,
                    BUFSIZE, 1, vfsmnt);
            } else {
                get_pathname(root_dentry, root_pathname,
                    BUFSIZE, 1, 0);
            }
        }

        if (use_path)
            pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd) +
                OFFSET(path_dentry));
        else
            pwd_dentry = ULONG(fs_struct_buf + OFFSET(fs_struct_pwd));

        if (pwd_dentry) {
            if (VALID_MEMBER(fs_struct_pwdmnt)) {
                vfsmnt = ULONG(fs_struct_buf +
                    OFFSET(fs_struct_pwdmnt));
                get_pathname(pwd_dentry, pwd_pathname,
                    BUFSIZE, 1, vfsmnt);
            } else if (use_path) {
                vfsmnt = ULONG(fs_struct_buf +
                    OFFSET(fs_struct_pwd) +
                    OFFSET(path_mnt));
                get_pathname(pwd_dentry, pwd_pathname,
                    BUFSIZE, 1, vfsmnt);
            } else {
                get_pathname(pwd_dentry, pwd_pathname,
                    BUFSIZE, 1, 0);
            }
        }

        if ((flags & PRINT_INODES) && root_dentry && pwd_dentry) {
            dentry_buf = fill_dentry_cache(root_dentry);
            root_inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
            dentry_buf = fill_dentry_cache(pwd_dentry);
            pwd_inode = ULONG(dentry_buf + OFFSET(dentry_d_inode));
            fprintf(fp, "ROOT: %lx %s CWD: %lx %s\n",
                root_inode, root_pathname, pwd_inode, pwd_pathname);
        } else if (ref) {
            /* defer printing until a reference actually matches */
            snprintf(root_pwd, sizeof(root_pwd),
                "ROOT: %s CWD: %s \n",
                root_pathname, pwd_pathname);
            if (FILENAME_COMPONENT(root_pathname, ref->str) ||
                FILENAME_COMPONENT(pwd_pathname, ref->str)) {
                print_task_header(fp, tc, 0);
                fprintf(fp, "%s", root_pwd);
                root_pwd_printed = TRUE;
                ref->cmdflags |= FILES_REF_FOUND;
            }
        } else
            fprintf(fp, "ROOT: %s CWD: %s\n",
                root_pathname, pwd_pathname);

        FREEBUF(fs_struct_buf);
    }

    files_struct_addr = ULONG(tt->task_struct + OFFSET(task_struct_files));

    if (files_struct_addr) {
        readmem(files_struct_addr, KVADDR, files_struct_buf,
            SIZE(files_struct), "files_struct buffer",
            FAULT_ON_ERROR);

        if (VALID_MEMBER(files_struct_max_fdset)) {
            max_fdset = INT(files_struct_buf +
                OFFSET(files_struct_max_fdset));
            max_fds = INT(files_struct_buf +
                OFFSET(files_struct_max_fds));
        }
    }

    /* 2.6.14+ kernels keep fd bookkeeping in a separate fdtable */
    if (VALID_MEMBER(files_struct_fdt)) {
        fdtable_addr = ULONG(files_struct_buf + OFFSET(files_struct_fdt));

        if (fdtable_addr) {
            readmem(fdtable_addr, KVADDR, fdtable_buf, SIZE(fdtable),
                "fdtable buffer", FAULT_ON_ERROR);
            if (VALID_MEMBER(fdtable_max_fdset))
                max_fdset = INT(fdtable_buf +
                    OFFSET(fdtable_max_fdset));
            else
                max_fdset = -1;
            max_fds = INT(fdtable_buf +
                OFFSET(fdtable_max_fds));
        }
    }

    if ((VALID_MEMBER(files_struct_fdt) && !fdtable_addr) ||
        !files_struct_addr || max_fdset == 0 || max_fds == 0) {
        if (ref) {
            if (ref->cmdflags & FILES_REF_FOUND)
                fprintf(fp, "\n");
        } else
            fprintf(fp, "No open files\n");
        if (fdtable_buf)
            FREEBUF(fdtable_buf);
        FREEBUF(files_struct_buf);
        return;
    }

    /* a numeric -R argument may be an fd number or a kernel address */
    if (ref && IS_A_NUMBER(ref->str)) {
        if (hexadecimal_only(ref->str, 0)) {
            ref->hexval = htol(ref->str,
                FAULT_ON_ERROR, NULL);
            ref->cmdflags |= FILES_REF_HEXNUM;
        } else {
            value = dtol(ref->str, FAULT_ON_ERROR, NULL);
            if (value <= MAX(max_fdset, max_fds)) {
                ref->decval = value;
                ref->cmdflags |= FILES_REF_DECNUM;
            } else {
                ref->hexval = htol(ref->str,
                    FAULT_ON_ERROR, NULL);
                ref->cmdflags |= FILES_REF_HEXNUM;
            }
        }
    }

    if (VALID_MEMBER(fdtable_open_fds))
        open_fds_addr = ULONG(fdtable_buf +
            OFFSET(fdtable_open_fds));
    else
        open_fds_addr = ULONG(files_struct_buf +
            OFFSET(files_struct_open_fds));

    if (open_fds_addr) {
        /* the bitmap may be embedded in files_struct itself */
        if (VALID_MEMBER(files_struct_open_fds_init) &&
            (open_fds_addr == (files_struct_addr +
            OFFSET(files_struct_open_fds_init))))
            BCOPY(files_struct_buf +
                OFFSET(files_struct_open_fds_init),
                &open_fds, sizeof(fd_set));
        else
            readmem(open_fds_addr, KVADDR, &open_fds,
                sizeof(fd_set), "fdtable open_fds",
                FAULT_ON_ERROR);
    }

    if (VALID_MEMBER(fdtable_fd))
        fd = ULONG(fdtable_buf + OFFSET(fdtable_fd));
    else
        fd = ULONG(files_struct_buf + OFFSET(files_struct_fd));

    if (!open_fds_addr || !fd) {
        if (ref && (ref->cmdflags & FILES_REF_FOUND))
            fprintf(fp, "\n");
        if (fdtable_buf)
            FREEBUF(fdtable_buf);
        FREEBUF(files_struct_buf);
        return;
    }

    file_dump_flags = DUMP_FULL_NAME | DUMP_EMPTY_FILE;
    if (flags & PRINT_NRPAGES)
        file_dump_flags |= DUMP_FILE_NRPAGES;

    /*
     *  Walk the open_fds bitmap one word at a time; each set bit is an
     *  open descriptor whose file pointer is read from the fd array.
     */
    j = 0;
    for (;;) {
        unsigned long set;
        i = j * __NFDBITS;
        if (((max_fdset >= 0) && (i >= max_fdset)) ||
            (i >= max_fds))
            break;
        set = open_fds.__fds_bits[j++];
        while (set) {
            if (set & 1) {
                readmem(fd + i*sizeof(struct file *), KVADDR,
                    &file, sizeof(struct file *),
                    "fd file", FAULT_ON_ERROR);

                if (ref && file) {
                    /* capture file_dump output to match it */
                    open_tmpfile();
                    if (file_dump(file, 0, 0, i,
                        file_dump_flags)) {
                        BZERO(buf4, BUFSIZE);
                        rewind(pc->tmpfile);
                        ret = fgets(buf4, BUFSIZE,
                            pc->tmpfile);
                        close_tmpfile();
                        ref->refp = buf4;
                        if (open_file_reference(ref)) {
                            PRINT_FILE_REFERENCE();
                        }
                    } else
                        close_tmpfile();
                } else if (file) {
                    if (!header_printed) {
                        fprintf(fp, "%s",
                            files_header);
                        header_printed = 1;
                    }
                    file_dump(file, 0, 0, i,
                        file_dump_flags);
                }
            }
            i++;
            set >>= 1;
        }
    }

    if (!header_printed && !ref)
        fprintf(fp, "No open files\n");

    if (ref && (ref->cmdflags & FILES_REF_FOUND))
        fprintf(fp, "\n");

    if (fdtable_buf)
FREEBUF(fdtable_buf);
    FREEBUF(files_struct_buf);
}

/*
 *  Check an open file string for references.
 */
static int
open_file_reference(struct reference *ref)
{
    char buf[BUFSIZE];
    char *arglist[MAXARGS];
    int i, fd, argcnt;
    ulong vaddr;

    strcpy(buf, ref->refp);
    /* expect at least: FD FILE DENTRY INODE TYPE [PATH] */
    if ((argcnt = parse_line(buf, arglist)) < 5)
        return FALSE;

    if (ref->cmdflags & (FILES_REF_HEXNUM|FILES_REF_DECNUM)) {
        /* match the fd number itself ... */
        fd = dtol(arglist[0], FAULT_ON_ERROR, NULL);
        if (((ref->cmdflags & FILES_REF_HEXNUM) &&
            (fd == ref->hexval)) ||
            ((ref->cmdflags & FILES_REF_DECNUM) &&
            (fd == ref->decval))) {
            return TRUE;
        }

        /* ... or any of the file/dentry/inode addresses */
        for (i = 1; i < 4; i++) {
            if (STREQ(arglist[i], "?"))
                continue;
            vaddr = htol(arglist[i], FAULT_ON_ERROR, NULL);
            if (vaddr == ref->hexval)
                return TRUE;
        }
    }

    /* match the TYPE field exactly */
    if (STREQ(ref->str, arglist[4])) {
        return TRUE;
    }

    /* or a component of the pathname, when one was printed */
    if ((argcnt == 6) && FILENAME_COMPONENT(arglist[5], ref->str)) {
        return TRUE;
    }

    return FALSE;
}

#ifdef DEPRECATED
/*
 *  nlm_files_dump() prints files held open by lockd server on behalf
 *  of NFS clients
 */
#define FILE_NRHASH 32

char nlm_files_header[BUFSIZE] = { 0 };
char *nlm_header = \
"Files open by lockd for client discretionary file locks:\n";

void
nlm_files_dump(void)
{
    int header_printed = 0;
    int i, j, cnt;
    ulong nlmsvc_ops, nlm_files;
    struct syment *nsp;
    ulong nlm_files_array[FILE_NRHASH];
    struct list_data list_data, *ld;
    ulong *file, *files_list;
    ulong dentry, inode, flock, host, client;
    char buf1[BUFSIZE];
    char buf2[BUFSIZE];

    if (!strlen(nlm_files_header)) {
        sprintf(nlm_files_header, "CLIENT %s %s%sTYPE%sPATH\n",
            mkstring(buf1, VADDR_PRLEN, CENTER|LJUST, "NLM_FILE"),
            mkstring(buf2, VADDR_PRLEN, CENTER|LJUST, "INODE"),
            space(MINSPACE), space(MINSPACE));
    }

    /* only meaningful when nfsd's lockd operations are installed */
    if (!symbol_exists("nlm_files") ||
        !symbol_exists("nlmsvc_ops") ||
        !symbol_exists("nfsd_nlm_ops")) {
        goto out;
    }

    get_symbol_data("nlmsvc_ops", sizeof(void *), &nlmsvc_ops);
    if (nlmsvc_ops != symbol_value("nfsd_nlm_ops")) {
        goto out;
    }

    if ((nsp = next_symbol("nlm_files", NULL)) == NULL) {
        error(WARNING,
            "cannot find next symbol after nlm_files\n");
goto out; } nlm_files = symbol_value("nlm_files"); if (((nsp->value - nlm_files) / sizeof(void *)) != FILE_NRHASH ) { error(WARNING, "FILE_NRHASH has changed from %d\n", FILE_NRHASH); if (((nsp->value - nlm_files) / sizeof(void *)) < FILE_NRHASH ) goto out; } readmem(nlm_files, KVADDR, nlm_files_array, sizeof(ulong) * FILE_NRHASH, "nlm_files array", FAULT_ON_ERROR); for (i = 0; i < FILE_NRHASH; i++) { if (nlm_files_array[i] == 0) { continue; } ld = &list_data; BZERO(ld, sizeof(struct list_data)); ld->start = nlm_files_array[i]; hq_open(); cnt = do_list(ld); files_list = (ulong *)GETBUF(cnt * sizeof(ulong)); cnt = retrieve_list(files_list, cnt); hq_close(); for (j=0, file = files_list; j 1 || !STREQ(buf, "/")) && !STRNEQ(tmpname, "/")) { sprintf(pathname, "%s%s%s", buf, "/", tmpname); } else { sprintf(pathname, "%s%s", buf, tmpname); } } } else { strncpy(pathname, buf, BUFSIZE); } parent = ULONG(dentry_buf + OFFSET(dentry_d_parent)); if (tmp_dentry == parent && full) { if (VALID_MEMBER(vfsmount_mnt_mountpoint)) { if (tmp_vfsmnt) { if (strncmp(pathname, "//", 2) == 0) shift_string_left(pathname, 1); readmem(tmp_vfsmnt, KVADDR, vfsmnt_buf, SIZE(vfsmount), "vfsmount buffer", FAULT_ON_ERROR); parent = ULONG(vfsmnt_buf + OFFSET(vfsmount_mnt_mountpoint)); mnt_parent = ULONG(vfsmnt_buf + OFFSET(vfsmount_mnt_parent)); if (tmp_vfsmnt == mnt_parent) break; else tmp_vfsmnt = mnt_parent; } } else if (VALID_STRUCT(mount)) { if (tmp_vfsmnt) { if (strncmp(pathname, "//", 2) == 0) shift_string_left(pathname, 1); readmem(tmp_vfsmnt - OFFSET(mount_mnt), KVADDR, mnt_buf, SIZE(mount), "mount buffer", FAULT_ON_ERROR); parent = ULONG(mnt_buf + OFFSET(mount_mnt_mountpoint)); mnt_parent = ULONG(mnt_buf + OFFSET(mount_mnt_parent)); if ((tmp_vfsmnt - OFFSET(mount_mnt)) == mnt_parent) break; else tmp_vfsmnt = mnt_parent + OFFSET(mount_mnt); } } else { parent = ULONG(dentry_buf + OFFSET(dentry_d_covers)); } } } while (tmp_dentry != parent && parent); if (mnt_buf) FREEBUF(mnt_buf); else if 
(vfsmnt_buf) FREEBUF(vfsmnt_buf); } /* * If the pathname component, which may be internal or external to the * dentry, has string length equal to what's expected, copy it into the * passed-in buffer, and return its length. If it doesn't match, return 0. */ static int get_pathname_component(ulong dentry, ulong d_name_name, int d_name_len, char *dentry_buf, char *pathbuf) { int len = d_name_len; /* presume success */ if (d_name_name == (dentry + OFFSET(dentry_d_iname))) { if (strlen(dentry_buf + OFFSET(dentry_d_iname)) == d_name_len) strcpy(pathbuf, dentry_buf + OFFSET(dentry_d_iname)); else len = 0; } else if ((read_string(d_name_name, pathbuf, BUFSIZE)) != d_name_len) len = 0; return len; } /* * Cache the passed-in file structure. */ char * fill_file_cache(ulong file) { int i; char *cache; ft->file_cache_fills++; for (i = 0; i < DENTRY_CACHE; i++) { if (ft->cached_file[i] == file) { ft->cached_file_hits[i]++; cache = ft->file_cache + (SIZE(file)*i); return(cache); } } cache = ft->file_cache + (SIZE(file)*ft->file_cache_index); readmem(file, KVADDR, cache, SIZE(file), "fill_file_cache", FAULT_ON_ERROR); ft->cached_file[ft->file_cache_index] = file; ft->file_cache_index = (ft->file_cache_index+1) % DENTRY_CACHE; return(cache); } /* * If active, clear the file references. */ void clear_file_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_file[i] = 0; ft->cached_file_hits[i] = 0; } ft->file_cache_fills = 0; ft->file_cache_index = 0; } /* * Cache the passed-in dentry structure. 
*/ char * fill_dentry_cache(ulong dentry) { int i; char *cache; ft->dentry_cache_fills++; for (i = 0; i < DENTRY_CACHE; i++) { if (ft->cached_dentry[i] == dentry) { ft->cached_dentry_hits[i]++; cache = ft->dentry_cache + (SIZE(dentry)*i); return(cache); } } cache = ft->dentry_cache + (SIZE(dentry)*ft->dentry_cache_index); readmem(dentry, KVADDR, cache, SIZE(dentry), "fill_dentry_cache", FAULT_ON_ERROR); ft->cached_dentry[ft->dentry_cache_index] = dentry; ft->dentry_cache_index = (ft->dentry_cache_index+1) % DENTRY_CACHE; return(cache); } /* * If active, clear the dentry references. */ void clear_dentry_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_dentry[i] = 0; ft->cached_dentry_hits[i] = 0; } ft->dentry_cache_fills = 0; ft->dentry_cache_index = 0; } /* * Cache the passed-in inode structure. */ char * fill_inode_cache(ulong inode) { int i; char *cache; ft->inode_cache_fills++; for (i = 0; i < INODE_CACHE; i++) { if (ft->cached_inode[i] == inode) { ft->cached_inode_hits[i]++; cache = ft->inode_cache + (SIZE(inode)*i); return(cache); } } cache = ft->inode_cache + (SIZE(inode)*ft->inode_cache_index); readmem(inode, KVADDR, cache, SIZE(inode), "fill_inode_cache", FAULT_ON_ERROR); ft->cached_inode[ft->inode_cache_index] = inode; ft->inode_cache_index = (ft->inode_cache_index+1) % INODE_CACHE; return(cache); } /* * If active, clear the inode references. */ void clear_inode_cache(void) { int i; if (DUMPFILE()) return; for (i = 0; i < DENTRY_CACHE; i++) { ft->cached_inode[i] = 0; ft->cached_inode_hits[i] = 0; } ft->inode_cache_fills = 0; ft->inode_cache_index = 0; } /* * This command displays the tasks using specified files or sockets. * Tasks will be listed that reference the file as the current working * directory, root directory, an open file descriptor, or that mmap the * file. * The argument can be a full pathname without symbolic links, or inode * address. 
void
cmd_fuser(void)
{
    int c;
    char *spec_string, *tmp;
    struct foreach_data foreach_data, *fd;
    char task_buf[BUFSIZE];
    char buf[BUFSIZE];
    char uses[20];
    char fuser_header[BUFSIZE];
    int doing_fds, doing_mmap, len;
    int fuser_header_printed, lockd_header_printed;

    while ((c = getopt(argcnt, args, "")) != EOF) {
        switch(c)
        {
        default:
            argerrs++;
            break;
        }
    }

    if (argerrs)
        cmd_usage(pc->curcmd, SYNOPSIS);

    if (!args[optind]) {
        cmd_usage(pc->curcmd, SYNOPSIS);
        return;
    }

    sprintf(fuser_header, " PID %s COMM USAGE\n",
        mkstring(buf, VADDR_PRLEN, CENTER, "TASK"));

    doing_fds = doing_mmap = 0;

    while (args[optind]) {
        spec_string = args[optind];
        /* normalize a hex inode address by dropping its "0x" prefix */
        if (STRNEQ(spec_string, "0x") && hexadecimal(spec_string, 0))
            shift_string_left(spec_string, 2);
        len = strlen(spec_string);
        fuser_header_printed = 0;
        lockd_header_printed = 0;

        /*
         *  Run "foreach files" and "foreach vm" into a tmpfile, then
         *  scan that output for exact-token matches of spec_string.
         */
        open_tmpfile();
        BZERO(&foreach_data, sizeof(struct foreach_data));
        fd = &foreach_data;
        fd->keyword_array[0] = FOREACH_FILES;
        fd->keyword_array[1] = FOREACH_VM;
        fd->keys = 2;
        fd->flags |= FOREACH_i_FLAG;
        foreach(fd);
        rewind(pc->tmpfile);
        BZERO(uses, 20);
        while (fgets(buf, BUFSIZE, pc->tmpfile)) {
            /* a new task section: flush the previous task's usage */
            if (STRNEQ(buf, "PID:")) {
                if (!STREQ(uses, "")) {
                    if (!fuser_header_printed) {
                        fprintf(pc->saved_fp, "%s",
                            fuser_header);
                        fuser_header_printed = 1;
                    }
                    show_fuser(task_buf, uses);
                    BZERO(uses, 20);
                }
                BZERO(task_buf, BUFSIZE);
                strcpy(task_buf, buf);
                doing_fds = doing_mmap = 0;
                continue;
            }

            /* the ROOT:/CWD: line may match as root, cwd or both */
            if (STRNEQ(buf, "ROOT:")) {
                if ((tmp = strstr(buf, spec_string)) &&
                    (tmp[len] == ' ' || tmp[len] == '\n')) {
                    if (strstr(tmp, "CWD:")) {
                        /* match was in the ROOT: portion */
                        strcat(uses, "root ");
                        if ((tmp = strstr(tmp+len,
                            spec_string)) &&
                            (tmp[len] == ' ' ||
                            tmp[len] == '\n')) {
                            strcat(uses, "cwd ");
                        }
                    } else {
                        strcat(uses, "cwd ");
                    }
                }
                continue;
            }

            /* header lines mark which section follows */
            if (strstr(buf, "DENTRY")) {
                doing_fds = 1;
                continue;
            }
            if (strstr(buf, "TOTAL_VM")) {
                doing_fds = 0;
                continue;
            }
            if (strstr(buf, " VMA ")) {
                doing_mmap = 1;
                doing_fds = 0;
                continue;
            }

            if ((tmp = strstr(buf, spec_string)) &&
                (tmp[len] == ' ' || tmp[len] == '\n')) {
                if (doing_fds) {
                    strcat(uses, "fd ");
                    doing_fds = 0;
                }
                if (doing_mmap) {
                    strcat(uses, "mmap ");
                    doing_mmap = 0;
                }
            }
        }

        /* flush the final task's accumulated usage, if any */
        if (!STREQ(uses, "")) {
            if (!fuser_header_printed) {
                fprintf(pc->saved_fp, "%s",
                    fuser_header);
                fuser_header_printed = 1;
            }
            show_fuser(task_buf, uses);
            BZERO(uses, 20);
        }
        close_tmpfile();
        optind++;
        if (!fuser_header_printed && !lockd_header_printed) {
            fprintf(fp, "No users of %s found\n", spec_string);
        }
    }
}

/*
 *  Parse the PID, TASK and COMMAND fields out of a saved "PID: ..."
 *  task header line and print them alongside the usage string.
 */
static void
show_fuser(char *buf, char *uses)
{
    char pid[10];
    char task[20];
    char command[20];
    char *p;
    int i;

    BZERO(pid, 10);
    BZERO(task, 20);
    BZERO(command, 20);
    p = strstr(buf, "PID: ") + strlen("PID: ");
    i = 0;
    while (*p != ' ' && i < 10) {
        pid[i++] = *p++;
    }
    pid[i] = NULLCHAR;

    p = strstr(buf, "TASK: ") + strlen("TASK: ");
    while (*p == ' ')
        p++;
    i = 0;
    while (*p != ' ' && i < 20) {
        task[i++] = *p++;
    }
    task[i] = NULLCHAR;
    mkstring(task, VADDR_PRLEN, RJUST, task);

    /* COMMAND is padded/truncated to a fixed 16-character field */
    p = strstr(buf, "COMMAND: ") + strlen("COMMAND: ");
    strncpy(command, p, 16);
    i = strlen(command) - 1;
    while (i < 16) {
        command[i++] = ' ';
    }
    command[16] = NULLCHAR;

    fprintf(pc->saved_fp, "%5s %s %s %s\n", pid, task, command, uses);
}

/*
 *  Gather some host memory/swap statistics, passing back whatever the
 *  caller requires.
 */
*/ int monitor_memory(long *freemem_pages, long *freeswap_pages, long *mem_usage, long *swap_usage) { FILE *mp; char buf[BUFSIZE]; char *arglist[MAXARGS]; int argc ATTRIBUTE_UNUSED; int params; ulong freemem, memtotal, freeswap, swaptotal; if (!file_exists("/proc/meminfo", NULL)) return FALSE; if ((mp = fopen("/proc/meminfo", "r")) == NULL) return FALSE; params = 0; freemem = memtotal = freeswap = swaptotal = 0; while (fgets(buf, BUFSIZE, mp)) { if (strstr(buf, "SwapFree")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) freeswap = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "MemFree")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) freemem = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "MemTotal")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) memtotal = (atol(arglist[1]) * 1024)/PAGESIZE(); } if (strstr(buf, "SwapTotal")) { params++; argc = parse_line(buf, arglist); if (decimal(arglist[1], 0)) swaptotal = (atol(arglist[1]) * 1024)/PAGESIZE(); } } fclose(mp); if (params != 4) return FALSE; if (freemem_pages) *freemem_pages = freemem; if (freeswap_pages) *freeswap_pages = freeswap; if (mem_usage) *mem_usage = ((memtotal-freemem)*100) / memtotal; if (swap_usage) *swap_usage = ((swaptotal-freeswap)*100) / swaptotal; return TRUE; } /* * Determine whether two filenames reference the same file. */ int same_file(char *f1, char *f2) { struct stat stat1, stat2; if ((stat(f1, &stat1) != 0) || (stat(f2, &stat2) != 0)) return FALSE; if ((stat1.st_dev == stat2.st_dev) && (stat1.st_ino == stat2.st_ino)) return TRUE; return FALSE; } /* * Determine which live memory source to use. 
*/
#define MODPROBE_CMD "/sbin/modprobe -l --type drivers/char 2>&1"

static void
get_live_memory_source(void)
{
    FILE *pipe;
    char buf[BUFSIZE];
    char modname1[BUFSIZE];
    char modname2[BUFSIZE];
    char *name;
    int use_module, crashbuiltin;
    struct stat stat1, stat2;
    struct utsname utsname;

    if (!(pc->flags & PROC_KCORE))
        pc->flags |= DEVMEM;

    /* a source may already have been forced via command-line option */
    if (pc->live_memsrc)
        goto live_report;

    pc->live_memsrc = "/dev/mem";
    use_module = crashbuiltin = FALSE;

    /*
     *  Candidate sources, in order: /dev/mem itself, a pre-loaded or
     *  loadable "crash" memory driver module, or a built-in /dev/crash.
     */
    if (file_exists("/dev/mem", &stat1) &&
        file_exists(pc->memory_device, &stat2) &&
        S_ISCHR(stat1.st_mode) && S_ISCHR(stat2.st_mode) &&
        (stat1.st_rdev == stat2.st_rdev)) {
        if (!STREQ(pc->memory_device, "/dev/mem"))
            error(INFO, "%s: same device as /dev/mem\n%s",
                pc->memory_device,
                pc->memory_module ? "" : "\n");
        if (pc->memory_module)
            error(INFO, "ignoring --memory_module %s request\n\n",
                pc->memory_module);
    } else if (pc->memory_module &&
        memory_driver_module_loaded(NULL)) {
        error(INFO, "using pre-loaded \"%s\" module\n\n",
            pc->memory_module);
        pc->flags |= MODPRELOAD;
        use_module = TRUE;
    } else {
        pc->memory_module = MEMORY_DRIVER_MODULE;
        /* ask modprobe whether the driver module is available */
        if ((pipe = popen(MODPROBE_CMD, "r")) == NULL) {
            error(INFO, "%s: %s\n",
                MODPROBE_CMD, strerror(errno));
            return;
        }
        sprintf(modname1, "%s.o", pc->memory_module);
        sprintf(modname2, "%s.ko", pc->memory_module);
        while (fgets(buf, BUFSIZE, pipe)) {
            /*
             *  Newer modprobe versions dropped "-l"; fall back to
             *  probing the module path directly (also .xz-compressed).
             */
            if (strstr(buf, "invalid option") &&
                (uname(&utsname) == 0)) {
                sprintf(buf, "/lib/modules/%s/kernel/drivers/char/%s",
                    utsname.release, modname2);
                if (file_exists(buf, &stat1))
                    use_module = TRUE;
                else {
                    strcat(buf, ".xz");
                    if (file_exists(buf, &stat1))
                        use_module = TRUE;
                }
                break;
            }

            name = basename(strip_linefeeds(buf));
            if (STREQ(name, modname1) ||
                STREQ(name, modname2)) {
                use_module = TRUE;
                break;
            }
        }
        pclose(pipe);

        if (!use_module && file_exists("/dev/crash", &stat1) &&
            S_ISCHR(stat1.st_mode))
            crashbuiltin = TRUE;
    }

    if (use_module) {
        pc->flags &= ~DEVMEM;
        pc->flags |= MEMMOD;
        pc->readmem = read_memory_device;
        pc->writemem = write_memory_device;
        pc->live_memsrc = pc->memory_device;
    }

    if (crashbuiltin) {
        pc->flags &= ~DEVMEM;
        pc->flags |= CRASHBUILTIN;
        pc->readmem = read_memory_device;
        pc->writemem = write_memory_device;
        pc->live_memsrc = pc->memory_device;
        pc->memory_module = NULL;
    }

live_report:
    if (CRASHDEBUG(1))
        fprintf(fp, "get_live_memory_source: %s\n", pc->live_memsrc);
}

/*
 *  Read /proc/modules to determine whether the crash driver module
 *  has been loaded.
 */
static int
memory_driver_module_loaded(int *count)
{
    FILE *modules;
    int argcnt, module_loaded;
    char *arglist[MAXARGS];
    char buf[BUFSIZE];

    if ((modules = fopen("/proc/modules", "r")) == NULL) {
        error(INFO, "/proc/modules: %s\n", strerror(errno));
        return FALSE;
    }

    module_loaded = FALSE;
    while (fgets(buf, BUFSIZE, modules)) {
        console("%s", buf);
        argcnt = parse_line(buf, arglist);
        if (argcnt < 3)
            continue;
        if (STREQ(arglist[0], pc->memory_module)) {
            module_loaded = TRUE;
            if (CRASHDEBUG(1))
                fprintf(stderr,
                    "\"%s\" module loaded: [%s][%s][%s]\n",
                    arglist[0], arglist[0], arglist[1],
                    arglist[2]);
            /* third field is the module's use count */
            if (count)
                *count = atoi(arglist[2]);
            break;
        }
    }
    fclose(modules);

    return module_loaded;
}

/*
 *  Insmod the memory driver module.
 */
static int
insmod_memory_driver_module(void)
{
    FILE *pipe;
    char buf[BUFSIZE];
    char command[BUFSIZE];

    sprintf(command, "/sbin/modprobe %s", pc->memory_module);
    if (CRASHDEBUG(1))
        fprintf(fp, "%s\n", command);
    if ((pipe = popen(command, "r")) == NULL) {
        error(INFO, "%s: %s", command, strerror(errno));
        return FALSE;
    }
    while (fgets(buf, BUFSIZE, pipe))
        fprintf(fp, "%s\n", buf);
    pclose(pipe);

    /* verify the load actually took effect */
    if (!memory_driver_module_loaded(NULL)) {
        error(INFO, "cannot insmod \"%s\" module\n",
            pc->memory_module);
        return FALSE;
    }

    return TRUE;
}

/*
 *  Return the dev_t for the memory device driver.  The major number will
 *  be that of the kernel's misc driver; the minor is dynamically created
 *  when the module is insmod'ed, and found in /proc/misc.
 */
static int
get_memory_driver_dev(dev_t *devp)
{
	char buf[BUFSIZE];
	char *arglist[MAXARGS];
	int argcnt;
	FILE *misc;
	int minor;
	dev_t dev;

	/* Scan /proc/misc for a "<minor> <module-name>" entry. */
	dev = 0;
	if ((misc = fopen("/proc/misc", "r")) == NULL) {
		error(INFO, "/proc/misc: %s", strerror(errno));
	} else {
		while (fgets(buf, BUFSIZE, misc)) {
			argcnt = parse_line(buf, arglist);
			if ((argcnt == 2) &&
			    STREQ(arglist[1], pc->memory_module)) {
				minor = atoi(arglist[0]);
				dev = makedev(MISC_MAJOR, minor);
				if (CRASHDEBUG(1))
					fprintf(fp,
					    "/proc/misc: %s %s => %d/%d\n",
						arglist[0], arglist[1],
						major(dev), minor(dev));
				break;
			}
		}
		fclose(misc);
	}

	if (!dev) {
		error(INFO, "cannot determine minor number of %s driver\n",
			pc->memory_module);
		return FALSE;
	}

	*devp = dev;
	return TRUE;
}

/*
 *  Deal with the creation or verification of the memory device file:
 *
 *   1. If the device exists, and has the correct major/minor device numbers,
 *      nothing needs to be done.
 *   2. If the filename exists, but it's not a device file, has the wrong
 *      major/minor device numbers, or the wrong permissions, advise the
 *      user to delete it.
 *   3. Otherwise, create it.
 */
static int
create_memory_device(dev_t dev)
{
	struct stat stat;	/* local shadows the stat() function name */

	if (file_exists(pc->live_memsrc, &stat)) {
		/*
		 *  It already exists -- just use it.
		 */
		if ((stat.st_mode == MEMORY_DRIVER_DEVICE_MODE) &&
		    (stat.st_rdev == dev))
			return TRUE;

		/*
		 *  Either it's not a device special file, or it's got
		 *  the wrong major/minor numbers, or the wrong permissions.
		 *  Unlink the file -- it shouldn't be there.
		 */
		if (!S_ISCHR(stat.st_mode))
			error(FATAL,
			    "%s: not a character device -- please delete it!\n",
				pc->live_memsrc);
		else if (dev != stat.st_rdev)
			error(FATAL,
			    "%s: invalid device: %d/%d -- please delete it!\n",
				pc->live_memsrc,
				major(stat.st_rdev), minor(stat.st_rdev));
		else
			unlink(pc->live_memsrc);
	}

	/*
	 *  Either it doesn't exist or it was just unlinked.
	 *  In either case, try to create it.
	 */
	if (mknod(pc->live_memsrc, MEMORY_DRIVER_DEVICE_MODE, dev)) {
		error(INFO, "%s: mknod: %s\n",
			pc->live_memsrc, strerror(errno));
		return FALSE;
	}

	return TRUE;
}

/*
 *  If we're here, the memory driver module is being requested:
 *
 *   1. If /dev/crash is built into the kernel, just open it.
 *   2. If the module is not already loaded, insmod it.
 *   3. Determine the misc driver minor device number that it was assigned.
 *   4. Create (or verify) the device file.
 *   5. Then just open it.
 */
static int
memory_driver_init(void)
{
	dev_t dev;

	if (pc->flags & CRASHBUILTIN)
		goto open_device;

	if (!memory_driver_module_loaded(NULL)) {
		if (!insmod_memory_driver_module())
			return FALSE;
	} else
		pc->flags |= MODPRELOAD;	/* don't rmmod on cleanup */

	if (!get_memory_driver_dev(&dev))
		return FALSE;

	if (!create_memory_device(dev))
		return FALSE;

open_device:
	if ((pc->mfd = open(pc->memory_device, O_RDONLY)) < 0) {
		error(INFO, "%s: open: %s\n", pc->memory_device,
			strerror(errno));
		return FALSE;
	}

	return TRUE;
}

/*
 *  Remove the memory driver module and associated device file; the module
 *  is only unloaded if it was loaded by this session (not MODPRELOAD) and
 *  its reference count has dropped to zero.
 */
int
cleanup_memory_driver(void)
{
	int errors, count;
	char command[BUFSIZE];

	count = errors = 0;

	if (pc->flags & KERNEL_DEBUG_QUERY)
		return TRUE;

	close(pc->mfd);

	if (file_exists(pc->memory_device, NULL) &&
	    unlink(pc->memory_device)) {
		error(INFO, "%s: %s\n", pc->memory_device, strerror(errno));
		errors++;
	}

	if (!(pc->flags & MODPRELOAD) &&
	    memory_driver_module_loaded(&count) && !count) {
		sprintf(command, "/sbin/rmmod %s", pc->memory_module);
		if (CRASHDEBUG(1))
			fprintf(fp, "%s\n", command);
		errors += system(command);
	}

	if (errors)
		error(NOTE, "cleanup_memory_driver failed\n");

	return errors ?
	       FALSE : TRUE;
}

/*
 *  Shared state for the do_radix_tree() traversal callbacks below.
 */
struct do_radix_tree_info {
	ulong maxcount;		/* remaining capacity for GATHER mode */
	ulong count;		/* entries seen/stored so far */
	void *data;		/* caller's radix_tree_pair pointer/array */
};

/* Traversal callback: just count every populated slot. */
static void
do_radix_tree_count(ulong node, ulong slot, const char *path,
	ulong index, void *private)
{
	struct do_radix_tree_info *info = private;
	info->count++;
}

/* Traversal callback: capture the slot whose index matches rtp->index. */
static void
do_radix_tree_search(ulong node, ulong slot, const char *path,
	ulong index, void *private)
{
	struct do_radix_tree_info *info = private;
	struct radix_tree_pair *rtp = info->data;

	if (rtp->index == index) {
		rtp->value = (void *)slot;
		info->count = 1;	/* signal "found" to the caller */
	}
}

/* Traversal callback: print each index/value pair as it is visited. */
static void
do_radix_tree_dump(ulong node, ulong slot, const char *path,
	ulong index, void *private)
{
	struct do_radix_tree_info *info = private;
	fprintf(fp, "[%ld] %lx\n", index, slot);
	info->count++;
}

/* Traversal callback: store index/value pairs until maxcount is exhausted. */
static void
do_radix_tree_gather(ulong node, ulong slot, const char *path,
	ulong index, void *private)
{
	struct do_radix_tree_info *info = private;
	struct radix_tree_pair *rtp = info->data;

	if (info->maxcount) {
		rtp[info->count].index = index;
		rtp[info->count].value = (void *)slot;
		info->count++;
		info->maxcount--;
	}
}

/* Traversal callback: invoke the user's callback (rtp->value) per slot. */
static void
do_radix_tree_dump_cb(ulong node, ulong slot, const char *path,
	ulong index, void *private)
{
	struct do_radix_tree_info *info = private;
	struct radix_tree_pair *rtp = info->data;
	int (*cb)(ulong) = rtp->value;

	/* Caller defined operation */
	if (!cb(slot)) {
		error(FATAL, "do_radix_tree: callback "
			"operation failed: entry: %ld item: %lx\n",
			info->count, slot);
	}
	info->count++;
}

/*
 *  do_radix_tree argument usage:
 *
 *    root: Address of a radix_tree_root structure
 *
 *    flag: RADIX_TREE_COUNT - Return the number of entries in the tree.
 *          RADIX_TREE_SEARCH - Search for an entry at rtp->index; if found,
 *            store the entry in rtp->value and return a count of 1; otherwise
 *            return a count of 0.
 *          RADIX_TREE_DUMP - Dump all existing index/value pairs.
* RADIX_TREE_GATHER - Store all existing index/value pairs in the * passed-in array of radix_tree_pair structs starting at rtp, * returning the count of entries stored; the caller can/should * limit the number of returned entries by putting the array size * (max count) in the rtp->index field of the first structure * in the passed-in array. * RADIX_TREE_DUMP_CB - Similar with RADIX_TREE_DUMP, but for each * radix tree entry, a user defined callback at rtp->value will * be invoked. * * rtp: Unused by RADIX_TREE_COUNT and RADIX_TREE_DUMP. * A pointer to a radix_tree_pair structure for RADIX_TREE_SEARCH. * A pointer to an array of radix_tree_pair structures for * RADIX_TREE_GATHER; the dimension (max count) of the array may * be stored in the index field of the first structure to avoid * any chance of an overrun. * For RADIX_TREE_DUMP_CB, the rtp->value must be initialized as a * callback function. The callback prototype must be: int (*)(ulong); */ ulong do_radix_tree(ulong root, int flag, struct radix_tree_pair *rtp) { struct do_radix_tree_info info = { .count = 0, .data = rtp, }; struct radix_tree_ops ops = { .radix = 16, .private = &info, }; switch (flag) { case RADIX_TREE_COUNT: ops.entry = do_radix_tree_count; break; case RADIX_TREE_SEARCH: /* * FIXME: do_radix_tree_traverse() traverses whole * radix tree, not binary search. So this search is * not efficient. 
*/ ops.entry = do_radix_tree_search; break; case RADIX_TREE_DUMP: ops.entry = do_radix_tree_dump; break; case RADIX_TREE_GATHER: if (!(info.maxcount = rtp->index)) info.maxcount = (ulong)(-1); /* caller beware */ ops.entry = do_radix_tree_gather; break; case RADIX_TREE_DUMP_CB: if (rtp->value == NULL) { error(FATAL, "do_radix_tree: need set callback function"); return -EINVAL; } ops.entry = do_radix_tree_dump_cb; break; default: error(FATAL, "do_radix_tree: invalid flag: %lx\n", flag); } do_radix_tree_traverse(root, 1, &ops); return info.count; } int is_readable(char *filename) { int fd; if ((fd = open(filename, O_RDONLY)) < 0) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } else close(fd); return TRUE; } static int match_file_string(char *filename, char *string, char *buffer) { int found; char command[BUFSIZE]; FILE *pipe; sprintf(command, "/usr/bin/strings %s", filename); if ((pipe = popen(command, "r")) == NULL) { error(INFO, "%s: %s\n", filename, strerror(errno)); return FALSE; } found = FALSE; while (fgets(buffer, BUFSIZE-1, pipe)) { if (strstr(buffer, string)) { found = TRUE; break; } } pclose(pipe); return found; } char * vfsmount_devname(ulong vfsmnt, char *buf, int maxlen) { ulong devp; BZERO(buf, maxlen); if (VALID_STRUCT(mount)) { if (!readmem(vfsmnt - OFFSET(mount_mnt) + OFFSET(mount_mnt_devname), KVADDR, &devp, sizeof(void *), "mount mnt_devname", QUIET|RETURN_ON_ERROR)) return buf; } else { if (!readmem(vfsmnt + OFFSET(vfsmount_mnt_devname), KVADDR, &devp, sizeof(void *), "vfsmount mnt_devname", QUIET|RETURN_ON_ERROR)) return buf; } if (read_string(devp, buf, BUFSIZE-1)) return buf; return buf; } static ulong get_root_vfsmount(char *file_buf) { char buf1[BUFSIZE]; char buf2[BUFSIZE]; ulong vfsmnt; ulong mnt_parent; vfsmnt = ULONG(file_buf + OFFSET(file_f_vfsmnt)); if (!strlen(vfsmount_devname(vfsmnt, buf1, BUFSIZE))) return vfsmnt; if (STREQ(buf1, "udev") || STREQ(buf1, "devtmpfs")) { if (VALID_STRUCT(mount)) { if 
(!readmem(vfsmnt - OFFSET(mount_mnt) + OFFSET(mount_mnt_parent), KVADDR, &mnt_parent, sizeof(void *), "mount mnt_parent", QUIET|RETURN_ON_ERROR)) return vfsmnt; } else { if (!readmem(vfsmnt + OFFSET(vfsmount_mnt_parent), KVADDR, &mnt_parent, sizeof(void *), "vfsmount mnt_parent", QUIET|RETURN_ON_ERROR)) return vfsmnt; } if (!strlen(vfsmount_devname(mnt_parent, buf2, BUFSIZE))) return vfsmnt; if (STREQ(buf1, "udev") && STREQ(buf2, "udev")) return mnt_parent; if (STREQ(buf1, "devtmpfs") && STREQ(buf2, "devtmpfs")) return mnt_parent; } return vfsmnt; } void check_live_arch_mismatch(void) { struct utsname utsname; if (machine_type("X86") && (uname(&utsname) == 0) && STRNEQ(utsname.machine, "x86_64")) error(FATAL, "compiled for the X86 architecture\n"); #if defined(__i386__) || defined(__x86_64__) if (machine_type("ARM")) error(FATAL, "compiled for the ARM architecture\n"); #endif #ifdef __x86_64__ if (machine_type("ARM64")) error(FATAL, "compiled for the ARM64 architecture\n"); #endif #ifdef __x86_64__ if (machine_type("PPC64")) error(FATAL, "compiled for the PPC64 architecture\n"); #endif #ifdef __powerpc64__ if (machine_type("PPC")) error(FATAL, "compiled for the PPC architecture\n"); #endif } crash-7.2.1/help.c0000775000000000000000000140176413240637645012447 0ustar rootroot/* help.c - core analysis suite * * Copyright (C) 1999, 2000, 2001, 2002 Mission Critical Linux, Inc. * Copyright (C) 2002-2018 David Anderson * Copyright (C) 2002-2018 Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
*/ #include "defs.h" static void reshuffle_cmdlist(void); static int sort_command_name(const void *, const void *); static void display_commands(void); static void display_copying_info(void); static void display_warranty_info(void); static void display_output_info(void); static void display_input_info(void); static void display_README(void); static char *gnu_public_license[]; static char *gnu_public_license_v3[]; static char *version_info[]; static char *output_info[]; static char *input_info[]; static char *README[]; static void dump_registers(void); #define GPLv2 2 #define GPLv3 3 #if defined(GDB_5_3) || defined(GDB_6_0) || defined(GDB_6_1) static int GPL_version = GPLv2; #else static int GPL_version = GPLv3; #endif static char *program_usage_info[] = { "", "USAGE:", "", " crash [OPTION]... NAMELIST MEMORY-IMAGE[@ADDRESS] (dumpfile form)", " crash [OPTION]... [NAMELIST] (live system form)", "", "OPTIONS:", "", " NAMELIST", " This is a pathname to an uncompressed kernel image (a vmlinux", " file), or a Xen hypervisor image (a xen-syms file) which has", " been compiled with the \"-g\" option. If using the dumpfile form,", " a vmlinux file may be compressed in either gzip or bzip2 formats.", "", " MEMORY-IMAGE", " A kernel core dump file created by the netdump, diskdump, LKCD", " kdump, xendump or kvmdump facilities.", "", " If a MEMORY-IMAGE argument is not entered, the session will be", " invoked on the live system, which typically requires root privileges", " because of the device file used to access system RAM. By default, ", " /dev/crash will be used if it exists. If it does not exist, then ", " /dev/mem will be used; but if the kernel has been configured with ", " CONFIG_STRICT_DEVMEM, then /proc/kcore will be used. It is permissible", " to explicitly enter /dev/crash, /dev/mem or /proc/kcore.", "", " An @ADDRESS value must be appended to the MEMORY-IMAGE if the dumpfile", " is a raw RAM dumpfile that has no header information describing the file", " contents. 
Multiple MEMORY-IMAGE@ADDRESS ordered pairs may be entered,", " with each dumpfile containing a contiguous block of RAM, where the ADDRESS", " value is the physical start address of the block expressed in hexadecimal.", " The physical address value(s) will be used to create a temporary ELF header", " in /var/tmp, which will only exist during the crash session. If a raw RAM", " dumpile represents a live memory source, such as that specified by the QEMU", " mem-path argument of a memory-backend-file object, then \"live:\" must be", " prepended to the MEMORY-IMAGE name.", "", " mapfile", " If the NAMELIST file is not the same kernel that is running", " (live system form), or the kernel that was running when the system", " crashed (dumpfile form), then the System.map file of the original ", " kernel should be entered on the command line.", "", " -h [option]", " --help [option]", " Without an option argument, display a crash usage help message.", " If the option argument is a crash command name, the help page", " for that command is displayed. If it is the string \"input\", a", " page describing the various crash command line input options is", " displayed. If it is the string \"output\", a page describing command", " line output options is displayed. If it is the string \"all\", then", " all of the possible help messages are displayed. After the help", " message is displayed, crash exits.", "", " -s ", " Silently proceed directly to the \"crash>\" prompt without displaying", " any version, GPL, or crash initialization data during startup, and by", " default, runtime command output is not passed to any scrolling command.", "", " -i file", " Execute the command(s) contained in \"file\" prior to displaying ", " the \"crash>\" prompt for interactive user input.", "", " -d num ", " Set the internal debug level. 
The higher the number, the more", " debugging data will be printed when crash initializes and runs.", "", " -S ", " Use /boot/System.map as the mapfile.", "", " -e vi | emacs", " Set the readline(3) command line editing mode to \"vi\" or \"emacs\". ", " The default editing mode is \"vi\".", "", " -f ", " Force the usage of a compressed vmlinux file if its original", " name does not start with \"vmlinux\".", "", " -k ", " Indicate that the NAMELIST file is an LKCD \"Kerntypes\" debuginfo file.", "", " -g [namelist]", " Determine if a vmlinux or xen-syms namelist file contains debugging data.", "", " -t ", " Display the system-crash timestamp and exit.", "", " -L ", " Attempt to lock all of its virtual address space into memory by", " calling mlockall(MCL_CURRENT|MCL_FUTURE) during initialization.", " If the system call fails, an error message will be displayed,", " but the session continues.", "", " -c tty-device", " Open the tty-device as the console used for debug messages.", "", " -p page-size", " If a processor's page size cannot be determined by the dumpfile, ", " and the processor default cannot be used, use page-size.", "", " -o filename", " Only used with the MEMORY-IMAGE@ADDRESS format for raw RAM dumpfiles,", " specifies a filename of a new ELF vmcore that will be created and used", " as the dumpfile. It will be saved to allow future use as a standalone", " vmcore, replacing the original raw RAM dumpfile.", "", " -m option=value", " --machdep option=value", " Pass an option and value pair to machine-dependent code. 
These", " architecture-specific option/pairs should only be required in", " very rare circumstances:", "", " X86_64:", " phys_base=", " irq_eframe_link=", " irq_stack_gap=", " max_physmem_bits=", " kernel_image_size=", " vm=orig (pre-2.6.11 virtual memory address ranges)", " vm=2.6.11 (2.6.11 and later virtual memory address ranges)", " vm=xen (Xen kernel virtual memory address ranges)", " vm=xen-rhel4 (RHEL4 Xen kernel virtual address ranges)", " vm=5level (5-level page tables)", " PPC64:", " vm=orig", " vm=2.6.14 (4-level page tables)", " IA64:", " phys_start=", " init_stack_size=", " vm=4l (4-level page tables)", " ARM:", " phys_base=", " ARM64:", " phys_offset=", " kimage_voffset=", " X86:", " page_offset=", "", " -x ", " Automatically load extension modules from a particular directory.", " The directory is determined by the following order of precedence:", "", " (1) the directory specified in the CRASH_EXTENSIONS shell ", " environment variable", " (2) /usr/lib64/crash/extensions (64-bit architectures)", " (3) /usr/lib/crash/extensions (32-bit architectures)", " (4) the ./extensions subdirectory of the current directory", "", " --active", " Track only the active task on each cpu.", "", " --buildinfo", " Display the crash binary's build date, the user ID of the builder,", " the hostname of the machine where the build was done, the target", " architecture, the version number, and the compiler version.", "", " --memory_module modname", " Use the modname as an alternative kernel module to the crash.ko", " module that creates the /dev/crash device.", "", " --memory_device device", " Use device as an alternative device to the /dev/crash, /dev/mem", " or /proc/kcore devices.", "", " --log dumpfile", " Dump the contents of the kernel log buffer. 
A kernel namelist", " argument is not necessary, but the dumpfile must contain the", " VMCOREINFO data taken from the original /proc/vmcore ELF header.", "", " --no_kallsyms", " Do not use kallsyms-generated symbol information contained within", " kernel module object files.", "", " --no_modules", " Do not access or display any kernel module related information.", "", " --no_ikconfig", " Do not attempt to read configuration data that was built into", " kernels configured with CONFIG_IKCONFIG.", "", " --no_data_debug", " Do not verify the validity of all structure member offsets and", " structure sizes that it uses.", "", " --no_kmem_cache", " Do not initialize the kernel's slab cache infrastructure, and", " commands that use kmem_cache-related data will not work.", "", " --no_elf_notes", " Do not use the registers from the ELF NT_PRSTATUS notes saved", " in a compressed kdump header for backtraces.", "", " --kmem_cache_delay", " Delay the initialization of the kernel's slab cache infrastructure", " until it is required by a run-time command.", "", " --readnow", " Pass this flag to the embedded gdb module, which will override", " the two-stage strategy that it uses for reading symbol tables", " from the NAMELIST. 
If module symbol tables are loaded during", " runtime with the \"mod\" command, the same override will occur.", "", " --smp ", " Specify that the system being analyzed is an SMP kernel.", "", " -v", " --version", " Display the version of the crash utility, the version of the", " embedded gdb module, GPL information, and copyright notices.", "", " --cpus number", " Specify the number of cpus in the SMP system being analyzed.", "", " --osrelease dumpfile", " Display the OSRELEASE vmcoreinfo string from a kdump dumpfile", " header.", "", " --hyper", " Force the session to be that of a Xen hypervisor.", "", " --p2m_mfn pfn", " When a Xen Hypervisor or its dom0 kernel crashes, the dumpfile", " is typically analyzed with either the Xen hypervisor or the dom0", " kernel. It is also possible to analyze any of the guest domU", " kernels if the pfn_to_mfn_list_list pfn value of the guest kernel", " is passed on the command line along with its NAMELIST and the ", " dumpfile.", "", " --xen_phys_start physical-address", " Supply the base physical address of the Xen hypervisor's text", " and static data for older xendump dumpfiles that did not pass", " that information in the dumpfile header.", "", " --zero_excluded", " If the makedumpfile(8) facility has filtered a compressed kdump", " dumpfile to exclude various types of non-essential pages, or has", " marked a compressed or ELF kdump dumpfile as incomplete due to", " an ENOSPC or other error during its creation, any attempt to", " read missing pages will fail. With this flag, reads from any", " of those pages will return zero-filled memory.", "", " --no_panic", " Do not attempt to find the task that was running when the kernel", " crashed. 
Set the initial context to that of the \"swapper\" task", " on cpu 0.", "", " --more ", " Use /bin/more as the command output scroller, overriding the", " default of /usr/bin/less and any settings in either ./.crashrc", " or $HOME/.crashrc.", "", " --less ", " Use /usr/bin/less as the command output scroller, overriding any", " settings in either ./.crashrc or $HOME/.crashrc.", "", " --CRASHPAGER", " Use the output paging command defined in the CRASHPAGER shell", " environment variable, overriding any settings in either ./.crashrc ", " or $HOME/.crashrc.", "", " --no_scroll", " Do not pass run-time command output to any scrolling command.", "", " --no_strip", " Do not strip cloned kernel text symbol names.", "", " --no_crashrc", " Do not execute the commands in either $HOME/.crashrc or ./.crashrc.", "", " --mod directory", " When loading the debuginfo data of kernel modules with the \"mod -S\"", " command, search for their object files in directory instead of in ", " the standard location.", "", " --src directory", " Search for the kernel source code in directory instead of in the", " standard location that is compiled into the debuginfo data.", "", " --reloc size", " When analyzing live x86 kernels configured with a CONFIG_PHYSICAL_START ", " value that is larger than its CONFIG_PHYSICAL_ALIGN value, then it will", " be necessary to enter a relocation size equal to the difference between", " the two values.", "", " --hash count", " Set the number of internal hash queue heads used for list gathering", " and verification. The default count is 32768.", "", " --kaslr offset | auto", " If an x86 or x86_64 kernel was configured with CONFIG_RANDOMIZE_BASE,", " the offset value is equal to the difference between the symbol values ", " compiled into the vmlinux file and their relocated KASLR value. 
If", " set to auto, the KASLR offset value will be automatically calculated.", "", " --minimal", " Bring up a session that is restricted to the log, dis, rd, sym,", " eval, set and exit commands. This option may provide a way to", " extract some minimal/quick information from a corrupted or truncated", " dumpfile, or in situations where one of the several kernel subsystem ", " initialization routines would abort the crash session.", "", " --kvmhost [32|64]", " When examining an x86 KVM guest dumpfile, this option specifies", " that the KVM host that created the dumpfile was an x86 (32-bit)", " or an x86_64 (64-bit) machine, overriding the automatically", " determined value.", "", " --kvmio ", " override the automatically-calculated KVM guest I/O hole size.", "", " --offline [show|hide]", " Show or hide command output that is associated with offline cpus,", " overriding any settings in either ./.crashrc or $HOME/.crashrc.", "", "FILES:", "", " .crashrc", " Initialization commands. The file can be located in the user's", " HOME directory and/or the current directory. Commands found in", " the .crashrc file in the HOME directory are executed before", " those in the current directory's .crashrc file.", "", "ENVIRONMENT VARIABLES:", "", " EDITOR ", " Command input is read using readline(3). If EDITOR is set to", " emacs or vi then suitable keybindings are used. If EDITOR is", " not set, then vi is used. This can be overridden by \"set vi\" or", " \"set emacs\" commands located in a .crashrc file, or by entering", " \"-e emacs\" on the crash command line.", "", " CRASHPAGER", " If CRASHPAGER is set, its value is used as the name of the program", " to which command output will be sent. 
If not, then command output", " output is sent to \"/usr/bin/less -E -X\" by default.", "", " CRASH_MODULE_PATH", " Specifies an alternative directory tree to search for kernel", " module object files.", "", " CRASH_EXTENSIONS", " Specifies a directory containing extension modules that will be", " loaded automatically if the -x command line option is used.", "", NULL }; void program_usage(int form) { if (form == SHORT_FORM) { fprintf(fp, "\nUsage:\n\n"); fprintf(fp, "%s\n%s\n", program_usage_info[3], program_usage_info[4]); fprintf(fp, "\nEnter \"%s -h\" for details.\n", pc->program_name); clean_exit(1); } else { FILE *scroll; char *scroll_command; char **p; if ((scroll_command = setup_scroll_command()) && (scroll = popen(scroll_command, "w"))) fp = scroll; else scroll = NULL; for (p = program_usage_info; *p; p++) { fprintf(fp, *p, pc->program_name); fprintf(fp, "\n"); } fflush(fp); if (scroll) pclose(scroll); clean_exit(0); } } /* * Get an updated count of commands for subsequent help menu display, * reshuffling the deck if this is the first time or if something's changed. */ void help_init(void) { struct command_table_entry *cp; struct extension_table *ext; for (pc->ncmds = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->ncmds++; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->ncmds++; } } if (!pc->cmdlist) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **) malloc(sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot malloc command list space\n"); } else if (pc->ncmds > pc->cmdlistsz) { pc->cmdlistsz = pc->ncmds; if ((pc->cmdlist = (char **)realloc(pc->cmdlist, sizeof(char *) * pc->cmdlistsz)) == NULL) error(FATAL, "cannot realloc command list space\n"); } reshuffle_cmdlist(); } /* * If the command list is modified during runtime, re-shuffle the list * for proper help menu display. 
*/ static void reshuffle_cmdlist(void) { int i, cnt; struct command_table_entry *cp; struct extension_table *ext; for (i = 0; i < pc->cmdlistsz; i++) pc->cmdlist[i] = NULL; for (cnt = 0, cp = pc->cmd_table; cp->name; cp++) { if (!(cp->flags & HIDDEN_COMMAND)) pc->cmdlist[cnt++] = cp->name; } for (ext = extension_table; ext; ext = ext->next) { for (cp = ext->command_table; cp->name; cp++) { if (!(cp->flags & (CLEANUP|HIDDEN_COMMAND))) pc->cmdlist[cnt++] = cp->name; } } if (cnt > pc->cmdlistsz) error(FATAL, "help table malfunction!\n"); qsort((void *)pc->cmdlist, (size_t)cnt, sizeof(char *), sort_command_name); } /* * The help list is in alphabetical order, with exception of the "q" command, * which has historically always been the last command in the list. */ static int sort_command_name(const void *name1, const void *name2) { char **s1, **s2; s1 = (char **)name1; s2 = (char **)name2; if (STREQ(*s1, "q")) return 1; return strcmp(*s1, *s2); } /* * Get help for a command, to dump an internal table, or the GNU public * license copying/warranty information. 
*/ void cmd_help(void) { int c; int oflag; oflag = 0; while ((c = getopt(argcnt, args, "efNDdmM:ngcaBbHhkKsvVoptTzLxOr")) != EOF) { switch(c) { case 'e': dump_extension_table(VERBOSE); return; case 'f': dump_filesys_table(VERBOSE); return; case 'n': case 'D': dumpfile_memory(DUMPFILE_MEM_DUMP); return; case 'x': dump_text_value_cache(VERBOSE); return; case 'd': dump_dev_table(); return; case 'M': dump_machdep_table(stol(optarg, FAULT_ON_ERROR, NULL)); return; case 'm': dump_machdep_table(0); return; case 'g': dump_gdb_data(); return; case 'N': dump_net_table(); return; case 'a': dump_alias_data(); return; case 'b': dump_shared_bufs(); return; case 'B': dump_build_data(); return; case 'c': dump_numargs_cache(); return; case 'H': dump_hash_table(VERBOSE); return; case 'h': dump_hash_table(!VERBOSE); return; case 'k': dump_kernel_table(!VERBOSE); return; case 'K': dump_kernel_table(VERBOSE); return; case 's': dump_symbol_table(); return; case 'V': dump_vm_table(VERBOSE); return; case 'v': dump_vm_table(!VERBOSE); return; case 'O': dump_offset_table(NULL, TRUE); return; case 'o': oflag = TRUE; break; case 'T': dump_task_table(VERBOSE); return; case 't': dump_task_table(!VERBOSE); return; case 'p': dump_program_context(); return; case 'z': fprintf(fp, "help options:\n"); fprintf(fp, " -a - alias data\n"); fprintf(fp, " -b - shared buffer data\n"); fprintf(fp, " -B - build data\n"); fprintf(fp, " -c - numargs cache\n"); fprintf(fp, " -d - device table\n"); fprintf(fp, " -D - dumpfile contents/statistics\n"); fprintf(fp, " -e - extension table data\n"); fprintf(fp, " -f - filesys table\n"); fprintf(fp, " -h - hash_table data\n"); fprintf(fp, " -H - hash_table data (verbose)\n"); fprintf(fp, " -k - kernel_table\n"); fprintf(fp, " -K - kernel_table (verbose)\n"); fprintf(fp, " -L - LKCD page cache environment\n"); fprintf(fp, " -M machine specific\n"); fprintf(fp, " -m - machdep_table\n"); fprintf(fp, " -n - dumpfile contents/statistics\n"); fprintf(fp, " -o - offset_table 
and size_table\n"); fprintf(fp, " -p - program_context\n"); fprintf(fp, " -r - dump registers from dumpfile header\n"); fprintf(fp, " -s - symbol table data\n"); fprintf(fp, " -t - task_table\n"); fprintf(fp, " -T - task_table plus context_array\n"); fprintf(fp, " -v - vm_table\n"); fprintf(fp, " -V - vm_table (verbose)\n"); fprintf(fp, " -x - text cache\n"); return; case 'L': dumpfile_memory(DUMPFILE_ENVIRONMENT); return; case 'r': dump_registers(); return; default: argerrs++; break; } } if (argerrs) cmd_usage(pc->curcmd, COMPLETE_HELP); if (!args[optind]) { if (oflag) dump_offset_table(NULL, FALSE); else display_help_screen(""); return; } do { if (oflag) dump_offset_table(args[optind], FALSE); else cmd_usage(args[optind], COMPLETE_HELP|MUST_HELP); optind++; } while (args[optind]); } static void dump_registers(void) { if (pc->flags2 & QEMU_MEM_DUMP_ELF) { dump_registers_for_qemu_mem_dump(); return; } else if (DISKDUMP_DUMPFILE()) { dump_registers_for_compressed_kdump(); return; } else if (NETDUMP_DUMPFILE() || KDUMP_DUMPFILE()) { dump_registers_for_elf_dumpfiles(); return; } error(FATAL, "-r option not supported on %s\n", ACTIVE() ? "a live system" : "this dumpfile type"); } /* * Format and display the help menu. 
*/ void display_help_screen(char *indent) { int i, j, rows; char **namep; help_init(); fprintf(fp, "\n%s", indent); rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS; for (i = 0; i < rows; i++) { namep = &pc->cmdlist[i]; for (j = 0; j < HELP_COLUMNS; j++) { fprintf(fp,"%-15s", *namep); namep += rows; if ((namep - pc->cmdlist) >= pc->ncmds) break; } fprintf(fp,"\n%s", indent); } fprintf(fp, "\n%s%s version: %-6s gdb version: %s\n", indent, pc->program_name, pc->program_version, pc->gdb_version); fprintf(fp, "%sFor help on any command above, enter \"help \".\n", indent); fprintf(fp, "%sFor help on input options, enter \"help input\".\n", indent); fprintf(fp, "%sFor help on output options, enter \"help output\".\n", indent); #ifdef NO_LONGER_TRUE fprintf(fp, "%sFor the most recent version: " "http://www.missioncriticallinux.com/download\n\n", indent); #else fprintf(fp, "\n"); #endif } /* * Used for generating HTML pages, dump the commands in the order * they would be seen on the help menu, i.e., from left-to-right, row-by-row. * Line ends are signaled with a "BREAK" string. */ static void display_commands(void) { int i, j, rows; char **namep; help_init(); rows = (pc->ncmds + (HELP_COLUMNS-1)) / HELP_COLUMNS; for (i = 0; i < rows; i++) { namep = &pc->cmdlist[i]; for (j = 0; j < HELP_COLUMNS; j++) { fprintf(fp,"%s\n", *namep); namep += rows; if ((namep - pc->cmdlist) >= pc->ncmds) { fprintf(fp, "BREAK\n"); break; } } } } /* * Help data for a command must be formatted using the following template: "command-name", "command description line", "argument-usage line", "description...", "description...", "description...", NULL, * The first line is concatenated with the second line, and will follow the * help command's "NAME" header. * The first and third lines will also be concatenated, and will follow the * help command's "SYNOPSIS" header. If the command has no arguments, enter * a string consisting of a space, i.e., " ". 
 * The fourth and subsequent lines will follow the help command's "DESCRIPTION"
 * header.
 *
 * The program name can be referenced by using the %%s format. The final
 * entry in each command's help data string list must be a NULL.
 */

/*
 * Help data for the "foreach" command, in the template format above.
 */
char *help_foreach[] = {
"foreach",
"display command data for multiple tasks in the system",
"[[pid | taskp | name | state | [kernel | user]] ...]\n"
" command [flag] [argument]",
" This command allows for an examination of various kernel data associated",
" with any, or all, tasks in the system, without having to set the context",
" to each targeted task.\n",
" pid perform the command(s) on this PID.",
" taskp perform the command(s) on task referenced by this hexadecimal",
" task_struct pointer.",
" name perform the command(s) on all tasks with this name. If the",
" task name can be confused with a foreach command name, then",
" precede the name string with a \"\\\". If the name string is",
" enclosed within \"'\" characters, then the encompassed string",
" must be a POSIX extended regular expression that will be used",
" to match task names.",
" user perform the command(s) on all user (non-kernel) threads.",
" gleader perform the command(s) on all user (non-kernel) thread group leaders.",
" kernel perform the command(s) on all kernel threads.",
" active perform the command(s) on the active thread on each CPU.",
" state perform the command(s) on all tasks in the specified state, which",
" may be one of: RU, IN, UN, ST, ZO, TR, SW, DE, WA or PA.\n",
" If none of the task-identifying arguments above are entered, the command",
" will be performed on all tasks.\n",
" command select one or more of the following commands to be run on the tasks",
" selected, or on all tasks:\n",
" bt run the \"bt\" command (optional flags: -r -t -l -e -R -f -F",
" -o -s -x -d)",
" vm run the \"vm\" command (optional flags: -p -v -m -R -d -x)",
" task run the \"task\" command (optional flags: -R -d -x)",
" files run the \"files\" command (optional flag: -c -R)",
" net run the \"net\" command (optional flags: -s -S -R -d -x)",
" set run the \"set\" command",
" ps run the \"ps\" command (optional flags: -G -s -p -c -t -l -a",
" -g -r -y)",
" sig run the \"sig\" command (optional flag: -g)",
" vtop run the \"vtop\" command (optional flags: -c -u -k)\n",
" flag Pass this optional flag to the command selected.",
" argument Pass this argument to the command selected.",
" ",
" A header containing the PID, task address, cpu and command name will be",
" pre-pended before the command output for each selected task. Consult the",
" help page of each of the command types above for details.",
"\nEXAMPLES",
" Display the stack traces for all tasks:\n",
" %s> foreach bt",
" PID: 4752 TASK: c7680000 CPU: 1 COMMAND: \"xterm\"",
" #0 [c7681edc] schedule at c01135f6",
" (void)",
" #1 [c7681f34] schedule_timeout at c01131ff",
" (24)",
" #2 [c7681f64] do_select at c0132838",
" (5, c7681fa4, c7681fa0)",
" #3 [c7681fbc] sys_select at c0132dad",
" (5, 8070300, 8070380, 0, 0)",
" #4 [bffffb0c] system_call at c0109944",
" EAX: 0000008e EBX: 00000005 ECX: 08070300 EDX: 08070380 ",
" DS: 002b ESI: 00000000 ES: 002b EDI: 00000000 ",
" SS: 002b ESP: bffffadc EBP: bffffb0c ",
" CS: 0023 EIP: 402259ee ERR: 0000008e EFLAGS: 00000246 ",
" ",
" PID: 557 TASK: c5600000 CPU: 0 COMMAND: \"nfsd\"",
" #0 [c5601f38] schedule at c01135f6",
" (void)",
" #1 [c5601f90] schedule_timeout at c01131ff",
" (c5600000)",
" #2 [c5601fb8] svc_recv at c805363a",
" (c0096f40, c5602800, 7fffffff, 100, c65c9f1c)",
" #3 [c5601fec] (nfsd module) at c806e303",
" (c5602800, c5602800, c0096f40, 6c6e0002, 50)",
" #4 [c65c9f24] kernel_thread at c010834f",
" (0, 0, ext2_file_inode_operations)",
" ",
" PID: 824 TASK: c7c84000 CPU: 0 COMMAND: \"mingetty\"",
" ...\n",
" Display the task_struct structure for each \"bash\" command:\n",
" %s> foreach bash task",
" ...\n",
" Display the open files for all tasks:\n",
" %s> foreach files",
" ...\n",
" Display the state of tasks whose name contains 
a match to \"event.*\":\n", " %s> foreach 'event.*' task -R state", " PID: 99 TASK: ffff8804750d5500 CPU: 0 COMMAND: \"events/0\"", " state = 1,", " ", " PID: 100 TASK: ffff8804750d4ac0 CPU: 1 COMMAND: \"events/1\"", " state = 1,", " ", " PID: 101 TASK: ffff8804750d4080 CPU: 2 COMMAND: \"events/2\"", " state = 1,", " ...\n", " Display the stack traces for all blocked (TASK_UNINTERRUPTIBLE) tasks:\n", " %s> foreach UN bt", " PID: 428 TASK: ffff880036b6c560 CPU: 1 COMMAND: \"jbd2/dm-1-8\"", " #0 [ffff880035779a70] __schedule at ffffffff815df272", " #1 [ffff880035779b08] schedule at ffffffff815dfacf", " #2 [ffff880035779b18] io_schedule at ffffffff815dfb7f", " #3 [ffff880035779b38] sleep_on_page at ffffffff81119a4e", " #4 [ffff880035779b48] __wait_on_bit at ffffffff815e039f", " #5 [ffff880035779b98] wait_on_page_bit at ffffffff81119bb8", " #6 [ffff880035779be8] filemap_fdatawait_range at ffffffff81119ccc", " #7 [ffff880035779cd8] filemap_fdatawait at ffffffff81119d8b", " #8 [ffff880035779ce8] jbd2_journal_commit_transaction at ffffffff8123a99c", " #9 [ffff880035779e58] kjournald2 at ffffffff8123ee7b", " #10 [ffff880035779ee8] kthread at ffffffff8108fb9c", " #11 [ffff880035779f48] kernel_thread_helper at ffffffff815ebaf4", " ...\n", NULL }; char *help_ascii[] = { "ascii", "translate a hexadecimal string to ASCII", "value ...", " Translates 32-bit or 64-bit hexadecimal values to ASCII. If no argument", " is entered, an ASCII chart is displayed.", "\nEXAMPLES", " Translate the hexadecimal value of 0x62696c2f7273752f to ASCII:", "\n %s> ascii 62696c2f7273752f", " 62696c2f7273752f: /usr/lib", "\n Display an ASCII chart:", "\n %s> ascii", " ", " 0 1 2 3 4 5 6 7", " +-------------------------------", " 0 | NUL DLE SP 0 @ P ' p", " 1 | SOH DC1 ! 
1 A Q a q", " 2 | STX DC2 \" 2 B R b r", " 3 | ETX DC3 # 3 C S c s", " 4 | EOT DC4 $ 4 D T d t", " 5 | ENQ NAK \% 5 E U e u", " 6 | ACK SYN & 6 F V f v", " 7 | BEL ETB ` 7 G W g w", " 8 | BS CAN ( 8 H X h x", " 9 | HT EM ) 9 I Y i y", " A | LF SUB * : J Z j z", " B | VT ESC + ; K [ k {", " C | FF FS , < L \\ l |", " D | CR GS _ = M ] m }", " E | SO RS . > N ^ n ~", " F | SI US / ? O - o DEL", NULL }; char *help_quit[] = { "quit", "exit this session", " ", " Bail out of the current %s session.", "\nNOTE", " This command is equivalent to the \"exit\" command.", NULL }; char *help_exit[] = { "exit", "exit this session", " ", " Bail out of the current %s session.", "\nNOTE", " This command is equivalent to the \"q\" command.", NULL }; char *help_help[] = { "help", "get help", "[command | all] [-